diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu
index e622d919..87d7e17e 100644
--- a/ggml/src/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda.cu
@@ -1235,7 +1235,7 @@ static void ggml_cuda_op_mul_mat_cublas(
             GGML_ASSERT(to_bf16_cuda != nullptr);
             size_t ne = src1_ncols*ne10;
             src1_as_bf16.alloc(ne);
-            to_bf16_cuda(src1_ddf_i, src1_as_bf16.get(), ne, stream);
+            to_bf16_cuda(src1_ddf_i, src1_as_bf16.get(), src1_ncols, ne10, stream);
         }
         const nv_bfloat16 * src1_ptr = src1->type == GGML_TYPE_BF16 ? (const nv_bfloat16 *) src1_ddf_i : src1_as_bf16.get();
         const nv_bfloat16 * src0_ptr = (const nv_bfloat16 *)src0_dd_i;
@@ -1255,7 +1255,7 @@ static void ggml_cuda_op_mul_mat_cublas(
                 CUBLAS_GEMM_DEFAULT_TENSOR_OP));

        const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_BF16);
-        to_fp32_cuda(dst_bf16.get(), dst_dd_i, row_diff*src1_ncols, stream);
+        to_fp32_cuda(dst_bf16.get(), dst_dd_i, row_diff, src1_ncols, stream);

        return;
    }
diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu
index e9bbe787..4b1be7c1 100644
--- a/ggml/src/ggml-cuda/convert.cu
+++ b/ggml/src/ggml-cuda/convert.cu
@@ -965,13 +965,15 @@ static void convert_unary_cuda(const void * __restrict__ vx, dst_t * __restrict_
 }

 template <typename dst_t>
-static void convert_from_bf16_cuda(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t k, cudaStream_t stream) {
+static void convert_from_bf16_cuda(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
+    const int64_t k = nrows*n_per_row;
     const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
     convert_from_bf16<<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>((const nv_bfloat16 *)vx, y, k);
 }

 template <typename src_t>
-static void convert_to_bf16_cuda(const void * __restrict__ vx, nv_bfloat16 * __restrict__ y, const int64_t k, cudaStream_t stream) {
+static void convert_to_bf16_cuda(const void * __restrict__ vx, nv_bfloat16 * __restrict__ y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
+    const int64_t k = nrows*n_per_row;
     const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
     convert_to_bf16<<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>((const src_t *)vx, y, k);
 }