diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu
index 42012b9a..e622d919 100644
--- a/ggml/src/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda.cu
@@ -1252,13 +1252,14 @@ static void ggml_cuda_op_mul_mat_cublas(
                         src1_ptr, CUDA_R_16BF, ne10,
                 &beta_f32, dst_bf16.get(), CUDA_R_16BF, ldc,
                 CUBLAS_COMPUTE_32F,
-                //CUBLAS_COMPUTE_16BF,
                 CUBLAS_GEMM_DEFAULT_TENSOR_OP));
 
         const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_BF16);
         to_fp32_cuda(dst_bf16.get(), dst_dd_i, row_diff*src1_ncols, stream);
+        return;
     }
-    else if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) {
+
+    if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) {
         // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32
         ggml_cuda_pool_alloc<half> src0_as_f16(ctx.pool(id));
         if (src0->type != GGML_TYPE_F16) {
@@ -1967,9 +1968,6 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor
         any_gpus_with_slow_fp16 = any_gpus_with_slow_fp16 || !fast_fp16_available(cc);
     }
 
-    //printf("%s: %s(%s) x %s(%s), %d %d %d %d %d %d\n", __func__, src0->name, ggml_type_name(src0->type), src1->name, ggml_type_name(src1->type),
-    //        use_dequantize_mul_mat_vec, use_mul_mat_vec_q, use_mul_mat_q, split, use_mul_mat_q, any_gpus_with_slow_fp16);
-
     // debug helpers
     //printf("src0: %8d %8d %8d %8d\n", src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3]);
     //printf("      %8d %8d %8d %8d\n", src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3]);
@@ -1980,28 +1978,21 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor
 
     if (!split && any_gpus_with_slow_fp16 && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
         // FP32 precision KQ single-batch for batch size 1 without FlashAttention
-        //printf(" branch 1\n");
         ggml_cuda_mul_mat_vec_p021(ctx, src0, src1, dst);
     } else if (!split && any_gpus_with_slow_fp16 && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
-        //printf(" branch 2\n");
         // FP32 precision KQV single-batch for batch size 1 without FlashAttention
         ggml_cuda_mul_mat_vec_nc(ctx, src0, src1, dst);
     } else if (!split && src0->type == GGML_TYPE_F16 && (src1->type == GGML_TYPE_F16 || !any_gpus_with_slow_fp16) && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) {
-        //printf(" branch 3\n");
         // KQ + KQV multi-batch without FlashAttention
         ggml_cuda_mul_mat_batched_cublas(ctx, src0, src1, dst);
     } else if (use_dequantize_mul_mat_vec) {
-        //printf(" branch 4\n");
         ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_dequantize_mul_mat_vec, nullptr);
     } else if (use_mul_mat_vec_q) {
-        //printf(" branch 5\n");
         ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_vec_q, quantize_row_q8_1_cuda);
     } else if (use_mul_mat_q) {
-        //printf(" branch 6\n");
         ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_q, quantize_mmq_q8_1_cuda);
     } else {
-        //printf(" branch 7\n");
         ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_cublas, nullptr);
     }
 }
@@ -2775,7 +2766,6 @@ GGML_CALL static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t
 
 GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
     ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *) backend->context;
-    //printf("%s(%s, %s)\n", __func__, op->name, ggml_op_name(op->op));
     switch (op->op) {
         case GGML_OP_UNARY:
             switch (ggml_get_unary_op(op)) {
@@ -2798,7 +2788,6 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
                 struct ggml_tensor * a = op->src[0];
                 struct ggml_tensor * b = op->src[1];
                 if (b->type == GGML_TYPE_F16 && a->type != GGML_TYPE_F16) {
-                    //printf("%s(%s x %s, %s, %s)\n", __func__, a->name, b->name, ggml_type_name(a->type), ggml_type_name(b->type));
                     return false;
                 }
                 if (op->op == GGML_OP_MUL_MAT && a->ne[3] != b->ne[3]) {
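For context, a minimal standalone sketch of the cuBLAS call pattern the first hunk settles on: BF16 inputs and outputs with FP32 accumulation. The wrapper name gemm_bf16_f32acc and the fixed alpha/beta are illustrative only, not part of the patch:

#include <cublas_v2.h>
#include <cuda_bf16.h>

// Hypothetical helper (not in the patch): C = A^T * B with BF16 inputs/outputs,
// mirroring the CUBLAS_COMPUTE_32F + CUBLAS_GEMM_DEFAULT_TENSOR_OP configuration
// kept by the hunk above. alpha/beta are passed as float because the compute
// type is CUBLAS_COMPUTE_32F.
static cublasStatus_t gemm_bf16_f32acc(cublasHandle_t handle, int m, int n, int k,
        const nv_bfloat16 * A, int lda,   // A is k x m (column-major), read transposed
        const nv_bfloat16 * B, int ldb,   // B is k x n
        nv_bfloat16       * C, int ldc) { // C is m x n
    const float alpha = 1.0f;
    const float beta  = 0.0f;
    return cublasGemmEx(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k,
            &alpha, A, CUDA_R_16BF, lda,
                    B, CUDA_R_16BF, ldb,
            &beta,  C, CUDA_R_16BF, ldc,
            CUBLAS_COMPUTE_32F,
            CUBLAS_GEMM_DEFAULT_TENSOR_OP);
}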
diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu
index cabe64db..e9bbe787 100644
--- a/ggml/src/ggml-cuda/convert.cu
+++ b/ggml/src/ggml-cuda/convert.cu
@@ -935,14 +935,6 @@ static __global__ void convert_from_bf16(const nv_bfloat16 * __restrict__ x, dst
     }
 
     y[i] = __bfloat162float(x[i]);
-
-    //typedef union { uint32_t u; float f; } aux_t;
-
-    //const uint16_t * u16 = (const uint16_t *) x;
-    //aux_t aux;
-    //aux.u = u16[i] << 16;
-
-    //y[i] = aux.f;
 }
 
 static __global__ void convert_to_bf16(const float * __restrict__ x, nv_bfloat16 * __restrict__ y, const int64_t k) {
@@ -978,9 +970,6 @@ static void convert_from_bf16_cuda(const void * __restrict__ vx, dst_t * __restr
     convert_from_bf16<<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>((const nv_bfloat16 *)vx, y, k);
 }
 
-//=> to_bf16_cuda_t = void(*)(const void * __restrict__ x, nv_bfloat16 * y, int64_t k, cudaStream_t stream);
-
-
 template <typename src_t>
 static void convert_to_bf16_cuda(const void * __restrict__ vx, nv_bfloat16 * __restrict__ y, const int64_t k, cudaStream_t stream) {
     const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
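Side note on the dead code removed from convert_from_bf16: the commented-out union variant was an equivalent way to widen BF16 to FP32, since a BF16 value is just the upper 16 bits of an IEEE FP32. A self-contained sketch showing both paths (the kernel name is illustrative, not from the patch):

#include <cuda_bf16.h>
#include <cstdint>

// Illustrative kernel (not part of the patch): converts BF16 to FP32 two ways.
// Shifting the raw 16 bits into the upper half of a 32-bit word yields the same
// value as __bfloat162float() for every input, including inf and NaN, which is
// why the union variant was redundant and could be deleted.
static __global__ void bf16_to_f32_demo(const nv_bfloat16 * __restrict__ x,
                                        float * __restrict__ y, const int64_t k) {
    const int64_t i = (int64_t)blockDim.x*blockIdx.x + threadIdx.x;
    if (i >= k) {
        return;
    }

    // Path 1: intrinsic conversion (what the kernel now uses).
    y[i] = __bfloat162float(x[i]);

    // Path 2: the removed bit-twiddling equivalent.
    union { uint32_t u; float f; } aux;
    aux.u = (uint32_t)((const uint16_t *) x)[i] << 16;
    y[i] = aux.f;
}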