From b3417c93666c542fbe70d3b5df8eb386aa9ca33a Mon Sep 17 00:00:00 2001
From: Iwan Kawrakow
Date: Wed, 25 Jun 2025 14:47:59 +0300
Subject: [PATCH] iqk_r4 quants: use MMQ only for batches < 1024 tokens

---
 ggml/src/ggml-cuda/mmq.cu | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/ggml/src/ggml-cuda/mmq.cu b/ggml/src/ggml-cuda/mmq.cu
index 16c9d2f5..1788f7a4 100644
--- a/ggml/src/ggml-cuda/mmq.cu
+++ b/ggml/src/ggml-cuda/mmq.cu
@@ -196,11 +196,13 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) {
         case GGML_TYPE_IQ2_KT:
         case GGML_TYPE_IQ3_KT:
         case GGML_TYPE_IQ4_KT:
+            mmq_supported = true;
+            break;
         case GGML_TYPE_IQ2_K_R4:
         case GGML_TYPE_IQ3_K_R4:
         case GGML_TYPE_IQ4_K_R4:
         case GGML_TYPE_IQ5_K_R4:
-            mmq_supported = true;
+            mmq_supported = ne11 < 1024;
             break;
         default:
             mmq_supported = false;