From 2c0d1b8bffbe3034ad57d0fe8eb60b57a252d1fe Mon Sep 17 00:00:00 2001
From: Iwan Kawrakow
Date: Wed, 23 Jul 2025 17:33:35 +0200
Subject: [PATCH] iq4_kss: repack/convert to q8_k_r8 (NEON)

---
 ggml/src/iqk/iqk_gemm_iqk_quants.cpp | 52 ++++++++++++++++++++++++++++
 ggml/src/iqk/iqk_mul_mat.cpp         |  1 +
 2 files changed, 53 insertions(+)

diff --git a/ggml/src/iqk/iqk_gemm_iqk_quants.cpp b/ggml/src/iqk/iqk_gemm_iqk_quants.cpp
index 76c883ea..d8af0209 100644
--- a/ggml/src/iqk/iqk_gemm_iqk_quants.cpp
+++ b/ggml/src/iqk/iqk_gemm_iqk_quants.cpp
@@ -4773,6 +4773,57 @@ void iqk_convert_iq2_kl_q8_k_r8(int n, const void * vx, size_t bx, void * vy, in
     }
 }
 
+void iqk_convert_iq4_kss_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
+    GGML_ASSERT(n%QK_K == 0);
+    GGML_ASSERT(nrc_x%8 == 0);
+
+    int nb = n/QK_K;
+
+    const block_iq4_kss * x8[8];
+
+    block_q8_k_r8 * y = (block_q8_k_r8 *)vy;
+
+    auto values = vld1q_s8_x2(iq4k_values);
+
+    float drow[8];
+    float dnew[8];
+    int8_t ls[16];
+
+    int8x16x2_t xv[8];
+    uint32_t block[8];
+
+    for (int ix = 0; ix < nrc_x; ix += 8) {
+        for (int k = 0; k < 8; ++k) {
+            const float * dptr = (const float *)((const char *)vx + (ix + k)*bx);
+            drow[k] = dptr[0];
+            x8[k] = (const block_iq4_kss *)(dptr + 1);
+        }
+        auto vd = vld1q_f32_x2(drow);
+        for (int i = 0; i < nb; ++i) {
+            for (int k = 0; k < 8; ++k) {
+                for (int ib32 = 0; ib32 < 8; ++ib32) {
+                    auto q4 = x8[k][i].qs + 4*ib32;
+                    uint32_t s32 = (q4[0] & 0x00010001) | ((q4[1] & 0x00010001) << 2) | ((q4[2] & 0x00010001) << 4) | ((q4[3] & 0x00010001) << 6);
+                    uint8_t s8 = (s32 | (s32 >> 15)) & 0xff;
+                    ls[2*ib32+0] = ls[2*ib32+1] = ((s8 & 254) - 127);
+                    auto val16 = vandq_u16(vld1q_u16((const uint16_t *)q4), vdupq_n_u16(0xfffe));
+                    auto val8  = vreinterpretq_u8_u16(veorq_u16(val16, vshrq_n_u16(val16, 1)));
+                    auto& block_values = values.val[s8 & 1];
+                    xv[ib32].val[0] = vqtbl1q_s8(block_values, vandq_u8(val8, vdupq_n_u8(0xf)));
+                    xv[ib32].val[1] = vqtbl1q_s8(block_values, vshrq_n_u8(val8, 4));
+                }
+                dnew[k] = convert_to_q8_k_r8(1.f/127, xv, ls, block, (uint32_t *)y[i].qs + k);
+            }
+            auto d = vld1q_f32_x2(dnew);
+            d.val[0] = vmulq_f32(d.val[0], vd.val[0]);
+            d.val[1] = vmulq_f32(d.val[1], vd.val[1]);
+            vst1_f16((float16_t *)y[i].d + 0, vcvt_f16_f32(d.val[0]));
+            vst1_f16((float16_t *)y[i].d + 4, vcvt_f16_f32(d.val[1]));
+        }
+        y += nb;
+    }
+}
+
 void iqk_convert_iq4_ks_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
     GGML_ASSERT(n%QK_K == 0);
     GGML_ASSERT(nrc_x%8 == 0);
@@ -5218,6 +5269,7 @@ bool iqk_convert_iqk_quants_q80_r8(int type, int n, const void * vx, size_t bx,
         case GGML_TYPE_IQ2_KL : iqk_convert_iq2_kl_q8_k_r8(n, vx, bx, vy, nrc_x); break;
         case GGML_TYPE_IQ3_KS : iqk_convert_iq3_ks_q8_k_r8(n, vx, bx, vy, nrc_x); break;
         case GGML_TYPE_IQ3_K  : iqk_convert_iq3_k_q8_k_r8 (n, vx, bx, vy, nrc_x); break;
+        case GGML_TYPE_IQ4_KSS: iqk_convert_iq4_kss_q8_k_r8(n, vx, bx, vy, nrc_x); break;
         case GGML_TYPE_IQ4_KS : iqk_convert_iq4_ks_q8_k_r8(n, vx, bx, vy, nrc_x); break;
         case GGML_TYPE_IQ4_K  : iqk_convert_iq4_k_q8_k_r8 (n, vx, bx, vy, nrc_x); break;
         case GGML_TYPE_IQ5_KS : iqk_convert_iq5_ks_q8_k_r8(n, vx, bx, vy, nrc_x); break;
diff --git a/ggml/src/iqk/iqk_mul_mat.cpp b/ggml/src/iqk/iqk_mul_mat.cpp
index fc3dbac3..6b972273 100644
--- a/ggml/src/iqk/iqk_mul_mat.cpp
+++ b/ggml/src/iqk/iqk_mul_mat.cpp
@@ -302,6 +302,7 @@ struct MulMat {
             case GGML_TYPE_IQ2_KS : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
             case GGML_TYPE_IQ2_KL : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
             case GGML_TYPE_IQ3_KS : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
+            case GGML_TYPE_IQ4_KSS: return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
             case GGML_TYPE_IQ4_KS : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
             case GGML_TYPE_IQ5_KS : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
             case GGML_TYPE_IQ2_K  : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
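
Decode logic, for reference: each iq4_kss row starts with one float32 row scale followed by block_iq4_kss blocks, and each 32-quant sub-block occupies four uint32 words in which bit 0 of every 16-bit half is borrowed for a packed 8-bit field (7 bits of sub-block scale plus 1 bit selecting which half of iq4k_values to index). The following is an illustrative scalar equivalent of the per-sub-block decode performed by the NEON intrinsics in iqk_convert_iq4_kss_q8_k_r8; the helper name decode_iq4_kss_block32 and its signature are made up for this note and do not appear in the patch.

    // Scalar sketch (assumption: mirrors the NEON decode above; not part of the commit).
    // q4 points at the 4 uint32 words of one 32-quant sub-block, iq4k_values at the
    // 32-entry non-linear lookup table. Writes 32 dequantized int8 values and the
    // signed sub-block scale.
    static void decode_iq4_kss_block32(const uint32_t * q4, const int8_t * iq4k_values,
                                       int8_t * out, int * scale) {
        // Collect bit 0 of each 16-bit half of the four words into one byte:
        // bits 1..7 form the sub-block scale, bit 0 selects the value-table half.
        uint32_t s32 = (q4[0] & 0x00010001) | ((q4[1] & 0x00010001) << 2)
                     | ((q4[2] & 0x00010001) << 4) | ((q4[3] & 0x00010001) << 6);
        uint8_t  s8  = (s32 | (s32 >> 15)) & 0xff;
        *scale = (s8 & 254) - 127;
        const int8_t * values = iq4k_values + 16*(s8 & 1);
        const uint16_t * q16 = (const uint16_t *)q4;
        for (int j = 0; j < 16; ++j) {                       // 16 payload bytes -> 32 nibbles
            uint16_t v = (uint16_t)(q16[j/2] & 0xfffe);      // drop the borrowed bit
            v ^= v >> 1;                                     // same xor-with-shift as veorq_u16/vshrq_n_u16 above
            uint8_t b = (uint8_t)(v >> (8*(j & 1)));
            out[j +  0] = values[b & 0xf];                   // low nibbles  -> first 16 quants (xv.val[0])
            out[j + 16] = values[b >>  4];                   // high nibbles -> last 16 quants (xv.val[1])
        }
    }

convert_to_q8_k_r8 then requantizes the decoded values with the 1/127 scale, and the per-row float is folded back into the q8_k_r8 block scales via the two vmulq_f32 calls. The iqk_mul_mat.cpp hunk only enables this repack when at least 32 activation rows are multiplied (nrc_y >= 32), matching the other IQK types in that switch.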