diff --git a/ggml/src/iqk/iqk_gemm_iquants.cpp b/ggml/src/iqk/iqk_gemm_iquants.cpp
index 44e73f9a..e0b2b65f 100644
--- a/ggml/src/iqk/iqk_gemm_iquants.cpp
+++ b/ggml/src/iqk/iqk_gemm_iquants.cpp
@@ -3348,20 +3348,104 @@ static void mul_mat_iq3_s_r4_q8_k(int n, const void * vx, size_t bx, const DataI
     }
 }
 
+inline float convert_to_q8_k_r8(float d0, const int8x16x2_t * qx, const int8_t * scales, uint32_t * block, uint32_t * q8_k) {
+    auto max_i16 = vdupq_n_u16(0);
+    int16x8x4_t q[8];
+    for (int ib32 = 0; ib32 < 8; ++ib32) {
+        auto scale_l = vdup_n_s8(scales[2*ib32+0]);
+        auto scale_h = vdup_n_s8(scales[2*ib32+1]);
+        q[ib32].val[0] = vmull_s8(scale_l, vget_low_s8 (qx[ib32].val[0]));
+        q[ib32].val[1] = vmull_s8(scale_l, vget_high_s8(qx[ib32].val[0]));
+        q[ib32].val[2] = vmull_s8(scale_h, vget_low_s8 (qx[ib32].val[1]));
+        q[ib32].val[3] = vmull_s8(scale_h, vget_high_s8(qx[ib32].val[1]));
+        max_i16 = vmaxq_u16(max_i16, vmaxq_u16(vabsq_s16(q[ib32].val[0]), vabsq_s16(q[ib32].val[1])));
+        max_i16 = vmaxq_u16(max_i16, vmaxq_u16(vabsq_s16(q[ib32].val[2]), vabsq_s16(q[ib32].val[3])));
+    }
+    uint16_t imax = vmaxvq_u16(max_i16);
+    if (!imax) {
+        for (int ib32 = 0; ib32 < 8; ++ib32) for (int l = 0; l < 8; ++l) q8_k[64*ib32 + 8*l] = 0;
+        return 0.f;
+    }
+    float dnew = float(imax) * d0;
+    //auto max_u32 = vmaxq_u32(vmovl_u16(vget_low_u16(max_i16)), vmovl_u16(vget_high_u16(max_i16)));
+    //auto max_f32 = vcvtq_f32_u32(max_u32);
+    //auto dnew = vmaxvq_f32(max_f32) * d0;
+    bool needs_scaling = true;
+    if (dnew <= 1.f) {
+        dnew = 1.f; needs_scaling = false;
+    }
+    auto scale = vdupq_n_f32(1/dnew);
+    for (int ib32 = 0; ib32 < 8; ++ib32) {
+        if (needs_scaling) {
+            for (int l = 0; l < 4; ++l) {
+                auto i1 = vcvtnq_s32_f32(vmulq_f32(scale, vcvtq_f32_s32(vmovl_s16(vget_low_s16 (q[ib32].val[l])))));
+                auto i2 = vcvtnq_s32_f32(vmulq_f32(scale, vcvtq_f32_s32(vmovl_s16(vget_high_s16(q[ib32].val[l])))));
+                q[ib32].val[l] = vcombine_s16(vmovn_s32(i1), vmovn_s32(i2));
+            }
+        }
+        for (int l = 0; l < 2; ++l) {
+            auto s8 = vcombine_s8(vmovn_s16(q[ib32].val[2*l+0]), vmovn_s16(q[ib32].val[2*l+1]));
+            vst1q_s8((int8_t *)block + 16*l, s8);
+        }
+        auto qb = q8_k + 64*ib32;
+        for (int l = 0; l < 8; ++l) {
+            qb[8*l] = block[l];
+        }
+    }
+    return dnew;
+}
+
+void iqk_convert_iq2_xxs_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
+    GGML_ASSERT(n%QK_K == 0);
+    GGML_ASSERT(nrc_x%8 == 0);
+
+    int nb = n/QK_K;
+
+    const block_iq2_xxs * x8[8];
+
+    block_q8_k_r8 * y = (block_q8_k_r8 *)vy;
+
+    int8_t ls[16];
+    uint32_t block[8];
+    uint32_t aux32[2];
+    const uint8_t * aux8 = (const uint8_t *)aux32;
+
+    int8x16x2_t xv[8];
+
+    for (int ix = 0; ix < nrc_x; ix += 8) {
+        for (int k = 0; k < 8; ++k) x8[k] = (const block_iq2_xxs *)((const char *)vx + (ix + k)*bx);
+        for (int i = 0; i < nb; ++i) {
+            for (int k = 0; k < 8; ++k) {
+                float d = 0.125f * GGML_FP16_TO_FP32(x8[k][i].d);
+                for (int ib32 = 0; ib32 < 8; ++ib32) {
+                    std::memcpy(aux32, x8[k][i].qs + 4*ib32, 2*sizeof(uint32_t));
+                    ls[2*ib32+0] = ls[2*ib32+1] = (2*(aux32[1] >> 28) + 1);
+                    xv[ib32].val[0] = vreinterpretq_s8_u64(uint64x2_t{iq2xxs_grid[aux8[0]], iq2xxs_grid[aux8[1]]});
+                    xv[ib32].val[1] = vreinterpretq_s8_u64(uint64x2_t{iq2xxs_grid[aux8[2]], iq2xxs_grid[aux8[3]]});
+                    apply_signs_2((uint8x16_t *)xv[ib32].val, keven_signs, aux32[1]);
+                }
+                float dnew = convert_to_q8_k_r8(1.f/124, xv, ls, block, (uint32_t *)y[i].qs + k);
+                y[i].d[k] = GGML_FP32_TO_FP16(d*dnew);
+            }
+        }
+        y += nb;
+    }
+}
+
+
 }
 
 bool iqk_convert_iquants_q80_r8([[maybe_unused]] int type, int n, [[maybe_unused]] const void * vx, [[maybe_unused]] size_t bx, [[maybe_unused]] void * vy, int nrc_x) {
     if (n%QK_K != 0 || nrc_x%8 != 0) return false;
-    return false;
-    //switch (ggml_type(type)) {
-    //    case GGML_TYPE_IQ2_XXS: iqk_convert_iq2_xxs_q8_k_r8(n, vx, bx, vy, nrc_x); break;
+    switch (ggml_type(type)) {
+        case GGML_TYPE_IQ2_XXS: iqk_convert_iq2_xxs_q8_k_r8(n, vx, bx, vy, nrc_x); break;
     //    case GGML_TYPE_IQ2_XS : iqk_convert_iq2_xs_q8_k_r8 (n, vx, bx, vy, nrc_x); break;
     //    case GGML_TYPE_IQ2_S  : iqk_convert_iq2_s_q8_k_r8  (n, vx, bx, vy, nrc_x); break;
     //    case GGML_TYPE_IQ3_XXS: iqk_convert_iq3_xxs_q8_k_r8(n, vx, bx, vy, nrc_x); break;
     //    case GGML_TYPE_IQ3_S  : iqk_convert_iq3_s_q8_k_r8  (n, vx, bx, vy, nrc_x); break;
-    //    default: return false;
-    //}
-    //return true;
+        default: return false;
+    }
+    return true;
 }
 
 bool iqk_set_kernels_iquants(int ne00, int typeA, int typeB, std::array& kernels, mul_mat_t& func16) {
diff --git a/ggml/src/iqk/iqk_mul_mat.cpp b/ggml/src/iqk/iqk_mul_mat.cpp
index 4d7083e2..cb9c2bf6 100644
--- a/ggml/src/iqk/iqk_mul_mat.cpp
+++ b/ggml/src/iqk/iqk_mul_mat.cpp
@@ -271,6 +271,7 @@ struct MulMat {
     }
 #else
     switch (type) {
+        case GGML_TYPE_IQ2_XXS: return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
         case GGML_TYPE_Q4_0   : return nrc_y >= 32 ? GGML_TYPE_Q8_0_R8 : type;
         case GGML_TYPE_Q4_1   : return nrc_y >= 32 ? GGML_TYPE_Q8_1    : type;
         case GGML_TYPE_Q5_0   : return nrc_y >= 32 ? GGML_TYPE_Q8_0_R8 : type;