diff --git a/ggml/src/iqk/iqk_gemm_1bit.cpp b/ggml/src/iqk/iqk_gemm_1bit.cpp
index d6d2a3c9..46f1820c 100644
--- a/ggml/src/iqk/iqk_gemm_1bit.cpp
+++ b/ggml/src/iqk/iqk_gemm_1bit.cpp
@@ -1706,6 +1706,56 @@ void iqk_convert_iq1_s_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int
     }
 }
 
+void iqk_convert_iq1_m_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) { // repack IQ1_M rows, 8 at a time, into Q8_K_R8 (mirrors iqk_convert_iq1_s_q8_k_r8 above)
+    GGML_ASSERT(n%QK_K == 0);
+    GGML_ASSERT(nrc_x%8 == 0);
+
+    int nb = n/QK_K; // super-blocks per row
+
+    const block_iq1_m * x8[8]; // source pointers for the 8 rows being interleaved
+
+    block_q8_k_r8 * y = (block_q8_k_r8 *)vy;
+
+    int16_t ls[16]; // two block scales per 32-value block -> 16 per super-block
+
+    uint32_t block[8]; // scratch for convert_to_q8_k_r8
+
+    __m256i qx[8]; // dequantized integer values of one super-block (32 bytes per 32-value block)
+
+    for (int ix = 0; ix < nrc_x; ix += 8) {
+        for (int k = 0; k < 8; ++k) x8[k] = (const block_iq1_m *)((const char *)vx + (ix + k)*bx);
+        for (int i = 0; i < nb; ++i) {
+            for (int k = 0; k < 8; ++k) {
+                const uint16_t * sc = (const uint16_t *)x8[k][i].scales;
+                iq1m_scale_t scale;
+                scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); // reassemble the fp16 super-block scale from the top 4 bits of each scales word
+                float d = 0.125f * GGML_FP16_TO_FP32(scale.f16); // the 1/8 undoes the <<3 applied to the grid values below
+                auto qs = x8[k][i].qs;
+                auto qh = x8[k][i].qh;
+                __m256i value;
+                for (int ib32 = 0; ib32 < 8; ++ib32) {
+                    ls[2*ib32 + 0] = (2*((sc[ib32/2] >> (6*(ib32%2)+0)) & 0x7) + 1); // 3-bit block scales mapped to odd values 1..15
+                    ls[2*ib32 + 1] = (2*((sc[ib32/2] >> (6*(ib32%2)+3)) & 0x7) + 1);
+                    value = _mm256_set_epi64x(iq1s_grid[qs[3] | ((qh[1] << 4) & 0x700)], iq1s_grid[qs[2] | ((qh[1] << 8) & 0x700)], // 4 groups of 8 grid bytes; top 3 index bits come from qh
+                                              iq1s_grid[qs[1] | ((qh[0] << 4) & 0x700)], iq1s_grid[qs[0] | ((qh[0] << 8) & 0x700)]);
+                    value = _mm256_slli_epi16(_mm256_add_epi8(value, _mm256_set1_epi8(1)), 3); // (g+1)<<3; NOTE(review): assumes grid bytes are small enough that the 16-bit shift cannot carry across byte lanes - confirm against iq1s_grid
+                    int64_t delta1 = qh[0] & 0x08 ? 0x0909090909090909 : 0x0707070707070707; // per-8-value delta: subtracting 9 or 7 turns (g+1)<<3 into 8*g-1 or 8*g+1
+                    int64_t delta2 = qh[0] & 0x80 ? 0x0909090909090909 : 0x0707070707070707;
+                    int64_t delta3 = qh[1] & 0x08 ? 0x0909090909090909 : 0x0707070707070707;
+                    int64_t delta4 = qh[1] & 0x80 ? 0x0909090909090909 : 0x0707070707070707;
+                    value = _mm256_sub_epi8(value, _mm256_set_epi64x(delta4, delta3, delta2, delta1));
+                    qx[ib32] = value;
+                    qs += 4; // 4 index bytes per 32-value block
+                    qh += 2; // 2 high-bit/delta bytes per 32-value block
+                }
+                float dnew = convert_to_q8_k_r8(k, 126, qx, ls, block, y[i].qs); // NOTE(review): 126 presumably the target max magnitude used by convert_to_q8_k_r8 - confirm
+                y[i].d[k] = GGML_FP32_TO_FP16(d*dnew);
+            }
+        }
+        y += nb; // advance to the next group of 8 rows
+    }
+}
+
 void iqk_convert_iq1_s_q8_0_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
     GGML_ASSERT(n%QK_K == 0);
     GGML_ASSERT(nrc_x%8 == 0);
@@ -1822,6 +1872,7 @@ bool iqk_convert_1bit_q80_r8(int type, int n, const void * vx, size_t bx, void *
     if (n%QK_K != 0 || nrc_x%8 != 0) return false;
     switch (ggml_type(type)) {
         case GGML_TYPE_IQ1_S: iqk_convert_iq1_s_q8_k_r8(n, vx, bx, vy, nrc_x); break;
+        case GGML_TYPE_IQ1_M: iqk_convert_iq1_m_q8_k_r8(n, vx, bx, vy, nrc_x); break; // dispatch the new IQ1_M converter
         default: return false;
     }
     return true;
diff --git a/ggml/src/iqk/iqk_mul_mat.cpp b/ggml/src/iqk/iqk_mul_mat.cpp
index 4b24f7ec..0b29a572 100644
--- a/ggml/src/iqk/iqk_mul_mat.cpp
+++ b/ggml/src/iqk/iqk_mul_mat.cpp
@@ -245,6 +245,7 @@ struct MulMat {
             case GGML_TYPE_IQ3_XXS: return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
             case GGML_TYPE_IQ3_S  : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
             case GGML_TYPE_IQ1_S  : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
+            case GGML_TYPE_IQ1_M  : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type; // for large batches, repack IQ1_M to Q8_K_R8 as well
             case GGML_TYPE_Q3_K   : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
             case GGML_TYPE_Q4_K   : return nrc_y >= 32 ? GGML_TYPE_Q8_1 : type;
             case GGML_TYPE_Q5_K   : return nrc_y >= 32 ? GGML_TYPE_Q8_1 : type;
@@ -408,6 +409,7 @@ bool iqk_convert_repack(int typeA, int n, const void * vx, size_t bx, void * vy,
         //case GGML_TYPE_IQ4_NL_R4:
         //    return iqk_set_kernels_legacy_quants(ne00, typeA, typeB, mm.funcs, mm.func16);
         case GGML_TYPE_IQ1_S:
+        case GGML_TYPE_IQ1_M: // IQ1_M shares the 1-bit kernel setup path with IQ1_S
        //case GGML_TYPE_IQ1_S_R4:
        //case GGML_TYPE_IQ1_M_R4:
        //case GGML_TYPE_IQ1_BN: