diff --git a/ggml/src/iqk/iqk_gemm_kquants.cpp b/ggml/src/iqk/iqk_gemm_kquants.cpp
index db732383..05baea14 100644
--- a/ggml/src/iqk/iqk_gemm_kquants.cpp
+++ b/ggml/src/iqk/iqk_gemm_kquants.cpp
@@ -2064,14 +2064,21 @@ typedef struct {
 } block_q8_1_r8;
 
 void iqk_convert_q2_k_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
+#ifdef HAVE_FANCY_SIMD
+    constexpr int k_nr = 16;
+    using block_q8_k_r = block_q8_k_r16;
+#else
+    constexpr int k_nr = 8;
+    using block_q8_k_r = block_q8_k_r8;
+#endif
     GGML_ASSERT(n%QK_K == 0);
-    GGML_ASSERT(nrc_x%8 == 0);
+    GGML_ASSERT(nrc_x%k_nr == 0);
 
     int nb = n/QK_K;
 
-    const block_q2_K * x8[8];
+    const block_q2_K * x8[k_nr];
 
-    block_q8_k_r8 * y = (block_q8_k_r8 *)vy;
+    block_q8_k_r * y = (block_q8_k_r *)vy;
 
     float f_values[QK_K];
     uint32_t block[8];
@@ -2082,10 +2089,10 @@ void iqk_convert_q2_k_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int
     auto sign_bit = _mm256_set1_ps(-0.0f);
     auto perm = _mm256_setr_epi32(0, 4, 1, 5, 2, 6, 3, 7);
 
-    for (int ix = 0; ix < nrc_x; ix += 8) {
-        for (int k = 0; k < 8; ++k) x8[k] = (const block_q2_K *)((const char *)vx + (ix + k)*bx);
+    for (int ix = 0; ix < nrc_x; ix += k_nr) {
+        for (int k = 0; k < k_nr; ++k) x8[k] = (const block_q2_K *)((const char *)vx + (ix + k)*bx);
         for (int i = 0; i < nb; ++i) {
-            for (int k = 0; k < 8; ++k) {
+            for (int k = 0; k < k_nr; ++k) {
                 auto vd = _mm256_set1_ps(GGML_FP16_TO_FP32(x8[k][i].d));
                 auto vm = _mm256_mul_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(x8[k][i].dmin)), _mm256_set1_ps(-1.f));
                 auto block_max = _mm256_setzero_ps();
@@ -2136,13 +2143,18 @@ void iqk_convert_q2_k_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int
                     i0 = _mm256_permutevar8x32_epi32(i0, perm);
                     _mm256_storeu_si256((__m256i *)block, i0);
 
-                    auto q8 = (uint32_t *)y[i].qs + 64*ib32;
-                    for (int l = 0; l < 4; ++l) {
-                        q8[8*l + k + 0] = block[l + 0];
-                        q8[8*l + k + 32] = block[l + 4];
+                    auto q8 = (uint32_t *)y[i].qs + 8*k_nr*ib32;
+                    for (int l = 0; l < 8; ++l) {
+                        q8[k_nr*l + k] = block[l];
                     }
                 }
             }
+#ifdef HAVE_FANCY_SIMD
+            for (int l = 0; l < 64; ++l) {
+                auto v = _mm512_xor_si512(_mm512_loadu_si512((const __m512i *)y[i].qs + l), _mm512_set1_epi8(-128));
+                _mm512_storeu_si512((__m512i *)y[i].qs + l, v);
+            }
+#endif
         }
         y += nb;
     }
@@ -2458,14 +2470,21 @@ void iqk_convert_q3_k_q8_0_r8(int n, const void * vx, size_t bx, void * vy, int
 }
 
 void iqk_convert_q3_k_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
+#ifdef HAVE_FANCY_SIMD
+    constexpr int k_nr = 16;
+    using block_q8_k_r = block_q8_k_r16;
+#else
+    constexpr int k_nr = 8;
+    using block_q8_k_r = block_q8_k_r8;
+#endif
     GGML_ASSERT(n%QK_K == 0);
-    GGML_ASSERT(nrc_x%8 == 0);
+    GGML_ASSERT(nrc_x%k_nr == 0);
 
     int nb = n/QK_K;
 
-    const block_q3_K * x8[8];
+    const block_q3_K * x8[k_nr];
 
-    block_q8_k_r8 * y = (block_q8_k_r8 *)vy;
+    block_q8_k_r * y = (block_q8_k_r *)vy;
 
     uint32_t block[8];
     __m256i values[8];
@@ -2476,10 +2495,10 @@ void iqk_convert_q3_k_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int
 
     union { __m256i vec; int16_t val[16]; } helper;
 
-    for (int ix = 0; ix < nrc_x; ix += 8) {
-        for (int k = 0; k < 8; ++k) x8[k] = (const block_q3_K *)((const char *)vx + (ix + k)*bx);
+    for (int ix = 0; ix < nrc_x; ix += k_nr) {
+        for (int k = 0; k < k_nr; ++k) x8[k] = (const block_q3_K *)((const char *)vx + (ix + k)*bx);
         for (int i = 0; i < nb; ++i) {
-            for (int k = 0; k < 8; ++k) {
+            for (int k = 0; k < k_nr; ++k) {
                 float d = GGML_FP16_TO_FP32(x8[k][i].d);
                 auto hbits = _mm256_loadu_si256((const __m256i *)x8[k][i].hmask);
                 helper.vec = _mm256_cvtepi8_epi16(sc3.make_scales((const uint16_t *)x8[k][i].scales));
@@ -2549,12 +2568,18 @@ void iqk_convert_q3_k_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int
                         _mm_storeu_si128((__m128i *)block+0, _mm_unpacklo_epi64(i0_l, i0_h));
                         _mm_storeu_si128((__m128i *)block+1, _mm_unpackhi_epi64(i0_l, i0_h));
                     }
-                    auto qs = (uint32_t *)y[i].qs + 64*ib32;
+                    auto qs = (uint32_t *)y[i].qs + 8*k_nr*ib32;
                     for (int l = 0; l < 8; ++l) {
-                        qs[8*l + k] = block[l];
+                        qs[k_nr*l + k] = block[l];
                     }
                 }
             }
+#ifdef HAVE_FANCY_SIMD
+            for (int l = 0; l < 64; ++l) {
+                auto v = _mm512_xor_si512(_mm512_loadu_si512((const __m512i *)y[i].qs + l), _mm512_set1_epi8(-128));
+                _mm512_storeu_si512((__m512i *)y[i].qs + l, v);
+            }
+#endif
         }
         y += nb;
     }
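
Note (commentary, not part of the patch): both converters now select the row-interleave factor at compile time, 16 interleaved rows (block_q8_k_r16) when HAVE_FANCY_SIMD, i.e. AVX-512, is available, and the previous 8 rows (block_q8_k_r8) otherwise. For k_nr = 8 the new unified store loop writes block[0..7] to uint32 slots 8*l + k, the same set of slots the old two-store q2_K loop hit (8*l + k and 8*l + k + 32), so on the AVX2 path this is a pure refactor. The scalar sketch below restates the store pattern; store_interleaved and its raw-pointer signature are illustrative, not code from the repository.

    #include <cstdint>
    #include <cstring>

    constexpr int k_nr = 8;   // 16 under HAVE_FANCY_SIMD (AVX-512)

    // Scatter the ib32-th 32-byte group of quants from row k into the shared
    // repacked buffer: the l-th 4-byte chunk of row k lands in uint32 slot
    // k_nr*l + k, so a single 4*k_nr-byte SIMD load later fetches the same
    // chunk from all k_nr rows at once.
    inline void store_interleaved(uint32_t * dst_qs, const uint8_t * row_qs, int k, int ib32) {
        uint32_t block[8];
        std::memcpy(block, row_qs + 32*ib32, 32);      // 8 groups of 4 bytes
        uint32_t * q8 = dst_qs + 8*k_nr*ib32;          // matches the diff's indexing
        for (int l = 0; l < 8; ++l) q8[k_nr*l + k] = block[l];
    }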
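A second note on the new HAVE_FANCY_SIMD pass at the end of each row block: XORing every byte with -128 (0x80) flips the sign bit, which for an int8 value x equals x + 128, mapping signed quants in [-128, 127] onto unsigned bytes in [0, 255]. The 64 iterations of 64 bytes cover 16 rows times QK_K = 4096 bytes, i.e. the full qs array of one block_q8_k_r16. A plausible motivation, inferred rather than stated in the diff, is that the AVX-512 GEMM path uses a VNNI-style instruction such as _mm512_dpbusd_epi32, which multiplies unsigned bytes by signed bytes, so one operand must be biased to unsigned up front. A scalar equivalent of the 512-bit loop:

    #include <cstdint>

    // XOR with 0x80 flips the int8 sign bit: (uint8_t)x ^ 0x80 == (uint8_t)(x + 128).
    // Each _mm512_xor_si512 in the diff performs 64 of these at once.
    inline void flip_signs(int8_t * qs, int n_bytes) {
        for (int j = 0; j < n_bytes; ++j)
            qs[j] = (int8_t)((uint8_t)qs[j] ^ 0x80);
    }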