From 5a279b37bac68e2c5e004ceb2bd931741aedbbac Mon Sep 17 00:00:00 2001
From: Iwan Kawrakow
Date: Thu, 30 Jan 2025 15:29:22 +0200
Subject: [PATCH] Make iq4_nl_r4 work with row sizes that are not a multiple
 of 128 ... on AVX2

---
 ggml/src/iqk/iqk_mul_mat.cpp | 64 +++++++++++++++++++++++-------------
 1 file changed, 42 insertions(+), 22 deletions(-)

diff --git a/ggml/src/iqk/iqk_mul_mat.cpp b/ggml/src/iqk/iqk_mul_mat.cpp
index ede02dad..74ec6364 100644
--- a/ggml/src/iqk/iqk_mul_mat.cpp
+++ b/ggml/src/iqk/iqk_mul_mat.cpp
@@ -2549,37 +2549,57 @@ static void mul_mat_iq4_nl_r4_q8_1(int n, const void * vx, size_t bx, const Data
     auto values128 = _mm_loadu_si128((const __m128i *)iq4k_values);
     auto values = MM256_SET_M128I(values128, values128);
     int nb = n / QK4_NL;
-    GGML_ASSERT(nb%4 == 0);
     __m256 acc[nrc_y] = {};
-    //__m256 acc[2*nrc_y] = {};
+    __m256i qs[4];
+    float d8[4*nrc_y];
+    auto prepare = [&qs, &values, &m4] (const block_iq4_nl_r4& iq4) {
+        auto scales128 = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)iq4.d));
+        auto scales = _mm256_set_m128(scales128, scales128);
+        auto bits1 = _mm256_loadu_si256((const __m256i *)iq4.qs+0);
+        auto bits2 = _mm256_loadu_si256((const __m256i *)iq4.qs+1);
+        qs[0] = _mm256_shuffle_epi8(values, _mm256_and_si256(bits1, m4));
+        qs[1] = _mm256_shuffle_epi8(values, _mm256_and_si256(bits2, m4));
+        qs[2] = _mm256_shuffle_epi8(values, _mm256_and_si256(_mm256_srli_epi16(bits1, 4), m4));
+        qs[3] = _mm256_shuffle_epi8(values, _mm256_and_si256(_mm256_srli_epi16(bits2, 4), m4));
+        return scales;
+    };
+    auto dot = [&qs, &m1] (__m256i y) {
+        auto u1 = _mm256_sign_epi8(qs[0], qs[0]);
+        auto u2 = _mm256_sign_epi8(qs[1], qs[1]);
+        auto sumi1 = _mm256_add_epi32(
+                _mm256_madd_epi16(m1, _mm256_maddubs_epi16(u1, _mm256_sign_epi8(_mm256_shuffle_epi32(y, 0x00), qs[0]))),
+                _mm256_madd_epi16(m1, _mm256_maddubs_epi16(u2, _mm256_sign_epi8(_mm256_shuffle_epi32(y, 0x55), qs[1]))));
+        u1 = _mm256_sign_epi8(qs[2], qs[2]);
+        u2 = _mm256_sign_epi8(qs[3], qs[3]);
+        auto sumi2 = _mm256_add_epi32(
+                _mm256_madd_epi16(m1, _mm256_maddubs_epi16(u1, _mm256_sign_epi8(_mm256_shuffle_epi32(y, 0xaa), qs[2]))),
+                _mm256_madd_epi16(m1, _mm256_maddubs_epi16(u2, _mm256_sign_epi8(_mm256_shuffle_epi32(y, 0xff), qs[3]))));
+        return _mm256_add_epi32(sumi1, sumi2);
+    };
     for (int ix = 0; ix < nrc_x; ix += 4) {
         const block_iq4_nl_r4 * iq4 = (const block_iq4_nl_r4 *)((const char *)vx + ix*bx);
         for (int ib4 = 0; ib4 < nb/4; ++ib4) {
+            for (int iy = 0; iy < nrc_y; ++iy) {
+                _mm_storeu_ps(d8+4*iy, _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)q8.y[iy][ib4].d)));
+            }
             for (int k = 0; k < 4; ++k) {
-                auto scales128 = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)iq4[4*ib4+k].d));
-                auto scales = _mm256_set_m128(scales128, scales128);
-                auto bits1 = _mm256_loadu_si256((const __m256i *)iq4[4*ib4+k].qs+0);
-                auto bits2 = _mm256_loadu_si256((const __m256i *)iq4[4*ib4+k].qs+1);
-                auto q1 = _mm256_shuffle_epi8(values, _mm256_and_si256(bits1, m4));
-                auto q2 = _mm256_shuffle_epi8(values, _mm256_and_si256(bits2, m4));
-                auto q3 = _mm256_shuffle_epi8(values, _mm256_and_si256(_mm256_srli_epi16(bits1, 4), m4));
-                auto q4 = _mm256_shuffle_epi8(values, _mm256_and_si256(_mm256_srli_epi16(bits2, 4), m4));
-                auto s1 = _mm256_sign_epi8(q1, q1);
-                auto s2 = _mm256_sign_epi8(q2, q2);
-                auto s3 = _mm256_sign_epi8(q3, q3);
-                auto s4 = _mm256_sign_epi8(q4, q4);
-
+                auto scales = prepare(iq4[4*ib4+k]);
                 for (int iy = 0; iy < nrc_y; ++iy) {
-                    auto y = _mm256_loadu_si256((const __m256i*)q8.y[iy][ib4].qs+k);
-                    auto sumi1 = _mm256_add_epi32(_mm256_madd_epi16(m1, _mm256_maddubs_epi16(s1, _mm256_sign_epi8(_mm256_shuffle_epi32(y, 0x00), q1))),
-                                                  _mm256_madd_epi16(m1, _mm256_maddubs_epi16(s2, _mm256_sign_epi8(_mm256_shuffle_epi32(y, 0x55), q2))));
-                    auto sumi2 = _mm256_add_epi32(_mm256_madd_epi16(m1, _mm256_maddubs_epi16(s3, _mm256_sign_epi8(_mm256_shuffle_epi32(y, 0xaa), q3))),
-                                                  _mm256_madd_epi16(m1, _mm256_maddubs_epi16(s4, _mm256_sign_epi8(_mm256_shuffle_epi32(y, 0xff), q4))));
-                    auto d4d8 = _mm256_mul_ps(scales, _mm256_set1_ps(GGML_FP16_TO_FP32(q8.y[iy][ib4].d[k])));
-                    acc[iy] = _mm256_fmadd_ps(d4d8, _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), acc[iy]);
+                    auto sumi = dot(_mm256_loadu_si256((const __m256i*)q8.y[iy][ib4].qs+k));
+                    auto d4d8 = _mm256_mul_ps(scales, _mm256_set1_ps(d8[4*iy+k]));
+                    acc[iy] = _mm256_fmadd_ps(d4d8, _mm256_cvtepi32_ps(sumi), acc[iy]);
                 }
             }
         }
+        for (int ib = 4*(nb/4); ib < nb; ++ib) {
+            auto scales = prepare(iq4[ib]);
+            for (int iy = 0; iy < nrc_y; ++iy) {
+                auto qy = (const block_q8_1 *)q8.y[iy];
+                auto sumi = dot(_mm256_loadu_si256((const __m256i*)qy[ib].qs));
+                auto d4d8 = _mm256_mul_ps(scales, _mm256_set1_ps(GGML_FP16_TO_FP32(qy[ib].d)));
+                acc[iy] = _mm256_fmadd_ps(d4d8, _mm256_cvtepi32_ps(sumi), acc[iy]);
+            }
+        }
         for (int iy = 0; iy < nrc_y; ++iy) {
             auto sum = _mm_add_ps(_mm256_castps256_ps128(acc[iy]), _mm256_extractf128_ps(acc[iy], 1));
             info.store(ix, iy, sum);
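
Two notes on the structure of the change:

The old code asserted nb%4 == 0, i.e. a row size that is a multiple of
4*QK4_NL = 128. The per-block work is now factored into the prepare() and
dot() lambdas so that the main loop, which still processes blocks four at a
time, and the new tail loop for (int ib = 4*(nb/4); ib < nb; ++ib) share one
code path; the tail loop fetches each q8_1 block scale directly with
GGML_FP16_TO_FP32 instead of converting four scales at a time into d8.

The dot() lambda uses the usual AVX2 idiom for a signed 8-bit dot product:
_mm256_maddubs_epi16 multiplies an *unsigned* byte vector by a *signed* one,
so the quant operand is made non-negative with _mm256_sign_epi8(q, q) while
its sign is folded into the activation operand with _mm256_sign_epi8(y, q);
the 16-bit pair sums are then widened to 32 bits by _mm256_madd_epi16
against the all-ones vector m1. The standalone sketch below (not part of the
patch; all names are illustrative) demonstrates the same trick on a plain
32-element dot product:

// Sketch only, assuming AVX2 and |x| <= 127 (the iq4_nl value table never
// produces -128, so _mm256_sign_epi8(vx, vx) really yields |x|; pair sums
// then stay below 2*127*127 < 32767 and the saturating maddubs is exact).
// Compile with e.g.: g++ -O2 -mavx2 dot_sketch.cpp
#include <immintrin.h>
#include <cstdint>
#include <cstdio>

static int32_t dot_i8_32(const int8_t * x, const int8_t * y) {
    __m256i vx  = _mm256_loadu_si256((const __m256i *)x);
    __m256i vy  = _mm256_loadu_si256((const __m256i *)y);
    __m256i ux  = _mm256_sign_epi8(vx, vx);        // |x| -> unsigned operand
    __m256i sy  = _mm256_sign_epi8(vy, vx);        // fold x's sign into y
    __m256i p16 = _mm256_maddubs_epi16(ux, sy);    // pairwise u8*s8 -> i16
    __m256i p32 = _mm256_madd_epi16(_mm256_set1_epi16(1), p16); // i16 pairs -> i32
    __m128i s   = _mm_add_epi32(_mm256_castsi256_si128(p32),
                                _mm256_extracti128_si256(p32, 1));
    s = _mm_add_epi32(s, _mm_shuffle_epi32(s, 0x4e)); // fold 4 lanes -> 2
    s = _mm_add_epi32(s, _mm_shuffle_epi32(s, 0xb1)); // fold 2 lanes -> 1
    return _mm_cvtsi128_si32(s);
}

int main() {
    int8_t x[32], y[32];
    int32_t ref = 0;
    for (int i = 0; i < 32; ++i) {
        x[i] = (int8_t)(3*i - 47);
        y[i] = (int8_t)(90 - 5*i);
        ref += x[i]*y[i];
    }
    printf("simd = %d, scalar = %d\n", dot_i8_32(x, y), ref);
    return 0;
}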