q5_k_r4: slightly faster on Zen4

248.3 t/s -> 256.7 t/s
This commit is contained in:
Iwan Kawrakow
2024-12-13 15:12:35 +02:00
parent ea6c02a79d
commit 0bb4576b74

View File

@@ -3355,7 +3355,7 @@ static void mul_mat_q5_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
using helper_t = union { __m512i vec; uint32_t val[16]; };
helper_t hd, hm;
__m512 acc[nrc_y] = {};
__m512 d4s[nrc_y];
__m512i isum[nrc_y] = {};
__m512i qx[4];
for (int ix = 0; ix < nrc_x; ix += 8) {
const block_q5_k_r4 * iq5l = (const block_q5_k_r4 *)((const char *)vx + (ix+0)*bx);
@@ -3368,9 +3368,6 @@ static void mul_mat_q5_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
auto dh = _mm256_castps256_ps128(d2);
auto mh = _mm256_extractf128_ps(d2, 1);
auto d4 = _mm512_insertf32x8(_mm512_castps256_ps512(_mm256_set_m128(dl, dl)), _mm256_set_m128(dh, dh), 1);
for (int iy = 0; iy < nrc_y; ++iy) {
d4s[iy] = _mm512_mul_ps(d4, _mm512_set1_ps(q8.scale(iy, ibl)));
}
auto m4 = _mm512_insertf32x8(_mm512_castps256_ps512(_mm256_set_m128(ml, ml)), _mm256_set_m128(mh, mh), 1);
m4 = _mm512_mul_ps(m4, _mm512_set1_ps(-0.5f));
auto slbits_l = _mm256_loadu_si256((const __m256i *)iq5l[ibl].scales_l);
@@ -3391,11 +3388,10 @@ static void mul_mat_q5_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
auto scales1 = _mm256_cvtepi8_epi32(_mm_set1_epi32(hd.val[ib+0]));
auto scales2 = _mm256_cvtepi8_epi32(_mm_set1_epi32(hd.val[ib+8]));
auto iscales = _mm512_inserti32x8(_mm512_castsi256_si512(scales1), scales2, 1);
auto scales = _mm512_cvtepi32_ps(iscales);
scales1 = _mm256_cvtepi8_epi32(_mm_set1_epi32(hm.val[ib+0]));
scales2 = _mm256_cvtepi8_epi32(_mm_set1_epi32(hm.val[ib+8]));
iscales = _mm512_inserti32x8(_mm512_castsi256_si512(scales1), scales2, 1);
auto scales_m = _mm512_mul_ps(m4, _mm512_cvtepi32_ps(iscales));
auto iscales_m = _mm512_inserti32x8(_mm512_castsi256_si512(scales1), scales2, 1);
auto scales_m = _mm512_mul_ps(m4, _mm512_cvtepi32_ps(iscales_m));
auto lbits1 = _mm512_inserti32x8(_mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)iq5l[ibl].qs+2*ib+0)),
_mm256_loadu_si256((const __m256i *)iq5h[ibl].qs+2*ib+0), 1);
auto lbits2 = _mm512_inserti32x8(_mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)iq5l[ibl].qs+2*ib+1)),
@@ -3417,11 +3413,15 @@ static void mul_mat_q5_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
sumi = _mm512_dpbusd_epi32(sumi, qx[1], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0x55)));
sumi = _mm512_dpbusd_epi32(sumi, qx[2], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0xaa)));
sumi = _mm512_dpbusd_epi32(sumi, qx[3], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0xff)));
acc[iy] = _mm512_fmadd_ps(_mm512_mul_ps(scales, d4s[iy]), _mm512_cvtepi32_ps(sumi), acc[iy]);
isum[iy] = _mm512_add_epi32(isum[iy], _mm512_mullo_epi32(iscales, sumi));
float m8 = ((const float *)q8.y[iy][ibl].bsums)[ib];
acc[iy] = _mm512_fmadd_ps(scales_m, _mm512_set1_ps(m8), acc[iy]);
}
}
for (int iy = 0; iy < nrc_y; ++iy) {
acc[iy] = _mm512_fmadd_ps(_mm512_mul_ps(d4, _mm512_set1_ps(q8.scale(iy, ibl))), _mm512_cvtepi32_ps(isum[iy]), acc[iy]);
isum[iy] = _mm512_setzero_si512();
}
}
for (int iy = 0; iy < nrc_y; ++iy) {
auto sum1 = _mm_add_ps(_mm512_extractf32x4_ps(acc[iy], 0), _mm512_extractf32x4_ps(acc[iy], 1));