q8_0_r4: Zen4 matrix-vector specialization
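This change gives mul_mat_q8_0_r4_q8_1 in ik_llama.cpp a dedicated nrc_y == 1 path, i.e. a true matrix-vector multiplication over four interleaved q8_0 rows, written with 256-bit AVX2 intrinsics; the nrc_y > 1 case keeps the existing 512-bit code. A scalar sketch of the bias trick the new path relies on follows the diff.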
@@ -2290,6 +2290,40 @@ static void mul_mat_q8_0_r4_q8_1(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    Q8<nrc_y, block_q8_1_x4> q8(info);
    int nb = n / QK8_0;
    GGML_ASSERT(nb%4 == 0);
    if constexpr (nrc_y == 1) {
        auto m127 = _mm256_set1_epi8(127);
        auto m1 = _mm256_set1_epi16(1);
        __m256 acc[nrc_y] = {};
        for (int ix = 0; ix < nrc_x; ix += 4) {
            const block_q8_0_x4 * iq8 = (const block_q8_0_x4 *)((const char *)vx + ix*bx);
            for (int ib4 = 0; ib4 < nb/4; ++ib4) {
                for (int k = 0; k < 4; ++k) {
                    // Four fp16 block scales, one per interleaved row, broadcast to both 128-bit lanes
                    auto scales128 = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)iq8[4*ib4+k].d));
                    auto scales = _mm256_set_m128(scales128, scales128);
                    auto scales_m = _mm256_mul_ps(scales, _mm256_set1_ps(-63.5f));
                    // Bias the signed quants by +127 so maddubs below gets an unsigned left operand
                    auto q1 = _mm256_add_epi8(_mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+0), m127);
                    auto q2 = _mm256_add_epi8(_mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+1), m127);
                    auto q3 = _mm256_add_epi8(_mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+2), m127);
                    auto q4 = _mm256_add_epi8(_mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+3), m127);
                    for (int iy = 0; iy < nrc_y; ++iy) {
                        auto y = _mm256_loadu_si256((const __m256i*)q8.y[iy][ib4].qs+k);
                        // Broadcast successive 32-bit groups of y against the interleaved rows and
                        // widen the unsigned*signed byte products to int32 sums
                        auto sumi1 = _mm256_add_epi32(_mm256_madd_epi16(m1, _mm256_maddubs_epi16(q1, _mm256_shuffle_epi32(y, 0x00))),
                                                      _mm256_madd_epi16(m1, _mm256_maddubs_epi16(q2, _mm256_shuffle_epi32(y, 0x55))));
                        auto sumi2 = _mm256_add_epi32(_mm256_madd_epi16(m1, _mm256_maddubs_epi16(q3, _mm256_shuffle_epi32(y, 0xaa))),
                                                      _mm256_madd_epi16(m1, _mm256_maddubs_epi16(q4, _mm256_shuffle_epi32(y, 0xff))));
                        auto d4d8 = _mm256_mul_ps(scales, _mm256_set1_ps(GGML_FP16_TO_FP32(q8.y[iy][ib4].d[k])));
                        acc[iy] = _mm256_fmadd_ps(d4d8, _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), acc[iy]);
                        // Cancel the +127 bias via the precomputed activation sums stored in d[k+4]
                        acc[iy] = _mm256_fmadd_ps(scales_m, _mm256_set1_ps(GGML_FP16_TO_FP32(q8.y[iy][ib4].d[k+4])), acc[iy]);
                    }
                }
            }
            for (int iy = 0; iy < nrc_y; ++iy) {
                // Both 128-bit halves hold partial sums for the same 4 rows: add and store
                auto sum = _mm_add_ps(_mm256_castps256_ps128(acc[iy]), _mm256_extractf128_ps(acc[iy], 1));
                info.store(ix, iy, sum);
                acc[iy] = _mm256_setzero_ps();
            }
        }
    } else {
        __m512 acc[2*nrc_y] = {};
        __m512i qx[4];
        auto m127 = _mm512_set1_epi8(127);
@@ -2340,6 +2374,7 @@ static void mul_mat_q8_0_r4_q8_1(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
            }
        }
    }
}
#else
template <int nrc_y>
static void mul_mat_q8_0_r4_q8_1(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
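The new path biases the signed q8_0 quants by +127 so that _mm256_maddubs_epi16, which requires an unsigned first operand, can be used, then cancels the constant offset against precomputed activation sums. Exactly how the -63.5f factor folds that correction into the sums stored in block_q8_1_x4 depends on that block layout, but the underlying identity is simple. A minimal standalone sketch (hypothetical illustration with made-up data, not part of the commit):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // q8_0 quants lie in [-127, 127], so x + 127 fits in an unsigned byte,
        // which is what maddubs-style unsigned*signed products need.
        int8_t x[8] = {-127, -1, 0, 1, 127, -50, 50, -7};
        int8_t y[8] = {   3, -2, 9, 4,  -8,  11, -5,  6};
        int direct = 0, biased = 0, ysum = 0;
        for (int j = 0; j < 8; ++j) {
            direct += x[j] * y[j];            // the dot product we actually want
            biased += (x[j] + 127) * y[j];    // what the biased multiply computes
            ysum   += y[j];
        }
        // sum((x+127)*y) - 127*sum(y) == sum(x*y)
        printf("%d == %d\n", biased - 127*ysum, direct);
        return 0;
    }

Storing sum(y) once per q8_1 block (already folded with the block scale) is what makes the trick cheap: removing the bias costs one fused multiply-add per block instead of extra work in the inner byte loop.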