Mirror of https://github.com/ikawrakow/ik_llama.cpp.git
Better ARM_NEON implementation for R4 quants (#135)
* q6_k_r4: Better ARM implementation. PP-512(LLaMA-3.1-8B) is now 104.2 t/s, up from 83.2 t/s, i.e., q6_k_r4 now beats q6_0_r4.
* q5_k_r4: Better ARM implementation. PP-512(LLaMA-3.1-8B) is now 107.8 t/s, up from 96.9 t/s, i.e., q5_k_r4 now beats q5_0_r4.
* q4_k_r4: Better ARM implementation. PP-512(LLaMA-3.1-8B) is now 122.1 t/s, up from 110 t/s, i.e., q4_k_r4 is now (nearly) on par with q4_0_r4.
* iq4_xs_r4: Better ARM implementation. PP-512(LLaMA-3.1-8B) is now 131.3 t/s, up from 115.8 t/s. iq4_xs_r4 is now the prompt processing champion on ARM.

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
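The common thread across the kernels changed below is how the per-sub-block dot products are accumulated. What follows is a minimal sketch, not the actual kernel code: the function names (accumulate_float, accumulate_int), the arrays sumi/sc, the 8-sub-block loop, and the scalar d4d8 are illustrative assumptions. It contrasts the old pattern, which converted every sub-block result to float and folded the scales into each FMA, with the new pattern, which multiply-accumulates in int32 across the super-block and applies the combined float scale once at the end.

#include <arm_neon.h>

// sumi[] holds the int32 dot products of the 8 sub-blocks of one super-block,
// sc[] the block scales widened to int32, d4d8 the combined row/activation scale.
// All names here are illustrative, not taken from the repository.

// Old pattern: convert each sub-block sum to float and fold the scale into every FMA.
static inline float32x4_t accumulate_float(const int32x4_t sumi[8], const int32x4_t sc[8],
                                            float d4d8, float32x4_t acc) {
    for (int ib = 0; ib < 8; ++ib) {
        float32x4_t s = vmulq_f32(vcvtq_f32_s32(sc[ib]), vdupq_n_f32(d4d8));
        acc = vfmaq_f32(acc, s, vcvtq_f32_s32(sumi[ib]));   // float FMA per sub-block
    }
    return acc;
}

// New pattern: multiply-accumulate in int32 across the super-block, then apply the
// float scale once. For these block sizes the running sum stays within int32 range.
static inline float32x4_t accumulate_int(const int32x4_t sumi[8], const int32x4_t sc[8],
                                          float d4d8, float32x4_t acc) {
    int32x4_t isum = vdupq_n_s32(0);
    for (int ib = 0; ib < 8; ++ib) {
        isum = vmlaq_s32(isum, sc[ib], sumi[ib]);           // integer multiply-accumulate
    }
    return vfmaq_f32(acc, vdupq_n_f32(d4d8), vcvtq_f32_s32(isum));
}

The kernels in the diff follow the second pattern: a per-super-block isum[iy] is built with vmlaq_s32 inside the sub-block loops, and the single vfmaq_f32 with d4 * q8.scale(iy, ibl) happens once per super-block, which removes the int-to-float conversions and extra float multiplies from the innermost loop.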
@@ -8309,42 +8309,38 @@ void mul_mat_iq4_xs_r4_q8_k(int n, const void * vx, size_t bx, const DataInfo& i
int nbl = n / QK_K;
int8x16_t qx[8];
int8x16x2_t iscales;
float32x4x4_t scales;
int32x4x4_t scales;
float32x4_t acc[nrc_y] = {};
for (int ix = 0; ix < nrc_x; ix += 4) {
const block_iq4_xs_r4 * iq4 = (const block_iq4_xs_r4 *)((const char *)vx + ix*bx);
for (int ibl = 0; ibl < nbl; ++ibl) {
auto d4 = vcvt_f32_f16(vld1_f16((const float16_t *)iq4[ibl].d));
if constexpr (nrc_y == 1) {
d4 = vmulq_f32(d4, vdupq_n_f32(q8.scale(0, ibl)));
}
auto sl = vld1q_u8(iq4[ibl].scales_l);
auto sh8 = vld1_u8(iq4[ibl].scales_h);
auto sh = vcombine_u8(sh8, vshr_n_u8(sh8, 2));
iscales.val[0] = vaddq_s8(vorrq_u8(vandq_u8(sl, m4), vandq_u8(vshlq_n_u8(sh, 4), m3)), m32);
iscales.val[1] = vaddq_s8(vorrq_u8(vshrq_n_u8(sl, 4), vandq_u8(sh, m3)), m32);
int32x4_t isum[nrc_y] = {};
for (int is = 0; is < 2; ++is) {
auto iscales16_1 = vmovl_s8(vget_low_s8(iscales.val[is]));
auto iscales16_2 = vmovl_s8(vget_high_s8(iscales.val[is]));
scales.val[0] = vmulq_f32(d4, vcvtq_f32_s32(vmovl_s16(vget_low_s16(iscales16_1))));
scales.val[1] = vmulq_f32(d4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(iscales16_1))));
scales.val[2] = vmulq_f32(d4, vcvtq_f32_s32(vmovl_s16(vget_low_s16(iscales16_2))));
scales.val[3] = vmulq_f32(d4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(iscales16_2))));
scales.val[0] = vmovl_s16(vget_low_s16(iscales16_1));
scales.val[1] = vmovl_s16(vget_high_s16(iscales16_1));
scales.val[2] = vmovl_s16(vget_low_s16(iscales16_2));
scales.val[3] = vmovl_s16(vget_high_s16(iscales16_2));
for (int ib = 0; ib < 4; ++ib) {
auto bits = vld1q_u8_x4(iq4[ibl].qs + 256*is + 64*ib);
prepare_iq4_nl_quants(values, m4, bits, qx);
for (int iy = 0; iy < nrc_y; ++iy) {
auto y = vld1q_s8_x2(q8.y[iy][ibl].qs+128*is+32*ib);
auto sumi = interleaved_dotq(qx, y);
if constexpr (nrc_y == 1) {
acc[iy] = vfmaq_f32(acc[iy], scales.val[ib], vcvtq_f32_s32(sumi));
} else {
auto d4d8 = vmulq_f32(scales.val[ib], vdupq_n_f32(q8.scale(iy, ibl)));
acc[iy] = vfmaq_f32(acc[iy], d4d8, vcvtq_f32_s32(sumi));
}
isum[iy] = vmlaq_s32(isum[iy], scales.val[ib], sumi);
}
}
}
for (int iy = 0; iy < nrc_y; ++iy) {
acc[iy] = vfmaq_f32(acc[iy], vmulq_f32(d4, vdupq_n_f32(q8.scale(iy, ibl))), vcvtq_f32_s32(isum[iy]));
}
}
for (int iy = 0; iy < nrc_y; ++iy) {
info.store(ix, iy, acc[iy]);
@@ -8439,8 +8435,8 @@ void mul_mat_q4_k_r4_q8_k(int n, const void * vx, size_t bx, const DataInfo& inf
auto m3 = vdupq_n_u8(0x30);
int nbl = n / QK_K;
int8x16_t qx[8];
int8x16x4_t iscales;
float32x4x4_t scales;
int8x16x2_t iscales;
int32x4x4_t scales;
float32x4_t acc[nrc_y] = {};
for (int ix = 0; ix < nrc_x; ix += 4) {
const block_q4_k_r4 * iq4 = (const block_q4_k_r4 *)((const char *)vx + ix*bx);
@@ -8448,50 +8444,49 @@ void mul_mat_q4_k_r4_q8_k(int n, const void * vx, size_t bx, const DataInfo& inf
auto d4 = vcvt_f32_f16(vld1_f16((const float16_t *)iq4[ibl].d));
auto m4 = vcvt_f32_f16(vld1_f16((const float16_t *)iq4[ibl].d+4));
m4 = vmulq_f32(m4, vdupq_n_f32(-1.f));
if constexpr (nrc_y == 1) {
d4 = vmulq_f32(d4, vdupq_n_f32(q8.scale(0, ibl)));
}
auto sl = vld1q_u8_x2(iq4[ibl].scales_l);
auto sh = vld1q_u8(iq4[ibl].scales_h);
iscales.val[0] = vorrq_u8(vandq_u8(sl.val[0], mf), vandq_u8(vshlq_n_u8(sh, 4), m3));
iscales.val[1] = vorrq_u8(vandq_u8(sl.val[1], mf), vandq_u8(sh, m3));
iscales.val[2] = vorrq_u8(vshrq_n_u8(sl.val[0], 4), vandq_u8(vshlq_n_u8(sh, 2), m3));
iscales.val[3] = vorrq_u8(vshrq_n_u8(sl.val[1], 4), vandq_u8(vshrq_n_u8(sh, 2), m3));
iscales.val[0] = vorrq_u8(vshrq_n_u8(sl.val[0], 4), vandq_u8(vshlq_n_u8(sh, 2), m3));
iscales.val[1] = vorrq_u8(vshrq_n_u8(sl.val[1], 4), vandq_u8(vshrq_n_u8(sh, 2), m3));
for (int is = 0; is < 2; ++is) {
auto iscales16_1 = vmovl_s8(vget_low_s8(iscales.val[is+2]));
auto iscales16_2 = vmovl_s8(vget_high_s8(iscales.val[is+2]));
scales.val[0] = vmulq_f32(m4, vcvtq_f32_s32(vmovl_s16(vget_low_s16(iscales16_1))));
scales.val[1] = vmulq_f32(m4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(iscales16_1))));
scales.val[2] = vmulq_f32(m4, vcvtq_f32_s32(vmovl_s16(vget_low_s16(iscales16_2))));
scales.val[3] = vmulq_f32(m4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(iscales16_2))));
auto iscales16_1 = vmovl_s8(vget_low_s8(iscales.val[is]));
auto iscales16_2 = vmovl_s8(vget_high_s8(iscales.val[is]));
float32x4x4_t fscales;
fscales.val[0] = vmulq_f32(m4, vcvtq_f32_s32(vmovl_s16(vget_low_s16(iscales16_1))));
fscales.val[1] = vmulq_f32(m4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(iscales16_1))));
fscales.val[2] = vmulq_f32(m4, vcvtq_f32_s32(vmovl_s16(vget_low_s16(iscales16_2))));
fscales.val[3] = vmulq_f32(m4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(iscales16_2))));
for (int iy = 0; iy < nrc_y; ++iy) {
auto m8 = vld1q_f32((const float *)q8.y[iy][ibl].bsums + 4*is);
acc[iy] = vmlaq_laneq_f32(acc[iy], scales.val[0], m8, 0);
acc[iy] = vmlaq_laneq_f32(acc[iy], scales.val[1], m8, 1);
acc[iy] = vmlaq_laneq_f32(acc[iy], scales.val[2], m8, 2);
acc[iy] = vmlaq_laneq_f32(acc[iy], scales.val[3], m8, 3);
acc[iy] = vmlaq_laneq_f32(acc[iy], fscales.val[0], m8, 0);
acc[iy] = vmlaq_laneq_f32(acc[iy], fscales.val[1], m8, 1);
acc[iy] = vmlaq_laneq_f32(acc[iy], fscales.val[2], m8, 2);
acc[iy] = vmlaq_laneq_f32(acc[iy], fscales.val[3], m8, 3);
}
iscales16_1 = vmovl_s8(vget_low_s8(iscales.val[is]));
iscales16_2 = vmovl_s8(vget_high_s8(iscales.val[is]));
scales.val[0] = vmulq_f32(d4, vcvtq_f32_s32(vmovl_s16(vget_low_s16(iscales16_1))));
scales.val[1] = vmulq_f32(d4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(iscales16_1))));
scales.val[2] = vmulq_f32(d4, vcvtq_f32_s32(vmovl_s16(vget_low_s16(iscales16_2))));
scales.val[3] = vmulq_f32(d4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(iscales16_2))));
}
iscales.val[0] = vorrq_u8(vandq_u8(sl.val[0], mf), vandq_u8(vshlq_n_u8(sh, 4), m3));
iscales.val[1] = vorrq_u8(vandq_u8(sl.val[1], mf), vandq_u8(sh, m3));
int32x4_t isum[nrc_y] = {};
for (int is = 0; is < 2; ++is) {
auto iscales16_1 = vmovl_s8(vget_low_s8(iscales.val[is]));
auto iscales16_2 = vmovl_s8(vget_high_s8(iscales.val[is]));
scales.val[0] = vmovl_s16(vget_low_s16(iscales16_1));
scales.val[1] = vmovl_s16(vget_high_s16(iscales16_1));
scales.val[2] = vmovl_s16(vget_low_s16(iscales16_2));
scales.val[3] = vmovl_s16(vget_high_s16(iscales16_2));
for (int ib = 0; ib < 4; ++ib) {
auto bits = vld1q_u8_x4(iq4[ibl].qs + 256*is + 64*ib);
prepare_q4_k_quants(mf, bits, qx);
for (int iy = 0; iy < nrc_y; ++iy) {
auto y = vld1q_s8_x2(q8.y[iy][ibl].qs+128*is+32*ib);
auto sumi = interleaved_dotq(qx, y);
if constexpr (nrc_y == 1) {
acc[iy] = vfmaq_f32(acc[iy], scales.val[ib], vcvtq_f32_s32(sumi));
} else {
auto d4d8 = vmulq_f32(scales.val[ib], vdupq_n_f32(q8.scale(iy, ibl)));
acc[iy] = vfmaq_f32(acc[iy], d4d8, vcvtq_f32_s32(sumi));
}
isum[iy] = vmlaq_s32(isum[iy], scales.val[ib], sumi);
}
}
}
for (int iy = 0; iy < nrc_y; ++iy) {
acc[iy] = vfmaq_f32(acc[iy], vmulq_f32(d4, vdupq_n_f32(q8.scale(iy, ibl))), vcvtq_f32_s32(isum[iy]));
}
}
for (int iy = 0; iy < nrc_y; ++iy) {
info.store(ix, iy, acc[iy]);
@@ -8509,8 +8504,8 @@ void mul_mat_q5_k_r4_q8_k(int n, const void * vx, size_t bx, const DataInfo& inf
auto m10 = vdupq_n_u8(0x10);
int nbl = n / QK_K;
int8x16_t qx[8];
int8x16x4_t iscales;
float32x4x4_t scales;
int8x16x2_t iscales;
int32x4x4_t scales;
float32x4_t acc[nrc_y] = {};
for (int ix = 0; ix < nrc_x; ix += 4) {
const block_q5_k_r4 * iq5 = (const block_q5_k_r4 *)((const char *)vx + ix*bx);
@@ -8518,35 +8513,36 @@ void mul_mat_q5_k_r4_q8_k(int n, const void * vx, size_t bx, const DataInfo& inf
auto d4 = vcvt_f32_f16(vld1_f16((const float16_t *)iq5[ibl].d));
auto m4 = vcvt_f32_f16(vld1_f16((const float16_t *)iq5[ibl].d+4));
m4 = vmulq_f32(m4, vdupq_n_f32(-1.f));
if constexpr (nrc_y == 1) {
d4 = vmulq_f32(d4, vdupq_n_f32(q8.scale(0, ibl)));
}
auto sl = vld1q_u8_x2(iq5[ibl].scales_l);
auto sh = vld1q_u8(iq5[ibl].scales_h);
iscales.val[0] = vorrq_u8(vandq_u8(sl.val[0], mf), vandq_u8(vshlq_n_u8(sh, 4), m30));
iscales.val[1] = vorrq_u8(vandq_u8(sl.val[1], mf), vandq_u8(sh, m30));
iscales.val[2] = vorrq_u8(vshrq_n_u8(sl.val[0], 4), vandq_u8(vshlq_n_u8(sh, 2), m30));
iscales.val[3] = vorrq_u8(vshrq_n_u8(sl.val[1], 4), vandq_u8(vshrq_n_u8(sh, 2), m30));
iscales.val[0] = vorrq_u8(vshrq_n_u8(sl.val[0], 4), vandq_u8(vshlq_n_u8(sh, 2), m30));
iscales.val[1] = vorrq_u8(vshrq_n_u8(sl.val[1], 4), vandq_u8(vshrq_n_u8(sh, 2), m30));
for (int is = 0; is < 2; ++is) {
auto iscales16_1 = vmovl_s8(vget_low_s8(iscales.val[is+2]));
auto iscales16_2 = vmovl_s8(vget_high_s8(iscales.val[is+2]));
scales.val[0] = vmulq_f32(m4, vcvtq_f32_s32(vmovl_s16(vget_low_s16(iscales16_1))));
scales.val[1] = vmulq_f32(m4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(iscales16_1))));
scales.val[2] = vmulq_f32(m4, vcvtq_f32_s32(vmovl_s16(vget_low_s16(iscales16_2))));
scales.val[3] = vmulq_f32(m4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(iscales16_2))));
auto iscales16_1 = vmovl_s8(vget_low_s8(iscales.val[is]));
auto iscales16_2 = vmovl_s8(vget_high_s8(iscales.val[is]));
float32x4x4_t fscales;
fscales.val[0] = vmulq_f32(m4, vcvtq_f32_s32(vmovl_s16(vget_low_s16(iscales16_1))));
fscales.val[1] = vmulq_f32(m4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(iscales16_1))));
fscales.val[2] = vmulq_f32(m4, vcvtq_f32_s32(vmovl_s16(vget_low_s16(iscales16_2))));
fscales.val[3] = vmulq_f32(m4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(iscales16_2))));
for (int iy = 0; iy < nrc_y; ++iy) {
auto m8 = vld1q_f32((const float *)q8.y[iy][ibl].bsums + 4*is);
acc[iy] = vmlaq_laneq_f32(acc[iy], scales.val[0], m8, 0);
acc[iy] = vmlaq_laneq_f32(acc[iy], scales.val[1], m8, 1);
acc[iy] = vmlaq_laneq_f32(acc[iy], scales.val[2], m8, 2);
acc[iy] = vmlaq_laneq_f32(acc[iy], scales.val[3], m8, 3);
acc[iy] = vmlaq_laneq_f32(acc[iy], fscales.val[0], m8, 0);
acc[iy] = vmlaq_laneq_f32(acc[iy], fscales.val[1], m8, 1);
acc[iy] = vmlaq_laneq_f32(acc[iy], fscales.val[2], m8, 2);
acc[iy] = vmlaq_laneq_f32(acc[iy], fscales.val[3], m8, 3);
}
iscales16_1 = vmovl_s8(vget_low_s8(iscales.val[is]));
iscales16_2 = vmovl_s8(vget_high_s8(iscales.val[is]));
scales.val[0] = vmulq_f32(d4, vcvtq_f32_s32(vmovl_s16(vget_low_s16(iscales16_1))));
scales.val[1] = vmulq_f32(d4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(iscales16_1))));
scales.val[2] = vmulq_f32(d4, vcvtq_f32_s32(vmovl_s16(vget_low_s16(iscales16_2))));
scales.val[3] = vmulq_f32(d4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(iscales16_2))));
}
iscales.val[0] = vorrq_u8(vandq_u8(sl.val[0], mf), vandq_u8(vshlq_n_u8(sh, 4), m30));
iscales.val[1] = vorrq_u8(vandq_u8(sl.val[1], mf), vandq_u8(sh, m30));
int32x4_t isum[nrc_y] = {};
for (int is = 0; is < 2; ++is) {
auto iscales16_1 = vmovl_s8(vget_low_s8(iscales.val[is]));
auto iscales16_2 = vmovl_s8(vget_high_s8(iscales.val[is]));
scales.val[0] = vmovl_s16(vget_low_s16(iscales16_1));
scales.val[1] = vmovl_s16(vget_high_s16(iscales16_1));
scales.val[2] = vmovl_s16(vget_low_s16(iscales16_2));
scales.val[3] = vmovl_s16(vget_high_s16(iscales16_2));
for (int ib = 0; ib < 4; ++ib) {
auto lbits = vld1q_u8_x4(iq5[ibl].qs + 256*is + 64*ib);
auto hbits2 = vld1q_u8(iq5[ibl].qh + 64*is + 16*ib);
@@ -8563,15 +8559,13 @@ void mul_mat_q5_k_r4_q8_k(int n, const void * vx, size_t bx, const DataInfo& inf
for (int iy = 0; iy < nrc_y; ++iy) {
auto y = vld1q_s8_x2(q8.y[iy][ibl].qs+128*is+32*ib);
auto sumi = interleaved_dotq(qx, y);
if constexpr (nrc_y == 1) {
acc[iy] = vfmaq_f32(acc[iy], scales.val[ib], vcvtq_f32_s32(sumi));
} else {
auto d4d8 = vmulq_f32(scales.val[ib], vdupq_n_f32(q8.scale(iy, ibl)));
acc[iy] = vfmaq_f32(acc[iy], d4d8, vcvtq_f32_s32(sumi));
}
isum[iy] = vmlaq_s32(isum[iy], scales.val[ib], sumi);
}
}
}
for (int iy = 0; iy < nrc_y; ++iy) {
acc[iy] = vfmaq_f32(acc[iy], vmulq_f32(d4, vdupq_n_f32(q8.scale(iy, ibl))), vcvtq_f32_s32(isum[iy]));
}
}
for (int iy = 0; iy < nrc_y; ++iy) {
info.store(ix, iy, acc[iy]);
@@ -8589,23 +8583,18 @@ void mul_mat_q6_k_r4_q8_k(int n, const void * vx, size_t bx, const DataInfo& inf
auto m32 = vdupq_n_s8(-32);
int nbl = n / QK_K;
int8x16_t qx[4];
float32x4x2_t scales;
float32x4_t acc[nrc_y] = {};
float32x4_t d4[nrc_y] = {};
for (int ix = 0; ix < nrc_x; ix += 4) {
const block_q6_k_r4 * iq6 = (const block_q6_k_r4 *)((const char *)vx + ix*bx);
for (int ibl = 0; ibl < nbl; ++ibl) {
auto dtmp = vcvt_f32_f16(vld1_f16((const float16_t *)iq6[ibl].d));
for (int iy = 0; iy < nrc_y; ++iy) {
d4[iy] = vmulq_f32(dtmp, vdupq_n_f32(q8.scale(iy, ibl)));
}
auto d4 = vcvt_f32_f16(vld1_f16((const float16_t *)iq6[ibl].d));
int32x4_t isum[nrc_y] = {};
for (int is = 0; is < 2; ++is) {
for (int ib = 0; ib < 4; ++ib) {
auto lbits = vld1q_u8_x4(iq6[ibl].ql + 256*is + 64*ib);
auto hbits = vld1q_u8(iq6[ibl].qh + 128*is + 32*ib);
auto iscales = vmovl_s8(vld1_s8(iq6[ibl].scales + 32*is + 8*ib));
scales.val[0] = vcvtq_f32_s32(vmovl_s16(vget_low_s16(iscales)));
scales.val[1] = vcvtq_f32_s32(vmovl_s16(vget_high_s16(iscales)));
auto scales = vmovl_s16(vget_low_s16(iscales));
qx[0] = vaddq_s8(m32, vorrq_u8(vandq_u8 (lbits.val[0], mf), vandq_u8(m3, vshlq_n_u8(hbits, 4))));
qx[1] = vaddq_s8(m32, vorrq_u8(vandq_u8 (lbits.val[2], mf), vandq_u8(m3, hbits)));
qx[2] = vaddq_s8(m32, vorrq_u8(vshrq_n_u8(lbits.val[0], 4), vandq_u8(m3, vshlq_n_u8(hbits, 2))));
@@ -8613,8 +8602,9 @@ void mul_mat_q6_k_r4_q8_k(int n, const void * vx, size_t bx, const DataInfo& inf
for (int iy = 0; iy < nrc_y; ++iy) {
auto y = vld1q_s8(q8.y[iy][ibl].qs+128*is+32*ib);
auto sumi = interleaved_dotq(qx, y);
acc[iy] = vfmaq_f32(acc[iy], vmulq_f32(scales.val[0], d4[iy]), vcvtq_f32_s32(sumi));
isum[iy] = vmlaq_s32(isum[iy], scales, sumi);
}
scales = vmovl_s16(vget_high_s16(iscales));
hbits = vld1q_u8(iq6[ibl].qh + 128*is + 32*ib + 16);
qx[0] = vaddq_s8(m32, vorrq_u8(vandq_u8 (lbits.val[1], mf), vandq_u8(m3, vshlq_n_u8(hbits, 4))));
qx[1] = vaddq_s8(m32, vorrq_u8(vandq_u8 (lbits.val[3], mf), vandq_u8(m3, hbits)));
@@ -8623,10 +8613,13 @@ void mul_mat_q6_k_r4_q8_k(int n, const void * vx, size_t bx, const DataInfo& inf
for (int iy = 0; iy < nrc_y; ++iy) {
auto y = vld1q_s8(q8.y[iy][ibl].qs+128*is+32*ib+16);
auto sumi = interleaved_dotq(qx, y);
acc[iy] = vfmaq_f32(acc[iy], vmulq_f32(scales.val[1], d4[iy]), vcvtq_f32_s32(sumi));
isum[iy] = vmlaq_s32(isum[iy], scales, sumi);
}
}
}
for (int iy = 0; iy < nrc_y; ++iy) {
acc[iy] = vfmaq_f32(acc[iy], vmulq_f32(d4, vdupq_n_f32(q8.scale(iy, ibl))), vcvtq_f32_s32(isum[iy]));
}
}
for (int iy = 0; iy < nrc_y; ++iy) {
info.store(ix, iy, acc[iy]);
@@ -9030,7 +9023,7 @@ bool MulMat::prepare(int typeA, int typeB, int ne00, MulMat& m, int /*Ny*/) {
break;
case GGML_TYPE_IQ4_XS_R4:
SET_MUL_MAT_FUNCTIONS(m, mul_mat_iq4_xs_r4_q8_k);
expected_Btype = GGML_TYPE_Q8_K;
expected_Btype = GGML_TYPE_Q8_K32;
break;
case GGML_TYPE_Q3_K_R4:
SET_MUL_MAT_FUNCTIONS(m, mul_mat_q3_k_r4_q8_k);