Minor performance improvements (#179)

* Try interleaving 8 rows for iq4_xs

On Zen4, PP-512 goes up from ~260 t/s to 288 t/s for L3-8B.
TG-128 reaches maximum performance at 2 threads and is slightly
higher than with 4 interleaved rows (14.48 t/s vs 13.11 t/s @ 2 threads
and 14.28 t/s @ 4 threads).

* Try interleaving 8 iq4_xs rows

It is also faster on AVX2.

This is the NEON implementation. It is a tiny bit faster than
4 interleaved rows (~0.5%).

So, this looks like a winner given the Zen4/AVX2 improvement
without an associated NEON regression.

* Cleanup

* 8-rows interleaved q8_0 (AVX2)

* 8-rows interleaved q8_0 (Zen4)

* 8-rows interleaved q8_0 (Zen4) - slightly better

PP-512 is now 284 t/s compared to 257 t/s for 4 interleaved rows.
TG-128 reaches a peak of 8.16 t/s at just 2 threads, compared
to 7.95 t/s @ 4 threads before.

* 8-rows interleaved q8_0 (NEON)

PP-512 is slightly better (138 t/s vs 132.5 t/s), while TG-128 is about
the same.

* FA: repack Q8_0 to Q8_0_R8

* Remove special purpose mul_mat_q8_0_r4_q8_1_128 (Zen4)

* FA: repack Q8_0 to Q8_0_R8 (NEON)

Very slightly faster than the general-purpose GEMM, slightly
slower than the D = 128 special-case GEMM mul_mat_q8_0_r4_q8_0_128.
Still removing mul_mat_q8_0_r4_q8_0_128, as we simply don't have
enough vector registers to hold 8 interleaved rows, so there is
no point in keeping the special-purpose implementation.

* q4_0_r8 (AVX2)

* q4_0_r8 (NEON)

A tiny bit faster PP (~128 vs ~126 t/s), same TG.

* q4_0_r8 (Zen4)

Somehow only marginally faster:
268 t/s vs 261 t/s.

* q4_0_r8 (Zen4) - slightly better

282 t/s for a pure q4_0 L3-8B quantization.

* Apply platform specific modifications when repacking

E.g., on NEON it is useful to pre-apply q ^ 0x88 to q4_0
(see the sketch after this list). This results in a ~3%
performance improvement.
Hence,
* Changed the signature of the repack_X functions to take a
  bool argument indicating whether the repacking is done online
  and, if so, to apply modifications as appropriate while repacking.
* Added iqk_modify_tensor to apply the modifications at model load
  time to tensors that were stored already repacked. Caveat:
  just like rtr, this needs to have mmap disabled (else one would
  need to move the data to a non-mmap-ed buffer, which is much
  more complicated).
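
For illustration, here is a minimal scalar sketch (plain C++, not the
actual NEON kernel) of why the XOR works: q4_0 packs two unsigned
nibbles q in [0, 15] per byte, each encoding the value q - 8. After
q ^ 0x88, moving a nibble into the high half of a byte and reading it
as a signed int8 yields 16*(q - 8) directly; the extra factor of 16 is
folded into the dequantizer's 1/16 "norm" scale, so the per-block
subtraction of 8 disappears.

#include <cstdint>
#include <cstdio>

int main() {
    for (int lo = 0; lo < 16; ++lo) {
        for (int hi = 0; hi < 16; ++hi) {
            uint8_t packed  = (uint8_t)(lo | (hi << 4));
            uint8_t flipped = packed ^ 0x88;                  // done once, at repack time
            // What the kernel does per block: vshlq_n_u8(bits, 4) / vandq_u8(bits, 0xf0),
            // then treats the resulting bytes as signed int8.
            int8_t lo16 = (int8_t)(uint8_t)(flipped << 4);    // low  nibble -> high half
            int8_t hi16 = (int8_t)(uint8_t)(flipped & 0xf0);  // high nibble stays in high half
            if (lo16 != 16 * (lo - 8) || hi16 != 16 * (hi - 8)) {
                std::printf("mismatch at lo=%d hi=%d\n", lo, hi);
                return 1;
            }
        }
    }
    std::printf("q ^ 0x88 reproduces 16*(q - 8) for all nibble pairs\n");
    return 0;
}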

* Apply platform specific modifications when repacking

On Zen4 we can pre-convert the signed quants in q8_0_r4 and
q8_k_r8 to unsigned, thus avoiding these conversions in the matrix
multiplications. With this change we hit
PP-512 = 382.40 t/s (q8_k_r8)
PP-512 = 306.92 t/s (q8_0_r4)
for L3-8B on a Ryzen-7950X using q8_0 KV-cache.
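
The bookkeeping is simple: _mm512_dpbusd_epi32 wants its first operand
unsigned, so the q8_0_r4 weights are stored as q + 127 at repack time
(q8_k_r8 uses q ^ 0x80, i.e. q + 128, with the same idea), and the
kernel subtracts 127 * sum(y) at the end, a quantity the q8_1/q8_K
activation blocks already carry. A scalar model of this, for
illustration only:

#include <cstdint>
#include <cstdio>

int main() {
    int8_t  w[8] = { -7, 3, 0, -127, 12, -1, 5, -9 };  // signed q8_0 weights (never -128)
    int8_t  y[8] = {  2, -5, 7, 1, -3, 4, 0, 6 };      // signed q8 activations
    int32_t exact = 0, dpbusd_like = 0, ysum = 0;
    for (int i = 0; i < 8; ++i) {
        exact       += (int32_t)w[i] * y[i];
        dpbusd_like += (int32_t)(uint8_t)(w[i] + 127) * y[i];  // unsigned * signed, as dpbusd does
        ysum        += y[i];
    }
    // Subtracting 127 * sum(y) recovers the exact signed dot product.
    std::printf("%d == %d\n", exact, dpbusd_like - 127 * ysum);
    return 0;
}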

* Process up to 16 columns per kernel call for q8_k_r8

This brings PP-512 up to 389 t/s.

* Be able to load Deepseek-v2-Lite

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Kawrakow, 2025-01-27 18:53:47 +02:00 (committed by GitHub)
commit f725576345, parent d9c4ea48d1
5 changed files with 455 additions and 186 deletions


@@ -528,7 +528,12 @@ typedef struct {
ggml_half d[4];
uint8_t qs[2*QK4_NL];
} block_iq4_nl_r4;
static_assert(sizeof(block_iq4_nl_r4) == 4*sizeof(ggml_half) + 2*QK4_NL, "wrong iq4_nl_x4 block size/padding");
static_assert(sizeof(block_iq4_nl_r4) == 4*sizeof(ggml_half) + 2*QK4_NL, "wrong iq4_nl_r4 block size/padding");
typedef struct {
ggml_half d[8];
uint8_t qs[4*QK4_NL];
} block_iq4_nl_r8;
static_assert(sizeof(block_iq4_nl_r8) == 8*sizeof(ggml_half) + 4*QK4_NL, "wrong iq4_nl_r8 block size/padding");
typedef struct {
ggml_half d;


@@ -242,7 +242,6 @@ struct MulMat {
case GGML_TYPE_Q4_K_R4:
case GGML_TYPE_Q5_K_R4:
case GGML_TYPE_Q6_K_R4:
case GGML_TYPE_Q4_0_R4:
case GGML_TYPE_Q5_0_R4:
case GGML_TYPE_Q6_0_R4:
case GGML_TYPE_IQ4_NL_R4:
@@ -258,6 +257,7 @@ struct MulMat {
case GGML_TYPE_IQ3_S_R4:
case GGML_TYPE_IQ2_BN_R4: return 4;
case GGML_TYPE_IQ4_XS_R4:
case GGML_TYPE_Q4_0_R4:
case GGML_TYPE_Q8_0_R4:
case GGML_TYPE_Q8_K_R8: return 8;
case GGML_TYPE_BF16_R16: return 16;
@@ -2538,52 +2538,119 @@ static void mul_mat_iq4_nl_r4_q8_1(int n, const void * vx, size_t bx, const Data
}
#endif
inline void prepare_q4_0_quants_avx2(const uint8_t * qs, __m256i * v, const __m256i& m4) {
auto bits1 = _mm256_loadu_si256((const __m256i *)qs+0);
auto bits2 = _mm256_loadu_si256((const __m256i *)qs+1);
auto bits3 = _mm256_loadu_si256((const __m256i *)qs+2);
auto bits4 = _mm256_loadu_si256((const __m256i *)qs+3);
v[0] = _mm256_and_si256(bits1, m4);
v[1] = _mm256_and_si256(bits2, m4);
v[2] = _mm256_and_si256(bits3, m4);
v[3] = _mm256_and_si256(bits4, m4);
v[4] = _mm256_and_si256(_mm256_srli_epi16(bits1, 4), m4);
v[5] = _mm256_and_si256(_mm256_srli_epi16(bits2, 4), m4);
v[6] = _mm256_and_si256(_mm256_srli_epi16(bits3, 4), m4);
v[7] = _mm256_and_si256(_mm256_srli_epi16(bits4, 4), m4);
}
inline __m256i accum_q4_0_quants(const __m256i * v, const int8_t * qs) {
auto y4l = _mm_loadu_si128((const __m128i*)qs+0);
auto y4h = _mm_loadu_si128((const __m128i*)qs+1);
auto yl = MM256_SET_M128I(y4l, y4l);
auto yh = MM256_SET_M128I(y4h, y4h);
#ifdef HAVE_FANCY_SIMD
auto sumi = _mm256_setzero_si256();
sumi = _mm256_dpbusd_epi32(sumi, v[0], _mm256_shuffle_epi32(yl, 0x00));
sumi = _mm256_dpbusd_epi32(sumi, v[1], _mm256_shuffle_epi32(yl, 0x55));
sumi = _mm256_dpbusd_epi32(sumi, v[2], _mm256_shuffle_epi32(yl, 0xaa));
sumi = _mm256_dpbusd_epi32(sumi, v[3], _mm256_shuffle_epi32(yl, 0xff));
sumi = _mm256_dpbusd_epi32(sumi, v[4], _mm256_shuffle_epi32(yh, 0x00));
sumi = _mm256_dpbusd_epi32(sumi, v[5], _mm256_shuffle_epi32(yh, 0x55));
sumi = _mm256_dpbusd_epi32(sumi, v[6], _mm256_shuffle_epi32(yh, 0xaa));
sumi = _mm256_dpbusd_epi32(sumi, v[7], _mm256_shuffle_epi32(yh, 0xff));
#else
auto sumi1 = _mm256_add_epi16(_mm256_maddubs_epi16(v[0], _mm256_shuffle_epi32(yl, 0x00)),
_mm256_maddubs_epi16(v[1], _mm256_shuffle_epi32(yl, 0x55)));
auto sumi2 = _mm256_add_epi16(_mm256_maddubs_epi16(v[2], _mm256_shuffle_epi32(yl, 0xaa)),
_mm256_maddubs_epi16(v[3], _mm256_shuffle_epi32(yl, 0xff)));
auto sumi3 = _mm256_add_epi16(_mm256_maddubs_epi16(v[4], _mm256_shuffle_epi32(yh, 0x00)),
_mm256_maddubs_epi16(v[5], _mm256_shuffle_epi32(yh, 0x55)));
auto sumi4 = _mm256_add_epi16(_mm256_maddubs_epi16(v[6], _mm256_shuffle_epi32(yh, 0xaa)),
_mm256_maddubs_epi16(v[7], _mm256_shuffle_epi32(yh, 0xff)));
auto sumi = _mm256_add_epi32(_mm256_madd_epi16(_mm256_set1_epi16(1), _mm256_add_epi16(sumi1, sumi2)),
_mm256_madd_epi16(_mm256_set1_epi16(1), _mm256_add_epi16(sumi3, sumi4)));
#endif
return sumi;
}
template <int nrc_y>
static void mul_mat_q4_0_r4_q8_1_avx2(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
GGML_ASSERT(nrc_x%4 == 0);
GGML_ASSERT(nrc_x%8 == 0);
Q8<nrc_y, block_q8_1_x4> q8(info);
auto m4 = _mm256_set1_epi8(0xf);
auto m1 = _mm256_set1_epi16(1);
int nb = n / QK4_NL;
__m256i v[8];
GGML_ASSERT(nb%4 == 0);
if constexpr (nrc_y == 1) {
union { __m256 vec; float val[8]; } helper;
for (int ix = 0; ix < nrc_x; ix += 8) {
const block_iq4_nl_r8 * iq4 = (const block_iq4_nl_r8 *)((const char *)vx + ix*bx);
auto acc1 = _mm256_setzero_ps();
auto acc2 = _mm256_setzero_ps();
for (int ib4 = 0; ib4 < nb/4; ++ib4) {
helper.vec = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)q8.y[0][ib4].d));
for (int k = 0; k < 4; ++k) {
auto scales = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)iq4[4*ib4+k].d));
prepare_q4_0_quants_avx2(iq4[4*ib4+k].qs, v, m4);
auto sumi = accum_q4_0_quants(v, q8.y[0][ib4].qs+32*k);
auto d4d8 = _mm256_mul_ps(scales, _mm256_set1_ps(helper.val[k]));
acc1 = _mm256_fmadd_ps(d4d8, _mm256_cvtepi32_ps(sumi), acc1);
acc2 = _mm256_fmadd_ps(scales, _mm256_set1_ps(helper.val[k+4]), acc2);
}
}
acc1 = _mm256_fmadd_ps(acc2, _mm256_set1_ps(-8.f), acc1);
info.store(ix, 0, acc1);
}
}
else {
__m256 acc[nrc_y] = {};
float d8[8*nrc_y];
for (int ix = 0; ix < nrc_x; ix += 4) {
const block_iq4_nl_r4 * iq4 = (const block_iq4_nl_r4 *)((const char *)vx + ix*bx);
for (int ix = 0; ix < nrc_x; ix += 8) {
const block_iq4_nl_r8 * iq4 = (const block_iq4_nl_r8 *)((const char *)vx + ix*bx);
for (int ib4 = 0; ib4 < nb/4; ++ib4) {
for (int iy = 0; iy < nrc_y; ++iy) {
auto scales = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)q8.y[iy][ib4].d));
_mm256_storeu_ps(d8 + 8*iy, scales);
{
__m256 d4[4];
for (int k = 0; k < 4; ++k) {
d4[k] = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)iq4[4*ib4+k].d));
}
for (int iy = 0; iy < nrc_y; ++iy) {
auto scales = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)q8.y[iy][ib4].d));
_mm256_storeu_ps(d8 + 8*iy, scales);
auto m4 = _mm256_extractf128_ps(scales, 1);
auto m8 = _mm256_set_m128(m4, m4);
auto sumf = _mm256_mul_ps(d4[0], _mm256_shuffle_ps(m8, m8, 0x00));
sumf = _mm256_fmadd_ps(d4[1], _mm256_shuffle_ps(m8, m8, 0x55), sumf);
sumf = _mm256_fmadd_ps(d4[2], _mm256_shuffle_ps(m8, m8, 0xaa), sumf);
sumf = _mm256_fmadd_ps(d4[3], _mm256_shuffle_ps(m8, m8, 0xff), sumf);
acc[iy] = _mm256_fmadd_ps(sumf, _mm256_set1_ps(-8.f), acc[iy]);
}
}
for (int k = 0; k < 4; ++k) {
auto scales128 = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)iq4[4*ib4+k].d));
auto scales = _mm256_set_m128(scales128, scales128);
auto scales_m = _mm256_mul_ps(scales, _mm256_set1_ps(-4.f));
auto bits1 = _mm256_loadu_si256((const __m256i *)iq4[4*ib4+k].qs+0);
auto bits2 = _mm256_loadu_si256((const __m256i *)iq4[4*ib4+k].qs+1);
auto q1 = _mm256_and_si256(bits1, m4);
auto q2 = _mm256_and_si256(bits2, m4);
auto q3 = _mm256_and_si256(_mm256_srli_epi16(bits1, 4), m4);
auto q4 = _mm256_and_si256(_mm256_srli_epi16(bits2, 4), m4);
auto scales = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)iq4[4*ib4+k].d));
prepare_q4_0_quants_avx2(iq4[4*ib4+k].qs, v, m4);
for (int iy = 0; iy < nrc_y; ++iy) {
auto y = _mm256_loadu_si256((const __m256i*)q8.y[iy][ib4].qs+k);
auto sumi1 = _mm256_add_epi16(_mm256_maddubs_epi16(q1, _mm256_shuffle_epi32(y, 0x00)),
_mm256_maddubs_epi16(q2, _mm256_shuffle_epi32(y, 0x55)));
auto sumi2 = _mm256_add_epi16(_mm256_maddubs_epi16(q3, _mm256_shuffle_epi32(y, 0xaa)),
_mm256_maddubs_epi16(q4, _mm256_shuffle_epi32(y, 0xff)));
auto sumi = _mm256_madd_epi16(m1, _mm256_add_epi16(sumi1, sumi2));
auto sumi = accum_q4_0_quants(v, q8.y[iy][ib4].qs+32*k);
auto d4d8 = _mm256_mul_ps(scales, _mm256_set1_ps(d8[8*iy+k]));
acc[iy] = _mm256_fmadd_ps(d4d8, _mm256_cvtepi32_ps(sumi), acc[iy]);
acc[iy] = _mm256_fmadd_ps(scales_m, _mm256_set1_ps(d8[8*iy+4+k]), acc[iy]);
}
}
}
for (int iy = 0; iy < nrc_y; ++iy) {
auto sum = _mm_add_ps(_mm256_castps256_ps128(acc[iy]), _mm256_extractf128_ps(acc[iy], 1));
info.store(ix, iy, sum);
info.store(ix, iy, acc[iy]);
acc[iy] = _mm256_setzero_ps();
}
}
}
}
#ifdef HAVE_FANCY_SIMD
@@ -2593,53 +2660,67 @@ static void mul_mat_q4_0_r4_q8_1(int n, const void * vx, size_t bx, const DataIn
mul_mat_q4_0_r4_q8_1_avx2<1>(n, vx, bx, info, nrc_x);
return;
}
GGML_ASSERT(nrc_x%8 == 0);
GGML_ASSERT(nrc_x%16 == 0);
Q8<nrc_y, block_q8_1_x4> q8(info);
auto m4 = _mm512_set1_epi8(0xf);
int nb = n / QK4_NL;
GGML_ASSERT(nb%4 == 0);
__m512 acc[2*nrc_y] = {};
__m512i qx[4];
for (int ix = 0; ix < nrc_x; ix += 8) {
const block_iq4_nl_r4 * iq4l = (const block_iq4_nl_r4 *)((const char *)vx + (ix+0)*bx);
const block_iq4_nl_r4 * iq4h = (const block_iq4_nl_r4 *)((const char *)vx + (ix+4)*bx);
__m512i qx[8];
float d8[8*nrc_y];
for (int ix = 0; ix < nrc_x; ix += 16) {
const block_iq4_nl_r8 * iq4l = (const block_iq4_nl_r8 *)((const char *)vx + (ix+0)*bx);
const block_iq4_nl_r8 * iq4h = (const block_iq4_nl_r8 *)((const char *)vx + (ix+8)*bx);
for (int ib4 = 0; ib4 < nb/4; ++ib4) {
for (int iy = 0; iy < nrc_y; ++iy) {
_mm256_storeu_ps(d8+8*iy, _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)q8.y[iy][ib4].d)));
}
for (int k = 0; k < 4; ++k) {
auto scales128 = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)iq4l[4*ib4+k].d));
auto scales1 = _mm256_set_m128(scales128, scales128);
scales128 = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)iq4h[4*ib4+k].d));
auto scales2 = _mm256_set_m128(scales128, scales128);
auto scales1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)iq4l[4*ib4+k].d));
auto scales2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)iq4h[4*ib4+k].d));
auto scales = _mm512_insertf32x8(_mm512_castps256_ps512(scales1), scales2, 1);
auto scales_m = _mm512_mul_ps(scales, _mm512_set1_ps(-4.f));
auto bits1 = _mm512_inserti32x8(_mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)iq4l[4*ib4+k].qs+0)),
_mm256_loadu_si256((const __m256i *)iq4h[4*ib4+k].qs+0), 1);
auto bits2 = _mm512_inserti32x8(_mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)iq4l[4*ib4+k].qs+1)),
_mm256_loadu_si256((const __m256i *)iq4h[4*ib4+k].qs+1), 1);
auto bits3 = _mm512_inserti32x8(_mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)iq4l[4*ib4+k].qs+2)),
_mm256_loadu_si256((const __m256i *)iq4h[4*ib4+k].qs+2), 1);
auto bits4 = _mm512_inserti32x8(_mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)iq4l[4*ib4+k].qs+3)),
_mm256_loadu_si256((const __m256i *)iq4h[4*ib4+k].qs+3), 1);
qx[0] = _mm512_and_si512(bits1, m4);
qx[1] = _mm512_and_si512(bits2, m4);
qx[2] = _mm512_and_si512(_mm512_srli_epi16(bits1, 4), m4);
qx[3] = _mm512_and_si512(_mm512_srli_epi16(bits2, 4), m4);
qx[2] = _mm512_and_si512(bits3, m4);
qx[3] = _mm512_and_si512(bits4, m4);
qx[4] = _mm512_and_si512(_mm512_srli_epi16(bits1, 4), m4);
qx[5] = _mm512_and_si512(_mm512_srli_epi16(bits2, 4), m4);
qx[6] = _mm512_and_si512(_mm512_srli_epi16(bits3, 4), m4);
qx[7] = _mm512_and_si512(_mm512_srli_epi16(bits4, 4), m4);
for (int iy = 0; iy < nrc_y; ++iy) {
auto y8 = _mm256_loadu_si256((const __m256i*)q8.y[iy][ib4].qs+k);
auto y = _mm512_inserti32x8(_mm512_castsi256_si512(y8), y8, 1);
auto y4l = _mm_loadu_si128((const __m128i*)q8.y[iy][ib4].qs+2*k+0);
auto y4h = _mm_loadu_si128((const __m128i*)q8.y[iy][ib4].qs+2*k+1);
auto y8l = MM256_SET_M128I(y4l, y4l);
auto y8h = MM256_SET_M128I(y4h, y4h);
auto yl = _mm512_inserti32x8(_mm512_castsi256_si512(y8l), y8l, 1);
auto yh = _mm512_inserti32x8(_mm512_castsi256_si512(y8h), y8h, 1);
auto sumi = _mm512_setzero_si512();
sumi = _mm512_dpbusd_epi32(sumi, qx[0], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0x00)));
sumi = _mm512_dpbusd_epi32(sumi, qx[1], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0x55)));
sumi = _mm512_dpbusd_epi32(sumi, qx[2], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0xaa)));
sumi = _mm512_dpbusd_epi32(sumi, qx[3], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0xff)));
auto dy = _mm512_set1_ps(GGML_FP16_TO_FP32(q8.y[iy][ib4].d[k]));
sumi = _mm512_dpbusd_epi32(sumi, qx[0], _mm512_shuffle_epi32(yl, _MM_PERM_ENUM(0x00)));
sumi = _mm512_dpbusd_epi32(sumi, qx[1], _mm512_shuffle_epi32(yl, _MM_PERM_ENUM(0x55)));
sumi = _mm512_dpbusd_epi32(sumi, qx[2], _mm512_shuffle_epi32(yl, _MM_PERM_ENUM(0xaa)));
sumi = _mm512_dpbusd_epi32(sumi, qx[3], _mm512_shuffle_epi32(yl, _MM_PERM_ENUM(0xff)));
sumi = _mm512_dpbusd_epi32(sumi, qx[4], _mm512_shuffle_epi32(yh, _MM_PERM_ENUM(0x00)));
sumi = _mm512_dpbusd_epi32(sumi, qx[5], _mm512_shuffle_epi32(yh, _MM_PERM_ENUM(0x55)));
sumi = _mm512_dpbusd_epi32(sumi, qx[6], _mm512_shuffle_epi32(yh, _MM_PERM_ENUM(0xaa)));
sumi = _mm512_dpbusd_epi32(sumi, qx[7], _mm512_shuffle_epi32(yh, _MM_PERM_ENUM(0xff)));
auto dy = _mm512_set1_ps(d8[8*iy+k]);
acc[2*iy+0] = _mm512_fmadd_ps(_mm512_mul_ps(scales, dy), _mm512_cvtepi32_ps(sumi), acc[2*iy+0]);
acc[2*iy+1] = _mm512_fmadd_ps(scales_m, _mm512_set1_ps(GGML_FP16_TO_FP32(q8.y[iy][ib4].d[k+4])), acc[2*iy+1]);
acc[2*iy+1] = _mm512_fmadd_ps(scales, _mm512_set1_ps(d8[8*iy+k+4]), acc[2*iy+1]);
}
}
}
for (int iy = 0; iy < nrc_y; ++iy) {
auto sum512 = _mm512_add_ps(acc[2*iy+0], acc[2*iy+1]);
auto sum = _mm512_fmadd_ps(_mm512_set1_ps(-8.f), acc[2*iy+1], acc[2*iy+0]);
acc[2*iy+0] = acc[2*iy+1] = _mm512_setzero_ps();
auto sum1 = _mm_add_ps(_mm512_extractf32x4_ps(sum512, 0), _mm512_extractf32x4_ps(sum512, 1));
auto sum2 = _mm_add_ps(_mm512_extractf32x4_ps(sum512, 2), _mm512_extractf32x4_ps(sum512, 3));
info.store(ix+0, iy, sum1);
info.store(ix+4, iy, sum2);
info.store(ix, iy, sum);
}
}
}
@@ -2907,7 +2988,6 @@ static void mul_mat_q8_0_r4_q8_1(int n, const void * vx, size_t bx, const DataIn
int nb = n / QK8_0;
GGML_ASSERT(nb%4 == 0);
if constexpr (nrc_y == 1) {
auto m127 = _mm256_set1_epi8(127);
__m256 acc[2] = {};
__m256i qx[8];
float d8[8];
@@ -2917,15 +2997,14 @@ static void mul_mat_q8_0_r4_q8_1(int n, const void * vx, size_t bx, const DataIn
_mm256_storeu_ps(d8, _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)q8.y[0][ib4].d)));
for (int k = 0; k < 4; ++k) {
auto scales = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)iq8[4*ib4+k].d));
auto scales_m = _mm256_mul_ps(scales, _mm256_set1_ps(-127.f));
qx[0] = _mm256_add_epi8(_mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+0), m127);
qx[1] = _mm256_add_epi8(_mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+1), m127);
qx[2] = _mm256_add_epi8(_mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+2), m127);
qx[3] = _mm256_add_epi8(_mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+3), m127);
qx[4] = _mm256_add_epi8(_mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+4), m127);
qx[5] = _mm256_add_epi8(_mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+5), m127);
qx[6] = _mm256_add_epi8(_mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+6), m127);
qx[7] = _mm256_add_epi8(_mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+7), m127);
qx[0] = _mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+0);
qx[1] = _mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+1);
qx[2] = _mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+2);
qx[3] = _mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+3);
qx[4] = _mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+4);
qx[5] = _mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+5);
qx[6] = _mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+6);
qx[7] = _mm256_loadu_si256((const __m256i *)iq8[4*ib4+k].qs+7);
auto y4l = _mm_loadu_si128((const __m128i*)q8.y[0][ib4].qs+2*k+0);
auto y4h = _mm_loadu_si128((const __m128i*)q8.y[0][ib4].qs+2*k+1);
auto yl = MM256_SET_M128I(y4l, y4l);
@@ -2941,17 +3020,16 @@ static void mul_mat_q8_0_r4_q8_1(int n, const void * vx, size_t bx, const DataIn
sumi = _mm256_dpbusd_epi32(sumi, qx[7], _mm256_shuffle_epi32(yh, 0xff));
auto d4d8 = _mm256_mul_ps(scales, _mm256_set1_ps(d8[k]));
acc[0] = _mm256_fmadd_ps(d4d8, _mm256_cvtepi32_ps(sumi), acc[0]);
acc[1] = _mm256_fmadd_ps(scales_m, _mm256_set1_ps(d8[k+4]), acc[1]);
acc[1] = _mm256_fmadd_ps(scales, _mm256_set1_ps(d8[k+4]), acc[1]);
}
}
info.store(ix, 0, _mm256_add_ps(acc[0], acc[1]));
info.store(ix, 0, _mm256_fmadd_ps(_mm256_set1_ps(-127.f), acc[1], acc[0]));
acc[0] = acc[1] = _mm256_setzero_ps();
}
} else {
__m512 acc[2*nrc_y] = {};
__m512i qx[8];
float d8[8*nrc_y];
auto m127 = _mm512_set1_epi8(127);
for (int ix = 0; ix < nrc_x; ix += 16) {
const block_q8_0_r8 * q8l = (const block_q8_0_r8 *)((const char *)vx + (ix+0)*bx);
const block_q8_0_r8 * q8h = (const block_q8_0_r8 *)((const char *)vx + (ix+8)*bx);
@@ -2963,11 +3041,9 @@ static void mul_mat_q8_0_r4_q8_1(int n, const void * vx, size_t bx, const DataIn
auto scales1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)q8l[4*ib4+k].d));
auto scales2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)q8h[4*ib4+k].d));
auto scales = _mm512_insertf32x8(_mm512_castps256_ps512(scales1), scales2, 1);
auto scales_m = _mm512_mul_ps(scales, _mm512_set1_ps(-127.f));
for (int j = 0; j < 8; ++j) {
qx[j] = _mm512_inserti32x8(_mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)q8l[4*ib4+k].qs+j)),
_mm256_loadu_si256((const __m256i *)q8h[4*ib4+k].qs+j), 1);
qx[j] = _mm512_add_epi8(qx[j], m127);
}
for (int iy = 0; iy < nrc_y; ++iy) {
auto y4l = _mm_loadu_si128((const __m128i*)q8.y[iy][ib4].qs+2*k+0);
@@ -2987,18 +3063,14 @@ static void mul_mat_q8_0_r4_q8_1(int n, const void * vx, size_t bx, const DataIn
sumi = _mm512_dpbusd_epi32(sumi, qx[7], _mm512_shuffle_epi32(yh, _MM_PERM_ENUM(0xff)));
auto dy = _mm512_set1_ps(d8[8*iy+k]);
acc[2*iy+0] = _mm512_fmadd_ps(_mm512_mul_ps(scales, dy), _mm512_cvtepi32_ps(sumi), acc[2*iy+0]);
acc[2*iy+1] = _mm512_fmadd_ps(scales_m, _mm512_set1_ps(d8[8*iy+k+4]), acc[2*iy+1]);
acc[2*iy+1] = _mm512_fmadd_ps(scales, _mm512_set1_ps(d8[8*iy+k+4]), acc[2*iy+1]);
}
}
}
for (int iy = 0; iy < nrc_y; ++iy) {
auto sum512 = _mm512_add_ps(acc[2*iy+0], acc[2*iy+1]);
auto sum512 = _mm512_fmadd_ps(_mm512_set1_ps(-127.f), acc[2*iy+1], acc[2*iy+0]);
info.store(ix, iy, sum512);
acc[2*iy+0] = acc[2*iy+1] = _mm512_setzero_ps();
//auto sum1 = _mm_add_ps(_mm512_extractf32x4_ps(sum512, 0), _mm512_extractf32x4_ps(sum512, 1));
//auto sum2 = _mm_add_ps(_mm512_extractf32x4_ps(sum512, 2), _mm512_extractf32x4_ps(sum512, 3));
//info.store(ix+0, iy, sum1);
//info.store(ix+4, iy, sum2);
}
}
}
@@ -4995,12 +5067,7 @@ static void mul_mat_q8_k_r8_q8_k(int n, const void * vx, size_t bx, const DataIn
qx[1] = _mm256_loadu_si256((const __m256i *)iq8[ibl].qs+4*ib+1);
qx[2] = _mm256_loadu_si256((const __m256i *)iq8[ibl].qs+4*ib+2);
qx[3] = _mm256_loadu_si256((const __m256i *)iq8[ibl].qs+4*ib+3);
#ifdef HAVE_FANCY_SIMD
qx[0] = _mm256_xor_si256(_mm256_loadu_si256((const __m256i *)iq8[ibl].qs+4*ib+0), _mm256_set1_epi8(-128));
qx[1] = _mm256_xor_si256(_mm256_loadu_si256((const __m256i *)iq8[ibl].qs+4*ib+1), _mm256_set1_epi8(-128));
qx[2] = _mm256_xor_si256(_mm256_loadu_si256((const __m256i *)iq8[ibl].qs+4*ib+2), _mm256_set1_epi8(-128));
qx[3] = _mm256_xor_si256(_mm256_loadu_si256((const __m256i *)iq8[ibl].qs+4*ib+3), _mm256_set1_epi8(-128));
#else
#ifndef HAVE_FANCY_SIMD
auto s0 = _mm256_sign_epi8(qx[0], qx[0]);
auto s1 = _mm256_sign_epi8(qx[1], qx[1]);
auto s2 = _mm256_sign_epi8(qx[2], qx[2]);
@@ -7924,6 +7991,9 @@ bool MulMat::prepare(int typeA, int typeB, int ne00, MulMat& mm, int Ny) {
mm.funcs[5] = mul_mat_q8_k_r8_q8_k<6>;
mm.funcs[6] = mul_mat_q8_k_r8_q8_k<7>;
mm.funcs[7] = mul_mat_q8_k_r8_q8_k<8>;
#ifdef HAVE_FANCY_SIMD
mm.func16 = mul_mat_q8_k_r8_q8_k<16>;
#endif
expected_typeB = GGML_TYPE_Q8_KR8;
break;
case GGML_TYPE_IQ4_K_R4:
@@ -7989,6 +8059,9 @@ bool MulMat::prepare(int typeA, int typeB, int ne00, MulMat& mm, int Ny) {
mm.funcs[5] = mul_mat_q4_0_r4_q8_1<6>;
mm.funcs[6] = mul_mat_q4_0_r4_q8_1<7>;
mm.funcs[7] = mul_mat_q4_0_r4_q8_1<8>;
#ifdef HAVE_FANCY_SIMD
mm.func16 = mul_mat_q4_0_r4_q8_1<16>;
#endif
expected_typeB = GGML_TYPE_Q8_1_X4;
break;
case GGML_TYPE_Q5_0_R4:
@@ -12067,6 +12140,42 @@ void mul_mat_qx_r4_q8_0(int n, const void * vx, size_t bx, const DataInfo& info,
}
}
template <typename Dequantizer, int nrc_y>
void mul_mat_qx_r8_q8_0(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
GGML_ASSERT(nrc_x%8 == 0);
Q8<nrc_y, block_q8_0_x4> q8(info);
Dequantizer deq(vx, bx);
int nb = n / QK4_NL;
GGML_ASSERT(nb%4 == 0);
int8x16_t qx[16];
float d8[4*nrc_y];
float32x4_t acc[2*nrc_y] = {};
for (int ix = 0; ix < nrc_x; ix += 8) {
deq.new_row(ix);
for (int ib4 = 0; ib4 < nb/4; ++ib4) {
for (int iy = 0; iy < nrc_y; ++iy) {
vst1q_f32(d8+4*iy, vcvt_f32_f16(vld1_f16((const float16_t *)q8.y[iy][ib4].d)));
}
for (int k = 0; k < 4; ++k) {
auto scales = deq.prepare(ib4, k, qx);
for (int iy = 0; iy < nrc_y; ++iy) {
auto y = vld1q_s8_x2(q8.y[iy][ib4].qs+32*k);
auto sumi1 = interleaved_dotq(qx+0, y);
auto sumi2 = interleaved_dotq(qx+8, y);
auto dy = vdupq_n_f32(d8[4*iy+k]);
acc[2*iy+0] = vfmaq_f32(acc[2*iy+0], vmulq_f32(scales.val[0], dy), vcvtq_f32_s32(sumi1));
acc[2*iy+1] = vfmaq_f32(acc[2*iy+1], vmulq_f32(scales.val[1], dy), vcvtq_f32_s32(sumi2));
}
}
}
for (int iy = 0; iy < nrc_y; ++iy) {
info.store(ix+0, iy, deq.result(acc[2*iy+0]));
info.store(ix+4, iy, deq.result(acc[2*iy+1]));
acc[2*iy] = acc[2*iy+1] = vdupq_n_f32(0.f);
}
}
}
struct IQ4_NL_R4_Dequantizer {
IQ4_NL_R4_Dequantizer(const void * vx, size_t bx) : cx((const char *)vx), bx(bx), values(vld1q_s8(iq4k_values)) {}
inline void new_row(int ix) { iq4 = (const block_iq4_nl_r4 *)(cx + ix*bx); }
@@ -12116,6 +12225,35 @@ struct Q4_0_R4_Dequantizer {
const float32x4_t norm = vdupq_n_f32(1.f/16);
};
struct Q4_0_R8_Dequantizer {
Q4_0_R8_Dequantizer(const void * vx, size_t bx) : cx((const char *)vx), bx(bx) {}
inline void new_row(int ix) { iq4 = (const block_iq4_nl_r8 *)(cx + ix*bx); }
inline float32x4x2_t prepare(int ib4, int k, int8x16_t * qx) const {
auto scales16 = vld1q_f16((const float16_t *)iq4[4*ib4+k].d);
float32x4x2_t scales = { vcvt_f32_f16(vget_low_f16(scales16)), vcvt_f32_f16(vget_high_f16(scales16)) };
for (int j = 0; j < 4; ++j) {
auto bits = vld1q_u8_x2(iq4[4*ib4+k].qs + 32*j);
//bits.val[0] = veorq_u8(m88, bits.val[0]);
//bits.val[1] = veorq_u8(m88, bits.val[1]);
qx[2*j+0] = vshlq_n_u8(bits.val[0], 4);
qx[2*j+1] = vandq_u8(bits.val[0], m4);
qx[2*j+8] = vshlq_n_u8(bits.val[1], 4);
qx[2*j+9] = vandq_u8(bits.val[1], m4);
}
return scales;
}
inline float32x4_t result(float32x4_t acc) const {
return vmulq_f32(norm, acc);
}
const char * cx;
const size_t bx;
const block_iq4_nl_r8 * iq4;
const uint8x16_t m4 = vdupq_n_u8(0xf0);
const uint8x16_t m88 = vdupq_n_u8(0x88);
const float32x4_t norm = vdupq_n_f32(1.f/16);
};
struct Q5_0_R4_Dequantizer {
Q5_0_R4_Dequantizer(const void * vx, size_t bx) : cx((const char *)vx), bx(bx) {}
inline void new_row(int ix) { iq5 = (const block_q5_0_r4 *)(cx + ix*bx); }
@@ -12471,7 +12609,7 @@ bool MulMat::prepare(int typeA, int typeB, int ne00, MulMat& m, int /*Ny*/) {
expected_Btype = GGML_TYPE_Q8_K;
break;
case GGML_TYPE_Q4_0_R4:
SET_MUL_MAT_FUNCTIONS_T(m, mul_mat_qx_r4_q8_0, Q4_0_R4_Dequantizer);
SET_MUL_MAT_FUNCTIONS_T(m, mul_mat_qx_r8_q8_0, Q4_0_R8_Dequantizer);
expected_Btype = GGML_TYPE_Q8_0_X4;
break;
case GGML_TYPE_Q5_0_R4:
@@ -12894,6 +13032,12 @@ struct HelperQ80R4 : public BaseHelper<step> {
m1 = _mm256_unpackhi_epi64(t0, t1);
m2 = _mm256_unpacklo_epi64(t2, t3);
m3 = _mm256_unpackhi_epi64(t2, t3);
#ifdef HAVE_FANCY_SIMD
m0 = _mm256_xor_si256(m0, _mm256_set1_epi8(-128));
m1 = _mm256_xor_si256(m1, _mm256_set1_epi8(-128));
m2 = _mm256_xor_si256(m2, _mm256_set1_epi8(-128));
m3 = _mm256_xor_si256(m3, _mm256_set1_epi8(-128));
#endif
_mm256_storeu_si256((__m256i *)y[ib].qs + 0, m0);
_mm256_storeu_si256((__m256i *)y[ib].qs + 1, m1);
_mm256_storeu_si256((__m256i *)y[ib].qs + 2, m2);
@@ -12910,6 +13054,12 @@ struct HelperQ80R4 : public BaseHelper<step> {
m1 = _mm256_unpackhi_epi64(t0, t1);
m2 = _mm256_unpacklo_epi64(t2, t3);
m3 = _mm256_unpackhi_epi64(t2, t3);
#ifdef HAVE_FANCY_SIMD
m0 = _mm256_xor_si256(m0, _mm256_set1_epi8(-128));
m1 = _mm256_xor_si256(m1, _mm256_set1_epi8(-128));
m2 = _mm256_xor_si256(m2, _mm256_set1_epi8(-128));
m3 = _mm256_xor_si256(m3, _mm256_set1_epi8(-128));
#endif
_mm256_storeu_si256((__m256i *)y[ib].qs + 4, m0);
_mm256_storeu_si256((__m256i *)y[ib].qs + 5, m1);
_mm256_storeu_si256((__m256i *)y[ib].qs + 6, m2);


@@ -43,6 +43,15 @@ constexpr int popcount(uint32_t x) { return __builtin_popcount(x); }
constexpr int popcount(uint64_t x) { return __builtin_popcountll(x); }
#endif
#if defined __x86_64__
#if defined HAVE_FANCY_SIMD
#undef HAVE_FANCY_SIMD
#endif
#if defined(__AVX512F__) && defined(__AVX512VNNI__) && defined(__AVX512VL__) && defined(__AVX512BW__) && defined(__AVX512DQ__)
#define HAVE_FANCY_SIMD
#endif
#endif
namespace {
inline int nearest_int(float fval) {
@@ -3541,7 +3550,7 @@ void quantize_row_iq4_nl_r4(const float * x, void * y, int64_t k) {
quantize_iq4_nl_r4(x, y, 4, k/4, nullptr);
}
static void repack_iq4_nl(int nrows, int n_per_row, const block_iq4_nl * x, block_iq4_nl_r4 * y) {
static void repack_iq4_nl(int nrows, int n_per_row, const block_iq4_nl * x, block_iq4_nl_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK4_NL == 0);
int nblock = n_per_row/QK4_NL;
@@ -3569,7 +3578,7 @@ size_t quantize_iq4_nl_r4(const float * src, void * dst, int64_t nrows, int64_t
char * qrow = (char *)dst;
for (int row = 0; row < nrows; row += 4) {
quantize_iq4_nl(src, qtmp.data(), 4, n_per_row, imatrix);
repack_iq4_nl(4, n_per_row, (const block_iq4_nl *)qtmp.data(), (block_iq4_nl_r4 *)qrow);
repack_iq4_nl(4, n_per_row, (const block_iq4_nl *)qtmp.data(), (block_iq4_nl_r4 *)qrow, false);
src += 4*n_per_row;
qrow += 4*row_size_nl;
}
@@ -3615,77 +3624,89 @@ void vec_dot_iq4_nl_r4_q8_0(int n, float * s, size_t bs, const void * vx, size_t
//
// ========================================= q4_0_r4
//
void quantize_row_q4_0_r4_ref(const float * x, block_iq4_nl_r4 * y, int64_t k) {
// we assume we are called with 4 rows
quantize_q4_0_r4(x, (void *)y, 4, k/4, nullptr);
void quantize_row_q4_0_r4_ref(const float * x, block_iq4_nl_r8 * y, int64_t k) {
// we assume we are called with 8 rows
quantize_q4_0_r4(x, (void *)y, 8, k/8, nullptr);
}
void quantize_row_q4_0_r4(const float * x, void * y, int64_t k) {
// we assume we are called with 4 rows
quantize_q4_0_r4(x, y, 4, k/4, nullptr);
// we assume we are called with 8 rows
quantize_q4_0_r4(x, y, 8, k/8, nullptr);
}
static void repack_q4_0(int nrows, int n_per_row, const block_q4_0 * x, block_iq4_nl_r4 * y) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK4_NL == 0);
int nblock = n_per_row/QK4_NL;
const block_q4_0 * x4[4];
for (int row = 0; row < nrows; row += 4) {
for (int k = 0; k < 4; ++k) x4[k] = x + nblock*k;
static void repack_q4_0(int nrows, int n_per_row, const block_q4_0 * x, block_iq4_nl_r8 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%8 == 0);
GGML_ASSERT(n_per_row%QK4_0 == 0);
int nblock = n_per_row/QK4_0;
const block_q4_0 * x8[8];
for (int row = 0; row < nrows; row += 8) {
for (int k = 0; k < 8; ++k) x8[k] = x + nblock*k;
for (int ib = 0; ib < nblock; ++ib) {
//for (int k = 0; k < 4; ++k) y[ib].d[k] = x4[k][ib].d;
//for (int k = 0; k < 4; ++k) for (int i = 0; i < 4; ++i) {
// y[ib].qs[4*k+i+ 0] = (x4[k][ib].qs[i+0] & 0xf) | ((x4[k][ib].qs[i+ 8] & 0x0f) << 4); // 0....3 + 8...11 from each row
// y[ib].qs[4*k+i+16] = (x4[k][ib].qs[i+0] >> 4) | ((x4[k][ib].qs[i+ 8] & 0xf0)); // 16...19 + 24...27 from each row
// y[ib].qs[4*k+i+32] = (x4[k][ib].qs[i+4] & 0xf) | ((x4[k][ib].qs[i+12] & 0x0f) << 4); // 4....7 + 12...15 from each row
// y[ib].qs[4*k+i+48] = (x4[k][ib].qs[i+4] >> 4) | ((x4[k][ib].qs[i+12] & 0xf0)); // 20...23 + 28...31 from each row
//}
for (int k = 0; k < 4; ++k) {
y[ib].d[k] = x4[k][ib].d;
for (int k = 0; k < 8; ++k) {
y[ib].d[k] = x8[k][ib].d;
for (int l = 0; l < 4; ++l) {
// l = 0 -> 0, 8 with shift 0 -> 4*(l/2), 4*(l/2)+8 with shift 4*(l%2)
// l = 1 -> 0, 8 with shift 4
// l = 2 -> 4, 12 with shift 0
// l = 3 -> 4, 12 with shift 4
for (int i = 0; i < 4; ++i) {
y[ib].qs[4*k+i+16*l] = ((x4[k][ib].qs[i+4*(l/2)] >> 4*(l%2)) & 0xf) | (((x4[k][ib].qs[i+4*(l/2)+8] >> 4*(l%2)) & 0xf) << 4);
y[ib].qs[32*l+4*k+i] = x8[k][ib].qs[4*l + i];
}
}
}
#ifdef __ARM_NEON
if (online) {
for (int l = 0; l < 8; ++l) {
auto v = vld1q_u8(y[ib].qs + 16*l);
vst1q_u8(y[ib].qs + 16*l, veorq_u8(v, vdupq_n_u8(0x88)));
}
}
#endif
}
x += 4*nblock;
x += 8*nblock;
y += nblock;
}
}
#ifdef __ARM_NEON
static void modify_q4_0_r4(int64_t k, char * cy) {
auto y = (block_iq4_nl_r8 *)cy;
int nb = k/(32*8);
for (int ib = 0; ib < nb; ++ib) {
auto v1 = vld1q_u8_x4(y[ib].qs);
auto v2 = vld1q_u8_x4(y[ib].qs+64);
for (int j = 0; j < 4; ++j) {
v1.val[j] = veorq_u8(v1.val[j], vdupq_n_u8(0x88));
v2.val[j] = veorq_u8(v2.val[j], vdupq_n_u8(0x88));
}
vst1q_u8_x4(y[ib].qs+ 0, v1);
vst1q_u8_x4(y[ib].qs+64, v2);
}
}
#endif
size_t quantize_q4_0_r4(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(nrows%8 == 0);
auto row_size_nl = ggml_row_size(GGML_TYPE_IQ4_NL, n_per_row);
std::vector<char> qtmp(4*row_size_nl);
std::vector<char> qtmp(8*row_size_nl);
char * qrow = (char *)dst;
for (int row = 0; row < nrows; row += 4) {
quantize_q4_0(src, qtmp.data(), 4, n_per_row, imatrix);
repack_iq4_nl(4, n_per_row, (const block_iq4_nl *)qtmp.data(), (block_iq4_nl_r4 *)qrow);
src += 4*n_per_row;
qrow += 4*row_size_nl;
for (int row = 0; row < nrows; row += 8) {
quantize_q4_0(src, qtmp.data(), 8, n_per_row, imatrix);
repack_q4_0(8, n_per_row, (const block_q4_0 *)qtmp.data(), (block_iq4_nl_r8 *)qrow, false);
src += 8*n_per_row;
qrow += 8*row_size_nl;
}
return nrows*row_size_nl;
}
void dequantize_row_q4_0_r4(const block_iq4_nl_r4 * x, float * y, int64_t k) {
// we assume we are called with 4 rows
int n_per_row = k/4;
void dequantize_row_q4_0_r4(const block_iq4_nl_r8 * x, float * y, int64_t k) {
// we assume we are called with 8 rows
int n_per_row = k/8;
int nb = n_per_row/QK4_0;
float * yk[4];
for (int k = 0; k < 4; ++k) yk[k] = y + k*n_per_row;
float * yk[8];
for (int k = 0; k < 8; ++k) yk[k] = y + k*n_per_row;
for (int ib = 0; ib < nb; ++ib) {
for (int k = 0; k < 4; ++k) {
for (int k = 0; k < 8; ++k) {
float scale = GGML_FP16_TO_FP32(x[ib].d[k]);
for (int l = 0; l < 4; ++l) {
int ll = 16*(l%2) + 4*(l/2);
for (int i = 0; i < 4; ++i) {
yk[k][QK4_0*ib+i+ll+0] = scale * ((x[ib].qs[4*k+i+16*l] & 0xf) - 8);
yk[k][QK4_0*ib+i+ll+8] = scale * ((x[ib].qs[4*k+i+16*l] >> 4) - 8);
yk[k][QK4_0*ib+4*l+i+ 0] = scale * ((x[ib].qs[32*l+4*k+i] & 0xf) - 8);
yk[k][QK4_0*ib+4*l+i+16] = scale * ((x[ib].qs[32*l+4*k+i] >> 4) - 8);
}
}
}
@@ -3719,7 +3740,7 @@ void quantize_row_q8_0_r4(const float * x, void * y, int64_t k) {
quantize_q8_0_r4(x, y, 8, k/8, nullptr);
}
static void repack_q8_0(int nrows, int n_per_row, const block_q8_0 * x, block_q8_0_r8 * y) {
static void repack_q8_0(int nrows, int n_per_row, const block_q8_0 * x, block_q8_0_r8 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%8 == 0);
GGML_ASSERT(n_per_row%QK8_0 == 0);
int nblock = n_per_row/QK8_0;
@@ -3734,12 +3755,33 @@ static void repack_q8_0(int nrows, int n_per_row, const block_q8_0 * x, block_q8
y[ib].qs[32*l+4*k+i+128] = x8[k][ib].qs[i+4*l+16];
}
}
#ifdef HAVE_FANCY_SIMD
if (online) {
for (int l = 0; l < 4; ++l) {
auto v = _mm512_add_epi8(_mm512_loadu_si512((const __m512i *)y[ib].qs + l), _mm512_set1_epi8(127));
_mm512_storeu_si512((__m512i *)y[ib].qs + l, v);
}
}
#endif
}
x += 8*nblock;
y += nblock;
}
}
#ifdef HAVE_FANCY_SIMD
static void modify_q8_0_r4(int64_t k, char * cy) {
auto y = (block_iq4_nl_r8 *)cy;
int nb = k/(32*8);
for (int ib = 0; ib < nb; ++ib) {
for (int l = 0; l < 4; ++l) {
auto v = _mm512_add_epi8(_mm512_loadu_si512((const __m512i *)y[ib].qs + l), _mm512_set1_epi8(127));
_mm512_storeu_si512((__m512i *)y[ib].qs + l, v);
}
}
}
#endif
size_t quantize_q8_0_r4(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
GGML_ASSERT(nrows%8 == 0);
auto row_size_0 = ggml_row_size(GGML_TYPE_Q8_0, n_per_row);
@@ -3747,7 +3789,7 @@ size_t quantize_q8_0_r4(const float * src, void * dst, int64_t nrows, int64_t n_
char * qrow = (char *)dst;
for (int row = 0; row < nrows; row += 8) {
quantize_q8_0(src, qtmp.data(), 8, n_per_row, imatrix);
repack_q8_0(8, n_per_row, (const block_q8_0 *)qtmp.data(), (block_q8_0_r8 *)qrow);
repack_q8_0(8, n_per_row, (const block_q8_0 *)qtmp.data(), (block_q8_0_r8 *)qrow, false);
src += 8*n_per_row;
qrow += 8*row_size_0;
}
@@ -3810,7 +3852,7 @@ static inline void convert_q5_0(const block_q5_0& x, uint8_t * L) {
}
}
static void repack_q5_0(int nrows, int n_per_row, const block_q5_0 * x, block_q5_0_r4 * y) {
static void repack_q5_0(int nrows, int n_per_row, const block_q5_0 * x, block_q5_0_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK5_0 == 0);
int nblock = n_per_row/QK5_0;
@@ -3844,7 +3886,7 @@ size_t quantize_q5_0_r4(const float * src, void * dst, int64_t nrows, int64_t n_
char * qrow = (char *)dst;
for (int row = 0; row < nrows; row += 4) {
quantize_q5_0(src, qtmp.data(), 4, n_per_row, imatrix);
repack_q5_0(4, n_per_row, (const block_q5_0 *)qtmp.data(), (block_q5_0_r4 *)qrow);
repack_q5_0(4, n_per_row, (const block_q5_0 *)qtmp.data(), (block_q5_0_r4 *)qrow, false);
src += 4*n_per_row;
qrow += 4*row_size_0;
}
@@ -3907,7 +3949,7 @@ static inline void convert_q6_0(const block_q6_0& x, uint8_t * L) {
}
}
static void repack_q6_0(int nrows, int n_per_row, const block_q6_0 * x, block_q6_0_r4 * y) {
static void repack_q6_0(int nrows, int n_per_row, const block_q6_0 * x, block_q6_0_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK5_0 == 0);
int nblock = n_per_row/QK6_0;
@@ -3941,7 +3983,7 @@ size_t quantize_q6_0_r4(const float * src, void * dst, int64_t nrows, int64_t n_
char * qrow = (char *)dst;
for (int row = 0; row < nrows; row += 4) {
quantize_q6_0(src, qtmp.data(), 4, n_per_row, imatrix);
repack_q6_0(4, n_per_row, (const block_q6_0 *)qtmp.data(), (block_q6_0_r4 *)qrow);
repack_q6_0(4, n_per_row, (const block_q6_0 *)qtmp.data(), (block_q6_0_r4 *)qrow, false);
src += 4*n_per_row;
qrow += 4*row_size_0;
}
@@ -3994,7 +4036,7 @@ void quantize_row_iq4_xs_r4(const float * x, void * y, int64_t k) {
quantize_iq4_xs_r4(x, y, 8, k/8, nullptr);
}
static void repack_iq4_xs(int nrows, int n_per_row, const block_iq4_xs * x, block_iq4_xs_r4 * y) {
static void repack_iq4_xs(int nrows, int n_per_row, const block_iq4_xs * x, block_iq4_xs_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%8 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
@@ -4034,7 +4076,7 @@ size_t quantize_iq4_xs_r4(const float * src, void * dst, int64_t nrows, int64_t
std::vector<char> qtmp(8*row_size);
for (int row = 0; row < nrows; row += 8) {
quantize_iq4_xs(src, (void *)qtmp.data(), 8, n_per_row, imatrix);
repack_iq4_xs(8, n_per_row, (const block_iq4_xs *)qtmp.data(), (block_iq4_xs_r4 *)qcur);
repack_iq4_xs(8, n_per_row, (const block_iq4_xs *)qtmp.data(), (block_iq4_xs_r4 *)qcur, false);
qcur += 8*row_size;
src += 8*n_per_row;
}
@@ -4086,7 +4128,7 @@ void quantize_row_iq4_ks_r4(const float * x, void * y, int64_t k) {
quantize_iq4_ks_r4(x, y, 4, k/4, nullptr);
}
static void repack_iq4_ks(int nrows, int n_per_row, const block_iq4_ks * x, block_iq4_ks_r4 * y) {
static void repack_iq4_ks(int nrows, int n_per_row, const block_iq4_ks * x, block_iq4_ks_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
auto row_size = ggml_row_size(GGML_TYPE_IQ4_KS, n_per_row);
@@ -4128,7 +4170,7 @@ size_t quantize_iq4_ks_r4(const float * src, void * dst, int64_t nrows, int64_t
std::vector<char> qtmp(4*row_size);
for (int row = 0; row < nrows; row += 4) {
quantize_iq4_ks(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
repack_iq4_ks(4, n_per_row, (const block_iq4_ks *)qtmp.data(), (block_iq4_ks_r4 *)qcur);
repack_iq4_ks(4, n_per_row, (const block_iq4_ks *)qtmp.data(), (block_iq4_ks_r4 *)qcur, false);
qcur += 4*row_size;
src += 4*n_per_row;
}
@@ -4187,7 +4229,7 @@ void quantize_row_iq2_bn_r4(const float * x, void * y, int64_t k) {
}
namespace {
void repack_iq2_bn(int nrows, int n_per_row, const char * x, char * y) {
void repack_iq2_bn(int nrows, int n_per_row, const char * x, char * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK_IQ1BN == 0);
int nblock = n_per_row/QK_IQ1BN;
@@ -4256,7 +4298,7 @@ size_t quantize_iq2_bn_r4(const float * src, void * dst, int64_t nrows, int64_t
std::vector<char> qtmp(4*row_size);
for (int row = 0; row < nrows; row += 4) {
quantize_iq2_bn(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
repack_iq2_bn(4, n_per_row, qtmp.data(), qcur);
repack_iq2_bn(4, n_per_row, qtmp.data(), qcur, false);
qcur += 4*row_size;
src += 4*n_per_row;
}
@@ -4330,7 +4372,7 @@ inline void convert_q4_k(const block_q4_K& x, uint8_t * L, uint8_t * Ld, uint8_t
}
}
static void repack_q4_k(int nrows, int n_per_row, const block_q4_K * x, block_q4_k_r4 * y) {
static void repack_q4_k(int nrows, int n_per_row, const block_q4_K * x, block_q4_k_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
@@ -4371,7 +4413,7 @@ size_t quantize_q4_k_r4(const float * src, void * dst, int64_t nrows, int64_t n_
std::vector<char> qtmp(4*row_size);
for (int row = 0; row < nrows; row += 4) {
quantize_q4_K(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
repack_q4_k(4, n_per_row, (const block_q4_K *)qtmp.data(), (block_q4_k_r4 *)qcur);
repack_q4_k(4, n_per_row, (const block_q4_K *)qtmp.data(), (block_q4_k_r4 *)qcur, false);
qcur += 4*row_size;
src += 4*n_per_row;
}
@@ -4448,7 +4490,7 @@ inline void convert_q6_k(const block_q6_K& x, uint8_t * L) {
}
}
static void repack_q6_k(int nrows, int n_per_row, const block_q6_K * x, block_q6_k_r4 * y) {
static void repack_q6_k(int nrows, int n_per_row, const block_q6_K * x, block_q6_k_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
@@ -4487,7 +4529,7 @@ size_t quantize_q6_k_r4(const float * src, void * dst, int64_t nrows, int64_t n_
std::vector<char> qtmp(4*row_size);
for (int row = 0; row < nrows; row += 4) {
quantize_q6_K(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
repack_q6_k(4, n_per_row, (const block_q6_K *)qtmp.data(), (block_q6_k_r4 *)qcur);
repack_q6_k(4, n_per_row, (const block_q6_K *)qtmp.data(), (block_q6_k_r4 *)qcur, false);
qcur += 4*row_size;
src += 4*n_per_row;
}
@@ -4562,7 +4604,7 @@ inline void convert_q5_k(const block_q5_K& x, uint8_t * L, uint8_t * Ld, uint8_t
}
}
static void repack_q5_k(int nrows, int n_per_row, const block_q5_K * x, block_q5_k_r4 * y) {
static void repack_q5_k(int nrows, int n_per_row, const block_q5_K * x, block_q5_k_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
@@ -4605,7 +4647,7 @@ size_t quantize_q5_k_r4(const float * src, void * dst, int64_t nrows, int64_t n_
std::vector<char> qtmp(4*row_size);
for (int row = 0; row < nrows; row += 4) {
quantize_q5_K(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
repack_q5_k(4, n_per_row, (const block_q5_K *)qtmp.data(), (block_q5_k_r4 *)qcur);
repack_q5_k(4, n_per_row, (const block_q5_K *)qtmp.data(), (block_q5_k_r4 *)qcur, false);
qcur += 4*row_size;
src += 4*n_per_row;
}
@@ -4698,7 +4740,7 @@ inline void convert_q3_k(const block_q3_K& x, uint8_t * L, uint8_t * Ld) {
}
}
static void repack_q3_k(int nrows, int n_per_row, const block_q3_K * x, block_q3_k_r4 * y) {
static void repack_q3_k(int nrows, int n_per_row, const block_q3_K * x, block_q3_k_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
@@ -4741,7 +4783,7 @@ size_t quantize_q3_k_r4(const float * src, void * dst, int64_t nrows, int64_t n_
std::vector<char> qtmp(4*row_size);
for (int row = 0; row < nrows; row += 4) {
quantize_q3_K(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
repack_q3_k(4, n_per_row, (const block_q3_K *)qtmp.data(), (block_q3_k_r4 *)qcur);
repack_q3_k(4, n_per_row, (const block_q3_K *)qtmp.data(), (block_q3_k_r4 *)qcur, false);
qcur += 4*row_size;
src += 4*n_per_row;
}
@@ -4820,7 +4862,7 @@ inline void convert_q2_k(const block_q2_K& x, uint8_t * L) {
}
}
static void repack_q2_k(int nrows, int n_per_row, const block_q2_K * x, block_q2_k_r4 * y) {
static void repack_q2_k(int nrows, int n_per_row, const block_q2_K * x, block_q2_k_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
@@ -4857,7 +4899,7 @@ size_t quantize_q2_k_r4(const float * src, void * dst, int64_t nrows, int64_t n_
std::vector<char> qtmp(4*row_size);
for (int row = 0; row < nrows; row += 4) {
quantize_q2_K(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
repack_q2_k(4, n_per_row, (const block_q2_K *)qtmp.data(), (block_q2_k_r4 *)qcur);
repack_q2_k(4, n_per_row, (const block_q2_K *)qtmp.data(), (block_q2_k_r4 *)qcur, false);
qcur += 4*row_size;
src += 4*n_per_row;
}
@@ -4919,7 +4961,7 @@ void quantize_row_iq4_k_r4(const float * x, void * y, int64_t k) {
quantize_iq4_k_r4(x, y, 4, k/4, nullptr);
}
static void repack_iq4_k(int nrows, int n_per_row, const block_iq4_k * x, block_iq4_k_r4 * y) {
static void repack_iq4_k(int nrows, int n_per_row, const block_iq4_k * x, block_iq4_k_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
@@ -4972,7 +5014,7 @@ size_t quantize_iq4_k_r4(const float * src, void * dst, int64_t nrows, int64_t n
std::vector<char> qtmp(4*row_size);
for (int row = 0; row < nrows; row += 4) {
quantize_iq4_k(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
repack_iq4_k(4, n_per_row, (const block_iq4_k *)qtmp.data(), (block_iq4_k_r4 *)qcur);
repack_iq4_k(4, n_per_row, (const block_iq4_k *)qtmp.data(), (block_iq4_k_r4 *)qcur, false);
qcur += 4*row_size;
src += 4*n_per_row;
}
@@ -5053,7 +5095,7 @@ inline void convert_iq5_k(const block_iq5_k& x, uint8_t * L) {
}
}
static void repack_iq5_k(int nrows, int n_per_row, const block_iq5_k * x, block_iq5_k_r4 * y) {
static void repack_iq5_k(int nrows, int n_per_row, const block_iq5_k * x, block_iq5_k_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
@@ -5108,7 +5150,7 @@ size_t quantize_iq5_k_r4(const float * src, void * dst, int64_t nrows, int64_t n
std::vector<char> qtmp(4*row_size);
for (int row = 0; row < nrows; row += 4) {
quantize_iq5_k(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
repack_iq5_k(4, n_per_row, (const block_iq5_k *)qtmp.data(), (block_iq5_k_r4 *)qcur);
repack_iq5_k(4, n_per_row, (const block_iq5_k *)qtmp.data(), (block_iq5_k_r4 *)qcur, false);
qcur += 4*row_size;
src += 4*n_per_row;
}
@@ -5169,7 +5211,7 @@ void quantize_row_q8_k_r8(const float * x, void * y, int64_t k) {
quantize_q8_k_r8(x, y, 8, k/8, nullptr);
}
static void repack_q8_k(int nrows, int n_per_row, const block_q8_K * x, block_q8_k_r8 * y) {
static void repack_q8_k(int nrows, int n_per_row, const block_q8_K * x, block_q8_k_r8 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%8 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
@@ -5183,11 +5225,31 @@ static void repack_q8_k(int nrows, int n_per_row, const block_q8_K * x, block_q8
for (int i = 0; i < 4; ++i) y[ibl].qs[32*ib + 4*k + i] = x8[k][ibl].qs[4*ib+i];
}
}
#ifdef HAVE_FANCY_SIMD
if (online) {
for (int l = 0; l < 32; ++l) {
auto v = _mm512_xor_si512(_mm512_loadu_si512((const __m512i *)y[ibl].qs + l), _mm512_set1_epi8(-128));
_mm512_storeu_si512((__m512i *)y[ibl].qs + l, v);
}
}
#endif
}
x += 8*nblock;
y += nblock;
}
}
#ifdef HAVE_FANCY_SIMD
static void modify_q8_k_r8(int64_t k, char * cy) {
auto y = (block_q8_k_r8 *)cy;
int nb = k/(256*8);
for (int ib = 0; ib < nb; ++ib) {
for (int l = 0; l < 32; ++l) {
auto v = _mm512_xor_si512(_mm512_loadu_si512((const __m512i *)y[ib].qs + l), _mm512_set1_epi8(-128));
_mm512_storeu_si512((__m512i *)y[ib].qs + l, v);
}
}
}
#endif
size_t quantize_q8_k_r8(const float * src, void * dst, int64_t nrows, int64_t n_per_row, [[maybe_unused]] const float * imatrix) {
GGML_ASSERT(nrows%8 == 0);
@@ -5198,7 +5260,7 @@ size_t quantize_q8_k_r8(const float * src, void * dst, int64_t nrows, int64_t n_
std::vector<char> qtmp(8*row_size_0);
for (int row = 0; row < nrows; row += 8) {
quantize_row_q8_K32(src, (void *)qtmp.data(), 8*n_per_row);
repack_q8_k(8, n_per_row, (const block_q8_K *)qtmp.data(), (block_q8_k_r8 *)qcur);
repack_q8_k(8, n_per_row, (const block_q8_K *)qtmp.data(), (block_q8_k_r8 *)qcur, false);
qcur += 8*row_size_1;
src += 8*n_per_row;
}
@@ -5247,7 +5309,7 @@ inline ggml_bf16_t to_bf16(const float& x) {
inline ggml_bf16_t to_bf16(const ggml_half& x) { return to_bf16(GGML_FP16_TO_FP32(x)); }
inline ggml_bf16_t to_bf16(const ggml_bf16_t& x) { return x; }
template <typename T>
void repack_bf16(int nrows, int n_per_row, const T * x, ggml_bf16_t * y) {
void repack_bf16(int nrows, int n_per_row, const T * x, ggml_bf16_t * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%16 == 0);
GGML_ASSERT(n_per_row%2 == 0);
for (int row = 0; row < nrows; row += 16) {
@@ -5265,11 +5327,11 @@ void repack_bf16(int nrows, int n_per_row, const T * x, ggml_bf16_t * y) {
}
void repack_f32_bf16_r16(const void * src, void * dst, int64_t nrows, int64_t n_per_row) {
repack_bf16(nrows, n_per_row, (const float *)src, (ggml_bf16_t *)dst);
repack_bf16(nrows, n_per_row, (const float *)src, (ggml_bf16_t *)dst, false);
}
void repack_bf16_bf16_r16(const void * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row) {
repack_bf16(nrows, n_per_row, (const ggml_bf16_t *)src, (ggml_bf16_t *)dst);
repack_bf16(nrows, n_per_row, (const ggml_bf16_t *)src, (ggml_bf16_t *)dst, false);
}
//
@@ -5301,7 +5363,7 @@ inline void convert_iq3_k(const block_iq3_k& x, uint8_t * L) {
}
}
static void repack_iq3_k(int nrows, int n_per_row, const block_iq3_k * x, block_iq3_k_r4 * y) {
static void repack_iq3_k(int nrows, int n_per_row, const block_iq3_k * x, block_iq3_k_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
@@ -5355,7 +5417,7 @@ size_t quantize_iq3_k_r4(const float * src, void * dst, int64_t nrows, int64_t n
std::vector<char> qtmp(4*row_size);
for (int row = 0; row < nrows; row += 4) {
quantize_iq3_k(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
repack_iq3_k(4, n_per_row, (const block_iq3_k *)qtmp.data(), (block_iq3_k_r4 *)qcur);
repack_iq3_k(4, n_per_row, (const block_iq3_k *)qtmp.data(), (block_iq3_k_r4 *)qcur, false);
qcur += 4*row_size;
src += 4*n_per_row;
}
@@ -5435,7 +5497,7 @@ inline void convert_iq2_k(const block_iq2_k& x, uint8_t * L) {
}
}
static void repack_iq2_k(int nrows, int n_per_row, const block_iq2_k * x, block_iq2_k_r4 * y) {
static void repack_iq2_k(int nrows, int n_per_row, const block_iq2_k * x, block_iq2_k_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
@@ -5480,7 +5542,7 @@ size_t quantize_iq2_k_r4(const float * src, void * dst, int64_t nrows, int64_t n
std::vector<char> qtmp(4*row_size);
for (int row = 0; row < nrows; row += 4) {
quantize_iq2_k(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
repack_iq2_k(4, n_per_row, (const block_iq2_k *)qtmp.data(), (block_iq2_k_r4 *)qcur);
repack_iq2_k(4, n_per_row, (const block_iq2_k *)qtmp.data(), (block_iq2_k_r4 *)qcur, false);
qcur += 4*row_size;
src += 4*n_per_row;
}
@@ -5531,15 +5593,6 @@ void vec_dot_iq2_k_r4_q8_k(int n, float * s, size_t bs, const void * vx, size_t
GGML_UNUSED(by);
}
namespace {
struct Repack {
using repack_func = void (*) (int nrows, int n_per_row, const char * src, char * dst);
ggml_type new_type;
int num_rows;
repack_func repack;
};
}
namespace {
inline uint8_t scrambled_sign(uint8_t s) {
static const uint8_t k_table[128] = {
@@ -5568,7 +5621,7 @@ void quantize_row_iq2_xxs_r4(const float * x, void * y, int64_t k) {
quantize_iq2_xxs_r4(x, y, 4, k/4, nullptr);
}
static void repack_iq2_xxs(int nrows, int n_per_row, const block_iq2_xxs * x, block_iq2_xxs_r4 * y) {
static void repack_iq2_xxs(int nrows, int n_per_row, const block_iq2_xxs * x, block_iq2_xxs_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
@@ -5609,7 +5662,7 @@ size_t quantize_iq2_xxs_r4(const float * src, void * dst, int64_t nrows, int64_t
std::vector<char> qtmp(4*row_size);
for (int row = 0; row < nrows; row += 4) {
quantize_iq2_xxs(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
repack_iq2_xxs(4, n_per_row, (const block_iq2_xxs *)qtmp.data(), (block_iq2_xxs_r4 *)qcur);
repack_iq2_xxs(4, n_per_row, (const block_iq2_xxs *)qtmp.data(), (block_iq2_xxs_r4 *)qcur, false);
qcur += 4*row_size;
src += 4*n_per_row;
}
@@ -5668,7 +5721,7 @@ void quantize_row_iq2_xs_r4(const float * x, void * y, int64_t k) {
quantize_iq2_xs_r4(x, y, 4, k/4, nullptr);
}
static void repack_iq2_xs(int nrows, int n_per_row, const block_iq2_xs * x, block_iq2_xs_r4 * y) {
static void repack_iq2_xs(int nrows, int n_per_row, const block_iq2_xs * x, block_iq2_xs_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
@@ -5701,7 +5754,7 @@ size_t quantize_iq2_xs_r4(const float * src, void * dst, int64_t nrows, int64_t
std::vector<char> qtmp(4*row_size);
for (int row = 0; row < nrows; row += 4) {
quantize_iq2_xs(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
repack_iq2_xs(4, n_per_row, (const block_iq2_xs *)qtmp.data(), (block_iq2_xs_r4 *)qcur);
repack_iq2_xs(4, n_per_row, (const block_iq2_xs *)qtmp.data(), (block_iq2_xs_r4 *)qcur, false);
qcur += 4*row_size;
src += 4*n_per_row;
}
@@ -5755,7 +5808,7 @@ void quantize_row_iq2_s_r4(const float * x, void * y, int64_t k) {
quantize_iq2_s_r4(x, y, 4, k/4, nullptr);
}
static void repack_iq2_s(int nrows, int n_per_row, const block_iq2_s * x, block_iq2_s_r4 * y) {
static void repack_iq2_s(int nrows, int n_per_row, const block_iq2_s * x, block_iq2_s_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
@@ -5789,7 +5842,7 @@ size_t quantize_iq2_s_r4(const float * src, void * dst, int64_t nrows, int64_t n
std::vector<char> qtmp(4*row_size);
for (int row = 0; row < nrows; row += 4) {
quantize_iq2_s(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
repack_iq2_s(4, n_per_row, (const block_iq2_s *)qtmp.data(), (block_iq2_s_r4 *)qcur);
repack_iq2_s(4, n_per_row, (const block_iq2_s *)qtmp.data(), (block_iq2_s_r4 *)qcur, false);
qcur += 4*row_size;
src += 4*n_per_row;
}
@@ -5845,7 +5898,7 @@ void quantize_row_iq3_xxs_r4(const float * x, void * y, int64_t k) {
namespace {
}
static void repack_iq3_xxs(int nrows, int n_per_row, const block_iq3_xxs * x, block_iq3_xxs_r4 * y) {
static void repack_iq3_xxs(int nrows, int n_per_row, const block_iq3_xxs * x, block_iq3_xxs_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
@@ -5886,7 +5939,7 @@ size_t quantize_iq3_xxs_r4(const float * src, void * dst, int64_t nrows, int64_t
std::vector<char> qtmp(4*row_size);
for (int row = 0; row < nrows; row += 4) {
quantize_iq3_xxs(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
repack_iq3_xxs(4, n_per_row, (const block_iq3_xxs *)qtmp.data(), (block_iq3_xxs_r4 *)qcur);
repack_iq3_xxs(4, n_per_row, (const block_iq3_xxs *)qtmp.data(), (block_iq3_xxs_r4 *)qcur, false);
qcur += 4*row_size;
src += 4*n_per_row;
}
@@ -5945,7 +5998,7 @@ void quantize_row_iq3_s_r4(const float * x, void * y, int64_t k) {
quantize_iq3_s_r4(x, y, 4, k/4, nullptr);
}
static void repack_iq3_s(int nrows, int n_per_row, const block_iq3_s * x, block_iq3_s_r4 * y) {
static void repack_iq3_s(int nrows, int n_per_row, const block_iq3_s * x, block_iq3_s_r4 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%4 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
@@ -5991,7 +6044,7 @@ size_t quantize_iq3_s_r4(const float * src, void * dst, int64_t nrows, int64_t n
std::vector<char> qtmp(4*row_size);
for (int row = 0; row < nrows; row += 4) {
quantize_iq3_s(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
repack_iq3_s(4, n_per_row, (const block_iq3_s *)qtmp.data(), (block_iq3_s_r4 *)qcur);
repack_iq3_s(4, n_per_row, (const block_iq3_s *)qtmp.data(), (block_iq3_s_r4 *)qcur, false);
qcur += 4*row_size;
src += 4*n_per_row;
}
@@ -6036,6 +6089,56 @@ void vec_dot_iq3_s_r4_q8_k(int n, float * s, size_t bs, const void * vx, size_t
//================================================
namespace {
struct Repack {
using repack_func = void (*) (int nrows, int n_per_row, const char * src, char * dst, bool online);
ggml_type new_type;
int num_rows;
repack_func repack;
};
struct Modify {
using modify_func_t = void (*)(int64_t k, char * src_dst);
modify_func_t mod_func;
int nrows;
};
}
bool iqk_modify_tensor(struct ggml_tensor * tensor) {
static const std::unordered_map<ggml_type, Modify> k_mod_map = {
#ifdef __ARM_NEON
{ GGML_TYPE_Q4_0_R4, {modify_q4_0_r4, 8} },
#endif
#ifdef HAVE_FANCY_SIMD
{ GGML_TYPE_Q8_0_R4, {modify_q8_0_r4, 8} },
{ GGML_TYPE_Q8_K_R8, {modify_q8_k_r8, 8} },
#endif
};
auto it = k_mod_map.find(tensor->type);
if (it == k_mod_map.end()) return false;
auto& m = it->second;
int nrows = ggml_nrows(tensor);
int nchunks = nrows/m.nrows;
int max_thread = std::max(1, int(std::thread::hardware_concurrency()/2));
int nthread = std::min(nchunks, max_thread);
auto row_size = ggml_row_size(tensor->type, tensor->ne[0]);
std::atomic<int> counter(0);
auto compute = [&counter, &m, tensor, row_size, nchunks] () {
int64_t n_per_call = m.nrows*tensor->ne[0];
while (true) {
int row = counter.fetch_add(1);
if (row >= nchunks) break;
m.mod_func(n_per_call, (char *)tensor->data + row_size*row*m.nrows);
}
};
std::vector<std::thread> workers(nthread-1);
for (auto& w : workers) w = std::thread(compute);
compute();
for (auto& w : workers) w.join();
return true;
}
void iqk_repack_tensor(struct ggml_tensor * tensor) {
constexpr int kChunk = 8;
if (!tensor) return;
@@ -6061,7 +6164,7 @@ void iqk_repack_tensor(struct ggml_tensor * tensor) {
{ GGML_TYPE_Q4_K, { GGML_TYPE_Q4_K_R4, 4, (Repack::repack_func)repack_q4_k} },
{ GGML_TYPE_Q5_K, { GGML_TYPE_Q5_K_R4, 4, (Repack::repack_func)repack_q5_k} },
{ GGML_TYPE_Q6_K, { GGML_TYPE_Q6_K_R4, 4, (Repack::repack_func)repack_q6_k} },
{ GGML_TYPE_Q4_0, { GGML_TYPE_Q4_0_R4, 4, (Repack::repack_func)repack_q4_0} },
{ GGML_TYPE_Q4_0, { GGML_TYPE_Q4_0_R4, 8, (Repack::repack_func)repack_q4_0} },
{ GGML_TYPE_Q5_0, { GGML_TYPE_Q5_0_R4, 4, (Repack::repack_func)repack_q5_0} },
{ GGML_TYPE_Q6_0, { GGML_TYPE_Q6_0_R4, 4, (Repack::repack_func)repack_q6_0} },
{ GGML_TYPE_Q8_0, { GGML_TYPE_Q8_0_R4, 8, (Repack::repack_func)repack_q8_0} },
@@ -6099,7 +6202,7 @@ void iqk_repack_tensor(struct ggml_tensor * tensor) {
int last_row = std::min(first_row + chunkSize*r.num_rows, nrows);
for (int row = first_row; row < last_row; row += r.num_rows) {
std::memcpy(qtmp.data(), data + row*row_size, r.num_rows*row_size);
r.repack(r.num_rows, n_per_row, qtmp.data(), data + row*row_size);
r.repack(r.num_rows, n_per_row, qtmp.data(), data + row*row_size, true);
}
}
};


@@ -67,10 +67,10 @@ size_t quantize_iq4_nl_r4(const float * GGML_RESTRICT src, void * GGML_RESTRICT
void dequantize_row_iq4_nl_r4(const block_iq4_nl_r4 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void vec_dot_iq4_nl_r4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void quantize_row_q4_0_r4_ref(const float * GGML_RESTRICT x, block_iq4_nl_r4 * GGML_RESTRICT y, int64_t k);
void quantize_row_q4_0_r4_ref(const float * GGML_RESTRICT x, block_iq4_nl_r8 * GGML_RESTRICT y, int64_t k);
void quantize_row_q4_0_r4(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
size_t quantize_q4_0_r4(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
void dequantize_row_q4_0_r4(const block_iq4_nl_r4 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_q4_0_r4(const block_iq4_nl_r8 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void vec_dot_q4_0_r4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void quantize_row_q8_0_r4_ref(const float * GGML_RESTRICT x, block_q8_0_r8 * GGML_RESTRICT y, int64_t k);
@@ -218,6 +218,7 @@ void repack_f32_bf16_r16 (const void * GGML_RESTRICT src, void * GGML_RESTRICT d
void repack_bf16_bf16_r16(const void * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row);
void iqk_repack_tensor(struct ggml_tensor * tensor);
bool iqk_modify_tensor(struct ggml_tensor * tensor);
// So we can re-pack Microsoft's BitNet I2_S quants
void dequantize_row_ms_i2s(const void * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);