mirror of
https://github.com/ikawrakow/ik_llama.cpp.git
synced 2026-04-22 15:39:23 +00:00
iq2_ks: speed up quantized matrix multiplication
Improves throughput from 200 t/s to 367 t/s. (There is no iq2_ks_r4 interleaved variant, so the repacked-q8 path is used instead.)
This commit is contained in:
@@ -2106,37 +2106,68 @@ inline float convert_to_q8_k_r8(int k, float d0, const __m256i * qx, const int16
|
||||
return dnew;
|
||||
}
|
||||
|
||||
//struct DequantizerIQ2K final : public BaseDequantizer<block_iq2_k> {
|
||||
// DequantizerIQ2K(const void * vx, size_t bx) : BaseDequantizer(vx, bx), iqxk(5, -32), values(load_values()) {}
|
||||
// template <typename Q8>
|
||||
// inline void new_block(int i, const Q8& q8, __m256 * accm, __m256i * scales) {
|
||||
// d = GGML_FP16_TO_FP32(x[i].d);
|
||||
// iqxk.process(i, d, x[i].extra, make_scales(x[i].scales), q8, accm, scales);
|
||||
// }
|
||||
// inline void prepare(int i, int j) {
|
||||
// bits.prepare(x[i].qs, j);
|
||||
// bits.values[0] = _mm256_shuffle_epi8(values, bits.values[0]);
|
||||
// bits.values[1] = _mm256_shuffle_epi8(values, bits.values[1]);
|
||||
// bits.values[2] = _mm256_shuffle_epi8(values, bits.values[2]);
|
||||
// bits.values[3] = _mm256_shuffle_epi8(values, bits.values[3]);
|
||||
// }
|
||||
// static inline __m256i load_values() {
|
||||
// static const uint8_t kvalues_iq2nl[16] = {1, 19, 33, 49, 0, 0, 0, 0, 6, 24, 38, 54, 0, 0, 0, 0};
|
||||
// auto val128 = _mm_loadu_si128((const __m128i *)kvalues_iq2nl);
|
||||
// return MM256_SET_M128I(val128, val128);
|
||||
// }
|
||||
// inline __m128i make_scales(const uint8_t * scales_l) const {
|
||||
// uint64_t aux64; std::memcpy(&aux64, scales_l, 8);
|
||||
// auto scl = _mm_and_si128(_mm_set_epi64x(aux64 >> 4, aux64), maskl);
|
||||
// return _mm_add_epi8(scl, m8);
|
||||
// }
|
||||
//
|
||||
// Q2Bits bits;
|
||||
// const IQXKScales iqxk;
|
||||
// const __m256i values;
|
||||
// const __m128i m8 = _mm_set1_epi8(-8);
|
||||
// const __m128i maskl = _mm_set1_epi8(0xf);
|
||||
//};
|
||||
// Repack 8 rows of iq2_ks-quantized data into the interleaved q8_k_r8 format,
// so that subsequent GEMM can use the fast q8_k_r8 kernels.
//   n     : number of columns per row; must be a multiple of QK_K (256)
//   vx    : source rows; each row starts with one ggml_half row scale,
//           followed by n/QK_K block_iq2_ks super-blocks
//   bx    : byte stride between consecutive source rows
//   vy    : destination, written as block_q8_k_r8 (8 rows interleaved)
//   nrc_x : number of rows; must be a multiple of 8 (one q8_k_r8 group)
void iqk_convert_iq2_ks_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
    GGML_ASSERT(n%QK_K == 0);
    GGML_ASSERT(nrc_x%8 == 0);

    int nb = n/QK_K;                       // super-blocks per row

    const block_iq2_ks * x8[8];            // per-row source pointers for the current group of 8 rows

    block_q8_k_r8 * y = (block_q8_k_r8 *)vy;

    // Non-linear iq2 codebook: 8 int8 values loaded once and broadcast to both
    // 128-bit lanes so _mm256_shuffle_epi8 can do the 3-bit index -> value lookup.
    __m256i values;
    {
        auto v = _mm_loadl_epi64((const __m128i *)iq2nl_values);
        values = MM256_SET_M128I(v, v);
    }

    ggml_half dh[8];                       // per-row fp16 scales of the current group
    float dnew[8];                         // per-row rescale factors returned by convert_to_q8_k_r8
    uint32_t block[8];                     // scratch used by convert_to_q8_k_r8 for interleaving
    int16_t ls[16];                        // block scales, one per 16 weights (duplicated in pairs)

    __m256i xv[8];                         // 8 x 32 dequantized (still integer) values = one super-block

    auto ml = _mm256_set1_epi8(0x03);      // mask to extract one 2-bit quant

    for (int ix = 0; ix < nrc_x; ix += 8) {
        for (int k = 0; k < 8; ++k) {
            // Each iq2_ks row begins with its fp16 scale; blocks follow immediately after.
            const ggml_half * dptr = (const ggml_half *)((const char *)vx + (ix+k)*bx);
            dh[k] = dptr[0];
            x8[k] = (const block_iq2_ks *)(dptr + 1);
        }
        for (int i = 0; i < nb; ++i) {
            for (int k = 0; k < 8; ++k) {
                auto extra = x8[k][i].extra;
                for (int i128 = 0; i128 < 2; ++i128) {
                    // Decode four 5-bit block scales per 128 weights: the low 4 bits come from
                    // the packed scales[] nibbles, the 5th bit from `extra`, biased by -16.
                    // Each scale covers 32 weights, hence the pairwise duplication into ls[].
                    ls[8*i128+0] = ls[8*i128+1] = ((x8[k][i].scales[2*i128+0] & 0xf) | ((extra >> 4) & 0x10)) - 16;
                    ls[8*i128+2] = ls[8*i128+3] = ((x8[k][i].scales[2*i128+0] >> 4) | ((extra >> 5) & 0x10)) - 16;
                    ls[8*i128+4] = ls[8*i128+5] = ((x8[k][i].scales[2*i128+1] & 0xf) | ((extra >> 6) & 0x10)) - 16;
                    ls[8*i128+6] = ls[8*i128+7] = ((x8[k][i].scales[2*i128+1] >> 4) | ((extra >> 7) & 0x10)) - 16;
                    // 128 x 2-bit quants packed 4-deep in 32 bytes: shift 0/2/4/6 and mask.
                    auto bits = _mm256_loadu_si256((const __m256i *)x8[k][i].qs+i128);
                    xv[4*i128+0] = _mm256_and_si256(bits, ml);
                    xv[4*i128+1] = _mm256_and_si256(_mm256_srli_epi16(bits, 2), ml);
                    xv[4*i128+2] = _mm256_and_si256(_mm256_srli_epi16(bits, 4), ml);
                    xv[4*i128+3] = _mm256_and_si256(_mm256_srli_epi16(bits, 6), ml);
                    // Low 4 bits of `extra` select the codebook half per 32-weight block:
                    // adding 0x04 to the 2-bit index picks the upper 4 entries of iq2nl_values.
                    xv[4*i128+0] = _mm256_add_epi8(xv[4*i128+0], _mm256_set1_epi8((extra << 2) & 0x04));
                    xv[4*i128+1] = _mm256_add_epi8(xv[4*i128+1], _mm256_set1_epi8((extra << 1) & 0x04));
                    xv[4*i128+2] = _mm256_add_epi8(xv[4*i128+2], _mm256_set1_epi8((extra >> 0) & 0x04));
                    xv[4*i128+3] = _mm256_add_epi8(xv[4*i128+3], _mm256_set1_epi8((extra >> 1) & 0x04));
                    // 3-bit index -> non-linear codebook value (per 128-bit lane).
                    xv[4*i128+0] = _mm256_shuffle_epi8(values, xv[4*i128+0]);
                    xv[4*i128+1] = _mm256_shuffle_epi8(values, xv[4*i128+1]);
                    xv[4*i128+2] = _mm256_shuffle_epi8(values, xv[4*i128+2]);
                    xv[4*i128+3] = _mm256_shuffle_epi8(values, xv[4*i128+3]);
                    extra >>= 4;               // second 128-weight half uses the next 4 extra bits
                }
                // Interleave row k's super-block into y[i].qs and get back the float
                // rescale factor. NOTE(review): 1.f/125 is the d0 normalization used for
                // this quant type — presumably matched to the iq2nl value range; confirm
                // against convert_to_q8_k_r8 before changing.
                dnew[k] = convert_to_q8_k_r8(k, 1.f/125, xv, ls, block, y[i].qs);
            }
            // Combine the per-row fp16 scale with the rescale factor and store 8 fp16 d's.
            auto vd = _mm256_mul_ps(_mm256_loadu_ps(dnew), _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)dh)));
            _mm_storeu_si128((__m128i *)y[i].d, _mm256_cvtps_ph(vd, _MM_ROUND_NEAREST));
        }
        y += nb;                           // advance to the next group of 8 rows
    }
}
|
||||
|
||||
void iqk_convert_iq2_k_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
|
||||
GGML_ASSERT(n%QK_K == 0);
|
||||
@@ -2697,6 +2728,7 @@ void iqk_convert_iq6_k_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int
|
||||
bool iqk_convert_iqk_quants_q80_r8(int type, int n, const void * vx, size_t bx, void * vy, int nrc_x) {
|
||||
if (n%QK_K != 0 || nrc_x%8 != 0) return false;
|
||||
switch (ggml_type(type)) {
|
||||
case GGML_TYPE_IQ2_KS : iqk_convert_iq2_ks_q8_k_r8(n, vx, bx, vy, nrc_x); break;
|
||||
case GGML_TYPE_IQ2_K : iqk_convert_iq2_k_q8_k_r8 (n, vx, bx, vy, nrc_x); break;
|
||||
case GGML_TYPE_IQ3_K : iqk_convert_iq3_k_q8_k_r8 (n, vx, bx, vy, nrc_x); break;
|
||||
case GGML_TYPE_IQ4_KS : iqk_convert_iq4_ks_q8_k_r8(n, vx, bx, vy, nrc_x); break;
|
||||
|
||||
@@ -250,6 +250,7 @@ struct MulMat {
|
||||
case GGML_TYPE_Q4_K : return nrc_y >= 32 ? GGML_TYPE_Q8_1 : type;
|
||||
case GGML_TYPE_Q5_K : return nrc_y >= 32 ? GGML_TYPE_Q8_1 : type;
|
||||
case GGML_TYPE_Q6_K : return nrc_y >= 64 ? GGML_TYPE_Q8_0_R8 : type;
|
||||
case GGML_TYPE_IQ2_KS : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
|
||||
case GGML_TYPE_IQ2_K : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
|
||||
case GGML_TYPE_IQ3_K : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
|
||||
case GGML_TYPE_IQ4_KS : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
|
||||
|
||||
Reference in New Issue
Block a user