diff --git a/ggml/src/iqk/iqk_mul_mat.cpp b/ggml/src/iqk/iqk_mul_mat.cpp
index 51013828..1183246b 100644
--- a/ggml/src/iqk/iqk_mul_mat.cpp
+++ b/ggml/src/iqk/iqk_mul_mat.cpp
@@ -542,6 +542,12 @@ struct SimpleBits {
     __m256i values[4];
 };
 
+__m256i inline load_iq4nl_values_256() {
+    static const uint8_t kvalues_iq4nl[16] = {1, 24, 45, 63, 79, 93, 106, 118, 129, 141, 153, 166, 181, 197, 217, 241};
+    auto val128 = _mm_loadu_si128((const __m128i *)kvalues_iq4nl);
+    return MM256_SET_M128I(val128, val128);
+}
+
 #ifdef HAVE_FANCY_SIMD
 //====================================== Zen4 ==================================================
 
@@ -609,12 +615,6 @@ struct DequantizerQ4K final : public BaseDequantizer<block_q4_K> {
     Scales8K s8k;
 };
 
-__m256i inline load_iq4nl_values_256() {
-    static const uint8_t kvalues_iq4nl[16] = {1, 24, 45, 63, 79, 93, 106, 118, 129, 141, 153, 166, 181, 197, 217, 241};
-    auto val128 = _mm_loadu_si128((const __m128i *)kvalues_iq4nl);
-    return MM256_SET_M128I(val128, val128);
-}
-
 __m512i inline load_iq4nl_values_512() {
     auto val256 = load_iq4nl_values_256();
     return _mm512_inserti32x8(_mm512_castsi256_si512(val256), val256, 1);
@@ -1422,14 +1422,8 @@ struct DequantizerQ4K final : public BaseDequantizer<block_q4_K> {
     Scales8K s8k;
};
 
-__m256i load_iq4nl_values() {
-    static const uint8_t kvalues_iq4nl[16] = {1, 24, 45, 63, 79, 93, 106, 118, 129, 141, 153, 166, 181, 197, 217, 241};
-    auto val128 = _mm_loadu_si128((const __m128i *)kvalues_iq4nl);
-    return MM256_SET_M128I(val128, val128);
-}
-
 struct DequantizerIQ4XS final : public BaseDequantizer<block_iq4_xs> {
-    DequantizerIQ4XS(const void * vx, size_t bx) : BaseDequantizer(vx, bx), values(load_iq4nl_values()) {}
+    DequantizerIQ4XS(const void * vx, size_t bx) : BaseDequantizer(vx, bx), values(load_iq4nl_values_256()) {}
     template <typename Q8>
     inline __m256i new_block(int i, const Q8& q8, __m256 * accd) {
         d = GGML_FP16_TO_FP32(x[i].d);
@@ -1567,7 +1561,7 @@ struct DequantizerIQ3K final : public BaseDequantizer<block_iq3_k> {
 };
 
 struct DequantizerIQ4K final : public BaseDequantizer<block_iq4_k> {
-    DequantizerIQ4K(const void * vx, size_t bx) : BaseDequantizer(vx, bx), iqxk(4, -128), values(load_iq4nl_values()) {}
+    DequantizerIQ4K(const void * vx, size_t bx) : BaseDequantizer(vx, bx), iqxk(4, -128), values(load_iq4nl_values_256()) {}
     template <typename Q8>
     inline void new_block(int i, const Q8& q8, __m256 * accm, __m256i * scales) {
         d = GGML_FP16_TO_FP32(x[i].d);
@@ -1784,12 +1778,9 @@ struct DequantizerQ6K final : public BaseDequantizer<block_q6_K> {
     const __m256i mh = _mm256_set1_epi8(0x30);
 };
 
-struct DequantizerIQ2TN final : public BaseDequantizer<block_iq2_tn> {
+struct DequantizerIQ2TN final : public BaseDequantizer<block_iq2_tn, true> {
     DequantizerIQ2TN(const void * vx, size_t bx) : BaseDequantizer(vx, bx) {}
 
-    inline void new_block(int i) {
-        d = GGML_FP16_TO_FP32(x[i].d);
-    }
     inline void prepare(int i, int j) {
         bits.prepare(x[i].qs, j);
     }
@@ -1816,8 +1807,6 @@ IQK_NOINLINE void mul_mat_iq2tn_q8_K(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
 
     for (int i = 0; i < nb; ++i) {
 
-        deq1.new_block(i);
-
        if constexpr (nrc_y == 1) {
             deq1.prepare(i, 0);
             auto sumi1 = _mm256_add_epi16(_mm256_maddubs_epi16(deq1.bits.values[0], q8.load_quants(0, i, 0)),