From 1dd6c40c15e54cccea918a1465c08a21585acd29 Mon Sep 17 00:00:00 2001
From: Iwan Kawrakow
Date: Tue, 8 Oct 2024 13:56:29 +0300
Subject: [PATCH] WIP + adding iq3_kl quantization mix

---
 examples/quantize/quantize.cpp  |  1 +
 ggml/src/ggml-common.h          |  4 ++--
 ggml/src/ggml-cuda.cu           |  1 +
 ggml/src/ggml-cuda/convert.cu   | 35 +++++++++++++++++++++++++++++
 ggml/src/ggml-cuda/iqk_mmvq.cu  | 39 +++++++++++++++++++++++++++++++++
 ggml/src/ggml-cuda/iqk_mmvq.cuh |  4 ++++
 ggml/src/ggml-cuda/mmvq.cu      |  3 +++
 ggml/src/ggml-quants.c          |  3 ++-
 ggml/src/iqk/iqk_quantize.cpp   | 12 +++++-----
 include/llama.h                 |  1 +
 src/llama.cpp                   | 17 +++++++++++++-
 11 files changed, 110 insertions(+), 10 deletions(-)

diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index 581f14d8..4c1cc398 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -46,6 +46,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
     { "IQ4_XXS", LLAMA_FTYPE_MOSTLY_IQ4_XXS, " 4.06 bpw non-linear quantization", },
     { "IQ2_K",   LLAMA_FTYPE_MOSTLY_IQ2_K,   " 2.375 bpw non-linear quantization",},
     { "IQ3_K",   LLAMA_FTYPE_MOSTLY_IQ3_K,   " 3.44 bpw non-linear quantization", },
+    { "IQ3_KL",  LLAMA_FTYPE_MOSTLY_IQ3_KL,  " 4 bpw non-linear quantization mix",},
     { "IQ4_K",   LLAMA_FTYPE_MOSTLY_IQ4_K,   " 4.5 bpw non-linear quantization", },
     { "IQ5_K",   LLAMA_FTYPE_MOSTLY_IQ5_K,   " 5.5 bpw non-linear quantization", },
     { "IQ6_K",   LLAMA_FTYPE_MOSTLY_IQ6_K,   " 6.6 bpw non-linear quantization", },
diff --git a/ggml/src/ggml-common.h b/ggml/src/ggml-common.h
index ddd6b528..aa0c6e9e 100644
--- a/ggml/src/ggml-common.h
+++ b/ggml/src/ggml-common.h
@@ -442,10 +442,10 @@ typedef struct {
 static_assert(sizeof(block_iq4_xs) == sizeof(ggml_half) + sizeof(uint16_t) + QK_K/64 + QK_K/2, "wrong iq4_xs block size/padding");
 
 typedef struct {
-    uint8_t scales[2];
+    uint8_t scales[QK_K/64];
     uint8_t qs[QK_K/2];
 } block_iq4_xxs;
-static_assert(sizeof(block_iq4_xxs) == 2 + QK_K/2, "wrong iq4_xxs block size/padding");
+static_assert(sizeof(block_iq4_xxs) == QK_K/64 + QK_K/2, "wrong iq4_xxs block size/padding");
 
 typedef struct {
     ggml_half d;
diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu
index 871d4007..6b74ee33 100644
--- a/ggml/src/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda.cu
@@ -2828,6 +2828,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
         case GGML_TYPE_IQ3_XXS:
         case GGML_TYPE_IQ4_NL:
         case GGML_TYPE_IQ4_XS:
+        case GGML_TYPE_IQ4_XXS:
         case GGML_TYPE_IQ2_K:
         case GGML_TYPE_IQ3_K:
         case GGML_TYPE_IQ4_K:
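
Note: the ggml-common.h hunk generalizes block_iq4_xxs from two hard-coded scale bytes to one scale byte per 64 weights, and the CUDA hunks below read rows that carry an extra float scale in front of the blocks. A minimal host-side sketch of the resulting row layout, assuming QK_K == 256; the struct name block_iq4_xxs_sketch and the helper bytes_per_row_iq4_xxs are made up here for illustration:

    #include <cstddef>
    #include <cstdint>

    constexpr int kQK_K = 256; // stand-in for ggml's QK_K

    struct block_iq4_xxs_sketch {
        uint8_t scales[kQK_K/64]; // one scale byte per 64 weights (was: scales[2])
        uint8_t qs[kQK_K/2];      // 256 4-bit non-linear quants, two per byte
    };

    // Row layout implied by the patch: one float row scale, then n_per_row/QK_K blocks.
    constexpr size_t bytes_per_row_iq4_xxs(size_t n_per_row) {
        return sizeof(float) + (n_per_row/kQK_K) * sizeof(block_iq4_xxs_sketch);
    }
    static_assert(bytes_per_row_iq4_xxs(4096) == 4 + 16*132, "~4.13 bpw plus a 4-byte row prefix");
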
diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu
index 28b2415b..07ab11e9 100644
--- a/ggml/src/ggml-cuda/convert.cu
+++ b/ggml/src/ggml-cuda/convert.cu
@@ -615,6 +615,29 @@ static __global__ void dequantize_block_iq4_xs(const void * __restrict__ vx, dst_t * __restrict__ yy) {
     }
 }
 
+template<typename dst_t>
+static __global__ void dequantize_block_iq4_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {
+
+    int64_t ii  = blockIdx.x;
+    int64_t row = (QK_K * ii) / n_per_row;
+    const char * cx = (const char *)vx + row * row_size;
+    float scale = *(const float *)cx;
+    const block_iq4_xxs * x = (const block_iq4_xxs *)(cx + sizeof(float));
+    const int64_t i = ii - (row*n_per_row)/QK_K;
+
+    const int64_t tid = threadIdx.x;
+    const int64_t ib = tid/8; // 0...3
+    const int64_t il = tid%8; // 0...7
+    dst_t * y = yy + ii*QK_K + 64*ib + 4*il;
+    const uint8_t * q4 = x[i].qs + 32*ib + 4*il;
+    const float d = scale * ((x[i].scales[ib] & 254) - 127);
+    const int8_t * values = iq4k_values + ((x[i].scales[ib] & 1) << 4);
+    for (int j = 0; j < 4; ++j) {
+        y[j+ 0] = d * values[q4[j] & 0xf];
+        y[j+32] = d * values[q4[j] >> 4];
+    }
+}
+
 template<typename dst_t>
 static __global__ void dequantize_block_iq4_k(const void * __restrict__ vx, dst_t * __restrict__ yy) {
     const int64_t i = blockIdx.x;
@@ -921,6 +944,14 @@ static void dequantize_row_iq4_xs_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) {
     dequantize_block_iq4_xs<<<nb, 32, 0, stream>>>(vx, y);
 }
 
+template<typename dst_t>
+static void dequantize_row_iq4_xxs_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
+    const int64_t k = nrows * n_per_row;
+    const int64_t row_size = ggml_row_size(GGML_TYPE_IQ4_XXS, n_per_row);
+    const int nb = (k + QK_K - 1) / QK_K;
+    dequantize_block_iq4_xxs<<<nb, 32, 0, stream>>>(vx, y, n_per_row, row_size);
+}
+
 template<typename dst_t>
 static void dequantize_row_iq2_k_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
     const int64_t k = nrows * n_per_row;
@@ -1083,6 +1114,8 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
             return dequantize_row_iq4_nl_cuda;
         case GGML_TYPE_IQ4_XS:
             return dequantize_row_iq4_xs_cuda;
+        case GGML_TYPE_IQ4_XXS:
+            return dequantize_row_iq4_xxs_cuda;
         case GGML_TYPE_IQ2_K:
             return dequantize_row_iq2_k_cuda;
         case GGML_TYPE_IQ3_K:
@@ -1152,6 +1185,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
             return dequantize_row_iq4_nl_cuda;
         case GGML_TYPE_IQ4_XS:
             return dequantize_row_iq4_xs_cuda;
+        case GGML_TYPE_IQ4_XXS:
+            return dequantize_row_iq4_xxs_cuda;
         case GGML_TYPE_IQ2_K:
             return dequantize_row_iq2_k_cuda;
         case GGML_TYPE_IQ3_K:
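
In the kernel above, each scale byte does double duty: bit 0 selects the lower or upper 16-entry half of the 32-entry iq4k_values lookup table, and the remaining seven bits hold the block scale biased by 127. A scalar equivalent of the per-block decode, as a sketch reusing block_iq4_xxs_sketch from the note above (iq4k_values stands for the non-linear table defined in the ggml sources):

    // Dequantize 64-weight block ib of one super-block, mirroring dequantize_block_iq4_xxs:
    // low nibbles fill y[0..31], high nibbles fill y[32..63].
    static void dequant_block_sketch(const block_iq4_xxs_sketch * x, int ib, float row_scale,
                                     const int8_t * iq4k_values, float * y) {
        const float d = row_scale * ((x->scales[ib] & 254) - 127);        // 7-bit scale, bias 127
        const int8_t * values = iq4k_values + ((x->scales[ib] & 1) << 4); // bit 0 picks table half
        const uint8_t * q4 = x->qs + 32*ib;
        for (int j = 0; j < 32; ++j) {
            y[j     ] = d * values[q4[j] & 0xf];
            y[j + 32] = d * values[q4[j] >> 4];
        }
    }
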
diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cu b/ggml/src/ggml-cuda/iqk_mmvq.cu
index b2c32c0c..14c1615c 100644
--- a/ggml/src/ggml-cuda/iqk_mmvq.cu
+++ b/ggml/src/ggml-cuda/iqk_mmvq.cu
@@ -214,6 +214,38 @@ __device__ __forceinline__ float vec_dot_iq4_k_q8_1(
     return d * (sumi1 * ls1 + sumi2 * ls2);
 }
 
+#define VDR_IQ4_XXS_Q8_1_MMVQ 4
+#define VDR_IQ4_XXS_Q8_1_MMQ  4
+
+// TODO
+__device__ __forceinline__ float vec_dot_iq4_xxs_q8_1(
+    const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) {
+    return 0.f;
+//
+//    const block_iq4_k * bq4 = (const block_iq4_k *) vbq + kbx;
+//    const uint8_t * all_values = (const uint8_t *)iq4k_values;
+//
+//    // iqs is 0...28
+//    const int ib32 = iqs/4;
+//    // Why iqs/4 ?
+//    const int32_t  * q8 = (const int *)bq8_1[ib32].qs;
+//    const uint16_t * q4 = (const uint16_t *)bq4->qs + 8*ib32;
+//    const uint16_t extra = bq4->extra >> 2*ib32;
+//    int v1, v2;
+//    int sumi1 = 0, sumi2 = 0;
+//    for (int j = 0; j < 4; ++j) {
+//        const uint32_t aux32 = q4[2*j+0] | (q4[2*j+1] << 16);
+//        get_int_from_table_16_shift(aux32, extra, all_values, v1, v2);
+//        sumi1 = ggml_cuda_dp4a(v1, q8[j+0], sumi1);
+//        sumi2 = ggml_cuda_dp4a(v2, q8[j+4], sumi2);
+//    }
+//    const float d = __half2float(bq4->d) * __low2float(bq8_1[ib32].ds);
+//    const uint8_t sh = bq4->scales_h[ib32/2] >> 4*(ib32%2);
+//    const int ls1 = ((bq4->scales_l[ib32] & 0xf) | ((sh << 4) & 0x30)) - 32;
+//    const int ls2 = ((bq4->scales_l[ib32] >>  4) | ((sh << 2) & 0x30)) - 32;
+//    return d * (sumi1 * ls1 + sumi2 * ls2);
+}
+
 #define VDR_IQ5_K_Q8_1_MMVQ 4
 #define VDR_IQ5_K_Q8_1_MMQ  4
 
@@ -612,6 +644,13 @@ void mul_mat_vec_iq4_k_q8_1_cuda(
     const void * vx, const void * vy, float * dst,
     const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) {
 
     iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ4_K, VDR_IQ4_K_Q8_1_MMVQ, vec_dot_iq4_k_q8_1>(vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream);
 }
 
+void mul_mat_vec_iq4_xxs_q8_1_cuda(
+    const void * vx, const void * vy, float * dst,
+    const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) {
+
+    iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ4_XXS, VDR_IQ4_XXS_Q8_1_MMVQ, vec_dot_iq4_xxs_q8_1>(vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream);
+}
+
 void mul_mat_vec_iq5_k_q8_1_cuda(
     const void * vx, const void * vy, float * dst,
     const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) {
diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cuh b/ggml/src/ggml-cuda/iqk_mmvq.cuh
index 7fb76ff6..2855c6c1 100644
--- a/ggml/src/ggml-cuda/iqk_mmvq.cuh
+++ b/ggml/src/ggml-cuda/iqk_mmvq.cuh
@@ -28,3 +28,7 @@ void mul_mat_vec_iq1_tn_q8_1_cuda(
     const void * vx, const void * vy, float * dst,
     const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream);
 
+void mul_mat_vec_iq4_xxs_q8_1_cuda(
+    const void * vx, const void * vy, float * dst,
+    const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream);
+
diff --git a/ggml/src/ggml-cuda/mmvq.cu b/ggml/src/ggml-cuda/mmvq.cu
index 15e8fb5a..b0257aa9 100644
--- a/ggml/src/ggml-cuda/mmvq.cu
+++ b/ggml/src/ggml-cuda/mmvq.cu
@@ -459,6 +459,9 @@ void ggml_cuda_op_mul_mat_vec_q(
         case GGML_TYPE_IQ4_K:
             mul_mat_vec_iq4_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream);
             break;
+        case GGML_TYPE_IQ4_XXS:
+            mul_mat_vec_iq4_xxs_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream);
+            break;
        case GGML_TYPE_IQ5_K:
            mul_mat_vec_iq5_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream);
            break;
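
With vec_dot_iq4_xxs_q8_1 stubbed out to return 0.f, the new mul-mat-vec path is wired up but will produce zeros for IQ4_XXS; the commented body is the IQ4_K kernel kept as a starting point, and it still reads block_iq4_k fields (extra, scales_h, scales_l) that block_iq4_xxs does not have. The reference result the finished kernel must reproduce is just the dequantize-then-dot sum. A scalar sketch, building on the two sketches above and assuming float activations:

    // Scalar reference for one output element: dot of a quantized row with activations.
    // The CUDA kernel computes the same thing with packed int8 dp4a per 32-value chunk.
    static float row_dot_sketch(const void * vx, const float * act, int n_per_row,
                                const int8_t * iq4k_values) {
        const float row_scale = *(const float *)vx;
        const block_iq4_xxs_sketch * x =
            (const block_iq4_xxs_sketch *)((const char *)vx + sizeof(float));
        float sum = 0.f;
        for (int ibl = 0; ibl < n_per_row/kQK_K; ++ibl) {
            float tmp[64];
            for (int ib = 0; ib < kQK_K/64; ++ib) {
                dequant_block_sketch(x + ibl, ib, row_scale, iq4k_values, tmp);
                for (int j = 0; j < 64; ++j) sum += tmp[j] * act[ibl*kQK_K + 64*ib + j];
            }
        }
        return sum;
    }
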
diff --git a/ggml/src/ggml-quants.c b/ggml/src/ggml-quants.c
index f5fff22e..b70559a3 100644
--- a/ggml/src/ggml-quants.c
+++ b/ggml/src/ggml-quants.c
@@ -14947,7 +14947,7 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbytes) {
         return false;
     }
 
-    if (type != GGML_TYPE_IQ2_TN && type != GGML_TYPE_IQ1_TN && nbytes % ggml_type_size(type) != 0) {
+    if (type != GGML_TYPE_IQ2_TN && type != GGML_TYPE_IQ1_TN && type != GGML_TYPE_IQ4_XXS && nbytes % ggml_type_size(type) != 0) {
         fprintf(stderr, "%s: invalid size %zu for type %s (type size = %zu)\n", __func__, nbytes, ggml_type_name(type), ggml_type_size(type));
         return false;
     }
@@ -15166,6 +15166,7 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbytes) {
         case GGML_TYPE_IQ6_K: break;
         case GGML_TYPE_IQ2_TN: break;
         case GGML_TYPE_IQ1_TN: break;
+        case GGML_TYPE_IQ4_XXS: break;
         case GGML_TYPE_Q4_0_4_4:
         case GGML_TYPE_Q4_0_4_8:
             {
diff --git a/ggml/src/iqk/iqk_quantize.cpp b/ggml/src/iqk/iqk_quantize.cpp
index 02df8b16..c2c0bead 100644
--- a/ggml/src/iqk/iqk_quantize.cpp
+++ b/ggml/src/iqk/iqk_quantize.cpp
@@ -2175,7 +2175,7 @@ static void quantize_row_iq4_k_impl_bs128(const int super_block_size, const int block_size,
                                           const float * quant_weights, const int ntry) {
 
-    GGML_ASSERT(super_block_size == 256 && block_size == 128);
+    //GGML_ASSERT(super_block_size == 256 && block_size == 128);
 
     float * dptr = (float *)cy;
     block_iq4_xxs * y = (block_iq4_xxs *)(dptr + 1);
 
@@ -2286,7 +2286,7 @@ static void quantize_row_iq4_k_impl_bs128(const int super_block_size, const int block_size,
     if (!d) return;
     float id = d ? 1/d : 0.f;
     float sumqx = 0, sumq2 = 0;
-    float mse = 0;
+    //float mse = 0;
     for (int ibl = 0; ibl < n_per_row/super_block_size; ++ibl) {
         const float * xbl = x + ibl*super_block_size;
         float sigma2 = 0;
@@ -2320,8 +2320,8 @@ static void quantize_row_iq4_k_impl_bs128(const int super_block_size, const int block_size,
                     float q2 = block_values[i2]*l;
                     sumqx += w1*q1*xb[j] + w2*q2*xb[j+block_size/2];
                     sumq2 += w1*q1*q1 + w2*q2*q2;
-                    float diff = xb[j] - d*q1; mse += diff*diff;
-                    diff = xb[j+block_size/2] - d*q2; mse += diff*diff;
+                    //float diff = xb[j] - d*q1; mse += diff*diff;
+                    //diff = xb[j+block_size/2] - d*q2; mse += diff*diff;
                 }
             }
         }
@@ -2340,7 +2340,7 @@ void quantize_row_iq4_xxs(const float * x, void * y, int64_t k) {
 
 size_t quantize_iq4_xxs(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
     //printf("============ %s(%d, %d)\n", __func__, int(nrows), int(n_per_row));
-    constexpr int kBlockSize = 128;
+    constexpr int kBlockSize = 64; //128;
     GGML_ASSERT(n_per_row%QK_K == 0);
     auto row_size = ggml_row_size(GGML_TYPE_IQ4_XXS, n_per_row);
     char * qrow = (char *)dst;
@@ -2355,7 +2355,7 @@ size_t quantize_iq4_xxs(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
 }
 
 void dequantize_row_iq4_xxs(const block_iq4_xxs * x, float * y, int64_t k) {
-    constexpr int kBlockSize = 128;
+    constexpr int kBlockSize = 64; //128;
     GGML_ASSERT(k%QK_K == 0);
     const float * dptr = (const float *)x;
     float d = *dptr;
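
The kBlockSize switch from 128 to 64 makes the quantizer emit one scale byte per 64 weights, matching the scales[QK_K/64] change in ggml-common.h. It also explains the new IQ4_XXS exemption in ggml_validate_row_data: every row is prefixed with a 4-byte float, so the total tensor size is not a multiple of the 132-byte block size and the generic divisibility check would reject valid data. The arithmetic, under the same assumptions as the sketches above:

    // One 4096-wide row: 4 bytes (row scale) + 16 blocks * 132 bytes = 2116 bytes.
    // 2116 % 132 == 4, so `nbytes % ggml_type_size(type) != 0` must be skipped for IQ4_XXS.
    static_assert(sizeof(block_iq4_xxs_sketch) == 132, "4 scale bytes + 128 quant bytes");
    static_assert(bytes_per_row_iq4_xxs(4096) % sizeof(block_iq4_xxs_sketch) == 4,
                  "row prefix breaks divisibility by the block size");
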
diff --git a/include/llama.h b/include/llama.h
index 1fefc182..f564fe34 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -178,6 +178,7 @@ extern "C" {
         LLAMA_FTYPE_MOSTLY_IQ2_TN  = 143, // except 1d tensors
         LLAMA_FTYPE_MOSTLY_IQ1_TN  = 144, // except 1d tensors
         LLAMA_FTYPE_MOSTLY_IQ4_XXS = 145, // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_IQ3_KL  = 146, // except 1d tensors
 
         LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
     };
diff --git a/src/llama.cpp b/src/llama.cpp
index 40b24cb2..293901c2 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -4498,6 +4498,7 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
         case LLAMA_FTYPE_MOSTLY_IQ4_XXS: return "IQ4_XXS - 4.06 bpw";
         case LLAMA_FTYPE_MOSTLY_IQ2_K:   return "IQ2_K - 2.375 bpw";
         case LLAMA_FTYPE_MOSTLY_IQ3_K:   return "IQ3_K - 3.4325 bpw";
+        case LLAMA_FTYPE_MOSTLY_IQ3_KL:  return "IQ3_KL - 4 bpw";
         case LLAMA_FTYPE_MOSTLY_IQ4_K:   return "IQ4_K - 4.5 bpw";
         case LLAMA_FTYPE_MOSTLY_IQ5_K:   return "IQ5_K - 5.5 bpw";
         case LLAMA_FTYPE_MOSTLY_IQ6_K:   return "IQ6_K - 6.6 bpw";
@@ -15699,12 +15700,15 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
         new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : qs.model.hparams.n_gqa() >= 2 ? GGML_TYPE_IQ3_K :
                    !qs.has_imatrix ? GGML_TYPE_IQ3_S : GGML_TYPE_IQ3_XXS;
     }
-    else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S || ftype == LLAMA_FTYPE_MOSTLY_IQ3_K) && qs.model.hparams.n_gqa() >= 2) {
+    else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S) && qs.model.hparams.n_gqa() >= 2) {
         new_type = GGML_TYPE_IQ4_K;
     }
     else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_K && qs.model.hparams.n_gqa() >= 2) {
         new_type = GGML_TYPE_IQ4_K;
     }
+    else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_KL) {
+        new_type = qs.model.hparams.n_gqa() >= 2 ? GGML_TYPE_IQ5_K : GGML_TYPE_IQ4_K;
+    }
     else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
         new_type = qs.model.hparams.n_gqa() >= 2 ? GGML_TYPE_IQ5_K : GGML_TYPE_IQ4_K;
     }
@@ -15781,6 +15785,9 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
             new_type = arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K;
         }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_KL) {
+            new_type = use_more_bits(i_layer, n_layer) ? GGML_TYPE_IQ4_K : GGML_TYPE_IQ3_K;
+        }
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
             if (arch == LLM_ARCH_FALCON) {
                 new_type = i_layer < n_layer/16 ? GGML_TYPE_Q6_K :
@@ -15821,6 +15828,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
             else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L ) new_type = GGML_TYPE_Q5_K;
             else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M  ) new_type = GGML_TYPE_IQ4_K;
             else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_K  ) new_type = GGML_TYPE_IQ3_K;
+            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_KL ) new_type = GGML_TYPE_IQ4_K;
         }
     } else {
         if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
@@ -15840,6 +15848,9 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
         if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
             new_type = GGML_TYPE_IQ3_XXS;
         }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_KL && use_more_bits(i_layer, n_layer)) {
+            new_type = GGML_TYPE_IQ4_XS;
+        }
         ++qs.i_ffn_gate;
     }
     else if (name.find("ffn_up") != std::string::npos) {
@@ -15848,6 +15859,9 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
         if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
             new_type = GGML_TYPE_IQ3_XXS;
        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_KL && use_more_bits(i_layer, n_layer)) {
+            new_type = GGML_TYPE_IQ4_K;
+        }
         ++qs.i_ffn_up;
     }
@@ -16014,6 +16028,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
         case LLAMA_FTYPE_MOSTLY_IQ4_XXS: default_type = GGML_TYPE_IQ4_XXS; break;
         case LLAMA_FTYPE_MOSTLY_IQ2_K:   default_type = GGML_TYPE_IQ2_K;   break;
         case LLAMA_FTYPE_MOSTLY_IQ3_K:   default_type = GGML_TYPE_IQ3_K;   break;
+        case LLAMA_FTYPE_MOSTLY_IQ3_KL:  default_type = GGML_TYPE_IQ3_K;   break;
         case LLAMA_FTYPE_MOSTLY_IQ4_K:   default_type = GGML_TYPE_IQ4_K;   break;
         case LLAMA_FTYPE_MOSTLY_IQ5_K:   default_type = GGML_TYPE_IQ5_K;   break;
         case LLAMA_FTYPE_MOSTLY_IQ6_K:   default_type = GGML_TYPE_IQ6_K;   break;
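
Taken together, the llama.cpp hunks define the IQ3_KL mix: IQ3_K as the default type, selectively upgraded per tensor class. Condensed into one hypothetical helper for illustration (the real logic stays spread across llama_tensor_get_type; use_more_bits is the existing llama.cpp helper that roughly flags the first and last eighth of the layers plus every third layer in between; Falcon and MoE special cases are ignored here):

    #include <string>

    // Hypothetical condensation of the IQ3_KL branches added above.
    static ggml_type iq3_kl_pick(const std::string & name, int i_layer, int n_layer, int n_gqa) {
        if (name.find("attn_v.weight") != std::string::npos)
            return n_gqa >= 2 ? GGML_TYPE_IQ5_K : GGML_TYPE_IQ4_K;  // more bits for V under GQA
        if (name.find("attn_output.weight") != std::string::npos)
            return GGML_TYPE_IQ4_K;
        if (name.find("ffn_down") != std::string::npos)
            return use_more_bits(i_layer, n_layer) ? GGML_TYPE_IQ4_K : GGML_TYPE_IQ3_K;
        if (name.find("ffn_gate") != std::string::npos && use_more_bits(i_layer, n_layer))
            return GGML_TYPE_IQ4_XS;
        if (name.find("ffn_up") != std::string::npos && use_more_bits(i_layer, n_layer))
            return GGML_TYPE_IQ4_K;
        return GGML_TYPE_IQ3_K;  // default_type for LLAMA_FTYPE_MOSTLY_IQ3_KL
    }

The net effect is a roughly 4 bpw mix built from the IQ3_K/IQ4_K/IQ5_K non-linear types, which is what the " 4 bpw non-linear quantization mix" description in quantize.cpp refers to.
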