Mirror of https://github.com/ikawrakow/ik_llama.cpp.git, last synced 2026-02-24 07:04:11 +00:00.
iq2_kl: CUDA dequantize
This commit is contained in:
@@ -76,6 +76,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
|
||||
{ "IQ2_K_R4", LLAMA_FTYPE_MOSTLY_IQ2_K_R4, "IQ2_K repacked",},
|
||||
{ "IQ2_KS", LLAMA_FTYPE_MOSTLY_IQ2_KS, " 2.1875 bpw non-linear quantization",},
|
||||
{ "IQ2_KT", LLAMA_FTYPE_MOSTLY_IQ2_KT, " 2.125 bpw trellis quantization", },
|
||||
{ "IQ2_KL", LLAMA_FTYPE_MOSTLY_IQ2_KL, " 2.69 bpw non-linear quantization", },
|
||||
{ "IQ3_KS", LLAMA_FTYPE_MOSTLY_IQ3_KS, " 3.19 bpw non-linear quantization", },
|
||||
{ "IQ3_K", LLAMA_FTYPE_MOSTLY_IQ3_K, " 3.44 bpw non-linear quantization", },
|
||||
{ "IQ3_K_R4", LLAMA_FTYPE_MOSTLY_IQ3_K_R4, "IQ3_K repacked", },
|
||||
|
||||
@@ -3499,6 +3499,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
|
||||
case GGML_TYPE_IQ3_XXS:
|
||||
case GGML_TYPE_IQ4_NL:
|
||||
case GGML_TYPE_IQ4_XS:
|
||||
case GGML_TYPE_IQ2_KL:
|
||||
case GGML_TYPE_IQ3_KS:
|
||||
case GGML_TYPE_IQ4_KS:
|
||||
case GGML_TYPE_IQ4_KSS:
|
||||
|
||||
@@ -599,6 +599,13 @@ struct ggml_cuda_type_traits<GGML_TYPE_IQ3_K> {
|
||||
static constexpr int qi = QI4_XS;
|
||||
};
|
||||
|
||||
// Type traits for IQ2_KL (2.6875 bpw non-linear quantization).
// NOTE(review): qr/qi reuse the IQ4_XS constants, matching how the other
// IQ*_K specializations in this file are declared — presumably because the
// MMVQ path iterates quants at the same granularity; confirm against the
// IQ2_KL dot-product kernel once it is implemented.
template<>
struct ggml_cuda_type_traits<GGML_TYPE_IQ2_KL> {
    static constexpr int qk = QK_K;    // quants per super-block
    static constexpr int qr = QR4_XS;  // quant ratio (borrowed from IQ4_XS)
    static constexpr int qi = QI4_XS;  // ints per quant block (borrowed from IQ4_XS)
};
|
||||
|
||||
template<>
|
||||
struct ggml_cuda_type_traits<GGML_TYPE_IQ3_KS> {
|
||||
static constexpr int qk = QK_K;
|
||||
|
||||
@@ -1333,6 +1333,48 @@ static __global__ void dequantize_block_iq3_k(const void * __restrict__ vx, dst_
|
||||
}
|
||||
}
|
||||
|
||||
// Dequantize IQ2_KL data: one CUDA block of 32 threads expands one QK_K
// (= 256-quant) super-block.
// Row layout: a per-row ggml_half scale followed by block_iq2_kl blocks.
// Each packed entry is a 5-bit table index (low nibble from qs, one high bit
// from qh) into iq2kl_values; each table entry decodes to TWO int8 values,
// so every thread emits 16 outputs — two 32-quant halves of its 64-quant
// sub-block, scaled by d1 and d2 respectively.
// Precondition: n_per_row is a multiple of QK_K (blockIdx.x enumerates
// super-blocks across the whole tensor, nb = nrows*n_per_row/QK_K).
template<typename dst_t>
static __global__ void dequantize_block_iq2_kl(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {

    int64_t ii = blockIdx.x;                  // global super-block index
    int64_t row = (QK_K * ii) / n_per_row;    // row this super-block belongs to
    const char * cx = (const char *)vx + row * row_size;
    // Per-row fp16 scale. NOTE(review): the 1.025f factor looks like a
    // quantizer bias compensation — confirm it matches the CPU dequant path.
    float scale = (float)*(const ggml_half *)cx * 1.025f;
    const block_iq2_kl * x = (const block_iq2_kl *)(cx + sizeof(ggml_half));
    const int64_t i = ii - (row*n_per_row)/QK_K;   // super-block index within the row

    const int64_t tid = threadIdx.x;
    const int64_t ib64 = tid/8;   // 64-quant sub-block (0..3)
    const int64_t il = tid%8;     // thread slot within the sub-block (0..7)
    dst_t * y = yy + ii*QK_K + 64*ib64 + 4*il;
    const uint8_t * qs = x[i].qs + 16*ib64 + 2*il;   // 2 bytes = 4 low nibbles
    const uint8_t * qh = x[i].qh + 2*il;             // high bits, 2 per byte per sub-block
    auto sh = x[i].scales_h >> 4*ib64;               // 4 high scale bits of this sub-block
    // Reassemble the two 6-bit signed 32-block scales: 4 low bits from the
    // nibble-packed scales_l, 2 high bits from scales_h, bias of -32.
    const float d1 = scale * (int(((x[i].scales_l[(2*ib64+0)%4] >> 4*(ib64/2)) & 0xf) | ((sh << 4) & 0x30)) - 32);
    const float d2 = scale * (int(((x[i].scales_l[(2*ib64+1)%4] >> 4*(ib64/2)) & 0xf) | ((sh << 2) & 0x30)) - 32);
    if constexpr (std::is_same_v<dst_t, nv_bfloat16>) {
        // bf16 destination: convert explicitly (no implicit float->bf16).
        for (int j = 0; j < 2; ++j) {
            uint8_t h = qh[j] >> 2*ib64;
            // 5-bit table index: qs low nibble | qh bit as bit 4.
            auto val1 = (const int8_t *)(iq2kl_values + ((qs[j] & 0xf) | ((h & 1) << 4)));
            auto val2 = (const int8_t *)(iq2kl_values + ((qs[j] >> 4) | ((h & 2) << 3)));
            y[2*j+ 0] = __float2bfloat16(d1 * val1[0]);
            y[2*j+ 1] = __float2bfloat16(d1 * val1[1]);
            y[2*j+32] = __float2bfloat16(d2 * val2[0]);
            y[2*j+33] = __float2bfloat16(d2 * val2[1]);
        }
    } else {
        // fp16/fp32 destination: rely on the implicit conversion.
        for (int j = 0; j < 2; ++j) {
            uint8_t h = qh[j] >> 2*ib64;
            auto val1 = (const int8_t *)(iq2kl_values + ((qs[j] & 0xf) | ((h & 1) << 4)));
            auto val2 = (const int8_t *)(iq2kl_values + ((qs[j] >> 4) | ((h & 2) << 3)));
            y[2*j+ 0] = d1 * val1[0];
            y[2*j+ 1] = d1 * val1[1];
            y[2*j+32] = d2 * val2[0];
            y[2*j+33] = d2 * val2[1];
        }
    }
}
|
||||
|
||||
template<typename dst_t>
|
||||
static __global__ void dequantize_block_iq3_ks(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {
|
||||
|
||||
@@ -1618,6 +1660,14 @@ static void dequantize_row_iq3_k_cuda(const void * vx, dst_t * y, const int64_t
|
||||
dequantize_block_iq3_k<<<nb, 32, 0, stream>>>(vx, y);
|
||||
}
|
||||
|
||||
// Host-side launcher for IQ2_KL dequantization.
// Launches one 32-thread block per QK_K-quant super-block over all rows.
template<typename dst_t>
static void dequantize_row_iq2_kl_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
    const int64_t total_quants  = nrows * n_per_row;
    const int64_t bytes_per_row = ggml_row_size(GGML_TYPE_IQ2_KL, n_per_row);
    const int n_blocks = (total_quants + QK_K - 1) / QK_K;   // ceil-div
    dequantize_block_iq2_kl<<<n_blocks, 32, 0, stream>>>(vx, y, n_per_row, bytes_per_row);
}
|
||||
|
||||
template<typename dst_t>
|
||||
static void dequantize_row_iq3_ks_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
|
||||
const int64_t k = nrows * n_per_row;
|
||||
@@ -1772,6 +1822,8 @@ to_bf16_cuda_t ggml_get_to_bf16_cuda(ggml_type type) {
|
||||
return dequantize_row_iq2_k_cuda<nv_bfloat16>;
|
||||
case GGML_TYPE_IQ3_K:
|
||||
return dequantize_row_iq3_k_cuda<nv_bfloat16>;
|
||||
case GGML_TYPE_IQ2_KL:
|
||||
return dequantize_row_iq2_kl_cuda<nv_bfloat16>;
|
||||
case GGML_TYPE_IQ3_KS:
|
||||
return dequantize_row_iq3_ks_cuda<nv_bfloat16>;
|
||||
case GGML_TYPE_IQ4_KSS:
|
||||
@@ -1876,6 +1928,8 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
|
||||
return dequantize_row_iq2_k_cuda;
|
||||
case GGML_TYPE_IQ3_K:
|
||||
return dequantize_row_iq3_k_cuda;
|
||||
case GGML_TYPE_IQ2_KL:
|
||||
return dequantize_row_iq2_kl_cuda;
|
||||
case GGML_TYPE_IQ3_KS:
|
||||
return dequantize_row_iq3_ks_cuda;
|
||||
case GGML_TYPE_IQ4_K:
|
||||
@@ -1973,6 +2027,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
|
||||
return dequantize_row_iq2_k_cuda;
|
||||
case GGML_TYPE_IQ3_K:
|
||||
return dequantize_row_iq3_k_cuda;
|
||||
case GGML_TYPE_IQ2_KL:
|
||||
return dequantize_row_iq2_kl_cuda;
|
||||
case GGML_TYPE_IQ3_KS:
|
||||
return dequantize_row_iq3_ks_cuda;
|
||||
case GGML_TYPE_IQ4_K:
|
||||
|
||||
@@ -1016,6 +1016,75 @@ __device__ __forceinline__ void vec_dot_iq3_k_q8_1(
|
||||
|
||||
}
|
||||
|
||||
// TODO
// NOTE(review): STUB — the immediate `return;` below means this function
// never writes to *result, so the MMVQ path for IQ2_KL currently accumulates
// nothing. Everything after the early return is DEAD CODE copied from the
// IQ3_KS implementation (it even reinterprets the data as block_iq3_ks) and
// must be rewritten for the IQ2_KL layout (paired-value iq2kl_values table;
// see dequantize_block_iq2_kl) before the early return is removed. Until
// then, dispatching IQ2_KL mat-vec products here yields wrong results —
// verify that callers do not advertise MMVQ support for IQ2_KL yet.
__device__ __forceinline__ void vec_dot_iq2_kl_q8_1(
    const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iiqs, float * result) {

    return;   // not implemented yet

    // ---- dead code below: verbatim copy of the IQ3_KS dot product ----
    float d = __half2float(*(const half *)vbq);
    const block_iq3_ks * bq3 = (const block_iq3_ks *)((const char *)vbq + sizeof(half)) + kbx;

    int iqs = iiqs/4;
    const int ib128 = iqs/4;  // 0 or 1. 0 works on quants 0...127, 1 on quants 128...255
    // Each thread processes 8 quants in each of the 4 32-blocks
    const int il8 = iqs%4;    // 0...3. 0 works on quants 0...7, 1 on quants 8...15, 2 on 16...23, 3 on 24...31

    const uint16_t * ql = (const uint16_t *)bq3->qs + 16*ib128 + 4*il8;
    const uint16_t * qh = (const uint16_t *)bq3->qh + 4*il8;

    int32_t aux32;
    const uint8_t * aux8 = (const uint8_t *)&aux32;

    uint16_t extra = bq3->extra >> 4*ib128;
    uint16_t extra_v = extra >> 8;

    // Select the shifted/unshifted half of the value table per 32-block.
    const uint16_t * values1 = iq3k_table + ((extra_v << 6) & 0x40);
    const uint16_t * values2 = iq3k_table + ((extra_v << 5) & 0x40);
    const uint16_t * values3 = iq3k_table + ((extra_v << 4) & 0x40);
    const uint16_t * values4 = iq3k_table + ((extra_v << 3) & 0x40);

    const int * q8;
    int sumi[4] = {0, 0, 0, 0};
    int v;
    for (int i = 0; i < 2; ++i) {
        uint32_t vl = ql[2*i+0] | (ql[2*i+1] << 16);
        uint32_t vh = ((qh[2*i+0] | (qh[2*i+1] << 16)) >> 4*ib128) << 2;

        q8 = (const int *)bq8_1[4*ib128+0].qs + 2*il8;
        aux32 = (vl & 0x03030303) | (vh & 0x04040404);
        v = int_from_table_2(aux8, values1);
        sumi[0] = ggml_cuda_dp4a(v, q8[i], sumi[0]);
        vl >>= 2; vh >>= 1;

        q8 += sizeof(block_q8_1)/4;
        aux32 = (vl & 0x03030303) | (vh & 0x04040404);
        v = int_from_table_2(aux8, values2);
        sumi[1] = ggml_cuda_dp4a(v, q8[i], sumi[1]);
        vl >>= 2; vh >>= 1;

        q8 += sizeof(block_q8_1)/4;
        aux32 = (vl & 0x03030303) | (vh & 0x04040404);
        v = int_from_table_2(aux8, values3);
        sumi[2] = ggml_cuda_dp4a(v, q8[i], sumi[2]);
        vl >>= 2; vh >>= 1;

        q8 += sizeof(block_q8_1)/4;
        aux32 = (vl & 0x03030303) | (vh & 0x04040404);
        v = int_from_table_2(aux8, values4);
        sumi[3] = ggml_cuda_dp4a(v, q8[i], sumi[3]);

    }
    // Unpack the four 4-bit low scale nibbles and bias by -16 (__vsub4).
    const uint16_t * sl16 = (const uint16_t *)bq3->scales;
    aux32 = __vsub4(((sl16[0] | (sl16[1] << 16)) >> 4*ib128) & 0x0f0f0f0f, 0x10101010);
    const int8_t * a8 = (const int8_t *)&aux32;
    *result += d * (__low2float(bq8_1[4*ib128+0].ds) * (a8[0] + ((extra << 4) & 0x10)) * sumi[0] +
                    __low2float(bq8_1[4*ib128+1].ds) * (a8[1] + ((extra << 3) & 0x10)) * sumi[1] +
                    __low2float(bq8_1[4*ib128+2].ds) * (a8[2] + ((extra << 2) & 0x10)) * sumi[2] +
                    __low2float(bq8_1[4*ib128+3].ds) * (a8[3] + ((extra << 1) & 0x10)) * sumi[3]);

}
|
||||
|
||||
__device__ __forceinline__ void vec_dot_iq3_ks_q8_1(
|
||||
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iiqs, float * result) {
|
||||
|
||||
@@ -1280,6 +1349,14 @@ void mul_mat_vec_iq4_ks_q8_1_cuda(
|
||||
iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ4_KS, VDR_IQ4_KS_Q8_1_MMVQ, vec_dot_iq4_ks_q8_1>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
|
||||
}
|
||||
|
||||
// MMVQ entry point for IQ2_KL matrix-vector products.
// NOTE(review): vec_dot_iq2_kl_q8_1 is currently a stub that returns
// immediately, so this path does not yet produce valid results. Also,
// VDR_IQ3_K_Q8_1_MMVQ is borrowed from IQ3_K — confirm it matches the
// eventual IQ2_KL dot-product tiling before enabling this path.
void mul_mat_vec_iq2_kl_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const char * ids_data,
    const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
    const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, int64_t ids_nb0, cudaStream_t stream) {

    iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ2_KL, VDR_IQ3_K_Q8_1_MMVQ, vec_dot_iq2_kl_q8_1>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
}
|
||||
|
||||
void mul_mat_vec_iq3_ks_q8_1_cuda(
|
||||
const void * vx, const void * vy, float * dst, const char * ids_data,
|
||||
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
|
||||
|
||||
@@ -16,6 +16,11 @@ void mul_mat_vec_iq3_k_q8_1_cuda(
|
||||
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
|
||||
const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream);
|
||||
|
||||
void mul_mat_vec_iq2_kl_q8_1_cuda(
|
||||
const void * vx, const void * vy, float * dst, const char * ids_data,
|
||||
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
|
||||
const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream);
|
||||
|
||||
void mul_mat_vec_iq3_ks_q8_1_cuda(
|
||||
const void * vx, const void * vy, float * dst, const char * ids_data,
|
||||
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
|
||||
|
||||
@@ -518,6 +518,9 @@ static void ggml_cuda_op_mul_mat_vec_q_impl(ggml_backend_cuda_context & ctx, ggm
|
||||
case GGML_TYPE_IQ3_K:
|
||||
mul_mat_vec_iq3_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
|
||||
break;
|
||||
case GGML_TYPE_IQ2_KL:
|
||||
mul_mat_vec_iq2_kl_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
|
||||
break;
|
||||
case GGML_TYPE_IQ3_KS:
|
||||
mul_mat_vec_iq3_ks_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
|
||||
break;
|
||||
@@ -682,6 +685,7 @@ bool ggml_cuda_mmvq_type_supported(ggml_type src0_type) {
|
||||
case GGML_TYPE_IQ4_NL:
|
||||
case GGML_TYPE_IQ4_XS:
|
||||
case GGML_TYPE_IQ2_K:
|
||||
case GGML_TYPE_IQ2_KL:
|
||||
case GGML_TYPE_IQ3_KS:
|
||||
case GGML_TYPE_IQ3_K:
|
||||
case GGML_TYPE_IQ4_K:
|
||||
|
||||
@@ -204,6 +204,7 @@ extern "C" {
|
||||
LLAMA_FTYPE_MOSTLY_IQ3_KT = 152, // except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_IQ4_KT = 153, // except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_IQ3_KS = 154, // except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_IQ2_KL = 155, // except 1d tensors
|
||||
//
|
||||
LLAMA_FTYPE_MOSTLY_Q4_0_R8 = 202, // except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_Q8_0_R8 = 207, // except 1d tensors
|
||||
|
||||
@@ -4435,6 +4435,7 @@ struct llama_model_loader {
|
||||
case GGML_TYPE_IQ2_K: ftype = LLAMA_FTYPE_MOSTLY_IQ2_K; break;
|
||||
case GGML_TYPE_IQ2_K_R4:ftype = LLAMA_FTYPE_MOSTLY_IQ2_K_R4;break;
|
||||
case GGML_TYPE_IQ3_KS: ftype = LLAMA_FTYPE_MOSTLY_IQ3_KS; break;
|
||||
case GGML_TYPE_IQ2_KL: ftype = LLAMA_FTYPE_MOSTLY_IQ2_KL; break;
|
||||
case GGML_TYPE_IQ3_K: ftype = LLAMA_FTYPE_MOSTLY_IQ3_K; break;
|
||||
case GGML_TYPE_IQ3_K_R4:ftype = LLAMA_FTYPE_MOSTLY_IQ3_K_R4;break;
|
||||
case GGML_TYPE_IQ4_K: ftype = LLAMA_FTYPE_MOSTLY_IQ4_K; break;
|
||||
@@ -5177,6 +5178,7 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
|
||||
case LLAMA_FTYPE_MOSTLY_IQ2_K: return "IQ2_K - 2.375 bpw";
|
||||
case LLAMA_FTYPE_MOSTLY_IQ2_K_R4: return "IQ2_K_R4 - 2.375 bpw";
|
||||
case LLAMA_FTYPE_MOSTLY_IQ3_KS: return "IQ3_KS - 3.1875 bpw";
|
||||
case LLAMA_FTYPE_MOSTLY_IQ2_KL: return "IQ2_KL - 2.6875 bpw";
|
||||
case LLAMA_FTYPE_MOSTLY_IQ3_K: return "IQ3_K - 3.4325 bpw";
|
||||
case LLAMA_FTYPE_MOSTLY_IQ3_K_R4: return "IQ3_K_R4 - 3.4325 bpw";
|
||||
case LLAMA_FTYPE_MOSTLY_IQ3_KL: return "IQ3_KL - 4 bpw";
|
||||
@@ -19129,7 +19131,7 @@ static ggml_type change_type_if_necessary(ggml_type new_type, int nx, int ny) {
|
||||
new_type == GGML_TYPE_IQ3_XXS_R4 || new_type == GGML_TYPE_IQ2_XXS_R4 || new_type == GGML_TYPE_IQ2_XS_R4 ||
|
||||
new_type == GGML_TYPE_IQ2_S_R4|| new_type == GGML_TYPE_IQ3_S_R4|| new_type == GGML_TYPE_IQ3_KS ||
|
||||
new_type == GGML_TYPE_IQ2_KT || new_type == GGML_TYPE_IQ3_KT || new_type == GGML_TYPE_IQ4_KT ||
|
||||
new_type == GGML_TYPE_IQ5_KS || new_type == GGML_TYPE_IQ5_KS_R4) {
|
||||
new_type == GGML_TYPE_IQ5_KS || new_type == GGML_TYPE_IQ5_KS_R4|| new_type == GGML_TYPE_IQ2_KL) {
|
||||
if (nx % QK_K != 0) {
|
||||
LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type));
|
||||
convert_incompatible_tensor = true;
|
||||
@@ -19161,6 +19163,7 @@ static ggml_type change_type_if_necessary(ggml_type new_type, int nx, int ny) {
|
||||
case GGML_TYPE_Q3_K_R4:
|
||||
case GGML_TYPE_IQ2_K:
|
||||
case GGML_TYPE_IQ2_K_R4:
|
||||
case GGML_TYPE_IQ2_KL:
|
||||
case GGML_TYPE_IQ3_KS:
|
||||
case GGML_TYPE_IQ3_K:
|
||||
case GGML_TYPE_IQ3_K_R4:
|
||||
@@ -19297,7 +19300,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
|
||||
ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ||
|
||||
ftype == LLAMA_FTYPE_MOSTLY_IQ1_M || ftype == LLAMA_FTYPE_MOSTLY_IQ2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_K ||
|
||||
ftype == LLAMA_FTYPE_MOSTLY_IQ2_KS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_K_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_KS ||
|
||||
ftype == LLAMA_FTYPE_MOSTLY_IQ2_K_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS_R4 ||
|
||||
ftype == LLAMA_FTYPE_MOSTLY_IQ2_K_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ2_KL ||
|
||||
ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M_R4 ||
|
||||
ftype == LLAMA_FTYPE_MOSTLY_IQ1_S_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M_R4 ||
|
||||
ftype == LLAMA_FTYPE_MOSTLY_IQ2_KT || ftype == LLAMA_FTYPE_MOSTLY_IQ3_KT) {
|
||||
@@ -19504,6 +19507,9 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
|
||||
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_KS && qs.model.hparams.n_gqa() >= 2) {
|
||||
new_type = GGML_TYPE_IQ4_KS;
|
||||
}
|
||||
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_KL && qs.model.hparams.n_gqa() >= 2) {
|
||||
new_type = GGML_TYPE_IQ4_KS;
|
||||
}
|
||||
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_K_R4 && qs.model.hparams.n_gqa() >= 2) {
|
||||
new_type = GGML_TYPE_IQ4_K_R4;
|
||||
}
|
||||
@@ -19549,6 +19555,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
|
||||
else if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_IQ3_S) new_type = GGML_TYPE_Q4_K;
|
||||
else if (new_type == GGML_TYPE_IQ3_K) new_type = GGML_TYPE_IQ4_K;
|
||||
else if (new_type == GGML_TYPE_IQ3_KS) new_type = GGML_TYPE_IQ4_KS;
|
||||
else if (new_type == GGML_TYPE_IQ2_KL) new_type = GGML_TYPE_IQ4_KS;
|
||||
else if (new_type == GGML_TYPE_IQ3_S_R4) new_type = GGML_TYPE_Q4_K_R4;
|
||||
else if (new_type == GGML_TYPE_Q3_K_R4) new_type = GGML_TYPE_Q4_K_R4;
|
||||
else if (new_type == GGML_TYPE_Q4_K || new_type == GGML_TYPE_IQ4_XS) new_type = GGML_TYPE_Q5_K;
|
||||
@@ -19681,6 +19688,9 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
|
||||
new_type = GGML_TYPE_Q5_K; // should the IQ_K quants be applied here as the new type for the IQ_K ftypes ?
|
||||
// also, this condition could be reproduced on attn_q, eventually with Q4_K instead of Q5_K.
|
||||
}
|
||||
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_KL) {
|
||||
new_type = GGML_TYPE_IQ4_KS;
|
||||
}
|
||||
} else {
|
||||
if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K ) new_type = GGML_TYPE_Q3_K; // This list could be generalized and streamlined
|
||||
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) new_type = GGML_TYPE_IQ3_S;
|
||||
@@ -19918,6 +19928,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
|
||||
case LLAMA_FTYPE_MOSTLY_IQ2_K: default_type = GGML_TYPE_IQ2_K; break;
|
||||
case LLAMA_FTYPE_MOSTLY_IQ2_K_R4:default_type = GGML_TYPE_IQ2_K_R4;break;
|
||||
case LLAMA_FTYPE_MOSTLY_IQ3_KS: default_type = GGML_TYPE_IQ3_KS; break;
|
||||
case LLAMA_FTYPE_MOSTLY_IQ2_KL: default_type = GGML_TYPE_IQ2_KL; break;
|
||||
case LLAMA_FTYPE_MOSTLY_IQ3_K: default_type = GGML_TYPE_IQ3_K; break;
|
||||
case LLAMA_FTYPE_MOSTLY_IQ3_K_R4:default_type = GGML_TYPE_IQ3_K_R4;break;
|
||||
case LLAMA_FTYPE_MOSTLY_IQ3_KL: default_type = GGML_TYPE_IQ3_K; break;
|
||||
|
||||
Reference in New Issue
Block a user