WIP + adding iq3_kl quantization mix

This commit is contained in:
Iwan Kawrakow
2024-10-08 13:56:29 +03:00
parent 4c76471979
commit 1dd6c40c15
11 changed files with 110 additions and 10 deletions

View File

@@ -442,10 +442,10 @@ typedef struct {
static_assert(sizeof(block_iq4_xs) == sizeof(ggml_half) + sizeof(uint16_t) + QK_K/64 + QK_K/2, "wrong iq4_xs block size/padding");
typedef struct {
uint8_t scales[2];
uint8_t scales[QK_K/64];
uint8_t qs[QK_K/2];
} block_iq4_xxs;
static_assert(sizeof(block_iq4_xxs) == 2 + QK_K/2, "wrong iq4_xxs block size/padding");
static_assert(sizeof(block_iq4_xxs) == QK_K/64 + QK_K/2, "wrong iq4_xxs block size/padding");
typedef struct {
ggml_half d;

View File

@@ -2828,6 +2828,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
case GGML_TYPE_IQ3_XXS:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_XXS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:

View File

@@ -615,6 +615,29 @@ static __global__ void dequantize_block_iq4_xs(const void * __restrict__ vx, dst
}
}
// Dequantize one QK_K superblock of IQ4_XXS data into yy.
// Row layout (matches quantize_iq4_xxs in this commit): each quantized row begins
// with one float row scale, followed by the block_iq4_xxs blocks for that row,
// so blocks of the same row are found at cx + sizeof(float).
// Launch shape: gridDim.x = number of superblocks, 32 threads per block
// (see dequantize_row_iq4_xxs_cuda); each thread writes 8 output values.
template<typename dst_t>
static __global__ void dequantize_block_iq4_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {
// ii = global superblock index; map it to the row that contains it.
int64_t ii = blockIdx.x;
int64_t row = (QK_K * ii) / n_per_row;
const char * cx = (const char *)vx + row * row_size;
// Per-row float scale stored as a prefix before the blocks.
float scale = *(const float *)cx;
const block_iq4_xxs * x = (const block_iq4_xxs *)(cx + sizeof(float));
// i = superblock index within this row.
const int64_t i = ii - (row*n_per_row)/QK_K;
const int64_t tid = threadIdx.x;
const int64_t ib = tid/8; // 0...3  (64-value sub-block within the superblock)
const int64_t il = tid%8; // 0...7  (4-byte group within the sub-block)
dst_t * y = yy + ii*QK_K + 64*ib + 4*il;
const uint8_t * q4 = x[i].qs + 32*ib + 4*il;
// scales[ib]: bits 1..7 hold the block scale (biased by 127);
// bit 0 selects one of the two 16-entry halves of iq4k_values.
const float d = scale * ((x[i].scales[ib] & 254) - 127);
const int8_t * values = iq4k_values + ((x[i].scales[ib] & 1) << 4);
// Each packed byte holds two 4-bit indices: low nibble -> y[j], high nibble -> y[j+32].
for (int j = 0; j < 4; ++j) {
y[j+ 0] = d * values[q4[j] & 0xf];
y[j+32] = d * values[q4[j] >> 4];
}
}
template<typename dst_t>
static __global__ void dequantize_block_iq4_k(const void * __restrict__ vx, dst_t * __restrict__ yy) {
const int64_t i = blockIdx.x;
@@ -921,6 +944,14 @@ static void dequantize_row_iq4_xs_cuda(const void * vx, dst_t * y, const int64_t
dequantize_block_iq4_xs<<<nb, 32, 0, stream>>>(vx, y);
}
// Host-side launcher: dequantize nrows rows of IQ4_XXS data (n_per_row values
// each) into y on the given stream. One CUDA block of 32 threads handles one
// QK_K superblock; n_per_row and the per-row byte size are forwarded so the
// kernel can locate the float row-scale prefix of each row.
template<typename dst_t>
static void dequantize_row_iq4_xxs_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
    const int64_t total = nrows * n_per_row;
    const int64_t row_size = ggml_row_size(GGML_TYPE_IQ4_XXS, n_per_row);
    const int n_blocks = (total + QK_K - 1) / QK_K;  // ceil-div: one block per superblock
    dequantize_block_iq4_xxs<<<n_blocks, 32, 0, stream>>>(vx, y, n_per_row, row_size);
}
template<typename dst_t>
static void dequantize_row_iq2_k_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
const int64_t k = nrows * n_per_row;
@@ -1083,6 +1114,8 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
return dequantize_row_iq4_nl_cuda;
case GGML_TYPE_IQ4_XS:
return dequantize_row_iq4_xs_cuda;
case GGML_TYPE_IQ4_XXS:
return dequantize_row_iq4_xxs_cuda;
case GGML_TYPE_IQ2_K:
return dequantize_row_iq2_k_cuda;
case GGML_TYPE_IQ3_K:
@@ -1152,6 +1185,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
return dequantize_row_iq4_nl_cuda;
case GGML_TYPE_IQ4_XS:
return dequantize_row_iq4_xs_cuda;
case GGML_TYPE_IQ4_XXS:
return dequantize_row_iq4_xxs_cuda;
case GGML_TYPE_IQ2_K:
return dequantize_row_iq2_k_cuda;
case GGML_TYPE_IQ3_K:

View File

@@ -214,6 +214,38 @@ __device__ __forceinline__ float vec_dot_iq4_k_q8_1(
return d * (sumi1 * ls1 + sumi2 * ls2);
}
#define VDR_IQ4_XXS_Q8_1_MMVQ 4
#define VDR_IQ4_XXS_Q8_1_MMQ  4
// TODO: WIP stub — the IQ4_XXS dot product is not implemented yet and always
// returns 0.f, so mat-vec results for IQ4_XXS tensors are zero until this is
// filled in. The commented-out body below is the IQ4_K implementation kept as
// a starting point; note it casts to block_iq4_k and reads fields (d, extra,
// scales_h, scales_l) that block_iq4_xxs does not have, and it does not account
// for the per-row float scale prefix of the IQ4_XXS row layout — it cannot be
// enabled as-is.
__device__ __forceinline__ float vec_dot_iq4_xxs_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) {
return 0.f;
//
//    const block_iq4_k * bq4 = (const block_iq4_k *) vbq + kbx;
//    const uint8_t * all_values = (const uint8_t *)iq4k_values;
//
//    // iqs is 0...28
//    const int ib32 = iqs/4;
//    // Why iqs/4 ?
//    const int32_t * q8 = (const int *)bq8_1[ib32].qs;
//    const uint16_t * q4 = (const uint16_t *)bq4->qs + 8*ib32;
//    const uint16_t extra = bq4->extra >> 2*ib32;
//    int v1, v2;
//    int sumi1 = 0, sumi2 = 0;
//    for (int j = 0; j < 4; ++j) {
//        const uint32_t aux32 = q4[2*j+0] | (q4[2*j+1] << 16);
//        get_int_from_table_16_shift(aux32, extra, all_values, v1, v2);
//        sumi1 = ggml_cuda_dp4a(v1, q8[j+0], sumi1);
//        sumi2 = ggml_cuda_dp4a(v2, q8[j+4], sumi2);
//    }
//    const float d = __half2float(bq4->d) * __low2float(bq8_1[ib32].ds);
//    const uint8_t sh = bq4->scales_h[ib32/2] >> 4*(ib32%2);
//    const int ls1 = ((bq4->scales_l[ib32] & 0xf) | ((sh << 4) & 0x30)) - 32;
//    const int ls2 = ((bq4->scales_l[ib32] >> 4) | ((sh << 2) & 0x30)) - 32;
//    return d * (sumi1 * ls1 + sumi2 * ls2);
}
#define VDR_IQ5_K_Q8_1_MMVQ 4
#define VDR_IQ5_K_Q8_1_MMQ 4
@@ -612,6 +644,13 @@ void mul_mat_vec_iq4_k_q8_1_cuda(
iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ4_K, VDR_IQ4_K_Q8_1_MMVQ, vec_dot_iq4_k_q8_1>(vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream);
}
// Batched mat-vec launcher for IQ4_XXS-quantized src0 against Q8_1-quantized src1.
// WIP: vec_dot_iq4_xxs_q8_1 is currently a stub that returns 0.f, so this path
// produces zeros until the dot product is implemented.
void mul_mat_vec_iq4_xxs_q8_1_cuda(
    const void * vx, const void * vy, float * dst,
    const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) {
    // Fix: use the IQ4_XXS VDR constant instead of VDR_IQ4_K_Q8_1_MMVQ (both are 4,
    // so behavior is unchanged, but the xxs path is now self-consistent).
    // NOTE(review): the type tag is still GGML_TYPE_IQ4_K — presumably because the
    // iqk_mul_mat_vec_q_cuda traits for GGML_TYPE_IQ4_XXS do not exist yet; this
    // gives the launcher the wrong block stride for the IQ4_XXS row layout (float
    // scale prefix). Confirm and switch to GGML_TYPE_IQ4_XXS when implementing
    // vec_dot_iq4_xxs_q8_1.
    iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ4_K, VDR_IQ4_XXS_Q8_1_MMVQ, vec_dot_iq4_xxs_q8_1>(vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream);
}
void mul_mat_vec_iq5_k_q8_1_cuda(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) {

View File

@@ -28,3 +28,7 @@ void mul_mat_vec_iq1_tn_q8_1_cuda(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream);
// Batched mat-vec for IQ4_XXS-quantized src0 against Q8_1-quantized src1.
// (WIP: the underlying vec_dot_iq4_xxs_q8_1 is currently a stub returning 0.f.)
void mul_mat_vec_iq4_xxs_q8_1_cuda(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream);

View File

@@ -459,6 +459,9 @@ void ggml_cuda_op_mul_mat_vec_q(
case GGML_TYPE_IQ4_K:
mul_mat_vec_iq4_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream);
break;
case GGML_TYPE_IQ4_XXS:
mul_mat_vec_iq4_xxs_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream);
break;
case GGML_TYPE_IQ5_K:
mul_mat_vec_iq5_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream);
break;

View File

@@ -14947,7 +14947,7 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte
return false;
}
if (type != GGML_TYPE_IQ2_TN && type != GGML_TYPE_IQ1_TN && nbytes % ggml_type_size(type) != 0) {
if (type != GGML_TYPE_IQ2_TN && type != GGML_TYPE_IQ1_TN && type != GGML_TYPE_IQ4_XXS && nbytes % ggml_type_size(type) != 0) {
fprintf(stderr, "%s: invalid size %zu for type %s (type size = %zu)\n", __func__, nbytes, ggml_type_name(type), ggml_type_size(type));
return false;
}
@@ -15166,6 +15166,7 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte
case GGML_TYPE_IQ6_K: break;
case GGML_TYPE_IQ2_TN: break;
case GGML_TYPE_IQ1_TN: break;
case GGML_TYPE_IQ4_XXS: break;
case GGML_TYPE_Q4_0_4_4:
case GGML_TYPE_Q4_0_4_8:
{

View File

@@ -2175,7 +2175,7 @@ static void quantize_row_iq4_k_impl_bs128(const int super_block_size, const int
const float * quant_weights,
const int ntry) {
GGML_ASSERT(super_block_size == 256 && block_size == 128);
//GGML_ASSERT(super_block_size == 256 && block_size == 128);
float * dptr = (float *)cy;
block_iq4_xxs * y = (block_iq4_xxs *)(dptr + 1);
@@ -2286,7 +2286,7 @@ static void quantize_row_iq4_k_impl_bs128(const int super_block_size, const int
if (!d) return;
float id = d ? 1/d : 0.f;
float sumqx = 0, sumq2 = 0;
float mse = 0;
//float mse = 0;
for (int ibl = 0; ibl < n_per_row/super_block_size; ++ibl) {
const float * xbl = x + ibl*super_block_size;
float sigma2 = 0;
@@ -2320,8 +2320,8 @@ static void quantize_row_iq4_k_impl_bs128(const int super_block_size, const int
float q2 = block_values[i2]*l;
sumqx += w1*q1*xb[j] + w2*q2*xb[j+block_size/2];
sumq2 += w1*q1*q1 + w2*q2*q2;
float diff = xb[j] - d*q1; mse += diff*diff;
diff = xb[j+block_size/2] - d*q2; mse += diff*diff;
//float diff = xb[j] - d*q1; mse += diff*diff;
//diff = xb[j+block_size/2] - d*q2; mse += diff*diff;
}
}
}
@@ -2340,7 +2340,7 @@ void quantize_row_iq4_xxs(const float * x, void * y, int64_t k) {
size_t quantize_iq4_xxs(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
//printf("============ %s(%d, %d)\n", __func__, int(nrows), int(n_per_row));
constexpr int kBlockSize = 128;
constexpr int kBlockSize = 64; //128;
GGML_ASSERT(n_per_row%QK_K == 0);
auto row_size = ggml_row_size(GGML_TYPE_IQ4_XXS, n_per_row);
char * qrow = (char *)dst;
@@ -2355,7 +2355,7 @@ size_t quantize_iq4_xxs(const float * src, void * dst, int64_t nrows, int64_t n_
}
void dequantize_row_iq4_xxs(const block_iq4_xxs * x, float * y, int64_t k) {
constexpr int kBlockSize = 128;
constexpr int kBlockSize = 64; //128;
GGML_ASSERT(k%QK_K == 0);
const float * dptr = (const float *)x;
float d = *dptr;