iq3_ks: CUDA mmvq

This commit is contained in:
Iwan Kawrakow
2025-06-30 18:25:23 +03:00
parent 73c10b8243
commit a2a134673d
2 changed files with 72 additions and 24 deletions

View File

@@ -405,29 +405,6 @@ __device__ __forceinline__ void vec_dot_iq4_ks_q8_1(
*result += dl * __low2float(bq8_1[ib32].ds) * sumi;
}
// TODO: stale placeholder — this body was copied from vec_dot_iq4_ks_q8_1
// above and still decodes the data as block_iq4_ks (4-bit nibbles through
// iq4k_values), not as IQ3_KS.
__device__ __forceinline__ void vec_dot_iq3_ks_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
// Row layout: one fp32 per-row scale first, then the block array.
float scale = *(const float *)vbq;
const block_iq4_ks * bq4 = (const block_iq4_ks *)((const char *)vbq + sizeof(float)) + kbx;
const uint8_t * all_values = (const uint8_t *)iq4k_values;
// iqs is 0...28
const int ib32 = iqs/4; // iqs appears to advance in steps of 4, so iqs/4 indexes the 32-quant sub-block — TODO confirm the step size against the caller
const int32_t * q8 = (const int *)bq8_1[ib32].qs;
const uint32_t * q4 = (const uint32_t *)bq4->qs + 4*ib32;
// Bits 1..7 of the scale byte hold the magnitude (biased by 127); bit 0 is
// consumed below as the shift flag for the value table.
const float dl = scale * ((bq4->scales[ib32] & 254) - 127);
int v1, v2;
int sumi = 0;
for (int j = 0; j < 4; ++j) {
// Expand 8 packed nibbles of q4[j] into two 4x int8 words v1/v2 via the
// (possibly shifted) lookup table, then dot them with the q8 quants.
get_int_from_table_16_shift(q4[j], bq4->scales[ib32] & 1, all_values, v1, v2);
sumi = ggml_cuda_dp4a(v1, q8[j+0], sumi);
sumi = ggml_cuda_dp4a(v2, q8[j+4], sumi);
}
// Scale by the q8_1 block's d (low half of ds) and accumulate into *result.
*result += dl * __low2float(bq8_1[ib32].ds) * sumi;
}
__device__ __forceinline__ void vec_dot_iq4_kt_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
@@ -1127,6 +1104,73 @@ __device__ __forceinline__ void vec_dot_iq3_k_q8_1(
}
// IQ3_KS x q8_1 dot product for mmvq.
// Row layout at vbq: one fp16 per-row scale followed by an array of
// block_iq3_ks. Quants are 3-bit: 2 low bits come from qs (0x03 mask) and
// 1 high bit from qh (placed at 0x04 after the <<2 below). The low 8 bits
// of bq3->extra carry a +16 scale-offset flag per 32-quant sub-block; the
// high 8 bits select between the two 64-entry halves of iq3k_table.
// iiqs is divided by 4 (thread step); each call accumulates 8 quants in
// each of the four 32-quant sub-blocks of one 128-quant half into *result.
__device__ __forceinline__ void vec_dot_iq3_ks_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iiqs, float * result) {
float d = __half2float(*(const half *)vbq);
const block_iq3_ks * bq3 = (const block_iq3_ks *)((const char *)vbq + sizeof(half)) + kbx;
int iqs = iiqs/4;
const int ib128 = iqs/4; // 0 or 1. 0 works on quants 0...127, 1 on quants 128...255
// Each thread processes 8 quants in each of the 4 32-blocks
const int il8 = iqs%4; // 0...3. 0 works on quants 0...7, 1 on quants 8...15, 2 on 16...23, 3 on 24...31
const uint16_t * ql = (const uint16_t *)bq3->qs + 16*ib128 + 4*il8;
const uint16_t * qh = (const uint16_t *)bq3->qh + 4*il8;
int32_t aux32;
const uint8_t * aux8 = (const uint8_t *)&aux32;
uint16_t extra = bq3->extra >> 4*ib128;
uint16_t extra_v = extra >> 8;
// Each selector bit, when set, offsets the table pointer by 0x40 to the
// second half of iq3k_table for the corresponding 32-quant sub-block.
const uint16_t * values1 = iq3k_table + ((extra_v << 6) & 0x40);
const uint16_t * values2 = iq3k_table + ((extra_v << 5) & 0x40);
const uint16_t * values3 = iq3k_table + ((extra_v << 4) & 0x40);
const uint16_t * values4 = iq3k_table + ((extra_v << 3) & 0x40);
const int * q8;
int sumi[4] = {0, 0, 0, 0};
int v;
for (int i = 0; i < 2; ++i) {
// 16 low 2-bit pairs, and the matching high bits pre-shifted so each
// high bit lands at bit 2 of its byte (the 0x04040404 position).
uint32_t vl = ql[2*i+0] | (ql[2*i+1] << 16);
uint32_t vh = ((qh[2*i+0] | (qh[2*i+1] << 16)) >> 4*ib128) << 2;
// q8 walks the four consecutive q8_1 blocks of this 128-quant half;
// stepping by sizeof(block_q8_1)/4 ints jumps to the next block's qs.
q8 = (const int *)bq8_1[4*ib128+0].qs + 2*il8;
aux32 = (vl & 0x03030303) | (vh & 0x04040404);
v = int_from_table_2(aux8, values1);
sumi[0] = ggml_cuda_dp4a(v, q8[i], sumi[0]);
vl >>= 2; vh >>= 1;
q8 += sizeof(block_q8_1)/4;
aux32 = (vl & 0x03030303) | (vh & 0x04040404);
v = int_from_table_2(aux8, values2);
sumi[1] = ggml_cuda_dp4a(v, q8[i], sumi[1]);
vl >>= 2; vh >>= 1;
q8 += sizeof(block_q8_1)/4;
aux32 = (vl & 0x03030303) | (vh & 0x04040404);
v = int_from_table_2(aux8, values3);
sumi[2] = ggml_cuda_dp4a(v, q8[i], sumi[2]);
vl >>= 2; vh >>= 1;
q8 += sizeof(block_q8_1)/4;
aux32 = (vl & 0x03030303) | (vh & 0x04040404);
v = int_from_table_2(aux8, values4);
sumi[3] = ggml_cuda_dp4a(v, q8[i], sumi[3]);
}
// Unpack the four 4-bit sub-block scales of this half and subtract the
// bias of 16 per byte; the low extra bits add 16 back for flagged blocks.
const uint16_t * sl16 = (const uint16_t *)bq3->scales;
aux32 = __vsub4(((sl16[0] | (sl16[1] << 16)) >> 4*ib128) & 0x0f0f0f0f, 0x10101010);
const int8_t * a8 = (const int8_t *)&aux32;
*result += d * (__low2float(bq8_1[4*ib128+0].ds) * (a8[0] + ((extra << 4) & 0x10)) * sumi[0] +
__low2float(bq8_1[4*ib128+1].ds) * (a8[1] + ((extra << 3) & 0x10)) * sumi[1] +
__low2float(bq8_1[4*ib128+2].ds) * (a8[2] + ((extra << 2) & 0x10)) * sumi[2] +
__low2float(bq8_1[4*ib128+3].ds) * (a8[3] + ((extra << 1) & 0x10)) * sumi[3]);
}
__device__ __forceinline__ void vec_dot_iq1_bn_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
@@ -1330,7 +1374,7 @@ void mul_mat_vec_iq3_ks_q8_1_cuda(
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, int64_t ids_nb0, cudaStream_t stream) {
iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ3_KS, VDR_IQ4_KS_Q8_1_MMVQ, vec_dot_iq3_ks_q8_1>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ3_KS, VDR_IQ3_K_Q8_1_MMVQ, vec_dot_iq3_ks_q8_1>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
}
void mul_mat_vec_iq4_kt_q8_1_cuda(

View File

@@ -518,6 +518,9 @@ static void ggml_cuda_op_mul_mat_vec_q_impl(ggml_backend_cuda_context & ctx, ggm
case GGML_TYPE_IQ3_K:
mul_mat_vec_iq3_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
break;
case GGML_TYPE_IQ3_KS:
mul_mat_vec_iq3_ks_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
break;
case GGML_TYPE_IQ4_K:
mul_mat_vec_iq4_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
break;
@@ -679,6 +682,7 @@ bool ggml_cuda_mmvq_type_supported(ggml_type src0_type) {
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ3_KS:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ4_KS: