mirror of
https://github.com/ikawrakow/ik_llama.cpp.git
synced 2026-02-24 07:04:11 +00:00
New iq4_kt: CUDA MMVQ
This commit is contained in:
@@ -648,6 +648,13 @@ struct ggml_cuda_type_traits<GGML_TYPE_IQ3_S> {
|
||||
static constexpr int qi = QI3_S;
|
||||
};
|
||||
|
||||
// Type traits consumed by the MMVQ machinery for GGML_TYPE_IQ4_KT blocks.
template<>
struct ggml_cuda_type_traits<GGML_TYPE_IQ4_KT> {
    static constexpr int qk = QK_K;    // quants per (super-)block
    static constexpr int qr = QR4_XS;  // quant ratio; reuses the IQ4_XS value
    static constexpr int qi = QI4_XS;  // 32-bit ints per block; reuses the IQ4_XS value
};
|
||||
|
||||
//////////////////////
|
||||
|
||||
struct ggml_cuda_device_info {
|
||||
|
||||
@@ -433,6 +433,44 @@ __device__ __forceinline__ void vec_dot_iq4_ks_q8_1(
|
||||
*result += dl * __low2float(bq8_1[ib32].ds) * sumi;
|
||||
}
|
||||
|
||||
// Dot product of part of one iq4_kt block with the matching q8_1 block,
// accumulated into *result. Called per-thread from the MMVQ kernel template.
//
// iq4_kt stores a 16-bit index per group of 8 quants; each quant is
// regenerated on the fly by advancing val = ka*val + kb and summing the four
// km-masked bytes of val (dp4a against 0x01010101) with a -126 bias.
__device__ __forceinline__ void vec_dot_iq4_kt_q8_1(
    const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {

    // Generator constants: multiplier/addend of the val = ka*val + kb
    // recurrence, and the byte mask applied before the dp4a byte-sum.
    constexpr uint32_t ka = 89226354;
    constexpr uint32_t kb = 64248484;
    constexpr uint32_t km = 0x3f3f3f3f;

    // Row data layout: 2 leading floats, then the block_iq4_kt array.
    // Only the first float (the row scale) is read here.
    // NOTE(review): the second float is assumed unused by this path — confirm
    // against the quantization code.
    float scale = *(const float *)vbq;
    const block_iq4_kt * bq4 = (const block_iq4_kt *)((const char *)vbq + 2*sizeof(float)) + kbx;

    // iqs is 0...28
    const int ib32 = iqs/4; // Why iqs/4 ?  (selects which 32-quant sub-block this thread handles)
    const int32_t * q8 = (const int *)bq8_1[ib32].qs;
    //const int8_t * q8 = bq8_1[ib32].qs;
    // Low byte of qs[ib32]: bit 0 extends the group index (folded into idx0
    // below), bits 1..7 hold the 7-bit sub-block scale, biased by 64.
    const int ls = (bq4->qs[ib32] & 0xff) >> 1;
    const float dl = scale * (ls - 64);
    const uint32_t idx0 = ((bq4->qs[ib32] & 1) << 15) + 4096;
    auto ql = (const uint8_t *)(bq4->qs + 8);  // low 8 bits of each group index
    auto qh = ql + 64;                         // packed high index bits, shared between sub-block pairs
    ql += 8*ib32;
    qh += 8*(ib32%4);
    const int shift1 = 8 - 4*(ib32/4);         // picks low/high nibble of qh[j] for this half
    int sumi = 0;
    for (int j = 0; j < 8; ++j) {
        // 3 more index bits per group come from the upper 24 bits of qs[ib32].
        const uint32_t sh = bq4->qs[ib32] >> (8 + 3*j);
        uint32_t val = ql[j] + ((qh[j] << shift1) & 0xf00) + ((sh & 7) << 12) + idx0;
        int v4 = 0;
        for (int k = 0; k < 4; ++k) {
            val = ka*val + kb;  // advance the generator
            //int s = val & km;
            //sumi += q8[4*j+k] * ggml_cuda_dp4a(s, 0x01010101, -126);
            // Sum of the 4 masked bytes minus 126 is the signed 8-bit quant;
            // pack 4 of them into v4 so one dp4a handles 4 q8 products.
            v4 |= (ggml_cuda_dp4a(val & km, 0x01010101, -126) & 0xff) << 8*k;
        }
        sumi = ggml_cuda_dp4a(v4, q8[j], sumi);
    }
    // Apply block scale times the q8_1 activation scale (low half of ds).
    *result += dl * __low2float(bq8_1[ib32].ds) * sumi;
}
|
||||
|
||||
#define VDR_IQ4_KSS_Q8_1_MMVQ 4
|
||||
#define VDR_IQ4_KSS_Q8_1_MMQ 4
|
||||
|
||||
@@ -1217,6 +1255,14 @@ void mul_mat_vec_iq4_ks_q8_1_cuda(
|
||||
iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ4_KS, VDR_IQ4_KS_Q8_1_MMVQ, vec_dot_iq4_ks_q8_1>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
|
||||
}
|
||||
|
||||
// Host-side launcher for the iq4_kt MMVQ path: instantiates the generic
// iqk_mul_mat_vec_q_cuda kernel template with the iq4_kt dot-product above
// and forwards all matrix/stride/indirection parameters unchanged.
// NOTE(review): reuses VDR_IQ4_KS_Q8_1_MMVQ — no dedicated VDR_IQ4_KT_*
// macro is visible in this file; confirm the KS value is intentional here.
void mul_mat_vec_iq4_kt_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const char * ids_data,
    const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
    const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, int64_t ids_nb0, cudaStream_t stream) {

    iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ4_KT, VDR_IQ4_KS_Q8_1_MMVQ, vec_dot_iq4_kt_q8_1>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
}
|
||||
|
||||
void mul_mat_vec_iq4_kss_q8_1_cuda(
|
||||
const void * vx, const void * vy, float * dst, const char * ids_data,
|
||||
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
|
||||
|
||||
@@ -100,3 +100,8 @@ void mul_mat_vec_iq1_m_r4_q8_1_cuda(
|
||||
const void * vx, const void * vy, float * dst, const char * ids_data,
|
||||
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
|
||||
const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream);
|
||||
|
||||
// Launcher for mul-mat-vec with an iq4_kt-quantized src0 against q8_1-quantized
// src1 (definition lives in the corresponding .cu file).
void mul_mat_vec_iq4_kt_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const char * ids_data,
    const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
    const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream);
|
||||
|
||||
@@ -526,6 +526,9 @@ static void ggml_cuda_op_mul_mat_vec_q_impl(ggml_backend_cuda_context & ctx, ggm
|
||||
break;
|
||||
case GGML_TYPE_IQ4_KSS:
    mul_mat_vec_iq4_kss_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
    // Restored missing break: without it IQ4_KSS fell through and ALSO
    // launched the IQ4_KT kernel, corrupting dst for IQ4_KSS tensors.
    break;
case GGML_TYPE_IQ4_KT:
    mul_mat_vec_iq4_kt_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
    break;
    // (the stray second `break;` that followed was unreachable dead code)
|
||||
case GGML_TYPE_IQ2_KS:
|
||||
mul_mat_vec_iq2_ks_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
|
||||
@@ -687,6 +690,7 @@ bool ggml_cuda_mmvq_type_supported(ggml_type src0_type) {
|
||||
case GGML_TYPE_IQ5_KS_R4:
|
||||
case GGML_TYPE_IQ1_S_R4:
|
||||
case GGML_TYPE_IQ1_M_R4:
|
||||
case GGML_TYPE_IQ4_KT:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
|
||||
Reference in New Issue
Block a user