mmq_id: adding iq4_k, iq4_k_r4

Iwan Kawrakow
2025-08-25 15:48:29 +03:00
parent b0afe8dc20
commit 20ff716428
3 changed files with 178 additions and 0 deletions

View File

@@ -228,6 +228,12 @@ static void ggml_cuda_mul_mat_q_switch_type_id(ggml_backend_cuda_context & ctx,
        case GGML_TYPE_IQ4_KS_R4:
            mul_mat_q_case_id<GGML_TYPE_IQ4_KS_R4>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ4_K:
            mul_mat_q_case_id<GGML_TYPE_IQ4_K>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ4_K_R4:
            mul_mat_q_case_id<GGML_TYPE_IQ4_K_R4>(ctx, args, stream);
            break;
        default:
            GGML_ABORT("fatal error");
            break;
@@ -463,6 +469,8 @@ bool ggml_cuda_can_use_mmq_id(enum ggml_type type, int cc, int64_t ne11) {
        case GGML_TYPE_IQ4_KSS:
        case GGML_TYPE_IQ4_KS:
        case GGML_TYPE_IQ4_KS_R4:
        case GGML_TYPE_IQ4_K:
        case GGML_TYPE_IQ4_K_R4:
            mmq_supported = true;
            break;
        default:
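
Both hunks follow the existing pattern for the *_id MMQ path (the indirect MUL_MAT_ID multiplications used for MoE experts): a quantization type is first whitelisted in ggml_cuda_can_use_mmq_id, and the dispatcher in ggml_cuda_mul_mat_q_switch_type_id then instantiates the matching kernel. A minimal caller-side sketch of that flow; only the two functions above come from the patch, the surrounding names are illustrative:

    // Hedged sketch: gate the MMQ-ID path on type support before dispatching.
    if (ggml_cuda_can_use_mmq_id(src0->type, cc, ne11)) {
        // reaches the new GGML_TYPE_IQ4_K / GGML_TYPE_IQ4_K_R4 cases added above
        ggml_cuda_mul_mat_q_switch_type_id(ctx, args, stream);
    } else {
        // otherwise fall back to the generic dequantize + GEMM path
    }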

View File

@@ -90,6 +90,8 @@ static mmq_q8_1_ds_layout mmq_get_q8_1_ds_layout(const ggml_type type_x) {
        case GGML_TYPE_IQ4_KSS:
        case GGML_TYPE_IQ4_KS:
        case GGML_TYPE_IQ4_KS_R4:
        case GGML_TYPE_IQ4_K:
        case GGML_TYPE_IQ4_K_R4:
            return MMQ_Q8_1_DS_LAYOUT_D4;
        default:
            GGML_ABORT("fatal error");
@@ -390,6 +392,8 @@ static constexpr __host__ __device__ tile_x_sizes mmq_get_dp4a_tile_x_sizes(ggml
        case GGML_TYPE_IQ4_KSS  : return MMQ_DP4A_TXS_Q8_0;
        case GGML_TYPE_IQ4_KS   : return MMQ_DP4A_TXS_Q8_0;
        case GGML_TYPE_IQ4_KS_R4: return MMQ_DP4A_TXS_Q8_0;
        case GGML_TYPE_IQ4_K    : return MMQ_DP4A_TXS_Q8_0_16;
        case GGML_TYPE_IQ4_K_R4 : return MMQ_DP4A_TXS_Q8_0_16;
        default                 : return tile_x_sizes{0, 0, 0};
    }
}
@@ -438,6 +442,8 @@ static constexpr __host__ __device__ int mmq_get_mma_tile_x_k(ggml_type type) {
        case GGML_TYPE_IQ4_KSS  : return MMQ_MMA_TILE_X_K_Q8_0;
        case GGML_TYPE_IQ4_KS   : return MMQ_MMA_TILE_X_K_Q8_0;
        case GGML_TYPE_IQ4_KS_R4: return MMQ_MMA_TILE_X_K_Q8_0;
        case GGML_TYPE_IQ4_K    : return MMQ_MMA_TILE_X_K_Q3_K;
        case GGML_TYPE_IQ4_K_R4 : return MMQ_MMA_TILE_X_K_Q3_K;
        default                 : return 0;
    }
}
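
Both new types are routed to the per-16-scale tile geometry (MMQ_DP4A_TXS_Q8_0_16 on the dp4a path, MMQ_MMA_TILE_X_K_Q3_K on the mma path) rather than the per-32 Q8_0 layout that IQ4_KS uses. The reason is visible in the loaders added below: IQ4_K carries two 6-bit scales per 32-value sub-block, i.e. one scale per 16 weights, which is the granularity the Q3_K-style tile layout already provides. A rough check, assuming QK_K == 256:

    scales per super-block = QK_K/16 = 256/16 = 16   (IQ4_K, IQ4_K_R4 -> Q8_0_16 / Q3_K tiles)
    scales per super-block = QK_K/32 = 256/32 = 8    (IQ4_KS and friends -> Q8_0 tiles)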
@@ -3981,5 +3987,7 @@ extern DECL_MMQ_CASE(GGML_TYPE_IQ3_K_R4);
extern DECL_MMQ_CASE(GGML_TYPE_IQ4_KSS);
extern DECL_MMQ_CASE(GGML_TYPE_IQ4_KS);
extern DECL_MMQ_CASE(GGML_TYPE_IQ4_KS_R4);
extern DECL_MMQ_CASE(GGML_TYPE_IQ4_K);
extern DECL_MMQ_CASE(GGML_TYPE_IQ4_K_R4);
// -------------------------------------------------------------------------------------------------------------------------

View File

@@ -0,0 +1,162 @@
// This file has been autogenerated by generate_cu_files.py, do not edit manually.
#include "../mmq_id_common.cuh"

template <int mmq_y, bool need_check> static __device__ __forceinline__ void load_tiles_iq4_k(
    const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) {
    constexpr int nwarps = mmq_get_nwarps_device();

#ifdef INT8_MMA_AVAILABLE
    int   * x_qs = (int   *) x_tile;
    float * x_df = (float *) (x_qs + WARP_SIZE*2);
#else
    constexpr tile_x_sizes txs = MMQ_DP4A_TXS_Q8_0_16;
    int   * x_qs = (int   *) x_tile;
    float * x_df = (float *) (x_qs + txs.qs);
#endif // INT8_MMA_AVAILABLE

    constexpr int qstep = 8;
    const int kqsx = threadIdx.x % qstep; // which 32-value sub-block of the super-block this thread unpacks

    uint32_t aux32[2];
    const uint8_t * aux8 = (const uint8_t *)aux32;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * WARP_SIZE/qstep) {
        int i = i0 + threadIdx.y*(WARP_SIZE/qstep) + threadIdx.x/qstep;

        if (need_check) {
            i = min(i, i_max);
        }

        const block_iq4_k * bxi = (const block_iq4_k *)(x + i*stride) + kbx0;

        // two bits of "extra" per sub-block select the iq4k_table half used for the low/high 16 values
        const uint16_t extra = bxi->extra >> 2*kqsx;
        auto values_l = iq4k_table + ((extra & 1) << 8);
        auto values_h = iq4k_table + ((extra & 2) << 7);

#pragma unroll
        for (int l = 0; l < qstep/2; ++l) {
            const int q4 = get_int_b4(bxi->qs, (qstep/2)*kqsx + l);
            aux32[0] = (q4 >> 0) & 0x0f0f0f0f;
            aux32[1] = (q4 >> 4) & 0x0f0f0f0f;
            int val0 = int_from_table_x(aux8+0, values_l);
            int val1 = int_from_table_x(aux8+4, values_h);
#ifdef INT8_MMA_AVAILABLE
            x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + l + 0] = val0;
            x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + l + 4] = val1;
#else
            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + l + 0] = val0;
            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + l + 4] = val1;
#endif // INT8_MMA_AVAILABLE
        }

        // 6-bit scales with a -32 bias: low 4 bits in scales_l, upper 2 bits packed into scales_h
        const uint8_t sh = bxi->scales_h[kqsx/2] >> 4*(kqsx%2);
        const int ls1 = ((bxi->scales_l[kqsx] & 0xf) | ((sh << 4) & 0x30)) - 32;
        const int ls2 = ((bxi->scales_l[kqsx] >> 4) | ((sh << 2) & 0x30)) - 32;
        const float d = bxi->d;

#ifdef INT8_MMA_AVAILABLE
        x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+0] = d * ls1;
        x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+1] = d * ls2;
#else
        x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+0] = d * ls1;
        x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+1] = d * ls2;
#endif // INT8_MMA_AVAILABLE
    }
}
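
// For reference, the scale decode above written out for one 32-value sub-block kqsx (0..7)
// of an IQ4_K super-block: the low 4 scale bits live in scales_l, the top 2 bits in scales_h,
// with a bias of -32. Illustrative sketch only (field names follow the kernel), not part of
// the committed file.
static inline void iq4_k_ref_scales(const uint8_t * scales_l, const uint8_t * scales_h,
                                    float d, int kqsx, float & dl1, float & dl2) {
    const uint8_t sh = scales_h[kqsx/2] >> 4*(kqsx%2);              // two 2-bit high fields per byte
    dl1 = d * (((scales_l[kqsx] & 0xf) | ((sh << 4) & 0x30)) - 32); // scale of the 16 low-nibble values
    dl2 = d * (((scales_l[kqsx] >> 4) | ((sh << 2) & 0x30)) - 32);  // scale of the 16 high-nibble values
}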

template <int mmq_y, bool need_check> static __device__ __forceinline__ void load_tiles_iq4_k_r4(
    const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) {
    constexpr int nwarps = mmq_get_nwarps_device();

#ifdef INT8_MMA_AVAILABLE
    int   * x_qs = (int   *) x_tile;
    float * x_df = (float *) (x_qs + WARP_SIZE*2);
#else
    constexpr tile_x_sizes txs = MMQ_DP4A_TXS_Q8_0_16;
    int   * x_qs = (int   *) x_tile;
    float * x_df = (float *) (x_qs + txs.qs);
#endif // INT8_MMA_AVAILABLE

    const int kqsx = threadIdx.x/4; // 0...7 -> block of 32

    uint32_t aux32[4];
    const uint8_t * aux8 = (const uint8_t *)aux32;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += 4*nwarps) {
        int i = i0 + 4*threadIdx.y + threadIdx.x%4;

        if (need_check) {
            i = min(i, i_max);
        }

        int i4 = i/4; // group of 4 interleaved rows
        int ir = i%4; // row within that group

        const block_iq4_k_r4 * bxi = (const block_iq4_k_r4 *)(x + 4*i4*stride) + kbx0;

        const float d = __half2float(bxi->d[ir]);

#pragma unroll
        for (int l = 0; l < 2; ++l) {
            auto values_l = iq4k_table + ((bxi->extra[ir+4*l] << (8 - kqsx)) & 0x100);
            const int ql1 = get_int_b4(bxi->qs, 16*kqsx + ir + 4*l + 0);
            const int ql2 = get_int_b4(bxi->qs, 16*kqsx + ir + 4*l + 8);
            aux32[0] = (ql1 >> 0) & 0x0f0f0f0f;
            aux32[1] = (ql1 >> 4) & 0x0f0f0f0f;
            aux32[2] = (ql2 >> 0) & 0x0f0f0f0f;
            aux32[3] = (ql2 >> 4) & 0x0f0f0f0f;
#ifdef INT8_MMA_AVAILABLE
            x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + 4*l + 0] = int_from_table_x(aux8+ 0, values_l);
            x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + 4*l + 2] = int_from_table_x(aux8+ 4, values_l);
            x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + 4*l + 1] = int_from_table_x(aux8+ 8, values_l);
            x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + 4*l + 3] = int_from_table_x(aux8+12, values_l);
#else
            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + 4*l + 0] = int_from_table_x(aux8+ 0, values_l);
            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + 4*l + 2] = int_from_table_x(aux8+ 4, values_l);
            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + 4*l + 1] = int_from_table_x(aux8+ 8, values_l);
            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + 4*l + 3] = int_from_table_x(aux8+12, values_l);
#endif // INT8_MMA_AVAILABLE
        }

        // 6-bit scales (bias -32), interleaved over the 4 rows of the block
        int is = 8*kqsx + ir;
        float dl1 = d * ((((bxi->scales_l[is%32] >> 4*(is/32)) & 0xf) | (((bxi->scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32);
        is += 4;
        float dl2 = d * ((((bxi->scales_l[is%32] >> 4*(is/32)) & 0xf) | (((bxi->scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32);

#ifdef INT8_MMA_AVAILABLE
        x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+0] = dl1;
        x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+1] = dl2;
#else
        x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+0] = dl1;
        x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+1] = dl2;
#endif // INT8_MMA_AVAILABLE
    }
}
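
// Companion reference for the row-interleaved layout above: for row ir (0..3) of a 4-row
// group and sub-block kqsx (0..7), the 6-bit scale of the k-th 16-value half (k = 0, 1)
// sits at is = 8*kqsx + ir + 4*k, with the low 4 bits in scales_l and the top 2 bits in
// scales_h. Illustrative sketch only, not part of the committed file.
static inline float iq4_k_r4_ref_scale(const uint8_t * scales_l, const uint8_t * scales_h,
                                       float d, int kqsx, int ir, int k) {
    const int is = 8*kqsx + ir + 4*k;
    const int lo = (scales_l[is%32] >> 4*(is/32)) & 0xf;
    const int hi = (scales_h[is%16] >> 2*(is/16)) & 3;
    return d * ((lo | (hi << 4)) - 32);
}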

template <int mmq_x, int mmq_y, bool need_check>
struct mmq_type_traits_id<mmq_x, mmq_y, need_check, GGML_TYPE_IQ4_K> {
    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq4_k<mmq_y, need_check>;
    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_16_q8_1_mma<mmq_x, mmq_y>;
    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_16_q8_1_dp4a<mmq_x, mmq_y>;
};

template <int mmq_x, int mmq_y, bool need_check>
struct mmq_type_traits_id<mmq_x, mmq_y, need_check, GGML_TYPE_IQ4_K_R4> {
    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq4_k_r4<mmq_y, need_check>;
    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_16_q8_1_mma<mmq_x, mmq_y>;
    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_16_q8_1_dp4a<mmq_x, mmq_y>;
};

DECL_MMQ_CASE(GGML_TYPE_IQ4_K);
DECL_MMQ_CASE(GGML_TYPE_IQ4_K_R4);