Mirror of https://github.com/ikawrakow/ik_llama.cpp.git (synced 2026-03-11 14:30:02 +00:00)
New iq4_kt: CUDA MMQ
@@ -100,6 +100,9 @@ void ggml_cuda_op_mul_mat_q(
         case GGML_TYPE_IQ4_KS_R4:
             mul_mat_q_case<GGML_TYPE_IQ4_KS_R4>(ctx, args, stream);
             break;
+        case GGML_TYPE_IQ4_KT:
+            mul_mat_q_case<GGML_TYPE_IQ4_KT>(ctx, args, stream);
+            break;
         case GGML_TYPE_IQ5_KS:
             mul_mat_q_case<GGML_TYPE_IQ5_KS>(ctx, args, stream);
             break;
@@ -172,6 +175,7 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) {
         case GGML_TYPE_IQ4_K:
         case GGML_TYPE_IQ5_K:
         case GGML_TYPE_IQ6_K:
+        case GGML_TYPE_IQ4_KT:
             mmq_supported = true;
             break;
         default:
@@ -93,6 +93,7 @@ static mmq_q8_1_ds_layout mmq_get_q8_1_ds_layout(const ggml_type type_x) {
         case GGML_TYPE_IQ5_KS:
         case GGML_TYPE_IQ5_KS_R4:
         case GGML_TYPE_IQ6_K:
+        case GGML_TYPE_IQ4_KT:
             return MMQ_Q8_1_DS_LAYOUT_D4;
         default:
             GGML_ABORT("fatal error");
@@ -202,6 +203,7 @@ static constexpr __host__ __device__ tile_x_sizes mmq_get_dp4a_tile_x_sizes(ggml
         case GGML_TYPE_IQ4_K  : return MMQ_DP4A_TXS_Q8_0_16;
         case GGML_TYPE_IQ5_K  : return MMQ_DP4A_TXS_Q8_0_16;
         case GGML_TYPE_IQ6_K  : return MMQ_DP4A_TXS_Q8_0_16;
+        case GGML_TYPE_IQ4_KT : return MMQ_DP4A_TXS_Q8_0;
         default               : return tile_x_sizes{0, 0, 0};
     }
 }
@@ -250,6 +252,7 @@ static constexpr __host__ __device__ int mmq_get_mma_tile_x_k(ggml_type type) {
         case GGML_TYPE_IQ4_K  : return MMQ_MMA_TILE_X_K_Q3_K;
         case GGML_TYPE_IQ5_K  : return MMQ_MMA_TILE_X_K_Q3_K;
         case GGML_TYPE_IQ6_K  : return MMQ_MMA_TILE_X_K_Q3_K;
+        case GGML_TYPE_IQ4_KT : return MMQ_MMA_TILE_X_K_Q8_0;
         default               : return 0;
     }
 }
@@ -2790,6 +2793,79 @@ template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinlin
 
 }
 
+template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_iq4_kt(
+    const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) {
+
+    constexpr uint32_t ka = 89226354;
+    constexpr uint32_t kb = 64248484;
+    constexpr uint32_t km = 0x3f3f3f3f;
+
+#ifdef INT8_MMA_AVAILABLE
+    int   * x_qs = (int   *)  x_tile;
+    float * x_df = (float *) (x_qs + WARP_SIZE*2);
+#else
+    constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ4_XS, mmq_y);
+    int   * x_qs = (int   *)  x_tile;
+    float * x_df = (float *) (x_qs + txs.qs);
+#endif // INT8_MMA_AVAILABLE
+
+    const int kqsx = threadIdx.x;
+
+#pragma unroll
+    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
+        int i = i0 + threadIdx.y;
+
+        if (need_check) {
+            i = min(i, i_max);
+        }
+
+        const block_iq4_kt * bxi = (const block_iq4_kt *)(x + i*stride + 2*sizeof(float)) + kbx0;
+
+        int ib32 = kqsx/4;
+        int j    = kqsx%4;
+        const auto shb = bxi->qs;
+        const auto ql  = (const uint8_t *)(shb + 8);
+        const auto qh  = ql + 64;
+        const uint32_t sh = shb[ib32] >> (8 + 6*j);
+        uint32_t offset = 4096 + ((shb[ib32] & 1) << 15);
+        uint32_t val1 = offset + ql[8*ib32+2*j+0] + ((qh[8*(ib32%4)+2*j+0] << (8 - 4*(ib32/4))) & 0xf00) + ((sh & 7) << 12);
+        uint32_t val2 = offset + ql[8*ib32+2*j+1] + ((qh[8*(ib32%4)+2*j+1] << (8 - 4*(ib32/4))) & 0xf00) + ((sh & 56) << 9);
+        int2 v = {0, 0};
+        for (int k = 0; k < 4; ++k) {
+            val1 = ka*val1 + kb;
+            val2 = ka*val2 + kb;
+            v.x |= (ggml_cuda_dp4a(val1 & km, 0x01010101, -126) & 0xff) << 8*k;
+            v.y |= (ggml_cuda_dp4a(val2 & km, 0x01010101, -126) & 0xff) << 8*k;
+        }
+#ifdef INT8_MMA_AVAILABLE
+        x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*ib32 + 2*j + 0] = v.x;
+        x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*ib32 + 2*j + 1] = v.y;
+#else
+        x_qs[i*(2*WARP_SIZE + 1) + 8*ib32 + 2*j + 0] = v.x;
+        x_qs[i*(2*WARP_SIZE + 1) + 8*ib32 + 2*j + 1] = v.y;
+#endif // INT8_MMA_AVAILABLE
+    }
+
+#pragma unroll
+    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
+        int i = i0 + threadIdx.y * 4 + threadIdx.x / (WARP_SIZE/4);
+
+        if (need_check) {
+            i = min(i, i_max);
+        }
+
+        const float * dptr = (const float *)(x + i*stride);
+        const block_iq4_kt * bxi = (const block_iq4_kt *)(dptr + 2) + kbx0;
+        const int ls = (bxi->qs[threadIdx.x % 8] & 0xff) >> 1;
+
+#ifdef INT8_MMA_AVAILABLE
+        x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + threadIdx.x % 8] = dptr[0] * (ls - 64);
+#else
+        x_df[i*(WARP_SIZE/4) + i/4 + threadIdx.x % 8] = dptr[0] * (ls - 64);
+#endif // INT8_MMA_AVAILABLE
+    }
+}
+
 template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_iq5_ks_r4(
     const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) {
 
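The loader above decodes iq4_kt's trellis coding on the fly. Each pair of values gets a 16-bit seed reassembled from 8 bits in ql, 4 bits in qh, and 3 bits in the 32-value block header word (whose bit 0 supplies an extra offset bit at position 15), on top of a constant 4096 base. Four LCG steps then turn each seed into four int8 quants, one per step: the sum of the four masked bytes of the state minus 126, which is what the ggml_cuda_dp4a(val & km, 0x01010101, -126) calls compute. A minimal host-side sketch of that decode, assuming only that ggml_cuda_dp4a has the semantics of CUDA's __dp4a (byte-wise dot product plus accumulator):

    // Host-side reference for the trellis decode in load_tiles_iq4_kt.
    // Constants and the -126 bias are taken from the kernel above.
    #include <cstdint>
    #include <cstdio>

    static int8_t trellis_next(uint32_t & val) {
        constexpr uint32_t ka = 89226354;   // LCG multiplier
        constexpr uint32_t kb = 64248484;   // LCG increment
        constexpr uint32_t km = 0x3f3f3f3f; // keep the low 6 bits of each byte
        val = ka*val + kb;                  // advance the generator
        const uint32_t s = val & km;        // four values in 0..63
        int sum = -126;                     // bias so the result is centered on 0
        for (int b = 0; b < 4; ++b) sum += (s >> 8*b) & 0xff;
        return (int8_t)sum;                 // one quant in [-126, 126]
    }

    int main() {
        // The kernel seeds each generator with 4096 + (offset bit << 15) plus
        // 15 index bits from ql/qh/the header; 123 is an arbitrary stand-in.
        uint32_t state = 4096 + 123;
        for (int k = 0; k < 4; ++k) printf("%d ", trellis_next(state));
        printf("\n");
        return 0;
    }

The second loop of the kernel then loads one float scale per 32-value block: the 7 header bits above the offset flag, biased by 64 and multiplied by the row-level scale dptr[0].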
@@ -3382,6 +3458,13 @@ struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_IQ4_KS_R4> {
     static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a<mmq_x, mmq_y, nwarps>;
 };
 
+template <int mmq_x, int mmq_y, int nwarps, bool need_check>
+struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_IQ4_KT> {
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq4_kt<mmq_y, nwarps, need_check>;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma<mmq_x, mmq_y, nwarps, MMQ_Q8_1_DS_LAYOUT_D4>;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a<mmq_x, mmq_y, nwarps>;
+};
+
 template <int mmq_x, int mmq_y, int nwarps, bool need_check>
 struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_IQ5_KS> {
     static constexpr load_tiles_mmq_t load_tiles = load_tiles_iq5_ks<mmq_y, nwarps, need_check>;
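Because the trellis decodes straight to int8 values with a single float scale per block of 32, iq4_kt can reuse the stock q8_0 x q8_1 dot kernels (vec_dot_q8_0_q8_1_mma/dp4a) and the Q8_0 tile geometry; only the tile loader is new. For orientation, here is a simplified, self-contained sketch of the dispatch pattern these traits plug into. Only the names follow the diff; the bodies are stand-ins:

    #include <cstdio>

    enum ggml_type { GGML_TYPE_IQ4_KT, GGML_TYPE_IQ5_KS };

    template <ggml_type T> struct mmq_type_traits;  // one specialization per quant type

    template <> struct mmq_type_traits<GGML_TYPE_IQ4_KT> {
        static void load_tiles() { puts("load_tiles_iq4_kt"); }  // stand-in for the real loader
    };

    // Instantiated once per type (via DECL_MMQ_CASE in the real code); pulls
    // its tile loader and dot product from the traits specialization.
    template <ggml_type T> static void mul_mat_q_case() {
        mmq_type_traits<T>::load_tiles();
    }

    // Runtime type -> template instantiation, as in the first hunk of this commit.
    static void ggml_cuda_op_mul_mat_q(ggml_type type) {
        switch (type) {
            case GGML_TYPE_IQ4_KT: mul_mat_q_case<GGML_TYPE_IQ4_KT>(); break;
            default: break;
        }
    }

    int main() { ggml_cuda_op_mul_mat_q(GGML_TYPE_IQ4_KT); return 0; }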
@@ -3843,6 +3926,7 @@ extern DECL_MMQ_CASE(GGML_TYPE_IQ5_K);
 extern DECL_MMQ_CASE(GGML_TYPE_IQ5_KS);
 extern DECL_MMQ_CASE(GGML_TYPE_IQ6_K);
 extern DECL_MMQ_CASE(GGML_TYPE_IQ1_S_R4);
+extern DECL_MMQ_CASE(GGML_TYPE_IQ4_KT);
 
 // -------------------------------------------------------------------------------------------------------------------------
 
@@ -7490,6 +7490,7 @@ QuantizerIQKT<block_size, group_size, num_bits, is_abs>::QuantizerIQKT(int num_c
         set_values(i, data, kScale, offset);
         data += kGroupSize;
     }
+    if (num_clusters == 0) return;
     // Make 128 clusters.
     // Note: we get a slightly better result by using 64 clusters
    // at the expense of almost doubling the quantization time.
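This early return is what enables the cheap dequantizer added below: constructing the quantizer with num_clusters == 0 now skips the cluster-building pass entirely, which is exactly how iq4kt_dequantizer() in the next hunk creates a lookup-only instance.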
@@ -8540,6 +8541,14 @@ const QuantizerIQ4KT& iq4kt_quantizer(bool with_offset = false) {
     return *quantizer1;
 }
 
+const QuantizerIQ4KT& iq4kt_dequantizer() {
+    static std::mutex mutex;
+    std::lock_guard<std::mutex> lock(mutex);
+    static std::unique_ptr<QuantizerIQ4KT> dequantizer;
+    if (!dequantizer) dequantizer = std::make_unique<QuantizerIQ4KT>(0, 0, 4096);
+    return *dequantizer;
+}
+
 void quantize_row_iq4_kt_impl(const float * x, void * vy, int n_per_row, const float * quant_weights, float * all_scales, float * all_weights) {
 
     constexpr float kSigmaScale = 2.0f;
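A note on the locking here: since the statics are function-local, C++11 already guarantees thread-safe one-time initialization, so the same effect could be had without a mutex that is also taken on every subsequent call. A hypothetical equivalent, assuming the QuantizerIQ4KT constructor signature used above:

    // Hypothetical alternative using C++11 "magic statics": the constructor
    // runs exactly once, thread-safely, and later calls take no lock.
    const QuantizerIQ4KT& iq4kt_dequantizer() {
        static QuantizerIQ4KT dequantizer(0, 0, 4096); // num_clusters == 0: no clustering pass
        return dequantizer;
    }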
@@ -8741,7 +8750,7 @@ void dequantize_row_iq4_kt(const block_iq4_kt * x, float * y, int64_t k) {
     const float d = dptr[0] * Q::kScale;
     const float row_av = dptr[1];
     x = (const block_iq4_kt *)(dptr + 2);
-    auto& deq = iq4kt_quantizer();
+    auto& deq = iq4kt_dequantizer();
     for (int ibl = 0; ibl < nb; ++ibl) {
         auto shb = x[ibl].qs;
         auto ql = (const uint8_t *)(shb + Q::kNblock);
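Both this routine and the CUDA tile loader above use the same pointer arithmetic (two floats, then the blocks), which implies the following per-row layout. A hypothetical view struct, for illustration only; block_iq4_kt and its qs field are defined elsewhere in the tree:

    // Hypothetical helper describing an iq4_kt row; not part of the commit.
    struct iq4_kt_row_view {
        float                d;       // dptr[0]: row scale (times Q::kScale on dequant)
        float                row_av;  // dptr[1]: per-row average
        const block_iq4_kt * blocks;  // superblocks start right after the two floats
    };

    static iq4_kt_row_view iq4_kt_view_row(const void * row) {
        const float * dptr = (const float *) row;
        return { dptr[0], dptr[1], (const block_iq4_kt *)(dptr + 2) };
    }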