Moving 4D gemm logic from ggml.c to iqk_mul_mat.cpp (#207)

This allows us to optimize TG performance for GQA models.
E.g., for IQ4_XS L3-8B at a context of 8k tokens, TG-64 goes from 8.6 to 10.26 t/s.
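
For context: in a GQA model, r2 = ne12/ne02 query heads share one KV head, and during token generation each gemm has Ny == 1. The fast path added below (see iqk_mul_mat_4d in the second file) therefore fuses the r2 matrix-vector products that share a KV head into a single Nx x r2 gemm. A minimal standalone sketch of that fusion, with made-up toy sizes (not the actual kernel):

    // Toy illustration: r2 gemv products A*b_j against the same A collapse into
    // one gemm A*B, where B stacks the r2 query rows. Same result, one pass over A.
    #include <cstdio>
    #include <vector>

    int main() {
        const int Nx = 4, ne00 = 3, r2 = 2;  // rows of A, inner dim, query heads per KV head
        std::vector<float> A(Nx*ne00), B(r2*ne00), C(r2*Nx, 0.0f);
        for (size_t i = 0; i < A.size(); ++i) A[i] = 0.1f*i;
        for (size_t i = 0; i < B.size(); ++i) B[i] = 1.0f + i;
        for (int j = 0; j < r2; ++j)          // one fused gemm instead of r2 gemv calls
            for (int x = 0; x < Nx; ++x)
                for (int k = 0; k < ne00; ++k)
                    C[j*Nx + x] += A[x*ne00 + k]*B[j*ne00 + k];
        printf("C[0] = %g, C[last] = %g\n", C[0], C[r2*Nx - 1]);
        return 0;
    }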

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Author: Kawrakow
Date:   2025-02-15 08:45:45 +02:00 (committed by GitHub)
Commit: 0551e7630b (parent 8e94b29e35)
3 changed files with 101 additions and 41 deletions

ggml.c

@@ -14069,22 +14069,12 @@ static void ggml_compute_forward_mul_mat(
 #if GGML_USE_IQK_MULMAT
     if (dst->type == GGML_TYPE_F32) {
-        int gcd = simple_gcd(ne12*ne13, nth);
-        int counter = 0;
-        for (int64_t i13 = 0; i13 < ne13; i13++) {
-            for (int64_t i12 = 0; i12 < ne12; i12++) {
-                if ((counter++ % gcd) == (ith%gcd)) {
-                    if (!iqk_mul_mat(ne01, ne11, ne00,
-                                src0->type, (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03, nb01, ///ggml_type_size(src0->type),
-                                src1->type, (const char *)src1->data + i12*nb12 + i13*nb13, nb11, ///ggml_type_size(src1->type),
-                                (float *)((char *)dst->data + i12*nb2 + i13*nb3), nb1/ggml_type_size(dst->type),
-                                ith/gcd, nth/gcd)) goto IQK_MulMat_Not_Available1;
-                }
-            }
-        }
-        return;
+        if (iqk_mul_mat_4d(ne01, ne11, ne00,
+                    ne02, ne03, ne12, ne13, nb02, nb03, nb12, nb13, nb2/sizeof(float), nb3/sizeof(float),
+                    src0->type, src0->data, nb01,
+                    src1->type, src1->data, nb11,
+                    (float *)dst->data, nb1/sizeof(float), ith, nth)) return;
     }
 IQK_MulMat_Not_Available1:;
 #endif
 #if GGML_USE_LLAMAFILE
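
The scheduling that the deleted loop implemented, and that now lives inside iqk_mul_mat_4d, splits the nth threads into simple_gcd(ne12*ne13, nth) groups; each group walks a disjoint subset of the (i13, i12) head pairs and runs that pair's gemm with nth/gcd workers. A standalone sketch with assumed toy head/thread counts:

    // Prints which (i13, i12) matmul each thread works on, and with what
    // sub-thread id. gcd_of is an ordinary Euclidean gcd helper.
    #include <cstdio>

    static int gcd_of(int a, int b) { while (b) { int t = a % b; a = b; b = t; } return a; }

    int main() {
        const int ne12 = 4, ne13 = 2, nth = 6;   // hypothetical heads and thread count
        const int gcd = gcd_of(ne12*ne13, nth);  // gcd(8, 6) = 2 groups of 3 threads
        for (int ith = 0; ith < nth; ++ith) {
            int counter = 0;
            for (int i13 = 0; i13 < ne13; ++i13)
                for (int i12 = 0; i12 < ne12; ++i12)
                    if (counter++ % gcd == ith % gcd)
                        printf("pair (%d,%d): thread %d -> worker %d of %d\n",
                               i13, i12, ith, ith/gcd, nth/gcd);
        }
        return 0;
    }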
@@ -14125,6 +14115,29 @@ UseGgmlGemm1:;
         assert(params->wsize >= ne13*nbw3);
         GGML_ASSERT(src1->type == GGML_TYPE_F32);
+#ifdef GGML_USE_IQK_MULMAT
+        int ts = type_traits[vec_dot_type].type_size;
+        int bs = type_traits[vec_dot_type].blck_size;
+        int64_t blocks_per_row = ne10/bs;
+        int64_t num_blocks = ne11*ne12*ne13*blocks_per_row;
+        int gcd = simple_gcd(128, ts); // 128 is to cover cache line sizes for common architectures without getting involved
+                                       // with trying to get it from ggml
+        int64_t num_blocks_gcd = (num_blocks + gcd - 1)/gcd;
+        int64_t block_per_thread = ((num_blocks_gcd + nth - 1)/nth)*gcd;
+        int64_t first_block = ith*block_per_thread;
+        int64_t last_block = MIN(num_blocks, first_block + block_per_thread);
+        while (first_block < last_block) {
+            int64_t i13 = first_block/(ne11*ne12*blocks_per_row);
+            int64_t i12 = (first_block - i13*ne11*ne12*blocks_per_row)/(ne11*blocks_per_row);
+            int64_t i11 = (first_block - (i13*ne12 + i12)*ne11*blocks_per_row)/blocks_per_row;
+            int64_t i10 = first_block % blocks_per_row;
+            int64_t blocks_to_do = MIN(blocks_per_row - i10, last_block - first_block);
+            from_float((float *)((char *)src1->data + i13*nb13 + i12*nb12 + i11*nb11) + i10*bs,
+                    (void *)(wdata + i13*nbw3 + i12*nbw2 + i11*nbw1 + i10*ts), blocks_to_do*bs);
+            first_block += blocks_to_do;
+        }
+#else
         for (int64_t i13 = 0; i13 < ne13; ++i13) {
             for (int64_t i12 = 0; i12 < ne12; ++i12) {
                 int64_t i11_processed = 0;
@@ -14145,6 +14158,7 @@ UseGgmlGemm1:;
                 }
             }
         }
+#endif
         ggml_barrier(params->shared);
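
About the #ifdef branch added above: src1 is treated as one flat array of num_blocks quant blocks, the per-thread share is rounded up to a multiple of gcd(128, ts) so that thread boundaries fall on cache-line-friendly offsets, and each thread converts a single contiguous slice, decoding its flat block index back into (i13, i12, i11, i10). A small sketch (hypothetical shape) that checks the decoding round-trips:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t ne11 = 3, ne12 = 2, ne13 = 2, blocks_per_row = 5;  // toy shape
        const int64_t num_blocks = ne11*ne12*ne13*blocks_per_row;
        for (int64_t b = 0; b < num_blocks; ++b) {
            // Same decomposition as in the diff above
            int64_t i13 = b/(ne11*ne12*blocks_per_row);
            int64_t i12 = (b - i13*ne11*ne12*blocks_per_row)/(ne11*blocks_per_row);
            int64_t i11 = (b - (i13*ne12 + i12)*ne11*blocks_per_row)/blocks_per_row;
            int64_t i10 = b % blocks_per_row;
            // Re-encode: row-major over (i13, i12, i11), then block within the row
            assert(b == ((i13*ne12 + i12)*ne11 + i11)*blocks_per_row + i10);
        }
        printf("all %lld flat block indices round-trip\n", (long long)num_blocks);
        return 0;
    }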
@@ -14165,34 +14179,14 @@ UseGgmlGemm1:;
 #if GGML_USE_IQK_MULMAT
     if (src1->type != vec_dot_type && dst->type == GGML_TYPE_F32) {
-        // When K*Q and V*softmax(K*Q) (so ne12*ne13 > 1), it is better (faster) to have fewer threads processing
-        // one matrix multiplication, but work on several heads at once.
-        // Hence, we find the GCD(n12*ne13, nth) and have nth/GCD(n12*ne13, nth) threads per head.
-        // Leaving the previous version commented out for now just in case.
         const size_t row_size = ggml_row_size(vec_dot_type, ne10);
-        int ntg = simple_gcd(ne12*ne13, nth);
-        int counter = 0;
-        for (int64_t i13 = 0; i13 < ne13; i13++) {
-            for (int64_t i12 = 0; i12 < ne12; i12++) {
-                if (counter++ % ntg == ith%ntg) {
-                    if (!iqk_mul_mat(ne01, ne11, ne00,
-                                src0->type, (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03, nb01, ///ggml_type_size(src0->type),
-                                vec_dot_type, (const char *)wdata + (i12*ne11 + i13*ne12*ne11)*row_size, row_size, ///ggml_type_size(vec_dot_type),
-                                (float *)((char *)dst->data + i12*nb2 + i13*nb3), nb1/ggml_type_size(dst->type),
-                                ith/ntg, nth/ntg)) goto IQK_MulMat_Not_Available2;
-                }
-            }
-        }
-        //for (int64_t i13 = 0; i13 < ne13; i13++)
-        //    for (int64_t i12 = 0; i12 < ne12; i12++)
-        //        if (!iqk_mul_mat(ne01, ne11, ne00,
-        //            src0->type, (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03, nb01, ///ggml_type_size(src0->type),
-        //            vec_dot_type, (const char *)wdata + (i12*ne11 + i13*ne12*ne11)*row_size, row_size, ///ggml_type_size(vec_dot_type),
-        //            (float *)((char *)dst->data + i12*nb2 + i13*nb3), nb1/ggml_type_size(dst->type),
-        //            ith, nth)) goto IQK_MulMat_Not_Available2;
-        return;
+        if (iqk_mul_mat_4d(ne01, ne11, ne00,
+                    ne02, ne03, ne12, ne13, nb02, nb03, row_size*ne11, row_size*ne11*ne12,
+                    nb2/sizeof(float), nb3/sizeof(float),
+                    src0->type, src0->data, nb01,
+                    vec_dot_type, wdata, row_size,
+                    (float *)dst->data, nb1/sizeof(float), ith, nth)) return;
     }
 IQK_MulMat_Not_Available2:;
 #endif
 #if GGML_USE_LLAMAFILE

iqk_mul_mat.cpp

@@ -340,6 +340,56 @@ bool iqk_mul_mat(long Nx, long Ny, long ne00,
     return true;
 }
+
+namespace {
+inline uint32_t simple_gcd(uint32_t a, uint32_t b) {
+    while (a != b) {
+        if (a > b) a -= b;
+        else b -= a;
+    }
+    return a;
+}
+}
+
+bool iqk_mul_mat_4d(long Nx, long Ny, long ne00,
+        long ne02, long ne03, long ne12, long ne13,
+        long nb02, long nb03, long nb12, long nb13, long nb2, long nb3,
+        int typeA, const void * A, long strideA,
+        int typeB, const void * B, long strideB,
+        float * C, long stride_C, int ith, int nth) {
+    auto r2 = ne12 / ne02;
+    auto r3 = ne13 / ne03;
+    if (ne13 == 1 && Ny == 1 && r2 > 1) {
+        int gcd = simple_gcd(ne02, nth);
+        int counter = 0;
+        for (int64_t i12 = 0; i12 < ne02; i12++) {
+            if ((counter++ % gcd) == (ith%gcd)) {
+                if (!iqk_mul_mat(Nx, r2, ne00,
+                            typeA, (const char *)A + i12*nb02, strideA,
+                            typeB, (const char *)B + i12*r2*nb12, nb12,
+                            C + r2*i12*nb2, nb2,
+                            ith/gcd, nth/gcd)) return false;
+            }
+        }
+        return true;
+    }
+    int gcd = simple_gcd(ne12*ne13, nth);
+    int counter = 0;
+    for (int64_t i13 = 0; i13 < ne13; i13++) {
+        for (int64_t i12 = 0; i12 < ne12; i12++) {
+            if ((counter++ % gcd) == (ith%gcd)) {
+                if (!iqk_mul_mat(Nx, Ny, ne00,
+                            typeA, (const char *)A + i12/r2*nb02 + i13/r3*nb03, strideA,
+                            typeB, (const char *)B + i12*nb12 + i13*nb13, strideB,
+                            C + i12*nb2 + i13*nb3, stride_C,
+                            ith/gcd, nth/gcd)) return false;
+            }
+        }
+    }
+    return true;
+}
+
 bool iqk_mul_mat_moe(long Nx, long Ny, long ne00, int ne11,
         int typeA, const void * A, long strideA,
         int typeB, const void * B, long strideB,
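
Two things worth noting in the implementation above. The ne13 == 1 && Ny == 1 && r2 > 1 branch is the GQA token-generation fast path: the r2 rows of B that map to KV head i12 are consecutive (stride nb12), so they are handed to iqk_mul_mat as a single gemm with Ny = r2. In the general path, A is instead broadcast over B's heads via i12/r2 and i13/r3. A tiny sketch of that broadcast mapping with toy dimensions:

    #include <cstdio>

    int main() {
        const int ne02 = 2, ne03 = 1, ne12 = 6, ne13 = 2;  // toy GQA shape: r2 = 3, r3 = 2
        const int r2 = ne12/ne02, r3 = ne13/ne03;
        for (int i13 = 0; i13 < ne13; ++i13)
            for (int i12 = 0; i12 < ne12; ++i12)
                printf("B head (%d,%d) reads A head (%d,%d)\n", i13, i12, i13/r3, i12/r2);
        return 0;
    }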
@@ -16292,6 +16342,15 @@ bool iqk_mul_mat(int, long, long, long, int, const void *, long, int, const void
     return false;
 }
+
+bool iqk_mul_mat_4d(long /*Nx*/, long /*Ny*/, long /*ne00*/,
+        long /*ne02*/, long /*ne03*/, long /*ne12*/, long /*ne13*/,
+        long /*nb02*/, long /*nb03*/, long /*nb12*/, long /*nb13*/, long /*nb2*/, long /*nb3*/,
+        int /*typeA*/, const void * /*A*/, long /*strideA*/,
+        int /*typeB*/, const void * /*B*/, long /*strideB*/,
+        float * /*C*/, long /*stride_C*/, int /*ith*/, int /*nth*/) {
+    return false;
+}
+
 bool iqk_mul_mat_moe(long, long, long, int, int, const void *, long, int, const void *, long, float *, long, long,
         const void *, int, int) {
     return false;

iqk_mul_mat.h

@@ -16,6 +16,13 @@ bool iqk_mul_mat(long Nx, long Ny, long ne00,
         int typeB, const void * B, long strideB,
         float * C, long stride_C, int ith, int nth);
+
+bool iqk_mul_mat_4d(long Nx, long Ny, long ne00,
+        long ne02, long ne03, long ne12, long ne13,
+        long nb02, long nb03, long nb12, long nb13, long nb2, long nb3,
+        int typeA, const void * A, long strideA,
+        int typeB, const void * B, long strideB,
+        float * C, long stride_C, int ith, int nth);
+
 bool iqk_mul_mat_moe(long Nx, long Ny, long ne00, int ne11,
         int typeA, const void * A, long strideA,
         int typeB, const void * B, long strideB,