Mirror of https://github.com/ikawrakow/ik_llama.cpp.git
iqk_mul_mat: attention matrix multiplications
K*Q and KQ*V are n_kv_embed x n_token x n_head matrix multiplications. Before this PR, this meant n_head calls to iqk_mul_mat, each performing one n_kv_embed x n_token 2D multiplication parallelized over all nth threads. With this PR, if n_head is a multiple of nth, each thread instead performs n_head/nth of the n_kv_embed x n_token 2D multiplications on its own. This improves PP-512 (prompt processing of 512 tokens, 32 threads) for Bitnet-3B to 433 t/s, up from 409 t/s. It is beneficial in other cases too: for LLaMA-7B we go to 201 t/s from 193 t/s with q4_K_S, and to 144 t/s from 139 t/s with fp16. All numbers are for a Ryzen-7950X CPU.
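The scheduling change can be sketched as follows. This is a simplified illustration, not the ggml.c code from the diff below: mul_mat_2d() is a hypothetical stand-in for a single-head iqk_mul_mat call, and the round-robin assignment mirrors the counter++ % nth == ith test in the actual change.

    #include <stdint.h>

    /* Hypothetical callback: one n_kv_embed x n_token 2D multiplication for one head. */
    typedef void (*mul_mat_2d_t)(int64_t head, void *ctx);

    /* Before: each of the n_head multiplications was a separate call parallelized over
     * all nth threads. After: thread ith of nth takes heads ith, ith+nth, ith+2*nth, ...
     * and computes each of them alone, so with n_head a multiple of nth every thread
     * performs exactly n_head/nth whole 2D multiplications. */
    static void mul_mat_per_head(int64_t n_head, int ith, int nth,
                                 mul_mat_2d_t mul_mat_2d, void *ctx) {
        for (int64_t h = 0; h < n_head; ++h) {
            if (h % nth == ith) {
                mul_mat_2d(h, ctx);
            }
        }
    }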
1 changed file: ggml.c (17 lines changed)
@@ -12368,7 +12368,22 @@ static void ggml_compute_forward_mul_mat(
     // compute by src0 rows
 
 #if GGML_USE_IQK_MULMAT
-    if (ggml_is_contiguous(src1) && dst->type == GGML_TYPE_F32) {
+    if (dst->type == GGML_TYPE_F32 && params->type == GGML_TASK_TYPE_COMPUTE && (ne12*ne13)%nth == 0) {
+        int counter = 0;
+        for (int64_t i13 = 0; i13 < ne13; i13++) {
+            for (int64_t i12 = 0; i12 < ne12; i12++) {
+                if (counter++ % nth == ith) {
+                    if (!iqk_mul_mat(params->type, ne01, ne11, ne00,
+                            src0->type, (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03, nb01/ggml_type_size(src0->type),
+                            src1->type, (const char *)src1->data + i12*nb12 + i13*nb13, nb11/ggml_type_size(src1->type),
+                            (float *)((char *)dst->data + i12*nb2 + i13*nb3), nb1/ggml_type_size(dst->type),
+                            0, 1)) goto IQK_MulMat_Not_Available1;
+                }
+            }
+        }
+        return;
+    }
+    if (dst->type == GGML_TYPE_F32) {
         for (int64_t i13 = 0; i13 < ne13; i13++)
             for (int64_t i12 = 0; i12 < ne12; i12++)
                 if (!iqk_mul_mat(params->type, ne01, ne11, ne00,
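Two notes on the new block, for readers not familiar with this part of ggml.c. First, the counter++ % nth == ith test is the round-robin assignment described in the commit message: the (ne12*ne13)%nth == 0 guard ensures every thread gets the same number of whole 2D multiplications, and when that does not hold the code falls through to the pre-existing path below it. Second, r2 and r3 are assumed here to be the usual ggml broadcast ratios computed earlier in ggml_compute_forward_mul_mat, along the lines of

    // broadcast ratios between src1 and src0 batch dimensions
    // (assumed from upstream ggml.c; not part of the hunk shown above)
    const int64_t r2 = ne12/ne02;
    const int64_t r3 = ne13/ne03;

so i12/r2 and i13/r3 map each src1 matrix to the src0 matrix it is multiplied with.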