Fused mul + multi_add op (#858)
* Adding fused mul+multi_add + CPU implementation

* fused mul+multi_add: CUDA

* fused mul+multi_add: command line argument to disable it

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
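In short: the new GGML_OP_MUL_MULTI_ADD takes an F32 tensor src0 of shape [ne0, nused, ne2] together with per-row scalars src1 of shape [1, nused, ne2] and produces an F32 dst of shape [ne0, ne2] (ggml convention, ne[0] fastest):

    dst[i2][i0] = sum_{j=0}^{nused-1} src0[i2][j][i0] * src1[i2][j][0]

That is, it fuses the broadcast multiply with the row reduction that would otherwise take a GGML_OP_MUL node followed by GGML_OP_MULTI_ADD, avoiding the intermediate [ne0, nused, ne2] tensor.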
@@ -46,6 +46,7 @@
 #include "ggml-cuda/conv2d-dw.cuh"
 #include "ggml-cuda/set-rows.cuh"
 #include "ggml-cuda/argmax.cuh"
+#include "ggml-cuda/multiadd.cuh"
 
 #include <algorithm>
 #include <array>
@@ -3178,6 +3179,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
         case GGML_OP_MULTI_ADD:
             ggml_cuda_op_multi_add(ctx, dst);
             break;
+        case GGML_OP_MUL_MULTI_ADD:
+            ggml_cuda_op_mul_multi_add(ctx, dst);
+            break;
         case GGML_OP_ACC:
             ggml_cuda_op_acc(ctx, dst);
             break;
@@ -4408,6 +4412,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
         case GGML_OP_ADD:
         case GGML_OP_ADD_ID:
         case GGML_OP_MULTI_ADD:
+        case GGML_OP_MUL_MULTI_ADD:
         case GGML_OP_MUL:
         case GGML_OP_DIV:
         case GGML_OP_FUSED_RMS_NORM:
ggml/src/ggml-cuda/multiadd.cu (new file, 87 lines)
@@ -0,0 +1,87 @@
+#include "multiadd.cuh"
+
+static __global__ void multi_add_f32(int nused, int64_t ne0, int64_t ne1, int64_t nb1, int64_t nb01, const char * src0, char * dst) {
+    const int64_t i = blockDim.x*blockIdx.x + threadIdx.x;
+    int64_t k = ne0*ne1;
+    if (i >= k) {
+        return;
+    }
+    int i1 = i / ne0;
+    int i0 = i % ne0;
+    float * result = (float *)(dst + i1*nb1);
+    const float * s = (const float *)(src0 + i1*nb01) + i0;
+    if (nused == 1) {
+        result[i0] = s[0];
+    } else {
+        float sum = s[0] + s[ne0];
+        for (int j = 2; j < nused; ++j) sum += s[j*ne0];
+        result[i0] = sum;
+    }
+}
+
+static void multi_add_f32_cuda(int nused, int64_t ne0, int64_t ne1, int64_t nb1, int64_t nb01, const char * src0, char * dst, cudaStream_t stream) {
+    int64_t k = ne0 * ne1;
+    const int num_blocks = (k + CUDA_MULTI_ADD_BLOCK_SIZE - 1) / CUDA_MULTI_ADD_BLOCK_SIZE;
+    multi_add_f32<<<num_blocks, CUDA_MULTI_ADD_BLOCK_SIZE, 0, stream>>>(nused, ne0, ne1, nb1, nb01, src0, dst);
+}
+
+void ggml_cuda_op_multi_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    GGML_ASSERT(dst->type == GGML_TYPE_F32);
+    GGML_ASSERT(dst->ne[2] == 1 && dst->ne[3] == 1);
+    GGML_ASSERT(dst->nb[0] == sizeof(float));
+    int nused = dst->op_params[0];
+    GGML_ASSERT(nused >= 1);
+    const char * src0 = (const char *)dst->src[0]->data;
+    cudaStream_t stream = ctx.stream();
+    multi_add_f32_cuda(nused, dst->ne[0], dst->ne[1], dst->nb[1], dst->src[0]->nb[1], src0, (char *)dst->data, stream);
+}
+
+
+static __global__ void mul_multi_add_f32(int nused, int64_t ne0, int64_t ne1, int64_t nb1, int64_t nb01, int64_t nb02, int64_t nb11, int64_t nb12, const char * src0, const char * src1, char * dst) {
+    const int64_t i = blockDim.x*blockIdx.x + threadIdx.x;
+    int64_t k = ne0*ne1;
+    if (i >= k) {
+        return;
+    }
+    int i1 = i / ne0;
+    int i0 = i % ne0;
+    float * result = (float *)(dst + i1*nb1);
+
+    auto c0 = src0 + i1*nb02;
+    auto c1 = src1 + i1*nb12;
+
+    float sum = 0;
+    for (int j = 0; j < nused; ++j) {
+        auto x0 = (const float *)c0;
+        auto x1 = (const float *)c1;
+        sum += x0[i0] * x1[0];
+        c0 += nb01;
+        c1 += nb11;
+    }
+    result[i0] = sum;
+}
+
+static void mul_multi_add_f32_cuda(int nused, int64_t ne0, int64_t ne1, int64_t nb1, int64_t nb01, int64_t nb02, int64_t nb11, int64_t nb12,
+        const char * src0, const char * src1, char * dst, cudaStream_t stream) {
+    int64_t k = ne0 * ne1;
+    const int num_blocks = (k + CUDA_MULTI_ADD_BLOCK_SIZE - 1) / CUDA_MULTI_ADD_BLOCK_SIZE;
+    mul_multi_add_f32<<<num_blocks, CUDA_MULTI_ADD_BLOCK_SIZE, 0, stream>>>(nused, ne0, ne1, nb1, nb01, nb02, nb11, nb12, src0, src1, dst);
+}
+
+void ggml_cuda_op_mul_multi_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    auto src0 = dst->src[0];
+    auto src1 = dst->src[1];
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT(src1->type == GGML_TYPE_F32);
+    GGML_ASSERT( dst->type == GGML_TYPE_F32);
+    GGML_ASSERT(src0->ne[0] == dst->ne[0]);
+    GGML_ASSERT(src0->ne[2] == dst->ne[1]);
+    GGML_ASSERT(src0->ne[1] == src1->ne[1]);
+    GGML_ASSERT(src0->ne[2] == src1->ne[2]);
+    GGML_ASSERT(src0->ne[3] == src1->ne[3]);
+    GGML_ASSERT(src0->ne[3] == 1);
+    GGML_ASSERT(src1->ne[0] == 1);
+
+    mul_multi_add_f32_cuda(src0->ne[1], dst->ne[0], dst->ne[1], dst->nb[1], src0->nb[1], src0->nb[2], src1->nb[1], src1->nb[2],
+            (const char *)src0->data, (const char *)src1->data, (char *)dst->data, ctx.stream());
+}
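For readers cross-checking the kernel: a minimal CPU reference of what mul_multi_add_f32 computes, assuming contiguous F32 data (the kernel itself walks the nb* byte strides, so it also handles non-contiguous views). This sketch is illustrative, not part of the commit:

    #include <stdint.h>

    // src0: [ne0, nused, ne1], src1: [1, nused, ne1], dst: [ne0, ne1]
    static void mul_multi_add_ref(int nused, int64_t ne0, int64_t ne1,
                                  const float * src0, const float * src1, float * dst) {
        for (int64_t i1 = 0; i1 < ne1; ++i1) {
            for (int64_t i0 = 0; i0 < ne0; ++i0) {
                float sum = 0.0f;
                for (int j = 0; j < nused; ++j) {
                    // one scalar weight per summed row
                    sum += src0[(i1*nused + j)*ne0 + i0] * src1[i1*nused + j];
                }
                dst[i1*ne0 + i0] = sum;
            }
        }
    }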
ggml/src/ggml-cuda/multiadd.cuh (new file, 14 lines)
@@ -0,0 +1,14 @@
+//
+// Copyright (C) 2023-2024 The ggml authors
+// Copyright (C) 2024 Iwan Kawrakow
+// MIT license
+// SPDX-License-Identifier: MIT
+//
+
+#include "common.cuh"
+
+#define CUDA_MULTI_ADD_BLOCK_SIZE 256
+
+void ggml_cuda_op_multi_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+
+void ggml_cuda_op_mul_multi_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
@@ -59,25 +59,6 @@ static __global__ void fused_mul_silu_f32(const float * x, const float * y, floa
     dst[i] = x[i] * y[i] / (1.0f + expf(-x[i]));
 }
 
-static __global__ void multi_add_f32(int nused, int64_t ne0, int64_t ne1, int64_t nb1, int64_t nb01, const char * src0, char * dst) {
-    const int64_t i = blockDim.x*blockIdx.x + threadIdx.x;
-    int64_t k = ne0*ne1;
-    if (i >= k) {
-        return;
-    }
-    int i1 = i / ne0;
-    int i0 = i % ne0;
-    float * result = (float *)(dst + i1*nb1);
-    const float * s = (const float *)(src0 + i1*nb01) + i0;
-    if (nused == 1) {
-        result[i0] = s[0];
-    } else {
-        float sum = s[0] + s[ne0];
-        for (int j = 2; j < nused; ++j) sum += s[j*ne0];
-        result[i0] = sum;
-    }
-}
-
 static __global__ void fused_mul_relu_f32(const float * x, const float * y, float * dst, const int k) {
     const int i = blockDim.x*blockIdx.x + threadIdx.x;
 
@@ -261,23 +242,6 @@ static void sqrt_f32_cuda(const float * x, float * dst, const int k, cudaStream_
     sqrt_f32<<<num_blocks, CUDA_SQRT_BLOCK_SIZE, 0, stream>>>(x, dst, k);
 }
 
-static void multi_add_f32_cuda(int nused, int64_t ne0, int64_t ne1, int64_t nb1, int64_t nb01, const char * src0, char * dst, cudaStream_t stream) {
-    int64_t k = ne0 * ne1;
-    const int num_blocks = (k + CUDA_MULTI_ADD_BLOCK_SIZE - 1) / CUDA_MULTI_ADD_BLOCK_SIZE;
-    multi_add_f32<<<num_blocks, CUDA_MULTI_ADD_BLOCK_SIZE, 0, stream>>>(nused, ne0, ne1, nb1, nb01, src0, dst);
-}
-
-void ggml_cuda_op_multi_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    GGML_ASSERT(dst->type == GGML_TYPE_F32);
-    GGML_ASSERT(dst->ne[2] == 1 && dst->ne[3] == 1);
-    GGML_ASSERT(dst->nb[0] == sizeof(float));
-    int nused = dst->op_params[0];
-    GGML_ASSERT(nused >= 1);
-    const char * src0 = (const char *)dst->src[0]->data;
-    cudaStream_t stream = ctx.stream();
-    multi_add_f32_cuda(nused, dst->ne[0], dst->ne[1], dst->nb[1], dst->src[0]->nb[1], src0, (char *)dst->data, stream);
-}
-
 void ggml_cuda_op_gelu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
     const float * src0_d = (const float *)src0->data;
@@ -4222,6 +4222,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
     "OUT_PROD",
     "FUSED_UP_GATE",
     "MOE_FUSED_UP_GATE",
+    "MUL_MULTI_ADD",
 
     "SCALE",
     "SET",
@@ -4289,7 +4290,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
     "GLU",
 };
 
-static_assert(GGML_OP_COUNT == 88, "GGML_OP_COUNT != 88");
+static_assert(GGML_OP_COUNT == 89, "GGML_OP_COUNT != 89");
 
 static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
     "none",
@@ -4326,6 +4327,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
     "X*Y",
     "X*Y1&X*Y2",
     "X*Y1&X*Y2",
+    "x1*y1+x2*y2+...",
 
     "x*v",
     "y-\\>view(x)",
@@ -4393,7 +4395,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
     "glu(x)",
 };
 
-static_assert(GGML_OP_COUNT == 88, "GGML_OP_COUNT != 88");
+static_assert(GGML_OP_COUNT == 89, "GGML_OP_COUNT != 89");
 
 static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");
 
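The four ggml.c hunks above follow the usual pattern for adding an op: a matching row in both the GGML_OP_NAME and GGML_OP_SYMBOL tables, with the static_asserts bumped from 88 to 89 so that a missed table update fails at compile time. A minimal illustration of the pattern, with hypothetical names:

    #include <assert.h>  // static_assert (C11)

    enum my_op { MY_OP_A, MY_OP_B, MY_OP_COUNT };

    static const char * MY_OP_NAME[MY_OP_COUNT] = { "A", "B" };

    // adding MY_OP_C without updating the table and this assert breaks the build
    static_assert(MY_OP_COUNT == 2, "MY_OP_COUNT != 2");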
@@ -6103,6 +6105,31 @@ struct ggml_tensor * ggml_multi_add(
     return result;
 }
 
+struct ggml_tensor * ggml_mul_multi_add(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a,
+        struct ggml_tensor  * b) {
+
+    bool is_node = false;
+
+    GGML_ASSERT(a->ne[1] == b->ne[1]);
+    GGML_ASSERT(a->ne[2] == b->ne[2]);
+    GGML_ASSERT(a->ne[3] == b->ne[3]);
+    GGML_ASSERT(a->ne[3] == 1);
+    GGML_ASSERT(b->ne[0] == 1);
+
+    int64_t ne[GGML_MAX_DIMS] = { a->ne[0], a->ne[2], 1, 1 };
+
+    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, ne);
+
+    result->op   = GGML_OP_MUL_MULTI_ADD;
+    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+    result->src[0] = a;
+    result->src[1] = b;
+
+    return result;
+}
+
 // ggml_add_cast
 
 static struct ggml_tensor * ggml_add_cast_impl(
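A sketch of how the new builder might be called; the tensor names are illustrative, not from the commit, but the shapes match the asserts above (src1 carries one scalar per row of src0):

    // experts: F32 [n_embd, n_expert_used, n_tokens] - stacked expert outputs
    // weights: F32 [1,      n_expert_used, n_tokens] - one routing weight per row
    struct ggml_tensor * out = ggml_mul_multi_add(ctx, experts, weights);
    // out: F32 [n_embd, n_tokens] - the weighted sum over the n_expert_used rows,
    // computed without materializing the full [n_embd, n_expert_used, n_tokens]
    // product that a separate mul + multi_add pair would create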
@@ -22319,6 +22346,10 @@ static int ggml_compute_forward(struct ggml_compute_params * params, struct ggml
             {
                 ggml_compute_forward_multi_add(params, tensor);
             } break;
+        case GGML_OP_MUL_MULTI_ADD:
+            {
+                iqk_mul_multi_add(tensor, params->ith, params->nth);
+            } break;
         case GGML_OP_ACC:
             {
                 ggml_compute_forward_acc(params, tensor);
@@ -23157,6 +23188,10 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
             {
                 GGML_ABORT("fatal error"); // TODO: implement
             }
+        case GGML_OP_MUL_MULTI_ADD:
+            {
+                GGML_ABORT("fatal error"); // TODO: implement
+            }
         case GGML_OP_CONCAT:
             {
                 GGML_ABORT("fatal error"); // TODO: implement
@@ -24241,6 +24276,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) {
         case GGML_OP_ADD1:
         case GGML_OP_ACC:
         case GGML_OP_MULTI_ADD:
+        case GGML_OP_MUL_MULTI_ADD:
             {
                 n_tasks = n_threads;
             } break;
@@ -12,6 +12,7 @@
 #include <vector>
 #include <algorithm>
 #include <cmath>
+#include <cstring>
 
 namespace {
 // Playing around with group scores: use sum of probabilities in the group
@@ -409,3 +410,41 @@ void iqk_openai_experts(struct ggml_tensor * topk, struct ggml_tensor * softmax,
         for (int j = 0; j < ne0; ++j) weights[j] *= norm;
     }
 }
+
+void iqk_mul_multi_add(struct ggml_tensor * dst, int ith, int nth) {
+    auto src0 = dst->src[0];
+    auto src1 = dst->src[1];
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT(src1->type == GGML_TYPE_F32);
+    GGML_ASSERT( dst->type == GGML_TYPE_F32);
+    GGML_ASSERT(src0->ne[0] == dst->ne[0]);
+    GGML_ASSERT(src0->ne[2] == dst->ne[1]);
+    GGML_ASSERT(src0->ne[1] == src1->ne[1]);
+    GGML_ASSERT(src0->ne[2] == src1->ne[2]);
+    GGML_ASSERT(src0->ne[3] == src1->ne[3]);
+    GGML_ASSERT(src0->ne[3] == 1);
+    GGML_ASSERT(src1->ne[0] == 1);
+
+    int nrows = dst->ne[1];
+    int npt   = (nrows + nth - 1)/nth;
+    int first = ith*npt;
+    int last  = std::min(nrows, first + npt);
+
+    int ne01 = src0->ne[1];
+    int ne00 = src0->ne[0];
+
+    for (int ir = first; ir < last; ++ir) {
+        auto c0 = (const char *)src0->data + ir*src0->nb[2];
+        auto c1 = (const char *)src1->data + ir*src1->nb[2];
+        auto cy = (      char *) dst->data + ir* dst->nb[1];
+        std::memset(cy, 0, ne00*sizeof(float));
+        for (int j = 0; j < ne01; ++j) {
+            auto x0 = (const float *)c0;
+            auto x1 = (const float *)c1;
+            auto y  = (      float *)cy;
+            for (int k = 0; k < ne00; ++k) y[k] += x0[k] * x1[0];
+            c0 += src0->nb[1];
+            c1 += src1->nb[1];
+        }
+    }
+}
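The row partitioning above splits the dst rows evenly across threads, with the last range clipped by std::min. A worked example:

    // nrows = 7, nth = 4  =>  npt = (7 + 4 - 1)/4 = 2 rows per thread
    // ith 0: rows [0, 2)   ith 1: rows [2, 4)
    // ith 2: rows [4, 6)   ith 3: rows [6, 7)   <- clipped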
@@ -26,6 +26,8 @@ void iqk_glm45moe_experts(struct ggml_tensor * dst, struct ggml_tensor * topk_vi
 
 void iqk_openai_experts(struct ggml_tensor * topk, struct ggml_tensor * softmax, int ith, int nth);
 
+void iqk_mul_multi_add(struct ggml_tensor * dst, int ith, int nth);
+
 #ifdef __cplusplus
 }
 #endif