From 8de297b7958964abddadc3613717ce16c3ea30dd Mon Sep 17 00:00:00 2001 From: Kawrakow Date: Sun, 31 Aug 2025 18:16:36 +0300 Subject: [PATCH] Fused FFN_UP+FFN_GATE op (#741) * Fused up+gate+unary for regular (not MoE) FFN - CPU * WIP CUDA * Seems to be working on CUDA For a dense model we get 2-3% speedup for PP and ~0.6% for TG. * Add command line option This time the option is ON by default, and one needs to turn it off via -no-fug or --no-fused-up-gate --------- Co-authored-by: Iwan Kawrakow --- common/common.cpp | 7 ++ common/common.h | 1 + examples/llama-bench/llama-bench.cpp | 33 ++++++- ggml/include/ggml.h | 8 ++ ggml/src/ggml-cuda.cu | 67 +++++++++++++- ggml/src/ggml-cuda/mmq.cuh | 1 + ggml/src/ggml.c | 132 ++++++++++++++++++++++++++- ggml/src/iqk/iqk_mul_mat.cpp | 2 +- include/llama.h | 3 +- src/llama.cpp | 34 +++++++ 10 files changed, 276 insertions(+), 12 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 459e7c27..717f9466 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -1004,6 +1004,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa params.fused_moe_up_gate = true; return true; } + if (arg == "-no-fug" || arg == "--no-fused-up-gate") { + params.fused_up_gate = false; + return true; + } if (arg == "-ser" || arg == "--smart-expert-reduction") { CHECK_ARG auto values = string_split_pairs(argv[i], ','); @@ -1760,6 +1764,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param options.push_back({ "*", "-mla, --mla-use", "enable MLA (default: %d)", params.mla_attn }); options.push_back({ "*", "-amb, --attention-max-batch", "max batch size for attention computations (default: %d)", params.attn_max_batch}); options.push_back({ "*", "-fmoe, --fused-moe", "enable fused MoE (default: %s)", params.fused_moe_up_gate ? "enabled" : "disabled" }); + options.push_back({ "*", "-no-fug, --no-fused-up-gate", "disable fused up-gate (default: %s)", params.fused_up_gate ? "enabled" : "disabled" }); options.push_back({ "*", "-ser, --smart-expert-reduction,","experts reduction (default: %d,%g)", params.min_experts, params.thresh_experts}); options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n" "in conversation mode, this will be used as system prompt\n" @@ -2660,6 +2665,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param cparams.mla_attn = params.mla_attn; cparams.attn_max_batch = params.attn_max_batch; cparams.fused_moe_up_gate = params.fused_moe_up_gate; + cparams.fused_up_gate = params.fused_up_gate; cparams.min_experts = params.min_experts; cparams.thresh_experts = params.thresh_experts; @@ -3756,6 +3762,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l fprintf(stream, "mla_attn: %d # default: 0\n", params.mla_attn); fprintf(stream, "attn_max_batch: %d # default: 0\n", params.attn_max_batch); fprintf(stream, "fused_moe: %s # default: false\n", params.fused_moe_up_gate ? "true" : "false"); + fprintf(stream, "fused_up_gate: %s # default: true\n", params.fused_up_gate ?
"true" : "false"); fprintf(stream, "ser: %d,%g # defaulr: -1,0\n", params.min_experts, params.thresh_experts); fprintf(stream, "temp: %f # default: 0.8\n", sparams.temp); diff --git a/common/common.h b/common/common.h index fb15c77f..6e705eef 100644 --- a/common/common.h +++ b/common/common.h @@ -191,6 +191,7 @@ struct gpt_params { int mla_attn = 0; // MLA 0: standard attention, 1: MLA with K and transposed V cache, 2: MLA with just K cache int attn_max_batch = 0; // Max batch size to use when computing attention (only applicable if flash_attn = false) bool fused_moe_up_gate = false; // fused up*unary(gate) op for MoE models + bool fused_up_gate = true; // fused up*unary(gate) op int min_experts = -1; float thresh_experts = 0; diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp index 74f51494..6bb646bd 100644 --- a/examples/llama-bench/llama-bench.cpp +++ b/examples/llama-bench/llama-bench.cpp @@ -261,6 +261,7 @@ struct cmd_params { bool warmup; bool repack = false; bool fmoe = false; + bool no_fug = false; bool use_thp = false; output_formats output_format; output_formats output_format_stderr; @@ -297,6 +298,7 @@ static const cmd_params cmd_params_defaults = { /* repack */ false, /* use_thp */ false, /* fmoe */ false, + /* no_fug */ false, /* output_format */ MARKDOWN, /* output_format_stderr */ NONE, }; @@ -339,6 +341,7 @@ static void print_usage(int /* argc */, char ** argv) { printf(" -thp, --transparent-huge-pages <0|1> (default: %s)\n", cmd_params_defaults.use_thp? "1" : "0"); printf(" -ot, --override-tensor pattern (default: none)\n"); printf(" -fmoe, --fused-moe <0|1> (default: %s)\n", cmd_params_defaults.fmoe? "1" : "0"); + printf(" -no-fug, --no-fused-up-gate <0|1> (default: %s)\n", cmd_params_defaults.no_fug? 
"1" : "0"); printf("\n"); printf("Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter multiple times.\n"); } @@ -736,6 +739,12 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { break; } params.fmoe = std::stoi(argv[i]); + } else if (arg == "-no-fug" || arg == "--no-fused-up-gate") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.no_fug = std::stoi(argv[i]); } else if (arg == "-ot" || arg == "--override-tensor") { if (++i >= argc) { invalid_param = true; @@ -820,6 +829,7 @@ struct cmd_params_instance { bool embeddings; bool repack = false; bool fmoe = false; + bool no_fug = false; bool use_thp = false; const llama_model_tensor_buft_override* buft_overrides; @@ -866,6 +876,7 @@ struct cmd_params_instance { cparams.mla_attn = mla_attn; cparams.attn_max_batch = attn_max_batch; cparams.fused_moe_up_gate = fmoe; + cparams.fused_up_gate = !no_fug; cparams.min_experts = ser.first; cparams.thresh_experts = ser.second; cparams.embeddings = embeddings; @@ -924,6 +935,7 @@ static std::vector get_cmd_params_instances(const cmd_param /* .embeddings = */ embd, /* .repack = */ params.repack, /* .fmoe = */ params.fmoe, + /* .no_fug = */ params.no_fug, /* .use_thp = */ params.use_thp, /* .buft_overrides=*/ params.buft_overrides.data(), }; @@ -958,6 +970,7 @@ static std::vector get_cmd_params_instances(const cmd_param /* .embeddings = */ embd, /* .repack = */ params.repack, /* .fmoe = */ params.fmoe, + /* .no_fug = */ params.no_fug, /* .use_thp = */ params.use_thp, /* .buft_overrides=*/ params.buft_overrides.data(), }; @@ -992,6 +1005,7 @@ static std::vector get_cmd_params_instances(const cmd_param /* .embeddings = */ embd, /* .repack = */ params.repack, /* .fmoe = */ params.fmoe, + /* .no_fug = */ params.no_fug, /* .use_thp = */ params.use_thp, /* .buft_overrides=*/ params.buft_overrides.data(), }; @@ -1026,6 +1040,7 @@ static std::vector get_cmd_params_instances(const cmd_param /* .embeddings = */ embd, /* .repack = */ params.repack, /* .fmoe = */ params.fmoe, + /* .no_fug = */ params.no_fug, /* .use_thp = */ params.use_thp, /* .buft_overrides=*/ params.buft_overrides.data(), }; @@ -1071,6 +1086,7 @@ struct test { bool embeddings; bool repack = false; bool fmoe = false; + bool no_fug = false; bool use_thp = false; int n_prompt; int n_gen; @@ -1104,7 +1120,7 @@ struct test { use_mmap = inst.use_mmap; embeddings = inst.embeddings; repack = inst.repack; - fmoe = inst.fmoe; + no_fug = inst.no_fug; use_thp = inst.use_thp; n_prompt = inst.n_prompt; n_gen = inst.n_gen; @@ -1196,7 +1212,7 @@ struct test { "n_threads", "type_k", "type_v", "n_gpu_layers", "split_mode", "main_gpu", "no_kv_offload", "flash_attn", "mla_attn", "attn_max_batch", "ser", - "tensor_split", "use_mmap", "embeddings", "repack", "fused_moe", "use_thp", + "tensor_split", "use_mmap", "embeddings", "repack", "fused_moe", "fused_up_gate", "use_thp", "n_prompt", "n_gen", "test_time", "avg_ns", "stddev_ns", "avg_ts", "stddev_ts", "test", @@ -1218,7 +1234,7 @@ struct test { if (field == "cuda" || field == "vulkan" || field == "kompute" || field == "metal" || field == "gpu_blas" || field == "blas" || field == "sycl" ||field == "f16_kv" || field == "no_kv_offload" || field == "flash_attn" || field == "use_mmap" || field == "embeddings" || field == "repack" || field == "use_thp" || - field == "fused_moe") { + field == "fused_moe" || field == "fused_up_gate") { return BOOL; } if (field == "avg_ts" || field == "stddev_ts") { @@ -1261,7 +1277,7 @@ struct test { 
std::to_string(main_gpu), std::to_string(no_kv_offload), std::to_string(flash_attn), std::to_string(mla_attn), std::to_string(attn_max_batch), ser_to_string(ser), tensor_split_str, std::to_string(use_mmap), std::to_string(embeddings), - std::to_string(repack), std::to_string(fmoe), std::to_string(use_thp), + std::to_string(repack), std::to_string(fmoe), std::to_string(no_fug), std::to_string(use_thp), std::to_string(n_prompt), std::to_string(n_gen), test_time, std::to_string(avg_ns()), std::to_string(stdev_ns()), std::to_string(avg_ts()), std::to_string(stdev_ts()), @@ -1445,6 +1461,9 @@ struct markdown_printer : public printer { if (field == "fused_moe") { return 4; } + if (field == "fused_up_gate") { + return 6; + } if (field == "test") { return 13; } @@ -1494,6 +1513,9 @@ struct markdown_printer : public printer { if (field == "fused_moe") { return "fmoe"; } + if (field == "fused_up_gate") { + return "no-fug"; + } if (field == "embeddings") { return "embd"; } @@ -1567,6 +1589,9 @@ struct markdown_printer : public printer { if (params.fmoe != cmd_params_defaults.fmoe) { fields.emplace_back("fused_moe"); } + if (params.no_fug != cmd_params_defaults.no_fug) { + fields.emplace_back("fused_up_gate"); + } fields.emplace_back("test"); fields.emplace_back("t/s"); diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h index 286e0cc4..9e78903c 100644 --- a/ggml/include/ggml.h +++ b/ggml/include/ggml.h @@ -611,6 +611,7 @@ extern "C" { GGML_OP_MUL_MAT, GGML_OP_MUL_MAT_ID, GGML_OP_OUT_PROD, + GGML_OP_FUSED_UP_GATE, GGML_OP_MOE_FUSED_UP_GATE, GGML_OP_SCALE, @@ -1408,6 +1409,13 @@ extern "C" { struct ggml_tensor * a_gate_b, enum ggml_unary_op op); + GGML_API struct ggml_tensor * ggml_fused_up_gate( + struct ggml_context * ctx, + struct ggml_tensor * up, + struct ggml_tensor * gate, + struct ggml_tensor * b, + enum ggml_unary_op op); + // A: m columns, n rows, // B: p columns, n rows, // result is m columns, p rows diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu index f7e0b489..d8b1a2aa 100644 --- a/ggml/src/ggml-cuda.cu +++ b/ggml/src/ggml-cuda.cu @@ -2521,7 +2521,7 @@ static bool ggml_cuda_mul_mat_id(ggml_backend_cuda_context & ctx, ggml_tensor * return false; } -static bool ggml_cuda_up_gate_unary(ggml_backend_cuda_context & ctx, ggml_tensor * dst, ggml_tensor * next) { +static bool ggml_cuda_moe_up_gate_unary(ggml_backend_cuda_context & ctx, ggml_tensor * dst, ggml_tensor * next) { const ggml_tensor * src0_1 = dst->src[0]; const ggml_tensor * src0_2 = dst->src[1]; const ggml_tensor * src0 = src0_1; @@ -2972,6 +2972,60 @@ static bool ggml_cuda_up_gate_unary(ggml_backend_cuda_context & ctx, ggml_tensor return fuse_down; } +static void ggml_cuda_up_gate_unary(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0_1 = dst->src[0]; + const ggml_tensor * src0_2 = dst->src[1]; + const ggml_tensor * src1 = dst->src[2]; + + GGML_ASSERT(ggml_is_quantized(src0_1->type)); + GGML_ASSERT(src0_1->type == src0_2->type); + GGML_ASSERT(src1->ne[2] == 1); + GGML_ASSERT(src1->ne[3] == 1); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(!ggml_backend_buffer_is_cuda_split(src0_1->buffer)); + GGML_ASSERT(!ggml_backend_buffer_is_cuda_split(src0_2->buffer)); + + auto stream = ctx.stream(); + + auto ne10_padded = GGML_PAD(src1->ne[0], MATRIX_ROW_PADDING); + auto nb10_padded = ne10_padded*sizeof(block_q8_1)/QK8_1; + auto quantized_size = nb10_padded*src1->ne[1]; + if (src1->ne[1] > 8) { + quantized_size += 
get_mmq_x_max_host(ggml_cuda_info().devices[ctx.device].cc)*sizeof(block_q8_1_mmq); + } + ggml_cuda_pool_alloc dst_up(ctx.pool(), ggml_nelements(dst)); + ggml_cuda_pool_alloc src1_quantized(ctx.pool(), quantized_size); + if (src1->ne[1] <= 8) { + quantize_row_q8_1_cuda((const float *)src1->data, (void *)src1_quantized.get(), src1->ne[0], src1->ne[1], 1, ne10_padded, + src0_1->type, stream); + CUDA_CHECK(cudaGetLastError()); + + ggml_cuda_op_mul_mat_vec_q(ctx, src0_1, src1, dst, (const char *)src0_1->data, nullptr, src1_quantized.get(), dst_up.get(), + 0, src0_1->ne[1], src1->ne[1], ne10_padded, stream); + CUDA_CHECK(cudaGetLastError()); + + ggml_cuda_op_mul_mat_vec_q(ctx, src0_2, src1, dst, (const char *)src0_2->data, nullptr, src1_quantized.get(), (float *)dst->data, + 0, src0_2->ne[1], src1->ne[1], ne10_padded, stream); + CUDA_CHECK(cudaGetLastError()); + } else { + quantize_mmq_q8_1_cuda((const float *)src1->data, src1_quantized.get(), src1->ne[0], src1->ne[1], 1, ne10_padded, src0_1->type, stream); + CUDA_CHECK(cudaGetLastError()); + + ggml_cuda_op_mul_mat_q(ctx, src0_1, src1, dst, (const char *)src0_1->data, nullptr, src1_quantized.get(), dst_up.get(), + 0, src0_1->ne[1], src1->ne[1], ne10_padded, stream); + CUDA_CHECK(cudaGetLastError()); + + ggml_cuda_op_mul_mat_q(ctx, src0_2, src1, dst, (const char *)src0_2->data, nullptr, src1_quantized.get(), (float *)dst->data, + 0, src0_1->ne[1], src1->ne[1], ne10_padded, stream); + CUDA_CHECK(cudaGetLastError()); + } + + ggml_fused_mul_unary(ctx, (ggml_unary_op)dst->op_params[0], ggml_nelements(dst), + (const float *)dst->data, dst_up.get(), (float *)dst->data); + CUDA_CHECK(cudaGetLastError()); + +} + static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct ggml_tensor * dst, struct ggml_tensor * next, bool& skip_next) { // why is this here instead of mul_mat? if (dst->src[0] != nullptr && ggml_backend_buffer_is_cuda_split(dst->src[0]->buffer)) { @@ -3097,7 +3151,10 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg skip_next = ggml_cuda_mul_mat_id(ctx, dst, next); break; case GGML_OP_MOE_FUSED_UP_GATE: - skip_next = ggml_cuda_up_gate_unary(ctx, dst, next); + skip_next = ggml_cuda_moe_up_gate_unary(ctx, dst, next); + break; + case GGML_OP_FUSED_UP_GATE: + ggml_cuda_up_gate_unary(ctx, dst); break; case GGML_OP_SCALE: ggml_cuda_op_scale(ctx, dst); @@ -3950,10 +4007,12 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: case GGML_OP_MOE_FUSED_UP_GATE: + case GGML_OP_FUSED_UP_GATE: { + bool is_fused_up_gate = op->op == GGML_OP_MOE_FUSED_UP_GATE || op->op == GGML_OP_FUSED_UP_GATE; struct ggml_tensor * a = op->src[0]; - struct ggml_tensor * b = op->op == GGML_OP_MOE_FUSED_UP_GATE ? op->src[2] : op->src[1]; - if (op->op == GGML_OP_MOE_FUSED_UP_GATE && a->type != op->src[1]->type) { + struct ggml_tensor * b = is_fused_up_gate ? 
op->src[2] : op->src[1]; + if (is_fused_up_gate && a->type != op->src[1]->type) { printf("%s: returning false for GGML_OP_MOE_FUSED_UP_GATE because src0->type != src1->type\n", __func__); return false; } diff --git a/ggml/src/ggml-cuda/mmq.cuh b/ggml/src/ggml-cuda/mmq.cuh index 648e8d71..9bf16427 100644 --- a/ggml/src/ggml-cuda/mmq.cuh +++ b/ggml/src/ggml-cuda/mmq.cuh @@ -108,6 +108,7 @@ static mmq_q8_1_ds_layout mmq_get_q8_1_ds_layout(const ggml_type type_x) { case GGML_TYPE_IQ4_KT: return MMQ_Q8_1_DS_LAYOUT_D4; default: + fprintf(stderr, "Unhandled type %s (%d)\n", ggml_type_name(type_x), type_x); GGML_ABORT("fatal error"); break; } diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index d671d539..c6912301 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -4054,6 +4054,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "MUL_MAT", "MUL_MAT_ID", "OUT_PROD", + "FUSED_UP_GATE", "MOE_FUSED_UP_GATE", "SCALE", @@ -4115,7 +4116,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "CROSS_ENTROPY_LOSS_BACK", }; -static_assert(GGML_OP_COUNT == 82, "GGML_OP_COUNT != 82"); +static_assert(GGML_OP_COUNT == 83, "GGML_OP_COUNT != 83"); static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "none", @@ -4151,6 +4152,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "X[i]*Y", "X*Y", "X*Y1&X*Y2", + "X*Y1&X*Y2", "x*v", "y-\\>view(x)", @@ -4211,7 +4213,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "cross_entropy_loss_back(x,y)", }; -static_assert(GGML_OP_COUNT == 82, "GGML_OP_COUNT != 82"); +static_assert(GGML_OP_COUNT == 83, "GGML_OP_COUNT != 83"); static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2"); @@ -7162,6 +7164,44 @@ struct ggml_tensor * ggml_moe_up_gate_ext( return result; } +struct ggml_tensor * ggml_fused_up_gate( + struct ggml_context * ctx, + struct ggml_tensor * up, + struct ggml_tensor * gate, + struct ggml_tensor * b, + enum ggml_unary_op op) { + if (!ggml_is_quantized(up->type) || up->type != gate->type || !ggml_are_same_shape(up, gate)) { + struct ggml_tensor * result_up = ggml_mul_mat(ctx, up, b); + struct ggml_tensor * result_gate = ggml_mul_mat(ctx, gate, b); + return ggml_fused_mul_unary(ctx, result_gate, result_up, op); + } + GGML_ASSERT(!ggml_is_transposed(up)); + GGML_ASSERT(!ggml_is_transposed(gate)); + + GGML_ASSERT(up->ne[2] == 1); // up is 2D (a single matrix, not one per expert) + GGML_ASSERT(up->ne[3] == 1); // up is 2D (a single matrix, not one per expert) + GGML_ASSERT(b->ne[2] == 1); // b is 2D + GGML_ASSERT(b->ne[3] == 1); // b is 2D + GGML_ASSERT(up->ne[0] == b->ne[0]); // can_mul_mat + + const bool is_node = false; + + const int64_t ne[4] = { up->ne[1], b->ne[1], 1, 1 }; + struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); + + result->op = GGML_OP_FUSED_UP_GATE; + result->grad = is_node ?
ggml_dup_tensor(ctx, result) : NULL; + result->src[0] = up; + result->src[1] = gate; + result->src[2] = b; + result->src[3] = NULL; + result->src[4] = NULL; + + ggml_set_op_params_i32(result, 0, (int32_t) op); + + return result; +} + // ggml_out_prod @@ -15667,6 +15707,75 @@ static void ggml_compute_forward_mul_mat_id_up_gate( #undef MMID_MATRIX_ROW } + +static void ggml_compute_forward_mul_mat_up_gate( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + + GGML_ASSERT(dst->src[0]->type == dst->src[1]->type); + GGML_ASSERT(ggml_are_same_shape(dst->src[0], dst->src[1])); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + const struct ggml_tensor * src1 = dst->src[2]; + const struct ggml_tensor * src0_1 = dst->src[0]; + const struct ggml_tensor * src0_2 = dst->src[1]; + const struct ggml_tensor * src0 = src0_1; // so GGML_TENSOR_BINARY_OP_LOCALS works + + GGML_ASSERT(ggml_is_quantized(src0_1->type) && src0_1->type == src0_2->type); + + GGML_TENSOR_BINARY_OP_LOCALS + + const int ith = params->ith; + const int nth = params->nth; + + const enum ggml_type type = src0->type; + + enum ggml_type const vec_dot_type = type_traits[type].vec_dot_type; + + // we don't support permuted src0 or src1 + GGML_ASSERT(nb00 == ggml_type_size(type)); + GGML_ASSERT(nb10 == ggml_type_size(src1->type)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + GGML_ASSERT(ne13 == 1); + + ggml_from_float_t const from_float = type_traits[vec_dot_type].from_float; + + char * wdata = params->wdata; + + const size_t nbw1 = ggml_row_size(vec_dot_type, ne10); + const size_t nbw2 = nbw1*ne11; + const size_t nbw3 = nbw2*ne12; + + assert(params->wsize >= ne13*nbw3); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + + for (int64_t i13 = 0; i13 < ne13; ++i13) { + for (int64_t i12 = 0; i12 < ne12; ++i12) { + for (int64_t i11 = ith; i11 < ne11; i11 += nth) { + from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), + (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1), + ne10); + } + } + } + + ggml_barrier(params->shared); + + const size_t row_size = ggml_row_size(vec_dot_type, ne10); + + if (!iqk_moe_fused_up_gate(ne01, ne11, ne00, ne11, dst->op_params[0], + type, src0_1->data, src0_2->data, nb01, + vec_dot_type, (const char *)wdata, row_size, + NULL, NULL, + (float *)dst->data, nb1, nb2, + NULL, ith, nth)) GGML_ABORT("fatal error"); + +} #endif // ggml_compute_forward_out_prod @@ -20403,6 +20512,10 @@ static bool ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_mul_mat_id_up_gate(params, tensor); } break; + case GGML_OP_FUSED_UP_GATE: + { + ggml_compute_forward_mul_mat_up_gate(params, tensor); + } break; case GGML_OP_OUT_PROD: { ggml_compute_forward_out_prod(params, tensor); @@ -21172,6 +21285,10 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor { GGML_ABORT("fatal error"); // TODO: not implemented } + case GGML_OP_FUSED_UP_GATE: + { + GGML_ABORT("fatal error"); // TODO: not implemented + } case GGML_OP_OUT_PROD: { GGML_ABORT("fatal error"); // TODO: not implemented @@ -22189,6 +22306,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: case GGML_OP_MOE_FUSED_UP_GATE: + case GGML_OP_FUSED_UP_GATE: case GGML_OP_OUT_PROD: { n_tasks = n_threads; @@ -22411,6 +22529,16 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int 
n_threa cur += n_as * sizeof(int64_t); // matrix_row_counts cur += n_as * src2->ne[2] * sizeof(int64_t); // matrix_rows } break; + case GGML_OP_FUSED_UP_GATE: + { + cur = 0; + const struct ggml_tensor * src0 = node->src[0]; + const struct ggml_tensor * src2 = node->src[2]; + const enum ggml_type vec_dot_type = type_traits[src0->type].vec_dot_type; + if (src2->type != vec_dot_type) { + cur += ggml_row_size(vec_dot_type, src2->ne[0]) * ggml_nrows(src2); + } + } break; case GGML_OP_OUT_PROD: { if (ggml_is_quantized(node->src[0]->type)) { diff --git a/ggml/src/iqk/iqk_mul_mat.cpp b/ggml/src/iqk/iqk_mul_mat.cpp index eb50cc3e..7d7126f5 100644 --- a/ggml/src/iqk/iqk_mul_mat.cpp +++ b/ggml/src/iqk/iqk_mul_mat.cpp @@ -739,7 +739,7 @@ extern "C" IQK_API bool iqk_moe_fused_up_gate(long Nx, long Ny, long ne00, int n float * C, long nb1, long nb2, const void * vrow_mapping, int ith, int nth) { const mmid_row_mapping * row_mapping = (const mmid_row_mapping *)vrow_mapping; - assert(row_mapping != nullptr); + //assert(row_mapping != nullptr); MulMat mm; diff --git a/include/llama.h b/include/llama.h index a5939769..6095d404 100644 --- a/include/llama.h +++ b/include/llama.h @@ -419,7 +419,8 @@ extern "C" { bool flash_attn; // whether to use flash attention [EXPERIMENTAL] int mla_attn; // whether to use MLA attention [EXPERIMENTAL] int attn_max_batch; // maximum batch size for attention computations [EXPERIMENTAL] - bool fused_moe_up_gate; // whether to use fused MoE up/down op [EXPERIMENTAL] + bool fused_moe_up_gate; // whether to use fused MoE up/gate op + bool fused_up_gate; // whether to use fused up/gate op [EXPERIMENTAL] int min_experts; float thresh_experts; diff --git a/src/llama.cpp b/src/llama.cpp index 3aabb1b0..20d2ec4b 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -2072,6 +2072,7 @@ struct llama_cparams { int mla_attn; int attn_max_batch; bool fused_moe_up_gate; + bool fused_up_gate; int min_experts; float thresh_experts; @@ -7612,6 +7613,34 @@ static struct ggml_tensor * llm_build_ffn( llm_ffn_gate_type type_gate, const llm_build_cb & cb, int il) { + + if (lctx.cparams.fused_up_gate && + up && gate && !up_b && !up_s && !gate_b && !gate_s && type_gate == LLM_FFN_PAR && + (type_op == LLM_FFN_SILU || type_op == LLM_FFN_RELU || (type_op == LLM_FFN_GELU && !act_scales))) { + auto unary_op = type_op == LLM_FFN_SILU ? GGML_UNARY_OP_SILU : + type_op == LLM_FFN_RELU ? GGML_UNARY_OP_RELU : GGML_UNARY_OP_GELU; + cur = ggml_fused_up_gate(ctx, up, gate, cur, unary_op); + cb(cur, "ffn_up_gate", il); + if (down) { + cur = llm_build_lora_mm(lctx, ctx, down, cur); + if (lctx.model.arch == LLM_ARCH_GLM4 || lctx.model.arch == LLM_ARCH_GLM4_MOE) { + // GLM4 and GLM4_MOE seem to have numerical issues with half-precision accumulators + ggml_mul_mat_set_prec(cur, GGML_PREC_F32); + } + } + if (down_b) { + cb(cur, "ffn_down", il); + } + if (down_b) { + cur = ggml_add(ctx, cur, down_b); + } + if (down_s) { + cur = ggml_mul(ctx, cur, down_s); + cb(cur, "ffn_down_s", il); + } + return cur; + } + struct ggml_tensor * tmp = up ?
llm_build_lora_mm(lctx, ctx, up, cur) : cur; cb(tmp, "ffn_up", il); @@ -8223,6 +8252,7 @@ struct llm_build_context { const int mla_attn; const int attn_max_batch; const bool fused_moe_up_gate; + const bool fused_up_gate; const int min_experts; const float thresh_experts; @@ -8278,6 +8308,7 @@ struct llm_build_context { mla_attn (cparams.mla_attn), attn_max_batch (cparams.attn_max_batch), fused_moe_up_gate(cparams.fused_moe_up_gate), + fused_up_gate (cparams.fused_up_gate), min_experts (cparams.min_experts), thresh_experts (cparams.thresh_experts), pooling_type (cparams.pooling_type), @@ -18923,6 +18954,7 @@ struct llama_context_params llama_context_default_params() { /*.mla_attn =*/ 0, /*.attn_max_batch =*/ 0, /*.fused_moe_up_gate =*/ false, + /*.fused_up_gate =*/ true, /*.min_experts =*/ -1, /*.thtesh_experts =*/ 0.0f, /*.abort_callback =*/ nullptr, @@ -19130,6 +19162,7 @@ struct llama_context * llama_new_context_with_model( cparams.mla_attn = params.mla_attn; cparams.attn_max_batch = params.attn_max_batch; cparams.fused_moe_up_gate= params.fused_moe_up_gate; + cparams.fused_up_gate = params.fused_up_gate; cparams.min_experts = params.min_experts; cparams.thresh_experts = params.thresh_experts; @@ -19209,6 +19242,7 @@ struct llama_context * llama_new_context_with_model( LLAMA_LOG_INFO("%s: mla_attn = %d\n", __func__, cparams.mla_attn); LLAMA_LOG_INFO("%s: attn_max_b = %d\n", __func__, cparams.attn_max_batch); LLAMA_LOG_INFO("%s: fused_moe = %d\n", __func__, cparams.fused_moe_up_gate); + LLAMA_LOG_INFO("%s: fused_up_gate = %d\n", __func__, cparams.fused_up_gate); LLAMA_LOG_INFO("%s: ser = %d, %g\n", __func__, cparams.min_experts, cparams.thresh_experts); LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base); LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);
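Usage sketch (illustrative only, not part of the patch): at the graph level the new GGML_OP_FUSED_UP_GATE node computes the same thing as the unfused pair of mul_mat nodes it replaces, which is also what ggml_fused_up_gate() itself falls back to when up/gate are not quantized, differ in type, or differ in shape. Assuming tensors up, gate and the current activations cur as they appear in llm_build_ffn():

    // unfused: two mul_mat nodes, then unary(gate_out) * up_out
    struct ggml_tensor * up_out    = ggml_mul_mat(ctx, up,   cur);
    struct ggml_tensor * gate_out  = ggml_mul_mat(ctx, gate, cur);
    struct ggml_tensor * ffn       = ggml_fused_mul_unary(ctx, gate_out, up_out, GGML_UNARY_OP_SILU);

    // fused: a single graph node doing both matrix multiplications and the
    // activation; the result feeds the down projection as before
    struct ggml_tensor * ffn_fused = ggml_fused_up_gate(ctx, up, gate, cur, GGML_UNARY_OP_SILU);

The op is enabled by default. Tools that use the common argument parser take -no-fug (or --no-fused-up-gate) with no value to disable it, while llama-bench takes an explicit 0/1 value, for example:

    ./llama-bench -m model.gguf -no-fug 1    # benchmark with the fused up/gate op disabled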