mirror of
https://github.com/ikawrakow/ik_llama.cpp.git
synced 2026-03-11 14:30:02 +00:00
Fused FFN_UP+FFN_GATE op (#741)
* Fused up+gate+unary for regular (not MoE) FFN - CPU * WIP CUDA * Seems to be working on CUDA For a dense model we get 2-3% speedup for PP and ~0.6% for TG. * Add command line option This time the option is ON by default, and one needs to turn it off via -no-fug or --no-fused-up-gate --------- Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
This commit is contained in:
@@ -611,6 +611,7 @@ extern "C" {
         GGML_OP_MUL_MAT,
         GGML_OP_MUL_MAT_ID,
         GGML_OP_OUT_PROD,
+        GGML_OP_FUSED_UP_GATE,
         GGML_OP_MOE_FUSED_UP_GATE,

         GGML_OP_SCALE,
@@ -1408,6 +1409,13 @@ extern "C" {
             struct ggml_tensor  * a_gate_b,
             enum ggml_unary_op    op);

+    GGML_API struct ggml_tensor * ggml_fused_up_gate(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * up,
+            struct ggml_tensor  * gate,
+            struct ggml_tensor  * b,
+            enum ggml_unary_op    op);
+
     // A: m columns, n rows,
     // B: p columns, n rows,
     // result is m columns, p rows
Reference in New Issue · Block a user