From 5e64235d4c4f54072191e38ddfd5c1d89feb948c Mon Sep 17 00:00:00 2001
From: Kawrakow
Date: Wed, 24 Dec 2025 14:01:29 +0100
Subject: [PATCH] Be able to set reduce op data type for split mode "graph"
 (#1087)

Co-authored-by: Iwan Kawrakow
---
 common/common.cpp           | 12 ++++++++++++
 common/common.h             |  1 +
 include/llama.h             |  1 +
 src/llama-build-context.cpp | 10 +++++-----
 src/llama-cparams.h         |  1 +
 src/llama-load-tensors.cpp  |  1 -
 src/llama.cpp               |  3 +++
 7 files changed, 23 insertions(+), 6 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 17b9f72d..626da8d6 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1436,6 +1436,14 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.split_mode_graph_scheduling = true;
         return true;
     }
+    if (arg == "-smf16" || arg == "--split-mode-f16") {
+        params.split_mode_f16 = true;
+        return true;
+    }
+    if (arg == "-smf32" || arg == "--split-mode-f32") {
+        params.split_mode_f16 = false;
+        return true;
+    }
     if (arg == "--numa") {
         CHECK_ARG
         std::string value(argv[i]);
@@ -2122,6 +2130,8 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", "-ser, --smart-expert-reduction", "experts reduction (default: %d,%g)", params.min_experts, params.thresh_experts});
     options.push_back({ "*", "-mqkv, --merge-qkv,", "merge Q,K,V (default: %d)", params.merge_qkv});
     options.push_back({ "*", "-khad, --k-cache-hadamard,", "Use Hadamard transform for K-cache (default: %d)", params.k_cache_hadamard});
+    options.push_back({ "*", "-smf16, --split-mode-f16,", "Use f16 for data exchange between GPUs (default: %d)", params.split_mode_f16});
+    options.push_back({ "*", "-smf32, --split-mode-f32,", "Use f32 for data exchange between GPUs (default: %d)", !params.split_mode_f16});
     options.push_back({ "*", "-smgs, --split-mode-graph-scheduling,", "Force Split Mode Graph Scheduling (default: %d)", params.split_mode_graph_scheduling});
     options.push_back({ "*", "-vq, --validate-quants", "validate quantized data while loading the model (default: %d)", params.validate_quants});
     options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"
@@ -3156,6 +3166,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     cparams.graph_reuse = params.graph_reuse;
     cparams.k_cache_hadamard = params.k_cache_hadamard;
     cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
+    cparams.split_mode_f16 = params.split_mode_f16;
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;
     cparams.only_active_experts = params.only_active_exps;
@@ -4138,6 +4149,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "graph_reuse: %s # default: false\n", params.graph_reuse ? "true" : "false");
     fprintf(stream, "k_cache_hadamard: %s # default: false\n", params.k_cache_hadamard ? "true" : "false");
     fprintf(stream, "split_mode_graph_scheduling: %s # default: false\n", params.split_mode_graph_scheduling ? "true" : "false");
+    fprintf(stream, "split_mode_f16: %s # default: true\n", params.split_mode_f16 ? "true" : "false");
     fprintf(stream, "ser: %d,%g # defaulr: -1,0\n", params.min_experts, params.thresh_experts);
     fprintf(stream, "temp: %f # default: 0.8\n", sparams.temp);

diff --git a/common/common.h b/common/common.h
index f3a70beb..8fe2287b 100644
--- a/common/common.h
+++ b/common/common.h
@@ -289,6 +289,7 @@ struct gpt_params {
     bool merge_qkv = false; // if true, merge separate Q, K, V tensors into a single, contiguous tensor
     bool k_cache_hadamard = false; // if true, use Hadamard transform for the K-cache (only makes sense with quantized cache)
     bool split_mode_graph_scheduling = false; // if true, force split mode graph scheduling
+    bool split_mode_f16 = true; // if true, intermediate results will be cast to f16 before copying to other GPUs to perform reduce ops
 
     std::string cache_type_k = "f16"; // KV cache data type for the K
     std::string cache_type_v = "f16"; // KV cache data type for the V

diff --git a/include/llama.h b/include/llama.h
index 2f56a01c..a33ce538 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -444,6 +444,7 @@ extern "C" {
         bool only_active_experts;
         bool k_cache_hadamard; // if true, apply Hadamard transfrom to K-cache
         bool split_mode_graph_scheduling; // if true, force split mode graph scheduling
+        bool split_mode_f16; // if true, cast intermediate results to f16 before copying to other GPUs
 
         // Abort callback
         // if it returns true, execution of llama_decode() will be aborted

diff --git a/src/llama-build-context.cpp b/src/llama-build-context.cpp
index f3fc0caf..83e65bf7 100644
--- a/src/llama-build-context.cpp
+++ b/src/llama-build-context.cpp
@@ -697,7 +697,7 @@ ggml_tensor * llm_build_context::llm_build_ffn(
                 // GLM4 and GLM4_MOE seem to have numerical issues with half-precision accumulators
                 ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
             }
-            if (cur->ne[1] >= 32) {
+            if (cur->ne[1] > 32 && lctx.cparams.split_mode_f16) {
                 cur = ggml_cast(ctx, cur, GGML_TYPE_F16);
             }
             if (graph) {
@@ -1185,7 +1185,7 @@ llm_expert_gating_func_type gating_op,
                     split_down_shexp->splits[id], split_down_b_shexp ? split_down_b_shexp->splits[id] : nullptr,
                     nullptr, nullptr, type_op_shexp, LLM_FFN_PAR, cb, il);
             cb(shared_out, "ffn_shexp_out", il_cb);
-            if (shared_out->ne[1] > 32) {
+            if (shared_out->ne[1] > 32 && lctx.cparams.split_mode_f16) {
                 shared_out = ggml_cast(ctx, shared_out, GGML_TYPE_F16);
             }
             results.push_back(shared_out);
@@ -1202,7 +1202,7 @@ llm_expert_gating_func_type gating_op,
                 cb(cur, "ffn_shared_combined", il);
             }
         }
-        if (routed_out->ne[1] > 32) {
+        if (routed_out->ne[1] > 32 && lctx.cparams.split_mode_f16) {
             auto routed_out_f16 = ggml_cast(ctx, routed_out, GGML_TYPE_F16);
             cur = ggml_add(ctx, routed_out_f16, cur);
         } else {
@@ -1279,7 +1279,7 @@ llm_expert_gating_func_type gating_op,
         } else {
             cur = routed_out;
         }
-        if (cur->ne[1] >= 32) {
+        if (cur->ne[1] > 32 && lctx.cparams.split_mode_f16) {
             cur = ggml_cast(ctx, cur, GGML_TYPE_F16);
             cb(cur, "ffn_out_f16", il_cb);
         }
@@ -9513,7 +9513,7 @@ ggml_tensor * llm_build_context::build_std_attention(ggml_cgraph * gf, ggml_tens
             cur = ggml_add(ctx0, cur, bo->splits[id]);
             cb(cur, "kqv_wo_biased", il_cb);
         }
-        if (cur->ne[1] >= 32) {
+        if (cur->ne[1] > 32 && lctx.cparams.split_mode_f16) {
             cur = ggml_cast(ctx0, cur, GGML_TYPE_F16);
         }
         ggml_build_forward_expand(gf, cur);

diff --git a/src/llama-cparams.h b/src/llama-cparams.h
index c45d9975..b639e818 100644
--- a/src/llama-cparams.h
+++ b/src/llama-cparams.h
@@ -41,6 +41,7 @@ struct llama_cparams {
     bool graph_reuse;
    bool k_cache_hadamard;
     bool split_mode_graph_scheduling;
+    bool split_mode_f16;
 
     int min_experts;
     float thresh_experts;

diff --git a/src/llama-load-tensors.cpp b/src/llama-load-tensors.cpp
index a0f5261d..cc50a647 100644
--- a/src/llama-load-tensors.cpp
+++ b/src/llama-load-tensors.cpp
@@ -2265,7 +2265,6 @@ bool create_tensors_helper::create_cohere2_tensors(const LLM_TN & tn) {
     for (int i = 0; i < n_layer; ++i) {
         auto & layer = model.layers[i];
         ggml_context * ctx_split = ctx_for_layer_split(i);
-        ggml_context * ctx_layer = ctx_for_layer(i);
 
         layer.attn_norm = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);

diff --git a/src/llama.cpp b/src/llama.cpp
index 326bf0e7..94242f0c 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -4055,6 +4055,7 @@ struct llama_context_params llama_context_default_params() {
         /*.only_active_experts =*/ false,
         /*.k_cache_hadamard =*/ false,
         /*.split_mode_graph_scheduling =*/ false,
+        /*.split_mode_f16 =*/ true,
         /*.abort_callback =*/ nullptr,
         /*.abort_callback_data =*/ nullptr,
         /*.offload_policy =*/ nullptr,
@@ -4344,6 +4345,7 @@ struct llama_context * llama_new_context_with_model(
     cparams.graph_reuse = params.graph_reuse;
     cparams.k_cache_hadamard = params.k_cache_hadamard;
     cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
+    cparams.split_mode_f16 = params.split_mode_f16;
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;
     cparams.cuda_params = params.cuda_params;
@@ -4433,6 +4435,7 @@ struct llama_context * llama_new_context_with_model(
     LLAMA_LOG_INFO("%s: graph_reuse = %d\n", __func__, cparams.graph_reuse);
     LLAMA_LOG_INFO("%s: k_cache_hadam = %d\n", __func__, cparams.k_cache_hadamard);
     LLAMA_LOG_INFO("%s: split_mode_graph_scheduling = %d\n", __func__, cparams.split_mode_graph_scheduling);
+    LLAMA_LOG_INFO("%s: split_mode_f16= %d\n", __func__, cparams.split_mode_f16);
     LLAMA_LOG_INFO("%s: ser = %d, %g\n", __func__, cparams.min_experts, cparams.thresh_experts);
     LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
     LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);
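
Note (not part of the patch above): a minimal sketch of driving the new setting from the C API.
Only the split_mode_f16 field itself comes from this patch; the surrounding calls
(llama_backend_init, llama_model_default_params, llama_load_model_from_file,
llama_context_default_params, llama_new_context_with_model) are the existing llama.h entry
points, and the model path is a placeholder.

    // sketch.cpp -- hypothetical caller, built against llama.h from this tree
    #include "llama.h"

    int main() {
        llama_backend_init();

        llama_model_params mparams = llama_model_default_params();
        llama_model * model = llama_load_model_from_file("model.gguf", mparams); // placeholder path
        if (!model) return 1;

        llama_context_params cparams = llama_context_default_params();
        // The default added by this patch is true: intermediate results are cast to f16
        // before being copied to other GPUs for the reduce op in split mode "graph".
        // Set to false to keep the exchanged data in f32.
        cparams.split_mode_f16 = false;

        llama_context * ctx = llama_new_context_with_model(model, cparams);
        if (!ctx) return 1;

        // ... run decode as usual ...

        llama_free(ctx);
        llama_free_model(model);
        llama_backend_free();
        return 0;
    }

On the command line the same choice is exposed as -smf16/--split-mode-f16 and
-smf32/--split-mode-f32, with f16 being the default. Also note that the patch tightens the
threshold from >= 32 to > 32 and that the cast only applies when the tensor's second dimension
(cur->ne[1], the number of tokens being processed) exceeds 32, so single-token generation is
unaffected.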