Be able to set reduce op data type for split mode "graph"

Iwan Kawrakow
2025-12-24 10:57:41 +00:00
parent 1d7d0225a0
commit c6a3903571
7 changed files with 23 additions and 6 deletions
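Note: a minimal sketch of how a client could opt into f32 reductions through the public API touched below. Only the split_mode_f16 field, llama_context_default_params() and llama_new_context_with_model() come from this diff; the model pointer and its loading are assumed to exist elsewhere in the application.

    // assumes: llama_model * model loaded earlier by the application
    llama_context_params cparams = llama_context_default_params();  // split_mode_f16 defaults to true (f16 reduce ops)
    cparams.split_mode_f16 = false;                                  // keep inter-GPU data exchange in f32 (same effect as -smf32)
    llama_context * ctx = llama_new_context_with_model(model, cparams);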

View File

@@ -1436,6 +1436,14 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.split_mode_graph_scheduling = true;
         return true;
     }
+    if (arg == "-smf16" || arg == "--split-mode-f16") {
+        params.split_mode_f16 = true;
+        return true;
+    }
+    if (arg == "-smf32" || arg == "--split-mode-f32") {
+        params.split_mode_f16 = false;
+        return true;
+    }
     if (arg == "--numa") {
         CHECK_ARG
         std::string value(argv[i]);
@@ -2122,6 +2130,8 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", "-ser, --smart-expert-reduction", "experts reduction (default: %d,%g)", params.min_experts, params.thresh_experts});
     options.push_back({ "*", "-mqkv, --merge-qkv,", "merge Q,K,V (default: %d)", params.merge_qkv});
     options.push_back({ "*", "-khad, --k-cache-hadamard,", "Use Hadamard transform for K-cache (default: %d)", params.k_cache_hadamard});
+    options.push_back({ "*", "-smf16, --split-mode-f16,", "Use f16 for data exchange between GPUs (default: %d)", params.split_mode_f16});
+    options.push_back({ "*", "-smf32, --split-mode-f32,", "Use f32 for data exchange between GPUs (default: %d)", !params.split_mode_f16});
     options.push_back({ "*", "-smgs, --split-mode-graph-scheduling,", "Force Split Mode Graph Scheduling (default: %d)", params.split_mode_graph_scheduling});
     options.push_back({ "*", "-vq, --validate-quants", "validate quantized data while loading the model (default: %d)", params.validate_quants});
     options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"
@@ -3156,6 +3166,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     cparams.graph_reuse = params.graph_reuse;
     cparams.k_cache_hadamard = params.k_cache_hadamard;
     cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
+    cparams.split_mode_f16 = params.split_mode_f16;
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;
     cparams.only_active_experts = params.only_active_exps;
@@ -4138,6 +4149,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "graph_reuse: %s # default: false\n", params.graph_reuse ? "true" : "false");
     fprintf(stream, "k_cache_hadamard: %s # default: false\n", params.k_cache_hadamard ? "true" : "false");
     fprintf(stream, "split_mode_graph_scheduling: %s # default: false\n", params.split_mode_graph_scheduling ? "true" : "false");
+    fprintf(stream, "split_mode_f16: %s # default: true\n", params.split_mode_f16 ? "true" : "false");
     fprintf(stream, "ser: %d,%g # default: -1,0\n", params.min_experts, params.thresh_experts);
     fprintf(stream, "temp: %f # default: 0.8\n", sparams.temp);

View File

@@ -289,6 +289,7 @@ struct gpt_params {
     bool merge_qkv = false; // if true, merge separate Q, K, V tensors into a single, contiguous tensor
     bool k_cache_hadamard = false; // if true, use Hadamard transform for the K-cache (only makes sense with quantized cache)
     bool split_mode_graph_scheduling = false; // if true, force split mode graph scheduling
+    bool split_mode_f16 = true; // if true, intermediate results will be cast to f16 before copying to other GPUs to perform reduce ops
     std::string cache_type_k = "f16"; // KV cache data type for the K
     std::string cache_type_v = "f16"; // KV cache data type for the V

View File

@@ -444,6 +444,7 @@ extern "C" {
         bool only_active_experts;
         bool k_cache_hadamard; // if true, apply Hadamard transform to K-cache
         bool split_mode_graph_scheduling; // if true, force split mode graph scheduling
+        bool split_mode_f16; // if true, cast intermediate results to f16 before copying to other GPUs
         // Abort callback
         // if it returns true, execution of llama_decode() will be aborted

View File

@@ -697,7 +697,7 @@ ggml_tensor * llm_build_context::llm_build_ffn(
         // GLM4 and GLM4_MOE seem to have numerical issues with half-precision accumulators
         ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
     }
-    if (cur->ne[1] >= 32) {
+    if (cur->ne[1] > 32 && lctx.cparams.split_mode_f16) {
         cur = ggml_cast(ctx, cur, GGML_TYPE_F16);
     }
     if (graph) {
@@ -1185,7 +1185,7 @@ llm_expert_gating_func_type gating_op,
                 split_down_shexp->splits[id], split_down_b_shexp ? split_down_b_shexp->splits[id] : nullptr, nullptr,
                 nullptr, type_op_shexp, LLM_FFN_PAR, cb, il);
         cb(shared_out, "ffn_shexp_out", il_cb);
-        if (shared_out->ne[1] > 32) {
+        if (shared_out->ne[1] > 32 && lctx.cparams.split_mode_f16) {
             shared_out = ggml_cast(ctx, shared_out, GGML_TYPE_F16);
         }
         results.push_back(shared_out);
@@ -1202,7 +1202,7 @@ llm_expert_gating_func_type gating_op,
             cb(cur, "ffn_shared_combined", il);
         }
     }
-    if (routed_out->ne[1] > 32) {
+    if (routed_out->ne[1] > 32 && lctx.cparams.split_mode_f16) {
         auto routed_out_f16 = ggml_cast(ctx, routed_out, GGML_TYPE_F16);
         cur = ggml_add(ctx, routed_out_f16, cur);
     } else {
@@ -1279,7 +1279,7 @@ llm_expert_gating_func_type gating_op,
     } else {
         cur = routed_out;
     }
-    if (cur->ne[1] >= 32) {
+    if (cur->ne[1] > 32 && lctx.cparams.split_mode_f16) {
         cur = ggml_cast(ctx, cur, GGML_TYPE_F16);
         cb(cur, "ffn_out_f16", il_cb);
     }
@@ -9513,7 +9513,7 @@ ggml_tensor * llm_build_context::build_std_attention(ggml_cgraph * gf, ggml_tens
         cur = ggml_add(ctx0, cur, bo->splits[id]);
         cb(cur, "kqv_wo_biased", il_cb);
     }
-    if (cur->ne[1] >= 32) {
+    if (cur->ne[1] > 32 && lctx.cparams.split_mode_f16) {
         cur = ggml_cast(ctx0, cur, GGML_TYPE_F16);
     }
     ggml_build_forward_expand(gf, cur);
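Note: the five hunks above apply the same gate at each reduce point. A hypothetical helper (not part of this commit; the name is invented for illustration) that expresses the rule once:

    // Cast an intermediate result to f16 for the inter-GPU copy/reduce only when the
    // token dimension exceeds 32 and the user left split_mode_f16 enabled; otherwise
    // the tensor is passed through unchanged (f32 data exchange).
    static ggml_tensor * maybe_cast_for_reduce(ggml_context * ctx, ggml_tensor * t, bool split_mode_f16) {
        if (t->ne[1] > 32 && split_mode_f16) {
            t = ggml_cast(ctx, t, GGML_TYPE_F16);
        }
        return t;
    }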

View File

@@ -41,6 +41,7 @@ struct llama_cparams {
     bool graph_reuse;
     bool k_cache_hadamard;
     bool split_mode_graph_scheduling;
+    bool split_mode_f16;
     int min_experts;
     float thresh_experts;

View File

@@ -2265,7 +2265,6 @@ bool create_tensors_helper::create_cohere2_tensors(const LLM_TN & tn) {
     for (int i = 0; i < n_layer; ++i) {
         auto & layer = model.layers[i];
         ggml_context * ctx_split = ctx_for_layer_split(i);
-        ggml_context * ctx_layer = ctx_for_layer(i);
         layer.attn_norm = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);

View File

@@ -4055,6 +4055,7 @@ struct llama_context_params llama_context_default_params() {
         /*.only_active_experts =*/ false,
         /*.k_cache_hadamard =*/ false,
         /*.split_mode_graph_scheduling =*/ false,
+        /*.split_mode_f16 =*/ true,
         /*.abort_callback =*/ nullptr,
         /*.abort_callback_data =*/ nullptr,
         /*.offload_policy =*/ nullptr,
@@ -4344,6 +4345,7 @@ struct llama_context * llama_new_context_with_model(
     cparams.graph_reuse = params.graph_reuse;
     cparams.k_cache_hadamard = params.k_cache_hadamard;
     cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
+    cparams.split_mode_f16 = params.split_mode_f16;
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;
     cparams.cuda_params = params.cuda_params;
@@ -4433,6 +4435,7 @@ struct llama_context * llama_new_context_with_model(
     LLAMA_LOG_INFO("%s: graph_reuse = %d\n", __func__, cparams.graph_reuse);
     LLAMA_LOG_INFO("%s: k_cache_hadam = %d\n", __func__, cparams.k_cache_hadamard);
     LLAMA_LOG_INFO("%s: split_mode_graph_scheduling = %d\n", __func__, cparams.split_mode_graph_scheduling);
+    LLAMA_LOG_INFO("%s: split_mode_f16 = %d\n", __func__, cparams.split_mode_f16);
     LLAMA_LOG_INFO("%s: ser = %d, %g\n", __func__, cparams.min_experts, cparams.thresh_experts);
     LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
     LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);