diff --git a/common/common.cpp b/common/common.cpp
index be3606c1..664a6f4c 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1408,6 +1408,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.k_cache_hadamard = true;
         return true;
     }
+    if (arg == "-smgs" || arg == "--split-mode-graph-scheduling") {
+        params.split_mode_graph_scheduling = true;
+        return true;
+    }
     if (arg == "--numa") {
         CHECK_ARG
         std::string value(argv[i]);
@@ -2089,6 +2093,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", "-ser, --smart-expert-reduction", "experts reduction (default: %d,%g)", params.min_experts, params.thresh_experts});
     options.push_back({ "*", "-mqkv, --merge-qkv,", "merge Q,K,V (default: %d)", params.merge_qkv});
     options.push_back({ "*", "-khad, --k-cache-hadamard,", "Use Hadamard transform for K-cache (default: %d)", params.k_cache_hadamard});
+    options.push_back({ "*", "-smgs, --split-mode-graph-scheduling,", "Force Split Mode Graph Scheduling (default: %d)", params.split_mode_graph_scheduling});
     options.push_back({ "*", "-vq, --validate-quants", "validate quantized data while loading the model (default: %d)", params.validate_quants});
     options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"
                              "in conversation mode, this will be used as system prompt\n"
@@ -3112,6 +3117,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     cparams.rope_cache = params.rope_cache;
     cparams.graph_reuse = params.graph_reuse;
     cparams.k_cache_hadamard = params.k_cache_hadamard;
+    cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;
     cparams.only_active_experts = params.only_active_exps;
@@ -4093,6 +4099,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "rope_cache: %s # default: false\n", params.rope_cache ? "true" : "false");
     fprintf(stream, "graph_reuse: %s # default: false\n", params.graph_reuse ? "true" : "false");
     fprintf(stream, "k_cache_hadamard: %s # default: false\n", params.k_cache_hadamard ? "true" : "false");
+    fprintf(stream, "split_mode_graph_scheduling: %s # default: false\n", params.split_mode_graph_scheduling ? "true" : "false");
     fprintf(stream, "ser: %d,%g # defaulr: -1,0\n", params.min_experts, params.thresh_experts);
     fprintf(stream, "temp: %f # default: 0.8\n", sparams.temp);
diff --git a/common/common.h b/common/common.h
index 9b6fc25a..e0b7f53b 100644
--- a/common/common.h
+++ b/common/common.h
@@ -279,6 +279,7 @@ struct gpt_params {
     bool only_active_exps = true;  // if true, offload only active experts (relevant only for hybrid CPU/GPU)
     bool merge_qkv = false;        // if true, merge separate Q, K, V tensors into a single, contiguous tensor
     bool k_cache_hadamard = false; // if true, use Hadamard transform for the K-cache (only makes sense with quantized cache)
+    bool split_mode_graph_scheduling = false; // if true, force split mode graph scheduling

     std::string cache_type_k = "f16"; // KV cache data type for the K
     std::string cache_type_v = "f16"; // KV cache data type for the V
diff --git a/include/llama.h b/include/llama.h
index a4637407..2f56a01c 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -443,6 +443,7 @@ extern "C" {
         float thresh_experts;
         bool  only_active_experts;
         bool  k_cache_hadamard; // if true, apply Hadamard transfrom to K-cache
+        bool  split_mode_graph_scheduling; // if true, force split mode graph scheduling

         // Abort callback
         // if it returns true, execution of llama_decode() will be aborted
diff --git a/src/llama-build-context.cpp b/src/llama-build-context.cpp
index ba2ffc32..0c0ef048 100644
--- a/src/llama-build-context.cpp
+++ b/src/llama-build-context.cpp
@@ -53,6 +53,7 @@ llm_build_context::llm_build_context(
     fused_mmad       (cparams.fused_mmad),
     rope_cache       (cparams.rope_cache),
     k_cache_hadamard (cparams.k_cache_hadamard),
+    split_mode_graph_scheduling (cparams.split_mode_graph_scheduling),
     min_experts      (cparams.min_experts),
     thresh_experts   (cparams.thresh_experts),
     pooling_type     (cparams.pooling_type),
diff --git a/src/llama-build-context.h b/src/llama-build-context.h
index 7d8a0931..347b177a 100644
--- a/src/llama-build-context.h
+++ b/src/llama-build-context.h
@@ -83,6 +83,7 @@ struct llm_build_context {
     const bool  fused_mmad;
     const bool  rope_cache;
     const bool  k_cache_hadamard;
+    const bool  split_mode_graph_scheduling;

     const int   min_experts;
     const float thresh_experts;
diff --git a/src/llama-cparams.h b/src/llama-cparams.h
index d911378d..c45d9975 100644
--- a/src/llama-cparams.h
+++ b/src/llama-cparams.h
@@ -40,6 +40,7 @@ struct llama_cparams {
     bool rope_cache;
     bool graph_reuse;
     bool k_cache_hadamard;
+    bool split_mode_graph_scheduling;

     int   min_experts;
     float thresh_experts;
diff --git a/src/llama.cpp b/src/llama.cpp
index 178b8f77..326bf0e7 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -4054,6 +4054,7 @@ struct llama_context_params llama_context_default_params() {
         /*.thtesh_experts              =*/ 0.0f,
         /*.only_active_experts         =*/ false,
         /*.k_cache_hadamard            =*/ false,
+        /*.split_mode_graph_scheduling =*/ false,
         /*.abort_callback              =*/ nullptr,
         /*.abort_callback_data         =*/ nullptr,
         /*.offload_policy              =*/ nullptr,
@@ -4342,6 +4343,7 @@ struct llama_context * llama_new_context_with_model(
     cparams.rope_cache = params.rope_cache;
     cparams.graph_reuse = params.graph_reuse;
     cparams.k_cache_hadamard = params.k_cache_hadamard;
+    cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;
     cparams.cuda_params = params.cuda_params;
@@ -4430,6 +4432,7 @@ struct llama_context * llama_new_context_with_model(
     LLAMA_LOG_INFO("%s: rope_cache = %d\n", __func__, cparams.rope_cache);
     LLAMA_LOG_INFO("%s: graph_reuse = %d\n", __func__, cparams.graph_reuse);
     LLAMA_LOG_INFO("%s: k_cache_hadam = %d\n", __func__, cparams.k_cache_hadamard);
+    LLAMA_LOG_INFO("%s: split_mode_graph_scheduling = %d\n", __func__, cparams.split_mode_graph_scheduling);
     LLAMA_LOG_INFO("%s: ser = %d, %g\n", __func__, cparams.min_experts, cparams.thresh_experts);
     LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
     LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);
@@ -4773,10 +4776,14 @@ struct llama_context * llama_new_context_with_model(
         LLAMA_LOG_INFO("XXXXXXXXXXXXXXXXXXXXX Setting only active experts offload\n");
         ggml_backend_sched_set_only_active_experts(ctx->sched, true);
     }
-    if (model->split_mode == LLAMA_SPLIT_MODE_GRAPH && !model->has_tensor_overrides()) {
+    if (model->split_mode == LLAMA_SPLIT_MODE_GRAPH && (!model->has_tensor_overrides() || cparams.split_mode_graph_scheduling)) {
         ggml_backend_sched_set_split_mode_graph(ctx->sched, true);
         ggml_backend_sched_set_max_extra_alloc(ctx->sched, params.max_extra_alloc);
-    }
+        if (model->has_tensor_overrides() && cparams.split_mode_graph_scheduling) {
+            LLAMA_LOG_INFO("XXXXXXXX Split Mode Graph Scheduling is FORCED despite tensor overrides, as requested by the user.\n");
+            LLAMA_LOG_INFO("XXXXXXXX Inference may not work correctly: not every combination of SMGS and tensor overrides is supported.\n");
+        }
+    }

     return ctx;
 }
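For reference, a minimal sketch of how the new context flag could be set through the public API touched by this patch. The model path, the llama_backend_init()/llama_load_model_from_file() loading flow, and the omitted inference loop are illustrative assumptions, not part of this change; only split_mode_graph_scheduling, llama_context_default_params(), llama_new_context_with_model() and LLAMA_SPLIT_MODE_GRAPH come from the patch itself.

// Sketch only: force split-mode-graph scheduling on a context, assuming the
// model is loaded with LLAMA_SPLIT_MODE_GRAPH (the split mode checked in
// llama_new_context_with_model above).
#include "llama.h"

int main() {
    llama_backend_init();

    llama_model_params mparams = llama_model_default_params();
    mparams.split_mode = LLAMA_SPLIT_MODE_GRAPH;

    llama_model * model = llama_load_model_from_file("model.gguf", mparams); // hypothetical model file
    if (model == nullptr) {
        return 1;
    }

    llama_context_params cparams = llama_context_default_params();
    cparams.split_mode_graph_scheduling = true; // force SMGS even when tensor overrides are present

    llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == nullptr) {
        llama_free_model(model);
        return 1;
    }

    // ... run inference ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}

From the command line, the same behaviour is enabled with the new -smgs / --split-mode-graph-scheduling argument handled in gpt_params_find_arg above.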