From 29d323117c76d6b5da9d8eb6a386a57286e0b56b Mon Sep 17 00:00:00 2001
From: Iwan Kawrakow
Date: Sat, 27 Dec 2025 06:24:01 +0000
Subject: [PATCH] Command line option to turn on async. Set to false by default for now

---
 common/common.cpp           |  7 +++++++
 common/common.h             |  1 +
 ggml/include/ggml-backend.h |  2 +-
 ggml/src/ggml-backend.cpp   | 10 ++++------
 include/llama.h             |  1 +
 src/llama-cparams.h         |  1 +
 src/llama.cpp               |  7 +++++--
 7 files changed, 20 insertions(+), 9 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 626da8d6..0d9d12c8 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1436,6 +1436,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.split_mode_graph_scheduling = true;
         return true;
     }
+    if (arg == "-sas" || arg == "--scheduler-async") {
+        params.scheduler_async = true;
+        return true;
+    }
     if (arg == "-smf16" || arg == "--split-mode-f16") {
         params.split_mode_f16 = true;
         return true;
     }
@@ -2133,6 +2137,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", "-smf16, --split-mode-f16,", "Use f16 for data exchange between GPUs (default: %d)", params.split_mode_f16});
     options.push_back({ "*", "-smf32, --split-mode-f32,", "Use f32 for data exchange between GPUs (default: %d)", !params.split_mode_f16});
     options.push_back({ "*", "-smgs, --split-mode-graph-scheduling,", "Force Split Mode Graph Scheduling (default: %d)", params.split_mode_graph_scheduling});
+    options.push_back({ "*", "-sas, --scheduler-async,", "Async evaluation of compute graphs (default: %d)", params.scheduler_async});
     options.push_back({ "*", "-vq, --validate-quants", "validate quantized data while loading the model (default: %d)", params.validate_quants});
     options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"
                                                     "in conversation mode, this will be used as system prompt\n"
@@ -3167,6 +3172,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     cparams.k_cache_hadamard = params.k_cache_hadamard;
     cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
     cparams.split_mode_f16 = params.split_mode_f16;
+    cparams.scheduler_async = params.scheduler_async;
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;
     cparams.only_active_experts = params.only_active_exps;
@@ -4150,6 +4156,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "k_cache_hadamard: %s # default: false\n", params.k_cache_hadamard ? "true" : "false");
     fprintf(stream, "split_mode_graph_scheduling: %s # default: false\n", params.split_mode_graph_scheduling ? "true" : "false");
     fprintf(stream, "split_mode_f16: %s # default: true\n", params.split_mode_f16 ? "true" : "false");
+    fprintf(stream, "scheduler_async: %s # default: false\n", params.scheduler_async ? "true" : "false");
"true" : "false"); fprintf(stream, "ser: %d,%g # defaulr: -1,0\n", params.min_experts, params.thresh_experts); fprintf(stream, "temp: %f # default: 0.8\n", sparams.temp); diff --git a/common/common.h b/common/common.h index 8fe2287b..a5b231fe 100644 --- a/common/common.h +++ b/common/common.h @@ -290,6 +290,7 @@ struct gpt_params { bool k_cache_hadamard = false; // if true, use Hadamard transform for the K-cache (only makes sense with quantized cache) bool split_mode_graph_scheduling = false; // if true, force split mode graph scheduling bool split_mode_f16 = true; // if true, intermediate results will be cast to f16 before copying to other GPUs to perform reduce ops + bool scheduler_async = false; // if true, in split mode graph the scheduler will use multiple threads to evaluate the graph std::string cache_type_k = "f16"; // KV cache data type for the K std::string cache_type_v = "f16"; // KV cache data type for the V diff --git a/ggml/include/ggml-backend.h b/ggml/include/ggml-backend.h index 82f05092..e75606dc 100644 --- a/ggml/include/ggml-backend.h +++ b/ggml/include/ggml-backend.h @@ -211,7 +211,7 @@ extern "C" { // enable or disable op offload for a given op GGML_API void ggml_backend_sched_set_op_offload(ggml_backend_sched_t sched, enum ggml_op op, bool on_or_off); GGML_API void ggml_backend_sched_set_only_active_experts(ggml_backend_sched_t sched, bool on_or_off); - GGML_API void ggml_backend_sched_set_split_mode_graph(ggml_backend_sched_t sched, bool on_or_off); + GGML_API void ggml_backend_sched_set_split_mode_graph(ggml_backend_sched_t sched, bool on_or_off, bool async); GGML_API void ggml_backend_sched_set_max_extra_alloc(ggml_backend_sched_t sched, int extra_alloc_MiB); // diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp index ff3adf48..345eddd2 100644 --- a/ggml/src/ggml-backend.cpp +++ b/ggml/src/ggml-backend.cpp @@ -1182,6 +1182,7 @@ struct ggml_backend_sched { bool only_active_experts; bool split_mode_graph; + bool is_async = false; bool debug; bool has_reduce = false; }; @@ -1208,9 +1209,10 @@ void ggml_backend_sched_set_only_active_experts(ggml_backend_sched_t sched, bool sched->only_active_experts = on_or_off; } -void ggml_backend_sched_set_split_mode_graph(ggml_backend_sched_t sched, bool on_or_off) { +void ggml_backend_sched_set_split_mode_graph(ggml_backend_sched_t sched, bool on_or_off, bool async) { if (!sched) return; sched->split_mode_graph = on_or_off; + sched->is_async = async; } void ggml_backend_sched_set_max_extra_alloc(ggml_backend_sched_t sched, int extra_alloc_MiB) { @@ -2152,13 +2154,9 @@ static ggml_status ggml_backend_sched_eval(ggml_backend_sched_t sched, ggml_back static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched) { - //if (!sched->split_mode_graph) { - // for (auto & item : sched->own_cpy ) item = false; - // for (auto & item : sched->needs_sync) item = true; - //} for (auto & item : sched->needs_sync) item = true; - if (sched->n_backends > 2 && sched->split_mode_graph && sched->has_reduce) { + if (sched->is_async && sched->n_backends > 2 && sched->split_mode_graph && sched->has_reduce) { for (auto & s : sched->statuses) s = GGML_STATUS_SUCCESS; diff --git a/include/llama.h b/include/llama.h index a33ce538..16558c5b 100644 --- a/include/llama.h +++ b/include/llama.h @@ -445,6 +445,7 @@ extern "C" { bool k_cache_hadamard; // if true, apply Hadamard transfrom to K-cache bool split_mode_graph_scheduling; // if true, force split mode graph scheduling bool split_mode_f16; // if true, cast 
+        bool scheduler_async;             // if true, with split mode "graph", graph evaluation will be done using multiple threads
 
         // Abort callback
         // if it returns true, execution of llama_decode() will be aborted
diff --git a/src/llama-cparams.h b/src/llama-cparams.h
index b639e818..3735a474 100644
--- a/src/llama-cparams.h
+++ b/src/llama-cparams.h
@@ -42,6 +42,7 @@ struct llama_cparams {
     bool k_cache_hadamard;
     bool split_mode_graph_scheduling;
    bool split_mode_f16;
+    bool scheduler_async;
 
     int min_experts;
     float thresh_experts;
diff --git a/src/llama.cpp b/src/llama.cpp
index 94242f0c..468c5c40 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -4056,6 +4056,7 @@ struct llama_context_params llama_context_default_params() {
         /*.k_cache_hadamard            =*/ false,
         /*.split_mode_graph_scheduling =*/ false,
         /*.split_mode_f16              =*/ true,
+        /*.scheduler_async             =*/ false,
         /*.abort_callback              =*/ nullptr,
         /*.abort_callback_data         =*/ nullptr,
         /*.offload_policy              =*/ nullptr,
@@ -4346,6 +4347,7 @@ struct llama_context * llama_new_context_with_model(
     cparams.k_cache_hadamard = params.k_cache_hadamard;
     cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
     cparams.split_mode_f16 = params.split_mode_f16;
+    cparams.scheduler_async = params.scheduler_async;
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;
     cparams.cuda_params = params.cuda_params;
@@ -4436,6 +4438,7 @@ struct llama_context * llama_new_context_with_model(
     LLAMA_LOG_INFO("%s: k_cache_hadam = %d\n", __func__, cparams.k_cache_hadamard);
     LLAMA_LOG_INFO("%s: split_mode_graph_scheduling = %d\n", __func__, cparams.split_mode_graph_scheduling);
     LLAMA_LOG_INFO("%s: split_mode_f16= %d\n", __func__, cparams.split_mode_f16);
+    LLAMA_LOG_INFO("%s: sched_async = %d\n", __func__, cparams.scheduler_async);
     LLAMA_LOG_INFO("%s: ser = %d, %g\n", __func__, cparams.min_experts, cparams.thresh_experts);
     LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
     LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);
@@ -4780,13 +4783,13 @@ struct llama_context * llama_new_context_with_model(
         ggml_backend_sched_set_only_active_experts(ctx->sched, true);
     }
     if (model->split_mode == LLAMA_SPLIT_MODE_GRAPH && (!model->has_tensor_overrides() || cparams.split_mode_graph_scheduling)) {
-        ggml_backend_sched_set_split_mode_graph(ctx->sched, true);
+        ggml_backend_sched_set_split_mode_graph(ctx->sched, true, cparams.scheduler_async);
         ggml_backend_sched_set_max_extra_alloc(ctx->sched, params.max_extra_alloc);
         if (model->has_tensor_overrides() && cparams.split_mode_graph_scheduling) {
             LLAMA_LOG_INFO("XXXXXXXX Split Mode Graph Scheduling is FORCED despite tensor overrides due to user choice.\n");
             LLAMA_LOG_INFO("XXXXXXXX It may or might NOT infer properly due to unsupported combinations between SMGS and every possible tensor overrides.\n");
         }
-    } 
+    }
 
     return ctx;
 }
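
Usage note (not part of the patch): besides the new -sas / --scheduler-async CLI flag handled in common.cpp, the option can also be set programmatically through the public context params added above. A minimal sketch, assuming a llama_model * has already been loaded elsewhere:

    // Illustrative sketch only: enable async graph evaluation for split mode
    // "graph" via the new scheduler_async context parameter from this patch.
    #include "llama.h"

    static llama_context * new_async_context(llama_model * model) {
        llama_context_params cparams = llama_context_default_params();
        cparams.scheduler_async = true; // default is false, matching the CLI default for now
        return llama_new_context_with_model(model, cparams);
    }

As in the CLI path, the flag only has an effect when the model uses split mode "graph" with more than two backends and a reduce step, since ggml_backend_sched_compute_splits checks is_async together with those conditions.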