mirror of https://github.com/ikawrakow/ik_llama.cpp.git
Command line option to turn on async. Set to false by default for now.
@@ -1436,6 +1436,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.split_mode_graph_scheduling = true;
         return true;
     }
+    if (arg == "-sas" || arg == "--scheduler-async") {
+        params.scheduler_async = true;
+        return true;
+    }
     if (arg == "-smf16" || arg == "--split-mode-f16") {
         params.split_mode_f16 = true;
         return true;
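
For orientation, here is a minimal, self-contained sketch of the pattern this hunk follows: each boolean flag simply flips a field (default false, or default true for -smf16) when its short or long spelling matches. The struct and function names below are illustrative stand-ins, not the project's gpt_params / gpt_params_find_arg.

#include <cstdio>
#include <string>

// Illustrative option holder, mirroring the defaults used in this commit.
struct sketch_opts {
    bool split_mode_graph_scheduling = false;
    bool scheduler_async             = false; // the new option, off by default
    bool split_mode_f16              = true;
};

// Returns true if the argument was recognized as one of these flags.
static bool parse_flag(const std::string & arg, sketch_opts & o) {
    if (arg == "-smgs"  || arg == "--split-mode-graph-scheduling") { o.split_mode_graph_scheduling = true; return true; }
    if (arg == "-sas"   || arg == "--scheduler-async")             { o.scheduler_async             = true; return true; }
    if (arg == "-smf16" || arg == "--split-mode-f16")              { o.split_mode_f16              = true; return true; }
    return false;
}

int main(int argc, char ** argv) {
    sketch_opts o;
    for (int i = 1; i < argc; ++i) {
        parse_flag(argv[i], o);
    }
    std::printf("scheduler_async = %d\n", o.scheduler_async ? 1 : 0);
    return 0;
}
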
@@ -2133,6 +2137,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", "-smf16, --split-mode-f16,", "Use f16 for data exchange between GPUs (default: %d)", params.split_mode_f16});
     options.push_back({ "*", "-smf32, --split-mode-f32,", "Use f32 for data exchange between GPUs (default: %d)", !params.split_mode_f16});
     options.push_back({ "*", "-smgs, --split-mode-graph-scheduling,", "Force Split Mode Graph Scheduling (default: %d)", params.split_mode_graph_scheduling});
+    options.push_back({ "*", "-sas, --scheduler-async,", "Async evaluation of compute graphs (default: %d)", params.scheduler_async});
     options.push_back({ "*", "-vq, --validate-quants", "validate quantized data while loading the model (default: %d)", params.validate_quants});
     options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"
                             "in conversation mode, this will be used as system prompt\n"
@@ -3167,6 +3172,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     cparams.k_cache_hadamard = params.k_cache_hadamard;
     cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
     cparams.split_mode_f16 = params.split_mode_f16;
+    cparams.scheduler_async = params.scheduler_async;
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;
     cparams.only_active_experts = params.only_active_exps;
@@ -4150,6 +4156,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "k_cache_hadamard: %s # default: false\n", params.k_cache_hadamard ? "true" : "false");
     fprintf(stream, "split_mode_graph_scheduling: %s # default: false\n", params.split_mode_graph_scheduling ? "true" : "false");
     fprintf(stream, "split_mode_f16: %s # default: true\n", params.split_mode_f16 ? "true" : "false");
+    fprintf(stream, "scheduler_async: %s # default: false\n", params.scheduler_async ? "true" : "false");
     fprintf(stream, "ser: %d,%g # default: -1,0\n", params.min_experts, params.thresh_experts);
     fprintf(stream, "temp: %f # default: 0.8\n", sparams.temp);
@@ -290,6 +290,7 @@ struct gpt_params {
     bool k_cache_hadamard = false; // if true, use Hadamard transform for the K-cache (only makes sense with quantized cache)
     bool split_mode_graph_scheduling = false; // if true, force split mode graph scheduling
     bool split_mode_f16 = true; // if true, intermediate results will be cast to f16 before copying to other GPUs to perform reduce ops
+    bool scheduler_async = false; // if true, in split mode graph the scheduler will use multiple threads to evaluate the graph

     std::string cache_type_k = "f16"; // KV cache data type for the K
     std::string cache_type_v = "f16"; // KV cache data type for the V
@@ -211,7 +211,7 @@ extern "C" {
     // enable or disable op offload for a given op
     GGML_API void ggml_backend_sched_set_op_offload(ggml_backend_sched_t sched, enum ggml_op op, bool on_or_off);
     GGML_API void ggml_backend_sched_set_only_active_experts(ggml_backend_sched_t sched, bool on_or_off);
-    GGML_API void ggml_backend_sched_set_split_mode_graph(ggml_backend_sched_t sched, bool on_or_off);
+    GGML_API void ggml_backend_sched_set_split_mode_graph(ggml_backend_sched_t sched, bool on_or_off, bool async);
     GGML_API void ggml_backend_sched_set_max_extra_alloc(ggml_backend_sched_t sched, int extra_alloc_MiB);

     //
@@ -1182,6 +1182,7 @@ struct ggml_backend_sched {

     bool only_active_experts;
     bool split_mode_graph;
+    bool is_async = false;
     bool debug;
     bool has_reduce = false;
 };
@@ -1208,9 +1209,10 @@ void ggml_backend_sched_set_only_active_experts(ggml_backend_sched_t sched, bool
     sched->only_active_experts = on_or_off;
 }

-void ggml_backend_sched_set_split_mode_graph(ggml_backend_sched_t sched, bool on_or_off) {
+void ggml_backend_sched_set_split_mode_graph(ggml_backend_sched_t sched, bool on_or_off, bool async) {
     if (!sched) return;
     sched->split_mode_graph = on_or_off;
+    sched->is_async = async;
 }

 void ggml_backend_sched_set_max_extra_alloc(ggml_backend_sched_t sched, int extra_alloc_MiB) {
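
Because ggml_backend_sched_set_split_mode_graph now takes a third argument, every call site has to pass it; the llama.cpp hunk near the end of this commit does exactly that with cparams.scheduler_async. A hedged sketch of such a call site follows; the helper name is made up for illustration.

#include "ggml-backend.h"

// Illustrative helper (not part of this commit): enable split mode graph
// scheduling on an existing scheduler and choose sync vs. async evaluation.
static void enable_graph_split_scheduling(ggml_backend_sched_t sched, bool use_async) {
    // second argument turns split mode graph scheduling on,
    // third argument selects multi-threaded evaluation of the graph splits
    ggml_backend_sched_set_split_mode_graph(sched, /*on_or_off=*/true, /*async=*/use_async);
}
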
@@ -2152,13 +2154,9 @@ static ggml_status ggml_backend_sched_eval(ggml_backend_sched_t sched, ggml_back

 static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched) {

-    //if (!sched->split_mode_graph) {
-    //    for (auto & item : sched->own_cpy ) item = false;
-    //    for (auto & item : sched->needs_sync) item = true;
-    //}
     for (auto & item : sched->needs_sync) item = true;

-    if (sched->n_backends > 2 && sched->split_mode_graph && sched->has_reduce) {
+    if (sched->is_async && sched->n_backends > 2 && sched->split_mode_graph && sched->has_reduce) {

         for (auto & s : sched->statuses) s = GGML_STATUS_SUCCESS;

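
The body of the async branch is not part of this excerpt; only the gate and the reset of sched->statuses are visible. Purely as a hypothetical illustration of the pattern those statuses suggest (one worker per backend evaluating its share of the splits and reporting a result, with the caller collecting the first failure), and not the code in ggml-backend.cpp:

#include <thread>
#include <vector>

// Hypothetical stand-ins; the real code uses ggml_status and the sched internals.
enum class eval_status { success, failed };

static eval_status eval_backend_splits(int /*backend_id*/) {
    // placeholder for evaluating the splits assigned to one backend
    return eval_status::success;
}

static eval_status compute_splits_async(int n_backends) {
    std::vector<eval_status> statuses(n_backends, eval_status::success);
    std::vector<std::thread> workers;
    for (int b = 0; b < n_backends; ++b) {
        workers.emplace_back([&statuses, b]() { statuses[b] = eval_backend_splits(b); });
    }
    for (auto & w : workers) w.join();
    for (auto s : statuses) {
        if (s != eval_status::success) return s; // propagate the first failure
    }
    return eval_status::success;
}

int main() {
    return compute_splits_async(4) == eval_status::success ? 0 : 1;
}
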
@@ -445,6 +445,7 @@ extern "C" {
         bool k_cache_hadamard; // if true, apply Hadamard transform to K-cache
         bool split_mode_graph_scheduling; // if true, force split mode graph scheduling
         bool split_mode_f16; // if true, cast intermediate results to f16 before copying to other GPUs
+        bool scheduler_async; // if true, with split mode "graph" graph evaluation will be done using multiple threads

         // Abort callback
         // if it returns true, execution of llama_decode() will be aborted
@@ -42,6 +42,7 @@ struct llama_cparams {
     bool k_cache_hadamard;
     bool split_mode_graph_scheduling;
     bool split_mode_f16;
+    bool scheduler_async;
     int min_experts;
     float thresh_experts;

@@ -4056,6 +4056,7 @@ struct llama_context_params llama_context_default_params() {
         /*.k_cache_hadamard =*/ false,
         /*.split_mode_graph_scheduling =*/ false,
         /*.split_mode_f16 =*/ true,
+        /*.scheduler_async =*/ false,
         /*.abort_callback =*/ nullptr,
         /*.abort_callback_data =*/ nullptr,
         /*.offload_policy =*/ nullptr,
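
From the embedding API side, the new field is reached through llama_context_params. A small sketch, with error handling omitted and `model` assumed to have been loaded already (e.g. via llama_load_model_from_file):

#include "llama.h"

// Sketch: opt into async graph evaluation through the public context params.
static llama_context * make_async_context(llama_model * model) {
    llama_context_params cp = llama_context_default_params();
    cp.split_mode_graph_scheduling = true;  // optionally force split mode graph scheduling
    cp.scheduler_async             = true;  // the field added by this commit (default: false)
    return llama_new_context_with_model(model, cp);
}

On the command line the same configuration is expressed with -smgs and -sas (see the gpt_params hunks above), and the chosen values are echoed by the LLAMA_LOG_INFO lines in the following hunks.
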
@@ -4346,6 +4347,7 @@ struct llama_context * llama_new_context_with_model(
     cparams.k_cache_hadamard = params.k_cache_hadamard;
     cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
     cparams.split_mode_f16 = params.split_mode_f16;
+    cparams.scheduler_async = params.scheduler_async;
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;
     cparams.cuda_params = params.cuda_params;
@@ -4436,6 +4438,7 @@ struct llama_context * llama_new_context_with_model(
     LLAMA_LOG_INFO("%s: k_cache_hadam               = %d\n", __func__, cparams.k_cache_hadamard);
     LLAMA_LOG_INFO("%s: split_mode_graph_scheduling = %d\n", __func__, cparams.split_mode_graph_scheduling);
     LLAMA_LOG_INFO("%s: split_mode_f16              = %d\n", __func__, cparams.split_mode_f16);
+    LLAMA_LOG_INFO("%s: sched_async                 = %d\n", __func__, cparams.scheduler_async);
     LLAMA_LOG_INFO("%s: ser                         = %d, %g\n", __func__, cparams.min_experts, cparams.thresh_experts);
     LLAMA_LOG_INFO("%s: freq_base                   = %.1f\n", __func__, cparams.rope_freq_base);
     LLAMA_LOG_INFO("%s: freq_scale                  = %g\n", __func__, cparams.rope_freq_scale);
@@ -4780,7 +4783,7 @@ struct llama_context * llama_new_context_with_model(
         ggml_backend_sched_set_only_active_experts(ctx->sched, true);
     }
     if (model->split_mode == LLAMA_SPLIT_MODE_GRAPH && (!model->has_tensor_overrides() || cparams.split_mode_graph_scheduling)) {
-        ggml_backend_sched_set_split_mode_graph(ctx->sched, true);
+        ggml_backend_sched_set_split_mode_graph(ctx->sched, true, cparams.scheduler_async);
         ggml_backend_sched_set_max_extra_alloc(ctx->sched, params.max_extra_alloc);
         if (model->has_tensor_overrides() && cparams.split_mode_graph_scheduling) {
            LLAMA_LOG_INFO("XXXXXXXX Split Mode Graph Scheduling is FORCED despite tensor overrides due to user choice.\n");