Mirror of https://github.com/ikawrakow/ik_llama.cpp.git, synced 2026-01-26 17:20:01 +00:00
add split-mode-graph-scheduling parameter (#1068)
Use -smgs or --split-mode-graph-scheduling on the command line to bypass the automatic disabling of split mode graph scheduling when tensor overrides are used.

Co-authored-by: Kawrakow <iwankawrakow@gmail.com>
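For example, an invocation that keeps graph scheduling active even though some tensors are overridden might look like the following. Only -smgs is introduced by this commit; the binary name, model path, the -ot pattern, and the "graph" split-mode value are illustrative assumptions:

    ./llama-server -m /models/model.gguf -sm graph -ot "\.ffn_.*_exps\.=CPU" -smgs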
@@ -1408,6 +1408,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.k_cache_hadamard = true;
         return true;
     }
+    if (arg == "-smgs" || arg == "--split-mode-graph-scheduling") {
+        params.split_mode_graph_scheduling = true;
+        return true;
+    }
     if (arg == "--numa") {
         CHECK_ARG
         std::string value(argv[i]);
@@ -2089,6 +2093,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", "-ser, --smart-expert-reduction", "experts reduction (default: %d,%g)", params.min_experts, params.thresh_experts});
     options.push_back({ "*", "-mqkv, --merge-qkv,", "merge Q,K,V (default: %d)", params.merge_qkv});
     options.push_back({ "*", "-khad, --k-cache-hadamard,", "Use Hadamard transform for K-cache (default: %d)", params.k_cache_hadamard});
+    options.push_back({ "*", "-smgs, --split-mode-graph-scheduling,", "Force Split Mode Graph Scheduling (default: %d)", params.split_mode_graph_scheduling});
     options.push_back({ "*", "-vq, --validate-quants", "validate quantized data while loading the model (default: %d)", params.validate_quants});
     options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"
                                                     "in conversation mode, this will be used as system prompt\n"
@@ -3112,6 +3117,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     cparams.rope_cache = params.rope_cache;
     cparams.graph_reuse = params.graph_reuse;
     cparams.k_cache_hadamard = params.k_cache_hadamard;
+    cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;
     cparams.only_active_experts = params.only_active_exps;
@@ -4093,6 +4099,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "rope_cache: %s # default: false\n", params.rope_cache ? "true" : "false");
     fprintf(stream, "graph_reuse: %s # default: false\n", params.graph_reuse ? "true" : "false");
     fprintf(stream, "k_cache_hadamard: %s # default: false\n", params.k_cache_hadamard ? "true" : "false");
+    fprintf(stream, "split_mode_graph_scheduling: %s # default: false\n", params.split_mode_graph_scheduling ? "true" : "false");
     fprintf(stream, "ser: %d,%g # defaulr: -1,0\n", params.min_experts, params.thresh_experts);
     fprintf(stream, "temp: %f # default: 0.8\n", sparams.temp);

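Following the format string added above, the YAML dump then contains a line of this form (true when -smgs was passed, false otherwise):

    split_mode_graph_scheduling: true # default: false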
@@ -279,6 +279,7 @@ struct gpt_params {
     bool only_active_exps = true; // if true, offload only active experts (relevant only for hybrid CPU/GPU)
     bool merge_qkv = false; // if true, merge separate Q, K, V tensors into a single, contiguous tensor
     bool k_cache_hadamard = false; // if true, use Hadamard transform for the K-cache (only makes sense with quantized cache)
+    bool split_mode_graph_scheduling = false; // if true, force split mode graph scheduling

     std::string cache_type_k = "f16"; // KV cache data type for the K
     std::string cache_type_v = "f16"; // KV cache data type for the V
@@ -443,6 +443,7 @@ extern "C" {
         float thresh_experts;
         bool only_active_experts;
         bool k_cache_hadamard; // if true, apply Hadamard transfrom to K-cache
+        bool split_mode_graph_scheduling; // if true, force split mode graph scheduling

         // Abort callback
         // if it returns true, execution of llama_decode() will be aborted
@@ -53,6 +53,7 @@ llm_build_context::llm_build_context(
     fused_mmad (cparams.fused_mmad),
     rope_cache (cparams.rope_cache),
     k_cache_hadamard (cparams.k_cache_hadamard),
+    split_mode_graph_scheduling (cparams.split_mode_graph_scheduling),
     min_experts (cparams.min_experts),
     thresh_experts (cparams.thresh_experts),
     pooling_type (cparams.pooling_type),
@@ -83,6 +83,7 @@ struct llm_build_context {
     const bool fused_mmad;
     const bool rope_cache;
     const bool k_cache_hadamard;
+    const bool split_mode_graph_scheduling;
     const int min_experts;
     const float thresh_experts;

@@ -40,6 +40,7 @@ struct llama_cparams {
     bool rope_cache;
     bool graph_reuse;
     bool k_cache_hadamard;
+    bool split_mode_graph_scheduling;
     int min_experts;
     float thresh_experts;

@@ -4054,6 +4054,7 @@ struct llama_context_params llama_context_default_params() {
         /*.thtesh_experts =*/ 0.0f,
         /*.only_active_experts =*/ false,
         /*.k_cache_hadamard =*/ false,
+        /*.split_mode_graph_scheduling =*/ false,
         /*.abort_callback =*/ nullptr,
         /*.abort_callback_data =*/ nullptr,
         /*.offload_policy =*/ nullptr,
@@ -4342,6 +4343,7 @@ struct llama_context * llama_new_context_with_model(
     cparams.rope_cache = params.rope_cache;
     cparams.graph_reuse = params.graph_reuse;
     cparams.k_cache_hadamard = params.k_cache_hadamard;
+    cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;
     cparams.cuda_params = params.cuda_params;
@@ -4430,6 +4432,7 @@ struct llama_context * llama_new_context_with_model(
     LLAMA_LOG_INFO("%s: rope_cache = %d\n", __func__, cparams.rope_cache);
     LLAMA_LOG_INFO("%s: graph_reuse = %d\n", __func__, cparams.graph_reuse);
     LLAMA_LOG_INFO("%s: k_cache_hadam = %d\n", __func__, cparams.k_cache_hadamard);
+    LLAMA_LOG_INFO("%s: split_mode_graph_scheduling = %d\n", __func__, cparams.split_mode_graph_scheduling);
     LLAMA_LOG_INFO("%s: ser = %d, %g\n", __func__, cparams.min_experts, cparams.thresh_experts);
     LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
     LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);
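Given the format string added above and __func__ at that point, the new log line at context creation would look roughly like this (1 when -smgs is set, 0 otherwise):

    llama_new_context_with_model: split_mode_graph_scheduling = 1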
@@ -4773,10 +4776,14 @@ struct llama_context * llama_new_context_with_model(
         LLAMA_LOG_INFO("XXXXXXXXXXXXXXXXXXXXX Setting only active experts offload\n");
         ggml_backend_sched_set_only_active_experts(ctx->sched, true);
     }
-    if (model->split_mode == LLAMA_SPLIT_MODE_GRAPH && !model->has_tensor_overrides()) {
+    if (model->split_mode == LLAMA_SPLIT_MODE_GRAPH && (!model->has_tensor_overrides() || cparams.split_mode_graph_scheduling)) {
         ggml_backend_sched_set_split_mode_graph(ctx->sched, true);
         ggml_backend_sched_set_max_extra_alloc(ctx->sched, params.max_extra_alloc);
+        if (model->has_tensor_overrides() && cparams.split_mode_graph_scheduling) {
+            LLAMA_LOG_INFO("XXXXXXXX Split Mode Graph Scheduling is FORCED despite tensor overrides due to user choice.\n");
+            LLAMA_LOG_INFO("XXXXXXXX It may or might NOT infer properly due to unsupported combinations between SMGS and every possible tensor overrides.\n");
+        }
     }

     return ctx;
 }
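The last hunk carries the actual behavior change. A minimal standalone C++ sketch of the gating decision it implements is shown below; the function name, the local enum, and the parameter names are made up for illustration and do not exist in ik_llama.cpp, where the check lives inline in llama_new_context_with_model and uses LLAMA_SPLIT_MODE_GRAPH from llama.h.

    #include <cstdio>

    // Illustrative stand-in for llama_split_mode; the real enum lives in llama.h.
    enum class split_mode { none, layer, row, graph };

    // Condenses the gating logic of the last hunk into one predicate:
    // graph scheduling only applies to the graph split mode, and without -smgs
    // it is skipped as soon as tensor overrides are present; with -smgs
    // (force_smgs == true) the user's choice wins and scheduling stays enabled.
    static bool use_split_mode_graph_scheduling(split_mode sm, bool has_tensor_overrides, bool force_smgs) {
        return sm == split_mode::graph && (!has_tensor_overrides || force_smgs);
    }

    int main() {
        // Tensor overrides present, user passed -smgs: scheduling stays on.
        std::printf("%d\n", use_split_mode_graph_scheduling(split_mode::graph, true, true));  // 1
        // Tensor overrides present, no -smgs: scheduling is skipped, as before this commit.
        std::printf("%d\n", use_split_mode_graph_scheduling(split_mode::graph, true, false)); // 0
        return 0;
    }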