Command line option to set max. extra VRAM that the scheduler can use

Iwan Kawrakow
2025-12-16 06:35:06 +00:00
parent 5235c8b3e4
commit ec2ba592b5
6 changed files with 24 additions and 5 deletions

View File

@@ -757,6 +757,11 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.defrag_thold = std::stof(argv[i]);
return true;
}
if (arg == "--max-extra-alloc" || arg == "-mea") {
CHECK_ARG
params.max_extra_alloc_MiB = std::stoi(argv[i]);
return true;
}
if (arg == "--samplers") {
CHECK_ARG
const auto sampler_names = string_split(argv[i], ";");
@@ -2218,6 +2223,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
options.push_back({ "parallel" });
options.push_back({ "*", "-dt, --defrag-thold N", "KV cache defragmentation threshold (default: %.1f, < 0 - disabled)", (double)params.defrag_thold });
options.push_back({ "*", "-mea, --max-extra-alloc", "Max extra VRAM allocation per GPU (default: %d)", params.max_extra_alloc_MiB});
options.push_back({ "*", "-np, --parallel N", "number of parallel sequences to decode (default: %d)", params.n_parallel });
options.push_back({ "*", "-ns, --sequences N", "number of sequences to decode (default: %d)", params.n_sequences });
options.push_back({ "*", "-cb, --cont-batching", "enable continuous batching (a.k.a dynamic batching) (default: %s)", params.cont_batching ? "enabled" : "disabled" });
@@ -3109,7 +3115,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
cparams.min_experts = params.min_experts;
cparams.thresh_experts = params.thresh_experts;
cparams.only_active_experts = params.only_active_exps;
cparams.k_cache_hadamard = params.k_cache_hadamard;
cparams.max_extra_alloc = params.max_extra_alloc_MiB;
cparams.type_k = kv_cache_type_from_str(params.cache_type_k);
cparams.type_v = kv_cache_type_from_str(params.cache_type_v);
@@ -4049,6 +4055,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
fprintf(stream, "use_thp: %s # default: false\n", params.use_thp ? "true" : "false");
fprintf(stream, "validate_quants: %s # default: false\n", params.validate_quants ? "true" : "false");
fprintf(stream, "merge_qkv: %s # default: false\n", params.merge_qkv ? "true" : "false");
fprintf(stream, "max_extra_alloc: %d # default: 256\n", params.max_extra_alloc_MiB);
fprintf(stream, "penalize_nl: %s # default: false\n", sparams.penalize_nl ? "true" : "false");
fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type);
fprintf(stream, "ppl_stride: %d # default: 0\n", params.ppl_stride);

View File

@@ -167,6 +167,7 @@ struct gpt_params {
float yarn_beta_slow = -1.0f; // YaRN high correction dim
int32_t yarn_orig_ctx = 0; // YaRN original context length
float defrag_thold = -1.0f; // KV cache defragmentation threshold
int32_t max_extra_alloc_MiB = 256; // additional VRAM per GPU the scheduler may allocate for more efficient compute graph evaluation
ggml_backend_sched_eval_callback cb_eval = nullptr;
void * cb_eval_user_data = nullptr;

View File

@@ -212,6 +212,7 @@ extern "C" {
GGML_API void ggml_backend_sched_set_op_offload(ggml_backend_sched_t sched, enum ggml_op op, bool on_or_off);
GGML_API void ggml_backend_sched_set_only_active_experts(ggml_backend_sched_t sched, bool on_or_off);
GGML_API void ggml_backend_sched_set_split_mode_graph(ggml_backend_sched_t sched, bool on_or_off);
GGML_API void ggml_backend_sched_set_max_extra_alloc(ggml_backend_sched_t sched, int extra_alloc_MiB);
//
// Utils
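
For code that drives the ggml scheduler directly, a hedged usage sketch (the wrapper cap_extra_vram is illustrative): the argument is interpreted as MiB, negative values are ignored by the setter, and the scheduler's internal budget starts at 0 bytes, so extra allocations stay disabled until this is called.

#include "ggml-backend.h"

void cap_extra_vram(ggml_backend_sched_t sched) {
    // allow the scheduler to allocate up to 512 MiB of extra VRAM;
    // the setter converts MiB to bytes and ignores negative values
    ggml_backend_sched_set_max_extra_alloc(sched, 512);
}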

View File

@@ -19,8 +19,6 @@
#define MAX(a, b) ((a) > (b) ? (a) : (b))
constexpr size_t k_max_extra_alloc = 1024*1024*256;
// backend buffer type
const char * ggml_backend_buft_name(ggml_backend_buffer_type_t buft) {
@@ -1150,6 +1148,8 @@ struct ggml_backend_sched {
int n_splits;
int splits_capacity;
size_t max_extra_alloc = 0;
// pipeline parallelism support
int n_copies;
int cur_copy;
@@ -1201,6 +1201,13 @@ void ggml_backend_sched_set_split_mode_graph(ggml_backend_sched_t sched, bool on
sched->split_mode_graph = on_or_off;
}
void ggml_backend_sched_set_max_extra_alloc(ggml_backend_sched_t sched, int extra_alloc_MiB) {
if (!sched) return;
if (extra_alloc_MiB >= 0) {
sched->max_extra_alloc = size_t(extra_alloc_MiB)*1024*1024;
}
}
static inline bool ggml_backend_sched_offload_enabled(ggml_backend_sched_t sched, enum ggml_op op) {
int int_op = (int)op;
if (!sched || op < 0 || op >= GGML_OP_COUNT) return false;
@@ -2126,7 +2133,7 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
this_size += tensor_size(split->inputs[j]);
}
}
if (input_size + this_size > k_max_extra_alloc) {
if (input_size + this_size > sched->max_extra_alloc) {
if (i - last_split < 3) {
can_alloc = false;
break;
@@ -2191,7 +2198,7 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
}
for (int backend_id = 0; backend_id < sched->n_backends; ++backend_id) {
if (!input_size[backend_id]) continue; // this backend has no inputs, so no need to worry about it.
if (input_size[backend_id] <= k_max_extra_alloc) {
if (input_size[backend_id] <= sched->max_extra_alloc) {
if (sched->input_memory_bufs[backend_id] && sched->input_memory_bufs[backend_id]->size < input_size[backend_id]) {
ggml_backend_buffer_free(sched->input_memory_bufs[backend_id]);
sched->input_memory_bufs[backend_id] = nullptr;
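
The default of 256 MiB reproduces the previously hard-coded limit exactly; a standalone check of the conversion the setter performs:

#include <cstddef>
#include <cstdio>

int main() {
    int    mib   = 256;                        // the CLI default
    size_t bytes = (size_t)mib * 1024 * 1024;  // conversion used by the setter
    // prints 268435456, identical to the removed constant
    // k_max_extra_alloc = 1024*1024*256
    printf("%d MiB = %zu bytes\n", mib, bytes);
    return 0;
}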

View File

@@ -404,6 +404,7 @@ extern "C" {
uint32_t n_seq_max; // max number of sequences (i.e. distinct states for recurrent models)
uint32_t n_threads; // number of threads to use for generation
uint32_t n_threads_batch; // number of threads to use for batch processing
int32_t max_extra_alloc; // max. additional VRAM (in MiB) the scheduler is allowed to allocate
enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
enum llama_pooling_type pooling_type; // whether to pool (sum) embedding results by sequence id

View File

@@ -4022,6 +4022,7 @@ struct llama_context_params llama_context_default_params() {
/*.n_seq_max =*/ 1,
/*.n_threads =*/ GGML_DEFAULT_N_THREADS, // TODO: better default
/*.n_threads_batch =*/ GGML_DEFAULT_N_THREADS,
/*.max_extra_alloc =*/ 256,
/*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED,
/*.pooling_type =*/ LLAMA_POOLING_TYPE_UNSPECIFIED,
/*.attention_type =*/ LLAMA_ATTENTION_TYPE_UNSPECIFIED,
@@ -4774,6 +4775,7 @@ struct llama_context * llama_new_context_with_model(
}
if (model->split_mode == LLAMA_SPLIT_MODE_GRAPH && !model->has_tensor_overrides()) {
ggml_backend_sched_set_split_mode_graph(ctx->sched, true);
ggml_backend_sched_set_max_extra_alloc(ctx->sched, params.max_extra_alloc);
}
return ctx;
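
Putting it together through the public API, a hedged end-to-end sketch; per the hunk above, the budget is only forwarded to the scheduler on the LLAMA_SPLIT_MODE_GRAPH path without tensor overrides, so the assumption here is that `model` was loaded that way:

#include "llama.h"

llama_context * make_ctx(llama_model * model) {
    llama_context_params cparams = llama_context_default_params();
    cparams.max_extra_alloc = 512;  // MiB; the default is 256
    return llama_new_context_with_model(model, cparams);
}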