Mirror of https://github.com/ikawrakow/ik_llama.cpp.git
Better PP performance with split mode "graph" and 3+ GPUs (#1069)

* This should do the trick for PP
* Command line option to set max. extra VRAM that the scheduler can use
* Fix bug and cleanup
* Looks like with this change it is working with tensor overrides
* Nah, it is not working
* OK, this seems to be working
* Disable split scheduling with tensor overrides

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
@@ -757,6 +757,11 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.defrag_thold = std::stof(argv[i]);
         return true;
     }
+    if (arg == "--max-extra-alloc" || arg == "-mea") {
+        CHECK_ARG
+        params.max_extra_alloc_MiB = std::stoi(argv[i]);
+        return true;
+    }
     if (arg == "--samplers") {
         CHECK_ARG
         const auto sampler_names = string_split(argv[i], ";");
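
Note: the new option takes a value in MiB and is stored in params.max_extra_alloc_MiB (default 256). A hypothetical invocation (binary name and the other flags are illustrative, not part of this diff, and the "-sm graph" spelling of the split-mode option is assumed) could look like:

    ./llama-cli -m model.gguf -ngl 99 -sm graph -mea 512

which would let the scheduler use up to 512 MiB of extra VRAM per GPU.
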
@@ -2218,6 +2223,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param

     options.push_back({ "parallel" });
     options.push_back({ "*", "-dt, --defrag-thold N", "KV cache defragmentation threshold (default: %.1f, < 0 - disabled)", (double)params.defrag_thold });
+    options.push_back({ "*", "-mea, --max-extra-alloc", "Max extra VRAM allocation per GPU (default: %d)", params.max_extra_alloc_MiB});
     options.push_back({ "*", "-np, --parallel N", "number of parallel sequences to decode (default: %d)", params.n_parallel });
     options.push_back({ "*", "-ns, --sequences N", "number of sequences to decode (default: %d)", params.n_sequences });
     options.push_back({ "*", "-cb, --cont-batching", "enable continuous batching (a.k.a dynamic batching) (default: %s)", params.cont_batching ? "enabled" : "disabled" });
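
For reference, with the default value this should render in the help output roughly as (exact column alignment depends on the usage printer):

    -mea, --max-extra-alloc          Max extra VRAM allocation per GPU (default: 256)
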
@@ -3109,7 +3115,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;
     cparams.only_active_experts = params.only_active_exps;
-    cparams.k_cache_hadamard = params.k_cache_hadamard;
+    cparams.max_extra_alloc = params.max_extra_alloc_MiB;

     cparams.type_k = kv_cache_type_from_str(params.cache_type_k);
     cparams.type_v = kv_cache_type_from_str(params.cache_type_v);
@@ -4049,6 +4055,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "use_thp: %s # default: false\n", params.use_thp ? "true" : "false");
     fprintf(stream, "validate_quants: %s # default: false\n", params.validate_quants ? "true" : "false");
     fprintf(stream, "merge_qkv: %s # default: false\n", params.merge_qkv ? "true" : "false");
+    fprintf(stream, "max_extra_alloc: %d # default: 256\n", params.max_extra_alloc_MiB);
     fprintf(stream, "penalize_nl: %s # default: false\n", sparams.penalize_nl ? "true" : "false");
     fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type);
     fprintf(stream, "ppl_stride: %d # default: 0\n", params.ppl_stride);
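
With the default left unchanged, the corresponding line in the YAML dump reads:

    max_extra_alloc: 256 # default: 256
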
@@ -167,6 +167,7 @@ struct gpt_params {
     float yarn_beta_slow = -1.0f; // YaRN high correction dim
     int32_t yarn_orig_ctx = 0; // YaRN original context length
     float defrag_thold = -1.0f; // KV cache defragmentation threshold
+    int32_t max_extra_alloc_MiB = 256; // additional VRAM per GPU the scheduler may allocate for more efficient compute graph evaluation

     ggml_backend_sched_eval_callback cb_eval = nullptr;
     void * cb_eval_user_data = nullptr;
@@ -212,6 +212,7 @@ extern "C" {
     GGML_API void ggml_backend_sched_set_op_offload(ggml_backend_sched_t sched, enum ggml_op op, bool on_or_off);
     GGML_API void ggml_backend_sched_set_only_active_experts(ggml_backend_sched_t sched, bool on_or_off);
     GGML_API void ggml_backend_sched_set_split_mode_graph(ggml_backend_sched_t sched, bool on_or_off);
+    GGML_API void ggml_backend_sched_set_max_extra_alloc(ggml_backend_sched_t sched, int extra_alloc_MiB);

     //
     // Utils
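
For code that drives the ggml scheduler directly, the new setter pairs with the existing split-mode-graph toggle. A minimal sketch, assuming an already created scheduler (creation and backend setup elided; the function name and the 512 MiB value are illustrative):

    #include "ggml-backend.h"

    // Sketch: enable split mode "graph" on an existing scheduler and cap the extra
    // per-GPU input-staging allocation at 512 MiB (value illustrative).
    static void configure_sched(ggml_backend_sched_t sched) {
        ggml_backend_sched_set_split_mode_graph(sched, true);
        ggml_backend_sched_set_max_extra_alloc(sched, 512); // in MiB; negative values are ignored
    }
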
@@ -13,13 +13,12 @@
 #include <vector>
 #include <set>
 #include <array>
+#include <chrono>

 #define IK_PRINT_TIMING 0

 #define MAX(a, b) ((a) > (b) ? (a) : (b))

-constexpr size_t k_max_extra_alloc = 1024*1024*64;
-
 // backend buffer type

 const char * ggml_backend_buft_name(ggml_backend_buffer_type_t buft) {
@@ -1149,6 +1148,8 @@ struct ggml_backend_sched {
     int n_splits;
     int splits_capacity;

+    size_t max_extra_alloc = 0;
+
     // pipeline parallelism support
     int n_copies;
     int cur_copy;
@@ -1200,6 +1201,13 @@ void ggml_backend_sched_set_split_mode_graph(ggml_backend_sched_t sched, bool on
     sched->split_mode_graph = on_or_off;
 }

+void ggml_backend_sched_set_max_extra_alloc(ggml_backend_sched_t sched, int extra_alloc_MiB) {
+    if (!sched) return;
+    if (extra_alloc_MiB >= 0) {
+        sched->max_extra_alloc = size_t(extra_alloc_MiB)*1024*1024;
+    }
+}
+
 static inline bool ggml_backend_sched_offload_enabled(ggml_backend_sched_t sched, enum ggml_op op) {
     int int_op = (int)op;
     if (!sched || op < 0 || op >= GGML_OP_COUNT) return false;
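
The setter converts MiB to bytes with the multiplication done in size_t, so the default of 256 MiB becomes 256*1024*1024 = 268,435,456 bytes, and the cast avoids signed 32-bit overflow for values of 2048 MiB and above. A tiny, self-contained illustration of the same conversion (not part of the diff):

    #include <cstddef>
    // 256 MiB expressed in bytes, mirroring the size_t(...)*1024*1024 conversion above.
    static_assert(std::size_t(256)*1024*1024 == 268435456, "256 MiB is 268435456 bytes");
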
@@ -2004,11 +2012,7 @@ static void ggml_backend_sched_copy_inputs(ggml_backend_sched_t sched, ggml_back
             // try async copy, but if not possible, we can still use a sync copy without synchronizing the dst backend, since we handle the synchronization here with multiple copies and events
             // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface
             if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) {
-                int input_backend_id = tensor_backend_id(input);
-                if (needs_sync[input_backend_id]) {
-                    ggml_backend_synchronize(input_backend);
-                    needs_sync[input_backend_id] = k_set_sync;
-                }
+                ggml_backend_synchronize(input_backend);
                 if (needs_sync[split_backend_id]) {
                     if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
                         ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
@@ -2101,50 +2105,83 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
     std::array<bool, GGML_SCHED_MAX_BACKENDS> own_cpy{{false}};

     if (sched->split_mode_graph) {
-        std::vector<size_t> input_size(sched->n_backends, 0);
+        auto tensor_size = [] (const ggml_tensor * t) {
+            auto nbytes = ggml_nbytes(t);
+            nbytes = 256*((nbytes + 255)/256);
+            return nbytes;
+        };
+        //auto tim1 = std::chrono::steady_clock::now();
+        std::vector<std::vector<ggml_backend_sched_split*>> backend_splits(sched->n_backends);
         for (int i = 0; i < sched->n_splits; i++) {
-            auto split = &sched->splits[i];
-            int split_backend_id = split->backend_id;
-            for (int j = 0; j < split->n_inputs; ++j) {
-                auto nbytes = ggml_nbytes(split->inputs[j]);
-                nbytes = 256*((nbytes + 255)/256);
-                input_size[split_backend_id] += nbytes;
-            }
+            backend_splits[sched->splits[i].backend_id].push_back(&sched->splits[i]);
         }
         for (int backend_id = 0; backend_id < sched->n_backends; ++backend_id) {
-            if (!input_size[backend_id]) continue; // this backend has no inputs, so no need to worry about it.
-            if (input_size[backend_id] <= k_max_extra_alloc) {
-                if (sched->input_memory_bufs[backend_id] && sched->input_memory_bufs[backend_id]->size < input_size[backend_id]) {
-                    ggml_backend_buffer_free(sched->input_memory_bufs[backend_id]);
-                    sched->input_memory_bufs[backend_id] = nullptr;
-                }
-                if (!sched->input_memory_bufs[backend_id]) {
-                    sched->input_memory_bufs[backend_id] = ggml_backend_alloc_buffer(sched->backends[backend_id], input_size[backend_id]);
-                }
-                auto ptr = (char *)ggml_backend_buffer_get_base(sched->input_memory_bufs[backend_id]);
-                for (int i = 0; i < sched->n_splits; ++i) {
-                    auto split = &sched->splits[i];
-                    if (split->backend_id != backend_id) continue;
-                    for (int j = 0; j < split->n_inputs; ++j) {
-                        auto input_cpy = tensor_copy(split->inputs[j], backend_id, sched->cur_copy);
-                        for (int k = 0; k < split->graph.n_nodes; ++k) {
-                            auto node = split->graph.nodes[k];
-                            for (int l = 0; l < GGML_MAX_SRC; ++l) {
-                                if (node->src[l] && node->src[l]->data == input_cpy->data) node->src[l]->data = ptr;
-                            }
-                        }
-                        input_cpy->data = ptr;
-                        auto nbytes = ggml_nbytes(split->inputs[j]);
-                        nbytes = 256*((nbytes + 255)/256);
-                        ptr += nbytes;
+            if (ggml_backend_is_cpu(ggml_backend_sched_get_backend(sched, backend_id))) continue;
+            if (backend_splits[backend_id].empty()) continue;
+            size_t input_size = 0;
+            size_t max_input_size = 0;
+            int last_split = 0;
+            bool can_alloc = true;
+            for (int i = 0; i < int(backend_splits[backend_id].size()); ++i) {
+                auto split = backend_splits[backend_id][i];
+                if (split->n_inputs < 1) continue;
+                size_t this_size = 0;
+                for (int j = 0; j < split->n_inputs; ++j) {
+                    if (!ggml_backend_buffer_is_host(split->inputs[j]->buffer)) {
+                        this_size += tensor_size(split->inputs[j]);
                     }
                 }
-                needs_sync[backend_id] = false;
-                own_cpy[backend_id] = true;
+                if (input_size + this_size > sched->max_extra_alloc) {
+                    if (i - last_split < 3) {
+                        can_alloc = false;
+                        break;
+                    }
+                    max_input_size = std::max(max_input_size, input_size);
+                    input_size = 0;
+                    last_split = i - 1;
+                }
+                input_size += this_size;
             }
+            max_input_size = std::max(max_input_size, input_size);
+            if (!can_alloc || !max_input_size) continue;
+            if (sched->input_memory_bufs[backend_id] && sched->input_memory_bufs[backend_id]->size < max_input_size) {
+                ggml_backend_buffer_free(sched->input_memory_bufs[backend_id]);
+                sched->input_memory_bufs[backend_id] = nullptr;
+            }
+            if (!sched->input_memory_bufs[backend_id]) {
+                sched->input_memory_bufs[backend_id] = ggml_backend_alloc_buffer(sched->backends[backend_id], max_input_size);
+            }
+            auto ptr = (char *)ggml_backend_buffer_get_base(sched->input_memory_bufs[backend_id]);
+            input_size = 0;
+            for (int i = 0; i < int(backend_splits[backend_id].size()); ++i) {
+                auto split = backend_splits[backend_id][i];
+                size_t this_size = 0;
+                for (int j = 0; j < split->n_inputs; ++j) {
+                    if (!ggml_backend_buffer_is_host(split->inputs[j]->buffer)) {
+                        this_size += tensor_size(split->inputs[j]);
+                    }
+                }
+                if (input_size + this_size > max_input_size) {
+                    ptr = (char *)ggml_backend_buffer_get_base(sched->input_memory_bufs[backend_id]);
+                    input_size = 0;
+                }
+                for (int j = 0; j < split->n_inputs; ++j) {
+                    if (ggml_backend_buffer_is_host(split->inputs[j]->buffer)) continue;
+                    auto input_cpy = tensor_copy(split->inputs[j], backend_id, sched->cur_copy);
+                    for (int k = 0; k < split->graph.n_nodes; ++k) {
+                        auto node = split->graph.nodes[k];
+                        for (int l = 0; l < GGML_MAX_SRC; ++l) {
+                            if (node->src[l] && node->src[l]->data == input_cpy->data) node->src[l]->data = ptr;
+                        }
+                    }
+                    input_cpy->data = ptr;
+                    ptr += tensor_size(split->inputs[j]);
+                }
+                input_size += this_size;
+            }
+            needs_sync[backend_id] = false;
+            own_cpy[backend_id] = true;
         }
-        //printf("=== Input memory per backend:\n");
-        //for (int i = 0; i < sched->n_backends; ++i) printf(" %d: %.2f MiB\n", i, input_size[i]/1024./1024.);
     }

     struct ggml_backend_sched_split * splits = sched->splits;
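
This is the heart of the change. Instead of requiring all of a backend's inputs to fit into a single fixed 64 MiB budget (the old k_max_extra_alloc), splits are now grouped per backend and their non-host input sizes (rounded up to 256-byte multiples by tensor_size) are packed greedily into chunks that must each fit within sched->max_extra_alloc. If any chunk would close after fewer than three splits the backend falls back to the regular per-input copies; otherwise one staging buffer sized to the largest chunk is allocated and the input copies are rebased into it, wrapping back to the buffer base at chunk boundaries. A self-contained sketch of just the chunk-planning decision, with hypothetical names (this is not the scheduler's actual code):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Greedy chunk planning: `sizes` holds each split's total non-host input size in
    // bytes (already rounded up to 256-byte multiples). Returns the staging-buffer
    // size to allocate, or 0 if packing under `budget` would produce a chunk of
    // fewer than 3 splits (in which case the caller keeps the old copy path).
    static std::size_t plan_staging_buffer(const std::vector<std::size_t> & sizes, std::size_t budget) {
        std::size_t chunk = 0, max_chunk = 0;
        int last_split = 0;
        for (int i = 0; i < (int)sizes.size(); ++i) {
            if (chunk + sizes[i] > budget) {
                if (i - last_split < 3) return 0; // chunks this small are not worth it
                max_chunk = std::max(max_chunk, chunk);
                chunk = 0;
                last_split = i - 1;
            }
            chunk += sizes[i];
        }
        return std::max(max_chunk, chunk);
    }

Reusing the same buffer for successive chunks presumably relies on splits being executed in order on each backend, so a chunk's inputs are consumed before the write pointer wraps back to the base.
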
@@ -2166,6 +2203,12 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s

         if (split->n_inputs > 0 && !own_cpy[split_backend_id]) {
             needs_sync[split_backend_id] = true;
+        } else {
+            for (int j = 0; j < split->n_inputs; ++j) {
+                if (ggml_backend_buffer_is_host(split->inputs[j]->buffer)) {
+                    needs_sync[split_backend_id] = true;
+                }
+            }
         }
         if (!sched->callback_eval) {
 #if IK_PRINT_TIMING
@@ -404,6 +404,7 @@ extern "C" {
         uint32_t n_seq_max; // max number of sequences (i.e. distinct states for recurrent models)
         uint32_t n_threads; // number of threads to use for generation
         uint32_t n_threads_batch; // number of threads to use for batch processing
+        int32_t max_extra_alloc; // Max. additional VRAM the scheduler is allowed to allocate

         enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
         enum llama_pooling_type pooling_type; // whether to pool (sum) embedding results by sequence id
@@ -4022,6 +4022,7 @@ struct llama_context_params llama_context_default_params() {
         /*.n_seq_max =*/ 1,
         /*.n_threads =*/ GGML_DEFAULT_N_THREADS, // TODO: better default
         /*.n_threads_batch =*/ GGML_DEFAULT_N_THREADS,
+        /*.max_extra_alloc =*/ 256,
         /*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED,
         /*.pooling_type =*/ LLAMA_POOLING_TYPE_UNSPECIFIED,
         /*.attention_type =*/ LLAMA_ATTENTION_TYPE_UNSPECIFIED,
@@ -4774,6 +4775,7 @@ struct llama_context * llama_new_context_with_model(
     }
     if (model->split_mode == LLAMA_SPLIT_MODE_GRAPH && !model->has_tensor_overrides()) {
         ggml_backend_sched_set_split_mode_graph(ctx->sched, true);
+        ggml_backend_sched_set_max_extra_alloc(ctx->sched, params.max_extra_alloc);
     }

     return ctx;
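
On the llama API side the knob is just a context parameter, applied only for split mode "graph" without tensor overrides (per the check above). A minimal sketch of setting it programmatically (model loading elided; the function name and the 512 MiB value are illustrative):

    #include "llama.h"

    // Sketch: create a context that allows the scheduler up to 512 MiB of extra VRAM per GPU.
    static llama_context * make_context(llama_model * model) {
        llama_context_params cparams = llama_context_default_params();
        cparams.max_extra_alloc = 512; // in MiB; the default is 256
        return llama_new_context_with_model(model, cparams);
    }
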