mirror of https://github.com/ikawrakow/ik_llama.cpp.git
OK, this seems to be working
@@ -2012,11 +2012,7 @@ static void ggml_backend_sched_copy_inputs(ggml_backend_sched_t sched, ggml_back
                 // try async copy, but if not possible, we can still use a sync copy without synchronizing the dst backend, since we handle the synchronization here with multiple copies and events
                 // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface
                 if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) {
-                    int input_backend_id = tensor_backend_id(input);
-                    if (needs_sync[input_backend_id]) {
-                        ggml_backend_synchronize(input_backend);
-                        needs_sync[input_backend_id] = k_set_sync;
-                    }
+                    ggml_backend_synchronize(input_backend);
                     if (needs_sync[split_backend_id]) {
                         if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
                             ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
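Net effect of this hunk: the per-input needs_sync bookkeeping on the source side is dropped, and the input backend is now synchronized unconditionally before the synchronous fallback copy. Below is a minimal sketch of the resulting fallback path, written against the public ggml-backend API from ggml-backend.h; the helper, its parameters, and the function-pointer stand-in for the private iface.cpy_tensor_async hook (which the TODO above notes is not reachable by applications) are illustrative, not part of the patch.

    #include <stdbool.h>
    #include <stddef.h>
    #include "ggml-backend.h"

    // stand-in for split_backend->iface.cpy_tensor_async, which is not public;
    // returns false when the backend cannot perform the copy asynchronously
    typedef bool (*cpy_tensor_async_fn)(ggml_backend_t backend_src, ggml_backend_t backend_dst,
                                        struct ggml_tensor * src, struct ggml_tensor * dst);

    // hypothetical helper mirroring the post-patch copy path
    static void copy_input_sketch(ggml_backend_t       input_backend,
                                  ggml_backend_t       split_backend,
                                  struct ggml_tensor * input,
                                  struct ggml_tensor * input_cpy,
                                  cpy_tensor_async_fn  cpy_async,         // may be NULL
                                  ggml_backend_event_t split_copy_event)  // may be NULL
    {
        // try the async copy first; fall back to a blocking copy if the
        // backend provides no async hook or the hook declines
        if (cpy_async == NULL || !cpy_async(input_backend, split_backend, input, input_cpy)) {
            // after this commit: drain the producer of `input` unconditionally,
            // instead of consulting the removed needs_sync[input_backend_id] flag
            ggml_backend_synchronize(input_backend);
            // still wait for the previous consumer of input_cpy on the split
            // backend before the blocking copy overwrites it
            if (split_copy_event != NULL) {
                ggml_backend_event_synchronize(split_copy_event);
            }
            ggml_backend_tensor_copy(input, input_cpy);
        }
    }

Note the real code keeps the event wait behind the needs_sync[split_backend_id] check shown above; the sketch always consults the event, which is the conservative reading.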
@@ -4773,7 +4773,7 @@ struct llama_context * llama_new_context_with_model(
             LLAMA_LOG_INFO("XXXXXXXXXXXXXXXXXXXXX Setting only active experts offload\n");
             ggml_backend_sched_set_only_active_experts(ctx->sched, true);
         }
-        if (model->split_mode == LLAMA_SPLIT_MODE_GRAPH && !model->has_tensor_overrides()) {
+        if (model->split_mode == LLAMA_SPLIT_MODE_GRAPH) { // && !model->has_tensor_overrides()) {
             ggml_backend_sched_set_split_mode_graph(ctx->sched, true);
             ggml_backend_sched_set_max_extra_alloc(ctx->sched, params.max_extra_alloc);
         }
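The second hunk only comments out the !model->has_tensor_overrides() guard rather than removing it, so graph split mode (and its extra-allocation budget) is now enabled for any model using LLAMA_SPLIT_MODE_GRAPH, including models with tensor overrides. A minimal sketch of the resulting scheduler setup, assuming the fork-specific ggml_backend_sched_set_* setters named in the diff; the wrapper function and the parameter type of max_extra_alloc are assumptions for illustration.

    #include <stdbool.h>
    #include "ggml-backend.h"  // assumed to declare the ik_llama.cpp setters used below

    // hypothetical wrapper around the scheduler configuration done in
    // llama_new_context_with_model(); the three ggml_backend_sched_set_*
    // calls are taken from the hunk, everything else is illustrative
    static void configure_sched_sketch(ggml_backend_sched_t sched,
                                       bool only_active_experts,
                                       int  max_extra_alloc) {  // type of params.max_extra_alloc assumed
        if (only_active_experts) {
            ggml_backend_sched_set_only_active_experts(sched, true);
        }
        // after this commit: applied whenever split_mode == LLAMA_SPLIT_MODE_GRAPH,
        // even for models with tensor overrides (old guard commented out above)
        ggml_backend_sched_set_split_mode_graph(sched, true);
        ggml_backend_sched_set_max_extra_alloc(sched, max_extra_alloc);
    }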