Mirror of https://github.com/ikawrakow/ik_llama.cpp.git (synced 2026-01-26 17:20:01 +00:00)
Fix sync logic (#1064)
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
@@ -1892,13 +1892,10 @@ static void ggml_backend_sched_copy_inputs(ggml_backend_sched_t sched, ggml_back
        if (input->flags & GGML_TENSOR_FLAG_INPUT) {
            // inputs from the user must be copied immediately to prevent the user overwriting the data before the copy is done
            if (needs_sync[split_backend_id]) {
                if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
                    ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
                } else {
                    ggml_backend_synchronize(split_backend);
                }
                //needs_sync[split_backend_id] = false;
                if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
                    ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
                } else {
                    ggml_backend_synchronize(split_backend);
                }
                ggml_backend_tensor_copy(input, input_cpy);
            } else {
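For context on what this hunk touches: before a user-provided input tensor is copied onto the split backend, the scheduler first waits for that backend, either via the per-copy event when one was recorded or via a full `ggml_backend_synchronize`, and the `needs_sync` flag is then cleared (as the later hunks show) so the same backend is not waited on again for subsequent inputs. Below is a minimal sketch of that wait-then-clear pattern with simplified stand-in types; `sync_backend_once` is a hypothetical helper for illustration, not part of ggml.

```cpp
// Sketch only: simplified stand-ins for the scheduler's per-backend state.
#include <vector>

struct backend_state {
    bool   needs_sync = false;   // set when async work was submitted to this backend
    void * event      = nullptr; // stand-in for a recorded ggml backend event (may be null)
};

static void wait_on_event(void * /*event*/)    { /* e.g. ggml_backend_event_synchronize(...) */ }
static void full_backend_sync(int /*backend*/) { /* e.g. ggml_backend_synchronize(...)       */ }

// Hypothetical helper: wait on the recorded event if one exists, otherwise
// fall back to a full blocking sync, then clear the flag so the backend is
// not synchronized again for later inputs of the same split.
static void sync_backend_once(std::vector<backend_state> & backends, int backend_id) {
    backend_state & st = backends[backend_id];
    if (!st.needs_sync) {
        return;                        // nothing outstanding on this backend
    }
    if (st.event != nullptr) {
        wait_on_event(st.event);       // cheaper: wait only for the recorded event
    } else {
        full_backend_sync(backend_id); // fallback: block until the whole queue drains
    }
    st.needs_sync = false;             // clear the flag after the wait completes
}
```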
@@ -1909,7 +1906,7 @@ static void ggml_backend_sched_copy_inputs(ggml_backend_sched_t sched, ggml_back
                } else {
                    ggml_backend_synchronize(split_backend);
                }
                //needs_sync[split_backend_id] = false;
                needs_sync[split_backend_id] = false;
            }

            ggml_tensor * node = split->graph.nodes[0];
@@ -1923,7 +1920,6 @@ static void ggml_backend_sched_copy_inputs(ggml_backend_sched_t sched, ggml_back
                last_input_backend = input_backend;
            }

            //printf("node: %s have %d inputs, processing input %d\n", node->name, split->n_inputs, j);
            ggml_tensor * ids_tensor = node->op == GGML_OP_MUL_MAT_ID ? node->src[2] : node->src[3];
            auto ids_backend = split_backend;
@@ -1945,7 +1941,7 @@ static void ggml_backend_sched_copy_inputs(ggml_backend_sched_t sched, ggml_back
                ggml_backend_tensor_get_async(ids_backend, ids_tensor, ids.data(), 0, ggml_nbytes(ids_tensor));

                ggml_backend_synchronize(ids_backend);
                //needs_sync[tensor_backend_id(ids_tensor)] = false;
                needs_sync[tensor_backend_id(ids_tensor)] = false;

                unique_ids.resize((n_expert + 31)/32);
                std::memset(unique_ids.data(), 0, unique_ids.size()*sizeof(uint32_t));
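The hunk above reads the MUL_MAT_ID routing tensor back to the host, synchronizes the backend that owns it, and then prepares `unique_ids` as a bitset with one bit per expert: `(n_expert + 31)/32` 32-bit words, zeroed with `memset`. The code that actually sets and tests the bits falls outside the shown hunk, so the helpers below are an illustrative sketch under that assumption, not the repository's implementation.

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

// Mark every expert id that occurs in `ids` in a bitset that stores one bit
// per expert (one uint32_t word per 32 experts). The resize + memset mirror
// the lines in the hunk; the set/test logic is an illustrative assumption.
static void collect_unique_ids(const std::vector<int32_t> & ids, int n_expert,
                               std::vector<uint32_t> & unique_ids) {
    unique_ids.resize((n_expert + 31)/32);
    std::memset(unique_ids.data(), 0, unique_ids.size()*sizeof(uint32_t));
    for (int32_t id : ids) {
        if (id >= 0 && id < n_expert) {
            unique_ids[id >> 5] |= 1u << (id & 31);   // set bit `id`
        }
    }
}

static bool has_expert(const std::vector<uint32_t> & unique_ids, int id) {
    return (unique_ids[id >> 5] >> (id & 31)) & 1u;   // test bit `id`
}
```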
@@ -2005,7 +2001,7 @@ static void ggml_backend_sched_copy_inputs(ggml_backend_sched_t sched, ggml_back
                int input_backend_id = tensor_backend_id(input);
                if (needs_sync[input_backend_id]) {
                    ggml_backend_synchronize(input_backend);
                    //needs_sync[input_backend_id] = false;
                    needs_sync[input_backend_id] = false;
                }
                if (needs_sync[split_backend_id]) {
                    if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
@@ -2013,7 +2009,7 @@ static void ggml_backend_sched_copy_inputs(ggml_backend_sched_t sched, ggml_back
                    } else {
                        ggml_backend_synchronize(split_backend);
                    }
                    //needs_sync[split_backend_id] = false;
                    needs_sync[split_backend_id] = false;
                }
                ggml_backend_tensor_copy(input, input_cpy);
            }
@@ -2034,7 +2030,6 @@ static ggml_status ggml_backend_sched_compute_splits_sm_graph(ggml_backend_sched
    for (int i = 0; i < sched->n_splits; ++i) {
        auto split_i = &splits[i];
        this_split.clear();
        //auto& this_split = all_splits.emplace_back();
        this_split.push_back(split_i);
        for (int j = i+1; j < sched->n_splits; ++j) {
            auto split_j = &splits[j];
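The last hunk is from ggml_backend_sched_compute_splits_sm_graph: `this_split` is a scratch vector that is cleared and refilled for each starting split i and then extended with following splits j. The condition under which split j joins the group is not visible in the hunk, so the `can_merge` check in the sketch below is a placeholder assumption; only the clear/push-back reuse pattern is taken from the diff.

```cpp
#include <vector>

struct sched_split { int backend_id; /* graph, inputs, ... */ };

// Placeholder grouping criterion -- the real condition is not part of the hunk.
static bool can_merge(const sched_split & a, const sched_split & b) {
    return a.backend_id == b.backend_id;
}

// Group consecutive splits, reusing one scratch vector instead of emplacing a
// new vector per group (the commented-out all_splits.emplace_back() line above).
static void group_splits(const std::vector<sched_split> & splits,
                         std::vector<std::vector<const sched_split *>> & groups) {
    std::vector<const sched_split *> this_split;   // reused scratch buffer
    for (size_t i = 0; i < splits.size(); ++i) {
        this_split.clear();
        this_split.push_back(&splits[i]);
        size_t j = i + 1;
        for (; j < splits.size(); ++j) {
            if (!can_merge(splits[i], splits[j])) {
                break;
            }
            this_split.push_back(&splits[j]);
        }
        groups.push_back(this_split);
        i = j - 1;   // skip the splits that were merged into this group
    }
}
```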