Fixing split mode graph with many GPUs (#1152)

* Attempt to fix the many-GPU issue in split mode graph

* WIP: this seems more stable

Still hanging after a while if I try to use all 7 GPUs.

* Re-enable OpenMP in scheduler async

Seems solid up to 4 GPUs. It did hang with --max-gpu 6.

* printf cleanup
Commit 709e1a5375, parent cb1063f6cd
Author: Kawrakow
Date: 2026-01-17 08:05:24 +02:00 (committed by GitHub)
3 changed files with 85 additions and 40 deletions


@@ -2180,10 +2180,20 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
}
}
}
int first_reduce = -1;
for (int i = 0; i < sched->n_splits; i++) {
auto split = &sched->splits[i];
if (split->graph.n_nodes == 1 && split->graph.nodes[0]->op == GGML_OP_REDUCE) {
first_reduce = split->backend_id;
break;
}
}
if (!has_cpu_work) {
#pragma omp parallel num_threads(sched->n_backends)
{
int last_reduce = first_reduce;
int ith = omp_get_thread_num();
struct ggml_backend_sched_split * splits = sched->splits;
@@ -2206,9 +2216,17 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
#pragma omp barrier
}
if (split->n_inputs > 0) {
int copy_thread = last_reduce >= 0 ? last_reduce : 0;
if (ith == copy_thread) {
ggml_backend_sched_copy_inputs(sched, split, sched->needs_sync, ids, unique_ids, last_ids_tensor);
}
#pragma omp barrier
}
if (ith == split_backend_id) {
// copy the input tensors to the split backend
ggml_backend_sched_copy_inputs(sched, split, sched->needs_sync, ids, unique_ids, last_ids_tensor);
sched->statuses[ith] = ggml_backend_sched_eval(sched, split_backend, split);
if (split->n_inputs > 0 && !sched->own_cpy[split_backend_id]) {
sched->needs_sync[split_backend_id] = true;
@@ -2219,10 +2237,10 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
}
}
}
sched->statuses[ith] = ggml_backend_sched_eval(sched, split_backend, split);
}
if (split->graph.nodes[0]->op == GGML_OP_REDUCE) {
last_reduce = split_backend_id;
#pragma omp barrier
}
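
For context: the hunks above scan the split list for the first single-node GGML_OP_REDUCE split and remember which backend owns it; that backend's thread becomes the designated input-copy thread until the next reduce updates last_reduce. A minimal self-contained sketch of the resulting per-split pattern, with illustrative names (Split, copy_inputs, eval are stand-ins, not the actual ggml API):

    #include <omp.h>

    enum Op { OP_NONE, OP_REDUCE };
    struct Split { int backend_id; int n_inputs; Op first_op; };

    static void copy_inputs(Split *) { /* stand-in for ggml_backend_sched_copy_inputs */ }
    static void eval(Split *)        { /* stand-in for ggml_backend_sched_eval */ }

    void compute_splits_sketch(Split * splits, int n_splits, int n_backends, int first_reduce) {
        #pragma omp parallel num_threads(n_backends)
        {
            const int ith = omp_get_thread_num();
            // per-thread copy, but every thread applies the same updates, so all agree
            int last_reduce = first_reduce;
            for (int i = 0; i < n_splits; i++) {
                Split * split = &splits[i];
                if (split->n_inputs > 0) {
                    // exactly one designated thread stages the inputs ...
                    int copy_thread = last_reduce >= 0 ? last_reduce : 0;
                    if (ith == copy_thread) copy_inputs(split);
                    #pragma omp barrier   // ... and the whole team waits for it
                }
                if (ith == split->backend_id) eval(split);   // the owner evaluates the split
                if (split->first_op == OP_REDUCE) {
                    last_reduce = split->backend_id;   // future copies come from the reducer
                    #pragma omp barrier
                }
            }
        }
    }
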
@@ -2238,8 +2256,9 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
}
#endif
if (!work_done) {
std::barrier barrier(sched->n_backends, [] () noexcept {});
auto compute = [sched, &barrier] (int ith) {
std::barrier barrier(sched->n_backends);
auto compute = [sched, &barrier, first_reduce] (int ith) {
struct ggml_backend_sched_split * splits = sched->splits;
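
In this hunk the compute lambda now captures first_reduce, and the barrier uses std::barrier's default completion step instead of the explicit no-op lambda. For reference, a minimal C++20 std::barrier usage sketch (illustrative, not the scheduler code):

    #include <barrier>
    #include <cstdio>
    #include <thread>
    #include <vector>

    int main() {
        const int n = 4;
        std::barrier sync(n);   // default completion step, same effect as a no-op lambda
        std::vector<std::thread> workers;
        for (int ith = 0; ith < n; ++ith) {
            workers.emplace_back([&sync, ith] {
                // ... phase 1 work for thread ith ...
                sync.arrive_and_wait();   // blocks until all n participants arrive
                std::printf("thread %d past the barrier\n", ith);
            });
        }
        for (auto & t : workers) t.join();
    }
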
@@ -2247,6 +2266,8 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
std::vector<uint32_t> unique_ids;
ggml_tensor * last_ids_tensor = nullptr;
int last_reduce = first_reduce;
for (int i = 0; i < sched->n_splits; i++) {
#if IK_PRINT_TIMING
int64_t tim1 = ggml_time_us();
@@ -2261,10 +2282,17 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
barrier.arrive_and_wait();
}
if (ith == split_backend_id) {
// copy the input tensors to the split backend
ggml_backend_sched_copy_inputs(sched, split, sched->needs_sync, ids, unique_ids, last_ids_tensor);
if (split->n_inputs > 0) {
int copy_thread = last_reduce >= 0 ? last_reduce : 0;
if (ith == copy_thread) {
ggml_backend_sched_copy_inputs(sched, split, sched->needs_sync, ids, unique_ids, last_ids_tensor);
}
barrier.arrive_and_wait();
}
if (ith == split_backend_id) {
sched->statuses[ith] = ggml_backend_sched_eval(sched, split_backend, split);
if (split->n_inputs > 0 && !sched->own_cpy[split_backend_id]) {
sched->needs_sync[split_backend_id] = true;
} else {
@@ -2274,10 +2302,10 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
}
}
}
sched->statuses[ith] = ggml_backend_sched_eval(sched, split_backend, split);
}
if (split->graph.nodes[0]->op == GGML_OP_REDUCE) {
last_reduce = split_backend_id;
barrier.arrive_and_wait();
}
//if (needs_barrier) {
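
This is the non-OpenMP fallback; std::barrier(sched->n_backends) deadlocks unless exactly n_backends threads reach each arrive_and_wait(), so the compute lambda must run on one thread per backend. A plausible launch pattern (an assumption; the actual launch code sits outside this hunk):

    #include <functional>
    #include <thread>
    #include <vector>

    void run_on_all_backends(int n_backends, const std::function<void(int)> & compute) {
        std::vector<std::thread> workers;
        workers.reserve(n_backends - 1);
        for (int ith = 1; ith < n_backends; ++ith) {
            workers.emplace_back(compute, ith);   // one worker thread per extra backend
        }
        compute(0);                               // backend 0 runs on the calling thread
        for (auto & t : workers) t.join();
    }
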
@@ -2287,6 +2315,7 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
// record the event of this copy
if (split->n_inputs > 0) {
if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
printf("Recording event %d, %d\n", split_backend_id, sched->cur_copy);
ggml_backend_event_record(sched->events[split_backend_id][sched->cur_copy]);
}
}


@@ -270,9 +270,15 @@ void ggml_cuda_op_reduce([[maybe_unused]] ggml_backend_cuda_context & ctx, ggml_
(const char *)dst->src[peer]->data + ichunk*nelem_per_device*elem_size, info.all_ctx[peer]->device,
this_nelem*elem_size, info.all_ctx[peer]->stream()));
CUDA_CHECK(cudaEventRecord(info.all_ctx[peer]->copy_event, info.all_ctx[peer]->stream()));
//ggml_cuda_set_device(info.all_ctx[i]->device);
//CUDA_CHECK(cudaStreamWaitEvent(info.all_ctx[i]->stream(), info.all_ctx[peer]->copy_event, 0));
ichunk = (ichunk + 1)%nhave;
}
for (int ii = 0; ii < nhave; ++ii) {
int i = idx[ii];
int peer = idx[(ii+1)%nhave];
ggml_cuda_set_device(info.all_ctx[i]->device);
CUDA_CHECK(cudaStreamWaitEvent(info.all_ctx[i]->stream(), info.all_ctx[peer]->copy_event, 0));
ichunk = (ichunk + 1)%nhave;
}
}
ggml_cuda_set_device(ctx.device);
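
The rewritten loop decouples the ring copies from the waits: each device records copy_event on its own stream after pushing its chunk to its ring neighbor, and a second pass then makes every device's stream wait on its neighbor's event before touching the received data. The underlying CUDA pattern, as a self-contained sketch (names are illustrative):

    #include <cuda_runtime.h>

    // Record an event on the stream that produces the data, then make the
    // consuming stream wait on it. The wait is enqueued on the device, so the
    // host thread never blocks.
    void order_streams(int dst_dev, void * dst, int src_dev, const void * src,
                       size_t nbytes, cudaStream_t producer, cudaStream_t consumer,
                       cudaEvent_t ev) {
        cudaMemcpyPeerAsync(dst, dst_dev, src, src_dev, nbytes, producer);
        cudaEventRecord(ev, producer);          // completion marker for the copy
        cudaStreamWaitEvent(consumer, ev, 0);   // consumer's later work sees the data
    }
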
@@ -344,7 +350,7 @@ void ggml_cuda_op_reduce([[maybe_unused]] ggml_backend_cuda_context & ctx, ggml_
ggml_cuda_set_device(ctx.device);
return;
}
if (dst->ne[1] <= 8 && ctx.p2p_enabled) {
if (dst->ne[1] < 32 && ctx.p2p_enabled) {
for (int ii = 0; ii < nhave; ++ii) {
int i = idx[ii];
GGML_ASSERT(dst->src[i]->type == dst->type);
@@ -357,7 +363,9 @@ void ggml_cuda_op_reduce([[maybe_unused]] ggml_backend_cuda_context & ctx, ggml_
}
//printf("Recorded events\n");
auto nelem = ggml_nelements(dst);
auto nelem_per_device = (nelem + nhave - 1)/nhave;
auto nelem8 = (nelem + 7)/8;
auto nelem_per_device = 8*((nelem8 + nhave - 1)/nhave);
//auto nelem_per_device = (nelem + nhave - 1)/nhave;
auto elem_size = ggml_element_size(dst);
for (int ii = 0; ii < nhave; ++ii) {
int i = idx[ii];
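
Alongside widening the p2p path from dst->ne[1] <= 8 to dst->ne[1] < 32, the per-device chunk size is now rounded up to a multiple of 8 elements so chunk boundaries stay 8-element aligned; the old formula could split mid-group. A worked example with assumed sizes:

    #include <cstdint>
    #include <cstdio>

    int main() {
        int64_t nelem = 1000003;   // total elements in dst (assumed for illustration)
        int     nhave = 7;         // participating devices
        int64_t nelem8           = (nelem + 7) / 8;                    // 125001 groups of 8
        int64_t nelem_per_device = 8 * ((nelem8 + nhave - 1) / nhave); // 142864, a multiple of 8
        // old formula: (nelem + nhave - 1) / nhave = 142858, not a multiple of 8
        std::printf("%lld per device, %lld covered >= %lld total\n",
                    (long long)nelem_per_device,
                    (long long)(nelem_per_device * nhave), (long long)nelem);
    }
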
@@ -471,6 +479,12 @@ void ggml_cuda_op_reduce([[maybe_unused]] ggml_backend_cuda_context & ctx, ggml_
ggml_cuda_set_device(i);
CUDA_CHECK(cudaStreamWaitEvent(info.all_ctx[i]->stream(), ctx.copy_event, 0));
CUDA_CHECK(cudaMemcpyPeerAsync(dst->src[i]->data, i, dst->data, ctx.device, nbytes, info.all_ctx[i]->stream()));
CUDA_CHECK(cudaEventRecord(info.all_ctx[i]->copy_event, info.all_ctx[i]->stream()));
}
ggml_cuda_set_device(ctx.device);
for (int ii = 0; ii < nhave; ++ii) {
int i = idx[ii];
if (i == ctx.device) continue;
CUDA_CHECK(cudaStreamWaitEvent(ctx.stream(), info.all_ctx[i]->copy_event, 0));
}
}
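
In this last hunk the hub device (ctx.device) broadcasts the reduced result back: each peer's stream waits on the hub's copy_event, pulls dst->data into its own buffer, and records its own copy_event; the hub's stream then waits on all peer events so it cannot overwrite dst->data while copies are still in flight. A self-contained sketch of the pattern (illustrative names, error checking omitted):

    #include <cuda_runtime.h>
    #include <vector>

    struct Peer { int device; cudaStream_t stream; cudaEvent_t done; void * dst; };

    void broadcast_from_hub(int hub_device, cudaStream_t hub_stream, cudaEvent_t hub_ready,
                            const void * hub_data, size_t nbytes, std::vector<Peer> & peers) {
        cudaEventRecord(hub_ready, hub_stream);            // reduced result ready past this point
        for (auto & p : peers) {
            cudaSetDevice(p.device);
            cudaStreamWaitEvent(p.stream, hub_ready, 0);   // gate the copy on the result
            cudaMemcpyPeerAsync(p.dst, p.device, hub_data, hub_device, nbytes, p.stream);
            cudaEventRecord(p.done, p.stream);             // per-peer completion marker
        }
        cudaSetDevice(hub_device);
        for (auto & p : peers) {
            cudaStreamWaitEvent(hub_stream, p.done, 0);    // hub must not run ahead of the copies
        }
    }
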