Andreas Kieslinger, 2026-01-02 23:47:03 +02:00, committed by GitHub
commit ff1848c7ae
2 changed files with 50 additions and 7 deletions


@@ -736,6 +736,38 @@ struct ggml_backend_sched {
int debug_prev_graph_size;
};
static void ggml_backend_synchronize_if_required(ggml_backend_t current_backend, bool backend_implicitly_synced) {
if (backend_implicitly_synced) {
return;
}
ggml_backend_synchronize(current_backend);
}
static bool ggml_backend_implicitly_synced(ggml_backend_t current_backend) {
/*
* Some backends have implicit synchronization mechanisms which allow several asynchronous memory copies to run in parallel without data races.
* An example of this is the CUDA backend, where copies issued on the same CUDA stream execute in order.
* For these backends, we can skip costly explicit synchronizations during compute split scheduling.
*/
static bool disable_scheduler_sync_opt = (getenv("GGML_SCHED_DISABLE_SYNC_OPT") != nullptr);
if (disable_scheduler_sync_opt) {
return false;
}
// To avoid changing any APIs or what ggml-base links to, backends can only be detected by string matching
auto backend_name = ggml_backend_name(current_backend);
if (strncmp(backend_name, "CUDA", 4) == 0) {
return true;
}
// sync other backends to ensure correctness
return false;
}
#define hash_id(tensor) ggml_hash_find_or_insert(&sched->hash_set, tensor)
#define tensor_backend_id(tensor) sched->hv_tensor_backend_ids[hash_id(tensor)]
#define tensor_id_copy(id, backend_id, copy_id) sched->hv_tensor_copies[(id) * sched->n_backends * sched->n_copies + (backend_id) * sched->n_copies + (copy_id)]
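For context, the guarantee that ggml_backend_implicitly_synced relies on for the CUDA backend is stream ordering: operations enqueued on the same CUDA stream execute in submission order, so back-to-back async copies cannot race with each other. A minimal standalone sketch of that property (illustrative only, not part of this change; the buffer pointers and sizes are hypothetical):

#include <cuda_runtime.h>
// Sketch: two async copies issued on the same stream are implicitly ordered,
// so no synchronization is needed between them, only one after both are queued.
static void copy_two_buffers_async(void * d_a, const void * h_a, size_t n_a,
                                   void * d_b, const void * h_b, size_t n_b) {
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    cudaMemcpyAsync(d_a, h_a, n_a, cudaMemcpyHostToDevice, stream); // copy 1
    cudaMemcpyAsync(d_b, h_b, n_b, cudaMemcpyHostToDevice, stream); // copy 2, ordered after copy 1
    cudaStreamSynchronize(stream); // single explicit sync once both copies are queued
    cudaStreamDestroy(stream);
}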
@@ -1452,6 +1484,8 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
struct ggml_backend_sched_split * split = &splits[split_id];
int split_backend_id = split->backend_id;
ggml_backend_t split_backend = sched->backends[split_backend_id];
// some backends can avoid costly syncs between async copies
bool backend_implicitly_synced = ggml_backend_implicitly_synced(split_backend);
// copy the input tensors to the split backend
for (int input_id = 0; input_id < split->n_inputs; input_id++) {
@@ -1464,15 +1498,16 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
} else {
ggml_backend_synchronize(split_backend);
ggml_backend_synchronize_if_required(split_backend, backend_implicitly_synced);
}
ggml_backend_tensor_copy(input, input_cpy);
ggml_backend_tensor_copy_async(input_backend, split_backend, input, input_cpy);
ggml_backend_synchronize_if_required(split_backend, backend_implicitly_synced);
} else {
// wait for the split backend to finish using the input before overwriting it
if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
ggml_backend_event_wait(split_backend, sched->events[split_backend_id][sched->cur_copy]);
} else {
ggml_backend_synchronize(split_backend);
ggml_backend_synchronize_if_required(split_backend, backend_implicitly_synced);
}
// when offloading MoE weights, we can reduce the amount of data copied by copying only the experts that are used
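As a usage note for the scheduler change above: the fast path can be disabled at runtime through the GGML_SCHED_DISABLE_SYNC_OPT environment variable. Any definition of the variable (even an empty value) works, since ggml_backend_implicitly_synced only checks getenv(...) != nullptr, and the result is cached in a static on first use, so it must be set before the first compute call. A small sketch of doing this from a test harness (hypothetical helper, assumes a POSIX setenv):

#include <cstdlib>
// Sketch only: fall back to explicit synchronization, e.g. when debugging a suspected race.
static void disable_sched_sync_opt(void) {
    setenv("GGML_SCHED_DISABLE_SYNC_OPT", "1", 1); // must run before the scheduler is first used
}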


@@ -2780,11 +2780,14 @@ static bool ggml_backend_cuda_cpy_tensor_async(ggml_backend_t backend_src, ggml_
ggml_backend_buffer_t buf_src = src->view_src ? src->view_src->buffer : src->buffer;
ggml_backend_buffer_t buf_dst = dst->view_src ? dst->view_src->buffer : dst->buffer;
if (!ggml_backend_is_cuda(backend_src) || !ggml_backend_is_cuda(backend_dst)) {
// enables async copies from CPU to CUDA, instead of only CUDA-to-CUDA
bool copy_from_host = ggml_backend_buffer_is_host(src->buffer);
if (!(copy_from_host || ggml_backend_is_cuda(backend_src)) || !ggml_backend_is_cuda(backend_dst)) {
return false;
}
if (!ggml_backend_buffer_is_cuda(src->buffer) || !ggml_backend_buffer_is_cuda(dst->buffer)) {
if (!(copy_from_host || ggml_backend_buffer_is_cuda(src->buffer)) || !ggml_backend_buffer_is_cuda(dst->buffer)) {
return false;
}
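Folded into a single predicate, the relaxed checks above admit an async copy whenever the destination is CUDA on both the backend and buffer side and the source is either CUDA or host-resident. A standalone sketch of that condition (hypothetical form; the flags stand in for the ggml_backend_is_cuda, ggml_backend_buffer_is_cuda and ggml_backend_buffer_is_host calls in the diff):

// Sketch: combined admission check of ggml_backend_cuda_cpy_tensor_async after this change.
static bool cuda_async_copy_admissible(bool src_backend_is_cuda, bool src_buffer_is_cuda,
                                       bool src_buffer_is_host,
                                       bool dst_backend_is_cuda, bool dst_buffer_is_cuda) {
    const bool src_ok = src_buffer_is_host || (src_backend_is_cuda && src_buffer_is_cuda);
    const bool dst_ok = dst_backend_is_cuda && dst_buffer_is_cuda;
    return src_ok && dst_ok;
}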
@@ -2795,14 +2798,19 @@ static bool ggml_backend_cuda_cpy_tensor_async(ggml_backend_t backend_src, ggml_
ggml_backend_cuda_buffer_context * buf_ctx_src = (ggml_backend_cuda_buffer_context *)buf_src->context;
ggml_backend_cuda_buffer_context * buf_ctx_dst = (ggml_backend_cuda_buffer_context *)buf_dst->context;
if (cuda_ctx_src->device != buf_ctx_src->device || cuda_ctx_dst->device != buf_ctx_dst->device) {
if (!copy_from_host && (cuda_ctx_src->device != buf_ctx_src->device || cuda_ctx_dst->device != buf_ctx_dst->device)) {
#ifndef NDEBUG
GGML_LOG_DEBUG("%s: backend and buffer devices do not match\n", __func__);
#endif
return false;
}
if (backend_src != backend_dst) {
if (copy_from_host) {
if (!cuda_ctx_dst->stream()) {
return false;
}
CUDA_CHECK(cudaMemcpyAsync(dst->data, src->data, ggml_nbytes(dst), cudaMemcpyHostToDevice, cuda_ctx_dst->stream()));
} else if (backend_src != backend_dst) {
// copy on src stream
if (cuda_ctx_src->device == cuda_ctx_dst->device) {
CUDA_CHECK(cudaMemcpyAsync(dst->data, src->data, ggml_nbytes(dst), cudaMemcpyDeviceToDevice, cuda_ctx_src->stream()));
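One caveat on the new host-to-device path: cudaMemcpyAsync with a host source only overlaps with other work when the host allocation is page-locked; from pageable memory the copy is staged and remains synchronous with respect to the host. Whether that applies here depends on how the source host buffer was allocated. A brief sketch of a pinned allocation (illustrative only, not part of this change):

#include <cuda_runtime.h>
// Sketch: page-locked host memory is what makes host-to-device cudaMemcpyAsync truly asynchronous.
static float * alloc_pinned_floats(size_t n) {
    float * ptr = nullptr;
    cudaMallocHost((void **) &ptr, n * sizeof(float)); // page-locked host allocation
    return ptr; // release later with cudaFreeHost(ptr)
}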