From b039e01a1ebde9fc542c869193a0756110221b35 Mon Sep 17 00:00:00 2001
From: aendk
Date: Mon, 12 Jan 2026 14:16:01 +0100
Subject: [PATCH] Make the opt-in to relax explicit syncs more general.

Backends like Vulkan, which require a synchronization between HtoD copies
and graph execution, can now also adopt this change.
---
 ggml/src/ggml-backend.cpp | 42 +++++++++++++++++++++++++++++----------
 1 file changed, 31 insertions(+), 11 deletions(-)

diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp
index c768337db0..8de584c906 100644
--- a/ggml/src/ggml-backend.cpp
+++ b/ggml/src/ggml-backend.cpp
@@ -670,6 +670,12 @@ static bool ggml_is_view_op(enum ggml_op op) {
 #define GGML_SCHED_MAX_COPIES 4
 #endif
 
+enum ggml_backend_sync_mode {
+    GGML_SPLIT_SYNC_MODE_IMPLICIT            = 0, // splits which can rely on implicit sync mechanisms of their backend, like a queue or stream
+    GGML_SPLIT_SYNC_MODE_WRITE_READ_BOUNDARY = 1, // splits which require only a single explicit sync between the last write and the first read
+    GGML_SPLIT_SYNC_MODE_EXPLICIT            = 2  // splits which require explicit synchronization throughout (default)
+};
+
 struct ggml_backend_sched_split {
     int backend_id;
     int i_start;
@@ -678,6 +684,7 @@ struct ggml_backend_sched_split {
     int n_inputs;
     // graph view of this split
     struct ggml_cgraph graph;
+    enum ggml_backend_sync_mode backend_sync_mode = GGML_SPLIT_SYNC_MODE_EXPLICIT;
 };
 
 struct ggml_backend_sched {
@@ -736,30 +743,40 @@ struct ggml_backend_sched {
     int debug_prev_graph_size;
 };
 
-static void ggml_backend_synchronize_if_required(ggml_backend_t current_backend, bool backend_implicitly_synced) {
-    if (backend_implicitly_synced) {
+static void ggml_backend_synchronize_if_required(ggml_backend_sched_split * split, ggml_backend_t current_backend, bool is_final_write = false) {
+
+    if (split->backend_sync_mode == GGML_SPLIT_SYNC_MODE_IMPLICIT) {
+        return;
+    }
+
+    if (split->backend_sync_mode == GGML_SPLIT_SYNC_MODE_WRITE_READ_BOUNDARY && !is_final_write) {
         return;
     }
     ggml_backend_synchronize(current_backend);
 }
 
-static bool ggml_backend_implicitly_synced(ggml_backend_t current_backend) {
+static void ggml_backend_implicitly_synced(ggml_backend_sched_split * split, ggml_backend_t current_backend) {
     /*
      * Some backends have implicit synchronization mechanisms, which allows several parallel asynchronous memory copies without data races.
      * An example for that is the CUDA backend with the CUDA stream.
      * For these backends, we can skip costly explicit synchronizations during compute split scheduling.
      */
+    if (split->backend_sync_mode != GGML_SPLIT_SYNC_MODE_EXPLICIT) {
+        // indicates that this function has already changed the default value, no repeat check necessary
+        return;
+    }
 
     // To not change any APIs or change what ggml-base links to, we can only detect backends by string matching
     auto backend_name = ggml_backend_name(current_backend);
     if (strncmp(backend_name, "CUDA", 4) == 0) {
-        return true;
+        split->backend_sync_mode = GGML_SPLIT_SYNC_MODE_IMPLICIT;
+        return;
     }
-    // sync other backends to ensure correctness
-    return false;
+    // retain default explicit synchronization on other backends for correctness
+    return;
 }
 
 #define hash_id(tensor) ggml_hash_find_or_insert(&sched->hash_set, tensor)
@@ -1478,30 +1495,33 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
         struct ggml_backend_sched_split * split = &splits[split_id];
         int split_backend_id = split->backend_id;
         ggml_backend_t split_backend = sched->backends[split_backend_id];
-        // some backends can avoid costly syncs between async copies
-        bool backend_implicitly_synced = ggml_backend_implicitly_synced(split_backend);
+
+        // determine if the backend can avoid costly syncs between HtoD async copies
+        ggml_backend_implicitly_synced(split, split_backend);
+
         // copy the input tensors to the split backend
        for (int input_id = 0; input_id < split->n_inputs; input_id++) {
             ggml_backend_t input_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[input_id]);
             struct ggml_tensor * input = split->inputs[input_id];
             struct ggml_tensor * input_cpy = tensor_copy(input, split_backend_id, sched->cur_copy);
+            bool last_input = (input_id + 1) == split->n_inputs;
 
             if (input->flags & GGML_TENSOR_FLAG_INPUT) {
                 // inputs from the user must be copied immediately to prevent the user overwriting the data before the copy is done
                 if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
                     ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
                 } else {
-                    ggml_backend_synchronize_if_required(split_backend, backend_implicitly_synced);
+                    ggml_backend_synchronize_if_required(split, split_backend);
                 }
 
                 ggml_backend_tensor_copy_async(input_backend, split_backend, input, input_cpy);
-                ggml_backend_synchronize_if_required(split_backend, backend_implicitly_synced);
+                ggml_backend_synchronize_if_required(split, split_backend, last_input);
             } else {
                 // wait for the split backend to finish using the input before overwriting it
                 if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
                     ggml_backend_event_wait(split_backend, sched->events[split_backend_id][sched->cur_copy]);
                 } else {
-                    ggml_backend_synchronize_if_required(split_backend, backend_implicitly_synced);
+                    ggml_backend_synchronize_if_required(split, split_backend, last_input);
                 }
 
                 // when offloading MoE weights, we can reduce the amount of data copied by copying only the experts that are used
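
Note (not part of the patch): a minimal sketch of how a backend such as Vulkan, which only needs a single explicit sync between the last HtoD write and the first read of the graph, could opt into the new boundary mode inside ggml_backend_implicitly_synced(), after the existing CUDA check. The "Vulkan" name-prefix check is an assumption that mirrors the CUDA string match and would need to be confirmed against the backend's actual ggml_backend_name() output.

    // hypothetical addition in ggml_backend_implicitly_synced(), after the CUDA check;
    // backend_name and split are already in scope there
    if (strncmp(backend_name, "Vulkan", 6) == 0) {
        // assumption: one sync at the write/read boundary is sufficient for this backend
        split->backend_sync_mode = GGML_SPLIT_SYNC_MODE_WRITE_READ_BOUNDARY;
        return;
    }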