Makes the opt-in to relax the use of explicit syncs more general. Backends like

Vulkan, which require a synchronization between HtoD copies and graph
execution, could now also adopt this change.
This commit is contained in:
aendk 2026-01-12 14:16:01 +01:00
parent 1419050d06
commit 353daeddce
1 changed file with 31 additions and 11 deletions

View File

@ -672,6 +672,12 @@ static bool ggml_is_view_op(enum ggml_op op) {
#define GGML_SCHED_MAX_COPIES 4 #define GGML_SCHED_MAX_COPIES 4
#endif #endif
// Per-split synchronization policy: how many explicit syncs a split needs
// around its asynchronous input copies. Splits default to EXPLICIT and may be
// relaxed once the backend is identified (see the CUDA name check below).
// NOTE(review): the constants use the GGML_SPLIT_SYNC_MODE_ prefix while the
// enum tag is ggml_backend_sync_mode — confirm which naming is intended.
enum ggml_backend_sync_mode {
    GGML_SPLIT_SYNC_MODE_IMPLICIT = 0, // splits which can rely on implicit sync mechanisms of their backend, such as a queue or stream
    GGML_SPLIT_SYNC_MODE_WRITE_READ_BOUNDARY = 1, // splits which require only a single explicit sync between the last write and the first read
    GGML_SPLIT_SYNC_MODE_EXPLICIT = 2 // splits which require explicit synchronization throughout (default)
};
struct ggml_backend_sched_split { struct ggml_backend_sched_split {
int backend_id; int backend_id;
int i_start; int i_start;
@ -680,6 +686,7 @@ struct ggml_backend_sched_split {
int n_inputs; int n_inputs;
// graph view of this split // graph view of this split
struct ggml_cgraph graph; struct ggml_cgraph graph;
enum ggml_backend_sync_mode backend_sync_mode = GGML_SPLIT_SYNC_MODE_EXPLICIT;
}; };
struct ggml_backend_sched { struct ggml_backend_sched {
@ -738,30 +745,40 @@ struct ggml_backend_sched {
int debug_prev_graph_size; int debug_prev_graph_size;
}; };
static void ggml_backend_synchronize_if_required(ggml_backend_t current_backend, bool backend_implicitly_synced) {
if (backend_implicitly_synced) { static void ggml_backend_synchronize_if_required(ggml_backend_sched_split * split, ggml_backend_t current_backend, bool is_final_write = 0) {
if (split->backend_sync_mode == GGML_SPLIT_SYNC_MODE_IMPLICIT) {
return;
}
if (split->backend_sync_mode == GGML_SPLIT_SYNC_MODE_WRITE_READ_BOUNDARY && !is_final_write) {
return; return;
} }
ggml_backend_synchronize(current_backend); ggml_backend_synchronize(current_backend);
} }
static bool ggml_backend_implicitly_synced(ggml_backend_t current_backend) { static void ggml_backend_implicitly_synced(ggml_backend_sched_split * split, ggml_backend_t current_backend) {
/* /*
* Some backends have implicit synchronization mechanisms, which allows several parallel asynchronous memory copies without data races. * Some backends have implicit synchronization mechanisms, which allows several parallel asynchronous memory copies without data races.
* An example for that is the CUDA backend with the CUDA stream. * An example for that is the CUDA backend with the CUDA stream.
* For these backends, we can skip costly explicit synchronizations during compute split scheduling. * For these backends, we can skip costly explicit synchronizations during compute split scheduling.
*/ */
if (split->backend_sync_mode != GGML_SPLIT_SYNC_MODE_EXPLICIT) {
// indicates that this function has already changed the default value, no repeat check necessary
return;
}
// To not change any APIs or change what ggml-base links to, we can only detect backends by string matching // To not change any APIs or change what ggml-base links to, we can only detect backends by string matching
auto backend_name = ggml_backend_name(current_backend); auto backend_name = ggml_backend_name(current_backend);
if (strncmp(backend_name, "CUDA", 4) == 0) { if (strncmp(backend_name, "CUDA", 4) == 0) {
return true; split->backend_sync_mode = GGML_SPLIT_SYNC_MODE_IMPLICIT;
return;
} }
// sync other backends to ensure correctness // retain default explicit synchronization on other backends for correctness
return false; return;
} }
#define hash_id(tensor) ggml_hash_find_or_insert(&sched->hash_set, tensor) #define hash_id(tensor) ggml_hash_find_or_insert(&sched->hash_set, tensor)
@ -1480,30 +1497,33 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
struct ggml_backend_sched_split * split = &splits[split_id]; struct ggml_backend_sched_split * split = &splits[split_id];
int split_backend_id = split->backend_id; int split_backend_id = split->backend_id;
ggml_backend_t split_backend = sched->backends[split_backend_id]; ggml_backend_t split_backend = sched->backends[split_backend_id];
// some backends can avoid costly syncs between async copies
bool backend_implicitly_synced = ggml_backend_implicitly_synced(split_backend); // determine if backend can avoid costly syncs between HtoD async copies
ggml_backend_implicitly_synced(split, split_backend);
// copy the input tensors to the split backend // copy the input tensors to the split backend
for (int input_id = 0; input_id < split->n_inputs; input_id++) { for (int input_id = 0; input_id < split->n_inputs; input_id++) {
ggml_backend_t input_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[input_id]); ggml_backend_t input_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[input_id]);
struct ggml_tensor * input = split->inputs[input_id]; struct ggml_tensor * input = split->inputs[input_id];
struct ggml_tensor * input_cpy = tensor_copy(input, split_backend_id, sched->cur_copy); struct ggml_tensor * input_cpy = tensor_copy(input, split_backend_id, sched->cur_copy);
bool last_input = (input_id + 1) == split->n_inputs;
if (input->flags & GGML_TENSOR_FLAG_INPUT) { if (input->flags & GGML_TENSOR_FLAG_INPUT) {
// inputs from the user must be copied immediately to prevent the user overwriting the data before the copy is done // inputs from the user must be copied immediately to prevent the user overwriting the data before the copy is done
if (sched->events[split_backend_id][sched->cur_copy] != NULL) { if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]); ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
} else { } else {
ggml_backend_synchronize_if_required(split_backend, backend_implicitly_synced); ggml_backend_synchronize_if_required(split, split_backend);
} }
ggml_backend_tensor_copy_async(input_backend, split_backend, input, input_cpy); ggml_backend_tensor_copy_async(input_backend, split_backend, input, input_cpy);
ggml_backend_synchronize_if_required(split_backend, backend_implicitly_synced); ggml_backend_synchronize_if_required(split, split_backend, last_input);
} else { } else {
// wait for the split backend to finish using the input before overwriting it // wait for the split backend to finish using the input before overwriting it
if (sched->events[split_backend_id][sched->cur_copy] != NULL) { if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
ggml_backend_event_wait(split_backend, sched->events[split_backend_id][sched->cur_copy]); ggml_backend_event_wait(split_backend, sched->events[split_backend_id][sched->cur_copy]);
} else { } else {
ggml_backend_synchronize_if_required(split_backend, backend_implicitly_synced); ggml_backend_synchronize_if_required(split, split_backend, last_input);
} }
// when offloading MoE weights, we can reduce the amount of data copied by copying only the experts that are used // when offloading MoE weights, we can reduce the amount of data copied by copying only the experts that are used