Merge 50344142f4 into 3bc8d2cf23
commit 860b70804f
ggml/src/ggml-backend.cpp

@@ -1453,6 +1453,10 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched)
         int split_backend_id = split->backend_id;
         ggml_backend_t split_backend = sched->backends[split_backend_id];

+        if (sched->events[split_backend_id][sched->cur_copy] == NULL) {
+            ggml_backend_synchronize(split_backend);
+        }
+
         // copy the input tensors to the split backend
         for (int input_id = 0; input_id < split->n_inputs; input_id++) {
             ggml_backend_t input_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[input_id]);
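Note on the added guard: the scheduler double-buffers the input staging tensors of each split and normally relies on backend events to tell when a staging slot can be reused. Backends that do not implement events leave sched->events[split_backend_id][sched->cur_copy] as NULL, so the only safe fallback is a full ggml_backend_synchronize() before the slot's buffers are touched again. A minimal CUDA sketch of the same wait-before-reuse pattern (hypothetical names, not ggml code):

#include <cuda_runtime.h>

// Reuse a staging buffer safely: wait on a fine-grained event when the
// backend provides one, otherwise fall back to draining the whole stream.
static void wait_before_reuse(cudaStream_t stream, cudaEvent_t copy_done) {
    if (copy_done != NULL) {
        cudaEventSynchronize(copy_done);   // waits only for the recorded copy
    } else {
        cudaStreamSynchronize(stream);     // coarse fallback: waits for all queued work
    }
}

The event path is preferred because it waits for one specific copy instead of draining every operation queued on the backend.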
@@ -1463,16 +1467,12 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched)
                 // inputs from the user must be copied immediately to prevent the user overwriting the data before the copy is done
                 if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
                     ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
-                } else {
-                    ggml_backend_synchronize(split_backend);
                 }
-                ggml_backend_tensor_copy(input, input_cpy);
+                ggml_backend_tensor_copy_async(input_backend, split_backend, input, input_cpy);
            } else {
                 // wait for the split backend to finish using the input before overwriting it
                 if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
                     ggml_backend_event_wait(split_backend, sched->events[split_backend_id][sched->cur_copy]);
-                } else {
-                    ggml_backend_synchronize(split_backend);
                 }

                 // when offloading MoE weights, we can reduce the amount of data copied by copying only the experts that are used
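The functional change in this hunk is ggml_backend_tensor_copy(input, input_cpy) becoming ggml_backend_tensor_copy_async(input_backend, split_backend, input, input_cpy): user-supplied inputs are now staged asynchronously, and both else/ggml_backend_synchronize fallbacks can be dropped because the NULL-event case is already covered by the blanket synchronize added at the top of the split. The remaining ggml_backend_event_synchronize preserves the stated invariant: the previous copy into this slot must have finished reading before the buffers are reused. A rough CUDA analogue of that invariant, with illustrative names only:

#include <stddef.h>
#include <cuda_runtime.h>

// Stage a user buffer into device memory. The event recorded after the copy
// lets the *next* copy into this slot confirm the previous read of the
// source memory has completed before the slot is reused.
static void stage_user_input(void * dev_dst, const void * user_src, size_t nbytes,
                             cudaStream_t stream, cudaEvent_t slot_done) {
    cudaEventSynchronize(slot_done);  // previous copy into this slot has finished
    cudaMemcpyAsync(dev_dst, user_src, nbytes, cudaMemcpyHostToDevice, stream);
    cudaEventRecord(slot_done, stream);
}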
@@ -1576,6 +1576,10 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched)
             }
         }

+        if (sched->events[split_backend_id][sched->cur_copy] == NULL) {
+            ggml_backend_synchronize(split_backend);
+        }
+
         if (!sched->callback_eval) {
             enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &split->graph);
             if (ec != GGML_STATUS_SUCCESS) {
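The same fallback appears before compute is launched: when the backend has events, the "compute after copy" dependency can be expressed on the device, and the host never stalls; without events, the host must synchronize before ggml_backend_graph_compute_async is issued. In CUDA terms the two options look roughly like this (assumed stream/event names, a sketch rather than the ggml implementation):

#include <cuda_runtime.h>

// Make dst_stream wait for a copy issued on src_stream before computing.
static void order_compute_after_copy(cudaStream_t src_stream, cudaStream_t dst_stream,
                                     cudaEvent_t copy_done) {
    if (copy_done != NULL) {
        cudaEventRecord(copy_done, src_stream);
        cudaStreamWaitEvent(dst_stream, copy_done, 0);  // device-side wait, host keeps going
    } else {
        cudaStreamSynchronize(src_stream);              // host-side stall as a fallback
    }
    // ... kernels launched on dst_stream now run after the copy ...
}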
ggml/src/ggml-cuda/ggml-cuda.cu

@@ -2794,11 +2794,14 @@ static bool ggml_backend_cuda_cpy_tensor_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, const ggml_tensor * src, ggml_tensor * dst)
     ggml_backend_buffer_t buf_src = src->view_src ? src->view_src->buffer : src->buffer;
     ggml_backend_buffer_t buf_dst = dst->view_src ? dst->view_src->buffer : dst->buffer;

-    if (!ggml_backend_is_cuda(backend_src) || !ggml_backend_is_cuda(backend_dst)) {
+    // enables async copies from CPU to CUDA, instead of only CUDA-to-CUDA
+    bool copy_from_host = ggml_backend_buffer_is_host(src->buffer) && ggml_backend_dev_type(backend_src->device) == GGML_BACKEND_DEVICE_TYPE_CPU;
+
+    if (!(copy_from_host || ggml_backend_is_cuda(backend_src)) || !ggml_backend_is_cuda(backend_dst)) {
         return false;
     }

-    if (!ggml_backend_buffer_is_cuda(src->buffer) || !ggml_backend_buffer_is_cuda(dst->buffer)) {
+    if (!(copy_from_host || ggml_backend_buffer_is_cuda(src->buffer)) || !ggml_backend_buffer_is_cuda(dst->buffer)) {
         return false;
     }

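copy_from_host widens the async path: previously both endpoints had to be CUDA backends with CUDA buffers, which forced every CPU-to-GPU input copy through the synchronous fallback. One practical caveat worth keeping in mind (general CUDA behavior, not something this diff changes): cudaMemcpyAsync only overlaps with other work when the host side is page-locked; from pageable memory the driver stages the transfer and the call loses most of its asynchrony. A small sketch of allocating pinned memory:

#include <stddef.h>
#include <cuda_runtime.h>

// Page-locked host memory: required for cudaMemcpyAsync to be truly
// asynchronous and to overlap with kernels on other streams.
static float * alloc_pinned(size_t n_elems) {
    float * p = NULL;
    if (cudaMallocHost((void **)&p, n_elems * sizeof(float)) != cudaSuccess) {
        return NULL;
    }
    return p;  // release with cudaFreeHost(p)
}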
@@ -2809,14 +2812,17 @@ static bool ggml_backend_cuda_cpy_tensor_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, const ggml_tensor * src, ggml_tensor * dst)
     ggml_backend_cuda_buffer_context * buf_ctx_src = (ggml_backend_cuda_buffer_context *)buf_src->context;
     ggml_backend_cuda_buffer_context * buf_ctx_dst = (ggml_backend_cuda_buffer_context *)buf_dst->context;

-    if (cuda_ctx_src->device != buf_ctx_src->device || cuda_ctx_dst->device != buf_ctx_dst->device) {
+    if ((copy_from_host && cuda_ctx_dst->device != buf_ctx_dst->device) ||
+        !copy_from_host && (cuda_ctx_src->device != buf_ctx_src->device || cuda_ctx_dst->device != buf_ctx_dst->device)) {
 #ifndef NDEBUG
         GGML_LOG_DEBUG("%s: backend and buffer devices do not match\n", __func__);
 #endif
         return false;
     }

-    if (backend_src != backend_dst) {
+    if (copy_from_host) {
+        CUDA_CHECK(cudaMemcpyAsync(dst->data, src->data, ggml_nbytes(dst), cudaMemcpyHostToDevice, cuda_ctx_dst->stream()));
+    } else if (backend_src != backend_dst) {
         // copy on src stream
         if (cuda_ctx_src->device == cuda_ctx_dst->device) {
             CUDA_CHECK(cudaMemcpyAsync(dst->data, src->data, ggml_nbytes(dst), cudaMemcpyDeviceToDevice, cuda_ctx_src->stream()));
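For the new copy_from_host branch there is no source GPU stream to copy on, so the transfer is issued as cudaMemcpyHostToDevice on the destination context's stream. Since operations on a single stream execute in issue order, any compute the scheduler later launches on that stream automatically runs after the copy, with no extra synchronization. A self-contained sketch of that ordering (standalone example, not ggml code):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(float * x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= 2.0f;
}

int main() {
    const int n = 1 << 20;
    const size_t bytes = n * sizeof(float);

    cudaStream_t dst_stream;
    cudaStreamCreate(&dst_stream);

    float * h_buf;
    cudaMallocHost(&h_buf, bytes);  // pinned, so the H2D copy is truly async
    for (int i = 0; i < n; i++) h_buf[i] = 1.0f;

    float * d_buf;
    cudaMalloc(&d_buf, bytes);

    // copy on the destination stream, then compute on the same stream:
    // the kernel is ordered after the copy by stream semantics alone
    cudaMemcpyAsync(d_buf, h_buf, bytes, cudaMemcpyHostToDevice, dst_stream);
    scale<<<(n + 255) / 256, 256, 0, dst_stream>>>(d_buf, n);
    cudaMemcpyAsync(h_buf, d_buf, bytes, cudaMemcpyDeviceToHost, dst_stream);
    cudaStreamSynchronize(dst_stream);

    printf("h_buf[0] = %f\n", h_buf[0]);  // expect 2.0
    cudaFree(d_buf);
    cudaFreeHost(h_buf);
    cudaStreamDestroy(dst_stream);
    return 0;
}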