From 80b32bdadbfbe7f35e10dfb4abc214638882376e Mon Sep 17 00:00:00 2001 From: aendk Date: Fri, 9 Jan 2026 17:07:19 +0100 Subject: [PATCH] Minor cleanup --- ggml/src/ggml-backend.cpp | 6 ------ ggml/src/ggml-cuda/ggml-cuda.cu | 6 ++---- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp index cf5548964f..c768337db0 100644 --- a/ggml/src/ggml-backend.cpp +++ b/ggml/src/ggml-backend.cpp @@ -752,12 +752,6 @@ static bool ggml_backend_implicitly_synced(ggml_backend_t current_backend) { * For these backends, we can skip costly explicit synchronizations during compute split scheduling. */ - static bool disable_scheduler_sync_opt = (getenv("GGML_SCHED_DISABLE_SYNC_OPT") != nullptr); - - if (disable_scheduler_sync_opt) { - return false; - } - // To not change any APIs or change what ggml-base links to, we can only detect backends by string matching auto backend_name = ggml_backend_name(current_backend); if (strncmp(backend_name, "CUDA", 4) == 0) { diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 81c26d9fa8..7445099043 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -2811,7 +2811,8 @@ static bool ggml_backend_cuda_cpy_tensor_async(ggml_backend_t backend_src, ggml_ ggml_backend_cuda_buffer_context * buf_ctx_src = (ggml_backend_cuda_buffer_context *)buf_src->context; ggml_backend_cuda_buffer_context * buf_ctx_dst = (ggml_backend_cuda_buffer_context *)buf_dst->context; - if (!copy_from_host && (cuda_ctx_src->device != buf_ctx_src->device || cuda_ctx_dst->device != buf_ctx_dst->device)) { + if ((copy_from_host && cuda_ctx_dst->device != buf_ctx_dst->device) || + (!copy_from_host && (cuda_ctx_src->device != buf_ctx_src->device || cuda_ctx_dst->device != buf_ctx_dst->device))) { #ifndef NDEBUG GGML_LOG_DEBUG("%s: backend and buffer devices do not match\n", __func__); #endif @@ -2819,9 +2820,6 @@ static bool 
ggml_backend_cuda_cpy_tensor_async(ggml_backend_t backend_src, ggml_ } if (copy_from_host) { - if (!cuda_ctx_dst->stream()) { - return false; - } CUDA_CHECK(cudaMemcpyAsync(dst->data, src->data, ggml_nbytes(dst), cudaMemcpyHostToDevice, cuda_ctx_dst->stream())); } else if (backend_src != backend_dst) { // copy on src stream