From 58faba86a9f1b6c81ca3666bf09015f9c678fb89 Mon Sep 17 00:00:00 2001
From: Aristeidis Stathopoulos
Date: Sat, 7 Mar 2026 12:07:47 +0200
Subject: [PATCH] test: address review feedback for test-pre-alloc-callback

- Add missing llama_backend_free() on model load failure path
- Only print diagnostics on failure, not on success
- Pick target backend by finding one different from current instead of assuming backend ordering

Co-Authored-By: Claude Opus 4.6
---
 tests/test-pre-alloc-callback.cpp | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/tests/test-pre-alloc-callback.cpp b/tests/test-pre-alloc-callback.cpp
index 97b215cef7..1b19e91860 100644
--- a/tests/test-pre-alloc-callback.cpp
+++ b/tests/test-pre-alloc-callback.cpp
@@ -13,14 +13,24 @@ static void pre_alloc_cb(ggml_backend_sched_t sched, struct ggml_cgraph * gf, vo
     auto * state = static_cast(user_data);
     state->called = true;
 
-    // reassign the first node to the last backend (CPU) and verify
+    // reassign the first node to a different backend and verify
     int n_backends = ggml_backend_sched_get_n_backends(sched);
     if (n_backends < 1 || ggml_graph_n_nodes(gf) <= 0) {
         return;
     }
 
-    ggml_backend_t target = ggml_backend_sched_get_backend(sched, n_backends - 1);
     struct ggml_tensor * node = ggml_graph_node(gf, 0);
+    ggml_backend_t current = ggml_backend_sched_get_tensor_backend(sched, node);
+    ggml_backend_t target = current;
+
+    for (int i = 0; i < n_backends; i++) {
+        ggml_backend_t candidate = ggml_backend_sched_get_backend(sched, i);
+        if (candidate != current) {
+            target = candidate;
+            break;
+        }
+    }
+
     ggml_backend_sched_set_tensor_backend(sched, node, target);
     state->reassign_ok = (ggml_backend_sched_get_tensor_backend(sched, node) == target);
 }
@@ -32,6 +42,7 @@ int main(int argc, char ** argv) {
     auto * model = llama_model_load_from_file(model_path, llama_model_default_params());
     if (!model) {
         fprintf(stderr, "FAIL: could not load model\n");
+        llama_backend_free();
         return 1;
     }
 
@@ -60,10 +71,13 @@ int main(int argc, char ** argv) {
         return 1;
     }
 
-    fprintf(stderr, "called=%d reassign_ok=%d\n", state.called, state.reassign_ok);
     int ret = (state.called && state.reassign_ok) ? 0 : 1;
+    if (ret != 0) {
+        fprintf(stderr, "FAIL: called=%d reassign_ok=%d\n", state.called, state.reassign_ok);
+    }
+
     llama_free(ctx);
     llama_model_free(model);
     llama_backend_free();