From 883a87043a5987ac46a6519d23d754b93b6535af Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 24 Nov 2025 21:46:57 +0200
Subject: [PATCH] samplers : add missing cont

---
 src/llama-backend-sampler.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/llama-backend-sampler.cpp b/src/llama-backend-sampler.cpp
index cd6b8bb752..361e48ed68 100644
--- a/src/llama-backend-sampler.cpp
+++ b/src/llama-backend-sampler.cpp
@@ -149,7 +149,8 @@ static void llama_sampler_backend_top_k_apply_ggml(
         fprintf(stderr, "CPU backend will be used instead which defeats the purpose of having backend samplers\n");
     }
 
-    ggml_data->candidates = top_k;
+    // TODO: temporary cont until https://github.com/ggml-org/llama.cpp/pull/17365 is merged
+    ggml_data->candidates = ggml_cont(ctx, top_k);
 
     struct ggml_tensor * logits_rows = ggml_reshape_2d(ctx, ggml_data->logits, 1, ggml_data->logits->ne[0]);
     struct ggml_tensor * top_k_rows = ggml_get_rows(ctx, logits_rows, top_k);
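
For context on the pattern used here: ggml_top_k builds its result as a view over an argsort, so the returned index tensor is generally not contiguous, and ggml_cont inserts an explicit copy into contiguous memory before the tensor is stored as ggml_data->candidates. Below is a minimal sketch of that pattern, assuming a ggml context ctx and a 1-D f32 logits tensor; the helper name is illustrative and not part of this patch or the llama.cpp API.

    // sketch: produce a contiguous top-k index tensor from a row of logits
    static struct ggml_tensor * build_top_k_candidates(
            struct ggml_context * ctx,
            struct ggml_tensor  * logits,  // [n_vocab], f32
            int                   k) {
        // ggml_top_k returns the indices of the k largest values per row;
        // it is implemented as argsort + view, so the result is a view tensor
        struct ggml_tensor * top_k = ggml_top_k(ctx, logits, k);

        // force a contiguous copy so downstream ops that expect contiguous
        // inputs can consume the indices directly
        if (!ggml_is_contiguous(top_k)) {
            top_k = ggml_cont(ctx, top_k);
        }

        return top_k;
    }

The patch applies ggml_cont unconditionally rather than guarding with ggml_is_contiguous, and the TODO marks it as a temporary workaround until https://github.com/ggml-org/llama.cpp/pull/17365 is merged.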