From 25f33806d35032a3ebc5eddfc3eeae6538e6c9c3 Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Mon, 24 Nov 2025 15:03:41 +0100 Subject: [PATCH] sampling : add debug log when backend sampler selects token This commit adds a debug log statement in the llama_sampler_sample function to indicate when a backend sampler has selected a token for a given index. The modification helps in tracing the sampling process and understanding the flow of control when backend samplers are used. --- src/llama-sampling.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp index 621438a9cf..5c3214f029 100644 --- a/src/llama-sampling.cpp +++ b/src/llama-sampling.cpp @@ -446,6 +446,7 @@ llama_token llama_sampler_sample(struct llama_sampler * smpl, struct llama_conte // If a backend sampler has already sampled a token, return it. if (sampled_token != LLAMA_TOKEN_NULL) { + LLAMA_LOG_DEBUG("%s: Backend sampler selected token for idx %d. Skipping CPU samplers\n", __func__, idx); return sampled_token; }