From 81086cd6a3ca1252f0dc0f938171648399179c53 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 10 Oct 2025 17:17:31 +0300 Subject: [PATCH] vocab : mark EOT token for Granite models (#16499) * vocab : mark EOT token for Granite models * sampling : fallback to EOS when EOT is not found --- src/llama-sampling.cpp | 5 +++++ src/llama-vocab.cpp | 1 + 2 files changed, 6 insertions(+) diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp index 2186f827bf..55d2e355fd 100644 --- a/src/llama-sampling.cpp +++ b/src/llama-sampling.cpp @@ -2541,8 +2541,13 @@ static void llama_sampler_infill_apply(struct llama_sampler * smpl, llama_token_ if (n_non_eog == 0) { cur_p->size = 1; cur_p->data[0].id = ctx->vocab->token_eot(); + if (cur_p->data[0].id == LLAMA_TOKEN_NULL) { + cur_p->data[0].id = ctx->vocab->token_eos(); + } cur_p->data[0].logit = 1.0f; + GGML_ASSERT(cur_p->data[0].id != LLAMA_TOKEN_NULL); + return; } diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp index f965752a84..7fffd17149 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp @@ -2171,6 +2171,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { || t.first == "<|end|>" || t.first == "<end_of_turn>" || t.first == "<|endoftext|>" + || t.first == "<|end_of_text|>" // granite || t.first == "<EOT>" || t.first == "_<EOT>" || t.first == "<|end▁of▁sentence|>" // DeepSeek