From f4884293809b5227d7307140a942f1bc4176a603 Mon Sep 17 00:00:00 2001
From: Christian Schmitz
Date: Thu, 12 Feb 2026 15:52:57 +0100
Subject: [PATCH] llama : update outdated comment in llama.h (#19428)

* Updated documentation

Model is no longer a parameter

* llama : fix trailing whitespace in comment

---------

Co-authored-by: Daniel Bevenius
---
 include/llama.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/llama.h b/include/llama.h
index 46c3672e98..305623127c 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -1150,9 +1150,9 @@ extern "C" {
     //

     /// Apply chat template. Inspired by hf apply_chat_template() on python.
-    /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model"
+    ///
     /// NOTE: This function does not use a jinja parser. It only support a pre-defined list of template. See more: https://github.com/ggml-org/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template
-    /// @param tmpl A Jinja template to use for this chat. If this is nullptr, the model’s default chat template will be used instead.
+    /// @param tmpl A Jinja template to use for this chat.
     /// @param chat Pointer to a list of multiple llama_chat_message
     /// @param n_msg Number of llama_chat_message in this chat
     /// @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message.
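
Note: with the "model" parameter gone, callers supply the template string
explicitly. A minimal usage sketch against the patched header follows; the
main() scaffolding, the initial buffer size, and the "chatml" template choice
are illustrative, and the grow-and-retry step relies on the function's
documented return convention (a negative value on failure, otherwise the
number of bytes required, which may exceed the buffer):

    #include "llama.h"

    #include <cstdio>
    #include <vector>

    int main() {
        // Two-message conversation; llama_chat_message holds role/content pairs.
        const llama_chat_message chat[] = {
            { "system", "You are a helpful assistant." },
            { "user",   "Hello!"                       },
        };
        const size_t n_msg = sizeof(chat) / sizeof(chat[0]);

        // The template must now come from the caller, e.g. a name from the
        // pre-defined list on the wiki page referenced in the header comment.
        const char * tmpl = "chatml";

        std::vector<char> buf(1024); // illustrative initial size
        int32_t res = llama_chat_apply_template(tmpl, chat, n_msg,
                                                /*add_ass=*/true,
                                                buf.data(), (int32_t) buf.size());
        if (res < 0) {
            fprintf(stderr, "template not in the supported list\n");
            return 1;
        }
        if ((size_t) res > buf.size()) {
            // Buffer too small: res is the required size, so grow and retry.
            buf.resize(res);
            res = llama_chat_apply_template(tmpl, chat, n_msg, true,
                                            buf.data(), (int32_t) buf.size());
        }
        printf("%.*s\n", res, buf.data());
        return 0;
    }

Callers that previously passed nullptr to fall back to the model's default
template now fetch that string themselves first (current llama.h exposes
llama_model_chat_template() for this) and pass it as tmpl.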