diff --git a/tools/cli/cli.cpp b/tools/cli/cli.cpp
index ad421e6326..e57bf52e36 100644
--- a/tools/cli/cli.cpp
+++ b/tools/cli/cli.cpp
@@ -380,6 +380,15 @@ int main(int argc, char ** argv) {
                 console::error("file does not exist or cannot be opened: '%s'\n", fname.c_str());
                 continue;
             }
+            if (inf.fim_sep_token != LLAMA_TOKEN_NULL) {
+                cur_msg += common_token_to_piece(ctx_cli.ctx_server.get_llama_context(), inf.fim_sep_token, true);
+                cur_msg += fname;
+                cur_msg.push_back('\n');
+            } else {
+                cur_msg += "--- File: ";
+                cur_msg += fname;
+                cur_msg += " ---\n";
+            }
             cur_msg += marker;
             console::log("Loaded text from '%s'\n", fname.c_str());
             continue;
diff --git a/tools/server/server-context.cpp b/tools/server/server-context.cpp
index 8aab0d4c1b..0f2f3a45aa 100644
--- a/tools/server/server-context.cpp
+++ b/tools/server/server-context.cpp
@@ -2911,6 +2911,9 @@ server_context_meta server_context::get_meta() const {
         /* fim_pre_token          */ llama_vocab_fim_pre(impl->vocab),
         /* fim_sub_token          */ llama_vocab_fim_suf(impl->vocab),
         /* fim_mid_token          */ llama_vocab_fim_mid(impl->vocab),
+        /* fim_pad_token          */ llama_vocab_fim_pad(impl->vocab),
+        /* fim_rep_token          */ llama_vocab_fim_rep(impl->vocab),
+        /* fim_sep_token          */ llama_vocab_fim_sep(impl->vocab),
 
         /* model_vocab_type       */ llama_vocab_type(impl->vocab),
         /* model_vocab_n_tokens   */ llama_vocab_n_tokens(impl->vocab),
diff --git a/tools/server/server-context.h b/tools/server/server-context.h
index c0b5d373ff..03c29f513b 100644
--- a/tools/server/server-context.h
+++ b/tools/server/server-context.h
@@ -30,6 +30,9 @@ struct server_context_meta {
     llama_token fim_pre_token;
     llama_token fim_sub_token;
     llama_token fim_mid_token;
+    llama_token fim_pad_token;
+    llama_token fim_rep_token;
+    llama_token fim_sep_token;
 
     // model meta
     enum llama_vocab_type model_vocab_type;