diff --git a/tools/cli/cli.cpp b/tools/cli/cli.cpp
index 93fc8272ef..4039bc75a9 100644
--- a/tools/cli/cli.cpp
+++ b/tools/cli/cli.cpp
@@ -131,7 +131,7 @@ struct cli_context {
     }
 
     append_file_out(
-        "[Prompt]: " + messages.back()["content"].get<std::string>() + "\n\n",
+        "[Prompt]: " + messages.back()["content"].get<std::string>() + "\n\n",
         chat_params.prompt
     );
 
@@ -405,13 +405,13 @@ int main(int argc, char ** argv) {
     // open output file early to fail fast
     if (!params.out_file.empty()) {
         ctx_cli.file_out.emplace(params.out_file, std::ios::binary);
-        
+
         if (!ctx_cli.file_out.has_value() || !ctx_cli.file_out->is_open()) {
             console::error("Failed to open output file '%s'\n", params.out_file.c_str());
             return 1;
         }
     }
-    
+
     console::set_display(DISPLAY_TYPE_RESET);
     console::set_completion_callback(auto_completion_callback);
diff --git a/tools/server/server-task.h b/tools/server/server-task.h
index 0f95b0b274..ead45dff61 100644
--- a/tools/server/server-task.h
+++ b/tools/server/server-task.h
@@ -52,7 +52,7 @@ struct task_params {
     bool return_tokens = false;
     bool return_progress = false;
     bool special_characters = false; // whether to include special tokens in the output (e.g. <s>, </s>, <unk>, etc.)
-    
+
     int32_t n_keep = 0; // number of tokens to keep from initial prompt
     int32_t n_discard = 0; // number of tokens after n_keep that may be discarded when shifting context, 0 defaults to half
     int32_t n_predict = -1; // new tokens to predict