common : add parser for ministral/mistral large 3/devstral 2 (#17713)

Aldehir Rojas 2025-12-09 17:31:04 -06:00 committed by GitHub
parent 63391852b0
commit 2fbe3b7bb7
4 changed files with 415 additions and 0 deletions

View File: common/chat.cpp

@@ -1,5 +1,6 @@
#include "chat.h"
#include "chat-parser.h"
#include "chat-peg-parser.h"
#include "common.h"
#include "json-partial.h"
#include "json-schema-to-grammar.h"
@@ -150,6 +151,7 @@ struct templates_params {
    common_chat_tool_choice tool_choice;
    json json_schema;
    bool parallel_tool_calls;
    common_reasoning_format reasoning_format;
    bool stream;
    std::string grammar;
    bool add_generation_prompt = true;
@@ -589,6 +591,16 @@ common_chat_templates_ptr common_chat_templates_init(
            "{%- if false %}");
    }

    // TODO @aldehir : this is a temporary fix, pending Minja changes
    // Ref: https://github.com/ggml-org/llama.cpp/pull/17713#issuecomment-3631342664
    if (default_template_src.find("[TOOL_CALLS]") != std::string::npos
        // search for the error message and patch it
        && default_template_src.find("if (message['content'] is none or") != std::string::npos) {
        string_replace_all(default_template_src,
            "{%- if (message['content'] is none or message['content'] == '' or message['content']|length == 0) and (message['tool_calls'] is not defined or message['tool_calls'] is none or message['tool_calls']|length == 0) %}",
            "{%- if false %}");
    }

    std::string token_bos = bos_token_override;
    std::string token_eos = eos_token_override;
    bool add_bos = false;
@@ -987,6 +999,118 @@ static common_chat_params common_chat_params_init_lfm2(const common_chat_templat
    return data;
}
static common_chat_params common_chat_params_init_ministral_3(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;

    // Build up messages to follow the format: https://huggingface.co/mistralai/Ministral-3-14B-Reasoning-2512/blob/main/chat_template.jinja
    auto adjusted_messages = json::array();
    for (const auto & msg : inputs.messages) {
        auto role = msg.value("role", "");
        if (role != "system" && role != "assistant") {
            // Only adjust system and assistant messages. Interestingly, the system message may contain thinking.
            adjusted_messages.push_back(msg);
            continue;
        }

        auto content = json::array();

        // If message contains `reasoning_content`, add it as a block of type `thinking`
        if (msg.contains("reasoning_content") && msg.at("reasoning_content").is_string()) {
            content.push_back({
                {"type", "thinking"},
                {"thinking", msg.at("reasoning_content").get<std::string>()},
            });
        }

        // If message contains `content`, add it as a block of type `text`
        if (msg.contains("content")) {
            if (msg.at("content").is_string()) {
                content.push_back({
                    {"type", "text"},
                    {"text", msg.at("content").get<std::string>()},
                });
            } else if (msg.at("content").is_array()) {
                auto blocks = msg.at("content");
                content.insert(content.end(), blocks.begin(), blocks.end());
            }
        }

        auto adjusted = msg;
        adjusted["content"] = content;
        adjusted.erase("reasoning_content");
        adjusted_messages.push_back(adjusted);
    }

    auto has_tools = inputs.tools.is_array() && !inputs.tools.empty();
    auto extract_reasoning = inputs.reasoning_format != COMMON_REASONING_FORMAT_NONE;
    auto include_grammar = true;

    data.prompt = apply(tmpl, inputs, /* messages_override = */ adjusted_messages);
    data.format = COMMON_CHAT_FORMAT_PEG_NATIVE;
    data.preserved_tokens = {
        "[THINK]",
        "[/THINK]",
        "[TOOL_CALLS]",
        "[ARGS]",
    };

    auto parser = build_chat_peg_native_parser([&](common_chat_peg_native_builder & p) {
        auto reasoning = extract_reasoning ? p.optional("[THINK]" + p.reasoning(p.until("[/THINK]")) + "[/THINK]") : p.eps();

        // Response format parser
        if (inputs.json_schema.is_object() && !inputs.json_schema.empty()) {
            // Ministral wants to emit json surrounded by code fences
            return reasoning << "```json" << p.content(p.schema(p.json(), "response-format", inputs.json_schema)) << "```";
        }

        // Tool call parser
        if (has_tools && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE) {
            auto tool_choice = p.choice();
            foreach_function(inputs.tools, [&](const json & tool) {
                const auto & function = tool.at("function");
                std::string name = function.at("name");
                const auto & schema = function.at("parameters");
                tool_choice |= p.rule("tool-" + name,
                    p.tool_open(p.tool_name(p.literal(name)) + "[ARGS]")
                    + p.tool_args(p.schema(p.json(), "tool-" + name + "-schema", schema))
                );
            });

            auto min_calls = inputs.tool_choice == COMMON_CHAT_TOOL_CHOICE_REQUIRED ? 1 : 0;
            auto max_calls = inputs.parallel_tool_calls ? -1 : 1;
            auto tool_calls = p.trigger_rule("tool-call", p.repeat("[TOOL_CALLS]" + tool_choice, min_calls, max_calls));

            return reasoning << p.content(p.until("[TOOL_CALLS]")) << tool_calls;
        }

        // Content only parser
        include_grammar = false;
        return reasoning << p.content(p.rest());
    });

    data.parser = parser.save();

    if (include_grammar) {
        data.grammar_lazy = has_tools && inputs.tool_choice == COMMON_CHAT_TOOL_CHOICE_AUTO;
        data.grammar = build_grammar([&](const common_grammar_builder & builder) {
            foreach_function(inputs.tools, [&](const json & tool) {
                const auto & function = tool.at("function");
                auto schema = function.at("parameters");
                builder.resolve_refs(schema);
            });
            parser.build_grammar(builder, data.grammar_lazy);
        });
        data.grammar_triggers = {
            {COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "[TOOL_CALLS]"}
        };
    }

    return data;
}
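// For reference, the model-output shapes this parser is built to accept,
// reconstructed from the tests added later in this commit (tool names are the
// test fixtures'; fn_a/fn_b are placeholders, not anything the format requires):
//
//   [THINK]reasoning[/THINK]free-form content
//   [THINK]reasoning[/THINK][TOOL_CALLS]special_function[ARGS]{"arg1": 1}
//   [TOOL_CALLS]fn_a[ARGS]{...}[TOOL_CALLS]fn_b[ARGS]{...}    <- parallel_tool_calls
//   [THINK]reasoning[/THINK]```json
//   {"amount": 123.45, "date": "2025-12-03"}
//   ```                                                        <- json_schema response format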
static common_chat_params common_chat_params_init_magistral(const common_chat_template & tmpl, const struct templates_params & inputs) {
    common_chat_params data;
    data.prompt = apply(tmpl, inputs);
@@ -2341,6 +2465,7 @@ static common_chat_params common_chat_templates_apply_jinja(
    params.messages = common_chat_msgs_to_json_oaicompat<json>(inputs.messages, /* concat_text= */ !tmpl.original_caps().requires_typed_content);
    params.add_generation_prompt = inputs.add_generation_prompt;
    params.tool_choice = inputs.tool_choice;
    params.reasoning_format = inputs.reasoning_format;
    params.enable_thinking = inputs.enable_thinking;
    params.grammar = inputs.grammar;
    params.now = inputs.now;
@@ -2504,6 +2629,13 @@ static common_chat_params common_chat_templates_apply_jinja(
        return common_chat_params_init_llama_3_x(tmpl, params, allow_python_tag_builtin_tools);
    }
    // Ministral/Mistral Large 3. Note this check must come before the Magistral
    // [THINK] check below, since the Ministral reasoning template also contains
    // [THINK] markers.
    if (src.find("[SYSTEM_PROMPT]") != std::string::npos &&
        src.find("[TOOL_CALLS]") != std::string::npos &&
        src.find("[ARGS]") != std::string::npos) {
        return common_chat_params_init_ministral_3(tmpl, params);
    }
if (src.find("[THINK]") != std::string::npos && src.find("[/THINK]") != std::string::npos) { if (src.find("[THINK]") != std::string::npos && src.find("[/THINK]") != std::string::npos) {
return common_chat_params_init_magistral(tmpl, params); return common_chat_params_init_magistral(tmpl, params);
} }

View File: models/templates/mistralai-Ministral-3-14B-Reasoning-2512.jinja

@@ -0,0 +1,126 @@
{#- Default system message if no system prompt is passed. #}
{%- set default_system_message = '# HOW YOU SHOULD THINK AND ANSWER\n\nFirst draft your thinking process (inner monologue) until you arrive at a response. Format your response using Markdown, and use LaTeX for any mathematical equations. Write both your thoughts and the response in the same language as the input.\n\nYour thinking process must follow the template below:[THINK]Your thoughts or/and draft, like working through an exercise on scratch paper. Be as casual and as long as you want until you are confident to generate the response to the user.[/THINK]Here, provide a self-contained response.' %}

{#- Beginning of sequence token. #}
{{- bos_token }}

{#- Handle system prompt if it exists. #}
{#- System prompt supports text content or text and thinking chunks. #}
{%- if messages[0]['role'] == 'system' %}
    {{- '[SYSTEM_PROMPT]' -}}
    {%- if messages[0]['content'] is string %}
        {{- messages[0]['content'] -}}
    {%- else %}
        {%- for block in messages[0]['content'] %}
            {%- if block['type'] == 'text' %}
                {{- block['text'] }}
            {%- elif block['type'] == 'thinking' %}
                {{- '[THINK]' + block['thinking'] + '[/THINK]' }}
            {%- else %}
                {{- raise_exception('Only text and thinking chunks are supported in system message contents.') }}
            {%- endif %}
        {%- endfor %}
    {%- endif %}
    {{- '[/SYSTEM_PROMPT]' -}}
    {%- set loop_messages = messages[1:] %}
{%- else %}
    {%- set loop_messages = messages %}
    {%- if default_system_message != '' %}
        {{- '[SYSTEM_PROMPT]' + default_system_message + '[/SYSTEM_PROMPT]' }}
    {%- endif %}
{%- endif %}

{#- Tools definition #}
{%- set tools_definition = '' %}
{%- set has_tools = false %}
{%- if tools is defined and tools is not none and tools|length > 0 %}
    {%- set has_tools = true %}
    {%- set tools_definition = '[AVAILABLE_TOOLS]' + (tools | tojson) + '[/AVAILABLE_TOOLS]' %}
    {{- tools_definition }}
{%- endif %}

{#- Checks for alternating user/assistant messages. #}
{%- set ns = namespace(index=0) %}
{%- for message in loop_messages %}
    {%- if message.role == 'user' or (message.role == 'assistant' and (message.tool_calls is not defined or message.tool_calls is none or message.tool_calls | length == 0)) %}
        {%- if (message['role'] == 'user') != (ns.index % 2 == 0) %}
            {{- raise_exception('After the optional system message, conversation roles must alternate user and assistant roles except for tool calls and results.') }}
        {%- endif %}
        {%- set ns.index = ns.index + 1 %}
    {%- endif %}
{%- endfor %}

{#- Handle conversation messages. #}
{%- for message in loop_messages %}
    {#- User messages support text content or text and image chunks. #}
    {%- if message['role'] == 'user' %}
        {%- if message['content'] is string %}
            {{- '[INST]' + message['content'] + '[/INST]' }}
        {%- elif message['content'] | length > 0 %}
            {{- '[INST]' }}
            {%- if message['content'] | length == 2 %}
                {%- set blocks = message['content'] | sort(attribute='type') %}
            {%- else %}
                {%- set blocks = message['content'] %}
            {%- endif %}
            {%- for block in blocks %}
                {%- if block['type'] == 'text' %}
                    {{- block['text'] }}
                {%- elif block['type'] in ['image', 'image_url'] %}
                    {{- '[IMG]' }}
                {%- else %}
                    {{- raise_exception('Only text, image and image_url chunks are supported in user message content.') }}
                {%- endif %}
            {%- endfor %}
            {{- '[/INST]' }}
        {%- else %}
            {{- raise_exception('User message must have a string or a list of chunks in content') }}
        {%- endif %}
    {#- Assistant messages support text content or text, image and thinking chunks. #}
    {%- elif message['role'] == 'assistant' %}
        {%- if (message['content'] is none or message['content'] == '' or message['content']|length == 0) and (message['tool_calls'] is not defined or message['tool_calls'] is none or message['tool_calls']|length == 0) %}
            {{- raise_exception('Assistant message must have a string or a list of chunks in content or a list of tool calls.') }}
        {%- endif %}
        {%- if message['content'] is string and message['content'] != '' %}
            {{- message['content'] }}
        {%- elif message['content'] | length > 0 %}
            {%- for block in message['content'] %}
                {%- if block['type'] == 'text' %}
                    {{- block['text'] }}
                {%- elif block['type'] == 'thinking' %}
                    {{- '[THINK]' + block['thinking'] + '[/THINK]' }}
                {%- else %}
                    {{- raise_exception('Only text and thinking chunks are supported in assistant message contents.') }}
                {%- endif %}
            {%- endfor %}
        {%- endif %}
        {%- if message['tool_calls'] is defined and message['tool_calls'] is not none and message['tool_calls']|length > 0 %}
            {%- for tool in message['tool_calls'] %}
                {{- '[TOOL_CALLS]' }}
                {%- set name = tool['function']['name'] %}
                {%- set arguments = tool['function']['arguments'] %}
                {%- if arguments is not string %}
                    {%- set arguments = arguments|tojson|safe %}
                {%- elif arguments == '' %}
                    {%- set arguments = '{}' %}
                {%- endif %}
                {{- name + '[ARGS]' + arguments }}
            {%- endfor %}
        {%- endif %}
        {{- eos_token }}
    {#- Tool messages only support text content. #}
    {%- elif message['role'] == 'tool' %}
        {{- '[TOOL_RESULTS]' + message['content']|string + '[/TOOL_RESULTS]' }}
    {#- Raise exception for unsupported roles. #}
    {%- else %}
        {{- raise_exception('Only user, assistant and tool roles are supported, got ' + message['role'] + '.') }}
    {%- endif %}
{%- endfor %}
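For orientation, a sketch of the prompt this template renders for one user turn followed by an assistant tool call and its result. Here <s> and </s> stand for the tokenizer's actual BOS/EOS tokens, get_weather is a hypothetical tool, and the elided spans are the default system message and the tool's JSON definition:

    <s>[SYSTEM_PROMPT]# HOW YOU SHOULD THINK AND ANSWER…[/SYSTEM_PROMPT][AVAILABLE_TOOLS][{…get_weather definition…}][/AVAILABLE_TOOLS][INST]What is the weather in Paris?[/INST][TOOL_CALLS]get_weather[ARGS]{"city": "Paris"}</s>[TOOL_RESULTS]{"temperature": 12}[/TOOL_RESULTS]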

View File: tests/test-chat.cpp

@@ -539,6 +539,71 @@ const common_chat_msg message_assist_call_python_lines = simple_assist
const common_chat_msg message_assist_call_python_lines_unclosed = simple_assist_msg("", "", "python", "{\"code\":\"# This is a program:\\nprint('hey')");
const common_chat_msg message_assist_call_code_interpreter = simple_assist_msg("", "", "code_interpreter", "{\"code\":\"print('hey')\"}");
// Used for PEG parser implementations
struct peg_test_case {
    common_chat_templates_inputs params;
    std::string input;
    common_chat_msg expect;
};

struct make_peg_parser {
    common_chat_params params_;
    common_peg_arena arena_;

    make_peg_parser(common_chat_templates * tmpls, const common_chat_templates_inputs & inputs) {
        params_ = common_chat_templates_apply(tmpls, inputs);
        arena_.load(params_.parser);
    }

    common_chat_msg parse(const std::string & msg, bool is_partial) {
        return common_chat_peg_parse(arena_, msg, is_partial, /* syntax = */ {params_.format});
    }
};

static void test_peg_parser(common_chat_templates * tmpls, const std::function<void(peg_test_case &)> & init) {
    peg_test_case tc;
    init(tc);
    if (tc.params.messages.empty()) {
        tc.params.messages = {message_user};
    }
    if (tc.expect.role.empty()) {
        tc.expect.role = "assistant";
    }

    auto parser = make_peg_parser(tmpls, tc.params);

    common_chat_msg msg_accum;
    common_chat_msg msg_prev;
    msg_accum.role = msg_prev.role = "assistant";

    // Feed the input one character at a time to exercise partial (streaming)
    // parsing, accumulating the diffs between successive parses.
    for (size_t i = 1; i <= tc.input.size(); ++i) {
        auto is_partial = i < tc.input.size();
        common_chat_msg msg_current = parser.parse(tc.input.substr(0, i), is_partial);
        for (const auto & diff : common_chat_msg_diff::compute_diffs(msg_prev, msg_current)) {
            if (!diff.reasoning_content_delta.empty()) {
                msg_accum.reasoning_content += diff.reasoning_content_delta;
            }
            if (!diff.content_delta.empty()) {
                msg_accum.content += diff.content_delta;
            }
            if (diff.tool_call_index != std::string::npos) {
                if (!diff.tool_call_delta.name.empty()) {
                    msg_accum.tool_calls.push_back({diff.tool_call_delta.name, "", ""});
                }
                if (!diff.tool_call_delta.arguments.empty()) {
                    msg_accum.tool_calls.back().arguments += diff.tool_call_delta.arguments;
                }
            }
        }
        assert_msg_equals(msg_current, msg_accum, true);
        msg_prev = msg_current;
    }

    // Both the accumulated stream and a one-shot parse must match the expectation.
    assert_msg_equals(tc.expect, parser.parse(tc.input, false), true);
    assert_msg_equals(tc.expect, msg_accum, true);
}
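// A per-format test then reduces to a call like the following (a sketch mirroring
// the real Ministral cases added further down; the input is raw model output, the
// expectation the parsed message):
//
//   test_peg_parser(tmpls.get(), [&](auto & t) {
//       t.input = "[THINK]...[/THINK]Hi!";
//       t.params.reasoning_format = COMMON_REASONING_FORMAT_AUTO;
//       t.expect.reasoning_content = "...";
//       t.expect.content = "Hi!";
//   });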
static void test_msgs_oaicompat_json_conversion() {
    printf("[%s]\n", __func__);
    std::vector<common_chat_msg> msgs{
@@ -3434,7 +3499,95 @@ Hey there!<|im_end|>
        auto grammar = build_grammar(params.grammar);
        GGML_ASSERT(grammar && "Failed to build Qwen3-Coder grammar with union types");
    }
}

static void test_template_output_peg_parsers() {
    printf("[%s]\n", __func__);

    // JSON schemas
    const char * invoice_schema = R"({
        "type": "object",
        "properties": {
            "amount": {"type": "number"},
            "date": {"type": "string"}
        }
    })";

    {
        // Ministral-3-14B-Reasoning-2512
        auto tmpls = read_templates("models/templates/mistralai-Ministral-3-14B-Reasoning-2512.jinja");

        // Test basic message
        test_peg_parser(tmpls.get(), [&](auto & t) {
            t.input = "Hello, world!\nWhat's up?";
            t.expect = message_assist;
        });

        // Test basic message and reasoning with reasoning_format = none
        test_peg_parser(tmpls.get(), [&](auto & t) {
            t.input = "[THINK]I'm\nthinking[/THINK]Hello, world!\nWhat's up?";
            t.expect.content = "[THINK]I'm\nthinking[/THINK]Hello, world!\nWhat's up?";
        });

        // Test basic message and reasoning with reasoning_format = auto
        test_peg_parser(tmpls.get(), [&](auto & t) {
            t.input = "[THINK]I'm\nthinking[/THINK]Hello, world!\nWhat's up?";
            t.params.reasoning_format = COMMON_REASONING_FORMAT_AUTO;
            t.expect = message_assist_thoughts;
        });

        // Test tool call
        test_peg_parser(tmpls.get(), [&](auto & t) {
            t.input = R"([TOOL_CALLS]special_function[ARGS]{"arg1":1})";
            t.params.reasoning_format = COMMON_REASONING_FORMAT_AUTO;
            t.params.tools = {special_function_tool};
            t.expect = message_assist_call;
        });

        // Test tool call with reasoning
        test_peg_parser(tmpls.get(), [&](auto & t) {
            t.input = "[THINK]I'm\nthinking[/THINK]"
                      R"([TOOL_CALLS]special_function[ARGS]{"arg1":1})";
            t.params.reasoning_format = COMMON_REASONING_FORMAT_AUTO;
            t.params.tools = {special_function_tool};
            t.expect = message_assist_call_thoughts;
        });

        // Test parallel tool calls
        test_peg_parser(tmpls.get(), [&](auto & t) {
            t.input = R"([TOOL_CALLS]special_function[ARGS]{"arg1": 1})"
                      R"([TOOL_CALLS]special_function_with_opt[ARGS]{"arg1": 1, "arg2": 2})";
            t.params.reasoning_format = COMMON_REASONING_FORMAT_AUTO;
            t.params.parallel_tool_calls = true;
            t.params.tools = {special_function_tool, special_function_tool_with_optional_param};
            t.expect.tool_calls = {{
                /* .name = */ "special_function",
                /* .arguments = */ R"({"arg1": 1})",
                /* .id = */ {},
            }, {
                /* .name = */ "special_function_with_opt",
                /* .arguments = */ R"({"arg1": 1, "arg2": 2})",
                /* .id = */ {},
            }};
        });

        // Test response format
        test_peg_parser(tmpls.get(), [&](auto & t) {
            t.input = "[THINK]I need to output the invoice details in JSON[/THINK]"
                      "```json\n"
                      R"({"amount": 123.45, "date": "2025-12-03"})"
                      "\n```";
            t.params.reasoning_format = COMMON_REASONING_FORMAT_AUTO;
            t.params.json_schema = invoice_schema;
            t.expect.reasoning_content = "I need to output the invoice details in JSON";
            t.expect.content = R"({"amount": 123.45, "date": "2025-12-03"})";
        });
    }
}

static void test_msg_diffs_compute() {
@@ -3560,6 +3713,7 @@ int main(int argc, char ** argv) {
        test_msgs_oaicompat_json_conversion();
        test_tools_oaicompat_json_conversion();
        test_template_output_parsers();
        test_template_output_peg_parsers();
        std::cout << "\n[chat] All tests passed!" << '\n';
    }
    return 0;

View File: tools/server/utils.hpp

@@ -972,6 +972,9 @@ json oaicompat_chat_params_parse(
    inputs.parallel_tool_calls = json_value(body, "parallel_tool_calls", false);
    inputs.add_generation_prompt = json_value(body, "add_generation_prompt", true);
    inputs.reasoning_format = opt.reasoning_format;
if (body.contains("reasoning_format")) {
inputs.reasoning_format = common_reasoning_format_from_name(body.at("reasoning_format").get<std::string>());
}
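    // This lets a single request override the server-wide default. A sketch of
    // such a request body (only reasoning_format matters here; the remaining
    // fields follow the usual OpenAI-compatible schema, and "none"/"auto" are
    // existing common_reasoning_format names):
    //
    //   {
    //     "messages": [{"role": "user", "content": "What is 2+2?"}],
    //     "reasoning_format": "none"
    //   }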
    inputs.enable_thinking = opt.enable_thinking;
    if (!inputs.tools.empty() && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE) {
        if (body.contains("grammar")) {