Merge 08e16816b4 into f49e917876
This commit is contained in:
commit 9cc73489aa
@ -1430,6 +1430,35 @@ json convert_responses_to_chatcmpl(const json & response_body) {
|
|||
chatcmpl_body["max_tokens"] = response_body["max_output_tokens"];
|
||||
}
|
||||
|
||||
// Convert Responses API text.format to Chat Completions response_format
|
||||
if (response_body.contains("text") && response_body.at("text").contains("format")) {
|
||||
const json & text_format = response_body.at("text").at("format");
|
||||
std::string format_type = json_value(text_format, "type", std::string());
|
||||
|
||||
if (format_type == "json_schema") {
|
||||
json json_schema_wrapper = {
|
||||
{"name", json_value(text_format, "name", std::string("default"))},
|
||||
{"schema", json_value(text_format, "schema", json::object())},
|
||||
};
|
||||
if (text_format.contains("strict")) {
|
||||
json_schema_wrapper["strict"] = text_format.at("strict");
|
||||
}
|
||||
if (text_format.contains("description")) {
|
||||
json_schema_wrapper["description"] = text_format.at("description");
|
||||
}
|
||||
chatcmpl_body["response_format"] = {
|
||||
{"type", "json_schema"},
|
||||
{"json_schema", json_schema_wrapper},
|
||||
};
|
||||
} else if (format_type == "json_object") {
|
||||
chatcmpl_body["response_format"] = {
|
||||
{"type", "json_object"},
|
||||
};
|
||||
}
|
||||
|
||||
chatcmpl_body.erase("text");
|
||||
}
|
||||
|
||||
return chatcmpl_body;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -71,3 +71,25 @@ def test_responses_stream_with_openai_library():
|
|||
assert r.response.output[0].id.startswith("msg_")
|
||||
assert gathered_text == r.response.output_text
|
||||
assert match_regex("(Suddenly)+", r.response.output_text)
|
||||
|
||||
@pytest.mark.parametrize("text_format,n_predicted,re_content", [
|
||||
({"type": "json_schema", "name": "test_schema", "schema": {"const": "foooooo"}}, 10, "\"foooooo\""),
|
||||
({"type": "json_object"}, 10, "(\\{|John)+"),
|
||||
])
|
||||
def test_responses_text_format(text_format: dict, n_predicted: int, re_content: str | None):
|
||||
global server
|
||||
server.start()
|
||||
res = server.make_request("POST", "/v1/responses", data={
|
||||
"model": "gpt-4.1",
|
||||
"max_output_tokens": n_predicted,
|
||||
"input": [
|
||||
{"role": "system", "content": "You are a coding assistant."},
|
||||
{"role": "user", "content": "Write an example"},
|
||||
],
|
||||
"text": {"format": text_format},
|
||||
})
|
||||
|
||||
|
||||
assert res.status_code == 200
|
||||
output_text = res.body["output"][0]["content"][0]["text"]
|
||||
assert match_regex(re_content, output_text)
|
||||
|
|
|
|||
Loading…
Reference in New Issue