server: fix reasoning item content format handling for multi-turn
Accept all valid reasoning item content formats in multi-turn input:
- Array of objects: [{"type":"reasoning_text","text":"..."}] (spec format)
- Plain string: "thinking about it" (OpenCode format)
- Null: content:null with encrypted_content (Codex, openai/codex#11834)
- Omitted entirely: no content field present
Previously threw "item['content'] is not an array" for non-array formats,
breaking OpenCode multi-turn conversations. The encrypted_content field
is accepted but ignored for local models (no server-side decryption).
Add 4 tests covering each format variant.
Refs: openai/codex#11834, anomalyco/opencode#19081
This commit is contained in:
parent
428b68a2b6
commit
adef64cb9f
|
|
@ -1387,24 +1387,30 @@ json convert_responses_to_chatcmpl(const json & response_body) {
|
|||
item.at("type") == "reasoning") {
|
||||
// #responses_create-input-input_item_list-item-reasoning
|
||||
|
||||
if (!exists_and_is_array(item, "content")) {
|
||||
throw std::invalid_argument("item['content'] is not an array");
|
||||
}
|
||||
if (item.at("content").empty()) {
|
||||
throw std::invalid_argument("item['content'] is empty");
|
||||
}
|
||||
if (!exists_and_is_string(item.at("content")[0], "text")) {
|
||||
throw std::invalid_argument("item['content']['text'] is not a string");
|
||||
// content can be: null, omitted, a string, or array of {type, text} objects.
|
||||
// Codex may send content:null or omit it entirely (issue openai/codex#11834).
|
||||
// OpenCode may send content as a plain string.
|
||||
// The spec uses array format: [{"type":"reasoning_text","text":"..."}].
|
||||
// encrypted_content (opaque string) is accepted but ignored for local models.
|
||||
std::string reasoning_text;
|
||||
if (!item.contains("content") || item.at("content").is_null()) {
|
||||
// null or missing content — skip (encrypted_content only, or empty reasoning)
|
||||
} else if (item.at("content").is_string()) {
|
||||
reasoning_text = item.at("content").get<std::string>();
|
||||
} else if (item.at("content").is_array() && !item.at("content").empty()
|
||||
&& exists_and_is_string(item.at("content")[0], "text")) {
|
||||
reasoning_text = item.at("content")[0].at("text").get<std::string>();
|
||||
}
|
||||
// else: empty array or unrecognized format — treat as empty reasoning
|
||||
|
||||
if (merge_prev) {
|
||||
auto & prev_msg = chatcmpl_messages.back();
|
||||
prev_msg["reasoning_content"] = item.at("content")[0].at("text");
|
||||
prev_msg["reasoning_content"] = reasoning_text;
|
||||
} else {
|
||||
chatcmpl_messages.push_back(json {
|
||||
{"role", "assistant"},
|
||||
{"content", json::array()},
|
||||
{"reasoning_content", item.at("content")[0].at("text")},
|
||||
{"reasoning_content", reasoning_text},
|
||||
});
|
||||
}
|
||||
} else {
|
||||
|
|
|
|||
|
|
@ -509,3 +509,86 @@ def test_responses_stream_delta_events_have_indices():
|
|||
assert saw_output_item_added, "never received response.output_item.added"
|
||||
assert saw_content_part_added, "never received response.content_part.added"
|
||||
assert saw_output_text_delta, "never received response.output_text.delta"
|
||||
|
||||
|
||||
def test_responses_reasoning_content_array():
    """Spec-format reasoning content — an array of {type, text} objects — must round-trip."""
    global server
    server.start()
    # Multi-turn transcript: user -> reasoning (array form) -> assistant -> user.
    payload = {
        "model": "gpt-4.1",
        "input": [
            {"role": "user", "content": [{"type": "input_text", "text": "Hi"}]},
            {"type": "reasoning", "summary": [],
             "content": [{"type": "reasoning_text", "text": "thinking"}]},
            {"role": "assistant", "type": "message",
             "content": [{"type": "output_text", "text": "Hello"}]},
            {"role": "user", "content": [{"type": "input_text", "text": "How are you"}]},
        ],
        "max_output_tokens": 8,
        "temperature": 0.8,
    }
    res = server.make_request("POST", "/v1/responses", data=payload)
    assert res.status_code == 200
    assert res.body["status"] == "completed"
|
||||
|
||||
|
||||
def test_responses_reasoning_content_string():
    """A bare-string reasoning content field (OpenCode format) must be accepted."""
    global server
    server.start()
    # Same multi-turn shape as the array test, but content is a plain string.
    payload = {
        "model": "gpt-4.1",
        "input": [
            {"role": "user", "content": [{"type": "input_text", "text": "Hi"}]},
            {"type": "reasoning", "summary": [], "content": "thinking about it"},
            {"role": "assistant", "type": "message",
             "content": [{"type": "output_text", "text": "Hello"}]},
            {"role": "user", "content": [{"type": "input_text", "text": "How are you"}]},
        ],
        "max_output_tokens": 8,
        "temperature": 0.8,
    }
    res = server.make_request("POST", "/v1/responses", data=payload)
    assert res.status_code == 200
    assert res.body["status"] == "completed"
|
||||
|
||||
|
||||
def test_responses_reasoning_content_null():
    """content:null alongside encrypted_content (Codex format, issue
    openai/codex#11834) must not be rejected by the server."""
    global server
    server.start()
    # encrypted_content is opaque to a local model; the server should just skip it.
    payload = {
        "model": "gpt-4.1",
        "input": [
            {"role": "user", "content": [{"type": "input_text", "text": "Hi"}]},
            {"type": "reasoning", "summary": [], "content": None,
             "encrypted_content": "opaque_data_here"},
            {"role": "assistant", "type": "message",
             "content": [{"type": "output_text", "text": "Hello"}]},
            {"role": "user", "content": [{"type": "input_text", "text": "How are you"}]},
        ],
        "max_output_tokens": 8,
        "temperature": 0.8,
    }
    res = server.make_request("POST", "/v1/responses", data=payload)
    assert res.status_code == 200
    assert res.body["status"] == "completed"
|
||||
|
||||
|
||||
def test_responses_reasoning_content_omitted():
    """A reasoning item with no content field at all must be accepted."""
    global server
    server.start()
    # The reasoning item carries only a summary; content is absent entirely.
    payload = {
        "model": "gpt-4.1",
        "input": [
            {"role": "user", "content": [{"type": "input_text", "text": "Hi"}]},
            {"type": "reasoning", "summary": []},
            {"role": "assistant", "type": "message",
             "content": [{"type": "output_text", "text": "Hello"}]},
            {"role": "user", "content": [{"type": "input_text", "text": "How are you"}]},
        ],
        "max_output_tokens": 8,
        "temperature": 0.8,
    }
    res = server.make_request("POST", "/v1/responses", data=payload)
    assert res.status_code == 200
    assert res.body["status"] == "completed"
|
||||
|
|
|
|||
Loading…
Reference in New Issue