# llama.cpp/tools/server/tests/unit/test_compat_oai_responses.py
import pytest
from openai import OpenAI
from utils import *
# Shared server handle; (re)assigned by the autouse create_server fixture before each test.
server: ServerProcess
@pytest.fixture(autouse=True)
def create_server():
    """Provision a fresh tinyllama2 server preset before every test in this module."""
    global server
    server = ServerPreset.tinyllama2()
def test_responses_with_openai_library():
    """Non-streaming /v1/responses request issued through the official OpenAI client."""
    global server
    server.start()
    base_url = f"http://{server.server_host}:{server.server_port}/v1"
    client = OpenAI(api_key="dummy", base_url=base_url)
    messages = [
        {"role": "system", "content": "Book"},
        {"role": "user", "content": "What is the best book"},
    ]
    res = client.responses.create(
        model="gpt-4.1",
        input=messages,
        max_output_tokens=8,
        temperature=0.8,
    )
    # tinyllama2 preset deterministically produces "Suddenly..." output
    assert match_regex("(Suddenly)+", res.output_text)
def test_responses_stream_with_openai_library():
    """Streaming /v1/responses request: delta events must concatenate to the final text.

    Bug fix: previously every assertion was nested inside the
    ``response.completed`` branch, so a stream that never emitted the
    terminal event would iterate to completion and the test would pass
    without asserting anything. A ``completed`` flag is now checked
    after the loop to guarantee the terminal event was observed.
    """
    global server
    server.start()
    client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
    stream = client.responses.create(
        model="gpt-4.1",
        input=[
            {"role": "system", "content": "Book"},
            {"role": "user", "content": "What is the best book"},
        ],
        max_output_tokens=8,
        temperature=0.8,
        stream=True,
    )
    gathered_text = ''
    completed = False
    for r in stream:
        if r.type == "response.output_text.delta":
            gathered_text += r.delta
        elif r.type == "response.completed":
            completed = True
            # the concatenated deltas must reproduce the server's final output text
            assert gathered_text == r.response.output_text
            assert match_regex("(Suddenly)+", r.response.output_text)
    # fail loudly if the stream ended without a response.completed event
    assert completed