enhance context retention between tool calls, fix lints
parent 0a428ff112
commit 1bbf328caf
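In short: when a conversation is resumed after a tool call, the assistant turn that issued the call is sent back together with its reasoning_content, so the model keeps its earlier thinking. A minimal sketch of that mapping (illustrative only; the type and function names below are assumptions, not the webui's actual code):

// Sketch of the conditional-spread mapping this commit adds to ChatService.
// StoredMessage / toRequestMessage are hypothetical names used for illustration.
interface StoredMessage {
  role: 'system' | 'user' | 'assistant' | 'tool';
  content: string;
  thinking?: string;   // reasoning captured while the assistant streamed
  toolCallId?: string; // set on tool-result messages
}

interface RequestMessage {
  role: string;
  content: string;
  reasoning_content?: string; // non-OpenAI field accepted by llama-server
  tool_call_id?: string;
}

function toRequestMessage(msg: StoredMessage): RequestMessage {
  return {
    role: msg.role,
    content: msg.content,
    // Only assistant turns echo their reasoning back to the server.
    ...(msg.role === 'assistant' && msg.thinking ? { reasoning_content: msg.thinking } : {}),
    // Only tool-result turns reference the call they answer.
    ...(msg.toolCallId ? { tool_call_id: msg.toolCallId } : {})
  };
}

The conditional spreads keep optional fields out of the serialized JSON entirely when they are absent, which matches how the actual ChatService changes below build the request.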
@@ -5,19 +5,22 @@
   import { buttonVariants } from '$lib/components/ui/button/index.js';
   import { Card } from '$lib/components/ui/card';
   import { config } from '$lib/stores/settings.svelte';
+  import type { Snippet } from 'svelte';

   interface Props {
     class?: string;
     hasRegularContent?: boolean;
     isStreaming?: boolean;
     reasoningContent: string | null;
+    children?: Snippet;
   }

   let {
     class: className = '',
     hasRegularContent = false,
     isStreaming = false,
-    reasoningContent
+    reasoningContent,
+    children
   }: Props = $props();

   const currentConfig = config();
@@ -72,9 +75,11 @@
   <div class="border-t border-muted px-3 pb-3">
     <div class="pt-3">
       <div class="text-xs leading-relaxed break-words whitespace-pre-wrap">
-        <slot>
+        {#if children}
+          {@render children()}
+        {:else}
           {reasoningContent ?? ''}
-        </slot>
+        {/if}
       </div>
     </div>
   </div>

@@ -119,6 +119,7 @@ export class ChatService {
       messages: normalizedMessages.map((msg: ApiChatMessageData) => ({
         role: msg.role,
         content: msg.content,
+        ...(msg.reasoning_content ? { reasoning_content: msg.reasoning_content } : {}),
         ...((msg as ApiChatCompletionRequestMessage).tool_call_id
           ? { tool_call_id: (msg as ApiChatCompletionRequestMessage).tool_call_id }
           : {}),
@@ -602,6 +603,9 @@ export class ChatService {
     return {
       role: message.role as ChatRole,
       content: message.content,
+      ...(message.role === 'assistant' && message.thinking
+        ? { reasoning_content: message.thinking }
+        : {}),
       // tool_call_id is only relevant for tool role messages
       ...(message.toolCallId ? { tool_call_id: message.toolCallId } : {}),
       ...(toolCalls ? { tool_calls: toolCalls } : {})
@@ -693,6 +697,9 @@ export class ChatService {
     return {
       role: message.role as ChatRole,
       content: contentParts,
+      ...(message.role === 'assistant' && message.thinking
+        ? { reasoning_content: message.thinking }
+        : {}),
       ...(message.toolCallId ? { tool_call_id: message.toolCallId } : {}),
       ...(toolCalls ? { tool_calls: toolCalls } : {})
     };

@@ -35,6 +35,13 @@ export interface ApiChatMessageData {
   role: ChatRole;
   content: string | ApiChatMessageContentPart[];
   timestamp?: number;
+  /**
+   * Optional reasoning/thinking content to be sent back to the server.
+   *
+   * llama-server accepts this non-OpenAI field and uses it to preserve the model's
+   * internal "thinking" blocks across tool-call resumptions (notably for gpt-oss).
+   */
+  reasoning_content?: string;
   tool_call_id?: string;
   tool_calls?: ApiChatCompletionToolCallDelta[];
 }
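To make the field's role concrete, a resumed tool-call request might serialize roughly like this (values and the tool-call shape are invented for illustration; the real payload is assembled by ChatService above):

// Illustrative /v1/chat/completions body for a tool-call resumption.
// The assistant turn keeps its reasoning_content alongside its tool_calls,
// so the model can continue from its own earlier thinking.
const resumedRequestBody = {
  messages: [
    { role: 'user', content: 'What is the weather in Berlin?' },
    {
      role: 'assistant',
      content: '',
      reasoning_content: 'Need current conditions; call the weather tool first.',
      tool_calls: [
        {
          id: 'call_1',
          type: 'function',
          function: { name: 'get_weather', arguments: '{"city":"Berlin"}' }
        }
      ]
    },
    { role: 'tool', tool_call_id: 'call_1', content: '{"temp_c":21,"sky":"clear"}' }
  ],
  stream: true
};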
@@ -183,6 +190,7 @@ export interface ApiLlamaCppServerProps {
 export interface ApiChatCompletionRequestMessage {
   role: ChatRole;
   content: string | ApiChatMessageContentPart[];
+  reasoning_content?: string;
   tool_call_id?: string;
   tool_calls?: ApiChatCompletionToolCallDelta[];
 }

@@ -75,9 +75,8 @@ describe('ChatMessage delete for merged assistant messages', () => {
     conversationsStore.activeMessages = allMessages;

     // Avoid touching IndexedDB by stubbing the store call used by getDeletionInfo.
-    const originalGetConversationMessages = conversationsStore.getConversationMessages.bind(
-      conversationsStore
-    );
+    const originalGetConversationMessages =
+      conversationsStore.getConversationMessages.bind(conversationsStore);
     conversationsStore.getConversationMessages = async () => allMessages;

     const onDelete = vi.fn();
@@ -111,4 +110,3 @@ describe('ChatMessage delete for merged assistant messages', () => {
     }
   });
 });
-

@@ -14,4 +14,3 @@
     <ChatMessage message={props.message} onDelete={props.onDelete} />
   </Sidebar.Provider>
 </Tooltip.Provider>
-

@@ -137,4 +137,6 @@ test('tool output does not echo tool arguments back to the model', async ({ page
   );
   expect(assistantWithToolCall).toBeTruthy();
   expect(JSON.stringify(assistantWithToolCall?.tool_calls ?? null)).toContain('LARGE_CODE_BEGIN');
+  // Preserve the model's reasoning across tool-call resumptions (required for gpt-oss).
+  expect(String(assistantWithToolCall?.reasoning_content ?? '')).toContain('reasoning-step-1');
 });
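The new assertion inspects the assistant message the webui forwards after the tool call. One way a Playwright test can capture that payload (a sketch using standard request interception; the route pattern and variable names are assumptions, not necessarily what this suite does):

// Sketch: record every completion request body so later assertions can
// look for the assistant turn that carried tool_calls and reasoning_content.
const capturedBodies: Array<Record<string, unknown>> = [];
await page.route('**/v1/chat/completions', async (route) => {
  capturedBodies.push(route.request().postDataJSON());
  await route.continue(); // or route.fulfill(...) when mocking the server
});

// ...drive the UI through the tool call, then:
const assistantWithToolCall = capturedBodies
  .flatMap((body) => (body.messages as Array<Record<string, unknown>>) ?? [])
  .find((m) => m.role === 'assistant' && m.tool_calls);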