feat: Raw LLM output switch per message

Author: Aleksander Grygier
Date: 2025-12-31 13:08:26 +01:00
parent 284425097b
commit 62dbc9f654
3 changed files with 4 additions and 4 deletions


@@ -83,7 +83,7 @@
 // Check if content contains agentic tool call markers
 const isAgenticContent = $derived(
-  messageContent?.includes('<!-- AGENTIC_TOOL_CALL_START -->') ?? false
+  messageContent?.includes('<<<AGENTIC_TOOL_CALL_START>>>') ?? false
 );
 const processingState = useProcessingState();
@@ -261,7 +261,7 @@
   {onConfirmDelete}
   {onNavigateToSibling}
   {onShowDeleteDialogChange}
-  showRawOutputSwitch={currentConfig.showRawOutputSwitch}
+  showRawOutputSwitch={currentConfig.disableReasoningFormat}
   rawOutputEnabled={showRawOutput}
   onRawOutputToggle={(enabled) => (showRawOutput = enabled)}
 />
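
This hunk gates the per-message switch on the existing disableReasoningFormat setting rather than a separate config key. A minimal sketch of the assumed prop contract, using only the prop names visible in the hunk above (the interface itself is illustrative, not part of the commit):

// Illustrative TypeScript sketch; only the prop names come from the diff.
interface RawOutputSwitchProps {
  // Whether the per-message switch is rendered at all; bound to the
  // global `disableReasoningFormat` setting in the current config.
  showRawOutputSwitch: boolean;
  // Per-message state: true means the raw, unprocessed output is shown.
  rawOutputEnabled: boolean;
  // Called by the message component when the user flips the switch.
  onRawOutputToggle: (enabled: boolean) => void;
}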


@@ -273,7 +273,7 @@
 fields: [
   {
     key: 'disableReasoningFormat',
-    label: 'Show raw LLM output',
+    label: 'Enable raw LLM output switch',
     type: 'checkbox'
   },
   {


@@ -94,7 +94,7 @@ export const SETTING_CONFIG_INFO: Record<string, string> = {
 custom: 'Custom JSON parameters to send to the API. Must be valid JSON format.',
 showThoughtInProgress: 'Expand thought process by default when generating messages.',
 disableReasoningFormat:
-  'Show raw LLM output without backend parsing and frontend Markdown rendering to inspect streaming across different models.',
+  'Enable raw LLM output switch to show unprocessed model output without backend parsing and frontend Markdown rendering to inspect streaming across different models.',
 keepStatsVisible: 'Keep processing statistics visible after generation finishes.',
 showMessageStats:
   'Display generation statistics (tokens/second, token count, duration) below each assistant message.',
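
The updated description implies a per-message rendering branch roughly like the sketch below; renderMessage, renderMarkdown, and escapeHtml are hypothetical names used only to illustrate the idea, not functions from this codebase.

// Hypothetical sketch: branch on the per-message switch to bypass Markdown rendering.
declare function renderMarkdown(text: string): string; // assumed Markdown renderer
declare function escapeHtml(text: string): string;     // assumed HTML escaper

function renderMessage(messageContent: string, rawOutputEnabled: boolean): string {
  return rawOutputEnabled
    ? `<pre>${escapeHtml(messageContent)}</pre>` // raw, unprocessed model output
    : renderMarkdown(messageContent);            // normal Markdown rendering path
}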