webui: split raw output into backend parsing and frontend display options

Pascal 2026-01-03 17:30:52 +01:00 committed by Aleksander Grygier
parent 18efdabb12
commit afb79b2970
4 changed files with 14 additions and 12 deletions
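
The single disableReasoningFormat setting used to control both behaviours at once. This commit splits it into two independent settings; the sketch below only summarizes the split (the names come from the diff, the surrounding object is purely illustrative):

// backend half: ask the server not to parse reasoning out of the stream
// frontend half: offer a toggle that renders messages as plain text
const rawOutputSettings = {
  disableReasoningParsing: false, // sends reasoning_format=none with chat requests
  showRawOutputSwitch: false      // shows the raw-output toggle in the UI
};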

View File

@@ -261,7 +261,7 @@
{onConfirmDelete}
{onNavigateToSibling}
{onShowDeleteDialogChange}
- showRawOutputSwitch={currentConfig.disableReasoningFormat}
+ showRawOutputSwitch={currentConfig.showRawOutputSwitch}
rawOutputEnabled={showRawOutput}
onRawOutputToggle={(enabled) => (showRawOutput = enabled)}
/>
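
The screen now gates the toggle on the new showRawOutputSwitch setting instead of the old combined flag. A rough sketch of the props involved, assuming an interface like the following (the interface name is hypothetical; the prop names come from the diff):

interface RawOutputToggleProps {
  showRawOutputSwitch: boolean;                  // whether to render the toggle at all
  rawOutputEnabled: boolean;                     // current toggle state
  onRawOutputToggle: (enabled: boolean) => void; // called when the user flips the toggle
}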

View File

@@ -276,8 +276,13 @@
icon: Code,
fields: [
{
- key: 'disableReasoningFormat',
- label: 'Enable raw LLM output switch',
+ key: 'disableReasoningParsing',
+ label: 'Disable reasoning content parsing',
type: 'checkbox'
},
+ {
+ key: 'showRawOutputSwitch',
+ label: 'Enable raw output toggle',
+ type: 'checkbox'
+ },
{
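
The settings dialog now exposes each half as its own checkbox. For reference, a minimal sketch of the field shape these entries appear to follow (the type name and the non-checkbox variants are assumptions, not taken from the diff):

interface SettingsFieldConfig {
  key: string;    // settings key, e.g. 'disableReasoningParsing' or 'showRawOutputSwitch'
  label: string;  // text shown next to the control in the settings dialog
  type: 'checkbox' | 'input' | 'select'; // only 'checkbox' is visible in this diff
}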

View File

@@ -6,7 +6,8 @@ export const SETTING_CONFIG_DEFAULT: Record<string, string | number | boolean> =
showSystemMessage: true,
theme: 'system',
showThoughtInProgress: false,
- disableReasoningFormat: false,
+ disableReasoningParsing: false,
+ showRawOutputSwitch: false,
keepStatsVisible: false,
showMessageStats: true,
askForTitleConfirmation: false,
@@ -95,8 +96,10 @@ export const SETTING_CONFIG_INFO: Record<string, string> = {
max_tokens: 'The maximum number of token per output. Use -1 for infinite (no limit).',
custom: 'Custom JSON parameters to send to the API. Must be valid JSON format.',
showThoughtInProgress: 'Expand thought process by default when generating messages.',
- disableReasoningFormat:
- 'Enable raw LLM output switch to show unprocessed model output without backend parsing and frontend Markdown rendering to inspect streaming across different models.',
+ disableReasoningParsing:
+ 'Send reasoning_format=none to prevent server-side extraction of reasoning tokens into separate field',
+ showRawOutputSwitch:
+ 'Show toggle button to display messages as plain text instead of Markdown-formatted content',
keepStatsVisible: 'Keep processing statistics visible after generation finishes.',
showMessageStats:
'Display generation statistics (tokens/second, token count, duration) below each assistant message.',
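
The defaults and help texts make the division explicit: disableReasoningParsing only changes what the server is asked to return, while showRawOutputSwitch only changes rendering. A minimal sketch of how the backend half could be applied when building a chat completion request (the helper name and surrounding shape are hypothetical; only the reasoning_format=none behaviour is stated by the setting description above):

function buildChatRequestBody(
  messages: unknown[],
  config: { disableReasoningParsing: boolean }
) {
  const body: Record<string, unknown> = { messages, stream: true };
  if (config.disableReasoningParsing) {
    // keep reasoning tokens inline in the content instead of a separate field
    body.reasoning_format = 'none';
  }
  return body;
}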

View File

@@ -69,12 +69,6 @@ export const SYNCABLE_PARAMETERS: SyncableParameter[] = [
type: 'boolean',
canSync: true
},
- {
- key: 'disableReasoningFormat',
- serverKey: 'disableReasoningFormat',
- type: 'boolean',
- canSync: true
- },
{ key: 'keepStatsVisible', serverKey: 'keepStatsVisible', type: 'boolean', canSync: true },
{ key: 'showMessageStats', serverKey: 'showMessageStats', type: 'boolean', canSync: true },
{
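
Dropping the entry above means the old disableReasoningFormat key is no longer listed among the parameters synced with server-provided defaults, and this hunk adds no replacement entry, so the two new settings appear to stay client-side preferences. For orientation, the entry shape used in SYNCABLE_PARAMETERS, reconstructed from the fields visible in this diff (union variants beyond 'boolean' are assumptions):

interface SyncableParameter {
  key: string;        // webui settings key
  serverKey: string;  // matching key on the server side
  type: 'boolean' | 'number' | 'string';
  canSync: boolean;   // whether the value may be taken from the server
}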