refactor: Tool call handling
This commit is contained in:
parent 3e7318f09d
commit 03464a0780
Binary file not shown.
@@ -1,7 +1,18 @@
<script lang="ts">
|
||||
/**
|
||||
* AgenticContent - Chronological display of agentic flow output
|
||||
*
|
||||
* Parses content with tool call markers and displays them inline
|
||||
* with text content. Each tool call is shown as a collapsible box
|
||||
* similar to the reasoning/thinking block UI.
|
||||
*/
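For reference, a sketch of the marker format this component parses; the markers match what the agentic loop's emitToolPreview writes elsewhere in this commit, while the tool name and arguments here are made up:

```ts
// One tool call as it appears inside the streamed assistant content. A fenced
// preview of the tool result sits between TOOL_ARGS and the END marker.
const exampleAgenticChunk = [
  '<!-- AGENTIC_TOOL_CALL_START -->',
  '<!-- TOOL_NAME: get_weather -->',
  '<!-- TOOL_ARGS: {"city":"Warsaw"} -->',
  '<!-- AGENTIC_TOOL_CALL_END -->'
].join('\n');
```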
|
||||
|
||||
import { MarkdownContent } from '$lib/components/app';
|
||||
import { Wrench, ChevronDown, ChevronRight } from '@lucide/svelte';
|
||||
import { slide } from 'svelte/transition';
|
||||
import { Wrench } from '@lucide/svelte';
|
||||
import ChevronsUpDownIcon from '@lucide/svelte/icons/chevrons-up-down';
|
||||
import * as Collapsible from '$lib/components/ui/collapsible/index.js';
|
||||
import { buttonVariants } from '$lib/components/ui/button/index.js';
|
||||
import { Card } from '$lib/components/ui/card';
|
||||
|
||||
interface Props {
|
||||
content: string;
|
||||
|
|
@ -17,11 +28,19 @@
|
|||
|
||||
let { content }: Props = $props();
|
||||
|
||||
// Parse content into sections
|
||||
// Parse content into chronological sections
|
||||
const sections = $derived(parseAgenticContent(content));
|
||||
|
||||
// Track collapsed state for each tool call
|
||||
let collapsedArgs: Record<number, boolean> = $state({});
|
||||
// Track expanded state for each tool call (default expanded)
|
||||
let expandedStates: Record<number, boolean> = $state({});
|
||||
|
||||
function isExpanded(index: number): boolean {
|
||||
return expandedStates[index] ?? true;
|
||||
}
|
||||
|
||||
function toggleExpanded(index: number) {
|
||||
expandedStates[index] = !isExpanded(index);
|
||||
}
|
||||
|
||||
function parseAgenticContent(rawContent: string): AgenticSection[] {
|
||||
if (!rawContent) return [];
|
||||
|
|
@ -74,10 +93,6 @@
|
|||
return sections;
|
||||
}
|
||||
|
||||
function toggleArgs(index: number) {
|
||||
collapsedArgs[index] = !collapsedArgs[index];
|
||||
}
|
||||
|
||||
function formatToolArgs(args: string): string {
|
||||
try {
|
||||
const parsed = JSON.parse(args);
|
||||
|
|
@ -91,46 +106,57 @@
|
|||
<div class="agentic-content">
|
||||
{#each sections as section, index (index)}
|
||||
{#if section.type === 'text'}
|
||||
<div class="agentic-section agentic-text">
|
||||
<div class="agentic-text">
|
||||
<MarkdownContent content={section.content} />
|
||||
</div>
|
||||
{:else if section.type === 'tool_call'}
|
||||
<div class="agentic-section agentic-tool-call" transition:slide={{ duration: 200 }}>
|
||||
<div class="tool-call-header">
|
||||
<div class="tool-call-title">
|
||||
<Wrench class="h-4 w-4" />
|
||||
<span class="tool-name">{section.toolName}</span>
|
||||
</div>
|
||||
{#if section.toolArgs && section.toolArgs !== '{}'}
|
||||
<button
|
||||
type="button"
|
||||
class="tool-args-toggle"
|
||||
onclick={() => toggleArgs(index)}
|
||||
aria-expanded={!collapsedArgs[index]}
|
||||
<Collapsible.Root open={isExpanded(index)} class="mb-4">
|
||||
<Card class="gap-0 border-muted bg-muted/30 py-0">
|
||||
<Collapsible.Trigger
|
||||
class="flex w-full cursor-pointer items-center justify-between p-3"
|
||||
onclick={() => toggleExpanded(index)}
|
||||
>
|
||||
<div class="flex items-center gap-2 text-muted-foreground">
|
||||
<Wrench class="h-4 w-4" />
|
||||
<span class="font-mono text-sm font-medium">{section.toolName}</span>
|
||||
</div>
|
||||
|
||||
<div
|
||||
class={buttonVariants({
|
||||
variant: 'ghost',
|
||||
size: 'sm',
|
||||
class: 'h-6 w-6 p-0 text-muted-foreground hover:text-foreground'
|
||||
})}
|
||||
>
|
||||
{#if collapsedArgs[index]}
|
||||
<ChevronRight class="h-4 w-4" />
|
||||
{:else}
|
||||
<ChevronDown class="h-4 w-4" />
|
||||
<ChevronsUpDownIcon class="h-4 w-4" />
|
||||
<span class="sr-only">Toggle tool call content</span>
|
||||
</div>
|
||||
</Collapsible.Trigger>
|
||||
|
||||
<Collapsible.Content>
|
||||
<div class="border-t border-muted px-3 pb-3">
|
||||
{#if section.toolArgs && section.toolArgs !== '{}'}
|
||||
<div class="pt-3">
|
||||
<div class="mb-1 text-xs text-muted-foreground">Arguments:</div>
|
||||
<pre
|
||||
class="rounded bg-muted/30 p-2 font-mono text-xs leading-relaxed break-words whitespace-pre-wrap">{formatToolArgs(
|
||||
section.toolArgs
|
||||
)}</pre>
|
||||
</div>
|
||||
{/if}
|
||||
<span class="text-xs">Arguments</span>
|
||||
</button>
|
||||
{/if}
|
||||
</div>
|
||||
|
||||
{#if section.toolArgs && section.toolArgs !== '{}' && !collapsedArgs[index]}
|
||||
<div class="tool-args" transition:slide={{ duration: 150 }}>
|
||||
<pre class="tool-args-content">{formatToolArgs(section.toolArgs)}</pre>
|
||||
</div>
|
||||
{/if}
|
||||
|
||||
{#if section.toolResult}
|
||||
<div class="tool-result">
|
||||
<div class="tool-result-label">Result:</div>
|
||||
<MarkdownContent content={section.toolResult} />
|
||||
</div>
|
||||
{/if}
|
||||
</div>
|
||||
{#if section.toolResult}
|
||||
<div class="pt-3">
|
||||
<div class="mb-1 text-xs text-muted-foreground">Result:</div>
|
||||
<div class="text-sm">
|
||||
<MarkdownContent content={section.toolResult} />
|
||||
</div>
|
||||
</div>
|
||||
{/if}
|
||||
</div>
|
||||
</Collapsible.Content>
|
||||
</Card>
|
||||
</Collapsible.Root>
|
||||
{/if}
|
||||
{/each}
|
||||
</div>
|
||||
|
|
@ -139,82 +165,12 @@
|
|||
.agentic-content {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 1rem;
|
||||
gap: 0.5rem;
|
||||
width: 100%;
|
||||
max-width: 48rem;
|
||||
}
|
||||
|
||||
.agentic-section {
|
||||
.agentic-text {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.agentic-tool-call {
|
||||
border-left: 3px solid hsl(var(--primary) / 0.5);
|
||||
padding-left: 1rem;
|
||||
margin: 0.5rem 0;
|
||||
}
|
||||
|
||||
.tool-call-header {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
gap: 0.5rem;
|
||||
margin-bottom: 0.5rem;
|
||||
}
|
||||
|
||||
.tool-call-title {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.5rem;
|
||||
font-weight: 600;
|
||||
color: hsl(var(--primary));
|
||||
}
|
||||
|
||||
.tool-name {
|
||||
font-family: ui-monospace, SFMono-Regular, 'SF Mono', Monaco, monospace;
|
||||
font-size: 0.875rem;
|
||||
}
|
||||
|
||||
.tool-args-toggle {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.25rem;
|
||||
padding: 0.25rem 0.5rem;
|
||||
border-radius: 0.25rem;
|
||||
background: hsl(var(--muted) / 0.5);
|
||||
color: hsl(var(--muted-foreground));
|
||||
border: none;
|
||||
cursor: pointer;
|
||||
transition: background-color 0.15s;
|
||||
}
|
||||
|
||||
.tool-args-toggle:hover {
|
||||
background: hsl(var(--muted));
|
||||
}
|
||||
|
||||
.tool-args {
|
||||
margin: 0.5rem 0;
|
||||
padding: 0.5rem;
|
||||
background: hsl(var(--muted) / 0.3);
|
||||
border-radius: 0.375rem;
|
||||
overflow-x: auto;
|
||||
}
|
||||
|
||||
.tool-args-content {
|
||||
margin: 0;
|
||||
font-family: ui-monospace, SFMono-Regular, 'SF Mono', Monaco, monospace;
|
||||
font-size: 0.75rem;
|
||||
color: hsl(var(--muted-foreground));
|
||||
white-space: pre-wrap;
|
||||
word-break: break-word;
|
||||
}
|
||||
|
||||
.tool-result {
|
||||
margin-top: 0.5rem;
|
||||
}
|
||||
|
||||
.tool-result-label {
|
||||
font-size: 0.75rem;
|
||||
font-weight: 500;
|
||||
color: hsl(var(--muted-foreground));
|
||||
margin-bottom: 0.25rem;
|
||||
}
|
||||
</style>
@ -69,29 +69,6 @@
|
|||
return null;
|
||||
});
|
||||
|
||||
let toolCallContent = $derived.by((): ApiChatCompletionToolCall[] | string | null => {
|
||||
if (message.role === 'assistant') {
|
||||
const trimmedToolCalls = message.toolCalls?.trim();
|
||||
|
||||
if (!trimmedToolCalls) {
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
const parsed = JSON.parse(trimmedToolCalls);
|
||||
|
||||
if (Array.isArray(parsed)) {
|
||||
return parsed as ApiChatCompletionToolCall[];
|
||||
}
|
||||
} catch {
|
||||
// Harmony-only path: fall back to the raw string so issues surface visibly.
|
||||
}
|
||||
|
||||
return trimmedToolCalls;
|
||||
}
|
||||
return null;
|
||||
});
|
||||
|
||||
function handleCancelEdit() {
|
||||
isEditing = false;
|
||||
editedContent = message.content;
|
||||
|
|
@ -281,6 +258,5 @@
|
|||
{showDeleteDialog}
|
||||
{siblingInfo}
|
||||
{thinkingContent}
|
||||
{toolCallContent}
|
||||
/>
|
||||
{/if}
@ -5,7 +5,6 @@
|
|||
ChatMessageActions,
|
||||
ChatMessageStatistics,
|
||||
ChatMessageThinkingBlock,
|
||||
CopyToClipboardIcon,
|
||||
MarkdownContent,
|
||||
ModelsSelector
|
||||
} from '$lib/components/app';
|
||||
|
|
@ -14,7 +13,7 @@
|
|||
import { isLoading } from '$lib/stores/chat.svelte';
|
||||
import { autoResizeTextarea, copyToClipboard } from '$lib/utils';
|
||||
import { fade } from 'svelte/transition';
|
||||
import { Check, X, Wrench } from '@lucide/svelte';
|
||||
import { Check, X } from '@lucide/svelte';
|
||||
import { Button } from '$lib/components/ui/button';
|
||||
import { Checkbox } from '$lib/components/ui/checkbox';
|
||||
import { INPUT_CLASSES } from '$lib/constants/input-classes';
|
||||
|
|
@ -53,7 +52,6 @@
|
|||
siblingInfo?: ChatMessageSiblingInfo | null;
|
||||
textareaElement?: HTMLTextAreaElement;
|
||||
thinkingContent: string | null;
|
||||
toolCallContent: ApiChatCompletionToolCall[] | string | null;
|
||||
}
|
||||
|
||||
let {
|
||||
|
|
@ -80,15 +78,9 @@
|
|||
shouldBranchAfterEdit = false,
|
||||
siblingInfo = null,
|
||||
textareaElement = $bindable(),
|
||||
thinkingContent,
|
||||
toolCallContent = null
|
||||
thinkingContent
|
||||
}: Props = $props();
|
||||
|
||||
const toolCalls = $derived(
|
||||
Array.isArray(toolCallContent) ? (toolCallContent as ApiChatCompletionToolCall[]) : null
|
||||
);
|
||||
const fallbackToolCalls = $derived(typeof toolCallContent === 'string' ? toolCallContent : null);
|
||||
|
||||
// Check if content contains agentic tool call markers
|
||||
const isAgenticContent = $derived(
|
||||
messageContent?.includes('<!-- AGENTIC_TOOL_CALL_START -->') ?? false
|
||||
|
|
@ -128,58 +120,6 @@
|
|||
processingState.startMonitoring();
|
||||
}
|
||||
});
|
||||
|
||||
function formatToolCallBadge(toolCall: ApiChatCompletionToolCall, index: number) {
|
||||
const callNumber = index + 1;
|
||||
const functionName = toolCall.function?.name?.trim();
|
||||
const label = functionName || `Call #${callNumber}`;
|
||||
|
||||
const payload: Record<string, unknown> = {};
|
||||
|
||||
const id = toolCall.id?.trim();
|
||||
if (id) {
|
||||
payload.id = id;
|
||||
}
|
||||
|
||||
const type = toolCall.type?.trim();
|
||||
if (type) {
|
||||
payload.type = type;
|
||||
}
|
||||
|
||||
if (toolCall.function) {
|
||||
const fnPayload: Record<string, unknown> = {};
|
||||
|
||||
const name = toolCall.function.name?.trim();
|
||||
if (name) {
|
||||
fnPayload.name = name;
|
||||
}
|
||||
|
||||
const rawArguments = toolCall.function.arguments?.trim();
|
||||
if (rawArguments) {
|
||||
try {
|
||||
fnPayload.arguments = JSON.parse(rawArguments);
|
||||
} catch {
|
||||
fnPayload.arguments = rawArguments;
|
||||
}
|
||||
}
|
||||
|
||||
if (Object.keys(fnPayload).length > 0) {
|
||||
payload.function = fnPayload;
|
||||
}
|
||||
}
|
||||
|
||||
const formattedPayload = JSON.stringify(payload, null, 2);
|
||||
|
||||
return {
|
||||
label,
|
||||
tooltip: formattedPayload,
|
||||
copyValue: formattedPayload
|
||||
};
|
||||
}
|
||||
|
||||
function handleCopyToolCall(payload: string) {
|
||||
void copyToClipboard(payload, 'Tool call copied to clipboard');
|
||||
}
|
||||
</script>
|
||||
|
||||
<div
|
||||
|
|
@ -298,48 +238,6 @@
|
|||
{/if}
|
||||
</div>
|
||||
{/if}
|
||||
|
||||
{#if config().showToolCalls}
|
||||
{#if (toolCalls && toolCalls.length > 0) || fallbackToolCalls}
|
||||
<span class="inline-flex flex-wrap items-center gap-2 text-xs text-muted-foreground">
|
||||
<span class="inline-flex items-center gap-1">
|
||||
<Wrench class="h-3.5 w-3.5" />
|
||||
|
||||
<span>Tool calls:</span>
|
||||
</span>
|
||||
|
||||
{#if toolCalls && toolCalls.length > 0}
|
||||
{#each toolCalls as toolCall, index (toolCall.id ?? `${index}`)}
|
||||
{@const badge = formatToolCallBadge(toolCall, index)}
|
||||
<button
|
||||
type="button"
|
||||
class="tool-call-badge inline-flex cursor-pointer items-center gap-1 rounded-sm bg-muted-foreground/15 px-1.5 py-0.75"
|
||||
title={badge.tooltip}
|
||||
aria-label={`Copy tool call ${badge.label}`}
|
||||
onclick={() => handleCopyToolCall(badge.copyValue)}
|
||||
>
|
||||
{badge.label}
|
||||
<CopyToClipboardIcon
|
||||
text={badge.copyValue}
|
||||
ariaLabel={`Copy tool call ${badge.label}`}
|
||||
/>
|
||||
</button>
|
||||
{/each}
|
||||
{:else if fallbackToolCalls}
|
||||
<button
|
||||
type="button"
|
||||
class="tool-call-badge tool-call-badge--fallback inline-flex cursor-pointer items-center gap-1 rounded-sm bg-muted-foreground/15 px-1.5 py-0.75"
|
||||
title={fallbackToolCalls}
|
||||
aria-label="Copy tool call payload"
|
||||
onclick={() => handleCopyToolCall(fallbackToolCalls)}
|
||||
>
|
||||
{fallbackToolCalls}
|
||||
<CopyToClipboardIcon text={fallbackToolCalls} ariaLabel="Copy tool call payload" />
|
||||
</button>
|
||||
{/if}
|
||||
</span>
|
||||
{/if}
|
||||
{/if}
|
||||
</div>
|
||||
|
||||
{#if message.timestamp && !isEditing}
|
||||
|
|
@ -410,17 +308,4 @@
|
|||
white-space: pre-wrap;
|
||||
word-break: break-word;
|
||||
}
|
||||
|
||||
.tool-call-badge {
|
||||
max-width: 12rem;
|
||||
white-space: nowrap;
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
}
|
||||
|
||||
.tool-call-badge--fallback {
|
||||
max-width: 20rem;
|
||||
white-space: normal;
|
||||
word-break: break-word;
|
||||
}
|
||||
</style>
@ -271,11 +271,6 @@
|
|||
title: 'Developer',
|
||||
icon: Code,
|
||||
fields: [
|
||||
{
|
||||
key: 'showToolCalls',
|
||||
label: 'Show tool call labels',
|
||||
type: 'checkbox'
|
||||
},
|
||||
{
|
||||
key: 'disableReasoningFormat',
|
||||
label: 'Show raw LLM output',
@ -6,7 +6,6 @@ export const SETTING_CONFIG_DEFAULT: Record<string, string | number | boolean> =
|
|||
showSystemMessage: true,
|
||||
theme: 'system',
|
||||
showThoughtInProgress: false,
|
||||
showToolCalls: false,
|
||||
disableReasoningFormat: false,
|
||||
keepStatsVisible: false,
|
||||
showMessageStats: true,
|
||||
|
|
@ -94,8 +93,6 @@ export const SETTING_CONFIG_INFO: Record<string, string> = {
|
|||
max_tokens: 'The maximum number of tokens per output. Use -1 for infinite (no limit).',
|
||||
custom: 'Custom JSON parameters to send to the API. Must be valid JSON format.',
|
||||
showThoughtInProgress: 'Expand thought process by default when generating messages.',
|
||||
showToolCalls:
|
||||
'Display tool call labels and payloads from Harmony-compatible delta.tool_calls data below assistant messages.',
|
||||
disableReasoningFormat:
|
||||
'Show raw LLM output without backend parsing and frontend Markdown rendering to inspect streaming across different models.',
|
||||
keepStatsVisible: 'Keep processing statistics visible after generation finishes.',
@ -0,0 +1,354 @@
|
|||
/**
 * MCPHostManager - Aggregator for multiple MCP connections.
 *
 * Per the MCP architecture, the Host:
 * - Coordinates multiple Client instances (MCPServerConnection)
 * - Aggregates tools/resources/prompts from all servers
 * - Routes tool calls to the appropriate server
 * - Manages the lifecycle of all connections
 */
|
||||
|
||||
import { MCPServerConnection, type ToolExecutionResult } from './server-connection';
|
||||
import type {
|
||||
MCPClientConfig,
|
||||
MCPToolCall,
|
||||
ClientCapabilities,
|
||||
Implementation
|
||||
} from '$lib/types/mcp';
|
||||
import { MCPError } from '$lib/types/mcp';
|
||||
import type { Tool } from '@modelcontextprotocol/sdk/types.js';
|
||||
|
||||
export interface MCPHostManagerConfig {
|
||||
/** Server configurations keyed by server name */
|
||||
servers: MCPClientConfig['servers'];
|
||||
/** Client info to advertise to all servers */
|
||||
clientInfo?: Implementation;
|
||||
/** Default capabilities to advertise */
|
||||
capabilities?: ClientCapabilities;
|
||||
}
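A minimal usage sketch for this config shape; the server name, URL, and client info are illustrative, and MCPServerConfig is assumed to need at least a `url` (as createTransport() in server-connection.ts requires):

```ts
import { MCPHostManager, type MCPHostManagerConfig } from '$lib/mcp';

const hostConfig: MCPHostManagerConfig = {
  servers: {
    // hypothetical server entry; headers/credentials/sessionId are optional
    'docs-server': { url: 'http://localhost:3001/mcp' }
  },
  clientInfo: { name: 'llama-webui', version: '1.0.0' }
};

const hostManager = new MCPHostManager();
await hostManager.initialize(hostConfig);
console.log(hostManager.getToolNames()); // aggregated tool names across all servers
```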
|
||||
|
||||
export interface OpenAIToolDefinition {
|
||||
type: 'function';
|
||||
function: {
|
||||
name: string;
|
||||
description?: string;
|
||||
parameters: Record<string, unknown>;
|
||||
};
|
||||
}
|
||||
|
||||
export interface ServerStatus {
|
||||
name: string;
|
||||
isConnected: boolean;
|
||||
toolCount: number;
|
||||
error?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* MCPHostManager manages multiple MCP server connections.
|
||||
*
|
||||
* This corresponds to the "Host" role in MCP architecture:
|
||||
* - Coordinates multiple Client instances (MCPServerConnection)
|
||||
* - Aggregates tools from all connected servers
|
||||
* - Routes tool calls to the appropriate server
|
||||
*/
|
||||
export class MCPHostManager {
|
||||
private connections = new Map<string, MCPServerConnection>();
|
||||
private toolsIndex = new Map<string, string>(); // toolName → serverName
|
||||
private _isInitialized = false;
|
||||
private _initializationError: Error | null = null;
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
// Lifecycle
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
|
||||
async initialize(config: MCPHostManagerConfig): Promise<void> {
|
||||
console.log('[MCPHost] Starting initialization...');
|
||||
|
||||
// Clean up previous connections
|
||||
await this.shutdown();
|
||||
|
||||
const serverEntries = Object.entries(config.servers);
|
||||
if (serverEntries.length === 0) {
|
||||
console.log('[MCPHost] No servers configured');
|
||||
this._isInitialized = true;
|
||||
return;
|
||||
}
|
||||
|
||||
// Connect to each server in parallel
|
||||
const connectionPromises = serverEntries.map(async ([name, serverConfig]) => {
|
||||
try {
|
||||
const connection = new MCPServerConnection({
|
||||
name,
|
||||
server: serverConfig,
|
||||
clientInfo: config.clientInfo,
|
||||
capabilities: config.capabilities
|
||||
});
|
||||
|
||||
await connection.connect();
|
||||
return { name, connection, success: true, error: null };
|
||||
} catch (error) {
|
||||
const errorMessage = error instanceof Error ? error.message : String(error);
|
||||
console.error(`[MCPHost] Failed to connect to ${name}:`, errorMessage);
|
||||
return { name, connection: null, success: false, error: errorMessage };
|
||||
}
|
||||
});
|
||||
|
||||
const results = await Promise.all(connectionPromises);
|
||||
|
||||
// Store successful connections
|
||||
for (const result of results) {
|
||||
if (result.success && result.connection) {
|
||||
this.connections.set(result.name, result.connection);
|
||||
}
|
||||
}
|
||||
|
||||
// Build tools index
|
||||
this.rebuildToolsIndex();
|
||||
|
||||
const successCount = this.connections.size;
|
||||
const totalCount = serverEntries.length;
|
||||
|
||||
if (successCount === 0 && totalCount > 0) {
|
||||
this._initializationError = new Error('All MCP server connections failed');
|
||||
throw this._initializationError;
|
||||
}
|
||||
|
||||
this._isInitialized = true;
|
||||
this._initializationError = null;
|
||||
|
||||
console.log(
|
||||
`[MCPHost] Initialization complete: ${successCount}/${totalCount} servers connected, ` +
|
||||
`${this.toolsIndex.size} tools available`
|
||||
);
|
||||
}
|
||||
|
||||
async shutdown(): Promise<void> {
|
||||
if (this.connections.size === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(`[MCPHost] Shutting down ${this.connections.size} connections...`);
|
||||
|
||||
const shutdownPromises = Array.from(this.connections.values()).map((conn) =>
|
||||
conn.disconnect().catch((error) => {
|
||||
console.warn(`[MCPHost] Error disconnecting ${conn.serverName}:`, error);
|
||||
})
|
||||
);
|
||||
|
||||
await Promise.all(shutdownPromises);
|
||||
|
||||
this.connections.clear();
|
||||
this.toolsIndex.clear();
|
||||
this._isInitialized = false;
|
||||
|
||||
console.log('[MCPHost] Shutdown complete');
|
||||
}
|
||||
|
||||
private rebuildToolsIndex(): void {
|
||||
this.toolsIndex.clear();
|
||||
|
||||
for (const [serverName, connection] of this.connections) {
|
||||
for (const tool of connection.tools) {
|
||||
// Check for name conflicts
|
||||
if (this.toolsIndex.has(tool.name)) {
|
||||
console.warn(
|
||||
`[MCPHost] Tool name conflict: "${tool.name}" exists in ` +
|
||||
`"${this.toolsIndex.get(tool.name)}" and "${serverName}". ` +
|
||||
`Using tool from "${serverName}".`
|
||||
);
|
||||
}
|
||||
this.toolsIndex.set(tool.name, serverName);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
// Tool Aggregation
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Returns ALL tools from ALL connected servers.
|
||||
* This is what we send to LLM as available tools.
|
||||
*/
|
||||
getAllTools(): Tool[] {
|
||||
const allTools: Tool[] = [];
|
||||
for (const connection of this.connections.values()) {
|
||||
allTools.push(...connection.tools);
|
||||
}
|
||||
return allTools;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns tools in OpenAI function calling format.
|
||||
* Ready to be sent to /v1/chat/completions API.
|
||||
*/
|
||||
getToolDefinitionsForLLM(): OpenAIToolDefinition[] {
|
||||
return this.getAllTools().map((tool) => ({
|
||||
type: 'function' as const,
|
||||
function: {
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
parameters: (tool.inputSchema as Record<string, unknown>) ?? {
|
||||
type: 'object',
|
||||
properties: {},
|
||||
required: []
|
||||
}
|
||||
}
|
||||
}));
|
||||
}
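As a sketch, the aggregated definitions can be attached directly to a chat completion request; `hostManager` is assumed to be an initialized MCPHostManager and the prompt is illustrative:

```ts
const requestBody = {
  stream: true,
  messages: [{ role: 'user', content: 'What is the weather in Warsaw?' }],
  tools: hostManager.getToolDefinitionsForLLM()
};

// Same endpoint the webui streams from elsewhere in this commit.
const response = await fetch('./v1/chat/completions', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify(requestBody)
});
```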
|
||||
|
||||
/**
|
||||
* Returns names of all available tools.
|
||||
*/
|
||||
getToolNames(): string[] {
|
||||
return Array.from(this.toolsIndex.keys());
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a tool exists.
|
||||
*/
|
||||
hasTool(toolName: string): boolean {
|
||||
return this.toolsIndex.has(toolName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get which server provides a specific tool.
|
||||
*/
|
||||
getToolServer(toolName: string): string | undefined {
|
||||
return this.toolsIndex.get(toolName);
|
||||
}
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
// Tool Execution
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Execute a tool call, automatically routing to the appropriate server.
|
||||
* Accepts the OpenAI-style tool call format.
|
||||
*/
|
||||
async executeTool(toolCall: MCPToolCall, signal?: AbortSignal): Promise<ToolExecutionResult> {
|
||||
const toolName = toolCall.function.name;
|
||||
|
||||
// Find which server handles this tool
|
||||
const serverName = this.toolsIndex.get(toolName);
|
||||
if (!serverName) {
|
||||
throw new MCPError(`Unknown tool: ${toolName}`, -32601);
|
||||
}
|
||||
|
||||
const connection = this.connections.get(serverName);
|
||||
if (!connection) {
|
||||
throw new MCPError(`Server "${serverName}" is not connected`, -32000);
|
||||
}
|
||||
|
||||
// Parse arguments
|
||||
const args = this.parseToolArguments(toolCall.function.arguments);
|
||||
|
||||
// Delegate to the appropriate server
|
||||
return connection.callTool({ name: toolName, arguments: args }, signal);
|
||||
}
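A usage sketch for the routing above; the call id, tool name, and arguments are illustrative:

```ts
const result = await hostManager.executeTool({
  id: 'call_0',
  function: { name: 'get_weather', arguments: '{"city":"Warsaw"}' }
});
console.log(result.isError ? `Tool error: ${result.content}` : result.content);
```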
|
||||
|
||||
/**
|
||||
* Execute a tool by name with arguments object.
|
||||
* Simpler interface for direct tool calls.
|
||||
*/
|
||||
async executeToolByName(
|
||||
toolName: string,
|
||||
args: Record<string, unknown>,
|
||||
signal?: AbortSignal
|
||||
): Promise<ToolExecutionResult> {
|
||||
const serverName = this.toolsIndex.get(toolName);
|
||||
if (!serverName) {
|
||||
throw new MCPError(`Unknown tool: ${toolName}`, -32601);
|
||||
}
|
||||
|
||||
const connection = this.connections.get(serverName);
|
||||
if (!connection) {
|
||||
throw new MCPError(`Server "${serverName}" is not connected`, -32000);
|
||||
}
|
||||
|
||||
return connection.callTool({ name: toolName, arguments: args }, signal);
|
||||
}
|
||||
|
||||
private parseToolArguments(args: string | Record<string, unknown>): Record<string, unknown> {
|
||||
if (typeof args === 'string') {
|
||||
const trimmed = args.trim();
|
||||
if (trimmed === '') {
|
||||
return {};
|
||||
}
|
||||
|
||||
try {
|
||||
const parsed = JSON.parse(trimmed);
|
||||
if (typeof parsed !== 'object' || parsed === null || Array.isArray(parsed)) {
|
||||
throw new MCPError(
|
||||
`Tool arguments must be an object, got ${Array.isArray(parsed) ? 'array' : typeof parsed}`,
|
||||
-32602
|
||||
);
|
||||
}
|
||||
return parsed as Record<string, unknown>;
|
||||
} catch (error) {
|
||||
if (error instanceof MCPError) {
|
||||
throw error;
|
||||
}
|
||||
throw new MCPError(
|
||||
`Failed to parse tool arguments as JSON: ${(error as Error).message}`,
|
||||
-32700
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if (typeof args === 'object' && args !== null && !Array.isArray(args)) {
|
||||
return args;
|
||||
}
|
||||
|
||||
throw new MCPError(`Invalid tool arguments type: ${typeof args}`, -32602);
|
||||
}
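A behaviour sketch for the argument handling above (the method is private, so these direct calls are for illustration only; inputs are made up):

```ts
parseToolArguments('{"city":"Warsaw"}'); // → { city: 'Warsaw' }
parseToolArguments({ city: 'Warsaw' });  // → { city: 'Warsaw' } (already an object)
parseToolArguments('');                  // → {} (empty string means no arguments)
parseToolArguments('[1, 2, 3]');         // throws MCPError with code -32602
parseToolArguments('not json');          // throws MCPError with code -32700
```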
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
// State
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
|
||||
get isInitialized(): boolean {
|
||||
return this._isInitialized;
|
||||
}
|
||||
|
||||
get initializationError(): Error | null {
|
||||
return this._initializationError;
|
||||
}
|
||||
|
||||
get connectedServerCount(): number {
|
||||
return this.connections.size;
|
||||
}
|
||||
|
||||
get connectedServerNames(): string[] {
|
||||
return Array.from(this.connections.keys());
|
||||
}
|
||||
|
||||
get toolCount(): number {
|
||||
return this.toolsIndex.size;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get status of all configured servers.
|
||||
*/
|
||||
getServersStatus(): ServerStatus[] {
|
||||
const statuses: ServerStatus[] = [];
|
||||
|
||||
for (const [name, connection] of this.connections) {
|
||||
statuses.push({
|
||||
name,
|
||||
isConnected: connection.isConnected,
|
||||
toolCount: connection.tools.length,
|
||||
error: connection.lastError?.message
|
||||
});
|
||||
}
|
||||
|
||||
return statuses;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a specific server connection (for advanced use cases).
|
||||
*/
|
||||
getServerConnection(name: string): MCPServerConnection | undefined {
|
||||
return this.connections.get(name);
|
||||
}
|
||||
}
@ -1,3 +1,21 @@
|
|||
// New architecture exports
|
||||
export { MCPHostManager } from './host-manager';
|
||||
export type { MCPHostManagerConfig, OpenAIToolDefinition, ServerStatus } from './host-manager';
|
||||
export { MCPServerConnection } from './server-connection';
|
||||
export type {
|
||||
MCPServerConnectionConfig,
|
||||
ToolCallParams,
|
||||
ToolExecutionResult
|
||||
} from './server-connection';
|
||||
|
||||
// Legacy client export (deprecated - use MCPHostManager instead)
|
||||
export { MCPClient } from './client';
|
||||
|
||||
// Types
|
||||
export { MCPError } from '$lib/types/mcp';
|
||||
export type { MCPClientConfig, MCPServerConfig, MCPToolCall, IMCPClient } from '$lib/types/mcp';
|
||||
export type {
|
||||
MCPClientConfig,
|
||||
MCPServerConfig,
|
||||
MCPToolCall,
|
||||
MCPServerSettingsEntry
|
||||
} from '$lib/types/mcp';
@ -0,0 +1,289 @@
|
|||
/**
 * MCPServerConnection - Wrapper around the SDK Client for a single MCP server.
 *
 * Per the MCP architecture:
 * - One MCPServerConnection = one connection = one SDK Client
 * - Isolation between servers - each has its own transport and capabilities
 * - Owns its lifecycle (connect, disconnect)
 */
|
||||
|
||||
import { Client } from '@modelcontextprotocol/sdk/client';
|
||||
import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js';
|
||||
import { SSEClientTransport } from '@modelcontextprotocol/sdk/client/sse.js';
|
||||
import type { Tool } from '@modelcontextprotocol/sdk/types.js';
|
||||
import type { Transport } from '@modelcontextprotocol/sdk/shared/transport.js';
|
||||
import type { MCPServerConfig, ClientCapabilities, Implementation } from '$lib/types/mcp';
|
||||
import { MCPError } from '$lib/types/mcp';
|
||||
import { DEFAULT_MCP_CONFIG } from '$lib/constants/mcp';
|
||||
|
||||
// Type for tool call result content item
|
||||
interface ToolResultContentItem {
|
||||
type: string;
|
||||
text?: string;
|
||||
data?: string;
|
||||
mimeType?: string;
|
||||
resource?: { text?: string; blob?: string; uri?: string };
|
||||
}
|
||||
|
||||
// Type for tool call result
|
||||
interface ToolCallResult {
|
||||
content?: ToolResultContentItem[];
|
||||
isError?: boolean;
|
||||
_meta?: Record<string, unknown>;
|
||||
}
|
||||
|
||||
export interface MCPServerConnectionConfig {
|
||||
/** Unique server name/identifier */
|
||||
name: string;
|
||||
/** Server configuration */
|
||||
server: MCPServerConfig;
|
||||
/** Client info to advertise */
|
||||
clientInfo?: Implementation;
|
||||
/** Capabilities to advertise */
|
||||
capabilities?: ClientCapabilities;
|
||||
}
|
||||
|
||||
export interface ToolCallParams {
|
||||
name: string;
|
||||
arguments: Record<string, unknown>;
|
||||
}
|
||||
|
||||
export interface ToolExecutionResult {
|
||||
content: string;
|
||||
isError: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents a single connection to an MCP server.
|
||||
* Wraps the SDK Client and provides a clean interface for tool operations.
|
||||
*/
|
||||
export class MCPServerConnection {
|
||||
private client: Client;
|
||||
private transport: Transport | null = null;
|
||||
private _tools: Tool[] = [];
|
||||
private _isConnected = false;
|
||||
private _lastError: Error | null = null;
|
||||
|
||||
readonly serverName: string;
|
||||
readonly config: MCPServerConnectionConfig;
|
||||
|
||||
constructor(config: MCPServerConnectionConfig) {
|
||||
this.serverName = config.name;
|
||||
this.config = config;
|
||||
|
||||
const clientInfo = config.clientInfo ?? DEFAULT_MCP_CONFIG.clientInfo;
|
||||
const capabilities = config.capabilities ?? DEFAULT_MCP_CONFIG.capabilities;
|
||||
|
||||
// Create SDK Client with our host info
|
||||
this.client = new Client(
|
||||
{
|
||||
name: clientInfo.name,
|
||||
version: clientInfo.version ?? '1.0.0'
|
||||
},
|
||||
{ capabilities }
|
||||
);
|
||||
}
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
// Lifecycle
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
|
||||
async connect(): Promise<void> {
|
||||
if (this._isConnected) {
|
||||
console.log(`[MCP][${this.serverName}] Already connected`);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
console.log(`[MCP][${this.serverName}] Creating transport...`);
|
||||
this.transport = await this.createTransport();
|
||||
|
||||
console.log(`[MCP][${this.serverName}] Connecting to server...`);
|
||||
// SDK Client.connect() performs:
|
||||
// 1. initialize request → server
|
||||
// 2. Receives server capabilities
|
||||
// 3. Sends initialized notification
|
||||
await this.client.connect(this.transport);
|
||||
|
||||
console.log(`[MCP][${this.serverName}] Connected, listing tools...`);
|
||||
await this.refreshTools();
|
||||
|
||||
this._isConnected = true;
|
||||
this._lastError = null;
|
||||
console.log(
|
||||
`[MCP][${this.serverName}] Initialization complete with ${this._tools.length} tools`
|
||||
);
|
||||
} catch (error) {
|
||||
this._lastError = error instanceof Error ? error : new Error(String(error));
|
||||
console.error(`[MCP][${this.serverName}] Connection failed:`, error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
async disconnect(): Promise<void> {
|
||||
if (!this._isConnected) {
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(`[MCP][${this.serverName}] Disconnecting...`);
|
||||
try {
|
||||
await this.client.close();
|
||||
} catch (error) {
|
||||
console.warn(`[MCP][${this.serverName}] Error during disconnect:`, error);
|
||||
}
|
||||
|
||||
this._isConnected = false;
|
||||
this._tools = [];
|
||||
this.transport = null;
|
||||
}
|
||||
|
||||
private async createTransport(): Promise<Transport> {
|
||||
const serverConfig = this.config.server;
|
||||
|
||||
if (!serverConfig.url) {
|
||||
throw new Error('MCP server configuration is missing url');
|
||||
}
|
||||
|
||||
const url = new URL(serverConfig.url);
|
||||
const requestInit: RequestInit = {};
|
||||
|
||||
if (serverConfig.headers) {
|
||||
requestInit.headers = serverConfig.headers;
|
||||
}
|
||||
if (serverConfig.credentials) {
|
||||
requestInit.credentials = serverConfig.credentials;
|
||||
}
|
||||
|
||||
// Try StreamableHTTP first (modern), fall back to SSE (legacy)
|
||||
try {
|
||||
console.log(`[MCP][${this.serverName}] Trying StreamableHTTP transport...`);
|
||||
const transport = new StreamableHTTPClientTransport(url, {
|
||||
requestInit,
|
||||
sessionId: serverConfig.sessionId
|
||||
});
|
||||
return transport;
|
||||
} catch (httpError) {
|
||||
console.warn(
|
||||
`[MCP][${this.serverName}] StreamableHTTP failed, trying SSE transport...`,
|
||||
httpError
|
||||
);
|
||||
|
||||
try {
|
||||
const transport = new SSEClientTransport(url, {
|
||||
requestInit
|
||||
});
|
||||
return transport;
|
||||
} catch (sseError) {
|
||||
const httpMsg = httpError instanceof Error ? httpError.message : String(httpError);
|
||||
const sseMsg = sseError instanceof Error ? sseError.message : String(sseError);
|
||||
throw new Error(`Failed to create transport. StreamableHTTP: ${httpMsg}; SSE: ${sseMsg}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
// Tool Discovery
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
|
||||
private async refreshTools(): Promise<void> {
|
||||
try {
|
||||
const toolsResult = await this.client.listTools();
|
||||
this._tools = toolsResult.tools ?? [];
|
||||
} catch (error) {
|
||||
console.warn(`[MCP][${this.serverName}] Failed to list tools:`, error);
|
||||
this._tools = [];
|
||||
}
|
||||
}
|
||||
|
||||
get tools(): Tool[] {
|
||||
return this._tools;
|
||||
}
|
||||
|
||||
get toolNames(): string[] {
|
||||
return this._tools.map((t) => t.name);
|
||||
}
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
// Tool Execution
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
|
||||
async callTool(params: ToolCallParams, signal?: AbortSignal): Promise<ToolExecutionResult> {
|
||||
if (!this._isConnected) {
|
||||
throw new MCPError(`Server ${this.serverName} is not connected`, -32000);
|
||||
}
|
||||
|
||||
if (signal?.aborted) {
|
||||
throw new DOMException('Aborted', 'AbortError');
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await this.client.callTool(
|
||||
{ name: params.name, arguments: params.arguments },
|
||||
undefined,
|
||||
{ signal }
|
||||
);
|
||||
|
||||
return {
|
||||
content: this.formatToolResult(result as ToolCallResult),
|
||||
isError: (result as ToolCallResult).isError ?? false
|
||||
};
|
||||
} catch (error) {
|
||||
if (error instanceof DOMException && error.name === 'AbortError') {
|
||||
throw error;
|
||||
}
|
||||
const message = error instanceof Error ? error.message : String(error);
|
||||
throw new MCPError(`Tool execution failed: ${message}`, -32603);
|
||||
}
|
||||
}
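A minimal sketch of driving a single connection directly, without going through MCPHostManager; the server URL and tool name are made up:

```ts
import { MCPServerConnection } from '$lib/mcp';

const conn = new MCPServerConnection({
  name: 'docs-server',
  server: { url: 'http://localhost:3001/mcp' }
});

await conn.connect(); // negotiates capabilities, then lists tools
console.log(conn.toolNames);

const res = await conn.callTool({ name: 'search_docs', arguments: { query: 'mcp' } });
console.log(res.isError ? 'Tool reported an error' : res.content);

await conn.disconnect();
```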
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
// State
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
|
||||
get isConnected(): boolean {
|
||||
return this._isConnected;
|
||||
}
|
||||
|
||||
get lastError(): Error | null {
|
||||
return this._lastError;
|
||||
}
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
// Formatting
|
||||
// ─────────────────────────────────────────────────────────────────────────
|
||||
|
||||
private formatToolResult(result: ToolCallResult): string {
|
||||
const content = result.content;
|
||||
if (Array.isArray(content)) {
|
||||
return content
|
||||
.map((item) => this.formatSingleContent(item))
|
||||
.filter(Boolean)
|
||||
.join('\n');
|
||||
}
|
||||
return '';
|
||||
}
|
||||
|
||||
private formatSingleContent(content: ToolResultContentItem): string {
|
||||
if (content.type === 'text' && content.text) {
|
||||
return content.text;
|
||||
}
|
||||
if (content.type === 'image' && content.data) {
|
||||
return `data:${content.mimeType ?? 'image/png'};base64,${content.data}`;
|
||||
}
|
||||
if (content.type === 'resource' && content.resource) {
|
||||
const resource = content.resource;
|
||||
if (resource.text) {
|
||||
return resource.text;
|
||||
}
|
||||
if (resource.blob) {
|
||||
return resource.blob;
|
||||
}
|
||||
return JSON.stringify(resource);
|
||||
}
|
||||
// audio type
|
||||
if (content.data && content.mimeType) {
|
||||
return `data:${content.mimeType};base64,${content.data}`;
|
||||
}
|
||||
return JSON.stringify(content);
|
||||
}
|
||||
}
@ -1,15 +1,5 @@
|
|||
import { getAuthHeaders, getJsonHeaders } from '$lib/utils';
|
||||
import { getJsonHeaders } from '$lib/utils';
|
||||
import { AttachmentType } from '$lib/enums';
|
||||
import { config } from '$lib/stores/settings.svelte';
|
||||
import { getAgenticConfig } from '$lib/config/agentic';
|
||||
import { OpenAISseClient, type OpenAISseTurnResult } from '$lib/agentic/openai-sse-client';
|
||||
import type {
|
||||
AgenticChatCompletionRequest,
|
||||
AgenticMessage,
|
||||
AgenticToolCallList
|
||||
} from '$lib/agentic/types';
|
||||
import { toAgenticMessages } from '$lib/agentic/types';
|
||||
import type { MCPToolCall, IMCPClient } from '$lib/mcp';
|
||||
|
||||
/**
|
||||
* ChatService - Low-level API communication layer for Chat Completions
|
||||
|
|
@ -62,8 +52,7 @@ export class ChatService {
|
|||
messages: ApiChatMessageData[] | (DatabaseMessage & { extra?: DatabaseMessageExtra[] })[],
|
||||
options: SettingsChatServiceOptions = {},
|
||||
conversationId?: string,
|
||||
signal?: AbortSignal,
|
||||
mcpClient?: IMCPClient
|
||||
signal?: AbortSignal
|
||||
): Promise<string | void> {
|
||||
const {
|
||||
stream,
|
||||
|
|
@ -184,38 +173,6 @@ export class ChatService {
|
|||
}
|
||||
}
|
||||
|
||||
// MCP agentic orchestration
|
||||
// Run agentic loop if MCP client is provided and agentic mode is enabled
|
||||
const agenticConfig = getAgenticConfig(config());
|
||||
if (stream && agenticConfig.enabled && mcpClient) {
|
||||
console.log('[ChatService] Running agentic loop with MCP client');
|
||||
try {
|
||||
const agenticResult = await ChatService.runAgenticLoop({
|
||||
mcpClient,
|
||||
messages: normalizedMessages,
|
||||
requestBody,
|
||||
agenticConfig,
|
||||
callbacks: {
|
||||
onChunk,
|
||||
onReasoningChunk,
|
||||
onToolCallChunk,
|
||||
onModel,
|
||||
onComplete,
|
||||
onError,
|
||||
onTimings
|
||||
},
|
||||
signal
|
||||
});
|
||||
|
||||
if (agenticResult) {
|
||||
return; // Agentic loop handled the request
|
||||
}
|
||||
// Fall through to standard flow if agentic loop returned false
|
||||
} catch (error) {
|
||||
console.warn('[ChatService] Agentic loop failed, falling back to standard flow:', error);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch(`./v1/chat/completions`, {
|
||||
method: 'POST',
|
||||
|
|
@ -824,251 +781,4 @@ export class ChatService {
|
|||
|
||||
onTimingsCallback(timings, promptProgress);
|
||||
}
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
// Agentic Orchestration
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Run the agentic orchestration loop with MCP tools.
|
||||
* The MCP client is passed from the store layer - ChatService remains stateless.
|
||||
*
|
||||
* @param params - Parameters for the agentic loop including the MCP client
|
||||
* @returns true if agentic loop handled the request
|
||||
*/
|
||||
private static async runAgenticLoop(params: {
|
||||
mcpClient: IMCPClient;
|
||||
messages: ApiChatMessageData[];
|
||||
requestBody: ApiChatCompletionRequest;
|
||||
agenticConfig: ReturnType<typeof getAgenticConfig>;
|
||||
callbacks: {
|
||||
onChunk?: (chunk: string) => void;
|
||||
onReasoningChunk?: (chunk: string) => void;
|
||||
onToolCallChunk?: (serializedToolCalls: string) => void;
|
||||
onModel?: (model: string) => void;
|
||||
onComplete?: (
|
||||
content: string,
|
||||
reasoningContent?: string,
|
||||
timings?: ChatMessageTimings,
|
||||
toolCalls?: string
|
||||
) => void;
|
||||
onError?: (error: Error) => void;
|
||||
onTimings?: (timings: ChatMessageTimings, promptProgress?: ChatMessagePromptProgress) => void;
|
||||
};
|
||||
signal?: AbortSignal;
|
||||
}): Promise<boolean> {
|
||||
const { mcpClient, messages, requestBody, agenticConfig, callbacks, signal } = params;
|
||||
const { onChunk, onReasoningChunk, onToolCallChunk, onModel, onComplete, onError, onTimings } =
|
||||
callbacks;
|
||||
|
||||
console.log(`[ChatService] Running agentic loop with ${mcpClient.listTools().length} tools`);
|
||||
|
||||
// Set up LLM client
|
||||
const llmClient = new OpenAISseClient({
|
||||
url: './v1/chat/completions',
|
||||
buildHeaders: () => getAuthHeaders()
|
||||
});
|
||||
|
||||
// Prepare session state
|
||||
const sessionMessages: AgenticMessage[] = toAgenticMessages(messages);
|
||||
const tools = await mcpClient.getToolsDefinition();
|
||||
const allToolCalls: ApiChatCompletionToolCall[] = [];
|
||||
let capturedTimings: ChatMessageTimings | undefined;
|
||||
|
||||
const requestWithoutMessages = { ...requestBody };
|
||||
delete (requestWithoutMessages as Partial<ApiChatCompletionRequest>).messages;
|
||||
const requestBase: AgenticChatCompletionRequest = {
|
||||
...(requestWithoutMessages as Omit<ApiChatCompletionRequest, 'messages'>),
|
||||
stream: true,
|
||||
messages: []
|
||||
};
|
||||
|
||||
const maxTurns = agenticConfig.maxTurns;
|
||||
const maxToolPreviewLines = agenticConfig.maxToolPreviewLines;
|
||||
|
||||
// Run agentic loop
|
||||
for (let turn = 0; turn < maxTurns; turn++) {
|
||||
if (signal?.aborted) {
|
||||
onComplete?.('', undefined, capturedTimings, undefined);
|
||||
return true;
|
||||
}
|
||||
|
||||
const llmRequest: AgenticChatCompletionRequest = {
|
||||
...requestBase,
|
||||
messages: sessionMessages,
|
||||
tools: tools.length > 0 ? tools : undefined
|
||||
};
|
||||
|
||||
const shouldFilterReasoning = agenticConfig.filterReasoningAfterFirstTurn && turn > 0;
|
||||
|
||||
let turnResult: OpenAISseTurnResult;
|
||||
try {
|
||||
turnResult = await llmClient.stream(
|
||||
llmRequest,
|
||||
{
|
||||
onChunk,
|
||||
onReasoningChunk: shouldFilterReasoning ? undefined : onReasoningChunk,
|
||||
onModel,
|
||||
onFirstValidChunk: undefined,
|
||||
onProcessingUpdate: (timings, progress) => {
|
||||
ChatService.notifyTimings(timings, progress, onTimings);
|
||||
if (timings) capturedTimings = timings;
|
||||
}
|
||||
},
|
||||
signal
|
||||
);
|
||||
} catch (error) {
|
||||
if (signal?.aborted) {
|
||||
onComplete?.('', undefined, capturedTimings, undefined);
|
||||
return true;
|
||||
}
|
||||
const normalizedError = error instanceof Error ? error : new Error('LLM stream error');
|
||||
onError?.(normalizedError);
|
||||
onChunk?.(`\n\n\`\`\`\nUpstream LLM error:\n${normalizedError.message}\n\`\`\`\n`);
|
||||
onComplete?.('', undefined, capturedTimings, undefined);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check if we should stop (no tool calls or finish reason isn't tool_calls)
|
||||
if (
|
||||
turnResult.toolCalls.length === 0 ||
|
||||
(turnResult.finishReason && turnResult.finishReason !== 'tool_calls')
|
||||
) {
|
||||
onComplete?.('', undefined, capturedTimings, undefined);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Normalize and validate tool calls
|
||||
const normalizedCalls = ChatService.normalizeToolCalls(turnResult.toolCalls);
|
||||
if (normalizedCalls.length === 0) {
|
||||
onComplete?.('', undefined, capturedTimings, undefined);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Accumulate tool calls
|
||||
for (const call of normalizedCalls) {
|
||||
allToolCalls.push({
|
||||
id: call.id,
|
||||
type: call.type,
|
||||
function: call.function ? { ...call.function } : undefined
|
||||
});
|
||||
}
|
||||
onToolCallChunk?.(JSON.stringify(allToolCalls));
|
||||
|
||||
// Add assistant message with tool calls to session
|
||||
sessionMessages.push({
|
||||
role: 'assistant',
|
||||
content: turnResult.content || undefined,
|
||||
tool_calls: normalizedCalls
|
||||
});
|
||||
|
||||
// Execute each tool call
|
||||
for (const toolCall of normalizedCalls) {
|
||||
if (signal?.aborted) {
|
||||
onComplete?.('', undefined, capturedTimings, undefined);
|
||||
return true;
|
||||
}
|
||||
|
||||
const mcpCall: MCPToolCall = {
|
||||
id: toolCall.id,
|
||||
function: {
|
||||
name: toolCall.function.name,
|
||||
arguments: toolCall.function.arguments
|
||||
}
|
||||
};
|
||||
|
||||
let result: string;
|
||||
try {
|
||||
result = await mcpClient.execute(mcpCall, signal);
|
||||
} catch (error) {
|
||||
if (error instanceof Error && error.name !== 'AbortError') {
|
||||
onError?.(error);
|
||||
}
|
||||
result = `Error: ${error instanceof Error ? error.message : String(error)}`;
|
||||
}
|
||||
|
||||
if (signal?.aborted) {
|
||||
onComplete?.('', undefined, capturedTimings, undefined);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Emit tool preview
|
||||
ChatService.emitToolPreview(toolCall, result, maxToolPreviewLines, onChunk);
|
||||
|
||||
// Add tool result to session (sanitize base64 images for context)
|
||||
const contextValue = ChatService.isBase64Image(result)
|
||||
? '[Image displayed to user]'
|
||||
: result;
|
||||
sessionMessages.push({
|
||||
role: 'tool',
|
||||
tool_call_id: toolCall.id,
|
||||
content: contextValue
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Turn limit reached
|
||||
onChunk?.('\n\n```\nTurn limit reached\n```\n');
|
||||
onComplete?.('', undefined, capturedTimings, undefined);
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize tool calls from LLM response
|
||||
*/
|
||||
private static normalizeToolCalls(toolCalls: ApiChatCompletionToolCall[]): AgenticToolCallList {
|
||||
if (!toolCalls) return [];
|
||||
return toolCalls.map((call, index) => ({
|
||||
id: call?.id ?? `tool_${index}`,
|
||||
type: (call?.type as 'function') ?? 'function',
|
||||
function: {
|
||||
name: call?.function?.name ?? '',
|
||||
arguments: call?.function?.arguments ?? ''
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
/**
|
||||
* Emit tool call preview to the chunk callback
|
||||
*/
|
||||
private static emitToolPreview(
|
||||
toolCall: AgenticToolCallList[number],
|
||||
result: string,
|
||||
maxLines: number,
|
||||
emit?: (chunk: string) => void
|
||||
): void {
|
||||
if (!emit) return;
|
||||
|
||||
const toolName = toolCall.function.name;
|
||||
const toolArgs = toolCall.function.arguments;
|
||||
|
||||
let output = `\n\n<!-- AGENTIC_TOOL_CALL_START -->`;
|
||||
output += `\n<!-- TOOL_NAME: ${toolName} -->`;
|
||||
output += `\n<!-- TOOL_ARGS: ${toolArgs.replace(/\n/g, '\\n')} -->`;
|
||||
|
||||
if (ChatService.isBase64Image(result)) {
|
||||
output += `\n})`;
|
||||
} else {
|
||||
const lines = result.split('\n');
|
||||
const trimmedLines = lines.length > maxLines ? lines.slice(-maxLines) : lines;
|
||||
output += `\n\`\`\`\n${trimmedLines.join('\n')}\n\`\`\``;
|
||||
}
|
||||
|
||||
output += `\n<!-- AGENTIC_TOOL_CALL_END -->\n`;
|
||||
emit(output);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if content is a base64 image
|
||||
*/
|
||||
private static isBase64Image(content: string): boolean {
|
||||
const trimmed = content.trim();
|
||||
if (!trimmed.startsWith('data:image/')) return false;
|
||||
|
||||
const match = trimmed.match(/^data:image\/(png|jpe?g|gif|webp);base64,([A-Za-z0-9+/]+=*)$/);
|
||||
if (!match) return false;
|
||||
|
||||
const base64Payload = match[2];
|
||||
return base64Payload.length > 0 && base64Payload.length % 4 === 0;
|
||||
}
|
||||
}
@ -69,7 +69,6 @@ export const SYNCABLE_PARAMETERS: SyncableParameter[] = [
|
|||
type: 'boolean',
|
||||
canSync: true
|
||||
},
|
||||
{ key: 'showToolCalls', serverKey: 'showToolCalls', type: 'boolean', canSync: true },
|
||||
{
|
||||
key: 'disableReasoningFormat',
|
||||
serverKey: 'disableReasoningFormat',
@ -0,0 +1,473 @@
|
|||
/**
|
||||
* agenticStore - Orchestration of the agentic loop with MCP tools
|
||||
*
|
||||
* This store is responsible for:
|
||||
* - Managing the agentic loop lifecycle
|
||||
* - Coordinating between LLM and MCP tool execution
|
||||
* - Tracking session state (messages, turns, tool calls)
|
||||
* - Emitting streaming content and tool results
|
||||
*
|
||||
* **Architecture & Relationships:**
|
||||
* - **mcpStore**: Provides MCP host manager for tool execution
|
||||
* - **chatStore**: Triggers agentic flow and receives streaming updates
|
||||
* - **OpenAISseClient**: LLM communication for streaming responses
|
||||
* - **settingsStore**: Provides agentic configuration (maxTurns, etc.)
|
||||
*
|
||||
* **Key Features:**
|
||||
* - Stateful session management (unlike stateless ChatService)
|
||||
* - Multi-turn tool call orchestration
|
||||
* - Automatic routing of tool calls to appropriate MCP servers
|
||||
* - Raw LLM output streaming (UI formatting is separate concern)
|
||||
*/
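A sketch of how chatStore might hand a request off to this store, assuming the class is exported as an `agenticStore` singleton as the header comment suggests; the helpers, option values, and abort controller are illustrative:

```ts
const { handled, error } = await agenticStore.runAgenticFlow({
  messages: conversationMessages, // ApiChatMessageData[] or DB messages (hypothetical variable)
  options: { stream: true, temperature: 0.7 },
  callbacks: {
    onChunk: (chunk) => appendToAssistantMessage(chunk), // hypothetical UI helper
    onComplete: () => finalizeAssistantMessage(),        // hypothetical UI helper
    onError: (err) => console.error('[chatStore] agentic flow failed:', err)
  },
  signal: abortController.signal // hypothetical AbortController owned by chatStore
});

if (!handled) {
  // MCP disabled or no tools available: fall back to the standard ChatService flow.
}
```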
|
||||
|
||||
import { mcpStore } from '$lib/stores/mcp.svelte';
|
||||
import { OpenAISseClient, type OpenAISseTurnResult } from '$lib/agentic/openai-sse-client';
|
||||
import {
|
||||
toAgenticMessages,
|
||||
type AgenticMessage,
|
||||
type AgenticChatCompletionRequest,
|
||||
type AgenticToolCallList
|
||||
} from '$lib/agentic/types';
|
||||
import type { ApiChatCompletionToolCall, ApiChatMessageData } from '$lib/types/api';
|
||||
import type { ChatMessagePromptProgress, ChatMessageTimings } from '$lib/types/chat';
|
||||
import type { MCPToolCall } from '$lib/types/mcp';
|
||||
import type { DatabaseMessage, DatabaseMessageExtra } from '$lib/types/database';
|
||||
import { getAgenticConfig } from '$lib/config/agentic';
|
||||
import { config } from '$lib/stores/settings.svelte';
|
||||
import { getAuthHeaders } from '$lib/utils';
|
||||
import { ChatService } from '$lib/services';
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
// Types
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
export interface AgenticFlowCallbacks {
|
||||
onChunk?: (chunk: string) => void;
|
||||
onReasoningChunk?: (chunk: string) => void;
|
||||
onToolCallChunk?: (serializedToolCalls: string) => void;
|
||||
onModel?: (model: string) => void;
|
||||
onComplete?: (
|
||||
content: string,
|
||||
reasoningContent?: string,
|
||||
timings?: ChatMessageTimings,
|
||||
toolCalls?: string
|
||||
) => void;
|
||||
onError?: (error: Error) => void;
|
||||
onTimings?: (timings?: ChatMessageTimings, promptProgress?: ChatMessagePromptProgress) => void;
|
||||
}
|
||||
|
||||
export interface AgenticFlowOptions {
|
||||
stream?: boolean;
|
||||
model?: string;
|
||||
temperature?: number;
|
||||
max_tokens?: number;
|
||||
[key: string]: unknown;
|
||||
}
|
||||
|
||||
export interface AgenticFlowParams {
|
||||
messages: (ApiChatMessageData | (DatabaseMessage & { extra?: DatabaseMessageExtra[] }))[];
|
||||
options?: AgenticFlowOptions;
|
||||
callbacks: AgenticFlowCallbacks;
|
||||
signal?: AbortSignal;
|
||||
}
|
||||
|
||||
export interface AgenticFlowResult {
|
||||
handled: boolean;
|
||||
error?: Error;
|
||||
}
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
// Agentic Store
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
class AgenticStore {
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
// State
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
private _isRunning = $state(false);
|
||||
private _currentTurn = $state(0);
|
||||
private _totalToolCalls = $state(0);
|
||||
private _lastError = $state<Error | null>(null);
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
// Getters
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
get isRunning(): boolean {
|
||||
return this._isRunning;
|
||||
}
|
||||
|
||||
get currentTurn(): number {
|
||||
return this._currentTurn;
|
||||
}
|
||||
|
||||
get totalToolCalls(): number {
|
||||
return this._totalToolCalls;
|
||||
}
|
||||
|
||||
get lastError(): Error | null {
|
||||
return this._lastError;
|
||||
}
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
// Main Agentic Flow
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Run the agentic orchestration loop with MCP tools.
|
||||
*
|
||||
* This is the main entry point called by chatStore when agentic mode is enabled.
|
||||
* It coordinates:
|
||||
* 1. Initial LLM request with available tools
|
||||
* 2. Tool call detection and execution via MCP
|
||||
* 3. Multi-turn loop until completion or turn limit
|
||||
*
|
||||
* @returns AgenticFlowResult indicating if the flow handled the request
|
||||
*/
|
||||
async runAgenticFlow(params: AgenticFlowParams): Promise<AgenticFlowResult> {
|
||||
const { messages, options = {}, callbacks, signal } = params;
|
||||
const { onChunk, onReasoningChunk, onToolCallChunk, onModel, onComplete, onError, onTimings } =
|
||||
callbacks;
|
||||
|
||||
// Get agentic configuration
|
||||
const agenticConfig = getAgenticConfig(config());
|
||||
if (!agenticConfig.enabled) {
|
||||
return { handled: false };
|
||||
}
|
||||
|
||||
// Ensure MCP is initialized
|
||||
const hostManager = await mcpStore.ensureInitialized();
|
||||
if (!hostManager) {
|
||||
console.log('[AgenticStore] MCP not initialized, falling back to standard chat');
|
||||
return { handled: false };
|
||||
}
|
||||
|
||||
const tools = mcpStore.getToolDefinitions();
|
||||
if (tools.length === 0) {
|
||||
console.log('[AgenticStore] No tools available, falling back to standard chat');
|
||||
return { handled: false };
|
||||
}
|
||||
|
||||
console.log(`[AgenticStore] Starting agentic flow with ${tools.length} tools`);
|
||||
|
||||
// Normalize messages to API format
|
||||
const normalizedMessages: ApiChatMessageData[] = messages
|
||||
.map((msg) => {
|
||||
if ('id' in msg && 'convId' in msg && 'timestamp' in msg) {
|
||||
// DatabaseMessage - use ChatService to convert
|
||||
return ChatService.convertDbMessageToApiChatMessageData(
|
||||
msg as DatabaseMessage & { extra?: DatabaseMessageExtra[] }
|
||||
);
|
||||
}
|
||||
return msg as ApiChatMessageData;
|
||||
})
|
||||
.filter((msg) => {
|
||||
// Filter out empty system messages
|
||||
if (msg.role === 'system') {
|
||||
const content = typeof msg.content === 'string' ? msg.content : '';
|
||||
return content.trim().length > 0;
|
||||
}
|
||||
return true;
|
||||
});
|
||||
|
||||
// Reset state
|
||||
this._isRunning = true;
|
||||
this._currentTurn = 0;
|
||||
this._totalToolCalls = 0;
|
||||
this._lastError = null;
|
||||
|
||||
try {
|
||||
await this.executeAgenticLoop({
|
||||
messages: normalizedMessages,
|
||||
options,
|
||||
tools,
|
||||
agenticConfig,
|
||||
callbacks: {
|
||||
onChunk,
|
||||
onReasoningChunk,
|
||||
onToolCallChunk,
|
||||
onModel,
|
||||
onComplete,
|
||||
onError,
|
||||
onTimings
|
||||
},
|
||||
signal
|
||||
});
|
||||
return { handled: true };
|
||||
} catch (error) {
|
||||
const normalizedError = error instanceof Error ? error : new Error(String(error));
|
||||
this._lastError = normalizedError;
|
||||
onError?.(normalizedError);
|
||||
return { handled: true, error: normalizedError };
|
||||
} finally {
|
||||
this._isRunning = false;
|
||||
}
|
||||
}

// ─────────────────────────────────────────────────────────────────────────────
// Private: Agentic Loop Implementation
// ─────────────────────────────────────────────────────────────────────────────

private async executeAgenticLoop(params: {
messages: ApiChatMessageData[];
options: AgenticFlowOptions;
tools: ReturnType<typeof mcpStore.getToolDefinitions>;
agenticConfig: ReturnType<typeof getAgenticConfig>;
callbacks: AgenticFlowCallbacks;
signal?: AbortSignal;
}): Promise<void> {
const { messages, options, tools, agenticConfig, callbacks, signal } = params;
const { onChunk, onReasoningChunk, onToolCallChunk, onModel, onComplete, onTimings } =
callbacks;

// Set up LLM client
const llmClient = new OpenAISseClient({
url: './v1/chat/completions',
buildHeaders: () => getAuthHeaders()
});

// Prepare session state
const sessionMessages: AgenticMessage[] = toAgenticMessages(messages);
const allToolCalls: ApiChatCompletionToolCall[] = [];
let capturedTimings: ChatMessageTimings | undefined;

// Build base request from options (messages change per turn)
const requestBase: AgenticChatCompletionRequest = {
...options,
stream: true,
messages: []
};

const maxTurns = agenticConfig.maxTurns;
const maxToolPreviewLines = agenticConfig.maxToolPreviewLines;

// Run agentic loop
for (let turn = 0; turn < maxTurns; turn++) {
this._currentTurn = turn + 1;

if (signal?.aborted) {
onComplete?.('', undefined, capturedTimings, undefined);
return;
}

// Build LLM request for this turn
const llmRequest: AgenticChatCompletionRequest = {
...requestBase,
messages: sessionMessages,
tools: tools.length > 0 ? tools : undefined
};

// Filter reasoning content after first turn if configured
const shouldFilterReasoning = agenticConfig.filterReasoningAfterFirstTurn && turn > 0;

// Stream from LLM
let turnResult: OpenAISseTurnResult;
try {
turnResult = await llmClient.stream(
llmRequest,
{
onChunk,
onReasoningChunk: shouldFilterReasoning ? undefined : onReasoningChunk,
onModel,
onFirstValidChunk: undefined,
onProcessingUpdate: (timings, progress) => {
onTimings?.(timings, progress);
if (timings) capturedTimings = timings;
}
},
signal
);
} catch (error) {
if (signal?.aborted) {
onComplete?.('', undefined, capturedTimings, undefined);
return;
}
const normalizedError = error instanceof Error ? error : new Error('LLM stream error');
onChunk?.(`\n\n\`\`\`\nUpstream LLM error:\n${normalizedError.message}\n\`\`\`\n`);
onComplete?.('', undefined, capturedTimings, undefined);
throw normalizedError;
}

// Check if we should stop (no tool calls or finish reason isn't tool_calls)
if (
turnResult.toolCalls.length === 0 ||
(turnResult.finishReason && turnResult.finishReason !== 'tool_calls')
) {
onComplete?.('', undefined, capturedTimings, undefined);
return;
}

// Normalize and validate tool calls
const normalizedCalls = this.normalizeToolCalls(turnResult.toolCalls);
if (normalizedCalls.length === 0) {
onComplete?.('', undefined, capturedTimings, undefined);
return;
}

// Accumulate tool calls
for (const call of normalizedCalls) {
allToolCalls.push({
id: call.id,
type: call.type,
function: call.function ? { ...call.function } : undefined
});
}
this._totalToolCalls = allToolCalls.length;
onToolCallChunk?.(JSON.stringify(allToolCalls));

// Add assistant message with tool calls to session
sessionMessages.push({
role: 'assistant',
content: turnResult.content || undefined,
tool_calls: normalizedCalls
});

// Execute each tool call via MCP
for (const toolCall of normalizedCalls) {
if (signal?.aborted) {
onComplete?.('', undefined, capturedTimings, undefined);
return;
}

const mcpCall: MCPToolCall = {
id: toolCall.id,
function: {
name: toolCall.function.name,
arguments: toolCall.function.arguments
}
};

let result: string;
try {
const executionResult = await mcpStore.executeTool(mcpCall, signal);
result = executionResult.content;
} catch (error) {
if (error instanceof Error && error.name === 'AbortError') {
onComplete?.('', undefined, capturedTimings, undefined);
return;
}
result = `Error: ${error instanceof Error ? error.message : String(error)}`;
}

if (signal?.aborted) {
onComplete?.('', undefined, capturedTimings, undefined);
return;
}

// Emit tool preview (raw output for UI to format later)
this.emitToolPreview(toolCall, result, maxToolPreviewLines, onChunk);

// Add tool result to session (sanitize base64 images for context)
const contextValue = this.isBase64Image(result) ? '[Image displayed to user]' : result;
sessionMessages.push({
role: 'tool',
tool_call_id: toolCall.id,
content: contextValue
});
}
}

// Turn limit reached
onChunk?.('\n\n```\nTurn limit reached\n```\n');
onComplete?.('', undefined, capturedTimings, undefined);
}
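
// Editor sketch (not part of the commit): shape of `sessionMessages` after one tool turn
// of the loop above. The assistant turn carries the normalized tool_calls; each tool
// result is appended as a 'tool' message keyed by tool_call_id before the next request.
// The tool name, arguments, and result below are hypothetical.
//
// [
//   { role: 'user', content: 'What is the weather in Paris?' },
//   {
//     role: 'assistant',
//     content: undefined,
//     tool_calls: [{ id: 'call_0', type: 'function',
//       function: { name: 'get_weather', arguments: '{"city":"Paris"}' } }]
//   },
//   { role: 'tool', tool_call_id: 'call_0', content: '{"temp_c":18,"sky":"cloudy"}' }
// ]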

// ─────────────────────────────────────────────────────────────────────────────
// Private: Helper Methods
// ─────────────────────────────────────────────────────────────────────────────

/**
* Normalize tool calls from LLM response
*/
private normalizeToolCalls(toolCalls: ApiChatCompletionToolCall[]): AgenticToolCallList {
if (!toolCalls) return [];
return toolCalls.map((call, index) => ({
id: call?.id ?? `tool_${index}`,
type: (call?.type as 'function') ?? 'function',
function: {
name: call?.function?.name ?? '',
arguments: call?.function?.arguments ?? ''
}
}));
}
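
// Editor sketch: given a partially streamed call such as
//   { function: { name: 'list_files' } }
// normalizeToolCalls (above) fills the gaps with defaults, producing for index 0:
//   { id: 'tool_0', type: 'function', function: { name: 'list_files', arguments: '' } }
// The tool name 'list_files' is a hypothetical placeholder.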

/**
* Emit tool call preview to the chunk callback.
* Output is raw/sterile - UI formatting is a separate concern.
*/
private emitToolPreview(
toolCall: AgenticToolCallList[number],
result: string,
maxLines: number,
emit?: (chunk: string) => void
): void {
if (!emit) return;

const toolName = toolCall.function.name;
const toolArgs = toolCall.function.arguments;

let output = `\n\n<!-- AGENTIC_TOOL_CALL_START -->`;
output += `\n<!-- TOOL_NAME: ${toolName} -->`;
output += `\n<!-- TOOL_ARGS: ${toolArgs.replace(/\n/g, '\\n')} -->`;

if (this.isBase64Image(result)) {
output += `\n![Tool result](${result})`;
} else {
const lines = result.split('\n');
const trimmedLines = lines.length > maxLines ? lines.slice(-maxLines) : lines;
output += `\n\`\`\`\n${trimmedLines.join('\n')}\n\`\`\``;
}

output += `\n<!-- AGENTIC_TOOL_CALL_END -->\n`;
emit(output);
}
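
// Editor sketch: for a hypothetical call `search` with arguments {"query":"llama"} that
// returns two lines of text, emitToolPreview (above) emits roughly the following chunk;
// per the comment above, downstream UI formatting of these markers is a separate concern.
//
// <!-- AGENTIC_TOOL_CALL_START -->
// <!-- TOOL_NAME: search -->
// <!-- TOOL_ARGS: {"query":"llama"} -->
// ```
// result line 1
// result line 2
// ```
// <!-- AGENTIC_TOOL_CALL_END -->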

/**
* Check if content is a base64 image
*/
private isBase64Image(content: string): boolean {
const trimmed = content.trim();
if (!trimmed.startsWith('data:image/')) return false;

const match = trimmed.match(/^data:image\/(png|jpe?g|gif|webp);base64,([A-Za-z0-9+/]+=*)$/);
if (!match) return false;

const base64Payload = match[2];
return base64Payload.length > 0 && base64Payload.length % 4 === 0;
}
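
// Editor sketch: inputs accepted / rejected by isBase64Image (above).
//   'data:image/png;base64,iVBORw0KGgo='   -> true  (known subtype, padded base64, length % 4 === 0)
//   'data:image/svg+xml;base64,PHN2Zz4='   -> false (subtype not in png|jpe?g|gif|webp)
//   'data:image/png;base64,abc'            -> false (payload length not a multiple of 4)
//   'hello world'                          -> false (no data:image/ prefix)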

// ─────────────────────────────────────────────────────────────────────────────
// Utilities
// ─────────────────────────────────────────────────────────────────────────────

/**
* Clear error state
*/
clearError(): void {
this._lastError = null;
}
}

// ─────────────────────────────────────────────────────────────────────────────
// Singleton Instance & Exports
// ─────────────────────────────────────────────────────────────────────────────

export const agenticStore = new AgenticStore();

// Reactive exports for components
export function agenticIsRunning() {
return agenticStore.isRunning;
}

export function agenticCurrentTurn() {
return agenticStore.currentTurn;
}

export function agenticTotalToolCalls() {
return agenticStore.totalToolCalls;
}

export function agenticLastError() {
return agenticStore.lastError;
}
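
// Editor sketch: how a Svelte 5 component might consume the reactive exports above
// (hypothetical component, not part of the commit):
//
// <script lang="ts">
//   import { agenticIsRunning, agenticCurrentTurn } from '$lib/stores/agentic.svelte';
//   const running = $derived(agenticIsRunning());
//   const turn = $derived(agenticCurrentTurn());
// </script>
// {#if running}<span>Agentic turn {turn}…</span>{/if}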

@@ -1,7 +1,7 @@
import { DatabaseService, ChatService } from '$lib/services';
import { conversationsStore } from '$lib/stores/conversations.svelte';
import { config } from '$lib/stores/settings.svelte';
import { mcpStore } from '$lib/stores/mcp.svelte';
import { agenticStore } from '$lib/stores/agentic.svelte';
import { contextSize, isRouterMode } from '$lib/stores/server.svelte';
import {
selectedModelName,

@@ -519,130 +519,150 @@ class ChatStore {

const abortController = this.getOrCreateAbortController(assistantMessage.convId);

// Get MCP client if agentic mode is enabled (store layer responsibility)
const agenticConfig = getAgenticConfig(config());
const mcpClient = agenticConfig.enabled ? await mcpStore.ensureClient() : undefined;
// Build common callbacks for both agentic and standard flows
const streamCallbacks = {
onChunk: (chunk: string) => {
streamedContent += chunk;
this.setChatStreaming(assistantMessage.convId, streamedContent, assistantMessage.id);
const idx = conversationsStore.findMessageIndex(assistantMessage.id);
conversationsStore.updateMessageAtIndex(idx, { content: streamedContent });
},
onReasoningChunk: (reasoningChunk: string) => {
streamedReasoningContent += reasoningChunk;
const idx = conversationsStore.findMessageIndex(assistantMessage.id);
conversationsStore.updateMessageAtIndex(idx, { thinking: streamedReasoningContent });
},
onToolCallChunk: (toolCallChunk: string) => {
const chunk = toolCallChunk.trim();
if (!chunk) return;
streamedToolCallContent = chunk;
const idx = conversationsStore.findMessageIndex(assistantMessage.id);
conversationsStore.updateMessageAtIndex(idx, { toolCalls: streamedToolCallContent });
},
onModel: (modelName: string) => recordModel(modelName),
onTimings: (timings?: ChatMessageTimings, promptProgress?: ChatMessagePromptProgress) => {
const tokensPerSecond =
timings?.predicted_ms && timings?.predicted_n
? (timings.predicted_n / timings.predicted_ms) * 1000
: 0;
this.updateProcessingStateFromTimings(
{
prompt_n: timings?.prompt_n || 0,
prompt_ms: timings?.prompt_ms,
predicted_n: timings?.predicted_n || 0,
predicted_per_second: tokensPerSecond,
cache_n: timings?.cache_n || 0,
prompt_progress: promptProgress
},
assistantMessage.convId
);
},
onComplete: async (
finalContent?: string,
reasoningContent?: string,
timings?: ChatMessageTimings,
toolCallContent?: string
) => {
this.stopStreaming();

const updateData: Record<string, unknown> = {
content: finalContent || streamedContent,
thinking: reasoningContent || streamedReasoningContent,
toolCalls: toolCallContent || streamedToolCallContent,
timings
};
if (resolvedModel && !modelPersisted) {
updateData.model = resolvedModel;
}
await DatabaseService.updateMessage(assistantMessage.id, updateData);

const idx = conversationsStore.findMessageIndex(assistantMessage.id);
const uiUpdate: Partial<DatabaseMessage> = {
content: updateData.content as string,
toolCalls: updateData.toolCalls as string
};
if (timings) uiUpdate.timings = timings;
if (resolvedModel) uiUpdate.model = resolvedModel;

conversationsStore.updateMessageAtIndex(idx, uiUpdate);
await conversationsStore.updateCurrentNode(assistantMessage.id);

if (onComplete) await onComplete(streamedContent);
this.setChatLoading(assistantMessage.convId, false);
this.clearChatStreaming(assistantMessage.convId);
this.clearProcessingState(assistantMessage.convId);

if (isRouterMode()) {
modelsStore.fetchRouterModels().catch(console.error);
}
},
onError: (error: Error) => {
this.stopStreaming();

if (this.isAbortError(error)) {
this.setChatLoading(assistantMessage.convId, false);
this.clearChatStreaming(assistantMessage.convId);
this.clearProcessingState(assistantMessage.convId);

return;
}

console.error('Streaming error:', error);

this.setChatLoading(assistantMessage.convId, false);
this.clearChatStreaming(assistantMessage.convId);
this.clearProcessingState(assistantMessage.convId);

const idx = conversationsStore.findMessageIndex(assistantMessage.id);

if (idx !== -1) {
const failedMessage = conversationsStore.removeMessageAtIndex(idx);
if (failedMessage) DatabaseService.deleteMessage(failedMessage.id).catch(console.error);
}

const contextInfo = (
error as Error & { contextInfo?: { n_prompt_tokens: number; n_ctx: number } }
).contextInfo;

this.showErrorDialog(
error.name === 'TimeoutError' ? 'timeout' : 'server',
error.message,
contextInfo
);

if (onError) onError(error);
}
};
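
// Editor note: worked example for the tokens-per-second figure computed in onTimings above.
// With hypothetical timings of predicted_n = 96 tokens generated in predicted_ms = 4000 ms:
//   (96 / 4000) * 1000 = 24 tokens/s, reported as predicted_per_second.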

// Try agentic flow first if enabled
const agenticConfig = getAgenticConfig(config());
if (agenticConfig.enabled) {
const agenticResult = await agenticStore.runAgenticFlow({
messages: allMessages,
options: {
...this.getApiOptions(),
...(modelOverride ? { model: modelOverride } : {})
},
callbacks: streamCallbacks,
signal: abortController.signal
});

if (agenticResult.handled) {
return; // Agentic flow handled the request
}
// Fall through to standard ChatService if not handled
}

// Standard ChatService flow
await ChatService.sendMessage(
allMessages,
{
...this.getApiOptions(),
...(modelOverride ? { model: modelOverride } : {}),
onChunk: (chunk: string) => {
streamedContent += chunk;
this.setChatStreaming(assistantMessage.convId, streamedContent, assistantMessage.id);
const idx = conversationsStore.findMessageIndex(assistantMessage.id);
conversationsStore.updateMessageAtIndex(idx, { content: streamedContent });
},
onReasoningChunk: (reasoningChunk: string) => {
streamedReasoningContent += reasoningChunk;
const idx = conversationsStore.findMessageIndex(assistantMessage.id);
conversationsStore.updateMessageAtIndex(idx, { thinking: streamedReasoningContent });
},
onToolCallChunk: (toolCallChunk: string) => {
const chunk = toolCallChunk.trim();
if (!chunk) return;
streamedToolCallContent = chunk;
const idx = conversationsStore.findMessageIndex(assistantMessage.id);
conversationsStore.updateMessageAtIndex(idx, { toolCalls: streamedToolCallContent });
},
onModel: (modelName: string) => recordModel(modelName),
onTimings: (timings?: ChatMessageTimings, promptProgress?: ChatMessagePromptProgress) => {
const tokensPerSecond =
timings?.predicted_ms && timings?.predicted_n
? (timings.predicted_n / timings.predicted_ms) * 1000
: 0;
this.updateProcessingStateFromTimings(
{
prompt_n: timings?.prompt_n || 0,
prompt_ms: timings?.prompt_ms,
predicted_n: timings?.predicted_n || 0,
predicted_per_second: tokensPerSecond,
cache_n: timings?.cache_n || 0,
prompt_progress: promptProgress
},
assistantMessage.convId
);
},
onComplete: async (
finalContent?: string,
reasoningContent?: string,
timings?: ChatMessageTimings,
toolCallContent?: string
) => {
this.stopStreaming();

const updateData: Record<string, unknown> = {
content: finalContent || streamedContent,
thinking: reasoningContent || streamedReasoningContent,
toolCalls: toolCallContent || streamedToolCallContent,
timings
};
if (resolvedModel && !modelPersisted) {
updateData.model = resolvedModel;
}
await DatabaseService.updateMessage(assistantMessage.id, updateData);

const idx = conversationsStore.findMessageIndex(assistantMessage.id);
const uiUpdate: Partial<DatabaseMessage> = {
content: updateData.content as string,
toolCalls: updateData.toolCalls as string
};
if (timings) uiUpdate.timings = timings;
if (resolvedModel) uiUpdate.model = resolvedModel;

conversationsStore.updateMessageAtIndex(idx, uiUpdate);
await conversationsStore.updateCurrentNode(assistantMessage.id);

if (onComplete) await onComplete(streamedContent);
this.setChatLoading(assistantMessage.convId, false);
this.clearChatStreaming(assistantMessage.convId);
this.clearProcessingState(assistantMessage.convId);

if (isRouterMode()) {
modelsStore.fetchRouterModels().catch(console.error);
}
},
onError: (error: Error) => {
this.stopStreaming();

if (this.isAbortError(error)) {
this.setChatLoading(assistantMessage.convId, false);
this.clearChatStreaming(assistantMessage.convId);
this.clearProcessingState(assistantMessage.convId);

return;
}

console.error('Streaming error:', error);

this.setChatLoading(assistantMessage.convId, false);
this.clearChatStreaming(assistantMessage.convId);
this.clearProcessingState(assistantMessage.convId);

const idx = conversationsStore.findMessageIndex(assistantMessage.id);

if (idx !== -1) {
const failedMessage = conversationsStore.removeMessageAtIndex(idx);
if (failedMessage) DatabaseService.deleteMessage(failedMessage.id).catch(console.error);
}

const contextInfo = (
error as Error & { contextInfo?: { n_prompt_tokens: number; n_ctx: number } }
).contextInfo;

this.showErrorDialog(
error.name === 'TimeoutError' ? 'timeout' : 'server',
error.message,
contextInfo
);

if (onError) onError(error);
}
...streamCallbacks
},
assistantMessage.convId,
abortController.signal,
mcpClient ?? undefined
abortController.signal
);
}

@@ -1127,8 +1147,7 @@ class ChatStore {
}
},
msg.convId,
abortController.signal,
undefined // No MCP for continue generation
abortController.signal
);
} catch (error) {
if (!this.isAbortError(error)) console.error('Failed to continue message:', error);

@@ -1,26 +1,36 @@
import { browser } from '$app/environment';
import { MCPClient, type IMCPClient } from '$lib/mcp';
import {
MCPHostManager,
type OpenAIToolDefinition,
type ServerStatus
} from '$lib/mcp/host-manager';
import type { ToolExecutionResult } from '$lib/mcp/server-connection';
import { buildMcpClientConfig } from '$lib/config/mcp';
import { config } from '$lib/stores/settings.svelte';
import type { MCPToolCall } from '$lib/types/mcp';
import { DEFAULT_MCP_CONFIG } from '$lib/constants/mcp';

/**
* mcpStore - Reactive store for MCP (Model Context Protocol) client management
* mcpStore - Reactive store for MCP (Model Context Protocol) host management
*
* This store manages:
* - MCP client lifecycle (initialization, shutdown)
* - Connection state tracking
* - Available tools from connected MCP servers
* - MCPHostManager lifecycle (initialization, shutdown)
* - Connection state tracking for multiple MCP servers
* - Aggregated tools from all connected MCP servers
* - Error handling for MCP operations
*
* **Architecture & Relationships:**
* - **MCPClient**: SDK-based client wrapper for MCP server communication
* - **mcpStore** (this class): Reactive store for MCP state
* - **ChatService**: Uses mcpStore for agentic orchestration
* - **MCPHostManager**: Coordinates multiple MCPServerConnection instances
* - **MCPServerConnection**: Single SDK Client wrapper per server
* - **mcpStore** (this class): Reactive Svelte store for MCP state
* - **agenticStore**: Uses mcpStore for tool execution in agentic loop
* - **settingsStore**: Provides MCP server configuration
*
* **Key Features:**
* - Reactive state with Svelte 5 runes ($state, $derived)
* - Automatic reinitialization on config changes
* - Aggregates tools from multiple servers
* - Routes tool calls to appropriate server automatically
* - Graceful error handling with fallback to standard chat
*/
class MCPStore {

@@ -28,18 +38,18 @@ class MCPStore {
// State
// ─────────────────────────────────────────────────────────────────────────────

private _client = $state<MCPClient | null>(null);
private _hostManager = $state<MCPHostManager | null>(null);
private _isInitializing = $state(false);
private _error = $state<string | null>(null);
private _configSignature = $state<string | null>(null);
private _initPromise: Promise<MCPClient | undefined> | null = null;
private _initPromise: Promise<MCPHostManager | undefined> | null = null;

// ─────────────────────────────────────────────────────────────────────────────
// Computed Getters
// ─────────────────────────────────────────────────────────────────────────────

get client(): IMCPClient | null {
return this._client;
get hostManager(): MCPHostManager | null {
return this._hostManager;
}

get isInitializing(): boolean {

@@ -47,7 +57,7 @@
}

get isInitialized(): boolean {
return this._client !== null;
return this._hostManager?.isInitialized ?? false;
}

get error(): string | null {

@@ -65,23 +75,45 @@
}

/**
* Get list of available tool names
* Get list of available tool names (aggregated from all servers)
*/
get availableTools(): string[] {
return this._client?.listTools() ?? [];
return this._hostManager?.getToolNames() ?? [];
}

/**
* Get tool definitions for LLM
* Get number of connected servers
*/
async getToolDefinitions(): Promise<
{
type: 'function';
function: { name: string; description?: string; parameters: Record<string, unknown> };
}[]
> {
if (!this._client) return [];
return this._client.getToolsDefinition();
get connectedServerCount(): number {
return this._hostManager?.connectedServerCount ?? 0;
}

/**
* Get names of connected servers
*/
get connectedServerNames(): string[] {
return this._hostManager?.connectedServerNames ?? [];
}

/**
* Get total tool count
*/
get toolCount(): number {
return this._hostManager?.toolCount ?? 0;
}

/**
* Get tool definitions for LLM (OpenAI function calling format)
*/
getToolDefinitions(): OpenAIToolDefinition[] {
return this._hostManager?.getToolDefinitionsForLLM() ?? [];
}
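
// Editor sketch: the OpenAI function-calling shape these definitions take, based on the
// tool-definition type visible elsewhere in this diff; the `brave_search` tool and its
// parameter schema are hypothetical.
//
// const example: OpenAIToolDefinition = {
//   type: 'function',
//   function: {
//     name: 'brave_search',
//     description: 'Search the web',
//     parameters: {
//       type: 'object',
//       properties: { query: { type: 'string' } },
//       required: ['query']
//     }
//   }
// };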

/**
* Get status of all servers
*/
getServersStatus(): ServerStatus[] {
return this._hostManager?.getServersStatus() ?? [];
}

// ─────────────────────────────────────────────────────────────────────────────

@@ -89,11 +121,11 @@
// ─────────────────────────────────────────────────────────────────────────────

/**
* Ensure MCP client is initialized with current config.
* Returns the client if successful, undefined otherwise.
* Ensure MCP host manager is initialized with current config.
* Returns the host manager if successful, undefined otherwise.
* Handles config changes by reinitializing as needed.
*/
async ensureClient(): Promise<IMCPClient | undefined> {
async ensureInitialized(): Promise<MCPHostManager | undefined> {
if (!browser) return undefined;

const mcpConfig = buildMcpClientConfig(config());

@@ -106,8 +138,8 @@
}

// Already initialized with correct config
if (this._client && this._configSignature === signature) {
return this._client;
if (this._hostManager?.isInitialized && this._configSignature === signature) {
return this._hostManager;
}

// Init in progress with correct config - wait for it

@@ -115,55 +147,63 @@
return this._initPromise;
}

// Config changed or first init - shutdown old client first
if (this._client || this._initPromise) {
// Config changed or first init - shutdown old manager first
if (this._hostManager || this._initPromise) {
await this.shutdown();
}

// Initialize new client
// Initialize new host manager
return this.initialize(signature, mcpConfig!);
}

/**
* Initialize MCP client with given config
* Initialize MCP host manager with given config
*/
private async initialize(
signature: string,
mcpConfig: ReturnType<typeof buildMcpClientConfig>
): Promise<MCPClient | undefined> {
if (!mcpConfig) return undefined;

mcpConfig: NonNullable<ReturnType<typeof buildMcpClientConfig>>
): Promise<MCPHostManager | undefined> {
this._isInitializing = true;
this._error = null;
this._configSignature = signature;

const client = new MCPClient(mcpConfig);
const hostManager = new MCPHostManager();

this._initPromise = client
.initialize()
this._initPromise = hostManager
.initialize({
servers: mcpConfig.servers,
clientInfo: mcpConfig.clientInfo ?? DEFAULT_MCP_CONFIG.clientInfo,
capabilities: mcpConfig.capabilities ?? DEFAULT_MCP_CONFIG.capabilities
})
.then(() => {
// Check if config changed during initialization
if (this._configSignature !== signature) {
void client.shutdown().catch((err) => {
console.error('[MCP Store] Failed to shutdown stale client:', err);
void hostManager.shutdown().catch((err) => {
console.error('[MCP Store] Failed to shutdown stale host manager:', err);
});
return undefined;
}

this._client = client;
this._hostManager = hostManager;
this._isInitializing = false;

const toolNames = hostManager.getToolNames();
const serverNames = hostManager.connectedServerNames;

console.log(
`[MCP Store] Initialized with ${client.listTools().length} tools:`,
client.listTools()
`[MCP Store] Initialized: ${serverNames.length} servers, ${toolNames.length} tools`
);
return client;
console.log(`[MCP Store] Servers: ${serverNames.join(', ')}`);
console.log(`[MCP Store] Tools: ${toolNames.join(', ')}`);

return hostManager;
})
.catch((error) => {
console.error('[MCP Store] Initialization failed:', error);
this._error = error instanceof Error ? error.message : String(error);
this._isInitializing = false;

void client.shutdown().catch((err) => {
void hostManager.shutdown().catch((err) => {
console.error('[MCP Store] Failed to shutdown after error:', err);
});

@@ -179,7 +219,7 @@
}

/**
* Shutdown MCP client and clear state
* Shutdown MCP host manager and clear state
*/
async shutdown(): Promise<void> {
// Wait for any pending initialization

@@ -188,15 +228,15 @@
this._initPromise = null;
}

if (this._client) {
const clientToShutdown = this._client;
this._client = null;
if (this._hostManager) {
const managerToShutdown = this._hostManager;
this._hostManager = null;
this._configSignature = null;
this._error = null;

try {
await clientToShutdown.shutdown();
console.log('[MCP Store] Client shutdown complete');
await managerToShutdown.shutdown();
console.log('[MCP Store] Host manager shutdown complete');
} catch (error) {
console.error('[MCP Store] Shutdown error:', error);
}

@@ -208,16 +248,43 @@
// ─────────────────────────────────────────────────────────────────────────────

/**
* Execute a tool call via MCP client
* Execute a tool call via MCP host manager.
* Automatically routes to the appropriate server.
*/
async execute(
toolCall: { id: string; function: { name: string; arguments: string } },
abortSignal?: AbortSignal
): Promise<string> {
if (!this._client) {
throw new Error('MCP client not initialized');
async executeTool(toolCall: MCPToolCall, signal?: AbortSignal): Promise<ToolExecutionResult> {
if (!this._hostManager) {
throw new Error('MCP host manager not initialized');
}
return this._client.execute(toolCall, abortSignal);
return this._hostManager.executeTool(toolCall, signal);
}

/**
* Execute a tool by name with arguments.
* Simpler interface for direct tool calls.
*/
async executeToolByName(
toolName: string,
args: Record<string, unknown>,
signal?: AbortSignal
): Promise<ToolExecutionResult> {
if (!this._hostManager) {
throw new Error('MCP host manager not initialized');
}
return this._hostManager.executeToolByName(toolName, args, signal);
}

/**
* Check if a tool exists
*/
hasTool(toolName: string): boolean {
return this._hostManager?.hasTool(toolName) ?? false;
}

/**
* Get which server provides a specific tool
*/
getToolServer(toolName: string): string | undefined {
return this._hostManager?.getToolServer(toolName);
}
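
// Editor sketch: typical call sequence, mirroring how agenticStore drives this store
// earlier in the diff; 'get_weather', its arguments, and `abortSignal` are hypothetical.
//
// const host = await mcpStore.ensureInitialized();
// if (host && mcpStore.hasTool('get_weather')) {
//   const { content } = await mcpStore.executeTool(
//     { id: 'call_0', function: { name: 'get_weather', arguments: '{"city":"Paris"}' } },
//     abortSignal
//   );
// }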

// ─────────────────────────────────────────────────────────────────────────────

@@ -239,8 +306,8 @@
export const mcpStore = new MCPStore();

// Reactive exports for components
export function mcpClient() {
return mcpStore.client;
export function mcpHostManager() {
return mcpStore.hostManager;
}

export function mcpIsInitializing() {

@@ -262,3 +329,15 @@ export function mcpIsEnabled() {
export function mcpAvailableTools() {
return mcpStore.availableTools;
}

export function mcpConnectedServerCount() {
return mcpStore.connectedServerCount;
}

export function mcpConnectedServerNames() {
return mcpStore.connectedServerNames;
}

export function mcpToolCount() {
return mcpStore.toolCount;
}

@@ -76,20 +76,3 @@ export type MCPServerSettingsEntry = {
url: string;
requestTimeoutSeconds: number;
};
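
// Editor sketch: a settings entry matching the fields of MCPServerSettingsEntry visible in
// this hunk (fields above the hunk are not shown, hence Partial); the URL is a placeholder.
//
// const entry: Partial<MCPServerSettingsEntry> = {
//   url: 'http://localhost:3001/mcp',
//   requestTimeoutSeconds: 30
// };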

/**
* Interface defining the public API for MCP clients.
* Both MCPClient (custom) and MCPClientSDK (official SDK) implement this interface.
*/
export interface IMCPClient {
initialize(): Promise<void>;
shutdown(): Promise<void>;
listTools(): string[];
getToolsDefinition(): Promise<
{
type: 'function';
function: { name: string; description?: string; parameters: Record<string, unknown> };
}[]
>;
execute(toolCall: MCPToolCall, abortSignal?: AbortSignal): Promise<string>;
}