From 95bb5e98148f44b4c561c98e50d7e98a09c9d09f Mon Sep 17 00:00:00 2001 From: imsherr Date: Fri, 20 Mar 2026 17:37:21 -0400 Subject: [PATCH 01/11] fix(ai, ai-anthropic): thinking blocks missing on turn 2+ in tool loops - Track thinking per-step via stepId instead of merging into single ThinkingPart - Capture Anthropic signature_delta and preserve through the full stack - Server-side TextEngine accumulates thinking + signatures per iteration - Include thinking blocks in Anthropic message history for multi-turn context - Add interleaved-thinking-2025-05-14 beta header when thinking is enabled - Add tests for multi-step thinking, backward compat, and result aggregation Closes TanStack/ai#340 --- .../ai-anthropic/src/adapters/text.ts | 37 +++++- .../typescript/ai-client/src/chat-client.ts | 2 +- .../ai/src/activities/chat/index.ts | 42 ++++++- .../ai/src/activities/chat/messages.ts | 13 +- .../chat/stream/message-updaters.ts | 13 +- .../src/activities/chat/stream/processor.ts | 115 +++++++++++++++--- .../ai/src/activities/chat/stream/types.ts | 5 +- packages/typescript/ai/src/types.ts | 5 + .../ai/tests/message-updaters.test.ts | 38 ++++-- .../ai/tests/stream-processor.test.ts | 74 ++++++++++- 10 files changed, 304 insertions(+), 40 deletions(-) diff --git a/packages/typescript/ai-anthropic/src/adapters/text.ts b/packages/typescript/ai-anthropic/src/adapters/text.ts index fdad0fc60..5add53d8d 100644 --- a/packages/typescript/ai-anthropic/src/adapters/text.ts +++ b/packages/typescript/ai-anthropic/src/adapters/text.ts @@ -331,6 +331,9 @@ export class AnthropicTextAdapter< : undefined, tools: tools, ...validProviderOptions, + ...(thinkingBudget && { + betas: ['interleaved-thinking-2025-05-14'] as any, + }), } validateTextProviderOptions(requestParams) return requestParams @@ -431,6 +434,18 @@ export class AnthropicTextAdapter< if (role === 'assistant' && message.toolCalls?.length) { const contentBlocks: AnthropicContentBlocks = [] + if (message.thinking?.length) { + for (const thinking of message.thinking) { + if (thinking.signature) { + contentBlocks.push({ + type: 'thinking', + thinking: thinking.content, + signature: thinking.signature, + } as unknown as AnthropicContentBlock) + } + } + } + if (message.content) { const content = typeof message.content === 'string' ? 
message.content : '' @@ -572,6 +587,7 @@ export class AnthropicTextAdapter< const model = options.model let accumulatedContent = '' let accumulatedThinking = '' + let accumulatedSignature = '' const timestamp = Date.now() const toolCallsMap = new Map< number, @@ -621,6 +637,7 @@ export class AnthropicTextAdapter< }) } else if (event.content_block.type === 'thinking') { accumulatedThinking = '' + accumulatedSignature = '' // Emit REASONING and STEP_STARTED for thinking stepId = genId() reasoningMessageId = genId() @@ -714,6 +731,11 @@ export class AnthropicTextAdapter< delta, content: accumulatedThinking, }) + } else if ( + (event.delta as { type: string }).type === 'signature_delta' + ) { + accumulatedSignature += + (event.delta as { signature: string }).signature || '' } else if (event.delta.type === 'input_json_delta') { const existing = toolCallsMap.get(currentToolIndex) if (existing) { @@ -744,7 +766,20 @@ export class AnthropicTextAdapter< } } } else if (event.type === 'content_block_stop') { - if (currentBlockType === 'tool_use') { + if (currentBlockType === 'thinking') { + // Emit signature so it can be replayed in multi-turn context + if (accumulatedSignature && stepId) { + yield asChunk({ + type: 'STEP_FINISHED', + stepId, + model, + timestamp, + delta: '', + content: accumulatedThinking, + signature: accumulatedSignature, + }) + } + } else if (currentBlockType === 'tool_use') { const existing = toolCallsMap.get(currentToolIndex) if (existing) { // If tool call wasn't started yet (no args), start it now diff --git a/packages/typescript/ai-client/src/chat-client.ts b/packages/typescript/ai-client/src/chat-client.ts index ab9d07dff..ac8a35b7b 100644 --- a/packages/typescript/ai-client/src/chat-client.ts +++ b/packages/typescript/ai-client/src/chat-client.ts @@ -154,7 +154,7 @@ export class ChatClient { this.events.textUpdated(this.currentStreamId, messageId, content) } }, - onThinkingUpdate: (messageId: string, content: string) => { + onThinkingUpdate: (messageId: string, _stepId: string, content: string) => { // Emit thinking update to devtools if (this.currentStreamId) { this.events.thinkingUpdated( diff --git a/packages/typescript/ai/src/activities/chat/index.ts b/packages/typescript/ai/src/activities/chat/index.ts index e1327fdb5..ac891c428 100644 --- a/packages/typescript/ai/src/activities/chat/index.ts +++ b/packages/typescript/ai/src/activities/chat/index.ts @@ -281,6 +281,9 @@ class TextEngine< private totalChunkCount = 0 private currentMessageId: string | null = null private accumulatedContent = '' + private accumulatedThinking: Array<{ content: string; signature?: string }> = [] + private currentThinkingContent = '' + private currentThinkingSignature = '' private eventOptions?: Record private eventToolNames?: Array private finishedEvent: RunFinishedEvent | null = null @@ -556,6 +559,9 @@ class TextEngine< private async beginIteration(): Promise { this.currentMessageId = this.createId('msg') this.accumulatedContent = '' + this.accumulatedThinking = [] + this.currentThinkingContent = '' + this.currentThinkingSignature = '' this.finishedEvent = null // Update mutable context fields @@ -666,6 +672,9 @@ class TextEngine< case 'RUN_ERROR': this.handleRunErrorEvent(chunk) break + case 'STEP_STARTED': + this.handleStepStartedEvent() + break case 'STEP_FINISHED': this.handleStepFinishedEvent(chunk) break @@ -683,7 +692,7 @@ class TextEngine< break default: - // RUN_STARTED, TEXT_MESSAGE_START, TEXT_MESSAGE_END, STEP_STARTED, + // RUN_STARTED, TEXT_MESSAGE_START, TEXT_MESSAGE_END, 
// STATE_SNAPSHOT, STATE_DELTA, CUSTOM // - no special handling needed in chat activity break @@ -726,10 +735,32 @@ class TextEngine< this.earlyTermination = true } + private finalizeCurrentThinkingStep(): void { + if (this.currentThinkingContent) { + this.accumulatedThinking.push({ + content: this.currentThinkingContent, + ...(this.currentThinkingSignature && { + signature: this.currentThinkingSignature, + }), + }) + this.currentThinkingContent = '' + this.currentThinkingSignature = '' + } + } + + private handleStepStartedEvent(): void { + this.finalizeCurrentThinkingStep() + } + private handleStepFinishedEvent( - _chunk: Extract, + chunk: Extract, ): void { - // State tracking for STEP_FINISHED is handled by middleware + if (chunk.delta) { + this.currentThinkingContent += chunk.delta + } + if (chunk.signature) { + this.currentThinkingSignature = chunk.signature + } } private async *checkForPendingToolCalls(): AsyncGenerator< @@ -1057,12 +1088,17 @@ class TextEngine< } private addAssistantToolCallMessage(toolCalls: Array): void { + this.finalizeCurrentThinkingStep() + this.messages = [ ...this.messages, { role: 'assistant', content: this.accumulatedContent || null, toolCalls, + ...(this.accumulatedThinking.length > 0 && { + thinking: this.accumulatedThinking, + }), }, ] } diff --git a/packages/typescript/ai/src/activities/chat/messages.ts b/packages/typescript/ai/src/activities/chat/messages.ts index b7f97b880..cecf16920 100644 --- a/packages/typescript/ai/src/activities/chat/messages.ts +++ b/packages/typescript/ai/src/activities/chat/messages.ts @@ -165,6 +165,7 @@ function isToolCallIncluded(part: ToolCallPart): boolean { function buildAssistantMessages(uiMessage: UIMessage): Array { const messageList: Array = [] let current = createSegment() + let pendingThinking: Array<{ content: string; signature?: string }> = [] // Track emitted tool result IDs to avoid duplicates. // A tool call can have BOTH an explicit tool-result part AND an output @@ -181,7 +182,9 @@ function buildAssistantMessages(uiMessage: UIMessage): Array { role: 'assistant', content, ...(hasToolCalls && { toolCalls: current.toolCalls }), + ...(pendingThinking.length > 0 && { thinking: pendingThinking }), }) + pendingThinking = [] } current = createSegment() } @@ -227,7 +230,15 @@ function buildAssistantMessages(uiMessage: UIMessage): Array { } break - // thinking parts are skipped - they're UI-only + case 'thinking': + if (part.content) { + pendingThinking.push({ + content: part.content, + ...(part.signature && { signature: part.signature }), + }) + } + break + default: break } diff --git a/packages/typescript/ai/src/activities/chat/stream/message-updaters.ts b/packages/typescript/ai/src/activities/chat/stream/message-updaters.ts index 80b94d59a..09a565f0d 100644 --- a/packages/typescript/ai/src/activities/chat/stream/message-updaters.ts +++ b/packages/typescript/ai/src/activities/chat/stream/message-updaters.ts @@ -244,12 +244,15 @@ export function updateToolCallApprovalResponse( } /** - * Update or add a thinking part to a message. + * Update or add a thinking part to a message, keyed by stepId. + * Each distinct stepId produces its own ThinkingPart. 
*/ export function updateThinkingPart( messages: Array, messageId: string, + stepId: string, content: string, + signature?: string, ): Array { return messages.map((msg) => { if (msg.id !== messageId) { @@ -257,15 +260,19 @@ export function updateThinkingPart( } const parts = [...msg.parts] - const thinkingPartIndex = parts.findIndex((p) => p.type === 'thinking') + const thinkingPartIndex = parts.findIndex( + (p) => p.type === 'thinking' && p.stepId === stepId, + ) const thinkingPart: ThinkingPart = { type: 'thinking', content, + stepId, + ...(signature && { signature }), } if (thinkingPartIndex >= 0) { - // Update existing thinking part + // Update existing thinking part for this step parts[thinkingPartIndex] = thinkingPart } else { // Add new thinking part at the end (preserve natural streaming order) diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts index def1194c9..e385a51eb 100644 --- a/packages/typescript/ai/src/activities/chat/stream/processor.ts +++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts @@ -90,7 +90,7 @@ export interface StreamProcessorEvents { state: ToolCallState, args: string, ) => void - onThinkingUpdate?: (messageId: string, content: string) => void + onThinkingUpdate?: (messageId: string, stepId: string, content: string) => void } /** @@ -139,6 +139,7 @@ export class StreamProcessor { private activeMessageIds: Set = new Set() private toolCallToMessage: Map = new Map() private pendingManualMessageId: string | null = null + private pendingThinkingStepId: string | null = null // Run tracking (for concurrent run safety) private activeRuns = new Set() @@ -541,8 +542,12 @@ export class StreamProcessor { ) break + case 'STEP_STARTED': + this.handleStepStartedEvent(chunk) + break + default: - // STEP_STARTED, STATE_SNAPSHOT, STATE_DELTA - no special handling needed + // STATE_SNAPSHOT, STATE_DELTA - no special handling needed break } } @@ -564,8 +569,11 @@ export class StreamProcessor { totalTextContent: '', currentSegmentText: '', lastEmittedText: '', - thinkingContent: '', hasSeenReasoningEvents: false, + thinkingSteps: new Map(), + thinkingStepSignatures: new Map(), + thinkingStepOrder: [], + currentThinkingStepId: null, toolCalls: new Map(), toolCallOrder: [], hasToolCallsSinceTextStart: false, @@ -1155,11 +1163,38 @@ export class StreamProcessor { this.events.onError?.(new Error(errorMessage)) } + /** + * Handle STEP_STARTED event (for thinking/reasoning content). + * + * Records the stepId so that subsequent STEP_FINISHED deltas accumulate + * into their own ThinkingPart. Does not create a message — the message + * is lazily created when the first STEP_FINISHED content arrives. + */ + private handleStepStartedEvent( + chunk: Extract, + ): void { + const activeId = this.getActiveAssistantMessageId() + if (activeId) { + const state = this.getMessageState(activeId) + if (state) { + state.currentThinkingStepId = chunk.stepId + if (!state.thinkingSteps.has(chunk.stepId)) { + state.thinkingSteps.set(chunk.stepId, '') + state.thinkingStepOrder.push(chunk.stepId) + } + return + } + } + + // No active message yet — defer until ensureAssistantMessage in STEP_FINISHED + this.pendingThinkingStepId = chunk.stepId + } + /** * Handle STEP_FINISHED event (for thinking/reasoning content). * - * Accumulates delta into thinkingContent and updates a single ThinkingPart - * in the UIMessage (replaced in-place, not appended). 
+ * Accumulates delta into the current thinking step's content and updates + * the corresponding ThinkingPart in the UIMessage. * * @see docs/chat-architecture.md#thinkingreasoning-content — Thinking flow */ @@ -1178,7 +1213,26 @@ export class StreamProcessor { return } - const previous = state.thinkingContent + // Consume pending stepId from STEP_STARTED that arrived before the message existed + if (this.pendingThinkingStepId) { + state.currentThinkingStepId = this.pendingThinkingStepId + if (!state.thinkingSteps.has(this.pendingThinkingStepId)) { + state.thinkingSteps.set(this.pendingThinkingStepId, '') + state.thinkingStepOrder.push(this.pendingThinkingStepId) + } + this.pendingThinkingStepId = null + } + + const stepId = state.currentThinkingStepId ?? chunk.stepId + + // Auto-initialize if no prior STEP_STARTED (backward compat) + if (!state.thinkingSteps.has(stepId)) { + state.thinkingSteps.set(stepId, '') + state.thinkingStepOrder.push(stepId) + state.currentThinkingStepId = stepId + } + + const previous = state.thinkingSteps.get(stepId)! let nextThinking = previous // Prefer delta over content @@ -1194,24 +1248,31 @@ export class StreamProcessor { } } - state.thinkingContent = nextThinking + state.thinkingSteps.set(stepId, nextThinking) + + if (chunk.signature) { + state.thinkingStepSignatures.set(stepId, chunk.signature) + } // Update UIMessage this.messages = updateThinkingPart( this.messages, messageId, - state.thinkingContent, + stepId, + nextThinking, + state.thinkingStepSignatures.get(stepId), ) this.emitMessagesChange() // Emit granular event - this.events.onThinkingUpdate?.(messageId, state.thinkingContent) + this.events.onThinkingUpdate?.(messageId, stepId, nextThinking) } /** * Handle REASONING_MESSAGE_CONTENT event (AG-UI reasoning protocol). * - * Accumulates reasoning delta into thinkingContent and updates the ThinkingPart + * Accumulates reasoning delta into thinking content and updates the + * corresponding ThinkingPart in the UIMessage. * in the UIMessage. */ private handleReasoningMessageContentEvent( @@ -1223,16 +1284,36 @@ export class StreamProcessor { state.hasSeenReasoningEvents = true const delta = chunk.delta || '' - state.thinkingContent = state.thinkingContent + delta + + if (this.pendingThinkingStepId) { + state.currentThinkingStepId = this.pendingThinkingStepId + if (!state.thinkingSteps.has(this.pendingThinkingStepId)) { + state.thinkingSteps.set(this.pendingThinkingStepId, '') + state.thinkingStepOrder.push(this.pendingThinkingStepId) + } + this.pendingThinkingStepId = null + } + + const stepId = state.currentThinkingStepId ?? 'reasoning' + if (!state.thinkingSteps.has(stepId)) { + state.thinkingSteps.set(stepId, '') + state.thinkingStepOrder.push(stepId) + state.currentThinkingStepId = stepId + } + + const nextThinking = (state.thinkingSteps.get(stepId) ?? '') + delta + state.thinkingSteps.set(stepId, nextThinking) this.messages = updateThinkingPart( this.messages, messageId, - state.thinkingContent, + stepId, + nextThinking, + state.thinkingStepSignatures.get(stepId), ) this.emitMessagesChange() - this.events.onThinkingUpdate?.(messageId, state.thinkingContent) + this.events.onThinkingUpdate?.(messageId, stepId, nextThinking) } /** @@ -1518,7 +1599,9 @@ export class StreamProcessor { for (const state of this.messageStates.values()) { content += state.totalTextContent - thinking += state.thinkingContent + for (const stepId of state.thinkingStepOrder) { + thinking += state.thinkingSteps.get(stepId) ?? 
'' + } } return { @@ -1540,7 +1623,9 @@ export class StreamProcessor { for (const state of this.messageStates.values()) { content += state.totalTextContent - thinking += state.thinkingContent + for (const stepId of state.thinkingStepOrder) { + thinking += state.thinkingSteps.get(stepId) ?? '' + } for (const [id, tc] of state.toolCalls) { toolCalls.set(id, tc) } diff --git a/packages/typescript/ai/src/activities/chat/stream/types.ts b/packages/typescript/ai/src/activities/chat/stream/types.ts index b91bb457a..432d17eb9 100644 --- a/packages/typescript/ai/src/activities/chat/stream/types.ts +++ b/packages/typescript/ai/src/activities/chat/stream/types.ts @@ -56,8 +56,11 @@ export interface MessageStreamState { totalTextContent: string currentSegmentText: string lastEmittedText: string - thinkingContent: string hasSeenReasoningEvents: boolean + thinkingSteps: Map + thinkingStepSignatures: Map + thinkingStepOrder: Array + currentThinkingStepId: string | null toolCalls: Map toolCallOrder: Array hasToolCallsSinceTextStart: boolean diff --git a/packages/typescript/ai/src/types.ts b/packages/typescript/ai/src/types.ts index e11e7176f..29672a540 100644 --- a/packages/typescript/ai/src/types.ts +++ b/packages/typescript/ai/src/types.ts @@ -297,6 +297,7 @@ export interface ModelMessage< name?: string toolCalls?: Array toolCallId?: string + thinking?: Array<{ content: string; signature?: string }> } /** @@ -335,6 +336,8 @@ export interface ToolResultPart { export interface ThinkingPart { type: 'thinking' content: string + stepId?: string + signature?: string } export type MessagePart = @@ -987,6 +990,8 @@ export interface StepFinishedEvent extends AGUIStepFinishedEvent { delta?: string /** Full accumulated thinking content (TanStack AI internal) */ content?: string + /** Provider signature for the thinking block */ + signature?: string } /** diff --git a/packages/typescript/ai/tests/message-updaters.test.ts b/packages/typescript/ai/tests/message-updaters.test.ts index 5de1a031e..192ffdb86 100644 --- a/packages/typescript/ai/tests/message-updaters.test.ts +++ b/packages/typescript/ai/tests/message-updaters.test.ts @@ -777,24 +777,31 @@ describe('message-updaters', () => { describe('updateThinkingPart', () => { it('should add a new thinking part', () => { const messages = [createMessage('msg-1')] - const result = updateThinkingPart(messages, 'msg-1', 'Let me think...') + const result = updateThinkingPart( + messages, + 'msg-1', + 'step-1', + 'Let me think...', + ) expect(result[0]?.parts).toHaveLength(1) expect(result[0]?.parts[0]).toEqual({ type: 'thinking', content: 'Let me think...', + stepId: 'step-1', }) }) - it('should update existing thinking part', () => { + it('should update existing thinking part by stepId', () => { const messages = [ createMessage('msg-1', 'assistant', [ - { type: 'thinking', content: 'Let me think' }, + { type: 'thinking', content: 'Let me think', stepId: 'step-1' }, ]), ] const result = updateThinkingPart( messages, 'msg-1', + 'step-1', 'Let me think about this', ) @@ -802,26 +809,34 @@ describe('message-updaters', () => { expect(result[0]?.parts[0]).toEqual({ type: 'thinking', content: 'Let me think about this', + stepId: 'step-1', }) }) - it('should only update the first thinking part if multiple exist', () => { + it('should create separate parts for different stepIds', () => { const messages = [ createMessage('msg-1', 'assistant', [ - { type: 'thinking', content: 'First' }, + { type: 'thinking', content: 'First', stepId: 'step-1' }, { type: 'text', content: 'Some text' 
}, - { type: 'thinking', content: 'Second' }, ]), ] - const result = updateThinkingPart(messages, 'msg-1', 'Updated first') + const result = updateThinkingPart( + messages, + 'msg-1', + 'step-2', + 'Second', + ) + expect(result[0]?.parts).toHaveLength(3) expect(result[0]?.parts[0]).toEqual({ type: 'thinking', - content: 'Updated first', + content: 'First', + stepId: 'step-1', }) expect(result[0]?.parts[2]).toEqual({ type: 'thinking', content: 'Second', + stepId: 'step-2', }) }) @@ -830,7 +845,12 @@ describe('message-updaters', () => { createMessage('msg-1'), createMessage('msg-2', 'user', [{ type: 'text', content: 'Hi' }]), ] - const result = updateThinkingPart(messages, 'msg-1', 'Thinking...') + const result = updateThinkingPart( + messages, + 'msg-1', + 'step-1', + 'Thinking...', + ) expect(result[0]?.parts).toHaveLength(1) expect(result[1]?.parts).toHaveLength(1) diff --git a/packages/typescript/ai/tests/stream-processor.test.ts b/packages/typescript/ai/tests/stream-processor.test.ts index 02145e9e3..d95e69a44 100644 --- a/packages/typescript/ai/tests/stream-processor.test.ts +++ b/packages/typescript/ai/tests/stream-processor.test.ts @@ -74,8 +74,10 @@ const ev = { ) => chunk('RUN_FINISHED', { runId, threadId, finishReason }), runError: (message: string, runId = 'run-1') => chunk('RUN_ERROR', { message, runId, error: { message } }), - stepFinished: (delta: string, stepName = 'step-1') => - chunk('STEP_FINISHED', { stepName, stepId: stepName, delta }), + stepStarted: (stepId = 'step-1', stepType = 'thinking') => + chunk('STEP_STARTED', { stepId, stepType }), + stepFinished: (delta: string, stepId = 'step-1') => + chunk('STEP_FINISHED', { stepId, delta }), custom: (name: string, value?: unknown) => chunk('CUSTOM', { name, value }), } @@ -767,7 +769,7 @@ describe('StreamProcessor', () => { ).toBe(true) }) - it('should update a single ThinkingPart in-place', () => { + it('should update a single ThinkingPart in-place for same stepId', () => { const processor = new StreamProcessor() processor.prepareAssistantMessage() @@ -775,12 +777,67 @@ describe('StreamProcessor', () => { processor.processChunk(ev.stepFinished('B')) processor.processChunk(ev.stepFinished('C')) - // Only one thinking part, not three + // Only one thinking part, not three (same default stepId) const parts = processor.getMessages()[0]!.parts const thinkingParts = parts.filter((p) => p.type === 'thinking') expect(thinkingParts).toHaveLength(1) expect((thinkingParts[0] as any).content).toBe('ABC') }) + + it('should create separate ThinkingParts for different stepIds', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.stepStarted('step-1')) + processor.processChunk(ev.stepFinished('First thought', 'step-1')) + processor.processChunk(ev.stepFinished(' continued', 'step-1')) + + processor.processChunk(ev.stepStarted('step-2')) + processor.processChunk(ev.stepFinished('Second thought', 'step-2')) + + const parts = processor.getMessages()[0]!.parts + const thinkingParts = parts.filter((p) => p.type === 'thinking') + expect(thinkingParts).toHaveLength(2) + expect((thinkingParts[0] as any).content).toBe('First thought continued') + expect((thinkingParts[0] as any).stepId).toBe('step-1') + expect((thinkingParts[1] as any).content).toBe('Second thought') + expect((thinkingParts[1] as any).stepId).toBe('step-2') + }) + + it('should handle STEP_FINISHED without prior STEP_STARTED (backward compat)', () => { + const processor = new StreamProcessor() + 
processor.prepareAssistantMessage() + + // No STEP_STARTED, just STEP_FINISHED with a stepId + processor.processChunk(ev.stepFinished('thinking...', 'auto-step')) + + const parts = processor.getMessages()[0]!.parts + const thinkingParts = parts.filter((p) => p.type === 'thinking') + expect(thinkingParts).toHaveLength(1) + expect((thinkingParts[0] as any).content).toBe('thinking...') + expect((thinkingParts[0] as any).stepId).toBe('auto-step') + }) + + it('getResult().thinking should concatenate all steps in order', () => { + const processor = new StreamProcessor() + processor.prepareAssistantMessage() + + processor.processChunk(ev.runStarted()) + processor.processChunk(ev.stepStarted('step-1')) + processor.processChunk(ev.stepFinished('First. ', 'step-1')) + processor.processChunk(ev.stepStarted('step-2')) + processor.processChunk(ev.stepFinished('Second.', 'step-2')) + processor.processChunk(ev.textStart()) + processor.processChunk(ev.textContent('Answer')) + processor.processChunk(ev.textEnd()) + processor.processChunk(ev.runFinished('stop')) + + processor.finalizeStream() + + const state = processor.getState() + expect(state.thinking).toBe('First. Second.') + expect(state.content).toBe('Answer') + }) }) // ========================================================================== @@ -1684,7 +1741,7 @@ describe('StreamProcessor', () => { ) }) - it('onThinkingUpdate should fire for each STEP_FINISHED delta', () => { + it('onThinkingUpdate should fire for each STEP_FINISHED delta with stepId', () => { const events = spyEvents() const processor = new StreamProcessor({ events }) processor.prepareAssistantMessage() @@ -1694,9 +1751,14 @@ describe('StreamProcessor', () => { const msgId = processor.getCurrentAssistantMessageId()! expect(events.onThinkingUpdate).toHaveBeenCalledTimes(2) - expect(events.onThinkingUpdate).toHaveBeenCalledWith(msgId, 'Thinking') expect(events.onThinkingUpdate).toHaveBeenCalledWith( msgId, + 'step-1', + 'Thinking', + ) + expect(events.onThinkingUpdate).toHaveBeenCalledWith( + msgId, + 'step-1', 'Thinking more', ) }) From ce9cfe66b40aad1e119176a66125beee919215bf Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Fri, 20 Mar 2026 21:39:06 +0000 Subject: [PATCH 02/11] ci: apply automated fixes --- packages/typescript/ai-client/src/chat-client.ts | 6 +++++- packages/typescript/ai/src/activities/chat/index.ts | 3 ++- .../typescript/ai/src/activities/chat/stream/processor.ts | 6 +++++- packages/typescript/ai/tests/message-updaters.test.ts | 7 +------ 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/packages/typescript/ai-client/src/chat-client.ts b/packages/typescript/ai-client/src/chat-client.ts index ac8a35b7b..1a03af515 100644 --- a/packages/typescript/ai-client/src/chat-client.ts +++ b/packages/typescript/ai-client/src/chat-client.ts @@ -154,7 +154,11 @@ export class ChatClient { this.events.textUpdated(this.currentStreamId, messageId, content) } }, - onThinkingUpdate: (messageId: string, _stepId: string, content: string) => { + onThinkingUpdate: ( + messageId: string, + _stepId: string, + content: string, + ) => { // Emit thinking update to devtools if (this.currentStreamId) { this.events.thinkingUpdated( diff --git a/packages/typescript/ai/src/activities/chat/index.ts b/packages/typescript/ai/src/activities/chat/index.ts index ac891c428..27564d49e 100644 --- a/packages/typescript/ai/src/activities/chat/index.ts +++ b/packages/typescript/ai/src/activities/chat/index.ts @@ -281,7 +281,8 @@ class 
TextEngine< private totalChunkCount = 0 private currentMessageId: string | null = null private accumulatedContent = '' - private accumulatedThinking: Array<{ content: string; signature?: string }> = [] + private accumulatedThinking: Array<{ content: string; signature?: string }> = + [] private currentThinkingContent = '' private currentThinkingSignature = '' private eventOptions?: Record diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts index e385a51eb..576b92729 100644 --- a/packages/typescript/ai/src/activities/chat/stream/processor.ts +++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts @@ -90,7 +90,11 @@ export interface StreamProcessorEvents { state: ToolCallState, args: string, ) => void - onThinkingUpdate?: (messageId: string, stepId: string, content: string) => void + onThinkingUpdate?: ( + messageId: string, + stepId: string, + content: string, + ) => void } /** diff --git a/packages/typescript/ai/tests/message-updaters.test.ts b/packages/typescript/ai/tests/message-updaters.test.ts index 192ffdb86..f062ee482 100644 --- a/packages/typescript/ai/tests/message-updaters.test.ts +++ b/packages/typescript/ai/tests/message-updaters.test.ts @@ -820,12 +820,7 @@ describe('message-updaters', () => { { type: 'text', content: 'Some text' }, ]), ] - const result = updateThinkingPart( - messages, - 'msg-1', - 'step-2', - 'Second', - ) + const result = updateThinkingPart(messages, 'msg-1', 'step-2', 'Second') expect(result[0]?.parts).toHaveLength(3) expect(result[0]?.parts[0]).toEqual({ From f91c5014f4c08b4df70614ed22a7d867f1af64bc Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Thu, 23 Apr 2026 13:26:48 +0200 Subject: [PATCH 03/11] fix(ai, ai-anthropic): scope interleaved-thinking betas to beta endpoint; clear stale pendingThinkingStepId - Move `betas: ['interleaved-thinking-2025-05-14']` out of the shared mapper and onto the `beta.messages.create` call site in chatStream. Prevents the non-beta structuredOutput endpoint from receiving an invalid `betas` field when a thinking budget is configured. - Clear `pendingThinkingStepId` when a later STEP_STARTED takes the active-message branch, and also in `resetStreamState`, so a stale pending id can't misattribute a later STEP_FINISHED's delta to an earlier step. - Add covering test for the pendingThinkingStepId leak (red-green verified). --- .../ai-anthropic/src/adapters/text.ts | 21 ++++++++--- .../src/activities/chat/stream/processor.ts | 7 ++++ .../ai/tests/stream-processor.test.ts | 36 +++++++++++++++++++ 3 files changed, 60 insertions(+), 4 deletions(-) diff --git a/packages/typescript/ai-anthropic/src/adapters/text.ts b/packages/typescript/ai-anthropic/src/adapters/text.ts index 5add53d8d..14aea0c96 100644 --- a/packages/typescript/ai-anthropic/src/adapters/text.ts +++ b/packages/typescript/ai-anthropic/src/adapters/text.ts @@ -28,6 +28,7 @@ import type { URLPDFSource, } from '@anthropic-ai/sdk/resources/messages' import type Anthropic_SDK from '@anthropic-ai/sdk' +import type { AnthropicBeta } from '@anthropic-ai/sdk/resources/beta/beta' import type { ContentPart, Modality, @@ -141,8 +142,23 @@ export class AnthropicTextAdapter< `activity=chat provider=anthropic model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 
0} stream=true`, { provider: 'anthropic', model: this.model }, ) + + // Interleaved thinking is only supported on the beta messages endpoint, + // so the `betas` flag is attached here rather than in the shared mapper + // (structuredOutput uses the non-beta endpoint which rejects `betas`). + const modelOptions = options.modelOptions as + | InternalTextProviderOptions + | undefined + const useInterleavedThinking = + modelOptions?.thinking?.type === 'enabled' && + typeof modelOptions.thinking.budget_tokens === 'number' && + modelOptions.thinking.budget_tokens > 0 + const betas: Array | undefined = useInterleavedThinking + ? ['interleaved-thinking-2025-05-14'] + : undefined + const stream = await this.client.beta.messages.create( - { ...requestParams, stream: true }, + { ...requestParams, stream: true, ...(betas && { betas }) }, { signal: options.request?.signal, headers: options.request?.headers, @@ -331,9 +347,6 @@ export class AnthropicTextAdapter< : undefined, tools: tools, ...validProviderOptions, - ...(thinkingBudget && { - betas: ['interleaved-thinking-2025-05-14'] as any, - }), } validateTextProviderOptions(requestParams) return requestParams diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts index 576b92729..82aa372f7 100644 --- a/packages/typescript/ai/src/activities/chat/stream/processor.ts +++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts @@ -1186,6 +1186,12 @@ export class StreamProcessor { state.thinkingSteps.set(chunk.stepId, '') state.thinkingStepOrder.push(chunk.stepId) } + // Clear any pending stepId from a prior STEP_STARTED that fired + // before the assistant message existed. Now that we're tracking + // the step directly on message state, the pending value is stale + // and must not leak into the next STEP_FINISHED (which would + // misattribute its delta to the stale step). + this.pendingThinkingStepId = null return } } @@ -1675,6 +1681,7 @@ export class StreamProcessor { this.activeRuns.clear() this.toolCallToMessage.clear() this.pendingManualMessageId = null + this.pendingThinkingStepId = null this.finishReason = null this.hasError = false this.isDone = false diff --git a/packages/typescript/ai/tests/stream-processor.test.ts b/packages/typescript/ai/tests/stream-processor.test.ts index d95e69a44..a2da0df32 100644 --- a/packages/typescript/ai/tests/stream-processor.test.ts +++ b/packages/typescript/ai/tests/stream-processor.test.ts @@ -838,6 +838,42 @@ describe('StreamProcessor', () => { expect(state.thinking).toBe('First. Second.') expect(state.content).toBe('Answer') }) + + it('should clear pendingThinkingStepId when a later STEP_STARTED arrives with an active message', () => { + const processor = new StreamProcessor() + + // 1. STEP_STARTED arrives before any assistant message exists → + // pendingThinkingStepId = 'step-a' + processor.processChunk(ev.stepStarted('step-a')) + + // 2. Assistant message gets created by TEXT_MESSAGE_START (note: + // prepareAssistantMessage() would reset stream state, which we + // don't want here — we want to expose the leak across the + // no-active → active transition). + processor.processChunk(ev.textStart()) + + // 3. STEP_STARTED arrives again — takes active-id branch. It MUST + // clear pendingThinkingStepId, otherwise the stale 'step-a' + // value will be consumed by the next STEP_FINISHED. + processor.processChunk(ev.stepStarted('step-b')) + + // 4. STEP_FINISHED for step-b. 
If pendingThinkingStepId still held + // 'step-a', handleStepFinishedEvent would promote it to + // state.currentThinkingStepId and attribute 'contentB' to step-a. + processor.processChunk(ev.stepFinished('contentB', 'step-b')) + + const parts = processor.getMessages()[0]!.parts + const thinkingParts = parts.filter((p) => p.type === 'thinking') + + // Only step-b should have produced a ThinkingPart with contentB. + // No phantom step-a part should exist. + expect( + thinkingParts.some((p) => (p as any).stepId === 'step-a'), + ).toBe(false) + expect(thinkingParts).toHaveLength(1) + expect((thinkingParts[0] as any).stepId).toBe('step-b') + expect((thinkingParts[0] as any).content).toBe('contentB') + }) }) // ========================================================================== From dcb8ae292f1c58ee8ba7204392dbcf65209dadc4 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Thu, 23 Apr 2026 13:50:35 +0200 Subject: [PATCH 04/11] test(e2e), changeset: multi-step thinking scenario and release note - Add `thinking-multi-step` mock scenario to the e2e harness emitting STEP_STARTED/STEP_FINISHED pairs for two distinct stepIds with provider signatures, followed by a text message and RUN_FINISHED. - Expose thinkingPartCount / thinkingStepIds on the mock chat page via data-* attributes for assertion. - Add tests/thinking.spec.ts asserting two ThinkingParts with distinct stepIds and matching signatures are produced (pre-PR behavior merged them into a single part). - Add .changeset/thinking-blocks-per-step.md bumping @tanstack/ai, @tanstack/ai-anthropic, and @tanstack/ai-client. --- .changeset/thinking-blocks-per-step.md | 28 ++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 .changeset/thinking-blocks-per-step.md diff --git a/.changeset/thinking-blocks-per-step.md b/.changeset/thinking-blocks-per-step.md new file mode 100644 index 000000000..14fdc0315 --- /dev/null +++ b/.changeset/thinking-blocks-per-step.md @@ -0,0 +1,28 @@ +--- +'@tanstack/ai': patch +'@tanstack/ai-anthropic': patch +'@tanstack/ai-client': patch +--- + +**Fix thinking blocks getting merged across steps and lost on turn 2+ of Anthropic tool loops.** + +Each thinking step emitted by the adapter now produces its own `ThinkingPart` on the `UIMessage` instead of being merged into a single part, and thinking content + Anthropic signatures are preserved in server-side message history so multi-turn tool flows with extended thinking work correctly. + +`@tanstack/ai`: + +- `ThinkingPart` gains optional `stepId` and `signature` fields. +- `ModelMessage` gains an optional `thinking?: Array<{ content; signature? }>` field so prior thinking can be replayed in subsequent turns. +- `StepFinishedEvent` gains an optional `signature` field for provider-supplied thinking signatures. +- `StreamProcessor` tracks thinking per-step via `stepId` and keeps step ordering. `getState().thinking` / `getResult().thinking` concatenate step contents in order. +- The `onThinkingUpdate` callback on `StreamProcessorEvents` now receives `(messageId, stepId, content)` — consumers implementing it directly must add the `stepId` parameter. +- `TextEngine` accumulates thinking + signatures per iteration and includes them in assistant messages with tool calls so the next turn can replay them. + +`@tanstack/ai-anthropic`: + +- Captures `signature_delta` stream events and emits the final `STEP_FINISHED` with the signature on `content_block_stop`. +- Includes thinking blocks with signatures in `formatMessages` for multi-turn history. 
+- Passes `betas: ['interleaved-thinking-2025-05-14']` to the `beta.messages.create` call site when a thinking budget is configured. The beta flag is scoped to the streaming path only, so `structuredOutput` (which uses the non-beta `messages.create` endpoint) is unaffected. + +`@tanstack/ai-client`: + +- `ChatClient`'s internal `onThinkingUpdate` wiring is updated for the new `stepId` parameter. From 883ceba5ae556320674ee37ab17197741a312a9d Mon Sep 17 00:00:00 2001 From: imsherr Date: Tue, 28 Apr 2026 14:37:40 -0700 Subject: [PATCH 05/11] fix(ai): consume pending stepId in REASONING_MESSAGE_CONTENT When STEP_STARTED arrives before the assistant message exists, its stepId is stashed in pendingThinkingStepId. handleStepFinishedEvent already consumes it, but handleReasoningMessageContentEvent did not, so reasoning deltas were keyed by the reasoning messageId and the matching signature from STEP_FINISHED landed on a different ThinkingPart. With Anthropic's interleaved thinking around tool calls this produced two ThinkingParts per block (one unsigned content, one signed empty). Consume the pending stepId here too so both event paths attribute to the same step. --- .../src/activities/chat/stream/processor.ts | 7 ++-- .../ai/tests/stream-processor.test.ts | 37 +++++++++++++++++++ 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts index 82aa372f7..0f88c9493 100644 --- a/packages/typescript/ai/src/activities/chat/stream/processor.ts +++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts @@ -547,7 +547,9 @@ export class StreamProcessor { break case 'STEP_STARTED': - this.handleStepStartedEvent(chunk) + this.handleStepStartedEvent( + chunk as Extract, + ) break default: @@ -1283,7 +1285,6 @@ export class StreamProcessor { * * Accumulates reasoning delta into thinking content and updates the * corresponding ThinkingPart in the UIMessage. - * in the UIMessage. */ private handleReasoningMessageContentEvent( chunk: Extract, @@ -1304,7 +1305,7 @@ export class StreamProcessor { this.pendingThinkingStepId = null } - const stepId = state.currentThinkingStepId ?? 'reasoning' + const stepId = state.currentThinkingStepId ?? chunk.messageId if (!state.thinkingSteps.has(stepId)) { state.thinkingSteps.set(stepId, '') state.thinkingStepOrder.push(stepId) diff --git a/packages/typescript/ai/tests/stream-processor.test.ts b/packages/typescript/ai/tests/stream-processor.test.ts index a2da0df32..5678e6cd2 100644 --- a/packages/typescript/ai/tests/stream-processor.test.ts +++ b/packages/typescript/ai/tests/stream-processor.test.ts @@ -3121,6 +3121,43 @@ describe('StreamProcessor', () => { expect(textPart!.content).toBe('Answer') }) + it('should attribute reasoning content to pending STEP_STARTED stepId', () => { + const processor = new StreamProcessor() + + processor.processChunk(ev.runStarted()) + processor.processChunk(chunk('REASONING_START', { messageId: 'r-1' })) + processor.processChunk( + chunk('REASONING_MESSAGE_START', { + messageId: 'r-1', + role: 'reasoning', + }), + ) + processor.processChunk(ev.stepStarted('step-1')) + processor.processChunk( + chunk('REASONING_MESSAGE_CONTENT', { + messageId: 'r-1', + delta: 'Thinking...', + }), + ) + processor.processChunk( + chunk('STEP_FINISHED', { + stepName: 'step-1', + stepId: 'step-1', + content: 'Thinking...', + signature: 'sig-step-1', + }), + ) + + const thinkingParts = processor + .getMessages()[0]! 
+ .parts.filter((p) => p.type === 'thinking') + + expect(thinkingParts).toHaveLength(1) + expect((thinkingParts[0] as any).stepId).toBe('step-1') + expect((thinkingParts[0] as any).content).toBe('Thinking...') + expect((thinkingParts[0] as any).signature).toBe('sig-step-1') + }) + it('should handle REASONING events without errors when no matching message', () => { const events = spyEvents() const processor = new StreamProcessor({ events }) From ce858b4f796ae192697dc99a8a1e745d85c3c723 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Tue, 28 Apr 2026 21:38:50 +0000 Subject: [PATCH 06/11] ci: apply automated fixes --- packages/typescript/ai/tests/stream-processor.test.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/typescript/ai/tests/stream-processor.test.ts b/packages/typescript/ai/tests/stream-processor.test.ts index 5678e6cd2..d9d0836ee 100644 --- a/packages/typescript/ai/tests/stream-processor.test.ts +++ b/packages/typescript/ai/tests/stream-processor.test.ts @@ -867,9 +867,9 @@ describe('StreamProcessor', () => { // Only step-b should have produced a ThinkingPart with contentB. // No phantom step-a part should exist. - expect( - thinkingParts.some((p) => (p as any).stepId === 'step-a'), - ).toBe(false) + expect(thinkingParts.some((p) => (p as any).stepId === 'step-a')).toBe( + false, + ) expect(thinkingParts).toHaveLength(1) expect((thinkingParts[0] as any).stepId).toBe('step-b') expect((thinkingParts[0] as any).content).toBe('contentB') From 13c1ef9a8a90e1800d29d9b0a28f930e900dd517 Mon Sep 17 00:00:00 2001 From: imsherr Date: Tue, 28 Apr 2026 14:41:41 -0700 Subject: [PATCH 07/11] refactor(ai): extract consumePendingThinkingStep helper Both handleStepFinishedEvent and handleReasoningMessageContentEvent need to promote a pending stepId from a STEP_STARTED that arrived before the assistant message existed. Pull the shared logic into a small private helper instead of duplicating it. --- .../src/activities/chat/stream/processor.ts | 36 ++++++++++--------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts index 0f88c9493..ee843731d 100644 --- a/packages/typescript/ai/src/activities/chat/stream/processor.ts +++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts @@ -596,6 +596,23 @@ export class StreamProcessor { return this.messageStates.get(messageId) } + /** + * Promote a pending stepId from a STEP_STARTED that fired before the + * assistant message existed onto the given message state, so the next + * thinking event (STEP_FINISHED or REASONING_MESSAGE_CONTENT) attributes + * to the correct step. + */ + private consumePendingThinkingStep(state: MessageStreamState): void { + if (!this.pendingThinkingStepId) return + const stepId = this.pendingThinkingStepId + state.currentThinkingStepId = stepId + if (!state.thinkingSteps.has(stepId)) { + state.thinkingSteps.set(stepId, '') + state.thinkingStepOrder.push(stepId) + } + this.pendingThinkingStepId = null + } + /** * Get the most recent active assistant message ID. * Used as fallback for events that don't include a messageId. 
@@ -1225,15 +1242,7 @@ export class StreamProcessor { return } - // Consume pending stepId from STEP_STARTED that arrived before the message existed - if (this.pendingThinkingStepId) { - state.currentThinkingStepId = this.pendingThinkingStepId - if (!state.thinkingSteps.has(this.pendingThinkingStepId)) { - state.thinkingSteps.set(this.pendingThinkingStepId, '') - state.thinkingStepOrder.push(this.pendingThinkingStepId) - } - this.pendingThinkingStepId = null - } + this.consumePendingThinkingStep(state) const stepId = state.currentThinkingStepId ?? chunk.stepId @@ -1296,14 +1305,7 @@ export class StreamProcessor { state.hasSeenReasoningEvents = true const delta = chunk.delta || '' - if (this.pendingThinkingStepId) { - state.currentThinkingStepId = this.pendingThinkingStepId - if (!state.thinkingSteps.has(this.pendingThinkingStepId)) { - state.thinkingSteps.set(this.pendingThinkingStepId, '') - state.thinkingStepOrder.push(this.pendingThinkingStepId) - } - this.pendingThinkingStepId = null - } + this.consumePendingThinkingStep(state) const stepId = state.currentThinkingStepId ?? chunk.messageId if (!state.thinkingSteps.has(stepId)) { From e26aef1ccc95abf1b4b43b05725f8eb1636c669d Mon Sep 17 00:00:00 2001 From: imsherr Date: Tue, 28 Apr 2026 14:48:20 -0700 Subject: [PATCH 08/11] fix(ai-anthropic): wrap signed STEP_FINISHED yield in asChunk The bare yield bypassed the file's existing StreamChunk cast helper, so after the merge from main StepFinishedEvent now extends the stricter AG-UI base (requires stepName, no signature) and CI failed to build. Use asChunk like every other yield in this generator and add stepName. --- packages/typescript/ai-anthropic/src/adapters/text.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/typescript/ai-anthropic/src/adapters/text.ts b/packages/typescript/ai-anthropic/src/adapters/text.ts index 14aea0c96..0baa9e976 100644 --- a/packages/typescript/ai-anthropic/src/adapters/text.ts +++ b/packages/typescript/ai-anthropic/src/adapters/text.ts @@ -784,6 +784,7 @@ export class AnthropicTextAdapter< if (accumulatedSignature && stepId) { yield asChunk({ type: 'STEP_FINISHED', + stepName: stepId, stepId, model, timestamp, From c53d501325dec42223b29cc771a78893ad9d4bd2 Mon Sep 17 00:00:00 2001 From: imsherr Date: Tue, 5 May 2026 21:47:26 -0400 Subject: [PATCH 09/11] test(ai): cover multi-turn anthropic reasoning --- .../ai-anthropic/src/adapters/text.ts | 52 ++++++-- .../tests/anthropic-adapter.test.ts | 124 ++++++++++++++++++ .../ai/src/activities/chat/messages.ts | 11 ++ .../src/activities/chat/stream/processor.ts | 30 ++++- .../src/activities/chat/tools/tool-calls.ts | 2 +- packages/typescript/ai/tests/chat.test.ts | 87 ++++++++++++ .../ai/tests/message-converters.test.ts | 34 ++++- .../ai/tests/stream-processor.test.ts | 14 +- .../fixtures/multi-turn-reasoning/basic.json | 54 ++++++++ testing/e2e/src/lib/feature-support.ts | 1 + testing/e2e/src/lib/features.ts | 9 ++ testing/e2e/src/lib/types.ts | 2 + .../e2e/tests/multi-turn-reasoning.spec.ts | 45 +++++++ 13 files changed, 443 insertions(+), 22 deletions(-) create mode 100644 testing/e2e/fixtures/multi-turn-reasoning/basic.json create mode 100644 testing/e2e/tests/multi-turn-reasoning.spec.ts diff --git a/packages/typescript/ai-anthropic/src/adapters/text.ts b/packages/typescript/ai-anthropic/src/adapters/text.ts index 0baa9e976..f057a18a4 100644 --- a/packages/typescript/ai-anthropic/src/adapters/text.ts +++ b/packages/typescript/ai-anthropic/src/adapters/text.ts @@ -447,17 +447,7 @@ export class 
AnthropicTextAdapter< if (role === 'assistant' && message.toolCalls?.length) { const contentBlocks: AnthropicContentBlocks = [] - if (message.thinking?.length) { - for (const thinking of message.thinking) { - if (thinking.signature) { - contentBlocks.push({ - type: 'thinking', - thinking: thinking.content, - signature: thinking.signature, - } as unknown as AnthropicContentBlock) - } - } - } + this.appendThinkingBlocks(contentBlocks, message.thinking) if (message.content) { const content = @@ -497,6 +487,28 @@ export class AnthropicTextAdapter< continue } + if (role === 'assistant') { + const contentBlocks: AnthropicContentBlocks = [] + this.appendThinkingBlocks(contentBlocks, message.thinking) + + if (Array.isArray(message.content)) { + for (const part of message.content) { + contentBlocks.push(this.convertContentPartToAnthropic(part)) + } + } else if (message.content) { + contentBlocks.push({ + type: 'text', + text: message.content, + }) + } + + formattedMessages.push({ + role: 'assistant', + content: contentBlocks.length > 0 ? contentBlocks : '', + }) + continue + } + if (role === 'user' && Array.isArray(message.content)) { const contentBlocks = message.content.map((part) => this.convertContentPartToAnthropic(part), @@ -509,7 +521,7 @@ export class AnthropicTextAdapter< } formattedMessages.push({ - role: role === 'assistant' ? 'assistant' : 'user', + role: 'user', content: typeof message.content === 'string' ? message.content @@ -527,6 +539,22 @@ export class AnthropicTextAdapter< return this.mergeConsecutiveSameRoleMessages(formattedMessages) } + private appendThinkingBlocks( + contentBlocks: AnthropicContentBlocks, + thinkingParts: ModelMessage['thinking'], + ): void { + if (!thinkingParts?.length) return + + for (const thinking of thinkingParts) { + if (!thinking.signature) continue + contentBlocks.push({ + type: 'thinking', + thinking: thinking.content, + signature: thinking.signature, + } as unknown as AnthropicContentBlock) + } + } + /** * Merge consecutive messages of the same role into a single message. * Anthropic's API requires strictly alternating user/assistant roles. diff --git a/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts b/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts index 5368381c3..b0f330bee 100644 --- a/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts +++ b/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts @@ -49,6 +49,28 @@ const weatherTool: Tool = { }), } +function createTextStream(text: string) { + return (async function* () { + yield { + type: 'content_block_start', + index: 0, + content_block: { type: 'text', text: '' }, + } + yield { + type: 'content_block_delta', + index: 0, + delta: { type: 'text_delta', text }, + } + yield { type: 'content_block_stop', index: 0 } + yield { + type: 'message_delta', + delta: { stop_reason: 'end_turn' }, + usage: { output_tokens: 5 }, + } + yield { type: 'message_stop' } + })() +} + describe('Anthropic adapter option mapping', () => { beforeEach(() => { vi.clearAllMocks() @@ -377,6 +399,108 @@ describe('Anthropic adapter option mapping', () => { } }) + it('replays signed thinking blocks before tool use in multi-turn history', async () => { + mocks.betaMessagesCreate.mockResolvedValueOnce( + createTextStream('Follow-up answer'), + ) + + const adapter = createAdapter('claude-3-7-sonnet-20250219') + + const chunks: StreamChunk[] = [] + for await (const chunk of chat({ + adapter, + messages: [ + { role: 'user', content: 'What is the weather in Berlin?' 
}, + { + role: 'assistant', + content: null, + thinking: [ + { + content: 'Need to fetch weather before answering.', + signature: 'signed-thinking-1', + }, + ], + toolCalls: [ + { + id: 'call_1', + type: 'function', + function: { name: 'lookup_weather', arguments: toolArguments }, + }, + ], + }, + { role: 'tool', toolCallId: 'call_1', content: '{"temp":72}' }, + { role: 'user', content: 'What should I wear?' }, + ], + tools: [weatherTool], + modelOptions: { + thinking: { type: 'enabled', budget_tokens: 1024 }, + } as AnthropicTextProviderOptions, + })) { + chunks.push(chunk) + } + + expect(mocks.betaMessagesCreate).toHaveBeenCalledTimes(1) + const [payload] = mocks.betaMessagesCreate.mock.calls[0] + + expect(payload.betas).toEqual(['interleaved-thinking-2025-05-14']) + expect(payload.messages[1].content).toEqual([ + { + type: 'thinking', + thinking: 'Need to fetch weather before answering.', + signature: 'signed-thinking-1', + }, + { + type: 'tool_use', + id: 'call_1', + name: 'lookup_weather', + input: { location: 'Berlin' }, + }, + ]) + }) + + it('replays signed thinking blocks for assistant messages without tool calls', async () => { + mocks.betaMessagesCreate.mockResolvedValueOnce( + createTextStream('Next answer'), + ) + + const adapter = createAdapter('claude-3-7-sonnet-20250219') + + const chunks: StreamChunk[] = [] + for await (const chunk of chat({ + adapter, + messages: [ + { role: 'user', content: 'Think then answer.' }, + { + role: 'assistant', + content: 'Prior answer.', + thinking: [ + { + content: 'Prior signed thinking.', + signature: 'signed-thinking-text-only', + }, + ], + }, + { role: 'user', content: 'Continue.' }, + ], + modelOptions: { + thinking: { type: 'enabled', budget_tokens: 1024 }, + } as AnthropicTextProviderOptions, + })) { + chunks.push(chunk) + } + + const [payload] = mocks.betaMessagesCreate.mock.calls[0] + + expect(payload.messages[1].content).toEqual([ + { + type: 'thinking', + thinking: 'Prior signed thinking.', + signature: 'signed-thinking-text-only', + }, + { type: 'text', text: 'Prior answer.' }, + ]) + }) + it('merges multiple consecutive tool result messages into one user message', async () => { // When multiple tools are called, each tool result becomes a role:'user' message. // These must be merged into a single user message. 
diff --git a/packages/typescript/ai/src/activities/chat/messages.ts b/packages/typescript/ai/src/activities/chat/messages.ts index cecf16920..893b51943 100644 --- a/packages/typescript/ai/src/activities/chat/messages.ts +++ b/packages/typescript/ai/src/activities/chat/messages.ts @@ -317,6 +317,17 @@ export function modelMessageToUIMessage( ): UIMessage { const parts: Array = [] + if (modelMessage.role === 'assistant' && modelMessage.thinking?.length) { + for (const thinking of modelMessage.thinking) { + if (!thinking.content) continue + parts.push({ + type: 'thinking', + content: thinking.content, + ...(thinking.signature && { signature: thinking.signature }), + }) + } + } + // Handle tool results (when role is "tool") - only produce tool-result part, // not a text part (the content IS the tool result, not display text) if (modelMessage.role === 'tool' && modelMessage.toolCallId) { diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts index ee843731d..34daa549d 100644 --- a/packages/typescript/ai/src/activities/chat/stream/processor.ts +++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts @@ -1196,14 +1196,15 @@ export class StreamProcessor { private handleStepStartedEvent( chunk: Extract, ): void { + const stepId = chunk.stepId ?? generateMessageId() const activeId = this.getActiveAssistantMessageId() if (activeId) { const state = this.getMessageState(activeId) if (state) { - state.currentThinkingStepId = chunk.stepId - if (!state.thinkingSteps.has(chunk.stepId)) { - state.thinkingSteps.set(chunk.stepId, '') - state.thinkingStepOrder.push(chunk.stepId) + state.currentThinkingStepId = stepId + if (!state.thinkingSteps.has(stepId)) { + state.thinkingSteps.set(stepId, '') + state.thinkingStepOrder.push(stepId) } // Clear any pending stepId from a prior STEP_STARTED that fired // before the assistant message existed. Now that we're tracking @@ -1216,7 +1217,7 @@ export class StreamProcessor { } // No active message yet — defer until ensureAssistantMessage in STEP_FINISHED - this.pendingThinkingStepId = chunk.stepId + this.pendingThinkingStepId = stepId } /** @@ -1239,12 +1240,29 @@ export class StreamProcessor { // REASONING_MESSAGE_CONTENT events for this message, skip the duplicate // thinking content from STEP_FINISHED to avoid doubled content. if (state.hasSeenReasoningEvents) { + if (chunk.signature) { + const stepId = state.currentThinkingStepId ?? chunk.stepId + if (!stepId) return + const thinking = state.thinkingSteps.get(stepId) + if (thinking !== undefined) { + state.thinkingStepSignatures.set(stepId, chunk.signature) + this.messages = updateThinkingPart( + this.messages, + messageId, + stepId, + thinking, + chunk.signature, + ) + this.emitMessagesChange() + } + } return } this.consumePendingThinkingStep(state) - const stepId = state.currentThinkingStepId ?? chunk.stepId + const stepId = + state.currentThinkingStepId ?? chunk.stepId ?? 
generateMessageId() // Auto-initialize if no prior STEP_STARTED (backward compat) if (!state.thinkingSteps.has(stepId)) { diff --git a/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts b/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts index 5ed3d9cdf..398ce0170 100644 --- a/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts +++ b/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts @@ -93,7 +93,7 @@ export class ToolCallManager { */ addToolCallStartEvent(event: ToolCallStartEvent): void { const index = event.index ?? this.toolCallsMap.size - const name = event.toolCallName + const name = event.toolCallName ?? event.toolName this.toolCallsMap.set(index, { id: event.toolCallId, type: 'function', diff --git a/packages/typescript/ai/tests/chat.test.ts b/packages/typescript/ai/tests/chat.test.ts index c360a2b55..5890c4d89 100644 --- a/packages/typescript/ai/tests/chat.test.ts +++ b/packages/typescript/ai/tests/chat.test.ts @@ -2,6 +2,7 @@ import { describe, expect, it, vi } from 'vitest' import { chat, createChatOptions } from '../src/activities/chat/index' import type { StreamChunk, Tool } from '../src/types' import { + chunk, ev, createMockAdapter, collectChunks, @@ -1234,6 +1235,92 @@ describe('chat()', () => { expect(tool2Spy).toHaveBeenCalledTimes(1) expect(calls).toHaveLength(3) }) + + it('should preserve signed thinking in continuation message history after a tool call', async () => { + const toolSpy = vi.fn().mockReturnValue({ result: 'inventory' }) + + const { adapter, calls } = createMockAdapter({ + iterations: [ + [ + ev.runStarted(), + ev.stepStarted('think-1'), + { + ...ev.stepFinished('Need inventory.', 'think-1'), + signature: 'sig-think-1', + } as StreamChunk, + ev.toolStart('call_1', 'getInventory'), + ev.toolArgs('call_1', '{}'), + ev.runFinished('tool_calls'), + ], + [ + ev.runStarted(), + ev.textStart(), + ev.textContent('Inventory loaded.'), + ev.textEnd(), + ev.runFinished('stop'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Check inventory' }], + tools: [serverTool('getInventory', toolSpy)], + }) + + await collectChunks(stream as AsyncIterable) + + expect(toolSpy).toHaveBeenCalledTimes(1) + expect(calls).toHaveLength(2) + + const continuationMessages = calls[1]!.messages as Array + const assistantToolMessage = continuationMessages.find( + (message) => + message.role === 'assistant' && + message.toolCalls?.[0]?.id === 'call_1', + ) + + expect(assistantToolMessage?.thinking).toEqual([ + { content: 'Need inventory.', signature: 'sig-think-1' }, + ]) + }) + + it('should execute tool calls that only provide the deprecated toolName field', async () => { + const toolSpy = vi.fn().mockReturnValue({ result: 'inventory' }) + + const { adapter, calls } = createMockAdapter({ + iterations: [ + [ + ev.runStarted(), + chunk('TOOL_CALL_START', { + toolCallId: 'call_1', + toolName: 'getInventory', + }), + ev.toolArgs('call_1', '{}'), + ev.toolEnd('call_1', 'getInventory'), + ev.runFinished('tool_calls'), + ], + [ + ev.runStarted(), + ev.textStart(), + ev.textContent('Inventory loaded.'), + ev.textEnd(), + ev.runFinished('stop'), + ], + ], + }) + + const stream = chat({ + adapter, + messages: [{ role: 'user', content: 'Check inventory' }], + tools: [serverTool('getInventory', toolSpy)], + }) + + await collectChunks(stream as AsyncIterable) + + expect(toolSpy).toHaveBeenCalledTimes(1) + expect(calls).toHaveLength(2) + }) }) // 

 // ==========================================================================
diff --git a/packages/typescript/ai/tests/message-converters.test.ts b/packages/typescript/ai/tests/message-converters.test.ts
index 76f55270a..0a49d9bec 100644
--- a/packages/typescript/ai/tests/message-converters.test.ts
+++ b/packages/typescript/ai/tests/message-converters.test.ts
@@ -184,7 +184,7 @@ describe('Message Converters', () => {
       expect(contentParts[3]?.type).toBe('text')
     })

-    it('should skip thinking parts in conversion', () => {
+    it('should preserve thinking parts in conversion', () => {
       const uiMessage: UIMessage = {
         id: 'msg-1',
         role: 'assistant',
@@ -198,6 +198,7 @@ describe('Message Converters', () => {

       expect(result.length).toBe(1)
       expect(result[0]?.content).toBe('Here is my answer')
+      expect(result[0]?.thinking).toEqual([{ content: 'Let me think...' }])
     })

     it('should skip system messages', () => {
@@ -584,6 +585,37 @@ describe('Message Converters', () => {
       ])
     })

+    it('should convert assistant thinking into UIMessage parts', () => {
+      const modelMessage: ModelMessage = {
+        role: 'assistant',
+        content: 'Here is my answer.',
+        thinking: [
+          {
+            content: 'Signed thought.',
+            signature: 'sig-1',
+          },
+          {
+            content: 'Unsigned thought.',
+          },
+        ],
+      }
+
+      const result = modelMessageToUIMessage(modelMessage)
+
+      expect(result.parts).toEqual([
+        {
+          type: 'thinking',
+          content: 'Signed thought.',
+          signature: 'sig-1',
+        },
+        {
+          type: 'thinking',
+          content: 'Unsigned thought.',
+        },
+        { type: 'text', content: 'Here is my answer.' },
+      ])
+    })
+
     it('should convert assistant message with toolCalls and null content', () => {
       const modelMessage: ModelMessage = {
         role: 'assistant',
diff --git a/packages/typescript/ai/tests/stream-processor.test.ts b/packages/typescript/ai/tests/stream-processor.test.ts
index d9d0836ee..85136ef70 100644
--- a/packages/typescript/ai/tests/stream-processor.test.ts
+++ b/packages/typescript/ai/tests/stream-processor.test.ts
@@ -789,19 +789,27 @@ describe('StreamProcessor', () => {
       processor.prepareAssistantMessage()

       processor.processChunk(ev.stepStarted('step-1'))
-      processor.processChunk(ev.stepFinished('First thought', 'step-1'))
+      processor.processChunk({
+        ...ev.stepFinished('First thought', 'step-1'),
+        signature: 'sig-step-1',
+      } as StreamChunk)
       processor.processChunk(ev.stepFinished(' continued', 'step-1'))
       processor.processChunk(ev.stepStarted('step-2'))
-      processor.processChunk(ev.stepFinished('Second thought', 'step-2'))
+      processor.processChunk({
+        ...ev.stepFinished('Second thought', 'step-2'),
+        signature: 'sig-step-2',
+      } as StreamChunk)

       const parts = processor.getMessages()[0]!.parts
       const thinkingParts = parts.filter((p) => p.type === 'thinking')
       expect(thinkingParts).toHaveLength(2)
       expect((thinkingParts[0] as any).content).toBe('First thought continued')
       expect((thinkingParts[0] as any).stepId).toBe('step-1')
+      expect((thinkingParts[0] as any).signature).toBe('sig-step-1')
       expect((thinkingParts[1] as any).content).toBe('Second thought')
       expect((thinkingParts[1] as any).stepId).toBe('step-2')
+      expect((thinkingParts[1] as any).signature).toBe('sig-step-2')
     })

     it('should handle STEP_FINISHED without prior STEP_STARTED (backward compat)', () => {
@@ -3070,11 +3078,13 @@ describe('StreamProcessor', () => {
       expect(events.onThinkingUpdate).toHaveBeenNthCalledWith(
         1,
         expect.any(String),
+        'r-1',
         'Let me think',
       )
       expect(events.onThinkingUpdate).toHaveBeenNthCalledWith(
         2,
         expect.any(String),
+        'r-1',
         'Let me think about this...',
       )
     })
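Taken together, the unit tests above pin down the message shape that makes turn 2+ work: the continuation-turn assistant message carries its tool calls and its per-step thinking (content plus Anthropic signature) side by side. A rough sketch of such a message follows; only the fields asserted in the tests (`thinking` content/signature, tool call `id` and `type`) come from this patch, while the nested `function` object and the empty `content` are assumptions for illustration.

```ts
// Sketch of a continuation-turn assistant message, not code from the repo.
// The thinking entry and toolCalls id/type mirror the test assertions above;
// the nested `function` shape is an illustrative assumption.
const assistantTurn = {
  role: 'assistant' as const,
  content: '',
  thinking: [{ content: 'Need inventory.', signature: 'sig-think-1' }],
  toolCalls: [
    {
      id: 'call_1',
      type: 'function' as const,
      function: { name: 'getInventory', arguments: '{}' },
    },
  ],
}
```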
diff --git a/testing/e2e/fixtures/multi-turn-reasoning/basic.json b/testing/e2e/fixtures/multi-turn-reasoning/basic.json
new file mode 100644
index 000000000..d89baf067
--- /dev/null
+++ b/testing/e2e/fixtures/multi-turn-reasoning/basic.json
@@ -0,0 +1,54 @@
+{
+  "fixtures": [
+    {
+      "match": {
+        "userMessage": "[multi-turn-reasoning] recommend a beginner guitar",
+        "sequenceIndex": 0
+      },
+      "response": {
+        "reasoning": "The user wants a beginner guitar recommendation. I should inspect inventory with getGuitars before recommending anything.",
+        "toolCalls": [
+          {
+            "name": "getGuitars",
+            "arguments": "{}"
+          }
+        ]
+      }
+    },
+    {
+      "match": {
+        "userMessage": "[multi-turn-reasoning] recommend a beginner guitar",
+        "sequenceIndex": 1
+      },
+      "response": {
+        "reasoning": "The inventory shows several premium guitars. The Fender Stratocaster is the lowest-priced and most versatile choice for a beginner.",
+        "content": "I recommend the Fender Stratocaster for a beginner. It is the most affordable guitar in stock at $1,299 and is versatile enough for many styles."
+      }
+    },
+    {
+      "match": {
+        "userMessage": "[multi-turn-reasoning-followup] compare cheapest and premium",
+        "sequenceIndex": 0
+      },
+      "response": {
+        "reasoning": "The user is asking a follow-up, and I should use the inventory tool again to compare the cheapest and premium options from current data.",
+        "toolCalls": [
+          {
+            "name": "getGuitars",
+            "arguments": "{}"
+          }
+        ]
+      }
+    },
+    {
+      "match": {
+        "userMessage": "[multi-turn-reasoning-followup] compare cheapest and premium",
+        "sequenceIndex": 1
+      },
+      "response": {
+        "reasoning": "The Fender Stratocaster is cheapest at $1,299, while the Taylor 814ce is the most premium by price at $3,299.",
+        "content": "The cheapest option is the Fender Stratocaster at $1,299. The premium option is the Taylor 814ce at $3,299, which is an acoustic-electric with exceptional clarity."
+ } + } + ] +} diff --git a/testing/e2e/src/lib/feature-support.ts b/testing/e2e/src/lib/feature-support.ts index 96754a286..25a8b4d43 100644 --- a/testing/e2e/src/lib/feature-support.ts +++ b/testing/e2e/src/lib/feature-support.ts @@ -27,6 +27,7 @@ export const matrix: Record> = { 'openrouter', ]), reasoning: new Set(['openai', 'anthropic', 'gemini']), + 'multi-turn-reasoning': new Set(['anthropic']), 'multi-turn': new Set([ 'openai', 'anthropic', diff --git a/testing/e2e/src/lib/features.ts b/testing/e2e/src/lib/features.ts index 340b0a81f..a987930e9 100644 --- a/testing/e2e/src/lib/features.ts +++ b/testing/e2e/src/lib/features.ts @@ -25,6 +25,15 @@ export const featureConfigs: Record = { anthropic: 'claude-sonnet-4-5', }, }, + 'multi-turn-reasoning': { + tools: [getGuitars], + modelOptions: { + thinking: { type: 'enabled', budget_tokens: 1024 }, + }, + modelOverrides: { + anthropic: 'claude-sonnet-4-5', + }, + }, 'multi-turn': { tools: [], modelOptions: {}, diff --git a/testing/e2e/src/lib/types.ts b/testing/e2e/src/lib/types.ts index c62dd1018..eafe588fc 100644 --- a/testing/e2e/src/lib/types.ts +++ b/testing/e2e/src/lib/types.ts @@ -14,6 +14,7 @@ export type Feature = | 'chat' | 'one-shot-text' | 'reasoning' + | 'multi-turn-reasoning' | 'multi-turn' | 'tool-calling' | 'parallel-tool-calls' @@ -47,6 +48,7 @@ export const ALL_FEATURES: Feature[] = [ 'chat', 'one-shot-text', 'reasoning', + 'multi-turn-reasoning', 'multi-turn', 'tool-calling', 'parallel-tool-calls', diff --git a/testing/e2e/tests/multi-turn-reasoning.spec.ts b/testing/e2e/tests/multi-turn-reasoning.spec.ts new file mode 100644 index 000000000..82830e46e --- /dev/null +++ b/testing/e2e/tests/multi-turn-reasoning.spec.ts @@ -0,0 +1,45 @@ +import { test, expect } from './fixtures' +import { + featureUrl, + sendMessage, + waitForAssistantText, + waitForResponse, +} from './helpers' +import { providersFor } from './test-matrix' + +for (const provider of providersFor('multi-turn-reasoning')) { + test.describe(`${provider} — multi-turn-reasoning`, () => { + test('shows reasoning before each model turn in a multi-turn tool conversation', async ({ + page, + testId, + aimockPort, + }) => { + await page.goto( + featureUrl(provider, 'multi-turn-reasoning', testId, aimockPort), + ) + + await sendMessage( + page, + '[multi-turn-reasoning] recommend a beginner guitar', + ) + await waitForResponse(page) + await waitForAssistantText(page, 'Fender Stratocaster') + + const thinkingBlocks = page.getByTestId('thinking-block') + await expect + .poll(async () => (await thinkingBlocks.allInnerTexts()).join(' ')) + .toContain('inventory') + + await sendMessage( + page, + '[multi-turn-reasoning-followup] compare cheapest and premium', + ) + await waitForResponse(page) + await waitForAssistantText(page, 'Taylor 814ce') + + await expect + .poll(async () => (await thinkingBlocks.allInnerTexts()).join(' ')) + .toContain('Taylor 814ce') + }) + }) +} From f1df71eed23dbe4c0f6bc0b410cc314d266a0870 Mon Sep 17 00:00:00 2001 From: imsherr Date: Tue, 5 May 2026 21:55:27 -0400 Subject: [PATCH 10/11] chore: adjust thinking callback changeset bump --- .changeset/thinking-blocks-per-step.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.changeset/thinking-blocks-per-step.md b/.changeset/thinking-blocks-per-step.md index 14fdc0315..2492acc39 100644 --- a/.changeset/thinking-blocks-per-step.md +++ b/.changeset/thinking-blocks-per-step.md @@ -1,13 +1,15 @@ --- -'@tanstack/ai': patch +'@tanstack/ai': minor '@tanstack/ai-anthropic': patch 
-'@tanstack/ai-client': patch
+'@tanstack/ai-client': minor
 ---

 **Fix thinking blocks getting merged across steps and lost on turn 2+ of Anthropic tool loops.**

 Each thinking step emitted by the adapter now produces its own `ThinkingPart` on the `UIMessage` instead of being merged into a single part, and thinking content + Anthropic signatures are preserved in server-side message history so multi-turn tool flows with extended thinking work correctly.

+This includes a public callback signature change: `StreamProcessorEvents.onThinkingUpdate` now receives `(messageId, stepId, content)` instead of `(messageId, content)`. `ChatClient` has been updated to handle the new `stepId` argument internally, but consumers implementing `StreamProcessorEvents` directly need to add the new parameter.
+
 `@tanstack/ai`:

 - `ThinkingPart` gains optional `stepId` and `signature` fields.

From 11249c3554818e8095c0d066eef93c89a981101b Mon Sep 17 00:00:00 2001
From: imsherr
Date: Tue, 5 May 2026 22:04:01 -0400
Subject: [PATCH 11/11] fix(ai): satisfy lint for tool name fallback

---
 .../typescript/ai/src/activities/chat/tools/tool-calls.ts | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts b/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts
index 398ce0170..a74273018 100644
--- a/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts
+++ b/packages/typescript/ai/src/activities/chat/tools/tool-calls.ts
@@ -93,7 +93,9 @@ export class ToolCallManager {
    */
   addToolCallStartEvent(event: ToolCallStartEvent): void {
     const index = event.index ?? this.toolCallsMap.size
-    const name = event.toolCallName ?? event.toolName
+    const runtimeEvent = event as Partial &
+      Pick
+    const name = runtimeEvent.toolCallName ?? runtimeEvent.toolName
     this.toolCallsMap.set(index, {
       id: event.toolCallId,
       type: 'function',
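As the changeset in patch 10 notes, `StreamProcessorEvents.onThinkingUpdate` now receives `(messageId, stepId, content)`. A minimal sketch of a consumer updated for the new signature, assuming a hand-written events object; the `Map`-based bookkeeping and variable names are illustrative, and only the callback arguments come from this patch series.

```ts
// Hypothetical StreamProcessorEvents consumer updated for the new signature.
// Before this change the callback received (messageId, content); stepId is new.
const thinkingByStep = new Map<string, string>()

const events = {
  onThinkingUpdate: (messageId: string, stepId: string, content: string) => {
    // Keep each thinking step separate instead of one merged blob per message.
    thinkingByStep.set(stepId, content)
    console.log(`message ${messageId}, step ${stepId}: ${content}`)
  },
}
```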