4 changes: 4 additions & 0 deletions gui/src/redux/thunks/streamNormalInput.ts
@@ -50,6 +50,10 @@ function buildReasoningCompletionOptions(
hasReasoningEnabled: boolean | undefined,
model: ModelDescription,
): LLMFullCompletionOptions {
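  // A model-level opt-out wins over the session toggle: when the model's
  // completionOptions explicitly set `reasoning: false`, skip reasoning entirely.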
if (model.completionOptions?.reasoning === false) {
return baseOptions;
}

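  // No session-level preference recorded: leave the base options unchanged.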
if (hasReasoningEnabled === undefined) {
return baseOptions;
}
88 changes: 88 additions & 0 deletions gui/src/redux/thunks/streamResponse.test.ts
@@ -476,6 +476,94 @@ describe("streamResponseThunk", () => {
});
});

it("should preserve model-level reasoning disablement in chat requests", async () => {
const initialState = getRootStateWithClaude();
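    // The model config explicitly disables reasoning, while the session-level
    // toggle below is turned on; the model-level setting should win.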
initialState.config.config.selectedModelByRole.chat = {
title: "Qwen 3 30B",
model: "qwen3:30b",
provider: "ollama",
underlyingProviderName: "ollama",
completionOptions: { reasoning: false },
};
initialState.session.hasReasoningEnabled = true;
initialState.session.history = [
{
message: { id: "1", role: "user", content: "Hello" },
contextItems: [],
},
];

const mockStore = createMockStore(initialState);
const mockIdeMessenger = mockStore.mockIdeMessenger;
const requestSpy = vi.spyOn(mockIdeMessenger, "request");

mockIdeMessenger.responses["llm/compileChat"] = {
compiledChatMessages: [{ role: "user", content: "Hello" }],
didPrune: false,
contextPercentage: 0.8,
};

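    // Minimal mock stream: one assistant chunk, then a PromptLog as the return value.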
async function* mockStreamGenerator(): AsyncGenerator<
AssistantChatMessage[],
PromptLog
> {
yield [{ role: "assistant", content: "First chunk" }];
return {
prompt: "Hello",
completion: "Hi there!",
modelProvider: "ollama",
modelTitle: "Qwen 3 30B",
};
}

mockIdeMessenger.llmStreamChat = vi
.fn()
.mockReturnValue(mockStreamGenerator());

await mockStore.dispatch(
streamResponseThunk({
editorState: mockEditorState,
modifiers: mockModifiers,
}) as any,
);

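    // compileChat should receive empty options: no reasoning fields are added.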
expect(requestSpy).toHaveBeenCalledWith("llm/compileChat", {
messages: [
{
role: "system",
content: "You are a helpful assistant.",
},
{
role: "user",
content: [
{
type: "text",
text: "Hello",
},
],
},
{
role: "user",
content: [
{
type: "text",
text: "Hello, please help me with this code",
},
],
},
],
options: {},
});

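    // Likewise, the stream request carries no reasoning-related completion options.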
expect(mockIdeMessenger.llmStreamChat).toHaveBeenCalledWith(
expect.objectContaining({
completionOptions: {},
title: "Qwen 3 30B",
}),
expect.any(AbortSignal),
);
});

it("should execute streaming flow with tool call execution", async () => {
// Set up auto-approved tool setting for our test tool
const stateWithToolSettings = getRootStateWithClaude();