Skip to content
Closed
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion src/hooks/think-mode/detector.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,11 @@
// English triggers for think mode, tried in order (most specific first).
// NOTE(review): bare /\bthink\b/i also matches conversational English
// ("I think so", "I don't think that's right") — the same over-trigger
// class this change fixes for Korean "고민". Confirm "think" is intended
// as an explicit Claude-style magic keyword before keeping it this broad.
const ENGLISH_PATTERNS = [/\bultrathink\b/i, /\bthink\b/i]

const MULTILINGUAL_KEYWORDS = [
"생각", "고민", "검토", "제대로",
// Korean: intentionally use only explicit reasoning-request forms.
// "고민" ("to worry/ponder") is excluded — it is a common conversational word
// that fires think mode on everyday sentences like "어떻게 할지 고민하고 있었는데".
// Only "생각해줘" / "깊이 생각" style explicit directives should trigger think mode.
// NOTE(review): bare "제대로" ("properly") below is also common conversational
// Korean and can over-trigger for the same reason — confirm it should stay.
"생각해줘", "깊이 생각", "신중하게 검토", "제대로",
"思考", "考虑", "考慮",
"思考", "考え", "熟考",
"सोच", "विचार",
Expand Down
67 changes: 67 additions & 0 deletions src/hooks/think-mode/index.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -153,3 +153,70 @@ describe("createThinkModeHook", () => {
expect(output.message.model).toBeUndefined()
})
})

describe("think-mode: regression tests for issue #2382", () => {
  const sessionID = "regression-2382"

  // Every test starts from a clean per-session think-mode state.
  beforeEach(() => {
    clearThinkModeState(sessionID)
  })

  // Shared driver: build a fresh hook plus input/output pair, fire the
  // chat.message hook, and hand back the (possibly mutated) output.
  async function fireChatMessage(
    providerID: string,
    modelID: string,
    text: string,
  ) {
    const hook = createThinkModeHook()
    const input = createHookInput({ sessionID, providerID, modelID })
    const output = createHookOutput(text)
    await hook["chat.message"](input, output)
    return output
  }

  it("does NOT activate think mode for conversational Korean '고민' (to worry/ponder)", async () => {
    // The exact user sentence from the bug report — here '고민' is everyday
    // speech ("I was pondering how to…"), not a reasoning directive.
    const output = await fireChatMessage(
      "opencode",
      "gpt-5-nano",
      "너와 인공지능 엔진에게 이미지와 참조 자료를 어떻게 전달할지 고민하고 있었는데"
    )

    // Neither the variant nor the model may be touched.
    expect(output.message.variant).toBeUndefined()
    expect(output.message.model).toBeUndefined()
  })

  it("does NOT upgrade gpt-5-nano even when think keyword IS present", async () => {
    // gpt-5-nano has no -high variant on Zen; upgrading it used to
    // produce a "Model not found" error.
    const output = await fireChatMessage(
      "opencode",
      "gpt-5-nano",
      "신중하게 검토해줘"
    )

    // No high variant exists for gpt-5-nano, so the model stays unchanged.
    expect(output.message.model).toBeUndefined()
  })

  it("still activates think mode for explicit Korean reasoning directive '생각해줘'", async () => {
    // An explicit reasoning directive on a capable model must still upgrade.
    const output = await fireChatMessage(
      "anthropic",
      "claude-opus-4-6",
      "이 문제 깊이 생각해줘"
    )

    expect(output.message.variant).toBe("high")
    expect(output.message.model).toEqual({
      providerID: "anthropic",
      modelID: "claude-opus-4-6-high",
    })
  })
})
7 changes: 7 additions & 0 deletions src/hooks/think-mode/switcher.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,13 @@ describe("think-mode switcher", () => {
expect(getHighVariant("llama-3-70b")).toBeNull()
expect(getHighVariant("mistral-large")).toBeNull()
})

it("should return null for gpt-5-nano (no reasoning variant on Zen)", () => {
  // gpt-5-nano is a lightweight model with no high-effort variant on
  // OpenCode Zen — both the bare and the provider-prefixed ID must map to null.
  // see: https://github.com/code-yeongyu/oh-my-openagent/issues/2382
  const nanoIds = ["gpt-5-nano", "opencode/gpt-5-nano"]
  for (const id of nanoIds) {
    expect(getHighVariant(id)).toBeNull()
  }
})
})
})

Expand Down
4 changes: 3 additions & 1 deletion src/hooks/think-mode/switcher.ts
Original file line number Diff line number Diff line change
Expand Up @@ -50,9 +50,11 @@ const HIGH_VARIANT_MAP: Record<string, string> = {
"gemini-3-1-pro-low": "gemini-3-1-pro-high",
"gemini-3-flash": "gemini-3-flash-high",
// GPT-5
// NOTE: gpt-5-nano is intentionally excluded — it is a lightweight nano model
// that does not have a reasoning/high-effort variant on the OpenCode Zen provider.
// Mapping it to gpt-5-nano-high causes a "Model not found" error.
"gpt-5": "gpt-5-high",
"gpt-5-mini": "gpt-5-mini-high",
"gpt-5-nano": "gpt-5-nano-high",
"gpt-5-pro": "gpt-5-pro-high",
"gpt-5-chat-latest": "gpt-5-chat-latest-high",
// GPT-5.1
Expand Down