Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
6e98053
Adding a fix to pass `reasoning_effort` conditionally
avinash2692 Dec 24, 2025
ab9e56b
adding tests
avinash2692 Dec 24, 2025
0862cf0
Fixes #274
nrfulton Dec 26, 2025
29481f7
Adds GPT 5.1 model identifier.
nrfulton Dec 26, 2025
1426ee9
Changes OpenAI Backend default model_id to GPT 5.1.
nrfulton Dec 26, 2025
c11fbef
Fixes bug: GenSlots did not work with OpenAI platform.
nrfulton Dec 26, 2025
0bde6ec
Adds inline documentation for OpenAI model options monkey patching.
nrfulton Dec 26, 2025
4d87c83
removes debug print stmt.
nrfulton Dec 26, 2025
f87f86b
adding a comment about reasoning_effort in openai sdk
avinash2692 Jan 5, 2026
e7e161b
Merge branch 'fix/270-openai-reasoning-effort' of https://github.com/…
avinash2692 Jan 5, 2026
b6d16a6
Merge branch 'main' into fix/270-openai-reasoning-effort
avinash2692 Jan 6, 2026
a94205d
removing all instances of hf_model_id in openai backend
avinash2692 Jan 6, 2026
1e7c1b4
removing apply_chat_template and adding assertions for env variable
avinash2692 Jan 6, 2026
a695cb4
adding some tests for param checking
avinash2692 Jan 6, 2026
41a0c62
changing env variable handling logic.
avinash2692 Jan 6, 2026
c905843
base_url check is now a warning
avinash2692 Jan 6, 2026
0a7747a
fix: change warning message in openai.py
jakelorocco Jan 6, 2026
d0ecfc7
marking test as qualitative because it's causing timeouts in GitHub Actions
avinash2692 Jan 6, 2026
17c2862
Merge branch 'fix/270-openai-reasoning-effort' of https://github.com/…
avinash2692 Jan 6, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion mellea/backends/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -631,15 +631,20 @@ async def _generate_from_chat_context_standard(
formatted_tools = convert_tools_to_json(tools)
use_tools = len(formatted_tools) > 0

# Build optional reasoning parameters
reasoning_params = {}
if thinking is not None:
reasoning_params["reasoning_effort"] = thinking

Comment on lines 671 to 676
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please also add a comment noting that OpenAI doesn't like it when non-reasoning models get reasoning parameters.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

done!

chat_response: Coroutine[
Any, Any, ChatCompletion | openai.AsyncStream[ChatCompletionChunk]
] = self._async_client.chat.completions.create(
model=self._hf_model_id,
messages=conversation, # type: ignore
reasoning_effort=thinking, # type: ignore
tools=formatted_tools if use_tools else None, # type: ignore
# parallel_tool_calls=False, # We only support calling one tool per turn. But we do the choosing on our side so we leave this False.
**extra_params,
**reasoning_params, # type: ignore
**self._make_backend_specific_and_remove(
model_opts, is_chat_context=ctx.is_chat_context
),
Expand Down
40 changes: 40 additions & 0 deletions test/backends/test_openai_ollama.py
Original file line number Diff line number Diff line change
Expand Up @@ -216,6 +216,46 @@ async def get_client_async():
assert len(backend._client_cache.cache.values()) == 2


async def test_reasoning_effort_conditional_passing(backend):
    """Verify `reasoning_effort` is forwarded to the OpenAI API only when set.

    Patches the async client's ``chat.completions.create`` with an
    ``AsyncMock`` and inspects the keyword arguments it receives:

    1. With empty model options, ``reasoning_effort`` must be absent
       (OpenAI rejects reasoning parameters on non-reasoning models).
    2. With ``ModelOption.THINKING`` set, ``reasoning_effort`` must carry
       exactly that value.
    """
    from unittest.mock import AsyncMock, MagicMock, patch

    ctx = ChatContext()
    ctx = ctx.add(CBlock(value="Test"))

    # Minimal stand-in for a ChatCompletion response; only the fields the
    # backend reads here (choices[0].message.{content,role}) are populated.
    mock_response = MagicMock()
    mock_response.choices = [MagicMock()]
    mock_response.choices[0].message = MagicMock()
    mock_response.choices[0].message.content = "Response"
    mock_response.choices[0].message.role = "assistant"

    async def _create_kwargs(model_options):
        # Run one generation with `model_options` and return the kwargs
        # that the mocked `chat.completions.create` was called with.
        with patch.object(
            backend._async_client.chat.completions, "create", new_callable=AsyncMock
        ) as mock_create:
            mock_create.return_value = mock_response
            await backend.generate_from_chat_context(
                CBlock(value="Hi"), ctx, model_options=model_options
            )
            return mock_create.call_args.kwargs

    # Test 1: reasoning_effort should NOT be passed when not specified.
    call_kwargs = await _create_kwargs({})
    assert "reasoning_effort" not in call_kwargs, (
        "reasoning_effort should not be passed when not specified"
    )

    # Test 2: reasoning_effort SHOULD be passed when specified.
    call_kwargs = await _create_kwargs({ModelOption.THINKING: "medium"})
    assert call_kwargs.get("reasoning_effort") == "medium", (
        "reasoning_effort should be passed with correct value when specified"
    )


if __name__ == "__main__":
import pytest

Expand Down
Loading