
Commit 4f453fe
Fix: Ensure that tool calls with no arguments get handled correctly #3560

When a model decides to use an MCP tool that requires no arguments, it sets the arguments field to None. This causes validation errors, because the field is dropped when the request is parsed by an OpenAI-compatible inference provider such as vLLM. This PR ensures that, once the tool call arguments have been accumulated during streaming, no tool call's function arguments are left as None; any that are get replaced with "{}".

Closes #3456

Added a new unit test to verify that tool calls with function arguments set to None are handled correctly.

Signed-off-by: Jaideep Rao <[email protected]>
1 parent 9c751b6
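For context, the failure mode is easy to reproduce in isolation: when a tool call's `arguments` field is `None`, serializers that drop null fields emit a payload with no `arguments` key at all, which strict OpenAI-compatible validators then reject. Below is a minimal sketch of the problem and the fix; the `ToolCallFunction` dataclass and `serialize_dropping_nulls` helper are hypothetical stand-ins for the real llama-stack types, not the actual implementation.

```python
import json
from dataclasses import asdict, dataclass
from typing import Optional


@dataclass
class ToolCallFunction:
    # Hypothetical stand-in for the provider's tool-call function type.
    name: str
    arguments: Optional[str] = None  # JSON-encoded arguments string, or None


def serialize_dropping_nulls(fn: ToolCallFunction) -> str:
    # Mimics a serializer that omits null fields, as OpenAI-compatible
    # clients commonly do: the "arguments" key disappears entirely.
    return json.dumps({k: v for k, v in asdict(fn).items() if v is not None})


fn = ToolCallFunction(name="get_weather")
print(serialize_dropping_nulls(fn))  # {"name": "get_weather"} -- missing "arguments"

fn.arguments = fn.arguments or "{}"  # the fix: coalesce None to an empty JSON object
print(serialize_dropping_nulls(fn))  # {"name": "get_weather", "arguments": "{}"}
```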

2 files changed: 83 additions & 1 deletion

llama_stack/providers/inline/agents/meta_reference/responses/streaming.py

Lines changed: 4 additions & 1 deletion

```diff
@@ -298,8 +298,11 @@ async def _process_streaming_chunks(
 
         # Emit arguments.done events for completed tool calls (differentiate between MCP and function calls)
         for tool_call_index in sorted(chat_response_tool_calls.keys()):
+            tool_call = chat_response_tool_calls[tool_call_index]
+            # Ensure that arguments, if sent back to the inference provider, are not None
+            tool_call.function.arguments = tool_call.function.arguments or "{}"
             tool_call_item_id = tool_call_item_ids[tool_call_index]
-            final_arguments = chat_response_tool_calls[tool_call_index].function.arguments or ""
+            final_arguments = tool_call.function.arguments
             tool_call_name = chat_response_tool_calls[tool_call_index].function.name
 
             # Check if this is an MCP tool call
```
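Python's `or` coalesces any falsy value, so this normalization maps both `None` and the empty string to `"{}"`, while real argument payloads pass through unchanged. A quick illustration:

```python
for raw in (None, "", '{"location": "San Francisco"}'):
    print(repr(raw), "->", repr(raw or "{}"))
# Prints:
# None -> '{}'
# '' -> '{}'
# '{"location": "San Francisco"}' -> '{"location": "San Francisco"}'
```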

tests/unit/providers/agents/meta_reference/test_openai_responses.py

Lines changed: 79 additions & 0 deletions

```diff
@@ -326,6 +326,85 @@ async def fake_stream_toolcall():
     assert chunks[5].response.output[0].name == "get_weather"
 
 
+async def test_create_openai_response_with_tool_call_function_arguments_none(openai_responses_impl, mock_inference_api):
+    """Test creating an OpenAI response with a tool call response that has a function with arguments set to None."""
+    # Setup
+    input_text = "How hot it is in San Francisco today?"
+    model = "meta-llama/Llama-3.1-8B-Instruct"
+
+    async def fake_stream_toolcall():
+        yield ChatCompletionChunk(
+            id="123",
+            choices=[
+                Choice(
+                    index=0,
+                    delta=ChoiceDelta(
+                        tool_calls=[
+                            ChoiceDeltaToolCall(
+                                index=0,
+                                id="tc_123",
+                                function=ChoiceDeltaToolCallFunction(name="get_weather", arguments=None),
+                                type=None,
+                            )
+                        ]
+                    ),
+                ),
+            ],
+            created=1,
+            model=model,
+            object="chat.completion.chunk",
+        )
+
+    mock_inference_api.openai_chat_completion.return_value = fake_stream_toolcall()
+
+    # Execute
+    result = await openai_responses_impl.create_openai_response(
+        input=input_text,
+        model=model,
+        stream=True,
+        temperature=0.1,
+        tools=[
+            OpenAIResponseInputToolFunction(
+                name="get_weather",
+                description="Get current temperature for a given location.",
+                parameters={
+                    "location": "string",
+                },
+            )
+        ],
+    )
+
+    # Check that we got the content from our mocked tool execution result
+    chunks = [chunk async for chunk in result]
+
+    # Verify event types
+    # Should have: response.created, output_item.added, function_call_arguments.delta,
+    # function_call_arguments.done, output_item.done, response.completed
+    assert len(chunks) == 5
+
+    # Verify inference API was called correctly (after iterating over result)
+    first_call = mock_inference_api.openai_chat_completion.call_args_list[0]
+    assert first_call.kwargs["messages"][0].content == input_text
+    assert first_call.kwargs["tools"] is not None
+    assert first_call.kwargs["temperature"] == 0.1
+
+    # Check response.created event (should have empty output)
+    assert chunks[0].type == "response.created"
+    assert len(chunks[0].response.output) == 0
+
+    # Check streaming events
+    assert chunks[1].type == "response.output_item.added"
+    assert chunks[2].type == "response.function_call_arguments.done"
+    assert chunks[3].type == "response.output_item.done"
+
+    # Check response.completed event (should have the tool call with arguments set to "{}")
+    assert chunks[4].type == "response.completed"
+    assert len(chunks[4].response.output) == 1
+    assert chunks[4].response.output[0].type == "function_call"
+    assert chunks[4].response.output[0].name == "get_weather"
+    assert chunks[4].response.output[0].arguments == "{}"
+
+
 async def test_create_openai_response_with_multiple_messages(openai_responses_impl, mock_inference_api):
     """Test creating an OpenAI response with multiple messages."""
     # Setup
```
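To run just this regression test locally, something like the following should work; it assumes pytest is installed and is invoked from the repository root.

```python
# Sketch: run only the new test via pytest's Python API
# (equivalent to passing the same arguments on the command line).
import pytest

pytest.main([
    "tests/unit/providers/agents/meta_reference/test_openai_responses.py",
    "-k", "tool_call_function_arguments_none",
])
```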
