Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -355,8 +355,11 @@ async def _process_streaming_chunks(

# Emit arguments.done events for completed tool calls (differentiate between MCP and function calls)
for tool_call_index in sorted(chat_response_tool_calls.keys()):
tool_call = chat_response_tool_calls[tool_call_index]
# Ensure that arguments, if sent back to the inference provider, are not None
tool_call.function.arguments = tool_call.function.arguments or "{}"
tool_call_item_id = tool_call_item_ids[tool_call_index]
final_arguments = chat_response_tool_calls[tool_call_index].function.arguments or ""
final_arguments = tool_call.function.arguments
tool_call_name = chat_response_tool_calls[tool_call_index].function.name

# Check if this is an MCP tool call
Expand Down
33 changes: 33 additions & 0 deletions tests/integration/agents/test_openai_responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -264,3 +264,36 @@ def test_function_call_output_response(openai_client, client_with_models, text_m
assert (
"sunny" in response2.output[0].content[0].text.lower() or "warm" in response2.output[0].content[0].text.lower()
)


def test_function_call_output_response_with_none_arguments(openai_client, client_with_models, text_model_id):
    """Test handling of function call outputs in responses when function does not accept arguments."""
    if isinstance(client_with_models, LlamaStackAsLibraryClient):
        pytest.skip("OpenAI responses are not supported when testing with library client yet.")

    # A tool with an empty parameter schema: the model's call should carry no arguments,
    # and the server must normalize those absent arguments to "{}" rather than None.
    time_tool = {
        "type": "function",
        "name": "get_current_time",
        "description": "Get the current time",
        "parameters": {},
    }
    user_turn = {
        "role": "user",
        "content": "what's the current time? You MUST call the `get_current_time` function to find out.",
    }

    # Issue a non-streaming request that forces the model to invoke the tool.
    response = openai_client.responses.create(
        model=text_model_id,
        input=[user_turn],
        tools=[time_tool],
        stream=False,
    )

    # The first output item must be a function call with "{}" arguments (never None),
    # and it must expose a call_id for correlating the eventual tool output.
    first_output = response.output[0]
    assert first_output.type == "function_call"
    assert first_output.arguments == "{}"
    _ = first_output.call_id
4 changes: 2 additions & 2 deletions tests/integration/recordings/responses/05e3ebc68306.json
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
"body": {
"__type__": "openai.types.chat.chat_completion.ChatCompletion",
"__data__": {
"id": "chatcmpl-618",
"id": "chatcmpl-447",
"choices": [
{
"finish_reason": "stop",
Expand All @@ -38,7 +38,7 @@
}
}
],
"created": 1759245078,
"created": 1759282456,
"model": "llama-guard3:1b",
"object": "chat.completion",
"service_tier": null,
Expand Down
10 changes: 5 additions & 5 deletions tests/integration/recordings/responses/0b27fd737699.json
Original file line number Diff line number Diff line change
Expand Up @@ -20,15 +20,15 @@
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama-guard3:1b",
"created_at": "2025-09-03T17:37:47.461886Z",
"created_at": "2025-09-30T17:37:24.035083658Z",
"done": true,
"done_reason": "stop",
"total_duration": 338927833,
"load_duration": 100895125,
"total_duration": 2990785181,
"load_duration": 52933018,
"prompt_eval_count": 223,
"prompt_eval_duration": 221583042,
"prompt_eval_duration": 2884018743,
"eval_count": 2,
"eval_duration": 12341416,
"eval_duration": 53216446,
"response": "safe",
"thinking": null,
"context": null
Expand Down
32 changes: 16 additions & 16 deletions tests/integration/recordings/responses/0b3f2e4754ff.json
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-414",
"id": "chatcmpl-106",
"choices": [
{
"delta": {
Expand All @@ -39,7 +39,7 @@
"logprobs": null
}
],
"created": 1756921333,
"created": 1759254065,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
Expand All @@ -50,7 +50,7 @@
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-414",
"id": "chatcmpl-106",
"choices": [
{
"delta": {
Expand All @@ -65,7 +65,7 @@
"logprobs": null
}
],
"created": 1756921333,
"created": 1759254066,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
Expand All @@ -76,7 +76,7 @@
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-414",
"id": "chatcmpl-106",
"choices": [
{
"delta": {
Expand All @@ -91,7 +91,7 @@
"logprobs": null
}
],
"created": 1756921333,
"created": 1759254066,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
Expand All @@ -102,7 +102,7 @@
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-414",
"id": "chatcmpl-106",
"choices": [
{
"delta": {
Expand All @@ -117,7 +117,7 @@
"logprobs": null
}
],
"created": 1756921333,
"created": 1759254066,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
Expand All @@ -128,7 +128,7 @@
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-414",
"id": "chatcmpl-106",
"choices": [
{
"delta": {
Expand All @@ -143,7 +143,7 @@
"logprobs": null
}
],
"created": 1756921334,
"created": 1759254066,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
Expand All @@ -154,7 +154,7 @@
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-414",
"id": "chatcmpl-106",
"choices": [
{
"delta": {
Expand All @@ -169,7 +169,7 @@
"logprobs": null
}
],
"created": 1756921334,
"created": 1759254066,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
Expand All @@ -180,7 +180,7 @@
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-414",
"id": "chatcmpl-106",
"choices": [
{
"delta": {
Expand All @@ -195,7 +195,7 @@
"logprobs": null
}
],
"created": 1756921334,
"created": 1759254067,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
Expand All @@ -206,7 +206,7 @@
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-414",
"id": "chatcmpl-106",
"choices": [
{
"delta": {
Expand All @@ -221,7 +221,7 @@
"logprobs": null
}
],
"created": 1756921334,
"created": 1759254067,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
Expand Down
32 changes: 16 additions & 16 deletions tests/integration/recordings/responses/173ecb3aab28.json
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-921",
"id": "chatcmpl-629",
"choices": [
{
"delta": {
Expand All @@ -55,7 +55,7 @@
"logprobs": null
}
],
"created": 1756920971,
"created": 1759253815,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
Expand All @@ -66,7 +66,7 @@
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-921",
"id": "chatcmpl-629",
"choices": [
{
"delta": {
Expand All @@ -81,7 +81,7 @@
"logprobs": null
}
],
"created": 1756920971,
"created": 1759253815,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
Expand All @@ -92,7 +92,7 @@
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-921",
"id": "chatcmpl-629",
"choices": [
{
"delta": {
Expand All @@ -107,7 +107,7 @@
"logprobs": null
}
],
"created": 1756920971,
"created": 1759253815,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
Expand All @@ -118,7 +118,7 @@
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-921",
"id": "chatcmpl-629",
"choices": [
{
"delta": {
Expand All @@ -133,7 +133,7 @@
"logprobs": null
}
],
"created": 1756920971,
"created": 1759253816,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
Expand All @@ -144,7 +144,7 @@
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-921",
"id": "chatcmpl-629",
"choices": [
{
"delta": {
Expand All @@ -159,7 +159,7 @@
"logprobs": null
}
],
"created": 1756920971,
"created": 1759253816,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
Expand All @@ -170,7 +170,7 @@
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-921",
"id": "chatcmpl-629",
"choices": [
{
"delta": {
Expand All @@ -185,7 +185,7 @@
"logprobs": null
}
],
"created": 1756920971,
"created": 1759253816,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
Expand All @@ -196,7 +196,7 @@
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-921",
"id": "chatcmpl-629",
"choices": [
{
"delta": {
Expand All @@ -211,7 +211,7 @@
"logprobs": null
}
],
"created": 1756920971,
"created": 1759253816,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
Expand All @@ -222,7 +222,7 @@
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-921",
"id": "chatcmpl-629",
"choices": [
{
"delta": {
Expand All @@ -237,7 +237,7 @@
"logprobs": null
}
],
"created": 1756920971,
"created": 1759253816,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
Expand Down
4 changes: 2 additions & 2 deletions tests/integration/recordings/responses/1a4da7c94fde.json
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
"body": {
"__type__": "openai.types.chat.chat_completion.ChatCompletion",
"__data__": {
"id": "chatcmpl-438",
"id": "chatcmpl-478",
"choices": [
{
"finish_reason": "stop",
Expand All @@ -38,7 +38,7 @@
}
}
],
"created": 1759245073,
"created": 1759282396,
"model": "llama-guard3:1b",
"object": "chat.completion",
"service_tier": null,
Expand Down
Loading