@@ -328,6 +328,137 @@ async def fake_stream_toolcall():
     assert chunks[5].response.output[0].name == "get_weather"
 
 
+async def test_create_openai_response_with_tool_call_function_arguments_none(openai_responses_impl, mock_inference_api):
+    """Test creating an OpenAI response for a tool call whose function takes no arguments, or whose optional arguments arrive as None."""
+    # Setup
+    input_text = "What is the time right now?"
+    model = "meta-llama/Llama-3.1-8B-Instruct"
+
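+    # Mock a single streamed chunk whose tool-call delta has arguments=None,
+    # as some providers emit for parameterless functions.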
+    async def fake_stream_toolcall():
+        yield ChatCompletionChunk(
+            id="123",
+            choices=[
+                Choice(
+                    index=0,
+                    delta=ChoiceDelta(
+                        tool_calls=[
+                            ChoiceDeltaToolCall(
+                                index=0,
+                                id="tc_123",
+                                function=ChoiceDeltaToolCallFunction(name="get_current_time", arguments=None),
+                                type=None,
+                            )
+                        ]
+                    ),
+                ),
+            ],
+            created=1,
+            model=model,
+            object="chat.completion.chunk",
+        )
+
+    mock_inference_api.openai_chat_completion.return_value = fake_stream_toolcall()
+
+    # Function does not accept arguments
+    result = await openai_responses_impl.create_openai_response(
+        input=input_text,
+        model=model,
+        stream=True,
+        temperature=0.1,
+        tools=[
+            OpenAIResponseInputToolFunction(
+                name="get_current_time",
+                description="Get current time for system's timezone",
+                parameters={},
+            )
+        ],
+    )
+
+    # Collect the streamed chunks
+    chunks = [chunk async for chunk in result]
+
+    # Verify event types
+    # Should have: response.created, output_item.added, function_call_arguments.done,
+    # output_item.done, response.completed (no arguments delta, since none were streamed)
+    assert len(chunks) == 5
+
+    # Verify inference API was called correctly (after iterating over result)
+    first_call = mock_inference_api.openai_chat_completion.call_args_list[0]
+    assert first_call.kwargs["messages"][0].content == input_text
+    assert first_call.kwargs["tools"] is not None
+    assert first_call.kwargs["temperature"] == 0.1
+
+    # Check response.created event (should have empty output)
+    assert chunks[0].type == "response.created"
+    assert len(chunks[0].response.output) == 0
+
+    # Check streaming events
+    assert chunks[1].type == "response.output_item.added"
+    assert chunks[2].type == "response.function_call_arguments.done"
+    assert chunks[3].type == "response.output_item.done"
+
+    # Check response.completed event (should have the tool call with arguments set to "{}")
+    assert chunks[4].type == "response.completed"
+    assert len(chunks[4].response.output) == 1
+    assert chunks[4].response.output[0].type == "function_call"
+    assert chunks[4].response.output[0].name == "get_current_time"
+    assert chunks[4].response.output[0].arguments == "{}"
+
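+    # Re-prime the mock: the first iteration exhausted the previous async generator.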
+    mock_inference_api.openai_chat_completion.return_value = fake_stream_toolcall()
+
+    # Function accepts optional arguments
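+    # The schema now declares an optional parameter, but the streamed arguments are
+    # still None; the response should again normalize them to "{}".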
+    result = await openai_responses_impl.create_openai_response(
+        input=input_text,
+        model=model,
+        stream=True,
+        temperature=0.1,
+        tools=[
+            OpenAIResponseInputToolFunction(
+                name="get_current_time",
+                description="Get current time for system's timezone",
+                parameters={
+                    "timezone": "string",
+                },
+            )
+        ],
+    )
+
+    # Collect the streamed chunks
+    chunks = [chunk async for chunk in result]
+
+    # Verify event types
+    # Should have: response.created, output_item.added, function_call_arguments.done,
+    # output_item.done, response.completed (no arguments delta, since none were streamed)
+    assert len(chunks) == 5
+
+    # Verify the second inference call was made correctly (after iterating over result)
+    second_call = mock_inference_api.openai_chat_completion.call_args_list[1]
+    assert second_call.kwargs["messages"][0].content == input_text
+    assert second_call.kwargs["tools"] is not None
+    assert second_call.kwargs["temperature"] == 0.1
+
+    # Check response.created event (should have empty output)
+    assert chunks[0].type == "response.created"
+    assert len(chunks[0].response.output) == 0
+
+    # Check streaming events
+    assert chunks[1].type == "response.output_item.added"
+    assert chunks[2].type == "response.function_call_arguments.done"
+    assert chunks[3].type == "response.output_item.done"
+
+    # Check response.completed event (should have the tool call with arguments set to "{}")
+    assert chunks[4].type == "response.completed"
+    assert len(chunks[4].response.output) == 1
+    assert chunks[4].response.output[0].type == "function_call"
+    assert chunks[4].response.output[0].name == "get_current_time"
+    assert chunks[4].response.output[0].arguments == "{}"
+
+
 async def test_create_openai_response_with_multiple_messages(openai_responses_impl, mock_inference_api):
     """Test creating an OpenAI response with multiple messages."""
     # Setup