@@ -63,6 +63,7 @@ def create(
63
63
functions : Optional [Iterable [completion_create_params .Function ]] | NotGiven = NOT_GIVEN ,
64
64
logit_bias : Optional [Dict [str , int ]] | NotGiven = NOT_GIVEN ,
65
65
logprobs : Optional [bool ] | NotGiven = NOT_GIVEN ,
66
+ max_completion_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
66
67
max_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
67
68
n : Optional [int ] | NotGiven = NOT_GIVEN ,
68
69
parallel_tool_calls : Optional [bool ] | NotGiven = NOT_GIVEN ,
@@ -98,6 +99,7 @@ def create(
98
99
functions : Optional [Iterable [completion_create_params .Function ]] | NotGiven = NOT_GIVEN ,
99
100
logit_bias : Optional [Dict [str , int ]] | NotGiven = NOT_GIVEN ,
100
101
logprobs : Optional [bool ] | NotGiven = NOT_GIVEN ,
102
+ max_completion_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
101
103
max_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
102
104
n : Optional [int ] | NotGiven = NOT_GIVEN ,
103
105
parallel_tool_calls : Optional [bool ] | NotGiven = NOT_GIVEN ,
@@ -133,6 +135,7 @@ def create(
133
135
functions : Optional [Iterable [completion_create_params .Function ]] | NotGiven = NOT_GIVEN ,
134
136
logit_bias : Optional [Dict [str , int ]] | NotGiven = NOT_GIVEN ,
135
137
logprobs : Optional [bool ] | NotGiven = NOT_GIVEN ,
138
+ max_completion_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
136
139
max_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
137
140
n : Optional [int ] | NotGiven = NOT_GIVEN ,
138
141
parallel_tool_calls : Optional [bool ] | NotGiven = NOT_GIVEN ,
@@ -167,6 +170,7 @@ def create(
167
170
functions : Optional [Iterable [completion_create_params .Function ]] | NotGiven = NOT_GIVEN ,
168
171
logit_bias : Optional [Dict [str , int ]] | NotGiven = NOT_GIVEN ,
169
172
logprobs : Optional [bool ] | NotGiven = NOT_GIVEN ,
173
+ max_completion_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
170
174
max_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
171
175
n : Optional [int ] | NotGiven = NOT_GIVEN ,
172
176
parallel_tool_calls : Optional [bool ] | NotGiven = NOT_GIVEN ,
@@ -224,10 +228,14 @@ def create(
224
228
probabilities of the output tokens or not. If true, returns the log
225
229
probabilities of each output token returned in the `content` of `message`.
226
230
227
- max_tokens : The maximum number of tokens that can be generated in the chat completion. The
231
+ max_completion_tokens : The maximum number of tokens that can be generated in the chat completion. The
228
232
total length of input tokens and generated tokens is limited by the model's
229
233
context length.
230
234
235
+ max_tokens: Deprecated in favor of `max_completion_tokens`. The maximum number of tokens
236
+ that can be generated in the chat completion. The total length of input tokens
237
+ and generated tokens is limited by the model's context length.
238
+
231
239
n: How many chat completion choices to generate for each input message. Note that
232
240
at the current moment, only n=1 is supported. Other values will result in a 400
233
241
response.
@@ -316,6 +324,7 @@ def create(
316
324
"functions" : functions ,
317
325
"logit_bias" : logit_bias ,
318
326
"logprobs" : logprobs ,
327
+ "max_completion_tokens" : max_completion_tokens ,
319
328
"max_tokens" : max_tokens ,
320
329
"n" : n ,
321
330
"parallel_tool_calls" : parallel_tool_calls ,
@@ -374,6 +383,7 @@ async def create(
374
383
functions : Optional [Iterable [completion_create_params .Function ]] | NotGiven = NOT_GIVEN ,
375
384
logit_bias : Optional [Dict [str , int ]] | NotGiven = NOT_GIVEN ,
376
385
logprobs : Optional [bool ] | NotGiven = NOT_GIVEN ,
386
+ max_completion_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
377
387
max_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
378
388
n : Optional [int ] | NotGiven = NOT_GIVEN ,
379
389
parallel_tool_calls : Optional [bool ] | NotGiven = NOT_GIVEN ,
@@ -409,6 +419,7 @@ async def create(
409
419
functions : Optional [Iterable [completion_create_params .Function ]] | NotGiven = NOT_GIVEN ,
410
420
logit_bias : Optional [Dict [str , int ]] | NotGiven = NOT_GIVEN ,
411
421
logprobs : Optional [bool ] | NotGiven = NOT_GIVEN ,
422
+ max_completion_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
412
423
max_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
413
424
n : Optional [int ] | NotGiven = NOT_GIVEN ,
414
425
parallel_tool_calls : Optional [bool ] | NotGiven = NOT_GIVEN ,
@@ -444,6 +455,7 @@ async def create(
444
455
functions : Optional [Iterable [completion_create_params .Function ]] | NotGiven = NOT_GIVEN ,
445
456
logit_bias : Optional [Dict [str , int ]] | NotGiven = NOT_GIVEN ,
446
457
logprobs : Optional [bool ] | NotGiven = NOT_GIVEN ,
458
+ max_completion_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
447
459
max_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
448
460
n : Optional [int ] | NotGiven = NOT_GIVEN ,
449
461
parallel_tool_calls : Optional [bool ] | NotGiven = NOT_GIVEN ,
@@ -478,6 +490,7 @@ async def create(
478
490
functions : Optional [Iterable [completion_create_params .Function ]] | NotGiven = NOT_GIVEN ,
479
491
logit_bias : Optional [Dict [str , int ]] | NotGiven = NOT_GIVEN ,
480
492
logprobs : Optional [bool ] | NotGiven = NOT_GIVEN ,
493
+ max_completion_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
481
494
max_tokens : Optional [int ] | NotGiven = NOT_GIVEN ,
482
495
n : Optional [int ] | NotGiven = NOT_GIVEN ,
483
496
parallel_tool_calls : Optional [bool ] | NotGiven = NOT_GIVEN ,
@@ -535,10 +548,14 @@ async def create(
535
548
probabilities of the output tokens or not. If true, returns the log
536
549
probabilities of each output token returned in the `content` of `message`.
537
550
538
- max_tokens : The maximum number of tokens that can be generated in the chat completion. The
551
+ max_completion_tokens : The maximum number of tokens that can be generated in the chat completion. The
539
552
total length of input tokens and generated tokens is limited by the model's
540
553
context length.
541
554
555
+ max_tokens: Deprecated in favor of `max_completion_tokens`. The maximum number of tokens
556
+ that can be generated in the chat completion. The total length of input tokens
557
+ and generated tokens is limited by the model's context length.
558
+
542
559
n: How many chat completion choices to generate for each input message. Note that
543
560
at the current moment, only n=1 is supported. Other values will result in a 400
544
561
response.
@@ -627,6 +644,7 @@ async def create(
627
644
"functions" : functions ,
628
645
"logit_bias" : logit_bias ,
629
646
"logprobs" : logprobs ,
647
+ "max_completion_tokens" : max_completion_tokens ,
630
648
"max_tokens" : max_tokens ,
631
649
"n" : n ,
632
650
"parallel_tool_calls" : parallel_tool_calls ,
0 commit comments