Skip to content

Commit 61cffbc

Browse files
stainless-app[bot] and gradenr
authored and committed
feat(api): api update (#175)
1 parent 0b2e997 commit 61cffbc

File tree

4 files changed

+31
-4
lines changed

4 files changed

+31
-4
lines changed

.stats.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
configured_endpoints: 7
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-33be0d612b9f1153b86f53e95bf7c571af2f1e466bda2330b632e6c05832e2a6.yml
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-8bf31041292f851076489c3ac1270d06c49b995225d91cf5de2288a4bcfa8c29.yml

src/groq/resources/chat/completions.py

+20-2
Original file line numberDiff line numberDiff line change
@@ -63,6 +63,7 @@ def create(
6363
functions: Optional[Iterable[completion_create_params.Function]] | NotGiven = NOT_GIVEN,
6464
logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
6565
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
66+
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
6667
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
6768
n: Optional[int] | NotGiven = NOT_GIVEN,
6869
parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
@@ -98,6 +99,7 @@ def create(
9899
functions: Optional[Iterable[completion_create_params.Function]] | NotGiven = NOT_GIVEN,
99100
logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
100101
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
102+
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
101103
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
102104
n: Optional[int] | NotGiven = NOT_GIVEN,
103105
parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
@@ -133,6 +135,7 @@ def create(
133135
functions: Optional[Iterable[completion_create_params.Function]] | NotGiven = NOT_GIVEN,
134136
logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
135137
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
138+
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
136139
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
137140
n: Optional[int] | NotGiven = NOT_GIVEN,
138141
parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
@@ -167,6 +170,7 @@ def create(
167170
functions: Optional[Iterable[completion_create_params.Function]] | NotGiven = NOT_GIVEN,
168171
logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
169172
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
173+
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
170174
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
171175
n: Optional[int] | NotGiven = NOT_GIVEN,
172176
parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
@@ -224,10 +228,14 @@ def create(
224228
probabilities of the output tokens or not. If true, returns the log
225229
probabilities of each output token returned in the `content` of `message`.
226230
227-
max_tokens: The maximum number of tokens that can be generated in the chat completion. The
231+
max_completion_tokens: The maximum number of tokens that can be generated in the chat completion. The
228232
total length of input tokens and generated tokens is limited by the model's
229233
context length.
230234
235+
max_tokens: Deprecated in favor of `max_completion_tokens`. The maximum number of tokens
236+
that can be generated in the chat completion. The total length of input tokens
237+
and generated tokens is limited by the model's context length.
238+
231239
n: How many chat completion choices to generate for each input message. Note that
232240
the current moment, only n=1 is supported. Other values will result in a 400
233241
response.
@@ -316,6 +324,7 @@ def create(
316324
"functions": functions,
317325
"logit_bias": logit_bias,
318326
"logprobs": logprobs,
327+
"max_completion_tokens": max_completion_tokens,
319328
"max_tokens": max_tokens,
320329
"n": n,
321330
"parallel_tool_calls": parallel_tool_calls,
@@ -374,6 +383,7 @@ async def create(
374383
functions: Optional[Iterable[completion_create_params.Function]] | NotGiven = NOT_GIVEN,
375384
logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
376385
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
386+
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
377387
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
378388
n: Optional[int] | NotGiven = NOT_GIVEN,
379389
parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
@@ -409,6 +419,7 @@ async def create(
409419
functions: Optional[Iterable[completion_create_params.Function]] | NotGiven = NOT_GIVEN,
410420
logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
411421
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
422+
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
412423
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
413424
n: Optional[int] | NotGiven = NOT_GIVEN,
414425
parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
@@ -444,6 +455,7 @@ async def create(
444455
functions: Optional[Iterable[completion_create_params.Function]] | NotGiven = NOT_GIVEN,
445456
logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
446457
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
458+
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
447459
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
448460
n: Optional[int] | NotGiven = NOT_GIVEN,
449461
parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
@@ -478,6 +490,7 @@ async def create(
478490
functions: Optional[Iterable[completion_create_params.Function]] | NotGiven = NOT_GIVEN,
479491
logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
480492
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
493+
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
481494
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
482495
n: Optional[int] | NotGiven = NOT_GIVEN,
483496
parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
@@ -535,10 +548,14 @@ async def create(
535548
probabilities of the output tokens or not. If true, returns the log
536549
probabilities of each output token returned in the `content` of `message`.
537550
538-
max_tokens: The maximum number of tokens that can be generated in the chat completion. The
551+
max_completion_tokens: The maximum number of tokens that can be generated in the chat completion. The
539552
total length of input tokens and generated tokens is limited by the model's
540553
context length.
541554
555+
max_tokens: Deprecated in favor of `max_completion_tokens`. The maximum number of tokens
556+
that can be generated in the chat completion. The total length of input tokens
557+
and generated tokens is limited by the model's context length.
558+
542559
n: How many chat completion choices to generate for each input message. Note that
543560
the current moment, only n=1 is supported. Other values will result in a 400
544561
response.
@@ -627,6 +644,7 @@ async def create(
627644
"functions": functions,
628645
"logit_bias": logit_bias,
629646
"logprobs": logprobs,
647+
"max_completion_tokens": max_completion_tokens,
630648
"max_tokens": max_tokens,
631649
"n": n,
632650
"parallel_tool_calls": parallel_tool_calls,

src/groq/types/chat/completion_create_params.py

+8-1
Original file line numberDiff line numberDiff line change
@@ -64,13 +64,20 @@ class CompletionCreateParams(TypedDict, total=False):
6464
probabilities of each output token returned in the `content` of `message`.
6565
"""
6666

67-
max_tokens: Optional[int]
67+
max_completion_tokens: Optional[int]
6868
"""The maximum number of tokens that can be generated in the chat completion.
6969
7070
The total length of input tokens and generated tokens is limited by the model's
7171
context length.
7272
"""
7373

74+
max_tokens: Optional[int]
75+
"""
76+
Deprecated in favor of `max_completion_tokens`. The maximum number of tokens
77+
that can be generated in the chat completion. The total length of input tokens
78+
and generated tokens is limited by the model's context length.
79+
"""
80+
7481
n: Optional[int]
7582
"""How many chat completion choices to generate for each input message.
7683

tests/api_resources/chat/test_completions.py

+2
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,7 @@ def test_method_create_with_all_params(self, client: Groq) -> None:
5252
],
5353
logit_bias={"foo": 0},
5454
logprobs=True,
55+
max_completion_tokens=0,
5556
max_tokens=0,
5657
n=1,
5758
parallel_tool_calls=True,
@@ -154,6 +155,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGroq) -> N
154155
],
155156
logit_bias={"foo": 0},
156157
logprobs=True,
158+
max_completion_tokens=0,
157159
max_tokens=0,
158160
n=1,
159161
parallel_tool_calls=True,

0 commit comments

Comments
 (0)