
Commit a4cf081

support sending **kwargs to api (#181)

* support sending **kwargs to api
* fix precommit
1 parent 99b6d26 commit a4cf081
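
In practice, this change means extra keyword arguments passed to the resource methods (chat, completions, embeddings, images, rerank) are now accepted and forwarded into the request payload instead of being rejected by the method signature. A rough usage sketch, not part of the commit; "min_p" is a hypothetical extra parameter and the model name is only an example:

# Illustration only: pass an extra, undeclared parameter through **kwargs.
# "min_p" is a hypothetical example parameter, not taken from this commit.
from together import Together

client = Together()  # assumes TOGETHER_API_KEY is set in the environment

response = client.chat.completions.create(
    model="meta-llama/Llama-3-8b-chat-hf",
    messages=[{"role": "user", "content": "Hello"}],
    min_p=0.05,  # not a named argument of create(); forwarded via **kwargs
)
print(response.choices[0].message.content)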

File tree

src/together/legacy/complete.py
src/together/legacy/embeddings.py
src/together/legacy/images.py
src/together/resources/chat/completions.py
src/together/resources/completions.py
src/together/resources/embeddings.py
src/together/resources/images.py
src/together/resources/rerank.py

8 files changed: +30 -8 lines changed

src/together/legacy/complete.py
Lines changed: 4 additions & 4 deletions

@@ -14,7 +14,7 @@ class Complete:
     def create(
         cls,
         prompt: str,
-        **kwargs,
+        **kwargs: Any,
     ) -> Dict[str, Any]:
         """Legacy completion function."""

@@ -36,7 +36,7 @@ def create(
     def create_streaming(
         cls,
         prompt: str,
-        **kwargs,
+        **kwargs: Any,
     ) -> Iterator[Dict[str, Any]]:
         """Legacy streaming completion function."""

@@ -59,7 +59,7 @@ class Completion:
     def create(
         cls,
         prompt: str,
-        **kwargs,
+        **kwargs: Any,
     ) -> CompletionResponse | Iterator[CompletionChunk]:
         """Completion function."""

@@ -79,7 +79,7 @@ class AsyncComplete:
     async def create(
         cls,
         prompt: str,
-        **kwargs,
+        **kwargs: Any,
     ) -> CompletionResponse | AsyncGenerator[CompletionChunk, None]:
         """Async completion function."""

src/together/legacy/embeddings.py
Lines changed: 1 addition & 1 deletion

@@ -11,7 +11,7 @@ class Embeddings:
     def create(
         cls,
         input: str,
-        **kwargs,
+        **kwargs: Any,
     ) -> Dict[str, Any]:
         """Legacy embeddings function."""

src/together/legacy/images.py
Lines changed: 1 addition & 1 deletion

@@ -11,7 +11,7 @@ class Image:
     def create(
         cls,
         prompt: str,
-        **kwargs,
+        **kwargs: Any,
     ) -> Dict[str, Any]:
         """Legacy image function."""

src/together/resources/chat/completions.py
Lines changed: 4 additions & 0 deletions

@@ -40,6 +40,7 @@ def create(
         response_format: Dict[str, str | Dict[str, Any]] | None = None,
         tools: Dict[str, str | Dict[str, Any]] | None = None,
         tool_choice: str | Dict[str, str | Dict[str, str]] | None = None,
+        **kwargs: Any,
     ) -> ChatCompletionResponse | Iterator[ChatCompletionChunk]:
         """
         Method to generate completions based on a given prompt using a specified model.

@@ -131,6 +132,7 @@ def create(
             response_format=response_format,
             tools=tools,
             tool_choice=tool_choice,
+            **kwargs,
         ).model_dump(exclude_none=True)

         response, _, _ = requestor.request(

@@ -177,6 +179,7 @@ async def create(
         response_format: Dict[str, Any] | None = None,
         tools: Dict[str, str | Dict[str, str | Dict[str, Any]]] | None = None,
         tool_choice: str | Dict[str, str | Dict[str, str]] | None = None,
+        **kwargs: Any,
     ) -> AsyncGenerator[ChatCompletionChunk, None] | ChatCompletionResponse:
         """
         Async method to generate completions based on a given prompt using a specified model.

@@ -268,6 +271,7 @@ async def create(
             response_format=response_format,
             tools=tools,
             tool_choice=tool_choice,
+            **kwargs,
         ).model_dump(exclude_none=True)

         response, _, _ = await requestor.arequest(
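
In each resource, the forwarded **kwargs are unpacked into the request model and then serialized with model_dump(exclude_none=True). For the pass-through to reach the wire, the request model must tolerate fields it does not declare; the commit does not show that part of the models, so the following is only a sketch of the mechanism under that assumption, using a hypothetical ExampleRequest with pydantic's extra="allow":

# Minimal sketch of the payload pattern in this diff, not the SDK's actual models.
# ExampleRequest is hypothetical; extra="allow" keeps undeclared fields around.
from typing import Any, Dict

from pydantic import BaseModel, ConfigDict


class ExampleRequest(BaseModel):
    model_config = ConfigDict(extra="allow")

    prompt: str
    model: str


def build_payload(prompt: str, model: str, **kwargs: Any) -> Dict[str, Any]:
    # Mirror the diff: forward **kwargs into the request model, then drop Nones.
    return ExampleRequest(prompt=prompt, model=model, **kwargs).model_dump(
        exclude_none=True
    )


print(build_payload("hello", "example-model", min_p=0.05))
# -> {'prompt': 'hello', 'model': 'example-model', 'min_p': 0.05}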

src/together/resources/completions.py
Lines changed: 5 additions & 1 deletion

@@ -1,6 +1,6 @@
 from __future__ import annotations

-from typing import AsyncGenerator, Dict, Iterator, List
+from typing import AsyncGenerator, Dict, Iterator, List, Any

 from together.abstract import api_requestor
 from together.together_response import TogetherResponse

@@ -37,6 +37,7 @@ def create(
         echo: bool | None = None,
         n: int | None = None,
         safety_model: str | None = None,
+        **kwargs: Any,
     ) -> CompletionResponse | Iterator[CompletionChunk]:
         """
         Method to generate completions based on a given prompt using a specified model.

@@ -113,6 +114,7 @@ def create(
             echo=echo,
             n=n,
             safety_model=safety_model,
+            **kwargs,
         ).model_dump(exclude_none=True)

         response, _, _ = requestor.request(

@@ -156,6 +158,7 @@ async def create(
         echo: bool | None = None,
         n: int | None = None,
         safety_model: str | None = None,
+        **kwargs: Any,
     ) -> AsyncGenerator[CompletionChunk, None] | CompletionResponse:
         """
         Async method to generate completions based on a given prompt using a specified model.

@@ -232,6 +235,7 @@ async def create(
             echo=echo,
             n=n,
             safety_model=safety_model,
+            **kwargs,
         ).model_dump(exclude_none=True)

         response, _, _ = await requestor.arequest(

src/together/resources/embeddings.py
Lines changed: 5 additions & 1 deletion

@@ -1,6 +1,6 @@
 from __future__ import annotations

-from typing import List
+from typing import List, Any

 from together.abstract import api_requestor
 from together.together_response import TogetherResponse

@@ -21,6 +21,7 @@ def create(
         *,
         input: str | List[str],
         model: str,
+        **kwargs: Any,
     ) -> EmbeddingResponse:
         """
         Method to generate completions based on a given prompt using a specified model.

@@ -40,6 +41,7 @@ def create(
         parameter_payload = EmbeddingRequest(
             input=input,
             model=model,
+            **kwargs,
         ).model_dump(exclude_none=True)

         response, _, _ = requestor.request(

@@ -65,6 +67,7 @@ async def create(
         *,
         input: str | List[str],
         model: str,
+        **kwargs: Any,
     ) -> EmbeddingResponse:
         """
         Async method to generate completions based on a given prompt using a specified model.

@@ -84,6 +87,7 @@ async def create(
         parameter_payload = EmbeddingRequest(
             input=input,
             model=model,
+            **kwargs,
         ).model_dump(exclude_none=True)

         response, _, _ = await requestor.arequest(

src/together/resources/images.py
Lines changed: 6 additions & 0 deletions

@@ -1,5 +1,7 @@
 from __future__ import annotations

+from typing import Any
+
 from together.abstract import api_requestor
 from together.together_response import TogetherResponse
 from together.types import (

@@ -25,6 +27,7 @@ def generate(
         height: int | None = 1024,
         width: int | None = 1024,
         negative_prompt: str | None = None,
+        **kwargs: Any,
     ) -> ImageResponse:
         """
         Method to generate images based on a given prompt using a specified model.

@@ -67,6 +70,7 @@ def generate(
             height=height,
             width=width,
             negative_prompt=negative_prompt,
+            **kwargs,
         ).model_dump(exclude_none=True)

         response, _, _ = requestor.request(

@@ -98,6 +102,7 @@ async def generate(
         height: int | None = 1024,
         width: int | None = 1024,
         negative_prompt: str | None = None,
+        **kwargs: Any,
     ) -> ImageResponse:
         """
         Async method to generate images based on a given prompt using a specified model.

@@ -140,6 +145,7 @@ async def generate(
             height=height,
             width=width,
             negative_prompt=negative_prompt,
+            **kwargs,
         ).model_dump(exclude_none=True)

         response, _, _ = await requestor.arequest(

src/together/resources/rerank.py
Lines changed: 4 additions & 0 deletions

@@ -25,6 +25,7 @@ def create(
         top_n: int | None = None,
         return_documents: bool = False,
         rank_fields: List[str] | None = None,
+        **kwargs: Any,
     ) -> RerankResponse:
         """
         Method to generate completions based on a given prompt using a specified model.

@@ -52,6 +53,7 @@ def create(
             top_n=top_n,
             return_documents=return_documents,
             rank_fields=rank_fields,
+            **kwargs,
         ).model_dump(exclude_none=True)

         response, _, _ = requestor.request(

@@ -81,6 +83,7 @@ async def create(
         top_n: int | None = None,
         return_documents: bool = False,
         rank_fields: List[str] | None = None,
+        **kwargs: Any,
     ) -> RerankResponse:
         """
         Async method to generate completions based on a given prompt using a specified model.

@@ -108,6 +111,7 @@ async def create(
             top_n=top_n,
             return_documents=return_documents,
             rank_fields=rank_fields,
+            **kwargs,
         ).model_dump(exclude_none=True)

         response, _, _ = await requestor.arequest(
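
The async resources receive the same treatment, so the pass-through also applies to the async client. A hedged sketch under the same assumptions as above ("min_p" remains a hypothetical extra parameter):

# Illustration only, not part of the commit.
import asyncio

from together import AsyncTogether


async def main() -> None:
    client = AsyncTogether()  # assumes TOGETHER_API_KEY is set in the environment
    response = await client.chat.completions.create(
        model="meta-llama/Llama-3-8b-chat-hf",
        messages=[{"role": "user", "content": "Hello"}],
        min_p=0.05,  # forwarded to the API via **kwargs
    )
    print(response.choices[0].message.content)


asyncio.run(main())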
