Skip to content

Commit b1554ac

Browse files
move infer to alpha namespace
1 parent f5c7ac5 commit b1554ac

File tree

17 files changed

+261
-320
lines changed

17 files changed

+261
-320
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,7 @@ print(f"Tx hash: {completion.transaction_hash}")
7474
#### Custom Model Inference
7575
Browse models on our [Model Hub](https://hub.opengradient.ai/) or upload your own:
7676
```python
77-
result = client.inference.infer(
77+
result = client.alpha.infer(
7878
model_cid="your-model-cid",
7979
model_input={"input": [1.0, 2.0, 3.0]},
8080
inference_mode=og.InferenceMode.VANILLA,

docs/CLAUDE_SDK_USERS.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ result = client.llm.completion(
8888
### ONNX Model Inference
8989

9090
```python
91-
result = client.inference.infer(
91+
result = client.alpha.infer(
9292
model_cid: str, # IPFS CID of model
9393
inference_mode: og.InferenceMode, # VANILLA, TEE, or ZKML
9494
model_input: Dict[str, Any], # Input tensors

examples/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -175,7 +175,7 @@ og_client = og.Client(
175175
Basic inference pattern:
176176

177177
```python
178-
result = og_client.inference.infer(
178+
result = og_client.alpha.infer(
179179
model_cid="your-model-cid",
180180
model_input={"input_key": "input_value"},
181181
inference_mode=og.InferenceMode.VANILLA

examples/alpha/run_embeddings_model.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
"Since you're reading this, you are probably someone from a judo background or someone who is just wondering how judo techniques can be applied under wrestling rules. So without further ado, let's get to the question. Are Judo throws allowed in wrestling? Yes, judo throws are allowed in freestyle and folkstyle wrestling. You only need to be careful to follow the slam rules when executing judo throws. In wrestling, a slam is lifting and returning an opponent to the mat with unnecessary force.",
1515
]
1616

17-
model_embeddings = og_client.inference.infer(
17+
model_embeddings = og_client.alpha.infer(
1818
model_cid="intfloat/multilingual-e5-large-instruct",
1919
model_input={"queries": queries, "instruction": instruction, "passages": passages},
2020
inference_mode=og.InferenceMode.VANILLA,

examples/alpha/run_inference.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44

55
og_client = og.Client(private_key=os.environ.get("OG_PRIVATE_KEY"))
66

7-
inference_result = og_client.inference.infer(
7+
inference_result = og_client.alpha.infer(
88
model_cid="hJD2Ja3akZFt1A2LT-D_1oxOCz_OtuGYw4V9eE1m39M",
99
model_input={
1010
"open_high_low_close": [

integrationtest/agent/test_agent.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -87,12 +87,12 @@ def output_formatter(inference_result: InferenceResult):
8787
tool_name="One_hour_volatility_ETH_USDT",
8888
model_input_provider=model_input_provider,
8989
model_output_formatter=output_formatter,
90-
inference=self.client.inference,
90+
inference=self.client.alpha,
9191
tool_description="This tool measures the live 1 hour volatility for the trading pair ETH/USDT.",
9292
inference_mode=og.InferenceMode.VANILLA,
9393
)
9494

95-
expected_result = self.client.inference.infer(
95+
expected_result = self.client.alpha.infer(
9696
inference_mode=og.InferenceMode.VANILLA, model_cid="QmRhcpDXfYCKsimTmJYrAVM4Bbvck59Zb2onj3MHv9Kw5N", model_input=model_input
9797
)
9898
formatted_expected_result = format(float(expected_result.model_output["Y"].item()), ".3%")
@@ -193,14 +193,14 @@ def output_formatter(inference_result: InferenceResult):
193193
tool_name="Return_volatility_tool",
194194
model_input_provider=model_input_provider,
195195
model_output_formatter=output_formatter,
196-
inference=self.client.inference,
196+
inference=self.client.alpha,
197197
tool_input_schema=InputSchema,
198198
tool_description="This tool takes a token and measures the return volatility (standard deviation of returns).",
199199
inference_mode=og.InferenceMode.VANILLA,
200200
)
201201

202202
# Test option ETH
203-
expected_result_eth = self.client.inference.infer(
203+
expected_result_eth = self.client.alpha.infer(
204204
inference_mode=og.InferenceMode.VANILLA, model_cid="QmZdSfHWGJyzBiB2K98egzu3MypPcv4R1ASypUxwZ1MFUG", model_input=eth_model_input
205205
)
206206
formatted_expected_result_eth = format(float(expected_result_eth.model_output["std"].item()), ".3%")
@@ -215,7 +215,7 @@ def output_formatter(inference_result: InferenceResult):
215215
self.assertIn(formatted_expected_result_eth, list(events)[-1]["messages"][-1].content)
216216

217217
# Test option BTC
218-
expected_result_btc = self.client.inference.infer(
218+
expected_result_btc = self.client.alpha.infer(
219219
inference_mode=og.InferenceMode.VANILLA, model_cid="QmZdSfHWGJyzBiB2K98egzu3MypPcv4R1ASypUxwZ1MFUG", model_input=btc_model_input
220220
)
221221
formatted_expected_result_btc = format(float(expected_result_btc.model_output["std"].item()), ".3%")

src/opengradient/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@
3838
print(chunk.choices[0].delta.content, end="")
3939
4040
# Run on-chain ONNX model inference
41-
result = client.inference.infer(
41+
result = client.alpha.infer(
4242
model_cid="your_model_cid",
4343
inference_mode=og.InferenceMode.VANILLA,
4444
model_input={"input": [1.0, 2.0, 3.0]},
@@ -51,7 +51,7 @@
5151
The `opengradient.client.Client` object exposes three namespaces:
5252
5353
- **`opengradient.client.llm`** -- LLM chat and completion
54-
- **`opengradient.client.onchain_inference`** -- On-chain ONNX model inference
54+
- **`opengradient.client.alpha`** -- On-chain ONNX model inference, workflow deployment, and scheduled ML model execution
5555
- **`opengradient.client.model_hub`** -- Model repository management
5656
5757
## Model Hub (requires email auth)

src/opengradient/agents/og_langchain.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -155,11 +155,13 @@ def _generate(
155155
]
156156
sdk_messages.append(msg)
157157
elif isinstance(message, ToolMessage):
158-
sdk_messages.append({
159-
"role": "tool",
160-
"content": _extract_content(message.content),
161-
"tool_call_id": message.tool_call_id,
162-
})
158+
sdk_messages.append(
159+
{
160+
"role": "tool",
161+
"content": _extract_content(message.content),
162+
"tool_call_id": message.tool_call_id,
163+
}
164+
)
163165
else:
164166
raise ValueError(f"Unexpected message type: {message}")
165167

@@ -181,9 +183,7 @@ def _generate(
181183
else:
182184
ai_message = AIMessage(content=_extract_content(chat_response.get("content", "")))
183185

184-
return ChatResult(
185-
generations=[ChatGeneration(message=ai_message, generation_info={"finish_reason": finish_reason})]
186-
)
186+
return ChatResult(generations=[ChatGeneration(message=ai_message, generation_info={"finish_reason": finish_reason})])
187187

188188
@property
189189
def _identifying_params(self) -> Dict[str, Any]:

src/opengradient/alphasense/run_model_tool.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
from langchain_core.tools import BaseTool, StructuredTool
55
from pydantic import BaseModel
66

7-
from ..client.onchain_inference import Inference
7+
from ..client.alpha import Alpha
88
from ..types import InferenceMode, InferenceResult
99
from .types import ToolType
1010

@@ -15,7 +15,7 @@ def create_run_model_tool(
1515
tool_name: str,
1616
model_input_provider: Callable[..., Dict[str, Union[str, int, float, List, np.ndarray]]],
1717
model_output_formatter: Callable[[InferenceResult], str],
18-
inference: Optional[Inference] = None,
18+
inference: Optional[Alpha] = None,
1919
tool_input_schema: Optional[Type[BaseModel]] = None,
2020
tool_description: str = "Executes the given ML model",
2121
inference_mode: InferenceMode = InferenceMode.VANILLA,
@@ -49,8 +49,8 @@ def create_run_model_tool(
4949
InferenceResult has attributes:
5050
* transaction_hash (str): Blockchain hash for the transaction
5151
* model_output (Dict[str, np.ndarray]): Output of the ONNX model
52-
inference (Inference, optional): The inference namespace from an initialized OpenGradient client
53-
(client.inference). If not provided, falls back to the global client set via ``opengradient.init()``.
52+
inference (Alpha, optional): The alpha namespace from an initialized OpenGradient client
53+
(client.alpha). If not provided, falls back to the global client set via ``opengradient.init()``.
5454
tool_input_schema (Type[BaseModel], optional): A Pydantic BaseModel class defining the
5555
input schema.
5656
@@ -104,7 +104,7 @@ def create_run_model_tool(
104104
... tool_name="Return_volatility_tool",
105105
... model_input_provider=model_input_provider,
106106
... model_output_formatter=output_formatter,
107-
... inference=client.inference,
107+
... inference=client.alpha,
108108
... tool_input_schema=InputSchema,
109109
... tool_description="This tool takes a token and measures the return volatility (standard deviation of returns).",
110110
... inference_mode=og.InferenceMode.VANILLA,
@@ -117,9 +117,9 @@ def create_run_model_tool(
117117
if og.global_client is None:
118118
raise ValueError(
119119
"No inference instance provided and no global client initialized. "
120-
"Either pass inference=client.inference or call opengradient.init() first."
120+
"Either pass inference=client.alpha or call opengradient.init() first."
121121
)
122-
inference = og.global_client.inference
122+
inference = og.global_client.alpha
123123

124124
def model_executor(**llm_input):
125125
# Pass LLM input arguments (formatted based on tool_input_schema) as parameters into model_input_provider

src/opengradient/cli.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -324,7 +324,7 @@ def infer(ctx, model_cid: str, inference_mode: str, input_data, input_file: Path
324324
model_input = json.load(file)
325325

326326
click.echo(f'Running {inference_mode} inference for model "{model_cid}"')
327-
inference_result = client.inference.infer(
327+
inference_result = client.alpha.infer(
328328
model_cid=model_cid, inference_mode=InferenceModes[inference_mode], model_input=model_input
329329
)
330330

Comments (0)