diff --git a/examples/alpha/create_workflow.py b/examples/alpha/create_workflow.py index 669d7a7..57d0ddc 100644 --- a/examples/alpha/create_workflow.py +++ b/examples/alpha/create_workflow.py @@ -2,7 +2,7 @@ import opengradient as og -alpha = og.Alpha(private_key=os.environ.get("OG_PRIVATE_KEY")) +alpha = og.Alpha(private_key=os.environ["OG_PRIVATE_KEY"]) # Define model input input_query = og.HistoricalInputQuery( diff --git a/examples/alpha/run_embeddings_model.py b/examples/alpha/run_embeddings_model.py index 11832d6..bd95278 100644 --- a/examples/alpha/run_embeddings_model.py +++ b/examples/alpha/run_embeddings_model.py @@ -2,7 +2,7 @@ import opengradient as og -alpha = og.Alpha(private_key=os.environ.get("OG_PRIVATE_KEY")) +alpha = og.Alpha(private_key=os.environ["OG_PRIVATE_KEY"]) queries = [ "how much protein should a female eat", diff --git a/examples/alpha/run_inference.py b/examples/alpha/run_inference.py index d23ed03..e5ad69f 100644 --- a/examples/alpha/run_inference.py +++ b/examples/alpha/run_inference.py @@ -2,7 +2,7 @@ import opengradient as og -alpha = og.Alpha(private_key=os.environ.get("OG_PRIVATE_KEY")) +alpha = og.Alpha(private_key=os.environ["OG_PRIVATE_KEY"]) inference_result = alpha.infer( model_cid="hJD2Ja3akZFt1A2LT-D_1oxOCz_OtuGYw4V9eE1m39M", diff --git a/examples/alpha/use_workflow.py b/examples/alpha/use_workflow.py index a86ffda..390cb2f 100644 --- a/examples/alpha/use_workflow.py +++ b/examples/alpha/use_workflow.py @@ -2,7 +2,7 @@ import opengradient as og -alpha = og.Alpha(private_key=os.environ.get("OG_PRIVATE_KEY")) +alpha = og.Alpha(private_key=os.environ["OG_PRIVATE_KEY"]) model_output = alpha.read_workflow_result( # This is the workflow contract address that you previously deployed diff --git a/examples/langchain_react_agent.py b/examples/langchain_react_agent.py index ebd7c03..9ca5faa 100644 --- a/examples/langchain_react_agent.py +++ b/examples/langchain_react_agent.py @@ -15,7 +15,7 @@ import opengradient as og -private_key 
= os.environ.get("OG_PRIVATE_KEY") +private_key = os.environ["OG_PRIVATE_KEY"] # One-time Permit2 approval for OPG spending (idempotent) llm_client = og.LLM(private_key=private_key) diff --git a/examples/twins_chat.py b/examples/twins_chat.py index 786c9c4..5485732 100644 --- a/examples/twins_chat.py +++ b/examples/twins_chat.py @@ -5,7 +5,7 @@ import opengradient as og -twins = og.Twins(api_key=os.environ.get("TWINS_API_KEY")) +twins = og.Twins(api_key=os.environ["TWINS_API_KEY"]) # Chat with Elon Musk print("--------------------------------") @@ -18,7 +18,7 @@ messages=[{"role": "user", "content": "What do you think about AI?"}], max_tokens=1000, ) -print(f"Elon: {elon.chat_output['content']}") +print(f"Elon: {elon.chat_output['content'] if elon.chat_output else None}") # Chat with Donald Trump print("--------------------------------") @@ -31,4 +31,4 @@ messages=[{"role": "user", "content": "What's your plan for America?"}], max_tokens=1000, ) -print(f"Trump: {trump.chat_output['content']}") +print(f"Trump: {trump.chat_output['content'] if trump.chat_output else None}") diff --git a/src/opengradient/client/_utils.py b/src/opengradient/client/_utils.py index 7e73011..5e2938a 100644 --- a/src/opengradient/client/_utils.py +++ b/src/opengradient/client/_utils.py @@ -20,7 +20,8 @@ def get_abi(abi_name: str) -> dict: """Returns the ABI for the requested contract.""" abi_path = _ABI_DIR / abi_name with open(abi_path, "r") as f: - return json.load(f) + result: dict = json.load(f) + return result def get_bin(bin_name: str) -> str: diff --git a/src/opengradient/client/alpha.py b/src/opengradient/client/alpha.py index 1a31e33..a4e633e 100644 --- a/src/opengradient/client/alpha.py +++ b/src/opengradient/client/alpha.py @@ -18,7 +18,7 @@ from web3.logs import DISCARD from ..types import HistoricalInputQuery, InferenceMode, InferenceResult, ModelOutput, SchedulerParams -from ._conversions import convert_array_to_model_output, convert_to_model_input, convert_to_model_output 
+from ._conversions import convert_array_to_model_output, convert_to_model_input, convert_to_model_output # type: ignore[attr-defined] from ._utils import get_abi, get_bin, run_with_retry DEFAULT_RPC_URL = "https://ogevmdevnet.opengradient.ai" @@ -57,8 +57,8 @@ def __init__( self._wallet_account: LocalAccount = self._blockchain.eth.account.from_key(private_key) self._inference_hub_contract_address = inference_contract_address self._api_url = api_url - self._inference_abi = None - self._precompile_abi = None + self._inference_abi: Optional[dict] = None + self._precompile_abi: Optional[dict] = None @property def inference_abi(self) -> dict: @@ -98,8 +98,12 @@ def infer( """ def execute_transaction(): - contract = self._blockchain.eth.contract(address=self._inference_hub_contract_address, abi=self.inference_abi) - precompile_contract = self._blockchain.eth.contract(address=PRECOMPILE_CONTRACT_ADDRESS, abi=self.precompile_abi) + contract = self._blockchain.eth.contract( + address=Web3.to_checksum_address(self._inference_hub_contract_address), abi=self.inference_abi + ) + precompile_contract = self._blockchain.eth.contract( + address=Web3.to_checksum_address(PRECOMPILE_CONTRACT_ADDRESS), abi=self.precompile_abi + ) inference_mode_uint8 = inference_mode.value converted_model_input = convert_to_model_input(model_input) @@ -122,7 +126,8 @@ def execute_transaction(): return InferenceResult(tx_hash.hex(), model_output) - return run_with_retry(execute_transaction, max_retries) + result: InferenceResult = run_with_retry(execute_transaction, max_retries) + return result def _send_tx_with_revert_handling(self, run_function): """ @@ -161,7 +166,7 @@ def _send_tx_with_revert_handling(self, run_function): } ) - signed_tx = self._wallet_account.sign_transaction(transaction) + signed_tx = self._wallet_account.sign_transaction(transaction) # type: ignore[arg-type] tx_hash = self._blockchain.eth.send_raw_transaction(signed_tx.raw_transaction) tx_receipt = 
self._blockchain.eth.wait_for_transaction_receipt(tx_hash, timeout=INFERENCE_TX_TIMEOUT) @@ -176,7 +181,7 @@ def _send_tx_with_revert_handling(self, run_function): return tx_hash, tx_receipt - def _get_inference_result_from_node(self, inference_id: str, inference_mode: InferenceMode) -> Dict: + def _get_inference_result_from_node(self, inference_id: str, inference_mode: InferenceMode) -> Optional[Dict]: """ Get the inference result from node. @@ -317,7 +322,7 @@ def deploy_transaction(): return tx_receipt.contractAddress - contract_address = run_with_retry(deploy_transaction) + contract_address: str = run_with_retry(deploy_transaction) if scheduler_params: self._register_with_scheduler(contract_address, scheduler_params) @@ -343,7 +348,7 @@ def _register_with_scheduler(self, contract_address: str, scheduler_params: Sche # Scheduler contract address scheduler_address = DEFAULT_SCHEDULER_ADDRESS - scheduler_contract = self._blockchain.eth.contract(address=scheduler_address, abi=scheduler_abi) + scheduler_contract = self._blockchain.eth.contract(address=Web3.to_checksum_address(scheduler_address), abi=scheduler_abi) try: # Register the workflow with the scheduler @@ -359,7 +364,7 @@ def _register_with_scheduler(self, contract_address: str, scheduler_params: Sche } ) - signed_scheduler_tx = self._wallet_account.sign_transaction(scheduler_tx) + signed_scheduler_tx = self._wallet_account.sign_transaction(scheduler_tx) # type: ignore[arg-type] scheduler_tx_hash = self._blockchain.eth.send_raw_transaction(signed_scheduler_tx.raw_transaction) self._blockchain.eth.wait_for_transaction_receipt(scheduler_tx_hash, timeout=REGULAR_TX_TIMEOUT) except Exception as e: @@ -388,7 +393,8 @@ def read_workflow_result(self, contract_address: str) -> ModelOutput: # Get the result result = contract.functions.getInferenceResult().call() - return convert_array_to_model_output(result) + output: ModelOutput = convert_array_to_model_output(result) + return output def run_workflow(self, 
contract_address: str) -> ModelOutput: """ @@ -423,17 +429,18 @@ def run_workflow(self, contract_address: str) -> ModelOutput: } ) - signed_txn = self._wallet_account.sign_transaction(transaction) + signed_txn = self._wallet_account.sign_transaction(transaction) # type: ignore[arg-type] tx_hash = self._blockchain.eth.send_raw_transaction(signed_txn.raw_transaction) tx_receipt = self._blockchain.eth.wait_for_transaction_receipt(tx_hash, timeout=INFERENCE_TX_TIMEOUT) - if tx_receipt.status == 0: + if tx_receipt["status"] == 0: raise ContractLogicError(f"Run transaction failed. Receipt: {tx_receipt}") # Get the inference result from the contract result = contract.functions.getInferenceResult().call() - return convert_array_to_model_output(result) + run_output: ModelOutput = convert_array_to_model_output(result) + return run_output def read_workflow_history(self, contract_address: str, num_results: int) -> List[ModelOutput]: """ diff --git a/src/opengradient/client/model_hub.py b/src/opengradient/client/model_hub.py index 35f0b72..d8f5c92 100644 --- a/src/opengradient/client/model_hub.py +++ b/src/opengradient/client/model_hub.py @@ -165,10 +165,10 @@ def upload(self, model_path: str, model_name: str, version: str) -> FileUploadRe else: raise RuntimeError("Empty or null response content received") elif response.status_code == 500: - raise RuntimeError("Internal server error occurred", status_code=500) + raise RuntimeError("Internal server error occurred (status_code=500)") else: error_message = response.json().get("detail", "Unknown error occurred") - raise RuntimeError(f"Upload failed: {error_message}", status_code=response.status_code) + raise RuntimeError(f"Upload failed: {error_message} (status_code={response.status_code})") except requests.RequestException as e: raise RuntimeError(f"Upload failed: {str(e)}") @@ -200,7 +200,8 @@ def list_files(self, model_name: str, version: str) -> List[Dict]: try: response = requests.get(url, headers=headers) 
response.raise_for_status() - return response.json() + result: List[Dict] = response.json() + return result except requests.RequestException as e: raise RuntimeError(f"File listing failed: {str(e)}") diff --git a/src/opengradient/client/opg_token.py b/src/opengradient/client/opg_token.py index 87d3867..22cc8bc 100644 --- a/src/opengradient/client/opg_token.py +++ b/src/opengradient/client/opg_token.py @@ -100,11 +100,11 @@ def ensure_opg_approval(wallet_account: LocalAccount, opg_amount: float) -> Perm } ) - signed = wallet_account.sign_transaction(tx) + signed = wallet_account.sign_transaction(tx) # type: ignore[arg-type] tx_hash = w3.eth.send_raw_transaction(signed.raw_transaction) receipt = w3.eth.wait_for_transaction_receipt(tx_hash, timeout=120) - if receipt.status != 1: + if receipt.status != 1: # type: ignore[attr-defined] raise RuntimeError(f"Permit2 approval transaction reverted: {tx_hash.hex()}") allowance_after = token.functions.allowance(owner, spender).call() diff --git a/src/opengradient/client/twins.py b/src/opengradient/client/twins.py index 55adeaf..94bafcc 100644 --- a/src/opengradient/client/twins.py +++ b/src/opengradient/client/twins.py @@ -87,9 +87,6 @@ def chat( except RuntimeError: raise except httpx.HTTPStatusError as e: - raise RuntimeError( - f"Twins chat request failed: {e.response.status_code} {e.response.text}", - status_code=e.response.status_code, - ) + raise RuntimeError(f"Twins chat request failed: {e.response.status_code} {e.response.text}") except Exception as e: raise RuntimeError(f"Twins chat request failed: {str(e)}") diff --git a/src/opengradient/py.typed b/src/opengradient/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/src/opengradient/types.py b/src/opengradient/types.py index 492bed4..1f7ec75 100644 --- a/src/opengradient/types.py +++ b/src/opengradient/types.py @@ -324,7 +324,7 @@ def __next__(self) -> StreamChunk: while True: try: - line = next(self._iterator) + line = next(self._iterator) # type: 
ignore[arg-type] except StopIteration: raise @@ -355,7 +355,7 @@ async def __anext__(self) -> StreamChunk: while True: try: - line = await self._iterator.__anext__() + line = await self._iterator.__anext__() # type: ignore[union-attr] except StopAsyncIteration: raise