Skip to content

Commit af8c47d

Browse files
committed
sanity test
Signed-off-by: Erin Ho <[email protected]> (status check: precheck)
1 parent: d884d2d · commit: af8c47d

File tree

2 files changed

+7
-2
lines changed

2 files changed

+7
-2
lines changed

tensorrt_llm/executor/rpc/rpc_client.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -412,7 +412,8 @@ def custom_exception_handler(loop, context):
412412
exception = context.get('exception')
413413
message = context.get('message', '')
414414

415-
if isinstance(exception, asyncio.CancelledError) or "pending" in message:
415+
if isinstance(exception,
416+
asyncio.CancelledError) or "pending" in message:
416417
logger.debug(f"Suppressed error during shutdown: {message}")
417418
return
418419

tests/integration/defs/examples/test_ray.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,11 @@ def ray_example_root(llm_root):
1212
return example_root
1313

1414

15-
def test_llm_inference_async_ray(ray_example_root, llm_venv):
15+
@pytest.mark.parametrize("use_rpc", [True, False], ids=["rpc", "no_rpc"])
16+
def test_llm_inference_async_ray(ray_example_root, llm_venv, monkeypatch,
17+
use_rpc):
18+
if use_rpc:
19+
monkeypatch.setenv("TLLM_RAY_USE_RPC", "1")
1620
script_path = os.path.join(ray_example_root, "llm_inference_async_ray.py")
1721
model_path = f"{llm_models_root()}/llama-models-v2/TinyLlama-1.1B-Chat-v1.0"
1822
venv_check_call(llm_venv, [script_path, "--model", model_path])

0 commit comments

Comments (0)