Add initial support for Intel Gaudi accelerators (#2121)
ankurneog authored Nov 23, 2024
1 parent 66d4859 commit 865233e
Showing 4 changed files with 10 additions and 7 deletions.
5 changes: 5 additions & 0 deletions python/pyproject.toml
@@ -31,6 +31,9 @@ srt_hip = ["sglang[runtime_common]", "torch", "vllm==0.6.3.dev13"]
 # xpu is not enabled in public vllm and torch whl,
 # need to follow https://docs.vllm.ai/en/latest/getting_started/xpu-installation.html to install vllm
 srt_xpu = ["sglang[runtime_common]"]
+# For Intel Gaudi (device: hpu), follow the installation guide:
+# https://docs.vllm.ai/en/latest/getting_started/gaudi-installation.html
+srt_hpu = ["sglang[runtime_common]"]
 
 openai = ["openai>=1.0", "tiktoken"]
 anthropic = ["anthropic>=0.20.0"]
@@ -46,9 +49,11 @@ test = [
 all = ["sglang[srt]", "sglang[openai]", "sglang[anthropic]", "sglang[litellm]"]
 all_hip = ["sglang[srt_hip]", "sglang[openai]", "sglang[anthropic]", "sglang[litellm]"]
 all_xpu = ["sglang[srt_xpu]", "sglang[openai]", "sglang[anthropic]", "sglang[litellm]"]
+all_hpu = ["sglang[srt_hpu]", "sglang[openai]", "sglang[anthropic]", "sglang[litellm]"]
 dev = ["sglang[all]", "sglang[test]"]
 dev_hip = ["sglang[all_hip]", "sglang[test]"]
 dev_xpu = ["sglang[all_xpu]", "sglang[test]"]
+dev_hpu = ["sglang[all_hpu]", "sglang[test]"]
 
 [project.urls]
 "Homepage" = "https://github.com/sgl-project/sglang"
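With these extras defined, an hpu-enabled install can presumably be done from the repository root with `pip install -e "python[all_hpu]"` (the path is inferred from the pyproject.toml location shown above); the Gaudi software stack itself must first be set up per the linked vLLM guide.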
5 changes: 1 addition & 4 deletions python/sglang/bench_one_batch.py
@@ -278,10 +278,7 @@ def correctness_test(
 
 
 def synchronize(device):
-    if device == "cuda":
-        torch.cuda.synchronize()
-    elif device == "xpu":
-        torch.xpu.synchronize()
+    torch.get_device_module(device).synchronize()


def latency_test_run_once(
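The refactor above relies on torch.get_device_module, which maps a device string to its backend module (e.g. "cuda" to torch.cuda), so one generic call replaces the per-device if/elif chain. A minimal sketch, assuming a PyTorch build recent enough to ship this helper:

import torch

def synchronize(device: str) -> None:
    # torch.get_device_module("cuda") returns torch.cuda, "xpu" returns
    # torch.xpu, and so on, so this one line covers every backend.
    torch.get_device_module(device).synchronize()

# Guarded usage, since not every backend exists in every build:
if torch.cuda.is_available():
    synchronize("cuda")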
5 changes: 3 additions & 2 deletions python/sglang/srt/model_executor/model_runner.py
@@ -176,14 +176,15 @@ def __init__(
     def init_torch_distributed(self):
         logger.info("Init torch distributed begin.")
         # Init torch distributed
+        torch.get_device_module(self.device).set_device(self.gpu_id)
         if self.device == "cuda":
-            torch.cuda.set_device(self.gpu_id)
             backend = "nccl"
         # TODO(liangan1): Just use gloo to bypass the initialization failure.
         # Need to use xccl for xpu backend in the future.
         elif self.device == "xpu":
-            torch.xpu.set_device(self.gpu_id)
             backend = "gloo"
+        elif self.device == "hpu":
+            backend = "hccl"
 
         if not self.server_args.enable_p2p_check:
             monkey_patch_vllm_p2p_access_check(self.gpu_id)
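The backend string selected above ("nccl", "gloo", or "hccl") is what torch.distributed is later initialized with. A minimal sketch of that hand-off; the rendezvous address, rank, and world size are illustrative assumptions, and "gloo" is used so the snippet runs without accelerator hardware:

import torch.distributed as dist

# Backend table mirroring the diff: nccl for cuda, gloo for xpu, hccl for hpu.
BACKENDS = {"cuda": "nccl", "xpu": "gloo", "hpu": "hccl"}

# Single-process rendezvous for illustration; on Gaudi the runner would pass
# BACKENDS["hpu"] == "hccl" instead of "gloo".
dist.init_process_group(
    backend="gloo",
    init_method="tcp://127.0.0.1:29500",
    rank=0,
    world_size=1,
)
dist.destroy_process_group()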
2 changes: 1 addition & 1 deletion python/sglang/srt/server_args.py
@@ -306,7 +306,7 @@ def add_cli_args(parser: argparse.ArgumentParser):
         "--device",
         type=str,
         default="cuda",
-        choices=["cuda", "xpu"],
+        choices=["cuda", "xpu", "hpu"],
         help="The device type.",
     )
     parser.add_argument(
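The widened choices list is plain argparse, so "hpu" now parses instead of raising an error. A self-contained sketch (parser wiring copied from the diff; the parse call is added for illustration):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--device",
    type=str,
    default="cuda",
    choices=["cuda", "xpu", "hpu"],
    help="The device type.",
)

args = parser.parse_args(["--device", "hpu"])
assert args.device == "hpu"

In practice this means the server can presumably be launched with --device hpu once the Gaudi stack is installed.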
