6 changes: 3 additions & 3 deletions gptqmodel/models/loader.py
@@ -580,14 +580,14 @@ def build_layerwise_device_map(
device_ids = list(range(num_gpus))
device_map: Dict[str, str] = {}
mod2name = {m: n for n, m in model.named_modules()}

if torch.cuda.is_available():
device_strs = [f"cuda:{i}" for i in range(num_gpus)]
elif hasattr(torch, "xpu") and torch.xpu.is_available():
device_strs = [f"xpu:{i}" for i in range(num_gpus)]
else:
device_strs = ["cpu"] * num_gpus

def assign(mod, device_id):
if mod is None:
return
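A minimal sketch of the accelerator fallback this hunk touches, kept outside the diff for clarity. The cuda/xpu/cpu selection mirrors the lines above; the helper name pick_device_strs and the round-robin example are illustrative, not part of the PR.

import torch

def pick_device_strs(num_gpus: int) -> list[str]:
    # Prefer CUDA, then Intel XPU; with no accelerator every slot maps to CPU.
    if torch.cuda.is_available():
        return [f"cuda:{i}" for i in range(num_gpus)]
    if hasattr(torch, "xpu") and torch.xpu.is_available():
        return [f"xpu:{i}" for i in range(num_gpus)]
    return ["cpu"] * num_gpus

# Example: spread eight decoder layers round-robin over two devices, roughly
# what build_layerwise_device_map's assign() helper does per module.
device_strs = pick_device_strs(num_gpus=2)
device_map = {f"model.layers.{i}": device_strs[i % len(device_strs)] for i in range(8)}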
@@ -726,7 +726,7 @@ def assign(mod, device_id):
)
if not _validate_machete_device_support():
raise ValueError(
f"Kernel: Machete kernel requires compute capability >= 9.0. Detected capability: {torch.cuda.get_device_capability()}"
f"Kernel: Machete kernel currently supports Hopper GPUs (SM 90). Detected capability: {torch.cuda.get_device_capability()}."
)

if backend in [BACKEND.MARLIN, BACKEND.MARLIN_FP16] and (
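The second hunk only rewords the Machete error message (Hopper / SM 90 instead of "compute capability >= 9.0"); _validate_machete_device_support() itself is not shown in this diff. A sketch of what such a gate could look like, assuming it simply requires an SM 90 device:

import torch

def _validate_machete_device_support() -> bool:
    # Hypothetical check consistent with the new message: Machete currently
    # targets Hopper (SM 90) only, not any capability >= 9.0.
    if not torch.cuda.is_available():
        return False
    major, minor = torch.cuda.get_device_capability()
    return (major, minor) == (9, 0)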
200 changes: 0 additions & 200 deletions gptqmodel/nn_modules/qlinear/awq_machete.py

This file was deleted.
