comfy/model_management.py (9 changes: 5 additions & 4 deletions)
@@ -26,6 +26,7 @@
 import platform
 import weakref
 import gc
+import os

 class VRAMState(Enum):
     DISABLED = 0 #No vram present: no need to move models to vram
@@ -335,11 +336,11 @@ def amd_min_version(device=None, min_rdna_version=0):

 try:
     if is_amd():
-        arch = torch.cuda.get_device_properties(get_torch_device()).gcnArchName
-        if not (any((a in arch) for a in AMD_RDNA2_AND_OLDER_ARCH)):
-            torch.backends.cudnn.enabled = False # Seems to improve things a lot on AMD
-            logging.info("Set: torch.backends.cudnn.enabled = False for better AMD performance.")
+        if os.getenv('MIOPEN_FIND_MODE') is None:
+            os.environ['MIOPEN_FIND_MODE'] = "FAST"
+            logging.info("Set: MIOPEN_FIND_MODE=FAST for better AMD performance, change by setting MIOPEN_FIND_MODE.")

+        arch = torch.cuda.get_device_properties(get_torch_device()).gcnArchName
         try:
             rocm_version = tuple(map(int, str(torch.version.hip).split(".")[:2]))
         except:
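For readers who want to see the new behavior in isolation: the change only supplies a default, so a MIOPEN_FIND_MODE value that is already exported in the environment wins and the code above leaves it alone. Below is a minimal standalone sketch of that check-then-set pattern; the function name apply_amd_miopen_default and the NORMAL override value are illustrative assumptions, not part of this PR.

import logging
import os

def apply_amd_miopen_default(is_amd_gpu: bool) -> None:
    # Same pattern as the hunk above: provide a default MIOpen tuning mode on
    # AMD, but never clobber a value the user has already exported.
    if is_amd_gpu and os.getenv("MIOPEN_FIND_MODE") is None:
        os.environ["MIOPEN_FIND_MODE"] = "FAST"
        logging.info("Defaulted MIOPEN_FIND_MODE=FAST; set MIOPEN_FIND_MODE to override.")

# Example override: a value set before the check runs (here NORMAL, or an
# exported shell variable before launch) makes the FAST default a no-op.
os.environ.setdefault("MIOPEN_FIND_MODE", "NORMAL")
apply_amd_miopen_default(is_amd_gpu=True)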