Release memory on ROCm as well; it works fine here. Fixes #3257

This commit is contained in:
Ximin Luo 2024-07-10 12:01:59 +01:00
parent 5a71495822
commit 72d6fc761d
1 changed file with 3 additions and 4 deletions


@@ -759,16 +759,15 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True):
     return True
 
-def soft_empty_cache(force=False):
+def soft_empty_cache(_force_deprecated=False):
     global cpu_state
     if cpu_state == CPUState.MPS:
        torch.mps.empty_cache()
     elif is_intel_xpu():
        torch.xpu.empty_cache()
     elif torch.cuda.is_available():
-        if force or is_nvidia(): #This seems to make things worse on ROCm so I only do it for cuda
-            torch.cuda.empty_cache()
-            torch.cuda.ipc_collect()
+        torch.cuda.empty_cache()
+        torch.cuda.ipc_collect()
 
 def unload_all_models():
     free_memory(1e30, get_torch_device())
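
For context, the post-patch behavior can be sketched as a standalone function. This is an illustrative rewrite, not the repository's actual module: the real code gates the MPS path through its own cpu_state/CPUState tracking and also has an Intel XPU branch, both replaced here with plain PyTorch checks so the snippet runs on its own. The key point is that ROCm builds of PyTorch expose HIP through the torch.cuda namespace, so the now-unconditional empty_cache()/ipc_collect() calls apply there too.

import torch

def soft_empty_cache():
    # Post-patch behavior: no is_nvidia() gate; any backend that answers
    # torch.cuda.is_available() (NVIDIA CUDA or AMD ROCm/HIP) gets its
    # cached allocator blocks released.
    if torch.backends.mps.is_available():  # stand-in for cpu_state == CPUState.MPS
        torch.mps.empty_cache()
    elif torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()

if __name__ == "__main__":
    # torch.version.hip is a version string on ROCm builds, None on CUDA builds.
    print("ROCm/HIP build:", torch.version.hip is not None)
    soft_empty_cache()

Renaming the parameter to _force_deprecated rather than removing it keeps any old call sites that passed force working, while signaling that the flag no longer changes behavior.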