about summary refs log tree commit diff stats
path: root/modules/lowvram.py
diff options
context:
space:
mode:
authorAUTOMATIC1111 <16777216c@gmail.com>2022-09-07 19:29:44 +0000
committerGitHub <noreply@github.com>2022-09-07 19:29:44 +0000
commit296d012423f8d1862a63680443bb88b7d904ba4e (patch)
treef0dcc162fb3b3fa97d84d9c9ac7922203fc11df9 /modules/lowvram.py
parentee29bb77bfe3d2095bc08861bcdebeea20b890f1 (diff)
parentba1124b326280202cb583bbdc669fb5303bbd3e3 (diff)
downloadstable-diffusion-webui-gfx803-296d012423f8d1862a63680443bb88b7d904ba4e.tar.gz
stable-diffusion-webui-gfx803-296d012423f8d1862a63680443bb88b7d904ba4e.tar.bz2
stable-diffusion-webui-gfx803-296d012423f8d1862a63680443bb88b7d904ba4e.zip
Merge pull request #108 from xeonvs/mps-support
Added support for launching on Apple Silicon M1/M2
Diffstat (limited to 'modules/lowvram.py')
-rw-r--r-- modules/lowvram.py | 9
1 files changed, 6 insertions, 3 deletions
diff --git a/modules/lowvram.py b/modules/lowvram.py
index 4b78deab..bd117491 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -2,9 +2,12 @@ import torch
module_in_gpu = None
cpu = torch.device("cpu")
-gpu = torch.device("cuda")
-device = gpu if torch.cuda.is_available() else cpu
-
+if torch.has_cuda:
+ device = gpu = torch.device("cuda")
+elif torch.has_mps:
+ device = gpu = torch.device("mps")
+else:
+ device = gpu = torch.device("cpu")
def setup_for_low_vram(sd_model, use_medvram):
parents = {}