diff options
author | zhaohu xing <920232796@qq.com> | 2022-12-06 01:03:55 +0000 |
---|---|---|
committer | zhaohu xing <920232796@qq.com> | 2022-12-06 01:03:55 +0000 |
commit | 4929503258d80abbc4b5f40da034298fe3803906 (patch) | |
tree | c7af372ebf229811dfa21b19f7a64eab99e2e607 /modules | |
parent | 9c86fb8cace6d8ac0843e0ddad0ba5ae7f3148c9 (diff) | |
download | stable-diffusion-webui-gfx803-4929503258d80abbc4b5f40da034298fe3803906.tar.gz stable-diffusion-webui-gfx803-4929503258d80abbc4b5f40da034298fe3803906.tar.bz2 stable-diffusion-webui-gfx803-4929503258d80abbc4b5f40da034298fe3803906.zip |
fix bugs
Signed-off-by: zhaohu xing <920232796@qq.com>
Diffstat (limited to 'modules')
-rw-r--r-- | modules/devices.py | 4 | ||||
-rw-r--r-- | modules/sd_hijack.py | 2 |
2 files changed, 3 insertions, 3 deletions
diff --git a/modules/devices.py b/modules/devices.py
index e69c1fe3..f00079c6 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -38,8 +38,8 @@ def get_optimal_device():
     if torch.cuda.is_available():
         return torch.device(get_cuda_device_string())

-    # if has_mps():
-    #     return torch.device("mps")
+    if has_mps():
+        return torch.device("mps")

     return cpu
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index edb8b420..cd65d356 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -28,7 +28,7 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At
 # new memory efficient cross attention blocks do not support hypernets and we already
 # have memory efficient cross attention anyway, so this disables SD2.0's memory efficient cross attention
 ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.CrossAttention
-# ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention
+ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention
 # silence new console spam from SD2
 ldm.modules.attention.print = lambda *args: None