author     Pam <pamhome21@gmail.com>  2023-03-10 07:58:10 +0000
committer  Pam <pamhome21@gmail.com>  2023-03-10 07:58:10 +0000
commit     0981dea94832f34d638b1aa8964cfaeffd223b47 (patch)
tree       fb27b00e5d82780105c190b8bafc779251d8d756 /modules/sd_hijack.py
parent     37acba263389e22bc46cfffc80b2ca8b76a85287 (diff)
sdp refactoring
Diffstat (limited to 'modules/sd_hijack.py')
-rw-r--r--  modules/sd_hijack.py | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index f62e9adb..e98ae51a 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -37,20 +37,21 @@ def apply_optimizations():
 
     optimization_method = None
 
+    can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(getattr(torch.nn.functional, "scaled_dot_product_attention")) # not everyone has torch 2.x to use sdp
+
     if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)):
         print("Applying xformers cross attention optimization.")
         ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
         ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
         optimization_method = 'xformers'
-    elif cmd_opts.opt_sdp_attention and (hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(getattr(torch.nn.functional, "scaled_dot_product_attention"))):
-        if cmd_opts.opt_sdp_no_mem_attention:
-            print("Applying scaled dot product cross attention optimization (without memory efficient attention).")
-            ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.scaled_dot_product_no_mem_attention_forward
-            optimization_method = 'sdp-no-mem'
-        else:
-            print("Applying scaled dot product cross attention optimization.")
-            ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.scaled_dot_product_attention_forward
-            optimization_method = 'sdp'
+    elif cmd_opts.opt_sdp_no_mem_attention and can_use_sdp:
+        print("Applying scaled dot product cross attention optimization (without memory efficient attention).")
+        ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.scaled_dot_product_no_mem_attention_forward
+        optimization_method = 'sdp-no-mem'
+    elif cmd_opts.opt_sdp_attention and can_use_sdp:
+        print("Applying scaled dot product cross attention optimization.")
+        ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.scaled_dot_product_attention_forward
+        optimization_method = 'sdp'
     elif cmd_opts.opt_sub_quad_attention:
         print("Applying sub-quadratic cross attention optimization.")
         ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.sub_quad_attention_forward
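For context: torch 2.x exposes fused attention as torch.nn.functional.scaled_dot_product_attention, so the new can_use_sdp line feature-detects the function instead of parsing version strings. Below is a minimal standalone sketch of that pattern; the tensor shapes and script layout are illustrative, not the webui's actual module structure.

```python
# Hypothetical standalone sketch of the feature-detection pattern this commit adds.
import torch
import torch.nn.functional as F

# Same check as the diff: torch < 2.0 has no scaled_dot_product_attention,
# so probe for the function rather than comparing version numbers.
can_use_sdp = hasattr(F, "scaled_dot_product_attention") and callable(
    getattr(F, "scaled_dot_product_attention")
)

if can_use_sdp:
    # Illustrative attention inputs: (batch, heads, tokens, head_dim).
    q, k, v = (torch.randn(1, 8, 77, 64) for _ in range(3))

    # The 'sdp' path boils down to this fused call.
    out = F.scaled_dot_product_attention(q, k, v)

    # The 'sdp-no-mem' path corresponds to running the same call with the
    # memory-efficient backend disabled, via torch 2.0's backend-selection
    # context manager.
    with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=False):
        out_no_mem = F.scaled_dot_product_attention(q, k, v)

    print(out.shape)  # torch.Size([1, 8, 77, 64])
```

One behavioral nuance of flattening the nested if into two elif branches: cmd_opts.opt_sdp_no_mem_attention now selects the no-mem path on its own, whereas before it only took effect when cmd_opts.opt_sdp_attention was also set.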