author     AUTOMATIC1111 <16777216c@gmail.com>    2023-03-11 09:24:03 +0000
committer  GitHub <noreply@github.com>            2023-03-11 09:24:03 +0000
commit     d81c50391823aaa264bf9c0896a1552e5af5fea6 (patch)
tree       50b78df4a95ba0a341bcbfcfc6c6654a6de66766 /modules/sd_hijack.py
parent     1ace16e799c1ff43a6f67947be2506c2f83857a1 (diff)
parent     8d7fa2f67cb0554d8902d5d407166876020e067e (diff)
Merge pull request #8367 from pamparamm/scaled-dot-product-attention
Add scaled dot product attention
Diffstat (limited to 'modules/sd_hijack.py')
-rw-r--r--  modules/sd_hijack.py | 12
1 file changed, 12 insertions, 0 deletions
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 79476783..f4bb0266 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -37,11 +37,23 @@ def apply_optimizations():
 
     optimization_method = None
 
+    can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(getattr(torch.nn.functional, "scaled_dot_product_attention")) # not everyone has torch 2.x to use sdp
+
     if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)):
         print("Applying xformers cross attention optimization.")
         ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
         ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
         optimization_method = 'xformers'
+    elif cmd_opts.opt_sdp_no_mem_attention and can_use_sdp:
+        print("Applying scaled dot product cross attention optimization (without memory efficient attention).")
+        ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.scaled_dot_product_no_mem_attention_forward
+        ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.sdp_no_mem_attnblock_forward
+        optimization_method = 'sdp-no-mem'
+    elif cmd_opts.opt_sdp_attention and can_use_sdp:
+        print("Applying scaled dot product cross attention optimization.")
+        ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.scaled_dot_product_attention_forward
+        ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.sdp_attnblock_forward
+        optimization_method = 'sdp'
     elif cmd_opts.opt_sub_quad_attention:
         print("Applying sub-quadratic cross attention optimization.")
         ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.sub_quad_attention_forward
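Both new branches route attention through torch.nn.functional.scaled_dot_product_attention, the fused attention primitive available from PyTorch 2.0 onward. The sketch below is only an illustrative approximation of what the hijacked forwards in sd_hijack_optimizations.py reduce to; the helper names, tensor layout, and kernel flags are assumptions for demonstration, not the repository's actual code.

```python
# Illustrative sketch only -- not the code from sd_hijack_optimizations.py.
# Assumes q, k, v are (batch, tokens, heads * dim_head) tensors, as is typical
# for ldm's CrossAttention before the heads are split out.
import torch
import torch.nn.functional as F


def sdp_attention(q, k, v, num_heads):
    # Split heads: (batch, tokens, heads * dim_head) -> (batch, heads, tokens, dim_head),
    # the layout expected by F.scaled_dot_product_attention (PyTorch 2.x only).
    b, tq, inner = q.shape
    dim_head = inner // num_heads
    q, k, v = (t.view(b, -1, num_heads, dim_head).transpose(1, 2) for t in (q, k, v))

    # Fused kernel; replaces the explicit softmax(q @ k.T / sqrt(dim_head)) @ v.
    out = F.scaled_dot_product_attention(q, k, v, dropout_p=0.0, is_causal=False)

    # Merge heads back: (batch, heads, tokens, dim_head) -> (batch, tokens, heads * dim_head).
    return out.transpose(1, 2).reshape(b, tq, inner)


def sdp_no_mem_attention(q, k, v, num_heads):
    # The 'sdp-no-mem' branch: same computation, but with the memory-efficient
    # backend disabled so only the flash / math kernels may be selected.
    with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=False):
        return sdp_attention(q, k, v, num_heads)
```

The can_use_sdp guard in the hunk exists because scaled_dot_product_attention is only present in torch 2.x (as the inline comment notes), so on older torch installs both new branches are skipped and the selection falls through to the remaining optimizations.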