about summary refs log tree commit diff stats
path: root/modules/sd_hijack_optimizations.py
diff options
context:
space:
mode:
author: Pam <pamhome21@gmail.com> 2023-03-10 07:19:36 +0000
committer: Pam <pamhome21@gmail.com> 2023-03-10 07:19:36 +0000
commit: 37acba263389e22bc46cfffc80b2ca8b76a85287 (patch)
tree: 67dd2e6d1749b44b28dcb0d73f5ecf945493f245 /modules/sd_hijack_optimizations.py
parent: fec0a895119a124a295e3dad5205de5766031dc7 (diff)
download: stable-diffusion-webui-gfx803-37acba263389e22bc46cfffc80b2ca8b76a85287.tar.gz
stable-diffusion-webui-gfx803-37acba263389e22bc46cfffc80b2ca8b76a85287.tar.bz2
stable-diffusion-webui-gfx803-37acba263389e22bc46cfffc80b2ca8b76a85287.zip
argument to disable memory efficient for sdp
Diffstat (limited to 'modules/sd_hijack_optimizations.py')
-rw-r--r-- modules/sd_hijack_optimizations.py | 4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index a324a592..68b1dd84 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -388,6 +388,10 @@ def scaled_dot_product_attention_forward(self, x, context=None, mask=None):
hidden_states = self.to_out[1](hidden_states)
return hidden_states
+def scaled_dot_product_no_mem_attention_forward(self, x, context=None, mask=None):  # SDP attention forward with the memory-efficient kernel disabled
+    with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=False):  # allow only flash/math SDP backends for this call
+        return scaled_dot_product_attention_forward(self, x, context, mask)  # delegate to the standard SDP forward under the restricted-kernel context
+
def cross_attention_attnblock_forward(self, x):
h_ = x
h_ = self.norm(h_)