author     AUTOMATIC1111 <16777216c@gmail.com>        2023-03-25 06:10:01 +0000
committer  GitHub <noreply@github.com>                2023-03-25 06:10:01 +0000
commit     03c8eefbccaa054e3cc90cd43c14a701e71d1fe6 (patch)
tree       48c2738abce1a20ca1637f244df6ba54e94d08b7 /modules/sd_hijack_optimizations.py
parent     b0b777e64da238f1b259b77b0899b61c26a99dee (diff)
parent     a9eab236d7e8afa4d6205127904a385b2c43bb24 (diff)
Merge pull request #8782 from FNSpd/master
--upcast-sampling support for CUDA
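The merged change widens the `upcast_attn` cast: previously only `q` and `k` were promoted to float32, leaving `v` in half precision, and the attention kernels reject mixed-dtype inputs. A minimal sketch of the failure mode, using PyTorch's `scaled_dot_product_attention`; the tensor shapes are illustrative assumptions, not values from the patch:

```python
import torch
import torch.nn.functional as F

# Illustrative shapes: (batch, num_heads, seq_len, head_dim).
q = torch.randn(1, 8, 64, 40, dtype=torch.float16)
k = torch.randn(1, 8, 64, 40, dtype=torch.float16)
v = torch.randn(1, 8, 64, 40, dtype=torch.float16)

q, k = q.float(), k.float()  # pre-patch upcast: v stays float16
try:
    F.scaled_dot_product_attention(q, k, v)  # q/k/v dtypes must match
except RuntimeError as err:
    print(f"mixed-dtype attention rejected: {err}")

v = v.float()  # the patched upcast: all three inputs now agree
out = F.scaled_dot_product_attention(q, k, v)
print(out.dtype)  # torch.float32
```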
Diffstat (limited to 'modules/sd_hijack_optimizations.py')
-rw-r--r--  modules/sd_hijack_optimizations.py | 4
1 file changed, 2 insertions, 2 deletions
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 2e307b5d..372555ff 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -337,7 +337,7 @@ def xformers_attention_forward(self, x, context=None, mask=None):
     dtype = q.dtype
     if shared.opts.upcast_attn:
-        q, k = q.float(), k.float()
+        q, k, v = q.float(), k.float(), v.float()

     out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=get_xformers_flash_attention_op(q, k, v))
@@ -372,7 +372,7 @@ def scaled_dot_product_attention_forward(self, x, context=None, mask=None):
     dtype = q.dtype
     if shared.opts.upcast_attn:
-        q, k = q.float(), k.float()
+        q, k, v = q.float(), k.float(), v.float()

     # the output of sdp = (batch, num_heads, seq_len, head_dim)
     hidden_states = torch.nn.functional.scaled_dot_product_attention(
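Both patched functions share the same surrounding pattern: the incoming dtype is saved (`dtype = q.dtype`), attention optionally runs in float32, and the result would then be cast back before leaving the function. The cast-back step sits outside this hunk, so the sketch below is a hedged condensation of that assumed pattern, not the actual webui function body; `attention_with_upcast` is a hypothetical name:

```python
import torch
import torch.nn.functional as F

def attention_with_upcast(q, k, v, upcast_attn=True):
    # Hypothetical condensation of the pattern around the patched lines.
    dtype = q.dtype  # remember the incoming precision, as in the hunk context
    if upcast_attn:
        # The patched behavior: upcast all three inputs, not just q and k.
        q, k, v = q.float(), k.float(), v.float()
    out = F.scaled_dot_product_attention(q, k, v)
    return out.to(dtype)  # assumed cast back to the saved dtype
```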