author     JaredTherriault <noirjt@live.com>    2023-09-04 17:29:33 -0700
committer  GitHub <noreply@github.com>          2023-09-04 17:29:33 -0700
commit     5e16914a4e157ab3ed96f8b7841e1290a56f4484 (patch)
tree       655f4582e692f0fc3667b3b668ad365ac3ab92ae /modules/sub_quadratic_attention.py
parent     8f3b02f09535f55d3673aa9ea589396b8614f799 (diff)
parent     5ef669de080814067961f28357256e8fe27544f4 (diff)
Merge branch 'AUTOMATIC1111:master' into master
Diffstat (limited to 'modules/sub_quadratic_attention.py')
-rw-r--r--  modules/sub_quadratic_attention.py  4
1 file changed, 2 insertions, 2 deletions
diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py
index 497568eb..ae4ee4bb 100644
--- a/modules/sub_quadratic_attention.py
+++ b/modules/sub_quadratic_attention.py
@@ -58,7 +58,7 @@ def _summarize_chunk(
scale: float,
) -> AttnChunk:
attn_weights = torch.baddbmm(
- torch.empty(1, 1, 1, device=query.device, dtype=query.dtype),
+ torch.zeros(1, 1, 1, device=query.device, dtype=query.dtype),
query,
key.transpose(1,2),
alpha=scale,
@@ -121,7 +121,7 @@ def _get_attention_scores_no_kv_chunking(
scale: float,
) -> Tensor:
attn_scores = torch.baddbmm(
- torch.empty(1, 1, 1, device=query.device, dtype=query.dtype),
+ torch.zeros(1, 1, 1, device=query.device, dtype=query.dtype),
query,
key.transpose(1,2),
alpha=scale,
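
The change above swaps torch.empty for torch.zeros as the input tensor passed to torch.baddbmm in both _summarize_chunk and _get_attention_scores_no_kv_chunking. That input only supplies the additive term of baddbmm, and the surrounding calls presumably scale it away with beta=0 (not visible in these hunks), so its values should not matter mathematically; torch.empty, however, returns uninitialized memory, and uninitialized values have been reported to leak NaN/Inf into the result on some backends, which torch.zeros avoids at negligible cost for a (1, 1, 1) tensor. Below is a minimal sketch of the call pattern, with assumed shapes and an assumed beta=0 keyword; it is an illustration, not the webui code itself.

import torch

# Hypothetical shapes for illustration only; the real tensors come from the
# attention chunking in modules/sub_quadratic_attention.py.
query = torch.randn(2, 4, 8)   # (batch * heads, query_tokens, channels)
key = torch.randn(2, 6, 8)     # (batch * heads, key_tokens, channels)
scale = query.shape[-1] ** -0.5

# baddbmm computes beta * input + alpha * (query @ key^T). With beta=0 the
# input is only a shape/dtype placeholder, but torch.zeros guarantees it
# holds defined values, unlike torch.empty.
attn_scores = torch.baddbmm(
    torch.zeros(1, 1, 1, device=query.device, dtype=query.dtype),
    query,
    key.transpose(1, 2),
    alpha=scale,
    beta=0,  # assumed; not shown in the hunks above
)
print(attn_scores.shape)  # torch.Size([2, 4, 6])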