author     AUTOMATIC1111 <16777216c@gmail.com>      2023-03-11 09:24:03 +0000
committer  GitHub <noreply@github.com>              2023-03-11 09:24:03 +0000
commit     d81c50391823aaa264bf9c0896a1552e5af5fea6 (patch)
tree       50b78df4a95ba0a341bcbfcfc6c6654a6de66766 /modules/shared.py
parent     1ace16e799c1ff43a6f67947be2506c2f83857a1 (diff)
parent     8d7fa2f67cb0554d8902d5d407166876020e067e (diff)
Merge pull request #8367 from pamparamm/scaled-dot-product-attention
Add scaled dot product attention
Diffstat (limited to 'modules/shared.py')
modules/shared.py | 2 ++
1 file changed, 2 insertions, 0 deletions
diff --git a/modules/shared.py b/modules/shared.py
index d481c25b..dbab0018 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -69,6 +69,8 @@ parser.add_argument("--sub-quad-kv-chunk-size", type=int, help="kv chunk size fo
 parser.add_argument("--sub-quad-chunk-threshold", type=int, help="the percentage of VRAM threshold for the sub-quadratic cross-attention layer optimization to use chunking", default=None)
parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
+parser.add_argument("--opt-sdp-attention", action='store_true', help="enable scaled dot product cross-attention layer optimization; requires PyTorch 2.*")
+parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="enable scaled dot product cross-attention layer optimization without memory efficient attention, makes image generation deterministic; requires PyTorch 2.*")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)