author | Pam <pamhome21@gmail.com> | 2023-03-10 07:58:10 +0000 |
---|---|---|
committer | Pam <pamhome21@gmail.com> | 2023-03-10 07:58:10 +0000 |
commit | 0981dea94832f34d638b1aa8964cfaeffd223b47 (patch) | |
tree | fb27b00e5d82780105c190b8bafc779251d8d756 /modules/shared.py | |
parent | 37acba263389e22bc46cfffc80b2ca8b76a85287 (diff) | |
sdp refactoring
Diffstat (limited to 'modules/shared.py')
-rw-r--r-- | modules/shared.py | 2 |
1 file changed, 1 insertion, 1 deletion
diff --git a/modules/shared.py b/modules/shared.py
index 4b81c591..66a6bfa5 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -70,7 +70,7 @@ parser.add_argument("--sub-quad-chunk-threshold", type=int, help="the percentage
parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--opt-sdp-attention", action='store_true', help="enable scaled dot product cross-attention layer optimization; requires PyTorch 2.*")
-parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="disables memory efficient sdp, makes image generation deterministic; requires --opt-sdp-attention")
+parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="enable scaled dot product cross-attention layer optimization without memory efficient attention, makes image generation deterministic; requires PyTorch 2.*")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
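For context, the updated help text says `--opt-sdp-no-mem-attention` enables scaled dot product cross-attention without the memory-efficient backend, which makes image generation deterministic on PyTorch 2.*. Below is a minimal sketch of how such a flag could be honored with PyTorch 2's `torch.nn.functional.scaled_dot_product_attention` and the `torch.backends.cuda.sdp_kernel` context manager; the function name `sdp_attention` and the standalone example are illustrative assumptions, not the repo's actual implementation.

```python
# Sketch only: shows the effect of --opt-sdp-attention vs --opt-sdp-no-mem-attention.
# The flag names come from modules/shared.py above; everything else is hypothetical.
import torch
import torch.nn.functional as F


def sdp_attention(q, k, v, no_mem_efficient=False):
    """Compute attention with PyTorch 2's fused SDP kernels.

    With no_mem_efficient=True (the --opt-sdp-no-mem-attention case), the
    memory-efficient backend is disabled so results are deterministic,
    at the cost of higher VRAM use.
    """
    if no_mem_efficient:
        # Restrict kernel selection to flash + math; skip the mem-efficient backend.
        with torch.backends.cuda.sdp_kernel(
            enable_flash=True, enable_math=True, enable_mem_efficient=False
        ):
            return F.scaled_dot_product_attention(q, k, v)
    # Default case (--opt-sdp-attention): let PyTorch pick any available backend.
    return F.scaled_dot_product_attention(q, k, v)


if __name__ == "__main__":
    # (batch, heads, tokens, head_dim) — arbitrary demo shapes.
    q = k = v = torch.randn(1, 8, 64, 40)
    print(sdp_attention(q, k, v, no_mem_efficient=True).shape)
```

In the webui itself these options are just passed on the command line (e.g. `--opt-sdp-no-mem-attention` via `COMMANDLINE_ARGS`); the actual kernel dispatch lives in the cross-attention optimization code that this commit refactors.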