author     AUTOMATIC <16777216c@gmail.com>   2023-06-01 05:12:06 +0000
committer  AUTOMATIC <16777216c@gmail.com>   2023-06-01 05:12:06 +0000
commit     36888092afa82ee248bc947229f813b453629317 (patch)
tree       1225366202c135e1e8da2e831bd932a0b2a39e02
parent     f1533de982350af06bc9cbaa436b3e4dfdef4eb8 (diff)
revert default cross attention optimization to Doggettx
make --disable-opt-split-attention command line option work again
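Background for the revert: under the "Automatic" setting, the webui walks the registered cross-attention optimizers in priority order and applies the first one that reports itself available, so changing the default only requires changing priorities. Below is a minimal sketch of that selection scheme, with a simplified stand-in for the SdOptimization class in modules/sd_hijack_optimizations.py (an assumption-level sketch, not the repo's actual code):

# Minimal sketch of priority-ordered optimizer selection; the class is a
# simplified stand-in for SdOptimization in modules/sd_hijack_optimizations.py.
class SdOptimization:
    def __init__(self, name, priority, is_available=lambda: True):
        self.name = name
        self.priority = priority
        self.is_available = is_available

def pick_automatic(optimizers):
    # Highest priority wins; optimizers that report themselves unavailable are skipped.
    for opt in sorted(optimizers, key=lambda o: o.priority, reverse=True):
        if opt.is_available():
            return opt
    return None

With that scheme, raising Doggettx's priority above the SDP variants (see the hunks below) is enough to make it the automatic default again.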
 modules/cmd_args.py                | 2 +-
 modules/sd_hijack.py               | 2 ++
 modules/sd_hijack_optimizations.py | 6 +++---
 3 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index 0974056d..de905caa 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -62,7 +62,7 @@ parser.add_argument("--opt-split-attention-invokeai", action='store_true', help=
 parser.add_argument("--opt-split-attention-v1", action='store_true', help="prefer older version of split attention optimization for automatic choice of optimization")
 parser.add_argument("--opt-sdp-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization for automatic choice of optimization; requires PyTorch 2.*")
 parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization without memory efficient attention for automatic choice of optimization, makes image generation deterministic; requires PyTorch 2.*")
-parser.add_argument("--disable-opt-split-attention", action='store_true', help="does not do anything")
+parser.add_argument("--disable-opt-split-attention", action='store_true', help="prefer no cross-attention layer optimization for automatic choice of optimization")
 parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
 parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
 parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 487dfd60..3b6f95ce 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -68,6 +68,8 @@ def apply_optimizations(option=None):
     if selection == "None":
         matching_optimizer = None
+    elif selection == "Automatic" and shared.cmd_opts.disable_opt_split_attention:
+        matching_optimizer = None
     elif matching_optimizer is None:
         matching_optimizer = optimizers[0]
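The two added lines restore the flag's behavior: with the UI selection left on "Automatic", --disable-opt-split-attention now resolves to no optimizer instead of being silently ignored (its help text previously read "does not do anything"). A condensed sketch of the resulting decision order, assuming selection, optimizers, and matching_optimizer mirror the names in the hunk and that optimizers is pre-sorted with the highest priority first:

# Hypothetical condensed form of the branch above (not the actual function).
def match_optimizer(selection, cmd_opts, optimizers, matching_optimizer):
    if selection == "None":
        return None
    if selection == "Automatic" and cmd_opts.disable_opt_split_attention:
        return None  # the fixed path: the flag disables optimization again
    if matching_optimizer is None:
        return optimizers[0]  # fall back to the top-priority optimizer
    return matching_optimizer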
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 5f0ff513..b41aa419 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -57,7 +57,7 @@ class SdOptimizationSdpNoMem(SdOptimization):
     name = "sdp-no-mem"
     label = "scaled dot product without memory efficient attention"
     cmd_opt = "opt_sdp_no_mem_attention"
-    priority = 90
+    priority = 80

     def is_available(self):
         return hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention)
@@ -71,7 +71,7 @@ class SdOptimizationSdp(SdOptimizationSdpNoMem):
     name = "sdp"
     label = "scaled dot product"
     cmd_opt = "opt_sdp_attention"
-    priority = 80
+    priority = 70

     def apply(self):
         ldm.modules.attention.CrossAttention.forward = scaled_dot_product_attention_forward
@@ -114,7 +114,7 @@ class SdOptimizationInvokeAI(SdOptimization):
 class SdOptimizationDoggettx(SdOptimization):
     name = "Doggettx"
     cmd_opt = "opt_split_attention"
-    priority = 20
+    priority = 90

     def apply(self):
         ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward
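Taken together, the three priority changes put Doggettx (90) above sdp-no-mem (80) and sdp (70), so among the optimizers touched here the automatic choice lands on Doggettx again. A self-contained check using only the values from the hunks above:

# New priorities from this commit; the max-priority entry wins under "Automatic".
priorities = {"Doggettx": 90, "sdp-no-mem": 80, "sdp": 70}
print(max(priorities, key=priorities.get))  # -> Doggettx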