author | AUTOMATIC1111 <16777216c@gmail.com> | 2022-10-09 07:52:21 +0000
committer | GitHub <noreply@github.com> | 2022-10-09 07:52:21 +0000
commit | e00b4df7c6f0a13941d6f6ea425eebdaa2bc9318 (patch)
tree | c01de5d0efb2f6bb414722e398e29316ecafb74f /modules/shared.py
parent | 14192c5b207b16b1ec7a4c9c4ea538d1a6811a4d (diff)
parent | 0ec80f0125a14c03ac860279f40c0c062dbde0cf (diff)
Merge pull request #1752 from Greendayle/dev/deepdanbooru
Added DeepDanbooru interrogator
Diffstat (limited to 'modules/shared.py')
-rw-r--r-- | modules/shared.py | 1
1 file changed, 1 insertion, 0 deletions
diff --git a/modules/shared.py b/modules/shared.py
index 2dc092d6..b2c76a32 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -45,6 +45,7 @@ parser.add_argument("--swinir-models-path", type=str, help="Path to directory wi
 parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
 parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
 parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
+parser.add_argument("--deepdanbooru", action='store_true', help="enable deepdanbooru interrogator")
 parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
 parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
 parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")