author    AUTOMATIC1111 <16777216c@gmail.com>       2023-12-16 07:22:51 +0000
committer GitHub <noreply@github.com>               2023-12-16 07:22:51 +0000
commit    c121f8c31587a21020e8670664977f6f76e68905 (patch)
tree      5db19664111d4264d4b018bee6557d7d6b0ec1c4 /modules/shared_options.py
parent    60186c7b9d6034ff08f4fe9e213a495b5321302d (diff)
parent    8edb9144cc76b39f3d68c0407b3bb990809d1b03 (diff)
Merge pull request #14031 from AUTOMATIC1111/test-fp8
A big improvement to the dtype casting system, adding an fp8 storage type and manual cast
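
In rough terms, "fp8 storage with manual cast" means weights live in an 8-bit float dtype and are cast back to the compute dtype on the fly. Below is a minimal sketch of that idea, not the webui's actual implementation; it assumes pytorch>=2.1.0, which provides the torch.float8_e4m3fn dtype used here.

    # Minimal sketch of fp8 weight storage with manual casting.
    # Assumes pytorch>=2.1.0 for the torch.float8_e4m3fn dtype.
    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    linear = nn.Linear(16, 16)

    # Store the weight in fp8, roughly halving its footprint vs fp16.
    linear.weight.data = linear.weight.data.to(torch.float8_e4m3fn)

    def manual_cast_linear(module, x):
        # fp8 tensors cannot be used in matmul directly, so the weight is
        # cast back to the activation dtype only for this forward pass.
        weight = module.weight.to(x.dtype)
        bias = module.bias.to(x.dtype) if module.bias is not None else None
        return F.linear(x, weight, bias)

    x = torch.randn(1, 16)             # activations stay in the compute dtype
    y = manual_cast_linear(linear, x)  # fp32 compute here; fp16 on GPU in practice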
Diffstat (limited to 'modules/shared_options.py')
-rw-r--r--  modules/shared_options.py  2
1 file changed, 2 insertions, 0 deletions
diff --git a/modules/shared_options.py b/modules/shared_options.py
index d2e86ff1..d470eb8f 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -206,6 +206,8 @@ options_templates.update(options_section(('optimizations', "Optimizations", "sd"
"pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length", infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"),
"persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("do not recalculate conds from prompts if prompts have not changed since previous calculation"),
"batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"),
+ "fp8_storage": OptionInfo("Disable", "FP8 weight", gr.Dropdown, {"choices": ["Disable", "Enable for SDXL", "Enable"]}).info("Use FP8 to store Linear/Conv layers' weight. Require pytorch>=2.1.0."),
+ "cache_fp16_weight": OptionInfo(False, "Cache FP16 weight for LoRA").info("Cache fp16 weight when enabling FP8, will increase the quality of LoRA. Use more system ram."),
}))
options_templates.update(options_section(('compatibility', "Compatibility", "sd"), {