author     d8ahazard <d8ahazard@gmail.com>           2022-09-26 14:29:50 +0000
committer  d8ahazard <d8ahazard@gmail.com>           2022-09-26 14:29:50 +0000
commit     740070ea9cdb254209f66417418f2a4af8b099d6  (patch)
tree       52896a6159b706024af9520c855c10091162372c  /modules/shared.py
parent     bfb7f15d46048f27338eeac3a591a5943d03c5f1  (diff)
Re-implement universal model loading
Diffstat (limited to 'modules/shared.py')
-rw-r--r--  modules/shared.py  12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/modules/shared.py b/modules/shared.py
index c32da110..1444040d 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -16,11 +16,11 @@
 import modules.sd_models
 sd_model_file = os.path.join(script_path, 'model.ckpt')
 default_sd_model_file = sd_model_file
-
+model_path = os.path.join(script_path, 'models')
 parser = argparse.ArgumentParser()
 parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",)
 parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; this checkpoint will be added to the list of checkpoints and loaded by default if you don't have a checkpoint selected in settings",)
-parser.add_argument("--ckpt-dir", type=str, default=os.path.join(script_path, 'models'), help="path to directory with stable diffusion checkpoints",)
+parser.add_argument("--ckpt-dir", type=str, default=model_path, help="path to directory with stable diffusion checkpoints",)
 parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
 parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
 parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
@@ -34,8 +34,12 @@ parser.add_argument("--always-batch-cond-uncond", action='store_true', help="dis
 parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
 parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
 parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site (doesn't work for me but you might have better luck)")
-parser.add_argument("--esrgan-models-path", type=str, help="path to directory with ESRGAN models", default=os.path.join(script_path, 'ESRGAN'))
-parser.add_argument("--swinir-models-path", type=str, help="path to directory with SwinIR models", default=os.path.join(script_path, 'SwinIR'))
+parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model(s)", default=os.path.join(model_path, 'Codeformer'))
+parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model(s)", default=os.path.join(model_path, 'GFPGAN'))
+parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN models", default=os.path.join(model_path, 'ESRGAN'))
+parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN models", default=os.path.join(model_path, 'RealESRGAN'))
+parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR models", default=os.path.join(model_path, 'SwinIR'))
+parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR models", default=os.path.join(model_path, 'LDSR'))
 parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
 parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
 parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")