| field | value | date |
|---|---|---|
| author | AUTOMATIC1111 <16777216c@gmail.com> | 2023-03-28 16:39:20 +0000 |
| committer | GitHub <noreply@github.com> | 2023-03-28 16:39:20 +0000 |
| commit | f1db987e6a3d16e278ea44979b43e585d893237e (patch) | |
| tree | c130645b2c8de1c528b69e613c28aed860664c12 /modules/sd_models_config.py | |
| parent | e49c479819760af910eae4881619c7f119359a5f (diff) | |
| parent | 1f08600345298fac0bcb66cc215a81875a84d7b9 (diff) | |
Merge pull request #8958 from MrCheeze/variations-model
Add support for the unclip (Variations) models, unclip-h and unclip-l
Diffstat (limited to 'modules/sd_models_config.py')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | modules/sd_models_config.py | 7 |

1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py
index 91c21700..9398f528 100644
--- a/modules/sd_models_config.py
+++ b/modules/sd_models_config.py
@@ -14,6 +14,8 @@ config_sd2 = os.path.join(sd_repo_configs_path, "v2-inference.yaml")
 config_sd2v = os.path.join(sd_repo_configs_path, "v2-inference-v.yaml")
 config_sd2_inpainting = os.path.join(sd_repo_configs_path, "v2-inpainting-inference.yaml")
 config_depth_model = os.path.join(sd_repo_configs_path, "v2-midas-inference.yaml")
+config_unclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-l-inference.yaml")
+config_unopenclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-h-inference.yaml")
 config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml")
 config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml")
 config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml")
@@ -65,9 +67,14 @@ def is_using_v_parameterization_for_sd2(state_dict):
 def guess_model_config_from_state_dict(sd, filename):
     sd2_cond_proj_weight = sd.get('cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight', None)
     diffusion_model_input = sd.get('model.diffusion_model.input_blocks.0.0.weight', None)
+    sd2_variations_weight = sd.get('embedder.model.ln_final.weight', None)
 
     if sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias', None) is not None:
         return config_depth_model
+    elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 768:
+        return config_unclip
+    elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 1024:
+        return config_unopenclip
 
     if sd2_cond_proj_weight is not None and sd2_cond_proj_weight.shape[1] == 1024:
         if diffusion_model_input.shape[1] == 9:
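To see the added heuristic in isolation: unclip checkpoints carry an image embedder, and the width of its `embedder.model.ln_final.weight` tensor identifies the variant, 768 mapping to the unclip-l config and 1024 to the unclip-h config, which is exactly what the new `elif` branches test. The sketch below is illustrative only and not part of the webui codebase; `guess_unclip_config`, the returned config file names, and the fake state dicts are hypothetical stand-ins that mirror the shape check from `guess_model_config_from_state_dict`.

```python
# Minimal sketch of the shape-based detection added in this commit.
# Assumption: only the single key the heuristic inspects matters here;
# real checkpoints contain thousands of other tensors.
import torch


def guess_unclip_config(state_dict):
    """Return an unclip config filename based on the embedder width, or None."""
    variations_weight = state_dict.get('embedder.model.ln_final.weight', None)
    if variations_weight is None:
        return None  # no image embedder -> not an unclip checkpoint
    if variations_weight.shape[0] == 768:
        return "v2-1-stable-unclip-l-inference.yaml"   # unclip-l
    if variations_weight.shape[0] == 1024:
        return "v2-1-stable-unclip-h-inference.yaml"   # unclip-h
    return None


if __name__ == "__main__":
    # Fake state dicts containing only the key the heuristic looks at.
    sd_unclip_l = {'embedder.model.ln_final.weight': torch.zeros(768)}
    sd_unclip_h = {'embedder.model.ln_final.weight': torch.zeros(1024)}
    print(guess_unclip_config(sd_unclip_l))  # v2-1-stable-unclip-l-inference.yaml
    print(guess_unclip_config(sd_unclip_h))  # v2-1-stable-unclip-h-inference.yaml
```

Because the depth-model check runs first and the unclip branches come before the SD2 projection-weight check, an unclip checkpoint is classified before the generic SD2 paths are considered.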