aboutsummaryrefslogtreecommitdiffstats
path: root/modules/sd_models_config.py
diff options
context:
space:
mode:
authorMrCheeze <fishycheeze@yahoo.ca>2023-03-25 02:48:16 +0000
committerMrCheeze <fishycheeze@yahoo.ca>2023-03-26 01:03:07 +0000
commit8a34671fe91e142bce9e5556cca2258b3be9dd6e (patch)
tree001fee7c5f149bd9763db74af1355828925cea04 /modules/sd_models_config.py
parenta9fed7c364061ae6efb37f797b6b522cb3cf7aa2 (diff)
downloadstable-diffusion-webui-gfx803-8a34671fe91e142bce9e5556cca2258b3be9dd6e.tar.gz
stable-diffusion-webui-gfx803-8a34671fe91e142bce9e5556cca2258b3be9dd6e.tar.bz2
stable-diffusion-webui-gfx803-8a34671fe91e142bce9e5556cca2258b3be9dd6e.zip
Add support for the Variations models (unclip-h and unclip-l)
Diffstat (limited to 'modules/sd_models_config.py')
-rw-r--r--modules/sd_models_config.py7
1 file changed, 7 insertions, 0 deletions
diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py
index 91c21700..9398f528 100644
--- a/modules/sd_models_config.py
+++ b/modules/sd_models_config.py
@@ -14,6 +14,8 @@ config_sd2 = os.path.join(sd_repo_configs_path, "v2-inference.yaml")
config_sd2v = os.path.join(sd_repo_configs_path, "v2-inference-v.yaml")
config_sd2_inpainting = os.path.join(sd_repo_configs_path, "v2-inpainting-inference.yaml")
config_depth_model = os.path.join(sd_repo_configs_path, "v2-midas-inference.yaml")
+config_unclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-l-inference.yaml")
+config_unopenclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-h-inference.yaml")
config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml")
config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml")
config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml")
@@ -65,9 +67,14 @@ def is_using_v_parameterization_for_sd2(state_dict):
def guess_model_config_from_state_dict(sd, filename):
sd2_cond_proj_weight = sd.get('cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight', None)
diffusion_model_input = sd.get('model.diffusion_model.input_blocks.0.0.weight', None)
+ sd2_variations_weight = sd.get('embedder.model.ln_final.weight', None)
if sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias', None) is not None:
return config_depth_model
+ elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 768:
+ return config_unclip
+ elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 1024:
+ return config_unopenclip
if sd2_cond_proj_weight is not None and sd2_cond_proj_weight.shape[1] == 1024:
if diffusion_model_input.shape[1] == 9: