author     AUTOMATIC <16777216c@gmail.com>    2023-01-10 13:51:04 +0000
committer  AUTOMATIC <16777216c@gmail.com>    2023-01-10 13:51:04 +0000
commit     ce3f639ec8758ce2bc90483336361d2dc25acd3a (patch)
tree       5f19d3e92ba8f22fbe6840311c1d7bdc45372e93 /modules/sd_models.py
parent     0c3feb202c5714abd50d879c1db2cd9a71ce93e3 (diff)
add more stuff to ignore when creating model from config
prevent .vae.safetensors files from being listed as stable diffusion models
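The second part of this commit passes an ext_blacklist to modelloader.load_models so that VAE weight files sharing the checkpoint directory are no longer listed as models. A minimal sketch of this kind of suffix-based filtering, assuming blacklist entries are compared against the end of each filename; the helper below is illustrative and not the actual modelloader implementation:

    import os

    def filter_checkpoints(paths, ext_filter, ext_blacklist):
        # keep files with an allowed extension, drop files ending in a blacklisted suffix
        kept = []
        for path in paths:
            name = os.path.basename(path).lower()
            if not any(name.endswith(ext) for ext in ext_filter):
                continue
            if any(name.endswith(bad) for bad in ext_blacklist):
                continue
            kept.append(path)
        return kept

    # 'model.vae.safetensors' is dropped, 'model.safetensors' and 'model.ckpt' are kept
    print(filter_checkpoints(
        ["model.safetensors", "model.vae.safetensors", "model.ckpt"],
        ext_filter=[".ckpt", ".safetensors"],
        ext_blacklist=[".vae.safetensors"],
    ))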
Diffstat (limited to 'modules/sd_models.py')
-rw-r--r--   modules/sd_models.py   32
1 file changed, 28 insertions(+), 4 deletions(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index ee241032..1bb9088b 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -2,6 +2,7 @@ import collections
import os.path
import sys
import gc
+import time
from collections import namedtuple
import torch
import re
@@ -61,7 +62,7 @@ def find_checkpoint_config(info):
def list_models():
checkpoints_list.clear()
- model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"])
+ model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], ext_blacklist=[".vae.safetensors"])
def modeltitle(path, shorthash):
abspath = os.path.abspath(path)
@@ -288,6 +289,17 @@ def enable_midas_autodownload():
midas.api.load_model = load_model_wrapper
+class Timer:
+ def __init__(self):
+ self.start = time.time()
+
+ def elapsed(self):
+ end = time.time()
+ res = end - self.start
+ self.start = end
+ return res
+
+
def load_model(checkpoint_info=None):
from modules import lowvram, sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint()
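The Timer added above measures successive phases by resetting its start time on every elapsed() call, so each call returns only the time since the previous one. A standalone usage sketch of the same pattern (the sleep calls are stand-ins for the model-creation and weight-loading phases, and the numbers in the comments are illustrative):

    import time

    class Timer:
        def __init__(self):
            self.start = time.time()

        def elapsed(self):
            # time since the previous elapsed() call (or since construction), then reset
            end = time.time()
            res = end - self.start
            self.start = end
            return res

    timer = Timer()
    time.sleep(0.2)            # stand-in for "create model"
    create = timer.elapsed()   # ~0.2
    time.sleep(0.3)            # stand-in for "load weights"
    weights = timer.elapsed()  # ~0.3
    print(f"Model loaded in {create + weights:.1f}s ({create:.1f}s create model, {weights:.1f}s load weights).")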
@@ -319,11 +331,17 @@ def load_model(checkpoint_info=None):
if shared.cmd_opts.no_half:
sd_config.model.params.unet_config.params.use_fp16 = False
+ timer = Timer()
+
with sd_disable_initialization.DisableInitialization():
sd_model = instantiate_from_config(sd_config.model)
+ elapsed_create = timer.elapsed()
+
load_model_weights(sd_model, checkpoint_info)
+ elapsed_load_weights = timer.elapsed()
+
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
else:
@@ -338,7 +356,9 @@ def load_model(checkpoint_info=None):
script_callbacks.model_loaded_callback(sd_model)
- print("Model loaded.")
+ elapsed_the_rest = timer.elapsed()
+
+ print(f"Model loaded in {elapsed_create + elapsed_load_weights + elapsed_the_rest:.1f}s ({elapsed_create:.1f}s create model, {elapsed_load_weights:.1f}s load weights).")
return sd_model
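With these hunks, the single "Model loaded." line becomes a per-phase breakdown, for example "Model loaded in 11.2s (0.7s create model, 10.1s load weights)." (timing values here are illustrative); the remaining setup time is measured as elapsed_the_rest but only folded into the total.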
@@ -349,7 +369,7 @@ def reload_model_weights(sd_model=None, info=None):
if not sd_model:
sd_model = shared.sd_model
- if sd_model is None: # previous model load failed
+ if sd_model is None:  # previous model load failed
current_checkpoint_info = None
else:
current_checkpoint_info = sd_model.sd_checkpoint_info
@@ -371,6 +391,8 @@ def reload_model_weights(sd_model=None, info=None):
sd_hijack.model_hijack.undo_hijack(sd_model)
+ timer = Timer()
+
try:
load_model_weights(sd_model, checkpoint_info)
except Exception as e:
@@ -384,6 +406,8 @@ def reload_model_weights(sd_model=None, info=None):
if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
sd_model.to(devices.device)
- print("Weights loaded.")
+ elapsed = timer.elapsed()
+
+ print(f"Weights loaded in {elapsed:.1f}s.")
return sd_model
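The reload path applies the same Timer pattern around load_model_weights only, so the checkpoint-switch case reports a single figure, e.g. "Weights loaded in 3.4s." (value illustrative), rather than the per-phase breakdown used in load_model.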