aboutsummaryrefslogtreecommitdiffstats
path: root/modules/sd_models.py
diff options
context:
space:
mode:
authorcluder <1590330+cluder@users.noreply.github.com>2022-11-09 03:54:21 +0000
committercluder <1590330+cluder@users.noreply.github.com>2022-11-09 04:43:57 +0000
commit3b51d239ac9201228c6032fc109111e347e8e6b0 (patch)
tree2859f97b0e2efc44dc09c1522b0a6c1d47b73034 /modules/sd_models.py
parent2f47724b73c40b96e158bea9ac2c6e84fbad3e73 (diff)
downloadstable-diffusion-webui-gfx803-3b51d239ac9201228c6032fc109111e347e8e6b0.tar.gz
stable-diffusion-webui-gfx803-3b51d239ac9201228c6032fc109111e347e8e6b0.tar.bz2
stable-diffusion-webui-gfx803-3b51d239ac9201228c6032fc109111e347e8e6b0.zip
- do not use ckpt cache, if disabled
- cache model after it has been loaded from file
Diffstat (limited to 'modules/sd_models.py')
-rw-r--r--modules/sd_models.py27
1 file changed, 17 insertions, 10 deletions
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 34c57bfa..720c2a96 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -163,13 +163,21 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
checkpoint_file = checkpoint_info.filename
sd_model_hash = checkpoint_info.hash
- if shared.opts.sd_checkpoint_cache > 0 and hasattr(model, "sd_checkpoint_info"):
+ cache_enabled = shared.opts.sd_checkpoint_cache > 0
+
+ if cache_enabled:
sd_vae.restore_base_vae(model)
- checkpoints_loaded[model.sd_checkpoint_info] = model.state_dict().copy()
vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
- if checkpoint_info not in checkpoints_loaded:
+ if cache_enabled and checkpoint_info in checkpoints_loaded:
+ # use checkpoint cache
+ vae_name = sd_vae.get_filename(vae_file) if vae_file else None
+ vae_message = f" with {vae_name} VAE" if vae_name else ""
+ print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
+ model.load_state_dict(checkpoints_loaded[checkpoint_info])
+ else:
+ # load from file
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
@@ -180,6 +188,10 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
del pl_sd
model.load_state_dict(sd, strict=False)
del sd
+
+ if cache_enabled:
+ # cache newly loaded model
+ checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
if shared.cmd_opts.opt_channelslast:
model.to(memory_format=torch.channels_last)
@@ -199,13 +211,8 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
model.first_stage_model.to(devices.dtype_vae)
- else:
- vae_name = sd_vae.get_filename(vae_file) if vae_file else None
- vae_message = f" with {vae_name} VAE" if vae_name else ""
- print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
- model.load_state_dict(checkpoints_loaded[checkpoint_info])
-
- if shared.opts.sd_checkpoint_cache > 0:
+ # clean up cache if limit is reached
+ if cache_enabled:
while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
checkpoints_loaded.popitem(last=False) # LRU