author    | AUTOMATIC1111 <16777216c@gmail.com>     | 2022-11-11 13:04:17 +0000
committer | GitHub <noreply@github.com>             | 2022-11-11 13:04:17 +0000
commit    | e666220ee458ae1e80a2ba12c64a0da9d68f20a2 (patch)
tree      | b579effd091738d94bd076c4719de7a1610026e3
parent    | 6a2044f566d5d000a836e71fb5cb754e7d98b1b3 (diff)
parent    | eebf49592ad2c0933e58b06a098b92e48d47e4fe (diff)
Merge pull request #4514 from cluder/4448_fix_ckpt_cache
#4448 fix checkpoint cache usage
-rw-r--r-- | modules/sd_models.py | 29
1 file changed, 18 insertions, 11 deletions
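The patch restructures load_model_weights() so that the in-memory checkpoint cache is consulted before any file I/O, and the freshly loaded weights are stored back into the cache afterwards. Below is a minimal standalone sketch of the resulting flow: load_weights_sketch(), its cache_limit parameter, and the map_location="cpu" choice are hypothetical stand-ins, and the "state_dict" unwrapping only approximates the repo's helper; checkpoints_loaded, cache_enabled, and the popitem(last=False) eviction come straight from the diff below.

from collections import OrderedDict

import torch

# In sd_models.py, checkpoints_loaded is an OrderedDict, so insertion
# order doubles as eviction order for the cleanup loop at the end.
checkpoints_loaded = OrderedDict()

def load_weights_sketch(model, checkpoint_info, cache_limit):
    """Condensed, hypothetical version of the patched load_model_weights()."""
    cache_enabled = cache_limit > 0

    if cache_enabled and checkpoint_info in checkpoints_loaded:
        # cache hit: reuse the stored state dict, no file I/O
        model.load_state_dict(checkpoints_loaded[checkpoint_info])
    else:
        # cache miss: read the checkpoint from disk
        pl_sd = torch.load(checkpoint_info.filename, map_location="cpu")
        sd = pl_sd.get("state_dict", pl_sd)  # approximation of the repo's helper
        model.load_state_dict(sd, strict=False)
        if cache_enabled:
            # cache the newly loaded model
            checkpoints_loaded[checkpoint_info] = model.state_dict().copy()

    # evict oldest entries; "+ 1" because the model currently in use
    # also occupies a slot in the dict
    if cache_enabled:
        while len(checkpoints_loaded) > cache_limit + 1:
            checkpoints_loaded.popitem(last=False)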
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 34c57bfa..80addf03 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -163,13 +163,21 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
     checkpoint_file = checkpoint_info.filename
     sd_model_hash = checkpoint_info.hash

-    if shared.opts.sd_checkpoint_cache > 0 and hasattr(model, "sd_checkpoint_info"):
+    cache_enabled = shared.opts.sd_checkpoint_cache > 0
+
+    if cache_enabled:
         sd_vae.restore_base_vae(model)
-        checkpoints_loaded[model.sd_checkpoint_info] = model.state_dict().copy()

     vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)

-    if checkpoint_info not in checkpoints_loaded:
+    if cache_enabled and checkpoint_info in checkpoints_loaded:
+        # use checkpoint cache
+        vae_name = sd_vae.get_filename(vae_file) if vae_file else None
+        vae_message = f" with {vae_name} VAE" if vae_name else ""
+        print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
+        model.load_state_dict(checkpoints_loaded[checkpoint_info])
+    else:
+        # load from file
         print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")

         pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
@@ -180,6 +188,10 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
         del pl_sd
         model.load_state_dict(sd, strict=False)
         del sd
+
+        if cache_enabled:
+            # cache newly loaded model
+            checkpoints_loaded[checkpoint_info] = model.state_dict().copy()

         if shared.cmd_opts.opt_channelslast:
             model.to(memory_format=torch.channels_last)
@@ -199,14 +211,9 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):

         model.first_stage_model.to(devices.dtype_vae)

-    else:
-        vae_name = sd_vae.get_filename(vae_file) if vae_file else None
-        vae_message = f" with {vae_name} VAE" if vae_name else ""
-        print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
-        model.load_state_dict(checkpoints_loaded[checkpoint_info])
-
-    if shared.opts.sd_checkpoint_cache > 0:
-        while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
+    # clean up cache if limit is reached
+    if cache_enabled:
+        while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache + 1: # we need to count the current model
            checkpoints_loaded.popitem(last=False)  # LRU

    model.sd_model_hash = sd_model_hash
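For reference, the eviction relies on checkpoints_loaded being an OrderedDict: popitem(last=False) removes the oldest inserted entry first (the in-code comment calls this LRU; strictly it evicts in insertion order, since the patch does not reorder entries on a cache hit). A small runnable illustration of why the cap is sd_checkpoint_cache + 1, with illustrative names:

from collections import OrderedDict

cache = OrderedDict()
cache_limit = 1  # plays the role of shared.opts.sd_checkpoint_cache

for name in ["ckpt_a", "ckpt_b", "ckpt_c"]:
    cache[name] = f"<state dict of {name}>"
    # allow cache_limit cached models plus the one currently in use
    while len(cache) > cache_limit + 1:
        cache.popitem(last=False)  # evict oldest entry

print(list(cache))  # ['ckpt_b', 'ckpt_c'] -> current model plus one cached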