author | AUTOMATIC1111 <16777216c@gmail.com> | 2022-12-03 07:19:51 +0000
committer | GitHub <noreply@github.com> | 2022-12-03 07:19:51 +0000
commit | c9a2cfdf2a53d37c2de1908423e4f548088667ef (patch)
tree | eabdca1b7665e0ee00f130e9f8544ffd23e474a2 /modules/sd_models.py
parent | 39541d7725bc42f456a604b07c50aba503a5a09a (diff)
parent | 5cd5a672f7889dcc018c3873ec557d645ebe35d0 (diff)
Merge branch 'master' into racecond_fix
Diffstat (limited to 'modules/sd_models.py')
-rw-r--r-- | modules/sd_models.py | 63
1 file changed, 38 insertions(+), 25 deletions(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index ae427a5c..283cf1cd 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -5,6 +5,7 @@ import gc
 from collections import namedtuple
 import torch
 import re
+import safetensors.torch
 from omegaconf import OmegaConf
 
 from ldm.util import instantiate_from_config
@@ -45,7 +46,7 @@ def checkpoint_tiles():
 def list_models():
     checkpoints_list.clear()
-    model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt"])
+    model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"])
 
     def modeltitle(path, shorthash):
         abspath = os.path.abspath(path)
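The hunk above lets the model lister pick up `.safetensors` checkpoints alongside `.ckpt`. `modelloader.load_models` is webui-internal; as a rough sketch of what an extension filter like this amounts to, here is a hypothetical pathlib-based stand-in (not the webui implementation):

```python
from pathlib import Path

def list_checkpoints(model_dir, ext_filter=(".ckpt", ".safetensors")):
    # Hypothetical stand-in for modelloader.load_models: walk the
    # directory tree and keep files whose suffix matches the filter.
    return sorted(
        str(p) for p in Path(model_dir).rglob("*")
        if p.suffix.lower() in ext_filter
    )
```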
@@ -143,8 +144,8 @@ def transform_checkpoint_dict_key(k):
 def get_state_dict_from_checkpoint(pl_sd):
-    if "state_dict" in pl_sd:
-        pl_sd = pl_sd["state_dict"]
+    pl_sd = pl_sd.pop("state_dict", pl_sd)
+    pl_sd.pop("state_dict", None)
 
     sd = {}
     for k, v in pl_sd.items():
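The rewritten unwrapping relies on `dict.pop(key, default)`: the first `pop` returns the nested weights dict when the checkpoint wraps it under a "state_dict" key and returns the checkpoint itself otherwise; the second drops a stray "state_dict" entry that some checkpoints carry inside the weights. A minimal sketch of the behavior (the example dicts are made up):

```python
# Hypothetical checkpoints illustrating the two shapes handled above.
nested = {"state_dict": {"w": 1}, "global_step": 1000}
flat = {"w": 1}

def unwrap(pl_sd):
    # Returns the nested dict if present, otherwise the dict itself.
    pl_sd = pl_sd.pop("state_dict", pl_sd)
    # Drop a stray "state_dict" key inside the weights, if any,
    # so it never reaches model.load_state_dict.
    pl_sd.pop("state_dict", None)
    return pl_sd

assert unwrap(nested) == {"w": 1}
assert unwrap(flat) == {"w": 1}
```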
@@ -159,25 +160,41 @@ def get_state_dict_from_checkpoint(pl_sd):
     return pl_sd
 
 
+def read_state_dict(checkpoint_file, print_global_state=False, map_location=None):
+    _, extension = os.path.splitext(checkpoint_file)
+    if extension.lower() == ".safetensors":
+        pl_sd = safetensors.torch.load_file(checkpoint_file, device=map_location or shared.weight_load_location)
+    else:
+        pl_sd = torch.load(checkpoint_file, map_location=map_location or shared.weight_load_location)
+
+    if print_global_state and "global_step" in pl_sd:
+        print(f"Global Step: {pl_sd['global_step']}")
+
+    sd = get_state_dict_from_checkpoint(pl_sd)
+    return sd
+
+
 def load_model_weights(model, checkpoint_info, vae_file="auto"):
     checkpoint_file = checkpoint_info.filename
     sd_model_hash = checkpoint_info.hash
 
-    vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
-
-    checkpoint_key = checkpoint_info
+    cache_enabled = shared.opts.sd_checkpoint_cache > 0
 
-    if checkpoint_key not in checkpoints_loaded:
+    if cache_enabled and checkpoint_info in checkpoints_loaded:
+        # use checkpoint cache
+        print(f"Loading weights [{sd_model_hash}] from cache")
+        model.load_state_dict(checkpoints_loaded[checkpoint_info])
+    else:
+        # load from file
         print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
 
-        pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
-        if "global_step" in pl_sd:
-            print(f"Global Step: {pl_sd['global_step']}")
-
-        sd = get_state_dict_from_checkpoint(pl_sd)
-        del pl_sd
+        sd = read_state_dict(checkpoint_file)
         model.load_state_dict(sd, strict=False)
         del sd
+
+        if cache_enabled:
+            # cache newly loaded model
+            checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
 
         if shared.cmd_opts.opt_channelslast:
             model.to(memory_format=torch.channels_last)
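The new `read_state_dict` helper dispatches on file extension: `.safetensors` files go through `safetensors.torch.load_file`, everything else through `torch.load`. A standalone sketch of the same dispatch, with an explicit `device` argument standing in for `shared.weight_load_location`:

```python
import os
import torch
import safetensors.torch

def read_state_dict(checkpoint_file, device="cpu"):
    # Dispatch on extension: safetensors files use the pickle-free
    # loader; legacy .ckpt files fall back to torch.load, which
    # unpickles arbitrary objects.
    _, extension = os.path.splitext(checkpoint_file)
    if extension.lower() == ".safetensors":
        return safetensors.torch.load_file(checkpoint_file, device=device)
    return torch.load(checkpoint_file, map_location=device)
```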
@@ -197,23 +214,16 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
             model.first_stage_model.to(devices.dtype_vae)
 
-        if shared.opts.sd_checkpoint_cache > 0:
-            # if PR #4035 were to get merged, restore base VAE first before caching
-            checkpoints_loaded[checkpoint_key] = model.state_dict().copy()
-            while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
-                checkpoints_loaded.popitem(last=False)  # LRU
-
-    else:
-        vae_name = sd_vae.get_filename(vae_file) if vae_file else None
-        vae_message = f" with {vae_name} VAE" if vae_name else ""
-        print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
-        checkpoints_loaded.move_to_end(checkpoint_key)
-        model.load_state_dict(checkpoints_loaded[checkpoint_key])
+    # clean up cache if limit is reached
+    if cache_enabled:
+        while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache + 1:  # we need to count the current model
+            checkpoints_loaded.popitem(last=False)  # LRU
 
     model.sd_model_hash = sd_model_hash
     model.sd_model_checkpoint = checkpoint_file
     model.sd_checkpoint_info = checkpoint_info
 
+    vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
     sd_vae.load_vae(model, vae_file)
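`checkpoints_loaded` is an ordered dict, and `popitem(last=False)` evicts the oldest entry first; the `+ 1` in the new limit accounts for the currently loaded model, which now also has a cache entry. A minimal sketch of the eviction pattern (names are hypothetical; note that without `move_to_end` on cache hits this is FIFO on insertion order rather than strict LRU, despite the comment in the diff):

```python
from collections import OrderedDict

checkpoints_loaded = OrderedDict()  # insertion order doubles as age order

def cache_checkpoint(key, state_dict, limit):
    # Keep `limit` cached checkpoints plus the one currently loaded,
    # evicting the oldest entries first.
    checkpoints_loaded[key] = state_dict
    while len(checkpoints_loaded) > limit + 1:
        checkpoints_loaded.popitem(last=False)  # drop oldest entry
```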
@@ -244,6 +254,9 @@ def load_model(checkpoint_info=None):
     do_inpainting_hijack()
 
+    if shared.cmd_opts.no_half:
+        sd_config.model.params.unet_config.params.use_fp16 = False
+
     sd_model = instantiate_from_config(sd_config.model)
     load_model_weights(sd_model, checkpoint_info)
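`sd_config` here is an OmegaConf object loaded from the model's YAML config, so nested keys can be overridden with dotted attribute access before `instantiate_from_config` builds the model; with `--no-half` set, the UNet is kept out of fp16. A minimal sketch of that kind of override (the config contents are hypothetical):

```python
from omegaconf import OmegaConf

# Hypothetical stand-in for the model's YAML config.
sd_config = OmegaConf.create({
    "model": {"params": {"unet_config": {"params": {"use_fp16": True}}}}
})

no_half = True  # stands in for shared.cmd_opts.no_half
if no_half:
    # Dotted attribute access works on OmegaConf nodes, so nested
    # keys can be overridden in place before instantiation.
    sd_config.model.params.unet_config.params.use_fp16 = False

assert sd_config.model.params.unet_config.params.use_fp16 is False
```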