Diffstat (limited to 'modules/sd_models.py')
-rw-r--r--  modules/sd_models.py  70
1 file changed, 28 insertions, 42 deletions
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 75f7ab09..283cf1cd 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -5,6 +5,7 @@ import gc
 from collections import namedtuple
 import torch
 import re
+import safetensors.torch
 from omegaconf import OmegaConf
 
 from ldm.util import instantiate_from_config
@@ -16,10 +17,9 @@ from modules.sd_hijack_inpainting import do_inpainting_hijack, should_hijack_inp
 model_dir = "Stable-diffusion"
 model_path = os.path.abspath(os.path.join(models_path, model_dir))
 
-CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name', 'config', 'exttype'])
+CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name', 'config'])
 checkpoints_list = {}
 checkpoints_loaded = collections.OrderedDict()
-checkpoint_types = {'.ckpt': 'pickle', '.safetensors': 'safetensors'}
 
 try:
     # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
@@ -46,7 +46,7 @@ def checkpoint_tiles():
 def list_models():
     checkpoints_list.clear()
-    model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt",".safetensors"])
+    model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"])
 
     def modeltitle(path, shorthash):
         abspath = os.path.abspath(path)
@@ -61,15 +61,15 @@ def list_models():
         if name.startswith("\\") or name.startswith("/"):
             name = name[1:]
 
-        shortname, ext = os.path.splitext(name.replace("/", "_").replace("\\", "_"))
+        shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
 
-        return f'{name} [{checkpoint_types[ext]}] [{shorthash}]', shortname
+        return f'{name} [{shorthash}]', shortname
 
     cmd_ckpt = shared.cmd_opts.ckpt
     if os.path.exists(cmd_ckpt):
         h = model_hash(cmd_ckpt)
         title, short_model_name = modeltitle(cmd_ckpt, h)
-        checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name, shared.cmd_opts.config, '')
+        checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name, shared.cmd_opts.config)
         shared.opts.data['sd_model_checkpoint'] = title
     elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
         print(f"Checkpoint in --ckpt argument not found (possibly it was moved to {model_path}): {cmd_ckpt}", file=sys.stderr)
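With the exttype field gone, checkpoint titles drop the format tag and keep only the short hash. A minimal sketch of the new title format (the hash value below is made up):

    import os

    name, shorthash = "sd-v1-5.safetensors", "81761151"
    shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
    title = f'{name} [{shorthash}]'
    # before this diff: 'sd-v1-5.safetensors [safetensors] [81761151]'
    # after:            'sd-v1-5.safetensors [81761151]'; shortname == 'sd-v1-5'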
@@ -77,12 +77,12 @@ def list_models():
         h = model_hash(filename)
         title, short_model_name = modeltitle(filename, h)
 
-        basename, ext = os.path.splitext(filename)
+        basename, _ = os.path.splitext(filename)
         config = basename + ".yaml"
         if not os.path.exists(config):
             config = shared.cmd_opts.config
 
-        checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name, config, ext)
+        checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name, config)
 
 
 def get_closet_checkpoint_match(searchString):
@@ -142,35 +142,10 @@ def transform_checkpoint_dict_key(k):
     return k
 
 
-def torch_load(model_filename, model_info, map_override=None):
-    map_override=shared.weight_load_location if not map_override else map_override
-    if(checkpoint_types[model_info.exttype] == 'safetensors'):
-        # safely load weights
-        # TODO: safetensors supports zero copy fast load to gpu, see issue #684.
-        #  GPU only for now, see https://github.com/huggingface/safetensors/issues/95
-        try:
-            from safetensors.torch import load_file
-        except ImportError as e:
-            raise ImportError(f"The model is in safetensors format and it is not installed, use `pip install safetensors`: {e}")
-        return load_file(model_filename, device='cuda')
-    else:
-        return torch.load(model_filename, map_location=map_override)
-
-def torch_save(model, output_filename):
-    basename, exttype = os.path.splitext(output_filename)
-    if(checkpoint_types[exttype] == 'safetensors'):
-        # [===== >] Reticulating brines...
-        try:
-            from safetensors.torch import save_file
-        except ImportError as e:
-            raise ImportError(f"Export as safetensors selected, yet it is not installed, use `pip install safetensors`: {e}")
-        save_file(model, output_filename, metadata={"format": "pt"})
-    else:
-        torch.save(model, output_filename)
 
 def get_state_dict_from_checkpoint(pl_sd):
-    if "state_dict" in pl_sd:
-        pl_sd = pl_sd["state_dict"]
+    pl_sd = pl_sd.pop("state_dict", pl_sd)
+    pl_sd.pop("state_dict", None)
 
     sd = {}
     for k, v in pl_sd.items():
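The removed torch_load/torch_save pair dispatched on the CheckpointInfo exttype field; loading is replaced by read_state_dict below, which keys off the file extension instead. The rewritten get_state_dict_from_checkpoint unwraps a nested "state_dict" entry when present (falling back to the dict itself), then drops any stray "state_dict" key left inside. For the saving side this diff removes, a hedged sketch of the safetensors equivalent (file names are illustrative; save_file accepts only a flat dict of tensors):

    import torch
    import safetensors.torch

    tensors = {"weight": torch.zeros(2, 2)}  # flat name -> tensor mapping
    safetensors.torch.save_file(tensors, "model.safetensors", metadata={"format": "pt"})
    torch.save(tensors, "model.ckpt")  # pickle-based counterpart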
@@ -185,6 +160,20 @@ def get_state_dict_from_checkpoint(pl_sd):
     return pl_sd
 
 
+def read_state_dict(checkpoint_file, print_global_state=False, map_location=None):
+    _, extension = os.path.splitext(checkpoint_file)
+    if extension.lower() == ".safetensors":
+        pl_sd = safetensors.torch.load_file(checkpoint_file, device=map_location or shared.weight_load_location)
+    else:
+        pl_sd = torch.load(checkpoint_file, map_location=map_location or shared.weight_load_location)
+
+    if print_global_state and "global_step" in pl_sd:
+        print(f"Global Step: {pl_sd['global_step']}")
+
+    sd = get_state_dict_from_checkpoint(pl_sd)
+    return sd
+
+
 def load_model_weights(model, checkpoint_info, vae_file="auto"):
     checkpoint_file = checkpoint_info.filename
     sd_model_hash = checkpoint_info.hash
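read_state_dict is now the single entry point for both formats, falling back to shared.weight_load_location when no map_location is given. A usage sketch (paths are illustrative):

    from modules import sd_models

    # safetensors: parsed by safetensors.torch.load_file, no pickle execution
    sd = sd_models.read_state_dict("models/Stable-diffusion/model.safetensors")

    # classic pickle checkpoint, forced onto the CPU
    sd = sd_models.read_state_dict("models/Stable-diffusion/model.ckpt", map_location="cpu")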
@@ -199,13 +188,7 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
         # load from file
         print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
 
-        pl_sd = torch_load(checkpoint_file, checkpoint_info)
-
-        if "global_step" in pl_sd:
-            print(f"Global Step: {pl_sd['global_step']}")
-
-        sd = get_state_dict_from_checkpoint(pl_sd)
-        del pl_sd
+        sd = read_state_dict(checkpoint_file)
 
         model.load_state_dict(sd, strict=False)
         del sd
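Because read_state_dict already unwraps the checkpoint, load_model_weights shrinks to a single call. Note that strict=False keeps partial checkpoints loadable, for example when VAE weights are attached separately. A toy illustration of those semantics:

    import torch
    import torch.nn as nn

    model = nn.Linear(2, 2)
    partial = {"weight": torch.zeros(2, 2)}  # no "bias" entry
    result = model.load_state_dict(partial, strict=False)
    print(result.missing_keys)  # ['bias'] -- reported, not raised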
@@ -271,6 +254,9 @@ def load_model(checkpoint_info=None):
     do_inpainting_hijack()
 
+    if shared.cmd_opts.no_half:
+        sd_config.model.params.unet_config.params.use_fp16 = False
+
     sd_model = instantiate_from_config(sd_config.model)
     load_model_weights(sd_model, checkpoint_info)
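The new --no-half branch disables fp16 in the UNet section of the OmegaConf model config before instantiate_from_config runs. A standalone sketch, assuming a v1-style inference config that exposes model.params.unet_config.params (the config path below is illustrative):

    from omegaconf import OmegaConf

    no_half = True  # stands in for shared.cmd_opts.no_half
    sd_config = OmegaConf.load("configs/v1-inference.yaml")
    if no_half:
        sd_config.model.params.unet_config.params.use_fp16 = False  # keep the UNet in fp32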