Diffstat (limited to 'modules')
-rw-r--r--  modules/images.py        13
-rw-r--r--  modules/memmon.py        77
-rw-r--r--  modules/processing.py    11
-rw-r--r--  modules/sd_models.py    148
-rw-r--r--  modules/shared.py        24
-rw-r--r--  modules/ui.py            27
6 files changed, 283 insertions, 17 deletions
diff --git a/modules/images.py b/modules/images.py
index f37f5f08..a3064333 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -274,7 +274,7 @@ def apply_filename_pattern(x, p, seed, prompt):
x = x.replace("[height]", str(p.height))
x = x.replace("[sampler]", sd_samplers.samplers[p.sampler_index].name)
- x = x.replace("[model_hash]", shared.sd_model_hash)
+ x = x.replace("[model_hash]", shared.sd_model.sd_model_hash)
x = x.replace("[date]", datetime.date.today().isoformat())
if cmd_opts.hide_ui_dir_config:
@@ -353,13 +353,12 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
        })
    if extension.lower() in ("jpg", "jpeg", "webp"):
-        image.save(fullfn, quality=opts.jpeg_quality, exif_bytes=exif_bytes())
+        image.save(fullfn, quality=opts.jpeg_quality)
+        if opts.enable_pnginfo and info is not None:
+            piexif.insert(exif_bytes(), fullfn)
    else:
        image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo)
-    if extension.lower() == "webp":
-        piexif.insert(exif_bytes, fullfn)
-
    target_side_length = 4000
    oversize = image.width > target_side_length or image.height > target_side_length
    if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > 4 * 1024 * 1024):
@@ -370,7 +369,9 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
    elif oversize:
        image = image.resize((image.width * target_side_length // image.height, target_side_length), LANCZOS)
-        image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality, exif_bytes=exif_bytes())
+        image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality)
+        if opts.enable_pnginfo and info is not None:
+            piexif.insert(exif_bytes(), fullfn_without_extension + ".jpg")
    if opts.save_txt and info is not None:
        with open(f"{fullfn_without_extension}.txt", "w", encoding="utf8") as file:
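Both hunks above fix the same pattern: Pillow's Image.save ignores unknown keyword arguments such as exif_bytes, and the removed WebP branch passed the exif_bytes function itself rather than its result, so no metadata was ever written. The fix saves the file first and then splices the EXIF blob in afterwards with piexif.insert. A minimal sketch of that save-then-insert pattern, assuming Pillow and piexif are installed; the UserComment payload and the "out.jpg" path are illustrative stand-ins for the webui's exif_bytes() and fullfn:

```python
import piexif
import piexif.helper
from PIL import Image

image = Image.new("RGB", (64, 64))
image.save("out.jpg", quality=80)  # save first; Pillow ignores kwargs like exif_bytes

# build an EXIF blob carrying arbitrary text, then splice it into the saved file in place
comment = piexif.helper.UserComment.dump("parameters: a cat, steps 20", encoding="unicode")
piexif.insert(piexif.dump({"Exif": {piexif.ExifIFD.UserComment: comment}}), "out.jpg")
```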
diff --git a/modules/memmon.py b/modules/memmon.py
new file mode 100644
index 00000000..f2cac841
--- /dev/null
+++ b/modules/memmon.py
@@ -0,0 +1,77 @@
+import threading
+import time
+from collections import defaultdict
+
+import torch
+
+
+class MemUsageMonitor(threading.Thread):
+    run_flag = None
+    device = None
+    disabled = False
+    opts = None
+    data = None
+
+    def __init__(self, name, device, opts):
+        threading.Thread.__init__(self)
+        self.name = name
+        self.device = device
+        self.opts = opts
+
+        self.daemon = True
+        self.run_flag = threading.Event()
+        self.data = defaultdict(int)
+
+    def run(self):
+        if self.disabled:
+            return
+
+        while True:
+            self.run_flag.wait()
+
+            torch.cuda.reset_peak_memory_stats()
+            self.data.clear()
+
+            if self.opts.memmon_poll_rate <= 0:
+                self.run_flag.clear()
+                continue
+
+            self.data["min_free"] = torch.cuda.mem_get_info()[0]
+
+            while self.run_flag.is_set():
+                free, total = torch.cuda.mem_get_info()  # calling with self.device errors, torch bug?
+                self.data["min_free"] = min(self.data["min_free"], free)
+
+                time.sleep(1 / self.opts.memmon_poll_rate)
+
+    def dump_debug(self):
+        print(self, 'recorded data:')
+        for k, v in self.read().items():
+            print(k, -(v // -(1024 ** 2)))
+
+        print(self, 'raw torch memory stats:')
+        tm = torch.cuda.memory_stats(self.device)
+        for k, v in tm.items():
+            if 'bytes' not in k:
+                continue
+            print('\t' if 'peak' in k else '', k, -(v // -(1024 ** 2)))
+
+        print(torch.cuda.memory_summary())
+
+    def monitor(self):
+        self.run_flag.set()
+
+    def read(self):
+        free, total = torch.cuda.mem_get_info()
+        self.data["total"] = total
+
+        torch_stats = torch.cuda.memory_stats(self.device)
+        self.data["active_peak"] = torch_stats["active_bytes.all.peak"]
+        self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"]
+        self.data["system_peak"] = total - self.data["min_free"]
+
+        return self.data
+
+    def stop(self):
+        self.run_flag.clear()
+        return self.read()
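The monitor thread parks on a threading.Event and only polls VRAM between monitor() and stop(), so it costs nothing while idle. A usage sketch of that lifecycle, assuming a CUDA device is available; the SimpleNamespace opts is a stand-in for the webui's shared.opts:

```python
from types import SimpleNamespace

import torch

from modules.memmon import MemUsageMonitor

opts = SimpleNamespace(memmon_poll_rate=8)                   # polls per second
mon = MemUsageMonitor("MemMon", torch.device("cuda"), opts)
mon.start()                                                  # daemon thread parks on run_flag.wait()

mon.monitor()                                                # reset peaks, start polling
# ... run a generation here ...
stats = mon.stop()                                           # stop polling, read peak stats in bytes
print({k: -(v // -(1024 ** 2)) for k, v in stats.items()})   # round up to MiB, as dump_debug does
```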
diff --git a/modules/processing.py b/modules/processing.py
index 81c83f06..6a99d383 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -188,7 +188,11 @@ def fix_seed(p):
def process_images(p: StableDiffusionProcessing) -> Processed:
    """this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
-    assert p.prompt is not None
+    if isinstance(p.prompt, list):
+        assert len(p.prompt) > 0
+    else:
+        assert p.prompt is not None
+
    devices.torch_gc()
    fix_seed(p)
@@ -227,7 +231,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
"Seed": all_seeds[index],
"Face restoration": (opts.face_restoration_model if p.restore_faces else None),
"Size": f"{p.width}x{p.height}",
- "Model hash": (None if not opts.add_model_hash_to_info or not shared.sd_model_hash else shared.sd_model_hash),
+ "Model hash": (None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
"Batch size": (None if p.batch_size < 2 else p.batch_size),
"Batch pos": (None if p.batch_size < 2 else position_in_batch),
"Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
@@ -265,6 +269,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
            seeds = all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
            subseeds = all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]
+            if len(prompts) == 0:
+                break
+
            #uc = p.sd_model.get_learned_conditioning(len(prompts) * [p.negative_prompt])
            #c = p.sd_model.get_learned_conditioning(prompts)
            uc = prompt_parser.get_learned_conditioning(len(prompts) * [p.negative_prompt], p.steps)
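The new guard matters because, with a list-type prompt, the iteration count can overshoot the list length and the final batch slice comes back empty. A standalone sketch of that situation; the numbers are illustrative, not from the diff:

```python
# with a 4-item list prompt, batch_size=2 and a requested n_iter=3,
# the third slice is empty and the loop must stop instead of sampling
all_prompts = ["a", "b", "c", "d"]
batch_size, n_iter = 2, 3

for n in range(n_iter):
    prompts = all_prompts[n * batch_size:(n + 1) * batch_size]
    if len(prompts) == 0:   # slice starts past the end of the list
        break
    print(n, prompts)       # 0 ['a', 'b'] / 1 ['c', 'd']
```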
diff --git a/modules/sd_models.py b/modules/sd_models.py
new file mode 100644
index 00000000..4bd70fc5
--- /dev/null
+++ b/modules/sd_models.py
@@ -0,0 +1,148 @@
+import glob
+import os.path
+import sys
+from collections import namedtuple
+import torch
+from omegaconf import OmegaConf
+
+
+from ldm.util import instantiate_from_config
+
+from modules import shared
+
+CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash'])
+checkpoints_list = {}
+
+try:
+    # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
+
+    from transformers import logging
+
+    logging.set_verbosity_error()
+except Exception:
+    pass
+
+
+def list_models():
+    checkpoints_list.clear()
+
+    model_dir = os.path.abspath(shared.cmd_opts.ckpt_dir)
+
+    def modeltitle(path, h):
+        abspath = os.path.abspath(path)
+
+        if abspath.startswith(model_dir):
+            name = abspath.replace(model_dir, '')
+        else:
+            name = os.path.basename(path)
+
+        if name.startswith("\\") or name.startswith("/"):
+            name = name[1:]
+
+        return f'{name} [{h}]'
+
+    cmd_ckpt = shared.cmd_opts.ckpt
+    if os.path.exists(cmd_ckpt):
+        h = model_hash(cmd_ckpt)
+        title = modeltitle(cmd_ckpt, h)
+        checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h)
+    elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
+        print(f"Checkpoint in --ckpt argument not found: {cmd_ckpt}", file=sys.stderr)
+
+    if os.path.exists(model_dir):
+        for filename in glob.glob(model_dir + '/**/*.ckpt', recursive=True):
+            h = model_hash(filename)
+            title = modeltitle(filename, h)
+            checkpoints_list[title] = CheckpointInfo(filename, title, h)
+
+
+def model_hash(filename):
+    try:
+        with open(filename, "rb") as file:
+            import hashlib
+            m = hashlib.sha256()
+
+            file.seek(0x100000)
+            m.update(file.read(0x10000))
+            return m.hexdigest()[0:8]
+    except FileNotFoundError:
+        return 'NOFILE'
+
+
+def select_checkpoint():
+    model_checkpoint = shared.opts.sd_model_checkpoint
+    checkpoint_info = checkpoints_list.get(model_checkpoint, None)
+    if checkpoint_info is not None:
+        return checkpoint_info
+
+    if len(checkpoints_list) == 0:
+        print(f"Checkpoint {model_checkpoint} not found and no other checkpoints found", file=sys.stderr)
+        return None
+
+    checkpoint_info = next(iter(checkpoints_list.values()))
+    if model_checkpoint is not None:
+        print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr)
+
+    return checkpoint_info
+
+
+def load_model_weights(model, checkpoint_file, sd_model_hash):
+    print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
+
+    pl_sd = torch.load(checkpoint_file, map_location="cpu")
+    if "global_step" in pl_sd:
+        print(f"Global Step: {pl_sd['global_step']}")
+    sd = pl_sd["state_dict"]
+
+    model.load_state_dict(sd, strict=False)
+
+    if shared.cmd_opts.opt_channelslast:
+        model.to(memory_format=torch.channels_last)
+
+    if not shared.cmd_opts.no_half:
+        model.half()
+
+    model.sd_model_hash = sd_model_hash
+    model.sd_model_checkpoint = checkpoint_file
+
+
+def load_model():
+    from modules import lowvram, sd_hijack
+    checkpoint_info = select_checkpoint()
+
+    sd_config = OmegaConf.load(shared.cmd_opts.config)
+    sd_model = instantiate_from_config(sd_config.model)
+    load_model_weights(sd_model, checkpoint_info.filename, checkpoint_info.hash)
+
+    if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+        lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
+    else:
+        sd_model.to(shared.device)
+
+    sd_hijack.model_hijack.hijack(sd_model)
+
+    sd_model.eval()
+
+    print("Model loaded.")
+    return sd_model
+
+
+def reload_model_weights(sd_model, info=None):
+    from modules import lowvram, devices
+    checkpoint_info = info or select_checkpoint()
+
+    if sd_model.sd_model_checkpoint == checkpoint_info.filename:
+        return
+
+    if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+        lowvram.send_everything_to_cpu()
+    else:
+        sd_model.to(devices.cpu)
+
+    load_model_weights(sd_model, checkpoint_info.filename, checkpoint_info.hash)
+
+    if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
+        sd_model.to(devices.device)
+
+    print("Weights loaded.")
+    return sd_model
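model_hash deliberately fingerprints only a slice of the checkpoint: SHA-256 over 64 KiB taken 1 MiB into the file, truncated to 8 hex digits, so listing a directory of multi-gigabyte files stays fast and renaming a file keeps its hash stable. The same scheme restated standalone; "model.ckpt" is a placeholder path:

```python
import hashlib

def short_hash(filename: str) -> str:
    with open(filename, "rb") as file:
        file.seek(0x100000)                          # skip the first 1 MiB
        digest = hashlib.sha256(file.read(0x10000))  # hash the next 64 KiB
    return digest.hexdigest()[:8]

# short_hash("model.ckpt") -> an 8-character id like the one in "name [hash]" titles
```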
diff --git a/modules/shared.py b/modules/shared.py
index da56b6ae..3c3aa9b6 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -12,14 +12,16 @@ from modules.paths import script_path, sd_path
from modules.devices import get_optimal_device
import modules.styles
import modules.interrogate
+import modules.memmon
+import modules.sd_models
sd_model_file = os.path.join(script_path, 'model.ckpt')
-if not os.path.exists(sd_model_file):
- sd_model_file = "models/ldm/stable-diffusion-v1/model.ckpt"
+default_sd_model_file = sd_model_file
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",)
-parser.add_argument("--ckpt", type=str, default=os.path.join(sd_path, sd_model_file), help="path to checkpoint of model",)
+parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; this checkpoint will be added to the list of checkpoints and loaded by default if you don't have a checkpoint selected in settings",)
+parser.add_argument("--ckpt-dir", type=str, default=os.path.join(script_path, 'models'), help="path to directory with stable diffusion checkpoints",)
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default='GFPGANv1.3.pth')
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
@@ -87,13 +89,17 @@ interrogator = modules.interrogate.InterrogateModels("interrogate")
face_restorers = []
+modules.sd_models.list_models()
+
+
class Options:
    class OptionInfo:
-        def __init__(self, default=None, label="", component=None, component_args=None):
+        def __init__(self, default=None, label="", component=None, component_args=None, onchange=None):
            self.default = default
            self.label = label
            self.component = component
            self.component_args = component_args
+            self.onchange = onchange
    data = None
    hide_dirs = {"visible": False} if cmd_opts.hide_ui_dir_config else None
@@ -138,6 +144,7 @@ class Options:
"show_progressbar": OptionInfo(True, "Show progressbar"),
"show_progress_every_n_steps": OptionInfo(0, "Show show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
"multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. Broken in PyCharm console."),
+ "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step":1}),
"face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
@@ -148,6 +155,7 @@ class Options:
"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
"interrogate_clip_dict_limit": OptionInfo(1500, "Interrogate: maximum number of lines in text file (0 = No limit)"),
+ "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Radio, lambda: {"choices": [x.title for x in modules.sd_models.checkpoints_list.values()]}),
}
def __init__(self):
@@ -178,6 +186,10 @@ class Options:
        with open(filename, "r", encoding="utf8") as file:
            self.data = json.load(file)
+    def onchange(self, key, func):
+        item = self.data_labels.get(key)
+        item.onchange = func
+
opts = Options()
if os.path.exists(config_filename):
@@ -186,7 +198,6 @@ if os.path.exists(config_filename):
sd_upscalers = []
sd_model = None
-sd_model_hash = ''
progress_print_out = sys.stdout
@@ -217,3 +228,6 @@ class TotalTQDM:
total_tqdm = TotalTQDM()
+
+mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts)
+mem_mon.start()
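Together, the new onchange field on OptionInfo and the Options.onchange registration method let callers attach a callback to any setting, and the new sd_model_checkpoint option is the first consumer: changing it can trigger a weight reload without restarting. A sketch of the wiring; the registration site is an assumption here (in the real repo it lives in the launcher script), but the calls are the ones added in this diff:

```python
import modules.shared as shared
import modules.sd_models as sd_models

# when the user picks a different checkpoint in settings, reload weights in place
shared.opts.onchange(
    "sd_model_checkpoint",
    lambda: sd_models.reload_model_weights(shared.sd_model),
)
```

ui.py's settings handler (see the run_settings hunk below) compares oldval != value and, when they differ, fires opts.data_labels[key].onchange().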
diff --git a/modules/ui.py b/modules/ui.py
index 738ac945..960f1e36 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -119,6 +119,7 @@ def save_files(js_data, images, index):
def wrap_gradio_call(func):
    def f(*args, **kwargs):
+        shared.mem_mon.monitor()
        t = time.perf_counter()
        try:
@@ -135,8 +136,20 @@ def wrap_gradio_call(func):
        elapsed = time.perf_counter() - t
+        mem_stats = {k: -(v // -(1024 * 1024)) for k, v in shared.mem_mon.stop().items()}
+        active_peak = mem_stats['active_peak']
+        reserved_peak = mem_stats['reserved_peak']
+        sys_peak = '?' if opts.memmon_poll_rate <= 0 else mem_stats['system_peak']
+        sys_total = mem_stats['total']
+        sys_pct = '?' if opts.memmon_poll_rate <= 0 else round(sys_peak / sys_total * 100, 2)
+        vram_tooltip = "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.&#013;" \
+                       "Torch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.&#013;" \
+                       "Sys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%)."
+
+        vram_html = '' if opts.memmon_poll_rate == 0 else f"<p class='vram' title='{vram_tooltip}'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
+
        # last item is always HTML
-        res[-1] = res[-1] + f"<p class='performance'>Time taken: {elapsed:.2f}s</p>"
+        res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
        shared.state.interrupted = False
@@ -324,6 +337,8 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
            with gr.Column(variant='panel'):
+                progressbar = gr.HTML(elem_id="progressbar")
+
                with gr.Group():
                    txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
                    txt2img_gallery = gr.Gallery(label='Output', elem_id='txt2img_gallery').style(grid=4)
@@ -336,8 +351,6 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                        send_to_extras = gr.Button('Send to extras')
                        interrupt = gr.Button('Interrupt')
-                progressbar = gr.HTML(elem_id="progressbar")
-
                with gr.Group():
                    html_info = gr.HTML()
                    generation_info = gr.Textbox(visible=False)
@@ -461,6 +474,8 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
            with gr.Column(variant='panel'):
+                progressbar = gr.HTML(elem_id="progressbar")
+
                with gr.Group():
                    img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
                    img2img_gallery = gr.Gallery(label='Output', elem_id='img2img_gallery').style(grid=4)
@@ -474,7 +489,6 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                        interrupt = gr.Button('Interrupt')
                        img2img_save_style = gr.Button('Save prompt as style')
-                progressbar = gr.HTML(elem_id="progressbar")
                with gr.Group():
                    html_info = gr.HTML()
@@ -745,7 +759,12 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
        if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
            continue
+        oldval = opts.data.get(key, None)
        opts.data[key] = value
+
+        if oldval != value and opts.data_labels[key].onchange is not None:
+            opts.data_labels[key].onchange()
+
        up.append(comp.update(value=value))
    opts.save(shared.config_filename)
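Both memmon.dump_debug and the new stats line in wrap_gradio_call convert bytes to MiB with the expression -(v // -d), which is ceiling division: negate, floor-divide, negate again. A small worked demonstration:

```python
def ceil_div(v: int, d: int) -> int:
    return -(v // -d)  # floor division of the negation rounds toward the ceiling

MiB = 1024 * 1024
assert ceil_div(0, MiB) == 0
assert ceil_div(1, MiB) == 1        # any nonzero remainder rounds up
assert ceil_div(MiB, MiB) == 1
assert ceil_div(MiB + 1, MiB) == 2
```

This keeps small nonzero peaks visible as "1 MiB" in the UI instead of truncating them to zero.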