From d380f939b5ab6a28bed6d1de3cf283e194255963 Mon Sep 17 00:00:00 2001
From: Leon Feng <523684+leon0707@users.noreply.github.com>
Date: Sat, 15 Jul 2023 23:31:59 -0400
Subject: Update shared.py
---
modules/shared.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'modules')
diff --git a/modules/shared.py b/modules/shared.py
index a0862055..564799bc 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -394,7 +394,7 @@ options_templates.update(options_section(('training', "Training"), {
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
- "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
+ "sd_model_checkpoint": OptionInfo("", "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
"sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"),
--
cgit v1.2.3
From 35510f7529dc05437a82496187ef06b852be9ab1 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 17 Jul 2023 10:06:02 +0300
Subject: add alias to lyco network read networks from LyCORIS dir if it exists
add credits
---
modules/extra_networks.py | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
(limited to 'modules')
diff --git a/modules/extra_networks.py b/modules/extra_networks.py
index 41799b0a..6ae07e91 100644
--- a/modules/extra_networks.py
+++ b/modules/extra_networks.py
@@ -4,16 +4,22 @@ from collections import defaultdict
from modules import errors
extra_network_registry = {}
+extra_network_aliases = {}
def initialize():
extra_network_registry.clear()
+ extra_network_aliases.clear()
def register_extra_network(extra_network):
extra_network_registry[extra_network.name] = extra_network
+def register_extra_network_alias(extra_network, alias):
+ extra_network_aliases[alias] = extra_network
+
+
def register_default_extra_networks():
from modules.extra_networks_hypernet import ExtraNetworkHypernet
register_extra_network(ExtraNetworkHypernet())
@@ -82,20 +88,26 @@ def activate(p, extra_network_data):
"""call activate for extra networks in extra_network_data in specified order, then call
activate for all remaining registered networks with an empty argument list"""
+ activated = []
+
for extra_network_name, extra_network_args in extra_network_data.items():
extra_network = extra_network_registry.get(extra_network_name, None)
+
+ if extra_network is None:
+ extra_network = extra_network_aliases.get(extra_network_name, None)
+
if extra_network is None:
print(f"Skipping unknown extra network: {extra_network_name}")
continue
try:
extra_network.activate(p, extra_network_args)
+ activated.append(extra_network)
except Exception as e:
errors.display(e, f"activating extra network {extra_network_name} with arguments {extra_network_args}")
for extra_network_name, extra_network in extra_network_registry.items():
- args = extra_network_data.get(extra_network_name, None)
- if args is not None:
+ if extra_network in activated:
continue
try:
--
cgit v1.2.3
From 699108bfbb05c2a7d2ee4a2c7abcfaa0a244d8ea Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 17 Jul 2023 18:56:14 +0300
Subject: hide cards for networks of incompatible stable diffusion version in
Lora extra networks interface
---
modules/sd_models.py | 3 +++
modules/ui_extra_networks.py | 3 ++-
modules/ui_extra_networks_user_metadata.py | 7 ++++++-
3 files changed, 11 insertions(+), 2 deletions(-)
(limited to 'modules')
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 729f03d7..4d9382dd 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -290,6 +290,9 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
model.is_sdxl = hasattr(model, 'conditioner')
+ model.is_sd2 = not model.is_sdxl and hasattr(model.cond_stage_model, 'model')
+ model.is_sd1 = not model.is_sdxl and not model.is_sd2
+
if model.is_sdxl:
sd_models_xl.extend_sdxl(model)
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index 6c73998f..49612298 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -62,7 +62,8 @@ def get_single_card(page: str = "", tabname: str = "", name: str = ""):
page = next(iter([x for x in extra_pages if x.name == page]), None)
try:
- item = page.create_item(name)
+ item = page.create_item(name, enable_filter=False)
+ page.items[name] = item
except Exception as e:
errors.display(e, "creating item for extra network")
item = page.items.get(name)
diff --git a/modules/ui_extra_networks_user_metadata.py b/modules/ui_extra_networks_user_metadata.py
index 01ff4e4b..63d4b503 100644
--- a/modules/ui_extra_networks_user_metadata.py
+++ b/modules/ui_extra_networks_user_metadata.py
@@ -42,6 +42,9 @@ class UserMetadataEditor:
return user_metadata
+ def create_extra_default_items_in_left_column(self):
+ pass
+
def create_default_editor_elems(self):
with gr.Row():
with gr.Column(scale=2):
@@ -49,6 +52,8 @@ class UserMetadataEditor:
self.edit_description = gr.Textbox(label="Description", lines=4)
self.html_filedata = gr.HTML()
+ self.create_extra_default_items_in_left_column()
+
with gr.Column(scale=1, min_width=0):
self.html_preview = gr.HTML()
@@ -111,7 +116,7 @@ class UserMetadataEditor:
table = '
'
- return html.escape(name), user_metadata.get('description', ''), table, self.get_card_html(name), user_metadata.get('notes', ''),
+ return html.escape(name), user_metadata.get('description', ''), table, self.get_card_html(name), user_metadata.get('notes', '')
def write_user_metadata(self, name, metadata):
item = self.page.items.get(name, {})
--
cgit v1.2.3
From a99d5708e6d603e8f7cfd1b8c6595f8026219ba0 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 17 Jul 2023 20:10:24 +0300
Subject: skip installing packages with pip if they are already installed; record
 time it took to launch
---
modules/launch_utils.py | 46 +++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 45 insertions(+), 1 deletion(-)
(limited to 'modules')
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 434facbc..03552bc2 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -1,4 +1,5 @@
# this scripts installs necessary requirements and launches main program in webui.py
+import re
import subprocess
import os
import sys
@@ -9,6 +10,9 @@ from functools import lru_cache
from modules import cmd_args, errors
from modules.paths_internal import script_path, extensions_dir
+from modules import timer
+
+timer.startup_timer.record("start")
args, _ = cmd_args.parser.parse_known_args()
@@ -226,6 +230,44 @@ def run_extensions_installers(settings_file):
run_extension_installer(os.path.join(extensions_dir, dirname_extension))
+re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*")
+
+
+def requrements_met(requirements_file):
+ """
+ Does a simple parse of a requirements.txt file to determine if all rerqirements in it
+ are already installed. Returns True if so, False if not installed or parsing fails.
+ """
+
+ import importlib.metadata
+ import packaging.version
+
+ with open(requirements_file, "r", encoding="utf8") as file:
+ for line in file:
+ if line.strip() == "":
+ continue
+
+ m = re.match(re_requirement, line)
+ if m is None:
+ return False
+
+ package = m.group(1).strip()
+ version_required = (m.group(2) or "").strip()
+
+ if version_required == "":
+ continue
+
+ try:
+ version_installed = importlib.metadata.version(package)
+ except Exception:
+ return False
+
+ if packaging.version.parse(version_required) != packaging.version.parse(version_installed):
+ return False
+
+ return True
+
+
def prepare_environment():
torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu118")
torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}")
@@ -311,7 +353,9 @@ def prepare_environment():
if not os.path.isfile(requirements_file):
requirements_file = os.path.join(script_path, requirements_file)
- run_pip(f"install -r \"{requirements_file}\"", "requirements")
+
+ if not requrements_met(requirements_file):
+ run_pip(f"install -r \"{requirements_file}\"", "requirements")
run_extensions_installers(settings_file=args.ui_settings_file)
--
cgit v1.2.3
From 40a18d38a8fcb88d1c2947a2653b52cd2085536f Mon Sep 17 00:00:00 2001
From: lambertae
Date: Tue, 18 Jul 2023 00:32:01 -0400
Subject: add restart sampler
---
modules/sd_samplers_kdiffusion.py | 70 +++++++++++++++++++++++++++++++++++++--
1 file changed, 68 insertions(+), 2 deletions(-)
(limited to 'modules')
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 71581b76..c63b677c 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -1,3 +1,5 @@
+# export PIP_CACHE_DIR=/scratch/dengm/cache
+# export XDG_CACHE_HOME=/scratch/dengm/cache
from collections import deque
import torch
import inspect
@@ -30,12 +32,76 @@ samplers_k_diffusion = [
('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}),
('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}),
+ ('Restart (new)', 'restart_sampler', ['restart'], {'scheduler': 'karras', "second_order": True}),
]
+
+@torch.no_grad()
+def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list = {0.1: [10, 2, 2]}):
+ """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)"""
+ '''Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}'''
+
+ from tqdm.auto import trange, tqdm
+ extra_args = {} if extra_args is None else extra_args
+ s_in = x.new_ones([x.shape[0]])
+ step_id = 0
+
+ from k_diffusion.sampling import to_d, append_zero
+
+ def heun_step(x, old_sigma, new_sigma):
+ nonlocal step_id
+ denoised = model(x, old_sigma * s_in, **extra_args)
+ d = to_d(x, old_sigma, denoised)
+ if callback is not None:
+ callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised})
+ dt = new_sigma - old_sigma
+ if new_sigma == 0:
+ # Euler method
+ x = x + d * dt
+ else:
+ # Heun's method
+ x_2 = x + d * dt
+ denoised_2 = model(x_2, new_sigma * s_in, **extra_args)
+ d_2 = to_d(x_2, new_sigma, denoised_2)
+ d_prime = (d + d_2) / 2
+ x = x + d_prime * dt
+ step_id += 1
+ return x
+ # print(sigmas)
+ temp_list = dict()
+ for key, value in restart_list.items():
+ temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value
+ restart_list = temp_list
+
+
+ def get_sigmas_karras(n, sigma_min, sigma_max, rho=7., device='cpu'):
+ ramp = torch.linspace(0, 1, n).to(device)
+ min_inv_rho = (sigma_min ** (1 / rho))
+ max_inv_rho = (sigma_max ** (1 / rho))
+ if isinstance(min_inv_rho, torch.Tensor):
+ min_inv_rho = min_inv_rho.to(device)
+ if isinstance(max_inv_rho, torch.Tensor):
+ max_inv_rho = max_inv_rho.to(device)
+ sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
+ return append_zero(sigmas).to(device)
+
+ for i in trange(len(sigmas) - 1, disable=disable):
+ x = heun_step(x, sigmas[i], sigmas[i+1])
+ if i + 1 in restart_list:
+ restart_steps, restart_times, restart_max = restart_list[i + 1]
+ min_idx = i + 1
+ max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0))
+ sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx], sigmas[max_idx], device=sigmas.device)[:-1] # remove the zero at the end
+ for times in range(restart_times):
+ x = x + torch.randn_like(x) * s_noise * (sigmas[max_idx] ** 2 - sigmas[min_idx] ** 2) ** 0.5
+ for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:]):
+ x = heun_step(x, old_sigma, new_sigma)
+ return x
+
samplers_data_k_diffusion = [
sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
for label, funcname, aliases, options in samplers_k_diffusion
- if hasattr(k_diffusion.sampling, funcname)
+ if (hasattr(k_diffusion.sampling, funcname) or funcname == 'restart_sampler')
]
sampler_extra_params = {
@@ -245,7 +311,7 @@ class KDiffusionSampler:
self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization)
self.funcname = funcname
- self.func = getattr(k_diffusion.sampling, self.funcname)
+ self.func = getattr(k_diffusion.sampling, self.funcname) if funcname != "restart_sampler" else restart_sampler
self.extra_params = sampler_extra_params.get(funcname, [])
self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
self.sampler_noises = None
--
cgit v1.2.3
From 15a94d6cf7fa075c09362e73c1239692d021c559 Mon Sep 17 00:00:00 2001
From: lambertae
Date: Tue, 18 Jul 2023 00:39:26 -0400
Subject: remove useless header
---
modules/sd_samplers_kdiffusion.py | 2 --
1 file changed, 2 deletions(-)
(limited to 'modules')
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index c63b677c..7888d864 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -1,5 +1,3 @@
-# export PIP_CACHE_DIR=/scratch/dengm/cache
-# export XDG_CACHE_HOME=/scratch/dengm/cache
from collections import deque
import torch
import inspect
--
cgit v1.2.3
From f0e2098f1a533c88396536282c1d6cd7d847a51c Mon Sep 17 00:00:00 2001
From: brkirch
Date: Mon, 17 Jul 2023 23:39:38 -0400
Subject: Add support for `--upcast-sampling` with SD XL
---
modules/sd_hijack_unet.py | 8 +++++++-
modules/sd_models.py | 2 +-
2 files changed, 8 insertions(+), 2 deletions(-)
(limited to 'modules')
diff --git a/modules/sd_hijack_unet.py b/modules/sd_hijack_unet.py
index ca1daf45..2101f1a0 100644
--- a/modules/sd_hijack_unet.py
+++ b/modules/sd_hijack_unet.py
@@ -39,7 +39,10 @@ def apply_model(orig_func, self, x_noisy, t, cond, **kwargs):
if isinstance(cond, dict):
for y in cond.keys():
- cond[y] = [x.to(devices.dtype_unet) if isinstance(x, torch.Tensor) else x for x in cond[y]]
+ if isinstance(cond[y], list):
+ cond[y] = [x.to(devices.dtype_unet) if isinstance(x, torch.Tensor) else x for x in cond[y]]
+ else:
+ cond[y] = cond[y].to(devices.dtype_unet) if isinstance(cond[y], torch.Tensor) else cond[y]
with devices.autocast():
return orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs).float()
@@ -77,3 +80,6 @@ first_stage_sub = lambda orig_func, self, x, **kwargs: orig_func(self, x.to(devi
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond)
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.get_first_stage_encoding', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).float(), first_stage_cond)
+
+CondFunc('sgm.modules.diffusionmodules.wrappers.OpenAIWrapper.forward', apply_model, unet_needs_upcast)
+CondFunc('sgm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 4d9382dd..5813b550 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -326,7 +326,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
timer.record("apply half()")
- devices.dtype_unet = model.model.diffusion_model.dtype
+ devices.dtype_unet = torch.float16 if model.is_sdxl and not shared.cmd_opts.no_half else model.model.diffusion_model.dtype
devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
model.first_stage_model.to(devices.dtype_vae)
--
cgit v1.2.3
From 37e048a7e2356f4caebfd976351112f03856f082 Mon Sep 17 00:00:00 2001
From: lambertae
Date: Tue, 18 Jul 2023 00:55:02 -0400
Subject: fix floating error
---
modules/sd_samplers_kdiffusion.py | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
(limited to 'modules')
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 7888d864..1bb25adf 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -89,11 +89,12 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No
restart_steps, restart_times, restart_max = restart_list[i + 1]
min_idx = i + 1
max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0))
- sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx], sigmas[max_idx], device=sigmas.device)[:-1] # remove the zero at the end
- for times in range(restart_times):
- x = x + torch.randn_like(x) * s_noise * (sigmas[max_idx] ** 2 - sigmas[min_idx] ** 2) ** 0.5
- for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:]):
- x = heun_step(x, old_sigma, new_sigma)
+ if max_idx < min_idx:
+ sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx], sigmas[max_idx], device=sigmas.device)[:-1] # remove the zero at the end
+ for times in range(restart_times):
+ x = x + torch.randn_like(x) * s_noise * (sigmas[max_idx] ** 2 - sigmas[min_idx] ** 2) ** 0.5
+ for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:]):
+ x = heun_step(x, old_sigma, new_sigma)
return x
samplers_data_k_diffusion = [
--
cgit v1.2.3
From 7bb0fbed136c6a345b211e09102659fd89362576 Mon Sep 17 00:00:00 2001
From: lambertae
Date: Tue, 18 Jul 2023 01:02:04 -0400
Subject: code styling
---
modules/sd_samplers_kdiffusion.py | 14 +++++---------
1 file changed, 5 insertions(+), 9 deletions(-)
(limited to 'modules')
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 1bb25adf..db7013f2 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -35,17 +35,15 @@ samplers_k_diffusion = [
@torch.no_grad()
-def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list = {0.1: [10, 2, 2]}):
+def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1.):
"""Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)"""
'''Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}'''
-
- from tqdm.auto import trange, tqdm
+ restart_list = {0.1: [10, 2, 2]}
+ from tqdm.auto import trange
extra_args = {} if extra_args is None else extra_args
s_in = x.new_ones([x.shape[0]])
step_id = 0
-
from k_diffusion.sampling import to_d, append_zero
-
def heun_step(x, old_sigma, new_sigma):
nonlocal step_id
denoised = model(x, old_sigma * s_in, **extra_args)
@@ -70,8 +68,6 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No
for key, value in restart_list.items():
temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value
restart_list = temp_list
-
-
def get_sigmas_karras(n, sigma_min, sigma_max, rho=7., device='cpu'):
ramp = torch.linspace(0, 1, n).to(device)
min_inv_rho = (sigma_min ** (1 / rho))
@@ -82,7 +78,6 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No
max_inv_rho = max_inv_rho.to(device)
sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return append_zero(sigmas).to(device)
-
for i in trange(len(sigmas) - 1, disable=disable):
x = heun_step(x, sigmas[i], sigmas[i+1])
if i + 1 in restart_list:
@@ -91,7 +86,8 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No
max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0))
if max_idx < min_idx:
sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx], sigmas[max_idx], device=sigmas.device)[:-1] # remove the zero at the end
- for times in range(restart_times):
+ while restart_times > 0:
+ restart_times -= 1
x = x + torch.randn_like(x) * s_noise * (sigmas[max_idx] ** 2 - sigmas[min_idx] ** 2) ** 0.5
for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:]):
x = heun_step(x, old_sigma, new_sigma)
--
cgit v1.2.3
From d6668347c8b85b11b696ac56777cc396e34ee1f9 Mon Sep 17 00:00:00 2001
From: Leon Feng
Date: Tue, 18 Jul 2023 04:19:58 -0400
Subject: remove duplicate
---
modules/textual_inversion/logging.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'modules')
diff --git a/modules/textual_inversion/logging.py b/modules/textual_inversion/logging.py
index 734a4b6f..a822a7a5 100644
--- a/modules/textual_inversion/logging.py
+++ b/modules/textual_inversion/logging.py
@@ -2,7 +2,7 @@ import datetime
import json
import os
-saved_params_shared = {"model_name", "model_hash", "initial_step", "num_of_dataset_images", "learn_rate", "batch_size", "clip_grad_mode", "clip_grad_value", "gradient_step", "data_root", "log_directory", "training_width", "training_height", "steps", "create_image_every", "template_file", "gradient_step", "latent_sampling_method"}
+saved_params_shared = {"model_name", "model_hash", "initial_step", "num_of_dataset_images", "learn_rate", "batch_size", "clip_grad_mode", "clip_grad_value", "data_root", "log_directory", "training_width", "training_height", "steps", "create_image_every", "template_file", "gradient_step", "latent_sampling_method"}
saved_params_ti = {"embedding_name", "num_vectors_per_token", "save_embedding_every", "save_image_with_stored_embedding"}
saved_params_hypernet = {"hypernetwork_name", "layer_structure", "activation_func", "weight_init", "add_layer_norm", "use_dropout", "save_hypernetwork_every"}
saved_params_all = saved_params_shared | saved_params_ti | saved_params_hypernet
--
cgit v1.2.3
From 420cc8f68e6aca8a3a0f42ee0e626a6b03712763 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 18 Jul 2023 11:48:40 +0300
Subject: also make None a valid option for options API for #11854
---
modules/api/models.py | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
(limited to 'modules')
diff --git a/modules/api/models.py b/modules/api/models.py
index b5683071..b55fa728 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -1,4 +1,6 @@
import inspect
+import types
+
from pydantic import BaseModel, Field, create_model
from typing import Any, Optional
from typing_extensions import Literal
@@ -207,11 +209,14 @@ class PreprocessResponse(BaseModel):
fields = {}
for key, metadata in opts.data_labels.items():
value = opts.data.get(key)
+ if key == 'sd_model_checkpoint':
+ value = None
optType = opts.typemap.get(type(metadata.default), type(value))
- if (metadata is not None):
- fields.update({key: (Optional[optType], Field(
- default=metadata.default ,description=metadata.label))})
+ if optType == types.NoneType:
+ pass
+ elif metadata is not None:
+ fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
else:
fields.update({key: (Optional[optType], Field())})
--
cgit v1.2.3
From 3c570421d3a2eb24528b5f5bb615dcb0c7717e4a Mon Sep 17 00:00:00 2001
From: wfjsw
Date: Tue, 18 Jul 2023 19:00:16 +0800
Subject: move start timer
---
modules/api/models.py | 2 +-
modules/launch_utils.py | 3 ---
2 files changed, 1 insertion(+), 4 deletions(-)
(limited to 'modules')
diff --git a/modules/api/models.py b/modules/api/models.py
index b55fa728..96cfe920 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -213,7 +213,7 @@ for key, metadata in opts.data_labels.items():
value = None
optType = opts.typemap.get(type(metadata.default), type(value))
- if optType == types.NoneType:
+ if isinstance(optType, types.NoneType):
pass
elif metadata is not None:
fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 03552bc2..ea995eda 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -10,9 +10,6 @@ from functools import lru_cache
from modules import cmd_args, errors
from modules.paths_internal import script_path, extensions_dir
-from modules import timer
-
-timer.startup_timer.record("start")
args, _ = cmd_args.parser.parse_known_args()
--
cgit v1.2.3
From ed82f1c5f1677c85298f4d2c6c030a5551682c71 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 18 Jul 2023 15:55:23 +0300
Subject: lint
---
modules/api/models.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'modules')
diff --git a/modules/api/models.py b/modules/api/models.py
index b55fa728..96cfe920 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -213,7 +213,7 @@ for key, metadata in opts.data_labels.items():
value = None
optType = opts.typemap.get(type(metadata.default), type(value))
- if optType == types.NoneType:
+ if isinstance(optType, types.NoneType):
pass
elif metadata is not None:
fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
--
cgit v1.2.3
From 66c5f1bb1556a2d86d9f11aeb92f83d4a09832cc Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 18 Jul 2023 17:41:37 +0300
Subject: return sd_model_checkpoint to None
---
modules/shared.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'modules')
diff --git a/modules/shared.py b/modules/shared.py
index a256d090..6162938a 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -409,7 +409,7 @@ options_templates.update(options_section(('training', "Training"), {
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
- "sd_model_checkpoint": OptionInfo("", "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
+ "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
"sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"),
--
cgit v1.2.3
From b270ded268c92950a35a7a326da54496ef4151c8 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 18 Jul 2023 18:10:04 +0300
Subject: fix the issue with /sdapi/v1/options failing (this time for sure!)
fix automated tests downloading CLIP model
---
modules/api/models.py | 6 ++----
modules/cmd_args.py | 1 +
modules/sd_models.py | 2 +-
3 files changed, 4 insertions(+), 5 deletions(-)
(limited to 'modules')
diff --git a/modules/api/models.py b/modules/api/models.py
index 96cfe920..4cd20a92 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -209,11 +209,9 @@ class PreprocessResponse(BaseModel):
fields = {}
for key, metadata in opts.data_labels.items():
value = opts.data.get(key)
- if key == 'sd_model_checkpoint':
- value = None
- optType = opts.typemap.get(type(metadata.default), type(value))
+ optType = opts.typemap.get(type(metadata.default), type(metadata.default))
- if isinstance(optType, types.NoneType):
+ if metadata.default is None:
pass
elif metadata is not None:
fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index ae78f469..e401f641 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -15,6 +15,7 @@ parser.add_argument("--update-check", action='store_true', help="launch.py argum
parser.add_argument("--test-server", action='store_true', help="launch.py argument: configure server for testing")
parser.add_argument("--skip-prepare-environment", action='store_true', help="launch.py argument: skip all environment preparation")
parser.add_argument("--skip-install", action='store_true', help="launch.py argument: skip installation of packages")
+parser.add_argument("--do-not-download-clip", action='store_true', help="do not download CLIP model even if it's not included in the checkpoint")
parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored")
parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 5813b550..fb31a793 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -494,7 +494,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
sd_model = None
try:
- with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd):
+ with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd or shared.cmd_opts.do_not_download_clip):
sd_model = instantiate_from_config(sd_config.model)
except Exception:
pass
--
cgit v1.2.3
From 7f7db1700bda40ba3171a49b6a4ef38f868b7d0a Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 18 Jul 2023 18:16:23 +0300
Subject: linter fix
---
modules/api/models.py | 1 -
1 file changed, 1 deletion(-)
(limited to 'modules')
diff --git a/modules/api/models.py b/modules/api/models.py
index 4cd20a92..bf97b1a3 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -1,5 +1,4 @@
import inspect
-import types
from pydantic import BaseModel, Field, create_model
from typing import Any, Optional
--
cgit v1.2.3
From 136c8859a49a35cbffe269aafc0bbdfca0b3561d Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 18 Jul 2023 20:11:30 +0300
Subject: add backwards compatibility --lyco-dir-backcompat option, use that
for LyCORIS directory instead of hardcoded value prevent running preload.py
for disabled extensions
---
modules/script_loading.py | 5 +++--
modules/shared.py | 3 ++-
2 files changed, 5 insertions(+), 3 deletions(-)
(limited to 'modules')
diff --git a/modules/script_loading.py b/modules/script_loading.py
index 306a1f35..0d55f193 100644
--- a/modules/script_loading.py
+++ b/modules/script_loading.py
@@ -12,11 +12,12 @@ def load_module(path):
return module
-def preload_extensions(extensions_dir, parser):
+def preload_extensions(extensions_dir, parser, extension_list=None):
if not os.path.isdir(extensions_dir):
return
- for dirname in sorted(os.listdir(extensions_dir)):
+ extensions = extension_list if extension_list is not None else os.listdir(extensions_dir)
+ for dirname in sorted(extensions):
preload_script = os.path.join(extensions_dir, dirname, "preload.py")
if not os.path.isfile(preload_script):
continue
diff --git a/modules/shared.py b/modules/shared.py
index 6162938a..1ce7b49e 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -11,6 +11,7 @@ import gradio as gr
import torch
import tqdm
+import launch
import modules.interrogate
import modules.memmon
import modules.styles
@@ -26,7 +27,7 @@ demo = None
parser = cmd_args.parser
-script_loading.preload_extensions(extensions_dir, parser)
+script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file))
script_loading.preload_extensions(extensions_builtin_dir, parser)
if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None:
--
cgit v1.2.3
From c8b55f29e2838e67bd9e394f5dbca4350ccbb68f Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Wed, 19 Jul 2023 08:27:19 +0900
Subject: add missing p argument to save_image call for before-highres-fix images
---
modules/processing.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'modules')
diff --git a/modules/processing.py b/modules/processing.py
index 6567b3cf..b89ca5c2 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1029,7 +1029,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
image = sd_samplers.sample_to_image(image, index, approximation=0)
info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index)
- images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, suffix="-before-highres-fix")
+ images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, p=self, suffix="-before-highres-fix")
if latent_scale_mode is not None:
for i in range(samples.shape[0]):
--
cgit v1.2.3
From cb7573489670cc7a042d24285e158b797c9558b2 Mon Sep 17 00:00:00 2001
From: yfzhou
Date: Wed, 19 Jul 2023 17:53:28 +0800
Subject: =?UTF-8?q?=E3=80=90bug=E3=80=91reload=20altclip=20model=20error?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
When using BertSeriesModelWithTransformation as the cond_stage_model, the undo_hijack should be performed using the FrozenXLMREmbedderWithCustomWords type; otherwise, it will result in a failed model reload.
---
modules/sd_hijack.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'modules')
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 3b6f95ce..928233ab 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -203,7 +203,7 @@ class StableDiffusionModelHijack:
ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = sd_unet.UNetModel_forward
def undo_hijack(self, m):
- if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation:
+ if type(m.cond_stage_model) == sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords:
m.cond_stage_model = m.cond_stage_model.wrapped
elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords:
--
cgit v1.2.3
From 4334d25978ded517a76359e9e92b8101610cc35f Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 19 Jul 2023 15:49:31 +0300
Subject: bugfix: model name was added together with directory name to infotext
and to [model_name] filename pattern
---
modules/images.py | 2 +-
modules/processing.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
(limited to 'modules')
diff --git a/modules/images.py b/modules/images.py
index fb5d2e75..38aa933d 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -363,7 +363,7 @@ class FilenameGenerator:
'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False),
'sampler': lambda self: self.p and sanitize_filename_part(self.p.sampler_name, replace_spaces=False),
'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash),
- 'model_name': lambda self: sanitize_filename_part(shared.sd_model.sd_checkpoint_info.model_name, replace_spaces=False),
+ 'model_name': lambda self: sanitize_filename_part(shared.sd_model.sd_checkpoint_info.name_for_extra, replace_spaces=False),
'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
'datetime': lambda self, *args: self.datetime(*args), # accepts formats: [datetime], [datetime], [datetime