From d380f939b5ab6a28bed6d1de3cf283e194255963 Mon Sep 17 00:00:00 2001
From: Leon Feng <523684+leon0707@users.noreply.github.com>
Date: Sat, 15 Jul 2023 23:31:59 -0400
Subject: Update shared.py
---
modules/shared.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'modules')
diff --git a/modules/shared.py b/modules/shared.py
index a0862055..564799bc 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -394,7 +394,7 @@ options_templates.update(options_section(('training', "Training"), {
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
- "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
+ "sd_model_checkpoint": OptionInfo("", "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
"sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"),
--
cgit v1.2.3
From 35510f7529dc05437a82496187ef06b852be9ab1 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 17 Jul 2023 10:06:02 +0300
Subject: add alias to lyco network; read networks from LyCORIS dir if it exists;
 add credits
---
README.md | 1 +
extensions-builtin/Lora/networks.py | 3 ++-
extensions-builtin/Lora/scripts/lora_script.py | 5 ++++-
modules/extra_networks.py | 16 ++++++++++++++--
4 files changed, 21 insertions(+), 4 deletions(-)
(limited to 'modules')
diff --git a/README.md b/README.md
index e6d8e4bd..b796d150 100644
--- a/README.md
+++ b/README.md
@@ -168,5 +168,6 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al
- Security advice - RyotaK
- UniPC sampler - Wenliang Zhao - https://github.com/wl-zhao/UniPC
- TAESD - Ollin Boer Bohan - https://github.com/madebyollin/taesd
+- LyCORIS - KohakuBlueleaf
- Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
- (You)
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 401430e8..7b4c0312 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -11,7 +11,7 @@ import network_full
import torch
from typing import Union
-from modules import shared, devices, sd_models, errors, scripts, sd_hijack
+from modules import shared, devices, sd_models, errors, scripts, sd_hijack, paths
module_types = [
network_lora.ModuleTypeLora(),
@@ -399,6 +399,7 @@ def list_available_networks():
os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
+ candidates += list(shared.walk_files(os.path.join(paths.models_path, "LyCORIS"), allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
for filename in candidates:
if os.path.isdir(filename):
continue
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 4c75821e..f478f718 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -22,7 +22,10 @@ def unload():
def before_ui():
ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora())
- extra_networks.register_extra_network(extra_networks_lora.ExtraNetworkLora())
+
+ extra_network = extra_networks_lora.ExtraNetworkLora()
+ extra_networks.register_extra_network(extra_network)
+ extra_networks.register_extra_network_alias(extra_network, "lyco")
if not hasattr(torch.nn, 'Linear_forward_before_network'):
diff --git a/modules/extra_networks.py b/modules/extra_networks.py
index 41799b0a..6ae07e91 100644
--- a/modules/extra_networks.py
+++ b/modules/extra_networks.py
@@ -4,16 +4,22 @@ from collections import defaultdict
from modules import errors
extra_network_registry = {}
+extra_network_aliases = {}
def initialize():
extra_network_registry.clear()
+ extra_network_aliases.clear()
def register_extra_network(extra_network):
extra_network_registry[extra_network.name] = extra_network
+def register_extra_network_alias(extra_network, alias):
+ extra_network_aliases[alias] = extra_network
+
+
def register_default_extra_networks():
from modules.extra_networks_hypernet import ExtraNetworkHypernet
register_extra_network(ExtraNetworkHypernet())
@@ -82,20 +88,26 @@ def activate(p, extra_network_data):
"""call activate for extra networks in extra_network_data in specified order, then call
activate for all remaining registered networks with an empty argument list"""
+ activated = []
+
for extra_network_name, extra_network_args in extra_network_data.items():
extra_network = extra_network_registry.get(extra_network_name, None)
+
+ if extra_network is None:
+ extra_network = extra_network_aliases.get(extra_network_name, None)
+
if extra_network is None:
print(f"Skipping unknown extra network: {extra_network_name}")
continue
try:
extra_network.activate(p, extra_network_args)
+ activated.append(extra_network)
except Exception as e:
errors.display(e, f"activating extra network {extra_network_name} with arguments {extra_network_args}")
for extra_network_name, extra_network in extra_network_registry.items():
- args = extra_network_data.get(extra_network_name, None)
- if args is not None:
+ if extra_network in activated:
continue
try:
--
cgit v1.2.3
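The new alias registry lets prompts written for the separate LyCORIS extension, such as <lyco:somenet:0.8>, resolve to the built-in Lora handler. Below is a minimal standalone sketch of the lookup order activate() now follows; DummyNetwork is only a stand-in for ExtraNetworkLora, not the real class.

    # sketch of the alias fallback from modules/extra_networks.py above
    extra_network_registry = {}
    extra_network_aliases = {}

    class DummyNetwork:
        def __init__(self, name):
            self.name = name

    def register_extra_network(net):
        extra_network_registry[net.name] = net

    def register_extra_network_alias(net, alias):
        extra_network_aliases[alias] = net

    def resolve(name):
        # canonical name first, then the alias table, as in activate()
        return extra_network_registry.get(name) or extra_network_aliases.get(name)

    lora = DummyNetwork("lora")
    register_extra_network(lora)
    register_extra_network_alias(lora, "lyco")

    assert resolve("lora") is lora
    assert resolve("lyco") is lora  # <lyco:...> prompt syntax now reaches the Lora handler

The activated list added to activate() matters for the same reason: the follow-up loop that activates every remaining registered network with empty arguments now skips the network objects that actually ran, so a network triggered through an alias is not activated a second time under its canonical name.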
From 699108bfbb05c2a7d2ee4a2c7abcfaa0a244d8ea Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 17 Jul 2023 18:56:14 +0300
Subject: hide cards for networks of incompatible stable diffusion version in
Lora extra networks interface
---
extensions-builtin/Lora/network.py | 20 +++++++++++++
extensions-builtin/Lora/scripts/lora_script.py | 2 ++
extensions-builtin/Lora/ui_edit_user_metadata.py | 20 +++++++++----
extensions-builtin/Lora/ui_extra_networks_lora.py | 34 +++++++++++++++++++----
html/extra-networks-card.html | 2 +-
javascript/extraNetworks.js | 2 +-
modules/sd_models.py | 3 ++
modules/ui_extra_networks.py | 3 +-
modules/ui_extra_networks_user_metadata.py | 7 ++++-
style.css | 6 +++-
10 files changed, 84 insertions(+), 15 deletions(-)
(limited to 'modules')
diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py
index fe42dbdd..8ecfa29a 100644
--- a/extensions-builtin/Lora/network.py
+++ b/extensions-builtin/Lora/network.py
@@ -1,5 +1,6 @@
import os
from collections import namedtuple
+import enum
from modules import sd_models, cache, errors, hashes, shared
@@ -8,6 +9,13 @@ NetworkWeights = namedtuple('NetworkWeights', ['network_key', 'sd_key', 'w', 'sd
metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}
+class SdVersion(enum.Enum):
+ Unknown = 1
+ SD1 = 2
+ SD2 = 3
+ SDXL = 4
+
+
class NetworkOnDisk:
def __init__(self, name, filename):
self.name = name
@@ -44,6 +52,18 @@ class NetworkOnDisk:
''
)
+ self.sd_version = self.detect_version()
+
+ def detect_version(self):
+ if str(self.metadata.get('ss_base_model_version', "")).startswith("sdxl_"):
+ return SdVersion.SDXL
+ elif str(self.metadata.get('ss_v2', "")) == "True":
+ return SdVersion.SD2
+ elif len(self.metadata):
+ return SdVersion.SD1
+
+ return SdVersion.Unknown
+
def set_hash(self, v):
self.hash = v
self.shorthash = self.hash[0:12]
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index f478f718..cd28afc9 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -63,6 +63,8 @@ shared.options_templates.update(shared.options_section(('extra_networks', "Extra
"sd_lora": shared.OptionInfo("None", "Add network to prompt", gr.Dropdown, lambda: {"choices": ["None", *networks.available_networks]}, refresh=networks.list_available_networks),
"lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
"lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
+ "lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"),
+ "lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}),
}))
diff --git a/extensions-builtin/Lora/ui_edit_user_metadata.py b/extensions-builtin/Lora/ui_edit_user_metadata.py
index 354a1d68..c8730443 100644
--- a/extensions-builtin/Lora/ui_edit_user_metadata.py
+++ b/extensions-builtin/Lora/ui_edit_user_metadata.py
@@ -46,14 +46,17 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
def __init__(self, ui, tabname, page):
super().__init__(ui, tabname, page)
+ self.select_sd_version = None
+
self.taginfo = None
self.edit_activation_text = None
self.slider_preferred_weight = None
self.edit_notes = None
- def save_lora_user_metadata(self, name, desc, activation_text, preferred_weight, notes):
+ def save_lora_user_metadata(self, name, desc, sd_version, activation_text, preferred_weight, notes):
user_metadata = self.get_user_metadata(name)
user_metadata["description"] = desc
+ user_metadata["sd version"] = sd_version
user_metadata["activation text"] = activation_text
user_metadata["preferred weight"] = preferred_weight
user_metadata["notes"] = notes
@@ -112,11 +115,11 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
gradio_tags = [(tag, str(count)) for tag, count in tags[0:24]]
return [
- *values[0:4],
+ *values[0:5],
+ item.get("sd_version", "Unknown"),
gr.HighlightedText.update(value=gradio_tags, visible=True if tags else False),
user_metadata.get('activation text', ''),
float(user_metadata.get('preferred weight', 0.0)),
- user_metadata.get('notes', ''),
gr.update(visible=True if tags else False),
gr.update(value=self.generate_random_prompt_from_tags(tags), visible=True if tags else False),
]
@@ -141,10 +144,15 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
return ", ".join(sorted(res))
+ def create_extra_default_items_in_left_column(self):
+
+ # this would be a lot better as gr.Radio but I can't make it work
+ self.select_sd_version = gr.Dropdown(['SD1', 'SD2', 'SDXL', 'Unknown'], value='Unknown', label='Stable Diffusion version', interactive=True)
+
def create_editor(self):
self.create_default_editor_elems()
- self.taginfo = gr.HighlightedText(label="Tags")
+ self.taginfo = gr.HighlightedText(label="Training dataset tags")
self.edit_activation_text = gr.Text(label='Activation text', info="Will be added to prompt along with Lora")
self.slider_preferred_weight = gr.Slider(label='Preferred weight', info="Set to 0 to disable", minimum=0.0, maximum=2.0, step=0.01)
@@ -178,10 +186,11 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
self.edit_description,
self.html_filedata,
self.html_preview,
+ self.edit_notes,
+ self.select_sd_version,
self.taginfo,
self.edit_activation_text,
self.slider_preferred_weight,
- self.edit_notes,
row_random_prompt,
random_prompt,
]
@@ -192,6 +201,7 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
edited_components = [
self.edit_description,
+ self.select_sd_version,
self.edit_activation_text,
self.slider_preferred_weight,
self.edit_notes,
diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py
index b6171a26..4b32098b 100644
--- a/extensions-builtin/Lora/ui_extra_networks_lora.py
+++ b/extensions-builtin/Lora/ui_extra_networks_lora.py
@@ -1,7 +1,9 @@
import os
+
+import network
import networks
-from modules import shared, ui_extra_networks
+from modules import shared, ui_extra_networks, paths
from modules.ui_extra_networks import quote_js
from ui_edit_user_metadata import LoraUserMetadataEditor
@@ -13,14 +15,13 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
def refresh(self):
networks.list_available_networks()
- def create_item(self, name, index=None):
+ def create_item(self, name, index=None, enable_filter=True):
lora_on_disk = networks.available_networks.get(name)
path, ext = os.path.splitext(lora_on_disk.filename)
alias = lora_on_disk.get_alias()
- # in 1.5 filename changes to be full filename instead of path without extension, and metadata is dict instead of json string
item = {
"name": name,
"filename": lora_on_disk.filename,
@@ -30,6 +31,7 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
"local_preview": f"{path}.{shared.opts.samples_format}",
"metadata": lora_on_disk.metadata,
"sort_keys": {'default': index, **self.get_sort_keys(lora_on_disk.filename)},
+ "sd_version": lora_on_disk.sd_version.name,
}
self.read_user_metadata(item)
@@ -40,15 +42,37 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
if activation_text:
item["prompt"] += " + " + quote_js(" " + activation_text)
+ sd_version = item["user_metadata"].get("sd version")
+ if sd_version in network.SdVersion.__members__:
+ item["sd_version"] = sd_version
+ sd_version = network.SdVersion[sd_version]
+ else:
+ sd_version = lora_on_disk.sd_version
+
+ if shared.opts.lora_show_all or not enable_filter:
+ pass
+ elif sd_version == network.SdVersion.Unknown:
+ model_version = network.SdVersion.SDXL if shared.sd_model.is_sdxl else network.SdVersion.SD2 if shared.sd_model.is_sd2 else network.SdVersion.SD1
+ if model_version.name in shared.opts.lora_hide_unknown_for_versions:
+ return None
+ elif shared.sd_model.is_sdxl and sd_version != network.SdVersion.SDXL:
+ return None
+ elif shared.sd_model.is_sd2 and sd_version != network.SdVersion.SD2:
+ return None
+ elif shared.sd_model.is_sd1 and sd_version != network.SdVersion.SD1:
+ return None
+
return item
def list_items(self):
for index, name in enumerate(networks.available_networks):
item = self.create_item(name, index)
- yield item
+
+ if item is not None:
+ yield item
def allowed_directories_for_previews(self):
- return [shared.cmd_opts.lora_dir]
+ return [shared.cmd_opts.lora_dir, os.path.join(paths.models_path, "LyCORIS")]
def create_user_metadata_editor(self, ui, tabname):
return LoraUserMetadataEditor(ui, tabname, self)
diff --git a/html/extra-networks-card.html b/html/extra-networks-card.html
index eb8b1a67..39674666 100644
--- a/html/extra-networks-card.html
+++ b/html/extra-networks-card.html
@@ -1,8 +1,8 @@
{background_image}
- {edit_button}
{metadata_button}
+ {edit_button}
diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js
index e453094a..5582a6e5 100644
--- a/javascript/extraNetworks.js
+++ b/javascript/extraNetworks.js
@@ -213,7 +213,7 @@ function popup(contents) {
globalPopupInner.classList.add('global-popup-inner');
globalPopup.appendChild(globalPopupInner);
- gradioApp().appendChild(globalPopup);
+ gradioApp().querySelector('.main').appendChild(globalPopup);
}
globalPopupInner.innerHTML = '';
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 729f03d7..4d9382dd 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -290,6 +290,9 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
model.is_sdxl = hasattr(model, 'conditioner')
+ model.is_sd2 = not model.is_sdxl and hasattr(model.cond_stage_model, 'model')
+ model.is_sd1 = not model.is_sdxl and not model.is_sd2
+
if model.is_sdxl:
sd_models_xl.extend_sdxl(model)
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index 6c73998f..49612298 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -62,7 +62,8 @@ def get_single_card(page: str = "", tabname: str = "", name: str = ""):
page = next(iter([x for x in extra_pages if x.name == page]), None)
try:
- item = page.create_item(name)
+ item = page.create_item(name, enable_filter=False)
+ page.items[name] = item
except Exception as e:
errors.display(e, "creating item for extra network")
item = page.items.get(name)
diff --git a/modules/ui_extra_networks_user_metadata.py b/modules/ui_extra_networks_user_metadata.py
index 01ff4e4b..63d4b503 100644
--- a/modules/ui_extra_networks_user_metadata.py
+++ b/modules/ui_extra_networks_user_metadata.py
@@ -42,6 +42,9 @@ class UserMetadataEditor:
return user_metadata
+ def create_extra_default_items_in_left_column(self):
+ pass
+
def create_default_editor_elems(self):
with gr.Row():
with gr.Column(scale=2):
@@ -49,6 +52,8 @@ class UserMetadataEditor:
self.edit_description = gr.Textbox(label="Description", lines=4)
self.html_filedata = gr.HTML()
+ self.create_extra_default_items_in_left_column()
+
with gr.Column(scale=1, min_width=0):
self.html_preview = gr.HTML()
@@ -111,7 +116,7 @@ class UserMetadataEditor:
table = '<table class="file-metadata">' + "".join(f"<tr><th>{name}</th><td>{value}</td></tr>" for name, value in params) + '</table>'
- return html.escape(name), user_metadata.get('description', ''), table, self.get_card_html(name), user_metadata.get('notes', ''),
+ return html.escape(name), user_metadata.get('description', ''), table, self.get_card_html(name), user_metadata.get('notes', '')
def write_user_metadata(self, name, metadata):
item = self.page.items.get(name, {})
diff --git a/style.css b/style.css
index 8a66c3d2..e249cfd3 100644
--- a/style.css
+++ b/style.css
@@ -841,7 +841,7 @@ footer {
.extra-network-cards .card .card-button {
text-shadow: 2px 2px 3px black;
- padding: 0.25em;
+ padding: 0.25em 0.1em;
font-size: 200%;
width: 1.5em;
}
@@ -957,6 +957,10 @@ div.block.gradio-box.edit-user-metadata {
text-align: left;
}
+.edit-user-metadata .file-metadata th, .edit-user-metadata .file-metadata td{
+ padding: 0.3em 1em;
+}
+
.edit-user-metadata .wrap.translucent{
background: var(--body-background-fill);
}
--
cgit v1.2.3
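The filtering above hinges on NetworkOnDisk.detect_version() reading the training metadata embedded in the network file. A self-contained sketch of that heuristic, using the same metadata keys as the patch (the sample values are only illustrative):

    import enum

    class SdVersion(enum.Enum):
        Unknown = 1
        SD1 = 2
        SD2 = 3
        SDXL = 4

    def detect_version(metadata: dict) -> SdVersion:
        # keys written by kohya-ss style trainers into the safetensors metadata
        if str(metadata.get('ss_base_model_version', "")).startswith("sdxl_"):
            return SdVersion.SDXL
        if str(metadata.get('ss_v2', "")) == "True":
            return SdVersion.SD2
        if metadata:
            return SdVersion.SD1      # has training metadata, but no SD2/SDXL markers
        return SdVersion.Unknown      # no metadata at all

    print(detect_version({"ss_base_model_version": "sdxl_base_v1-0"}).name)  # SDXL
    print(detect_version({"ss_v2": "True"}).name)                            # SD2
    print(detect_version({}).name)                                           # Unknown

A user-chosen "sd version" from the new metadata editor dropdown overrides the detected value, and lora_show_all / lora_hide_unknown_for_versions decide whether the card is hidden at all.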
From a99d5708e6d603e8f7cfd1b8c6595f8026219ba0 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 17 Jul 2023 20:10:24 +0300
Subject: skip installing packages with pip if they are already installed; record
 time it took to launch
---
modules/launch_utils.py | 46 +++++++++++++++++++++++++++++++++++++++++++++-
requirements_versions.txt | 4 ++--
webui.py | 9 +++++----
3 files changed, 52 insertions(+), 7 deletions(-)
(limited to 'modules')
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 434facbc..03552bc2 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -1,4 +1,5 @@
# this scripts installs necessary requirements and launches main program in webui.py
+import re
import subprocess
import os
import sys
@@ -9,6 +10,9 @@ from functools import lru_cache
from modules import cmd_args, errors
from modules.paths_internal import script_path, extensions_dir
+from modules import timer
+
+timer.startup_timer.record("start")
args, _ = cmd_args.parser.parse_known_args()
@@ -226,6 +230,44 @@ def run_extensions_installers(settings_file):
run_extension_installer(os.path.join(extensions_dir, dirname_extension))
+re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*")
+
+
+def requrements_met(requirements_file):
+ """
+ Does a simple parse of a requirements.txt file to determine if all requirements in it
+ are already installed. Returns True if so, False if not installed or parsing fails.
+ """
+
+ import importlib.metadata
+ import packaging.version
+
+ with open(requirements_file, "r", encoding="utf8") as file:
+ for line in file:
+ if line.strip() == "":
+ continue
+
+ m = re.match(re_requirement, line)
+ if m is None:
+ return False
+
+ package = m.group(1).strip()
+ version_required = (m.group(2) or "").strip()
+
+ if version_required == "":
+ continue
+
+ try:
+ version_installed = importlib.metadata.version(package)
+ except Exception:
+ return False
+
+ if packaging.version.parse(version_required) != packaging.version.parse(version_installed):
+ return False
+
+ return True
+
+
def prepare_environment():
torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu118")
torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}")
@@ -311,7 +353,9 @@ def prepare_environment():
if not os.path.isfile(requirements_file):
requirements_file = os.path.join(script_path, requirements_file)
- run_pip(f"install -r \"{requirements_file}\"", "requirements")
+
+ if not requrements_met(requirements_file):
+ run_pip(f"install -r \"{requirements_file}\"", "requirements")
run_extensions_installers(settings_file=args.ui_settings_file)
diff --git a/requirements_versions.txt b/requirements_versions.txt
index b826bf43..d07ab456 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -8,7 +8,7 @@ einops==0.4.1
fastapi==0.94.0
gfpgan==1.3.8
gradio==3.32.0
-httpcore<=0.15
+httpcore==0.15
inflection==0.5.1
jsonmerge==1.8.0
kornia==0.6.7
@@ -17,7 +17,7 @@ numpy==1.23.5
omegaconf==2.2.3
open-clip-torch==2.20.0
piexif==1.1.3
-psutil~=5.9.5
+psutil==5.9.5
pytorch_lightning==1.9.4
realesrgan==0.3.0
resize-right==0.0.2
diff --git a/webui.py b/webui.py
index 34c2fd18..2aafc09f 100644
--- a/webui.py
+++ b/webui.py
@@ -31,21 +31,22 @@ if log_level:
logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh...
logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
-from modules import paths, timer, import_hook, errors, devices # noqa: F401
-
+from modules import timer
startup_timer = timer.startup_timer
+startup_timer.record("launcher")
import torch
import pytorch_lightning # noqa: F401 # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them
warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning")
warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision")
-
-
startup_timer.record("import torch")
import gradio # noqa: F401
startup_timer.record("import gradio")
+from modules import paths, timer, import_hook, errors, devices # noqa: F401
+startup_timer.record("setup paths")
+
import ldm.modules.encoders.modules # noqa: F401
startup_timer.record("import ldm")
--
cgit v1.2.3
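The requrements_met() helper only understands exact package==version pins: any line the regex parses without a == group is skipped rather than checked, which is presumably why httpcore<=0.15 and psutil~=5.9.5 are tightened to == pins in the same commit. A small demo of the same parsing and comparison primitives (the package names are just examples):

    import re
    import importlib.metadata
    import packaging.version

    # same pattern as launch_utils: a package name, then an optional "== version" pin
    re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*")

    for line in ["gradio==3.32.0", "psutil==5.9.5", "torch"]:
        m = re.match(re_requirement, line)
        package, pinned = m.group(1), m.group(2)
        try:
            installed = importlib.metadata.version(package)
        except importlib.metadata.PackageNotFoundError:
            installed = None
        satisfied = pinned is None or (
            installed is not None
            and packaging.version.parse(pinned) == packaging.version.parse(installed)
        )
        print(f"{package}: pinned={pinned} installed={installed} satisfied={satisfied}")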
From 40a18d38a8fcb88d1c2947a2653b52cd2085536f Mon Sep 17 00:00:00 2001
From: lambertae
Date: Tue, 18 Jul 2023 00:32:01 -0400
Subject: add restart sampler
---
modules/sd_samplers_kdiffusion.py | 70 +++++++++++++++++++++++++++++++++++++--
1 file changed, 68 insertions(+), 2 deletions(-)
(limited to 'modules')
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 71581b76..c63b677c 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -1,3 +1,5 @@
+# export PIP_CACHE_DIR=/scratch/dengm/cache
+# export XDG_CACHE_HOME=/scratch/dengm/cache
from collections import deque
import torch
import inspect
@@ -30,12 +32,76 @@ samplers_k_diffusion = [
('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}),
('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}),
+ ('Restart (new)', 'restart_sampler', ['restart'], {'scheduler': 'karras', "second_order": True}),
]
+
+@torch.no_grad()
+def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list = {0.1: [10, 2, 2]}):
+ """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)"""
+ '''Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}'''
+
+ from tqdm.auto import trange, tqdm
+ extra_args = {} if extra_args is None else extra_args
+ s_in = x.new_ones([x.shape[0]])
+ step_id = 0
+
+ from k_diffusion.sampling import to_d, append_zero
+
+ def heun_step(x, old_sigma, new_sigma):
+ nonlocal step_id
+ denoised = model(x, old_sigma * s_in, **extra_args)
+ d = to_d(x, old_sigma, denoised)
+ if callback is not None:
+ callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised})
+ dt = new_sigma - old_sigma
+ if new_sigma == 0:
+ # Euler method
+ x = x + d * dt
+ else:
+ # Heun's method
+ x_2 = x + d * dt
+ denoised_2 = model(x_2, new_sigma * s_in, **extra_args)
+ d_2 = to_d(x_2, new_sigma, denoised_2)
+ d_prime = (d + d_2) / 2
+ x = x + d_prime * dt
+ step_id += 1
+ return x
+ # print(sigmas)
+ temp_list = dict()
+ for key, value in restart_list.items():
+ temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value
+ restart_list = temp_list
+
+
+ def get_sigmas_karras(n, sigma_min, sigma_max, rho=7., device='cpu'):
+ ramp = torch.linspace(0, 1, n).to(device)
+ min_inv_rho = (sigma_min ** (1 / rho))
+ max_inv_rho = (sigma_max ** (1 / rho))
+ if isinstance(min_inv_rho, torch.Tensor):
+ min_inv_rho = min_inv_rho.to(device)
+ if isinstance(max_inv_rho, torch.Tensor):
+ max_inv_rho = max_inv_rho.to(device)
+ sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
+ return append_zero(sigmas).to(device)
+
+ for i in trange(len(sigmas) - 1, disable=disable):
+ x = heun_step(x, sigmas[i], sigmas[i+1])
+ if i + 1 in restart_list:
+ restart_steps, restart_times, restart_max = restart_list[i + 1]
+ min_idx = i + 1
+ max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0))
+ sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx], sigmas[max_idx], device=sigmas.device)[:-1] # remove the zero at the end
+ for times in range(restart_times):
+ x = x + torch.randn_like(x) * s_noise * (sigmas[max_idx] ** 2 - sigmas[min_idx] ** 2) ** 0.5
+ for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:]):
+ x = heun_step(x, old_sigma, new_sigma)
+ return x
+
samplers_data_k_diffusion = [
sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
for label, funcname, aliases, options in samplers_k_diffusion
- if hasattr(k_diffusion.sampling, funcname)
+ if (hasattr(k_diffusion.sampling, funcname) or funcname == 'restart_sampler')
]
sampler_extra_params = {
@@ -245,7 +311,7 @@ class KDiffusionSampler:
self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization)
self.funcname = funcname
- self.func = getattr(k_diffusion.sampling, self.funcname)
+ self.func = getattr(k_diffusion.sampling, self.funcname) if funcname != "restart_sampler" else restart_sampler
self.extra_params = sampler_extra_params.get(funcname, [])
self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
self.sampler_noises = None
--
cgit v1.2.3
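restart_list maps a sigma level to [restart_steps, restart_times, max_sigma]: once the main Heun loop reaches the step whose sigma is closest to that level, noise with standard deviation s_noise * sqrt(sigma_max^2 - sigma_min^2) is injected and a short Karras-spaced segment between the two sigmas is re-integrated, restart_times times. A rough standalone version of the local get_sigmas_karras helper used to build those segments (float arguments only, unlike the tensor-aware copy in the patch):

    import torch

    def get_sigmas_karras(n, sigma_min, sigma_max, rho=7.0, device='cpu'):
        # sigma_i = (sigma_max^(1/rho) + t_i * (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho, t_i in [0, 1]
        ramp = torch.linspace(0, 1, n, device=device)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return torch.cat([sigmas, sigmas.new_zeros([1])])  # append_zero equivalent

    # e.g. a 10-step restart segment climbing back from sigma=0.1 to sigma=2.0 (trailing zero dropped)
    print(get_sigmas_karras(10, 0.1, 2.0)[:-1])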
From 15a94d6cf7fa075c09362e73c1239692d021c559 Mon Sep 17 00:00:00 2001
From: lambertae
Date: Tue, 18 Jul 2023 00:39:26 -0400
Subject: remove useless header
---
modules/sd_samplers_kdiffusion.py | 2 --
1 file changed, 2 deletions(-)
(limited to 'modules')
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index c63b677c..7888d864 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -1,5 +1,3 @@
-# export PIP_CACHE_DIR=/scratch/dengm/cache
-# export XDG_CACHE_HOME=/scratch/dengm/cache
from collections import deque
import torch
import inspect
--
cgit v1.2.3
From f0e2098f1a533c88396536282c1d6cd7d847a51c Mon Sep 17 00:00:00 2001
From: brkirch
Date: Mon, 17 Jul 2023 23:39:38 -0400
Subject: Add support for `--upcast-sampling` with SD XL
---
modules/sd_hijack_unet.py | 8 +++++++-
modules/sd_models.py | 2 +-
2 files changed, 8 insertions(+), 2 deletions(-)
(limited to 'modules')
diff --git a/modules/sd_hijack_unet.py b/modules/sd_hijack_unet.py
index ca1daf45..2101f1a0 100644
--- a/modules/sd_hijack_unet.py
+++ b/modules/sd_hijack_unet.py
@@ -39,7 +39,10 @@ def apply_model(orig_func, self, x_noisy, t, cond, **kwargs):
if isinstance(cond, dict):
for y in cond.keys():
- cond[y] = [x.to(devices.dtype_unet) if isinstance(x, torch.Tensor) else x for x in cond[y]]
+ if isinstance(cond[y], list):
+ cond[y] = [x.to(devices.dtype_unet) if isinstance(x, torch.Tensor) else x for x in cond[y]]
+ else:
+ cond[y] = cond[y].to(devices.dtype_unet) if isinstance(cond[y], torch.Tensor) else cond[y]
with devices.autocast():
return orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs).float()
@@ -77,3 +80,6 @@ first_stage_sub = lambda orig_func, self, x, **kwargs: orig_func(self, x.to(devi
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond)
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.get_first_stage_encoding', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).float(), first_stage_cond)
+
+CondFunc('sgm.modules.diffusionmodules.wrappers.OpenAIWrapper.forward', apply_model, unet_needs_upcast)
+CondFunc('sgm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 4d9382dd..5813b550 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -326,7 +326,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
timer.record("apply half()")
- devices.dtype_unet = model.model.diffusion_model.dtype
+ devices.dtype_unet = torch.float16 if model.is_sdxl and not shared.cmd_opts.no_half else model.model.diffusion_model.dtype
devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
model.first_stage_model.to(devices.dtype_vae)
--
cgit v1.2.3
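The interesting part of the upcast change is apply_model: the original list comprehension assumed every value in the conditioning dict is a list of tensors, while the SDXL path can hand over bare tensors, so the new branch casts both shapes to the UNet dtype. A tiny illustration on made-up conditioning (key names and shapes are examples only, not the exact SDXL keys):

    import torch

    dtype_unet = torch.float16

    def cast_cond_to_unet_dtype(cond):
        for y in cond.keys():
            if isinstance(cond[y], list):
                cond[y] = [x.to(dtype_unet) if isinstance(x, torch.Tensor) else x for x in cond[y]]
            else:
                cond[y] = cond[y].to(dtype_unet) if isinstance(cond[y], torch.Tensor) else cond[y]
        return cond

    sd15_style = {"c_crossattn": [torch.zeros(1, 77, 768)], "c_concat": []}
    sdxl_style = {"crossattn": torch.zeros(1, 77, 2048), "vector": torch.zeros(1, 2816)}
    for cond in (sd15_style, sdxl_style):
        cast_cond_to_unet_dtype(cond)
    print(sd15_style["c_crossattn"][0].dtype, sdxl_style["crossattn"].dtype)  # torch.float16 torch.float16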
From 37e048a7e2356f4caebfd976351112f03856f082 Mon Sep 17 00:00:00 2001
From: lambertae
Date: Tue, 18 Jul 2023 00:55:02 -0400
Subject: fix floating error
---
modules/sd_samplers_kdiffusion.py | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
(limited to 'modules')
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 7888d864..1bb25adf 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -89,11 +89,12 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No
restart_steps, restart_times, restart_max = restart_list[i + 1]
min_idx = i + 1
max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0))
- sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx], sigmas[max_idx], device=sigmas.device)[:-1] # remove the zero at the end
- for times in range(restart_times):
- x = x + torch.randn_like(x) * s_noise * (sigmas[max_idx] ** 2 - sigmas[min_idx] ** 2) ** 0.5
- for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:]):
- x = heun_step(x, old_sigma, new_sigma)
+ if max_idx < min_idx:
+ sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx], sigmas[max_idx], device=sigmas.device)[:-1] # remove the zero at the end
+ for times in range(restart_times):
+ x = x + torch.randn_like(x) * s_noise * (sigmas[max_idx] ** 2 - sigmas[min_idx] ** 2) ** 0.5
+ for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:]):
+ x = heun_step(x, old_sigma, new_sigma)
return x
samplers_data_k_diffusion = [
--
cgit v1.2.3
From 7bb0fbed136c6a345b211e09102659fd89362576 Mon Sep 17 00:00:00 2001
From: lambertae
Date: Tue, 18 Jul 2023 01:02:04 -0400
Subject: code styling
---
modules/sd_samplers_kdiffusion.py | 14 +++++---------
1 file changed, 5 insertions(+), 9 deletions(-)
(limited to 'modules')
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 1bb25adf..db7013f2 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -35,17 +35,15 @@ samplers_k_diffusion = [
@torch.no_grad()
-def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list = {0.1: [10, 2, 2]}):
+def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1.):
"""Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)"""
'''Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}'''
-
- from tqdm.auto import trange, tqdm
+ restart_list = {0.1: [10, 2, 2]}
+ from tqdm.auto import trange
extra_args = {} if extra_args is None else extra_args
s_in = x.new_ones([x.shape[0]])
step_id = 0
-
from k_diffusion.sampling import to_d, append_zero
-
def heun_step(x, old_sigma, new_sigma):
nonlocal step_id
denoised = model(x, old_sigma * s_in, **extra_args)
@@ -70,8 +68,6 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No
for key, value in restart_list.items():
temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value
restart_list = temp_list
-
-
def get_sigmas_karras(n, sigma_min, sigma_max, rho=7., device='cpu'):
ramp = torch.linspace(0, 1, n).to(device)
min_inv_rho = (sigma_min ** (1 / rho))
@@ -82,7 +78,6 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No
max_inv_rho = max_inv_rho.to(device)
sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return append_zero(sigmas).to(device)
-
for i in trange(len(sigmas) - 1, disable=disable):
x = heun_step(x, sigmas[i], sigmas[i+1])
if i + 1 in restart_list:
@@ -91,7 +86,8 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No
max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0))
if max_idx < min_idx:
sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx], sigmas[max_idx], device=sigmas.device)[:-1] # remove the zero at the end
- for times in range(restart_times):
+ while restart_times > 0:
+ restart_times -= 1
x = x + torch.randn_like(x) * s_noise * (sigmas[max_idx] ** 2 - sigmas[min_idx] ** 2) ** 0.5
for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:]):
x = heun_step(x, old_sigma, new_sigma)
--
cgit v1.2.3
From d6668347c8b85b11b696ac56777cc396e34ee1f9 Mon Sep 17 00:00:00 2001
From: Leon Feng
Date: Tue, 18 Jul 2023 04:19:58 -0400
Subject: remove duplicate
---
modules/textual_inversion/logging.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'modules')
diff --git a/modules/textual_inversion/logging.py b/modules/textual_inversion/logging.py
index 734a4b6f..a822a7a5 100644
--- a/modules/textual_inversion/logging.py
+++ b/modules/textual_inversion/logging.py
@@ -2,7 +2,7 @@ import datetime
import json
import os
-saved_params_shared = {"model_name", "model_hash", "initial_step", "num_of_dataset_images", "learn_rate", "batch_size", "clip_grad_mode", "clip_grad_value", "gradient_step", "data_root", "log_directory", "training_width", "training_height", "steps", "create_image_every", "template_file", "gradient_step", "latent_sampling_method"}
+saved_params_shared = {"model_name", "model_hash", "initial_step", "num_of_dataset_images", "learn_rate", "batch_size", "clip_grad_mode", "clip_grad_value", "data_root", "log_directory", "training_width", "training_height", "steps", "create_image_every", "template_file", "gradient_step", "latent_sampling_method"}
saved_params_ti = {"embedding_name", "num_vectors_per_token", "save_embedding_every", "save_image_with_stored_embedding"}
saved_params_hypernet = {"hypernetwork_name", "layer_structure", "activation_func", "weight_init", "add_layer_norm", "use_dropout", "save_hypernetwork_every"}
saved_params_all = saved_params_shared | saved_params_ti | saved_params_hypernet
--
cgit v1.2.3
From 420cc8f68e6aca8a3a0f42ee0e626a6b03712763 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 18 Jul 2023 11:48:40 +0300
Subject: also make None a valid option for options API for #11854
---
modules/api/models.py | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
(limited to 'modules')
diff --git a/modules/api/models.py b/modules/api/models.py
index b5683071..b55fa728 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -1,4 +1,6 @@
import inspect
+import types
+
from pydantic import BaseModel, Field, create_model
from typing import Any, Optional
from typing_extensions import Literal
@@ -207,11 +209,14 @@ class PreprocessResponse(BaseModel):
fields = {}
for key, metadata in opts.data_labels.items():
value = opts.data.get(key)
+ if key == 'sd_model_checkpoint':
+ value = None
optType = opts.typemap.get(type(metadata.default), type(value))
- if (metadata is not None):
- fields.update({key: (Optional[optType], Field(
- default=metadata.default ,description=metadata.label))})
+ if optType == types.NoneType:
+ pass
+ elif metadata is not None:
+ fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
else:
fields.update({key: (Optional[optType], Field())})
--
cgit v1.2.3
From 3c570421d3a2eb24528b5f5bb615dcb0c7717e4a Mon Sep 17 00:00:00 2001
From: wfjsw
Date: Tue, 18 Jul 2023 19:00:16 +0800
Subject: move start timer
---
launch.py | 4 +++-
modules/api/models.py | 2 +-
modules/launch_utils.py | 3 ---
3 files changed, 4 insertions(+), 5 deletions(-)
(limited to 'modules')
diff --git a/launch.py b/launch.py
index b103c8f3..e9667c88 100644
--- a/launch.py
+++ b/launch.py
@@ -1,4 +1,4 @@
-from modules import launch_utils
+from modules import launch_utils, timer
args = launch_utils.args
@@ -25,6 +25,8 @@ start = launch_utils.start
def main():
+ timer.startup_timer.record("start")
+
if not args.skip_prepare_environment:
prepare_environment()
diff --git a/modules/api/models.py b/modules/api/models.py
index b55fa728..96cfe920 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -213,7 +213,7 @@ for key, metadata in opts.data_labels.items():
value = None
optType = opts.typemap.get(type(metadata.default), type(value))
- if optType == types.NoneType:
+ if isinstance(optType, types.NoneType):
pass
elif metadata is not None:
fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 03552bc2..ea995eda 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -10,9 +10,6 @@ from functools import lru_cache
from modules import cmd_args, errors
from modules.paths_internal import script_path, extensions_dir
-from modules import timer
-
-timer.startup_timer.record("start")
args, _ = cmd_args.parser.parse_known_args()
--
cgit v1.2.3
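Each Timer.record(name) call stores the time elapsed since the previous checkpoint under that name, so moving the first record("start") out of launch_utils' import side effects and into launch.py's main() keeps the checkpoint explicit instead of depending on when the module first gets imported. A minimal sketch of such a checkpoint timer; the real modules/timer.py is more elaborate, this only shows the idea:

    import time

    class Timer:
        def __init__(self):
            self.start = time.time()
            self.records = {}
            self.total = 0.0

        def record(self, category):
            # store the interval since the previous checkpoint under `category`
            now = time.time()
            elapsed = now - self.start
            self.records[category] = self.records.get(category, 0.0) + elapsed
            self.total += elapsed
            self.start = now

        def summary(self):
            parts = ", ".join(f"{k}: {v:.1f}s" for k, v in self.records.items())
            return f"{self.total:.1f}s ({parts})"

    startup_timer = Timer()
    time.sleep(0.05); startup_timer.record("start")
    time.sleep(0.05); startup_timer.record("prepare environment")
    print(startup_timer.summary())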
From ed82f1c5f1677c85298f4d2c6c030a5551682c71 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 18 Jul 2023 15:55:23 +0300
Subject: lint
---
modules/api/models.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'modules')
diff --git a/modules/api/models.py b/modules/api/models.py
index b55fa728..96cfe920 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -213,7 +213,7 @@ for key, metadata in opts.data_labels.items():
value = None
optType = opts.typemap.get(type(metadata.default), type(value))
- if optType == types.NoneType:
+ if isinstance(optType, types.NoneType):
pass
elif metadata is not None:
fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
--
cgit v1.2.3
From 66c5f1bb1556a2d86d9f11aeb92f83d4a09832cc Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 18 Jul 2023 17:41:37 +0300
Subject: return sd_model_checkpoint to None
---
modules/shared.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'modules')
diff --git a/modules/shared.py b/modules/shared.py
index a256d090..6162938a 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -409,7 +409,7 @@ options_templates.update(options_section(('training', "Training"), {
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
- "sd_model_checkpoint": OptionInfo("", "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
+ "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
"sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"),
--
cgit v1.2.3
From b270ded268c92950a35a7a326da54496ef4151c8 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 18 Jul 2023 18:10:04 +0300
Subject: fix the issue with /sdapi/v1/options failing (this time for sure!)
fix automated tests downloading CLIP model
---
.github/workflows/run_tests.yaml | 1 +
modules/api/models.py | 6 ++----
modules/cmd_args.py | 1 +
modules/sd_models.py | 2 +-
4 files changed, 5 insertions(+), 5 deletions(-)
(limited to 'modules')
diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml
index e9370cc0..3dafaf8d 100644
--- a/.github/workflows/run_tests.yaml
+++ b/.github/workflows/run_tests.yaml
@@ -41,6 +41,7 @@ jobs:
--skip-prepare-environment
--skip-torch-cuda-test
--test-server
+ --do-not-download-clip
--no-half
--disable-opt-split-attention
--use-cpu all
diff --git a/modules/api/models.py b/modules/api/models.py
index 96cfe920..4cd20a92 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -209,11 +209,9 @@ class PreprocessResponse(BaseModel):
fields = {}
for key, metadata in opts.data_labels.items():
value = opts.data.get(key)
- if key == 'sd_model_checkpoint':
- value = None
- optType = opts.typemap.get(type(metadata.default), type(value))
+ optType = opts.typemap.get(type(metadata.default), type(metadata.default))
- if isinstance(optType, types.NoneType):
+ if metadata.default is None:
pass
elif metadata is not None:
fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index ae78f469..e401f641 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -15,6 +15,7 @@ parser.add_argument("--update-check", action='store_true', help="launch.py argum
parser.add_argument("--test-server", action='store_true', help="launch.py argument: configure server for testing")
parser.add_argument("--skip-prepare-environment", action='store_true', help="launch.py argument: skip all environment preparation")
parser.add_argument("--skip-install", action='store_true', help="launch.py argument: skip installation of packages")
+parser.add_argument("--do-not-download-clip", action='store_true', help="do not download CLIP model even if it's not included in the checkpoint")
parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored")
parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 5813b550..fb31a793 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -494,7 +494,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
sd_model = None
try:
- with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd):
+ with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd or shared.cmd_opts.do_not_download_clip):
sd_model = instantiate_from_config(sd_config.model)
except Exception:
pass
--
cgit v1.2.3
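After this fix the pydantic options model is driven entirely by metadata.default: the field type comes from typemap (or the default's own type), and any option whose default is None is simply left out of the generated model instead of tripping create_model. A stripped-down sketch of that field-building loop; the option names are only an example subset and typemap handling is omitted:

    from typing import Optional
    from pydantic import Field, create_model

    # hypothetical subset of opts.data_labels: option name -> (default, label)
    data_labels = {
        "sd_model_checkpoint": (None, "Stable Diffusion checkpoint"),  # default None -> skipped
        "sd_checkpoint_cache": (0, "Checkpoints to cache in RAM"),
        "sd_vae": ("Automatic", "SD VAE"),
    }

    fields = {}
    for key, (default, label) in data_labels.items():
        if default is None:
            continue
        fields[key] = (Optional[type(default)], Field(default=default, description=label))

    OptionsModel = create_model("OptionsModel", **fields)
    print(OptionsModel())  # OptionsModel(sd_checkpoint_cache=0, sd_vae='Automatic')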
From 7f7db1700bda40ba3171a49b6a4ef38f868b7d0a Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 18 Jul 2023 18:16:23 +0300
Subject: linter fix
---
modules/api/models.py | 1 -
1 file changed, 1 deletion(-)
(limited to 'modules')
diff --git a/modules/api/models.py b/modules/api/models.py
index 4cd20a92..bf97b1a3 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -1,5 +1,4 @@
import inspect
-import types
from pydantic import BaseModel, Field, create_model
from typing import Any, Optional
--
cgit v1.2.3
From 136c8859a49a35cbffe269aafc0bbdfca0b3561d Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 18 Jul 2023 20:11:30 +0300
Subject: add backwards compatibility --lyco-dir-backcompat option, use that
 for LyCORIS directory instead of hardcoded value; prevent running preload.py
for disabled extensions
---
CHANGELOG.md | 4 +---
extensions-builtin/Lora/networks.py | 4 ++--
extensions-builtin/Lora/preload.py | 1 +
extensions-builtin/Lora/ui_extra_networks_lora.py | 4 ++--
launch.py | 1 +
modules/script_loading.py | 5 +++--
modules/shared.py | 3 ++-
7 files changed, 12 insertions(+), 10 deletions(-)
(limited to 'modules')
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 007010da..792529ec 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -58,9 +58,7 @@
* fix: check fill size none zero when resize (fixes #11425)
* use submit and blur for quick settings textbox
* save img2img batch with images.save_image()
- *
-
-
+ * prevent running preload.py for disabled extensions
## 1.4.1
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 7b4c0312..af8188e3 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -11,7 +11,7 @@ import network_full
import torch
from typing import Union
-from modules import shared, devices, sd_models, errors, scripts, sd_hijack, paths
+from modules import shared, devices, sd_models, errors, scripts, sd_hijack
module_types = [
network_lora.ModuleTypeLora(),
@@ -399,7 +399,7 @@ def list_available_networks():
os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
- candidates += list(shared.walk_files(os.path.join(paths.models_path, "LyCORIS"), allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
+ candidates += list(shared.walk_files(shared.cmd_opts.lyco_dir_backcompat, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
for filename in candidates:
if os.path.isdir(filename):
continue
diff --git a/extensions-builtin/Lora/preload.py b/extensions-builtin/Lora/preload.py
index 863dc5c0..50961be3 100644
--- a/extensions-builtin/Lora/preload.py
+++ b/extensions-builtin/Lora/preload.py
@@ -4,3 +4,4 @@ from modules import paths
def preload(parser):
parser.add_argument("--lora-dir", type=str, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora'))
+ parser.add_argument("--lyco-dir-backcompat", type=str, help="Path to directory with LyCORIS networks (for backwards compatibility; can also use --lyco-dir).", default=os.path.join(paths.models_path, 'LyCORIS'))
diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py
index 4b32098b..3629e5c0 100644
--- a/extensions-builtin/Lora/ui_extra_networks_lora.py
+++ b/extensions-builtin/Lora/ui_extra_networks_lora.py
@@ -3,7 +3,7 @@ import os
import network
import networks
-from modules import shared, ui_extra_networks, paths
+from modules import shared, ui_extra_networks
from modules.ui_extra_networks import quote_js
from ui_edit_user_metadata import LoraUserMetadataEditor
@@ -72,7 +72,7 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
yield item
def allowed_directories_for_previews(self):
- return [shared.cmd_opts.lora_dir, os.path.join(paths.models_path, "LyCORIS")]
+ return [shared.cmd_opts.lora_dir, shared.cmd_opts.lyco_dir_backcompat]
def create_user_metadata_editor(self, ui, tabname):
return LoraUserMetadataEditor(ui, tabname, self)
diff --git a/launch.py b/launch.py
index b103c8f3..1dbc4c6e 100644
--- a/launch.py
+++ b/launch.py
@@ -18,6 +18,7 @@ run_pip = launch_utils.run_pip
check_run_python = launch_utils.check_run_python
git_clone = launch_utils.git_clone
git_pull_recursive = launch_utils.git_pull_recursive
+list_extensions = launch_utils.list_extensions
run_extension_installer = launch_utils.run_extension_installer
prepare_environment = launch_utils.prepare_environment
configure_for_tests = launch_utils.configure_for_tests
diff --git a/modules/script_loading.py b/modules/script_loading.py
index 306a1f35..0d55f193 100644
--- a/modules/script_loading.py
+++ b/modules/script_loading.py
@@ -12,11 +12,12 @@ def load_module(path):
return module
-def preload_extensions(extensions_dir, parser):
+def preload_extensions(extensions_dir, parser, extension_list=None):
if not os.path.isdir(extensions_dir):
return
- for dirname in sorted(os.listdir(extensions_dir)):
+ extensions = extension_list if extension_list is not None else os.listdir(extensions_dir)
+ for dirname in sorted(extensions):
preload_script = os.path.join(extensions_dir, dirname, "preload.py")
if not os.path.isfile(preload_script):
continue
diff --git a/modules/shared.py b/modules/shared.py
index 6162938a..1ce7b49e 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -11,6 +11,7 @@ import gradio as gr
import torch
import tqdm
+import launch
import modules.interrogate
import modules.memmon
import modules.styles
@@ -26,7 +27,7 @@ demo = None
parser = cmd_args.parser
-script_loading.preload_extensions(extensions_dir, parser)
+script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file))
script_loading.preload_extensions(extensions_builtin_dir, parser)
if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None:
--
cgit v1.2.3
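The preload mechanism this commit gates on the enabled-extensions list is a per-extension preload.py exposing preload(parser), called before the main command line is parsed; that is also how --lyco-dir-backcompat above gets registered by the built-in Lora extension. A hypothetical user extension's preload.py would look like this (the flag name and default path are made up for illustration):

    # extensions/my-extension/preload.py  (hypothetical example)
    import os

    def preload(parser):
        # called by script_loading.preload_extensions() before command line args are parsed;
        # with this commit it is skipped when the extension is disabled in the settings file
        parser.add_argument(
            "--my-extension-models-dir",
            type=str,
            default=os.path.join("models", "MyExtension"),
            help="Path to directory with my-extension model files.",
        )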
From c8b55f29e2838e67bd9e394f5dbca4350ccbb68f Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Wed, 19 Jul 2023 08:27:19 +0900
Subject: missing p save_image before-highres-fix
---
modules/processing.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'modules')
diff --git a/modules/processing.py b/modules/processing.py
index 6567b3cf..b89ca5c2 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1029,7 +1029,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
image = sd_samplers.sample_to_image(image, index, approximation=0)
info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index)
- images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, suffix="-before-highres-fix")
+ images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, p=self, suffix="-before-highres-fix")
if latent_scale_mode is not None:
for i in range(samples.shape[0]):
--
cgit v1.2.3
From cb7573489670cc7a042d24285e158b797c9558b2 Mon Sep 17 00:00:00 2001
From: yfzhou
Date: Wed, 19 Jul 2023 17:53:28 +0800
Subject: [bug] reload altclip model error
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
When using BertSeriesModelWithTransformation as the cond_stage_model, the undo_hijack should be performed using the FrozenXLMREmbedderWithCustomWords type; otherwise, it will result in a failed model reload.
---
modules/sd_hijack.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'modules')
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 3b6f95ce..928233ab 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -203,7 +203,7 @@ class StableDiffusionModelHijack:
ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = sd_unet.UNetModel_forward
def undo_hijack(self, m):
- if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation:
+ if type(m.cond_stage_model) == sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords:
m.cond_stage_model = m.cond_stage_model.wrapped
elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords:
--
cgit v1.2.3
From 4334d25978ded517a76359e9e92b8101610cc35f Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 19 Jul 2023 15:49:31 +0300
Subject: bugfix: model name was added together with directory name to infotext
and to [model_name] filename pattern
---
CHANGELOG.md | 1 +
modules/images.py | 2 +-
modules/processing.py | 2 +-
3 files changed, 3 insertions(+), 2 deletions(-)
(limited to 'modules')
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 792529ec..a561252c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -59,6 +59,7 @@
* use submit and blur for quick settings textbox
* save img2img batch with images.save_image()
* prevent running preload.py for disabled extensions
+ * fix: previously, model name was added together with directory name to infotext and to [model_name] filename pattern; directory name is now not included
## 1.4.1
diff --git a/modules/images.py b/modules/images.py
index fb5d2e75..38aa933d 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -363,7 +363,7 @@ class FilenameGenerator:
'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False),
'sampler': lambda self: self.p and sanitize_filename_part(self.p.sampler_name, replace_spaces=False),
'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash),
- 'model_name': lambda self: sanitize_filename_part(shared.sd_model.sd_checkpoint_info.model_name, replace_spaces=False),
+ 'model_name': lambda self: sanitize_filename_part(shared.sd_model.sd_checkpoint_info.name_for_extra, replace_spaces=False),
'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
'datetime': lambda self, *args: self.datetime(*args), # accepts formats: [datetime], [datetime], [datetime