From a68f4690307b0f94404b85513429e18ee3936589 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Mon, 24 Jul 2023 17:54:59 -0400
Subject: Fix to parse TE in some LoRAs
---
extensions-builtin/Lora/networks.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index af8188e3..3a8cfa3b 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -161,7 +161,7 @@ def load_network(name, network_on_disk):
sd_module = shared.sd_model.network_layer_mapping.get(key, None)
elif sd_module is None and "lora_te1_text_model" in key_network_without_network_parts:
key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
- sd_module = shared.sd_model.network_layer_mapping.get(key, None)
+ sd_module = shared.sd_model.network_layer_mapping.get(key, None) or shared.sd_model.network_layer_mapping.get(key[2:], None)
if sd_module is None:
keys_failed_to_match[key_network] = key
--
cgit v1.2.3
From d0bf509fa14babebedbaef121ef54599003aa457 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 25 Jul 2023 16:18:10 +0300
Subject: fix for #11963
---
extensions-builtin/Lora/networks.py | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 3a8cfa3b..17cbe1bb 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -161,7 +161,12 @@ def load_network(name, network_on_disk):
sd_module = shared.sd_model.network_layer_mapping.get(key, None)
elif sd_module is None and "lora_te1_text_model" in key_network_without_network_parts:
key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
- sd_module = shared.sd_model.network_layer_mapping.get(key, None) or shared.sd_model.network_layer_mapping.get(key[2:], None)
+ sd_module = shared.sd_model.network_layer_mapping.get(key, None)
+
+ # some SD1 Loras also have correct compvis keys
+ if sd_module is None:
+ key = key_network_without_network_parts.replace("lora_te1_text_model", "transformer_text_model")
+ sd_module = shared.sd_model.network_layer_mapping.get(key, None)
if sd_module is None:
keys_failed_to_match[key_network] = key
--
cgit v1.2.3
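A minimal sketch of the key resolution the two patches above converge on: first try the SDXL-style key with its "0_" prefix, then fall back to the unprefixed compvis key that some SD1 LoRAs carry. The mapping contents here are hypothetical; only the key manipulation mirrors the patch.

    def resolve_te_module(mapping, key_without_network_parts):
        # SDXL checkpoints expose the first text encoder under a "0_" prefix
        key = key_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
        sd_module = mapping.get(key)

        # some SD1 LoRAs also have correct compvis keys, without the prefix
        if sd_module is None:
            key = key_without_network_parts.replace("lora_te1_text_model", "transformer_text_model")
            sd_module = mapping.get(key)

        return sd_module

    mapping = {"transformer_text_model_encoder_layers_0": object()}  # hypothetical
    assert resolve_te_module(mapping, "lora_te1_text_model_encoder_layers_0") is not None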
From 91a131aa6ca26d35f7879d8240e4f6e321130160 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Thu, 27 Jul 2023 09:00:47 +0300
Subject: update lora extension to work with python 3.8
---
extensions-builtin/Lora/network.py | 1 +
1 file changed, 1 insertion(+)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py
index 8ecfa29a..0a18d69e 100644
--- a/extensions-builtin/Lora/network.py
+++ b/extensions-builtin/Lora/network.py
@@ -1,3 +1,4 @@
+from __future__ import annotations
import os
from collections import namedtuple
import enum
--
cgit v1.2.3
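The single added import is what makes the extension parse on Python 3.8: with PEP 563's future import, annotations are stored as strings instead of being evaluated, so newer syntax such as built-in generics and | unions no longer fails at definition time. A small illustration with a hypothetical function:

    from __future__ import annotations

    # without the future import this def raises TypeError on Python 3.8:
    # list[str] needs 3.9+ and the "str | None" union needs 3.10+ when the
    # annotations are evaluated; with it, they stay unevaluated strings
    def first_name(names: list[str], default: str | None = None) -> str | None:
        return names[0] if names else default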
From 362789a3793025c698fa42372fd66c3c4f2d6413 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Fri, 4 Aug 2023 07:50:17 +0300
Subject: gradio 3.39
---
extensions-builtin/Lora/ui_edit_user_metadata.py | 2 +-
modules/gradio_extensons.py | 60 ++++++++++++++++++++++++
modules/scripts.py | 60 ------------------------
modules/shared.py | 3 +-
modules/ui.py | 24 +++++-----
modules/ui_checkpoint_merger.py | 2 +-
modules/ui_common.py | 2 +-
modules/ui_components.py | 2 +-
modules/ui_extensions.py | 8 ++--
modules/ui_postprocessing.py | 2 +-
requirements.txt | 2 +-
requirements_versions.txt | 2 +-
style.css | 12 ++++-
13 files changed, 95 insertions(+), 86 deletions(-)
create mode 100644 modules/gradio_extensons.py
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/ui_edit_user_metadata.py b/extensions-builtin/Lora/ui_edit_user_metadata.py
index 2ca997f7..390d9dde 100644
--- a/extensions-builtin/Lora/ui_edit_user_metadata.py
+++ b/extensions-builtin/Lora/ui_edit_user_metadata.py
@@ -167,7 +167,7 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
random_prompt = gr.Textbox(label='Random prompt', lines=4, max_lines=4, interactive=False)
with gr.Column(scale=1, min_width=120):
- generate_random_prompt = gr.Button('Generate').style(full_width=True, size="lg")
+ generate_random_prompt = gr.Button('Generate', size="lg", scale=1)
self.edit_notes = gr.TextArea(label='Notes', lines=4)
diff --git a/modules/gradio_extensons.py b/modules/gradio_extensons.py
new file mode 100644
index 00000000..5af7fd8e
--- /dev/null
+++ b/modules/gradio_extensons.py
@@ -0,0 +1,60 @@
+import gradio as gr
+
+from modules import scripts
+
+def add_classes_to_gradio_component(comp):
+ """
+ this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others
+ """
+
+ comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])]
+
+ if getattr(comp, 'multiselect', False):
+ comp.elem_classes.append('multiselect')
+
+
+def IOComponent_init(self, *args, **kwargs):
+ self.webui_tooltip = kwargs.pop('tooltip', None)
+
+ if scripts.scripts_current is not None:
+ scripts.scripts_current.before_component(self, **kwargs)
+
+ scripts.script_callbacks.before_component_callback(self, **kwargs)
+
+ res = original_IOComponent_init(self, *args, **kwargs)
+
+ add_classes_to_gradio_component(self)
+
+ scripts.script_callbacks.after_component_callback(self, **kwargs)
+
+ if scripts.scripts_current is not None:
+ scripts.scripts_current.after_component(self, **kwargs)
+
+ return res
+
+
+def Block_get_config(self):
+ config = original_Block_get_config(self)
+
+ webui_tooltip = getattr(self, 'webui_tooltip', None)
+ if webui_tooltip:
+ config["webui_tooltip"] = webui_tooltip
+
+ return config
+
+
+def BlockContext_init(self, *args, **kwargs):
+ res = original_BlockContext_init(self, *args, **kwargs)
+
+ add_classes_to_gradio_component(self)
+
+ return res
+
+
+original_IOComponent_init = gr.components.IOComponent.__init__
+original_Block_get_config = gr.blocks.Block.get_config
+original_BlockContext_init = gr.blocks.BlockContext.__init__
+
+gr.components.IOComponent.__init__ = IOComponent_init
+gr.blocks.Block.get_config = Block_get_config
+gr.blocks.BlockContext.__init__ = BlockContext_init
diff --git a/modules/scripts.py b/modules/scripts.py
index edf7347e..f7d060aa 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -631,63 +631,3 @@ def reload_script_body_only():
reload_scripts = load_scripts # compatibility alias
-
-
-def add_classes_to_gradio_component(comp):
- """
- this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others
- """
-
- comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])]
-
- if getattr(comp, 'multiselect', False):
- comp.elem_classes.append('multiselect')
-
-
-
-def IOComponent_init(self, *args, **kwargs):
- self.webui_tooltip = kwargs.pop('tooltip', None)
-
- if scripts_current is not None:
- scripts_current.before_component(self, **kwargs)
-
- script_callbacks.before_component_callback(self, **kwargs)
-
- res = original_IOComponent_init(self, *args, **kwargs)
-
- add_classes_to_gradio_component(self)
-
- script_callbacks.after_component_callback(self, **kwargs)
-
- if scripts_current is not None:
- scripts_current.after_component(self, **kwargs)
-
- return res
-
-
-def Block_get_config(self):
- config = original_Block_get_config(self)
-
- webui_tooltip = getattr(self, 'webui_tooltip', None)
- if webui_tooltip:
- config["webui_tooltip"] = webui_tooltip
-
- return config
-
-
-original_IOComponent_init = gr.components.IOComponent.__init__
-original_Block_get_config = gr.components.Block.get_config
-gr.components.IOComponent.__init__ = IOComponent_init
-gr.components.Block.get_config = Block_get_config
-
-
-def BlockContext_init(self, *args, **kwargs):
- res = original_BlockContext_init(self, *args, **kwargs)
-
- add_classes_to_gradio_component(self)
-
- return res
-
-
-original_BlockContext_init = gr.blocks.BlockContext.__init__
-gr.blocks.BlockContext.__init__ = BlockContext_init
diff --git a/modules/shared.py b/modules/shared.py
index 7103b4ca..cec030f7 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -385,7 +385,8 @@ options_templates.update(options_section(('face-restoration', "Face restoration"
}))
options_templates.update(options_section(('system', "System"), {
- "show_warnings": OptionInfo(False, "Show warnings in console."),
+ "show_warnings": OptionInfo(False, "Show warnings in console.").needs_restart(),
+ "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_restart(),
"memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"),
"samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
"multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
diff --git a/modules/ui.py b/modules/ui.py
index 03306ba9..822a7660 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -12,6 +12,7 @@ import numpy as np
from PIL import Image, PngImagePlugin # noqa: F401
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
+from modules import gradio_extensons # noqa: F401
from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts
from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
from modules.paths import script_path
@@ -34,6 +35,7 @@ from modules.generation_parameters_copypaste import image_from_url_text
create_setting_component = ui_settings.create_setting_component
warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning)
+warnings.filterwarnings("default" if opts.show_gradio_deprecation_warnings else "ignore", category=gr.deprecation.GradioDeprecationWarning)
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
@@ -146,7 +148,6 @@ def interrogate_deepbooru(image):
def create_seed_inputs(target_interface):
with FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed")
- seed.style(container=False)
random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed')
reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed')
@@ -158,7 +159,6 @@ def create_seed_inputs(target_interface):
with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed")
- subseed.style(container=False)
random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed")
reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed")
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength")
@@ -408,7 +408,7 @@ def create_ui():
from modules import ui_extra_networks
extra_networks_ui = ui_extra_networks.create_ui(extra_networks, toprow.extra_networks_button, 'txt2img')
- with gr.Row().style(equal_height=False):
+ with gr.Row(equal_height=False):
with gr.Column(variant='compact', elem_id="txt2img_settings"):
scripts.scripts_txt2img.prepare_ui()
@@ -636,7 +636,7 @@ def create_ui():
from modules import ui_extra_networks
extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, toprow.extra_networks_button, 'img2img')
- with FormRow().style(equal_height=False):
+ with FormRow(equal_height=False):
with gr.Column(variant='compact', elem_id="img2img_settings"):
copy_image_buttons = []
copy_image_destinations = {}
@@ -658,19 +658,19 @@ def create_ui():
img2img_selected_tab = gr.State(0)
with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img:
- init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=opts.img2img_editor_height)
+ init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA", height=opts.img2img_editor_height)
add_copy_image_controls('img2img', init_img)
with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch:
- sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height)
+ sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height)
add_copy_image_controls('sketch', sketch)
with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint:
- init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=opts.img2img_editor_height)
+ init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height)
add_copy_image_controls('inpaint', init_img_with_mask)
with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color:
- inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height)
+ inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height)
inpaint_color_sketch_orig = gr.State(None)
add_copy_image_controls('inpaint_sketch', inpaint_color_sketch)
@@ -993,7 +993,7 @@ def create_ui():
ui_postprocessing.create_ui()
with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
- with gr.Row().style(equal_height=False):
+ with gr.Row(equal_height=False):
with gr.Column(variant='panel'):
image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")
@@ -1018,10 +1018,10 @@ def create_ui():
modelmerger_ui = ui_checkpoint_merger.UiCheckpointMerger()
with gr.Blocks(analytics_enabled=False) as train_interface:
- with gr.Row().style(equal_height=False):
+ with gr.Row(equal_height=False):
gr.HTML(value="
See wiki for detailed explanation.
")
- with gr.Row(variant="compact").style(equal_height=False):
+ with gr.Row(variant="compact", equal_height=False):
with gr.Tabs(elem_id="train_tabs"):
with gr.Tab(label="Create embedding", id="create_embedding"):
@@ -1181,7 +1181,7 @@ def create_ui():
with gr.Column(elem_id='ti_gallery_container'):
ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
- gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4)
+ gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery', columns=4)
gr.HTML(elem_id="ti_progress", value="")
ti_outcome = gr.HTML(elem_id="ti_error", value="")
diff --git a/modules/ui_checkpoint_merger.py b/modules/ui_checkpoint_merger.py
index 4863d861..f9c5dd6b 100644
--- a/modules/ui_checkpoint_merger.py
+++ b/modules/ui_checkpoint_merger.py
@@ -29,7 +29,7 @@ def modelmerger(*args):
class UiCheckpointMerger:
def __init__(self):
with gr.Blocks(analytics_enabled=False) as modelmerger_interface:
- with gr.Row().style(equal_height=False):
+ with gr.Row(equal_height=False):
with gr.Column(variant='compact'):
self.interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description")
diff --git a/modules/ui_common.py b/modules/ui_common.py
index ba75fa73..eefe0c0e 100644
--- a/modules/ui_common.py
+++ b/modules/ui_common.py
@@ -134,7 +134,7 @@ Requested path was: {f}
with gr.Column(variant='panel', elem_id=f"{tabname}_results"):
with gr.Group(elem_id=f"{tabname}_gallery_container"):
- result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery").style(columns=4)
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery", columns=4)
generation_info = None
with gr.Column():
diff --git a/modules/ui_components.py b/modules/ui_components.py
index 64451df7..8f8a7088 100644
--- a/modules/ui_components.py
+++ b/modules/ui_components.py
@@ -35,7 +35,7 @@ class FormColumn(FormComponent, gr.Column):
class FormGroup(FormComponent, gr.Group):
- """Same as gr.Row but fits inside gradio forms"""
+ """Same as gr.Group but fits inside gradio forms"""
def get_block_name(self):
return "group"
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index bd28bfcf..15a8b0bf 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -533,8 +533,8 @@ def create_ui():
apply = gr.Button(value=apply_label, variant="primary")
check = gr.Button(value="Check for updates")
extensions_disable_all = gr.Radio(label="Disable all extensions", choices=["none", "extra", "all"], value=shared.opts.disable_all_extensions, elem_id="extensions_disable_all")
- extensions_disabled_list = gr.Text(elem_id="extensions_disabled_list", visible=False).style(container=False)
- extensions_update_list = gr.Text(elem_id="extensions_update_list", visible=False).style(container=False)
+ extensions_disabled_list = gr.Text(elem_id="extensions_disabled_list", visible=False, container=False)
+ extensions_update_list = gr.Text(elem_id="extensions_update_list", visible=False, container=False)
html = ""
@@ -569,7 +569,7 @@ def create_ui():
with gr.Row():
refresh_available_extensions_button = gr.Button(value="Load from:", variant="primary")
extensions_index_url = os.environ.get('WEBUI_EXTENSIONS_INDEX', "https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui-extensions/master/index.json")
- available_extensions_index = gr.Text(value=extensions_index_url, label="Extension index URL").style(container=False)
+ available_extensions_index = gr.Text(value=extensions_index_url, label="Extension index URL", container=False)
extension_to_install = gr.Text(elem_id="extension_to_install", visible=False)
install_extension_button = gr.Button(elem_id="install_extension_button", visible=False)
@@ -578,7 +578,7 @@ def create_ui():
sort_column = gr.Radio(value="newest first", label="Order", choices=["newest first", "oldest first", "a-z", "z-a", "internal order",'update time', 'create time', "stars"], type="index")
with gr.Row():
- search_extensions_text = gr.Text(label="Search").style(container=False)
+ search_extensions_text = gr.Text(label="Search", container=False)
install_result = gr.HTML()
available_extensions_table = gr.HTML()
diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py
index c7dc1154..802e1ce7 100644
--- a/modules/ui_postprocessing.py
+++ b/modules/ui_postprocessing.py
@@ -6,7 +6,7 @@ import modules.generation_parameters_copypaste as parameters_copypaste
def create_ui():
tab_index = gr.State(value=0)
- with gr.Row().style(equal_height=False, variant='compact'):
+ with gr.Row(equal_height=False, variant='compact'):
with gr.Column(variant='compact'):
with gr.Tabs(elem_id="mode_extras"):
with gr.TabItem('Single Image', id="single_image", elem_id="extras_single_tab") as tab_single:
diff --git a/requirements.txt b/requirements.txt
index b3f8a7f4..afdc6ee2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,7 +7,7 @@ blendmodes
clean-fid
einops
gfpgan
-gradio==3.32.0
+gradio==3.39.0
inflection
jsonmerge
kornia
diff --git a/requirements_versions.txt b/requirements_versions.txt
index d07ab456..82b8732d 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -7,7 +7,7 @@ clean-fid==0.1.35
einops==0.4.1
fastapi==0.94.0
gfpgan==1.3.8
-gradio==3.32.0
+gradio==3.39.0
httpcore==0.15
inflection==0.5.1
jsonmerge==1.8.0
diff --git a/style.css b/style.css
index cf8470e4..86b4f61e 100644
--- a/style.css
+++ b/style.css
@@ -8,6 +8,7 @@
--checkbox-label-gap: 0.25em 0.1em;
--section-header-text-size: 12pt;
--block-background-fill: transparent;
+
}
.block.padded:not(.gradio-accordion) {
@@ -42,7 +43,8 @@ div.form{
.block.gradio-radio,
.block.gradio-checkboxgroup,
.block.gradio-number,
-.block.gradio-colorpicker
+.block.gradio-colorpicker,
+div.gradio-group
{
border-width: 0 !important;
box-shadow: none !important;
@@ -133,6 +135,11 @@ a{
cursor: pointer;
}
+div.styler{
+ border: none;
+ background: var(--background-fill-primary);
+}
+
/* general styled components */
@@ -164,7 +171,7 @@ a{
.checkboxes-row > div{
flex: 0;
white-space: nowrap;
- min-width: auto;
+ min-width: auto !important;
}
button.custom-button{
@@ -388,6 +395,7 @@ div#extras_scale_to_tab div.form{
#quicksettings > div, #quicksettings > fieldset{
max-width: 24em;
min-width: 24em;
+ width: 24em;
padding: 0;
border: none;
box-shadow: none;
--
cgit v1.2.3
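Two mechanical changes recur through this patch: gradio 3.39 drops the .style() method, so its keyword arguments (columns, height, container, equal_height, ...) move into each component's constructor, and the webui's component monkey-patching leaves modules/scripts.py for the new modules/gradio_extensons.py. A trimmed sketch of the save/wrap/reinstall pattern that file uses; the only behavior kept here is stashing the webui-only tooltip kwarg before gradio sees it:

    import gradio as gr

    original_IOComponent_init = gr.components.IOComponent.__init__

    def IOComponent_init(self, *args, **kwargs):
        # consume the webui-only kwarg so gradio's own __init__ never sees it
        self.webui_tooltip = kwargs.pop('tooltip', None)
        return original_IOComponent_init(self, *args, **kwargs)

    gr.components.IOComponent.__init__ = IOComponent_init

Saving the original before reassigning is what lets several such wrappers stack without recursing into themselves.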
From 7a64601428378c30e92efc00af7729db1b22dfb0 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Sat, 5 Aug 2023 14:21:28 +0900
Subject: need Reload UI not Restart
---
.../scripts/extra_options_section.py | 4 +-
modules/shared.py | 44 +++++++++++-----------
2 files changed, 25 insertions(+), 23 deletions(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
index a05e10d8..7bb0a1bb 100644
--- a/extensions-builtin/extra-options-section/scripts/extra_options_section.py
+++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
@@ -43,6 +43,6 @@ class ExtraOptionsSection(scripts.Script):
shared.options_templates.update(shared.options_section(('ui', "User interface"), {
- "extra_options": shared.OptionInfo([], "Options in main UI", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img/img2img interfaces").needs_restart(),
- "extra_options_accordion": shared.OptionInfo(False, "Place options in main UI into an accordion")
+ "extra_options": shared.OptionInfo([], "Options in main UI", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img/img2img interfaces").needs_reload_ui(),
+ "extra_options_accordion": shared.OptionInfo(False, "Place options in main UI into an accordion").needs_restart()
}))
diff --git a/modules/shared.py b/modules/shared.py
index 8245250a..fb317972 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -286,7 +286,9 @@ class OptionInfo:
self.comment_after += " (requires restart)"
return self
-
+ def needs_reload_ui(self):
+ self.comment_after += " (requires Reload UI)"
+ return self
def options_section(section_identifier, options_dict):
@@ -392,8 +394,8 @@ options_templates.update(options_section(('face-restoration', "Face restoration"
}))
options_templates.update(options_section(('system', "System"), {
- "show_warnings": OptionInfo(False, "Show warnings in console.").needs_restart(),
- "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_restart(),
+ "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(),
+ "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_reload_ui(),
"memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"),
"samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
"multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
@@ -427,7 +429,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"),
"sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"),
"sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"),
- "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds").needs_restart(),
+ "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds").needs_reload_ui(),
"enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"),
@@ -451,10 +453,10 @@ options_templates.update(options_section(('img2img', "img2img"), {
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"),
"img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}),
- "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_restart(),
- "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_restart(),
- "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_restart(),
- "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_restart(),
+ "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_reload_ui(),
+ "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_reload_ui(),
+ "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_reload_ui(),
+ "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(),
"return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"),
"return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"),
}))
@@ -503,15 +505,15 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), {
"extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"),
"extra_networks_card_show_desc": OptionInfo(True, "Show description on card"),
"extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"),
- "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_restart(),
+ "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(),
"textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"),
"textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"),
"sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks),
}))
options_templates.update(options_section(('ui', "User interface"), {
- "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_restart(),
- "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).needs_restart(),
+ "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(),
+ "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).needs_reload_ui(),
"return_grid": OptionInfo(True, "Show grid in results for web"),
"do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
"send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
@@ -521,19 +523,19 @@ options_templates.update(options_section(('ui', "User interface"), {
"js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"),
"js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"),
"show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
- "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_restart(),
- "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_restart(),
+ "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_reload_ui(),
+ "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(),
"keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
"keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
"keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"),
"keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"),
- "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_restart(),
- "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_restart(),
- "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_restart(),
- "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_restart(),
- "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_restart(),
- "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_restart(),
- "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_restart(),
+ "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(),
+ "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(),
+ "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(),
+ "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(),
+ "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(),
+ "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(),
+ "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(),
}))
@@ -564,7 +566,7 @@ options_templates.update(options_section(('ui', "Live previews"), {
}))
options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
- "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}).needs_restart(),
+ "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}).needs_reload_ui(),
"eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"),
"eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"),
"ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
--
cgit v1.2.3
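needs_reload_ui() follows the same chainable-builder shape as the needs_restart() it replaces: append to comment_after and return self, so it composes with .info() and .js() in a single expression. A stripped-down sketch, keeping only the fields needed to show the chaining (the real OptionInfo carries more state and markup):

    class OptionInfo:
        def __init__(self, default, label):
            self.default = default
            self.label = label
            self.comment_after = ""

        def info(self, text):
            self.comment_after += f" ({text})"  # simplified; the real method adds markup
            return self

        def needs_reload_ui(self):
            self.comment_after += " (requires Reload UI)"
            return self

    opt = OptionInfo(720, "Height of the image editor").info("in pixels").needs_reload_ui()
    assert opt.comment_after == " (in pixels) (requires Reload UI)"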
From 4c72377bbf227276914c4012b339f0b3da8b366b Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 7 Aug 2023 09:42:13 +0300
Subject: Options in main UI update
- correctly read values from pasted infotext
- setting for column count
- infotext paste: do not add a field to override settings if some other component is already handling it
---
.../scripts/extra_options_section.py | 39 +++++++++++++++++-----
modules/generation_parameters_copypaste.py | 5 +++
modules/shared.py | 2 +-
3 files changed, 37 insertions(+), 9 deletions(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
index 7bb0a1bb..d5c29bf2 100644
--- a/extensions-builtin/extra-options-section/scripts/extra_options_section.py
+++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
@@ -1,5 +1,7 @@
+import math
+
import gradio as gr
-from modules import scripts, shared, ui_components, ui_settings
+from modules import scripts, shared, ui_components, ui_settings, generation_parameters_copypaste
from modules.ui_components import FormColumn
@@ -19,15 +21,33 @@ class ExtraOptionsSection(scripts.Script):
def ui(self, is_img2img):
self.comps = []
self.setting_names = []
+ self.infotext_fields = []
+
+ mapping = {k: v for v, k in generation_parameters_copypaste.infotext_to_setting_name_mapping}
with gr.Blocks() as interface:
- with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and shared.opts.extra_options else gr.Group(), gr.Row():
- for setting_name in shared.opts.extra_options:
- with FormColumn():
- comp = ui_settings.create_setting_component(setting_name)
+ with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and shared.opts.extra_options else gr.Group():
+
+ row_count = math.ceil(len(shared.opts.extra_options) / shared.opts.extra_options_cols)
+
+ for row in range(row_count):
+ with gr.Row():
+ for col in range(shared.opts.extra_options_cols):
+ index = row * shared.opts.extra_options_cols + col
+ if index >= len(shared.opts.extra_options):
+ break
+
+ setting_name = shared.opts.extra_options[index]
- self.comps.append(comp)
- self.setting_names.append(setting_name)
+ with FormColumn():
+ comp = ui_settings.create_setting_component(setting_name)
+
+ self.comps.append(comp)
+ self.setting_names.append(setting_name)
+
+ setting_infotext_name = mapping.get(setting_name)
+ if setting_infotext_name is not None:
+ self.infotext_fields.append((comp, setting_infotext_name))
def get_settings_values():
return [ui_settings.get_value_for_setting(key) for key in self.setting_names]
@@ -44,5 +64,8 @@ class ExtraOptionsSection(scripts.Script):
shared.options_templates.update(shared.options_section(('ui', "User interface"), {
"extra_options": shared.OptionInfo([], "Options in main UI", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img/img2img interfaces").needs_reload_ui(),
- "extra_options_accordion": shared.OptionInfo(False, "Place options in main UI into an accordion").needs_restart()
+ "extra_options_cols": shared.OptionInfo(1, "Options in main UI - number of columns", gr.Number, {"precision": 0}).needs_reload_ui(),
+ "extra_options_accordion": shared.OptionInfo(False, "Options in main UI - place into an accordion").needs_reload_ui()
}))
+
+
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index e71c9601..5758e6f3 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -414,10 +414,15 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component,
return res
if override_settings_component is not None:
+ already_handled_fields = {key: 1 for _, key in paste_fields}
+
def paste_settings(params):
vals = {}
for param_name, setting_name in infotext_to_setting_name_mapping:
+ if param_name in already_handled_fields:
+ continue
+
v = params.get(param_name, None)
if v is None:
continue
diff --git a/modules/shared.py b/modules/shared.py
index 115e5276..4d854928 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -612,7 +612,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}).info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'),
's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}).info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"),
's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}).info('amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2'),
- 'k_sched_type': OptionInfo("Automatic", "scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"),
+ 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"),
'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"),
'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"),
'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"),
--
cgit v1.2.3
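The layout logic above turns a flat list of setting names into a grid of at most extra_options_cols columns: row_count = ceil(n / cols), the flat index is row * cols + col, and iteration breaks once the index runs past the list, so only the last row can be short. (The mapping comprehension inverts the (infotext name, setting name) pairs so each setting can look up its infotext field.) A standalone sketch of the indexing, with hypothetical option names:

    import math

    extra_options = ["CFG scale", "Clip skip", "ENSD", "Eta", "Sigma churn"]
    cols = 2
    row_count = math.ceil(len(extra_options) / cols)  # 3 rows for 5 options

    grid = []
    for row in range(row_count):
        current_row = []
        for col in range(cols):
            index = row * cols + col
            if index >= len(extra_options):
                break  # the last row may be short
            current_row.append(extra_options[index])
        grid.append(current_row)

    assert grid == [["CFG scale", "Clip skip"], ["ENSD", "Eta"], ["Sigma churn"]]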
From 01997f45ba089af24b03a5f614147bb0f9d8d824 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 7 Aug 2023 18:49:23 +0300
Subject: fix extra_options_section misbehaving when there's just one
extra_options element
---
.../extra-options-section/scripts/extra_options_section.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
index d5c29bf2..588b64d2 100644
--- a/extensions-builtin/extra-options-section/scripts/extra_options_section.py
+++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
@@ -50,7 +50,8 @@ class ExtraOptionsSection(scripts.Script):
self.infotext_fields.append((comp, setting_infotext_name))
def get_settings_values():
- return [ui_settings.get_value_for_setting(key) for key in self.setting_names]
+ res = [ui_settings.get_value_for_setting(key) for key in self.setting_names]
+ return res[0] if len(res) == 1 else res
interface.load(fn=get_settings_values, inputs=[], outputs=self.comps, queue=False, show_progress=False)
--
cgit v1.2.3
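The unwrap is needed because gradio treats a one-component outputs list specially: the load() callback must return the bare value rather than a one-element list, or the value arrives wrapped. A sketch of the fixed return shape, with hypothetical setting values:

    def get_settings_values(setting_names):
        res = [f"value-of-{name}" for name in setting_names]
        # with a single output component, gradio expects the bare value
        return res[0] if len(res) == 1 else res

    assert get_settings_values(["clip_skip"]) == "value-of-clip_skip"
    assert get_settings_values(["clip_skip", "eta"]) == ["value-of-clip_skip", "value-of-eta"]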
From bc7906e6d62bb3ed158304c581e941862355f6de Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Tue, 8 Aug 2023 21:28:16 +0300
Subject: Ability to automatically expand a picture that does not fit in the
screen
---
.../canvas-zoom-and-pan/javascript/zoom.js | 29 +++++++++++++++++++++-
.../canvas-zoom-and-pan/scripts/hotkey_config.py | 1 +
2 files changed, 29 insertions(+), 1 deletion(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index 30199dcd..7369e897 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -201,7 +201,8 @@ onUiLoaded(async() => {
canvas_hotkey_overlap: "KeyO",
canvas_disabled_functions: [],
canvas_show_tooltip: true,
- canvas_blur_prompt: false
+ canvas_blur_prompt: false,
+ canvas_auto_expand: false
};
const functionMap = {
@@ -648,8 +649,34 @@ onUiLoaded(async() => {
mouseY = e.offsetY;
}
+ // Simulation of the function to put a long image into the screen.
+ // We define the size of the canvas, make a fullscreen to reveal the image, then reduce it to fit into the element.
+ // We hide the image and show it to the user when it is ready.
+ function autoExpand(e) {
+ const canvas = document.querySelector(`${elemId} canvas[key="interface"]`);
+ const isMainTab = activeElement === elementIDs.inpaint || activeElement === elementIDs.inpaintSketch || activeElement === elementIDs.sketch;
+ if (canvas && isMainTab) {
+ if (canvas && parseInt(targetElement.style.width) > 862 || parseInt(canvas.width) < 862) {
+ return;
+ }
+ if (canvas) {
+ targetElement.style.visibility = "hidden";
+ setTimeout(() => {
+ fitToScreen();
+ resetZoom();
+ targetElement.style.visibility = "visible";
+ }, 10);
+ }
+ }
+ }
+
targetElement.addEventListener("mousemove", getMousePosition);
+ // Apply auto expand if enabled
+ if (hotkeysConfig.canvas_auto_expand) {
+ targetElement.addEventListener("mousemove", autoExpand);
+ }
+
// Handle events only inside the targetElement
let isKeyDownHandlerAttached = false;
diff --git a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
index 380176ce..9a51ff2a 100644
--- a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
+++ b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
@@ -9,6 +9,7 @@ shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas
"canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas positon"),
"canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, neededs for testing"),
"canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
+ "canvas_auto_expand": shared.OptionInfo(False, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"),
"canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"),
"canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable function that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size", "Moving canvas","Fullscreen","Reset Zoom","Overlap"]}),
}))
--
cgit v1.2.3
From a74c014425d412c0588618abe0a3e19e4f8f5902 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Wed, 9 Aug 2023 00:06:51 +0300
Subject: auto-expand enable by default
---
extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
index 9a51ff2a..2d8d2d1c 100644
--- a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
+++ b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
@@ -9,7 +9,7 @@ shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas
"canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas positon"),
"canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, neededs for testing"),
"canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
- "canvas_auto_expand": shared.OptionInfo(False, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"),
+ "canvas_auto_expand": shared.OptionInfo(True, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"),
"canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"),
"canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable function that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size", "Moving canvas","Fullscreen","Reset Zoom","Overlap"]}),
}))
--
cgit v1.2.3
From e12a1be1ca23377a3d5774fe8d52664a9c2c0fb9 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Wed, 9 Aug 2023 00:14:19 +0300
Subject: auto-expand enable by default for js
---
extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index 7369e897..eb49a01d 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -201,8 +201,8 @@ onUiLoaded(async() => {
canvas_hotkey_overlap: "KeyO",
canvas_disabled_functions: [],
canvas_show_tooltip: true,
+ canvas_auto_expand: true,
canvas_blur_prompt: false,
- canvas_auto_expand: false
};
const functionMap = {
--
cgit v1.2.3
From eed963e97261ee03bffe59e3d343dcf53d82dbfd Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 9 Aug 2023 16:54:49 +0300
Subject: Lora cache in memory
---
extensions-builtin/Lora/networks.py | 22 +++++++++++++++++++---
extensions-builtin/Lora/scripts/lora_script.py | 3 +++
2 files changed, 22 insertions(+), 3 deletions(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 17cbe1bb..bc722e90 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -195,6 +195,15 @@ def load_network(name, network_on_disk):
return net
+def purge_networks_from_memory():
+ while len(networks_in_memory) > shared.opts.lora_in_memory_limit and len(networks_in_memory) > 0:
+ name = next(iter(networks_in_memory))
+ networks_in_memory.pop(name, None)
+
+ devices.torch_gc()
+
+
+
def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
already_loaded = {}
@@ -212,15 +221,19 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
failed_to_load_networks = []
- for i, name in enumerate(names):
+ for i, (network_on_disk, name) in enumerate(zip(networks_on_disk, names)):
net = already_loaded.get(name, None)
- network_on_disk = networks_on_disk[i]
-
if network_on_disk is not None:
+ if net is None:
+ net = networks_in_memory.get(name)
+
if net is None or os.path.getmtime(network_on_disk.filename) > net.mtime:
try:
net = load_network(name, network_on_disk)
+
+ networks_in_memory.pop(name, None)
+ networks_in_memory[name] = net
except Exception as e:
errors.display(e, f"loading network {network_on_disk.filename}")
continue
@@ -242,6 +255,8 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
if failed_to_load_networks:
sd_hijack.model_hijack.comments.append("Failed to find networks: " + ", ".join(failed_to_load_networks))
+ purge_networks_from_memory()
+
def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
weights_backup = getattr(self, "network_weights_backup", None)
@@ -462,6 +477,7 @@ def infotext_pasted(infotext, params):
available_networks = {}
available_network_aliases = {}
loaded_networks = []
+networks_in_memory = {}
available_network_hash_lookup = {}
forbidden_network_aliases = {}
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index cd28afc9..6ab8b6e7 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -65,6 +65,7 @@ shared.options_templates.update(shared.options_section(('extra_networks', "Extra
"lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
"lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"),
"lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}),
+ "lora_in_memory_limit": shared.OptionInfo(0, "Number of Lora networks to keep cached in memory", gr.Number, {"precision": 0}),
}))
@@ -121,3 +122,5 @@ def infotext_pasted(infotext, d):
script_callbacks.on_infotext_pasted(infotext_pasted)
+
+shared.opts.onchange("lora_in_memory_limit", networks.purge_networks_from_memory)
--
cgit v1.2.3
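The cache leans on Python dicts preserving insertion order: popping a name before re-inserting it moves it to the end, and next(iter(...)) always yields the oldest entry, so purge_networks_from_memory evicts least-recently-loaded networks until lora_in_memory_limit is met. A minimal standalone version of that policy, with the torch_gc call replaced by a comment:

    networks_in_memory = {}
    limit = 2  # stands in for shared.opts.lora_in_memory_limit

    def remember(name, net):
        networks_in_memory.pop(name, None)  # re-inserting moves name to the end
        networks_in_memory[name] = net
        purge()

    def purge():
        while len(networks_in_memory) > limit and len(networks_in_memory) > 0:
            oldest = next(iter(networks_in_memory))  # dicts iterate oldest-first
            networks_in_memory.pop(oldest, None)
        # devices.torch_gc() would run here to release the freed tensors

    remember("a", 1); remember("b", 2); remember("a", 3); remember("c", 4)
    assert list(networks_in_memory) == ["a", "c"]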
From 4a64d340010a907a6f5a0f53da023c46f794a590 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Wed, 9 Aug 2023 18:40:45 +0300
Subject: fix auto-expand
---
extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index eb49a01d..e7616b98 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -42,6 +42,11 @@ onUiLoaded(async() => {
}
}
+ // Detect whether the element has a horizontal scroll bar
+ function hasHorizontalScrollbar(element) {
+ return element.scrollWidth > element.clientWidth;
+ }
+
// Function for defining the "Ctrl", "Shift" and "Alt" keys
function isModifierKey(event, key) {
switch (key) {
@@ -650,16 +655,14 @@ onUiLoaded(async() => {
}
// Simulation of the function to put a long image into the screen.
- // We define the size of the canvas, make a fullscreen to reveal the image, then reduce it to fit into the element.
+ // We detect if an image has a scroll bar or not, make a fullscreen to reveal the image, then reduce it to fit into the element.
// We hide the image and show it to the user when it is ready.
function autoExpand(e) {
const canvas = document.querySelector(`${elemId} canvas[key="interface"]`);
const isMainTab = activeElement === elementIDs.inpaint || activeElement === elementIDs.inpaintSketch || activeElement === elementIDs.sketch;
+
if (canvas && isMainTab) {
- if (canvas && parseInt(targetElement.style.width) > 862 || parseInt(canvas.width) < 862) {
- return;
- }
- if (canvas) {
+ if (hasHorizontalScrollbar(targetElement)) {
targetElement.style.visibility = "hidden";
setTimeout(() => {
fitToScreen();
--
cgit v1.2.3
From ed01d2ee3b22f7d01f218e3b7a3571dc7a2c54c0 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Thu, 10 Aug 2023 13:45:25 +0300
Subject: another fix, a different approach
---
.../canvas-zoom-and-pan/javascript/zoom.js | 23 ++++++++++++++++++++--
1 file changed, 21 insertions(+), 2 deletions(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index e7616b98..30a74834 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -12,6 +12,7 @@ onUiLoaded(async() => {
"Sketch": elementIDs.sketch
};
+
// Helper functions
// Get active tab
function getActiveTab(elements, all = false) {
@@ -657,17 +658,20 @@ onUiLoaded(async() => {
// Simulation of the function to put a long image into the screen.
// We detect if an image has a scroll bar or not, make a fullscreen to reveal the image, then reduce it to fit into the element.
// We hide the image and show it to the user when it is ready.
- function autoExpand(e) {
+
+ targetElement.isExpanded = false;
+ function autoExpand() {
const canvas = document.querySelector(`${elemId} canvas[key="interface"]`);
const isMainTab = activeElement === elementIDs.inpaint || activeElement === elementIDs.inpaintSketch || activeElement === elementIDs.sketch;
if (canvas && isMainTab) {
- if (hasHorizontalScrollbar(targetElement)) {
+ if (hasHorizontalScrollbar(targetElement) && targetElement.isExpanded === false) {
targetElement.style.visibility = "hidden";
setTimeout(() => {
fitToScreen();
resetZoom();
targetElement.style.visibility = "visible";
+ targetElement.isExpanded = true;
}, 10);
}
}
@@ -675,9 +679,24 @@ onUiLoaded(async() => {
targetElement.addEventListener("mousemove", getMousePosition);
+ //observers
+ // Creating an observer with a callback function to handle DOM changes
+ const observer = new MutationObserver((mutationsList, observer) => {
+ for (let mutation of mutationsList) {
+ // If the style attribute of the canvas has changed, by observation it happens only when the picture changes
+ if (mutation.type === 'attributes' && mutation.attributeName === 'style' &&
+ mutation.target.tagName.toLowerCase() === 'canvas') {
+ targetElement.isExpanded = false;
+ setTimeout(resetZoom, 10);
+ }
+ }
+ });
+
// Apply auto expand if enabled
if (hotkeysConfig.canvas_auto_expand) {
targetElement.addEventListener("mousemove", autoExpand);
+ // Set up an observer to track attribute changes
+ observer.observe(targetElement, {attributes: true, childList: true, subtree: true});
}
// Handle events only inside the targetElement
--
cgit v1.2.3
From 045f7408926aec4bf7e8fcb09333a3f45783d239 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Thu, 10 Aug 2023 16:17:52 +0300
Subject: Height fix
---
extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js | 5 +++++
1 file changed, 5 insertions(+)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index 30a74834..72c8ba87 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -378,6 +378,11 @@ onUiLoaded(async() => {
toggleOverlap("off");
fullScreenMode = false;
+ const closeBtn = targetElement.querySelector("button[aria-label='Remove Image']");
+ if (closeBtn) {
+ closeBtn.addEventListener("click", resetZoom);
+ }
+
if (
canvas &&
parseFloat(canvas.style.width) > 865 &&
--
cgit v1.2.3
From 4fafc34e498130dcbb2d1a44fbc55fdba31e32d4 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Thu, 10 Aug 2023 23:42:58 -0400
Subject: Fix to make LoRA old method setting work
---
extensions-builtin/Lora/networks.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index bc722e90..7e3415ac 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -357,7 +357,7 @@ def network_forward(module, input, original_forward):
if module is None:
continue
- y = module.forward(y, input)
+ y = module.forward(input, y)
return y
--
cgit v1.2.3
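The one-line swap above matters because, in the functional ("lora_functional") path, a network module's forward expects the original layer input first and the accumulated output second. A minimal sketch of that accumulation loop, with all names assumed for illustration rather than taken from the extension:

def network_forward_sketch(net_modules, input, original_forward):
    # Hypothetical condensation of the functional path: start from the original
    # layer output, then let each network module add its contribution computed
    # from the *input* -- hence forward(input, y), the order this commit fixes.
    y = original_forward(input)
    for module in net_modules:
        if module is None:
            continue
        y = module.forward(input, y)
    return y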
From bd4da4474bef5c9c1f690c62b971704ee73d2860 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Sun, 13 Aug 2023 02:27:39 +0800
Subject: Add extra norm module into built-in lora ext
refer to LyCORIS 1.9.0.dev6
add new option and module for training norm layer
(which is reported to be good for style)
---
extensions-builtin/Lora/network.py | 7 ++-
extensions-builtin/Lora/network_norm.py | 29 ++++++++++++
extensions-builtin/Lora/networks.py | 64 ++++++++++++++++++++++----
extensions-builtin/Lora/scripts/lora_script.py | 16 +++++++
4 files changed, 105 insertions(+), 11 deletions(-)
create mode 100644 extensions-builtin/Lora/network_norm.py
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py
index 0a18d69e..b7b89061 100644
--- a/extensions-builtin/Lora/network.py
+++ b/extensions-builtin/Lora/network.py
@@ -133,7 +133,7 @@ class NetworkModule:
return 1.0
- def finalize_updown(self, updown, orig_weight, output_shape):
+ def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None):
if self.bias is not None:
updown = updown.reshape(self.bias.shape)
updown += self.bias.to(orig_weight.device, dtype=orig_weight.dtype)
@@ -145,7 +145,10 @@ class NetworkModule:
if orig_weight.size().numel() == updown.size().numel():
updown = updown.reshape(orig_weight.shape)
- return updown * self.calc_scale() * self.multiplier()
+ if ex_bias is None:
+ ex_bias = 0
+
+ return updown * self.calc_scale() * self.multiplier(), ex_bias * self.multiplier()
def calc_updown(self, target):
raise NotImplementedError()
diff --git a/extensions-builtin/Lora/network_norm.py b/extensions-builtin/Lora/network_norm.py
new file mode 100644
index 00000000..dab8b684
--- /dev/null
+++ b/extensions-builtin/Lora/network_norm.py
@@ -0,0 +1,29 @@
+import network
+
+
+class ModuleTypeNorm(network.ModuleType):
+ def create_module(self, net: network.Network, weights: network.NetworkWeights):
+ if all(x in weights.w for x in ["w_norm", "b_norm"]):
+ return NetworkModuleNorm(net, weights)
+
+ return None
+
+
+class NetworkModuleNorm(network.NetworkModule):
+ def __init__(self, net: network.Network, weights: network.NetworkWeights):
+ super().__init__(net, weights)
+ print("NetworkModuleNorm")
+
+ self.w_norm = weights.w.get("w_norm")
+ self.b_norm = weights.w.get("b_norm")
+
+ def calc_updown(self, orig_weight):
+ output_shape = self.w_norm.shape
+ updown = self.w_norm.to(orig_weight.device, dtype=orig_weight.dtype)
+
+ if self.b_norm is not None:
+ ex_bias = self.b_norm.to(orig_weight.device, dtype=orig_weight.dtype)
+ else:
+ ex_bias = None
+
+ return self.finalize_updown(updown, orig_weight, output_shape, ex_bias)
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 7e3415ac..74cefe43 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -7,6 +7,7 @@ import network_hada
import network_ia3
import network_lokr
import network_full
+import network_norm
import torch
from typing import Union
@@ -19,6 +20,7 @@ module_types = [
network_ia3.ModuleTypeIa3(),
network_lokr.ModuleTypeLokr(),
network_full.ModuleTypeFull(),
+ network_norm.ModuleTypeNorm(),
]
@@ -31,6 +33,8 @@ suffix_conversion = {
"resnets": {
"conv1": "in_layers_2",
"conv2": "out_layers_3",
+ "norm1": "in_layers_0",
+ "norm2": "out_layers_0",
"time_emb_proj": "emb_layers_1",
"conv_shortcut": "skip_connection",
}
@@ -258,20 +262,25 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
purge_networks_from_memory()
-def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
+def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
weights_backup = getattr(self, "network_weights_backup", None)
+ bias_backup = getattr(self, "network_bias_backup", None)
- if weights_backup is None:
+ if weights_backup is None and bias_backup is None:
return
- if isinstance(self, torch.nn.MultiheadAttention):
- self.in_proj_weight.copy_(weights_backup[0])
- self.out_proj.weight.copy_(weights_backup[1])
- else:
- self.weight.copy_(weights_backup)
+ if weights_backup is not None:
+ if isinstance(self, torch.nn.MultiheadAttention):
+ self.in_proj_weight.copy_(weights_backup[0])
+ self.out_proj.weight.copy_(weights_backup[1])
+ else:
+ self.weight.copy_(weights_backup)
+ if bias_backup is not None:
+ self.bias.copy_(bias_backup)
-def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
+
+def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
"""
Applies the currently selected set of networks to the weights of torch layer self.
If weights already have this particular set of networks applied, does nothing.
@@ -294,6 +303,11 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
self.network_weights_backup = weights_backup
+ bias_backup = getattr(self, "network_bias_backup", None)
+ if bias_backup is None and getattr(self, 'bias', None) is not None:
+ bias_backup = self.bias.to(devices.cpu, copy=True)
+ self.network_bias_backup = bias_backup
+
if current_names != wanted_names:
network_restore_weights_from_backup(self)
@@ -301,13 +315,15 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
module = net.modules.get(network_layer_name, None)
if module is not None and hasattr(self, 'weight'):
with torch.no_grad():
- updown = module.calc_updown(self.weight)
+ updown, ex_bias = module.calc_updown(self.weight)
if len(self.weight.shape) == 4 and self.weight.shape[1] == 9:
# inpainting model. zero pad updown to make channel[1] 4 to 9
updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))
self.weight += updown
+ if getattr(self, 'bias', None) is not None:
+ self.bias += ex_bias
continue
module_q = net.modules.get(network_layer_name + "_q_proj", None)
@@ -397,6 +413,36 @@ def network_Conv2d_load_state_dict(self, *args, **kwargs):
return torch.nn.Conv2d_load_state_dict_before_network(self, *args, **kwargs)
+def network_GroupNorm_forward(self, input):
+ if shared.opts.lora_functional:
+ return network_forward(self, input, torch.nn.GroupNorm_forward_before_network)
+
+ network_apply_weights(self)
+
+ return torch.nn.GroupNorm_forward_before_network(self, input)
+
+
+def network_GroupNorm_load_state_dict(self, *args, **kwargs):
+ network_reset_cached_weight(self)
+
+ return torch.nn.GroupNorm_load_state_dict_before_network(self, *args, **kwargs)
+
+
+def network_LayerNorm_forward(self, input):
+ if shared.opts.lora_functional:
+ return network_forward(self, input, torch.nn.LayerNorm_forward_before_network)
+
+ network_apply_weights(self)
+
+ return torch.nn.LayerNorm_forward_before_network(self, input)
+
+
+def network_LayerNorm_load_state_dict(self, *args, **kwargs):
+ network_reset_cached_weight(self)
+
+ return torch.nn.LayerNorm_load_state_dict_before_network(self, *args, **kwargs)
+
+
def network_MultiheadAttention_forward(self, *args, **kwargs):
network_apply_weights(self)
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 6ab8b6e7..dc307f8c 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -40,6 +40,18 @@ if not hasattr(torch.nn, 'Conv2d_forward_before_network'):
if not hasattr(torch.nn, 'Conv2d_load_state_dict_before_network'):
torch.nn.Conv2d_load_state_dict_before_network = torch.nn.Conv2d._load_from_state_dict
+if not hasattr(torch.nn, 'GroupNorm_forward_before_network'):
+ torch.nn.GroupNorm_forward_before_network = torch.nn.GroupNorm.forward
+
+if not hasattr(torch.nn, 'GroupNorm_load_state_dict_before_network'):
+ torch.nn.GroupNorm_load_state_dict_before_network = torch.nn.GroupNorm._load_from_state_dict
+
+if not hasattr(torch.nn, 'LayerNorm_forward_before_network'):
+ torch.nn.LayerNorm_forward_before_network = torch.nn.LayerNorm.forward
+
+if not hasattr(torch.nn, 'LayerNorm_load_state_dict_before_network'):
+ torch.nn.LayerNorm_load_state_dict_before_network = torch.nn.LayerNorm._load_from_state_dict
+
if not hasattr(torch.nn, 'MultiheadAttention_forward_before_network'):
torch.nn.MultiheadAttention_forward_before_network = torch.nn.MultiheadAttention.forward
@@ -50,6 +62,10 @@ torch.nn.Linear.forward = networks.network_Linear_forward
torch.nn.Linear._load_from_state_dict = networks.network_Linear_load_state_dict
torch.nn.Conv2d.forward = networks.network_Conv2d_forward
torch.nn.Conv2d._load_from_state_dict = networks.network_Conv2d_load_state_dict
+torch.nn.GroupNorm.forward = networks.network_GroupNorm_forward
+torch.nn.GroupNorm._load_from_state_dict = networks.network_GroupNorm_load_state_dict
+torch.nn.LayerNorm.forward = networks.network_LayerNorm_forward
+torch.nn.LayerNorm._load_from_state_dict = networks.network_LayerNorm_load_state_dict
torch.nn.MultiheadAttention.forward = networks.network_MultiheadAttention_forward
torch.nn.MultiheadAttention._load_from_state_dict = networks.network_MultiheadAttention_load_state_dict
--
cgit v1.2.3
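In effect, a norm module in such a LoRA stores an additive delta for a norm layer's weight ("w_norm") and optionally its bias ("b_norm"). A minimal standalone sketch of what applying that delta amounts to, assuming plain torch layers (this is not the extension's actual code path, which goes through calc_updown/finalize_updown):

from typing import Optional
import torch

def apply_norm_delta(layer: torch.nn.LayerNorm, w_norm: torch.Tensor,
                     b_norm: Optional[torch.Tensor], multiplier: float = 1.0):
    # Add the trained deltas onto the live norm parameters, scaled by the
    # network multiplier and matched to the original weights' device/dtype.
    with torch.no_grad():
        layer.weight += w_norm.to(layer.weight.device, dtype=layer.weight.dtype) * multiplier
        if b_norm is not None and layer.bias is not None:
            layer.bias += b_norm.to(layer.bias.device, dtype=layer.bias.dtype) * multiplier

layer = torch.nn.LayerNorm(8)
apply_norm_delta(layer, torch.full((8,), 0.1), torch.zeros(8), multiplier=0.8)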
From a2b83050965a1a117f2762d3b5fa8b4841777e8f Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Sun, 13 Aug 2023 02:35:04 +0800
Subject: return None if no ex_bias
---
extensions-builtin/Lora/network.py | 6 +++---
extensions-builtin/Lora/networks.py | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py
index b7b89061..d8e8dfb7 100644
--- a/extensions-builtin/Lora/network.py
+++ b/extensions-builtin/Lora/network.py
@@ -145,10 +145,10 @@ class NetworkModule:
if orig_weight.size().numel() == updown.size().numel():
updown = updown.reshape(orig_weight.shape)
- if ex_bias is None:
- ex_bias = 0
+ if ex_bias is not None:
+ ex_bias = ex_bias * self.multiplier()
- return updown * self.calc_scale() * self.multiplier(), ex_bias * self.multiplier()
+ return updown * self.calc_scale() * self.multiplier(), ex_bias
def calc_updown(self, target):
raise NotImplementedError()
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 74cefe43..ba621139 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -322,7 +322,7 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))
self.weight += updown
- if getattr(self, 'bias', None) is not None:
+ if ex_bias is not None and getattr(self, 'bias', None) is not None:
self.bias += ex_bias
continue
--
cgit v1.2.3
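The corrected finalize behavior reduces to: scale ex_bias by the multiplier only when one exists, and otherwise pass None through so callers can skip the bias update entirely. A toy restatement (names illustrative):

def finalize_sketch(updown, scale, multiplier, ex_bias=None):
    # None means "this module carries no bias delta"; callers check for it
    # before touching self.bias.
    if ex_bias is not None:
        ex_bias = ex_bias * multiplier
    return updown * scale * multiplier, ex_bias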
From 5881dcb8873b3f87b9c6545e9cb8d1d77023f4fe Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Sun, 13 Aug 2023 02:36:02 +0800
Subject: remove debug print
---
extensions-builtin/Lora/network_norm.py | 1 -
1 file changed, 1 deletion(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/network_norm.py b/extensions-builtin/Lora/network_norm.py
index dab8b684..ce450158 100644
--- a/extensions-builtin/Lora/network_norm.py
+++ b/extensions-builtin/Lora/network_norm.py
@@ -12,7 +12,6 @@ class ModuleTypeNorm(network.ModuleType):
class NetworkModuleNorm(network.NetworkModule):
def __init__(self, net: network.Network, weights: network.NetworkWeights):
super().__init__(net, weights)
- print("NetworkModuleNorm")
self.w_norm = weights.w.get("w_norm")
self.b_norm = weights.w.get("b_norm")
--
cgit v1.2.3
From 7fa5ee54b15904bef6598800df76ba1291d44ec6 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Sun, 13 Aug 2023 02:32:54 -0400
Subject: Support search and display of hashes for all extra network items
---
extensions-builtin/Lora/ui_extra_networks_lora.py | 3 ++-
modules/ui_extra_networks_checkpoints.py | 1 +
modules/ui_extra_networks_hypernets.py | 6 +++++-
modules/ui_extra_networks_textual_inversion.py | 3 ++-
modules/ui_extra_networks_user_metadata.py | 4 +++-
5 files changed, 13 insertions(+), 4 deletions(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py
index 3629e5c0..55409a78 100644
--- a/extensions-builtin/Lora/ui_extra_networks_lora.py
+++ b/extensions-builtin/Lora/ui_extra_networks_lora.py
@@ -25,9 +25,10 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
item = {
"name": name,
"filename": lora_on_disk.filename,
+ "shorthash": lora_on_disk.shorthash,
"preview": self.find_preview(path),
"description": self.find_description(path),
- "search_term": self.search_terms_from_path(lora_on_disk.filename),
+ "search_term": self.search_terms_from_path(lora_on_disk.filename) + " " + (lora_on_disk.hash or ""),
"local_preview": f"{path}.{shared.opts.samples_format}",
"metadata": lora_on_disk.metadata,
"sort_keys": {'default': index, **self.get_sort_keys(lora_on_disk.filename)},
diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py
index 77885022..ebb5249f 100644
--- a/modules/ui_extra_networks_checkpoints.py
+++ b/modules/ui_extra_networks_checkpoints.py
@@ -19,6 +19,7 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):
return {
"name": checkpoint.name_for_extra,
"filename": checkpoint.filename,
+ "shorthash": checkpoint.shorthash,
"preview": self.find_preview(path),
"description": self.find_description(path),
"search_term": self.search_terms_from_path(checkpoint.filename) + " " + (checkpoint.sha256 or ""),
diff --git a/modules/ui_extra_networks_hypernets.py b/modules/ui_extra_networks_hypernets.py
index 514a4562..4cedf085 100644
--- a/modules/ui_extra_networks_hypernets.py
+++ b/modules/ui_extra_networks_hypernets.py
@@ -2,6 +2,7 @@ import os
from modules import shared, ui_extra_networks
from modules.ui_extra_networks import quote_js
+from modules.hashes import sha256_from_cache
class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage):
@@ -14,13 +15,16 @@ class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage):
def create_item(self, name, index=None, enable_filter=True):
full_path = shared.hypernetworks[name]
path, ext = os.path.splitext(full_path)
+ sha256 = sha256_from_cache(full_path, f'hypernet/{name}')
+ shorthash = sha256[0:10] if sha256 else None
return {
"name": name,
"filename": full_path,
+ "shorthash": shorthash,
"preview": self.find_preview(path),
"description": self.find_description(path),
- "search_term": self.search_terms_from_path(path),
+ "search_term": self.search_terms_from_path(path) + " " + (sha256 or ""),
"prompt": quote_js(f""),
"local_preview": f"{path}.preview.{shared.opts.samples_format}",
"sort_keys": {'default': index, **self.get_sort_keys(path + ext)},
diff --git a/modules/ui_extra_networks_textual_inversion.py b/modules/ui_extra_networks_textual_inversion.py
index 73134698..55ef0ea7 100644
--- a/modules/ui_extra_networks_textual_inversion.py
+++ b/modules/ui_extra_networks_textual_inversion.py
@@ -19,9 +19,10 @@ class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage):
return {
"name": name,
"filename": embedding.filename,
+ "shorthash": embedding.shorthash,
"preview": self.find_preview(path),
"description": self.find_description(path),
- "search_term": self.search_terms_from_path(embedding.filename),
+ "search_term": self.search_terms_from_path(embedding.filename) + " " + (embedding.hash or ""),
"prompt": quote_js(embedding.name),
"local_preview": f"{path}.preview.{shared.opts.samples_format}",
"sort_keys": {'default': index, **self.get_sort_keys(embedding.filename)},
diff --git a/modules/ui_extra_networks_user_metadata.py b/modules/ui_extra_networks_user_metadata.py
index cda471e4..b11622a1 100644
--- a/modules/ui_extra_networks_user_metadata.py
+++ b/modules/ui_extra_networks_user_metadata.py
@@ -93,11 +93,13 @@ class UserMetadataEditor:
item = self.page.items.get(name, {})
try:
filename = item["filename"]
+ shorthash = item.get("shorthash", None)
stats = os.stat(filename)
params = [
('Filename: ', os.path.basename(filename)),
('File size: ', sysinfo.pretty_bytes(stats.st_size)),
+ ('Hash: ', shorthash),
('Modified: ', datetime.datetime.fromtimestamp(stats.st_mtime).strftime('%Y-%m-%d %H:%M')),
]
@@ -115,7 +117,7 @@ class UserMetadataEditor:
errors.display(e, f"reading metadata info for {name}")
params = []
- table = '<table class="file-metadata">' + "".join(f"<tr><th>{name}</th><td>{value}</td></tr>" for name, value in params) + '</table>'
+ table = '<table class="file-metadata">' + "".join(f"<tr><th>{name}</th><td>{value}</td></tr>" for name, value in params if value is not None) + '</table>'
return html.escape(name), user_metadata.get('description', ''), table, self.get_card_html(name), user_metadata.get('notes', '')
--
cgit v1.2.3
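The pattern repeated across these pages is: compute the file's sha256 once, append the full digest to the hidden search term (so pasting a hash into the search box filters cards), and keep a 10-character prefix as the displayed "shorthash". A self-contained sketch of that pattern, with helper names assumed for illustration:

import hashlib

def file_sha256(path: str) -> str:
    # Stream the file in 1 MiB chunks so large checkpoints don't need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

def make_item(name: str, path: str, base_search_term: str) -> dict:
    sha256 = file_sha256(path)
    return {
        "name": name,
        "shorthash": sha256[0:10] if sha256 else None,
        "search_term": base_search_term + " " + (sha256 or ""),
    }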
From d8419762c1454ba51baa710d9ce8e762efc056ef Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 13 Aug 2023 15:07:37 +0300
Subject: Lora: output warnings in UI rather than fail for unfitting loras;
switch to logging for error output in console
---
extensions-builtin/Lora/extra_networks_lora.py | 12 ++++-
extensions-builtin/Lora/networks.py | 61 ++++++++++++++++----------
extensions-builtin/Lora/scripts/lora_script.py | 6 +--
modules/processing.py | 13 +++---
4 files changed, 58 insertions(+), 34 deletions(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/extra_networks_lora.py b/extensions-builtin/Lora/extra_networks_lora.py
index ba2945c6..32e32cab 100644
--- a/extensions-builtin/Lora/extra_networks_lora.py
+++ b/extensions-builtin/Lora/extra_networks_lora.py
@@ -1,4 +1,4 @@
-from modules import extra_networks, shared
+from modules import extra_networks, shared, sd_hijack
import networks
@@ -6,9 +6,14 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
def __init__(self):
super().__init__('lora')
+ self.errors = {}
+ """mapping of network names to the number of errors the network had during operation"""
+
def activate(self, p, params_list):
additional = shared.opts.sd_lora
+ self.errors.clear()
+
if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional):
p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
@@ -56,4 +61,7 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes)
def deactivate(self, p):
- pass
+ if self.errors:
+ p.comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in self.errors.items()))
+
+ self.errors.clear()
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index ba621139..c252ed9e 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -1,3 +1,4 @@
+import logging
import os
import re
@@ -194,7 +195,7 @@ def load_network(name, network_on_disk):
net.modules[key] = net_module
if keys_failed_to_match:
- print(f"Failed to match keys when loading network {network_on_disk.filename}: {keys_failed_to_match}")
+ logging.debug(f"Network {network_on_disk.filename} didn't match keys: {keys_failed_to_match}")
return net
@@ -207,7 +208,6 @@ def purge_networks_from_memory():
devices.torch_gc()
-
def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
already_loaded = {}
@@ -248,7 +248,7 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
if net is None:
failed_to_load_networks.append(name)
- print(f"Couldn't find network with name {name}")
+ logging.info(f"Couldn't find network with name {name}")
continue
net.te_multiplier = te_multipliers[i] if te_multipliers else 1.0
@@ -257,7 +257,7 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
loaded_networks.append(net)
if failed_to_load_networks:
- sd_hijack.model_hijack.comments.append("Failed to find networks: " + ", ".join(failed_to_load_networks))
+ sd_hijack.model_hijack.comments.append("Networks not found: " + ", ".join(failed_to_load_networks))
purge_networks_from_memory()
@@ -314,17 +314,22 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
for net in loaded_networks:
module = net.modules.get(network_layer_name, None)
if module is not None and hasattr(self, 'weight'):
- with torch.no_grad():
- updown, ex_bias = module.calc_updown(self.weight)
+ try:
+ with torch.no_grad():
+ updown, ex_bias = module.calc_updown(self.weight)
- if len(self.weight.shape) == 4 and self.weight.shape[1] == 9:
- # inpainting model. zero pad updown to make channel[1] 4 to 9
- updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))
+ if len(self.weight.shape) == 4 and self.weight.shape[1] == 9:
+ # inpainting model. zero pad updown to make channel[1] 4 to 9
+ updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))
- self.weight += updown
- if ex_bias is not None and getattr(self, 'bias', None) is not None:
- self.bias += ex_bias
- continue
+ self.weight += updown
+ if ex_bias is not None and getattr(self, 'bias', None) is not None:
+ self.bias += ex_bias
+ except RuntimeError as e:
+ logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
+ extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
+
+ continue
module_q = net.modules.get(network_layer_name + "_q_proj", None)
module_k = net.modules.get(network_layer_name + "_k_proj", None)
@@ -332,21 +337,28 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
module_out = net.modules.get(network_layer_name + "_out_proj", None)
if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out:
- with torch.no_grad():
- updown_q = module_q.calc_updown(self.in_proj_weight)
- updown_k = module_k.calc_updown(self.in_proj_weight)
- updown_v = module_v.calc_updown(self.in_proj_weight)
- updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
- updown_out = module_out.calc_updown(self.out_proj.weight)
-
- self.in_proj_weight += updown_qkv
- self.out_proj.weight += updown_out
- continue
+ try:
+ with torch.no_grad():
+ updown_q = module_q.calc_updown(self.in_proj_weight)
+ updown_k = module_k.calc_updown(self.in_proj_weight)
+ updown_v = module_v.calc_updown(self.in_proj_weight)
+ updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
+ updown_out = module_out.calc_updown(self.out_proj.weight)
+
+ self.in_proj_weight += updown_qkv
+ self.out_proj.weight += updown_out
+
+ except RuntimeError as e:
+ logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
+ extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
+
+ continue
if module is None:
continue
- print(f'failed to calculate network weights for layer {network_layer_name}')
+ logging.debug(f"Network {net.name} layer {network_layer_name}: couldn't find supported operation")
+ extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
self.network_current_names = wanted_names
@@ -519,6 +531,7 @@ def infotext_pasted(infotext, params):
if added:
params["Prompt"] += "\n" + "".join(added)
+extra_network_lora = None
available_networks = {}
available_network_aliases = {}
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index dc307f8c..4c6e774a 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -23,9 +23,9 @@ def unload():
def before_ui():
ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora())
- extra_network = extra_networks_lora.ExtraNetworkLora()
- extra_networks.register_extra_network(extra_network)
- extra_networks.register_extra_network_alias(extra_network, "lyco")
+ networks.extra_network_lora = extra_networks_lora.ExtraNetworkLora()
+ extra_networks.register_extra_network(networks.extra_network_lora)
+ extra_networks.register_extra_network_alias(networks.extra_network_lora, "lyco")
if not hasattr(torch.nn, 'Linear_forward_before_network'):
diff --git a/modules/processing.py b/modules/processing.py
index 007a4e05..10749aa2 100755
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -157,6 +157,7 @@ class StableDiffusionProcessing:
cached_uc = [None, None]
cached_c = [None, None]
+ comments: dict = None
sampler: sd_samplers_common.Sampler | None = field(default=None, init=False)
is_using_inpainting_conditioning: bool = field(default=False, init=False)
paste_to: tuple | None = field(default=None, init=False)
@@ -196,6 +197,8 @@ class StableDiffusionProcessing:
if self.sampler_index is not None:
print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)
+ self.comments = {}
+
self.sampler_noise_scheduler_override = None
self.s_min_uncond = self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond
self.s_churn = self.s_churn if self.s_churn is not None else opts.s_churn
@@ -226,6 +229,9 @@ class StableDiffusionProcessing:
def sd_model(self, value):
pass
+ def comment(self, text):
+ self.comments[text] = 1
+
def txt2img_image_conditioning(self, x, width=None, height=None):
self.is_using_inpainting_conditioning = self.sd_model.model.conditioning_key in {'hybrid', 'concat'}
@@ -429,7 +435,7 @@ class Processed:
self.subseed = subseed
self.subseed_strength = p.subseed_strength
self.info = info
- self.comments = comments
+ self.comments = "".join(f"{comment}\n" for comment in p.comments)
self.width = p.width
self.height = p.height
self.sampler_name = p.sampler_name
@@ -720,8 +726,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
modules.sd_hijack.model_hijack.apply_circular(p.tiling)
modules.sd_hijack.model_hijack.clear_comments()
- comments = {}
-
p.setup_prompts()
if type(seed) == list:
@@ -801,7 +805,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
p.setup_conds()
for comment in model_hijack.comments:
- comments[comment] = 1
+ p.comment(comment)
p.extra_generation_params.update(model_hijack.extra_generation_params)
@@ -930,7 +934,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
images_list=output_images,
seed=p.all_seeds[0],
info=infotexts[0],
- comments="".join(f"{comment}\n" for comment in comments),
subseed=p.all_subseeds[0],
index_of_first_image=index_of_first_image,
infotexts=infotexts,
--
cgit v1.2.3
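The error-handling shape introduced here: weight application no longer raises out of the hot path; RuntimeErrors are logged at debug level and counted per network, and the counts are surfaced once as a UI comment when the extra network deactivates. A condensed sketch under assumed names (not the extension's exact API):

import logging

errors = {}  # network name -> error count, mirroring ExtraNetworkLora.errors

def apply_with_error_tracking(net_name, layer_name, apply_fn):
    # Swallow per-layer failures so one unfitting lora doesn't abort generation.
    try:
        apply_fn()
    except RuntimeError as e:
        logging.debug(f"Network {net_name} layer {layer_name}: {e}")
        errors[net_name] = errors.get(net_name, 0) + 1

def report(comment):
    # Called once on deactivate; turns the counts into a single user-visible line.
    if errors:
        comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in errors.items()))
        errors.clear()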
From db40d26d086b6e980bd28ce7eb3008ec09b1f0e8 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 13 Aug 2023 16:38:10 +0300
Subject: linter
---
extensions-builtin/Lora/extra_networks_lora.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/extra_networks_lora.py b/extensions-builtin/Lora/extra_networks_lora.py
index 32e32cab..005ff32c 100644
--- a/extensions-builtin/Lora/extra_networks_lora.py
+++ b/extensions-builtin/Lora/extra_networks_lora.py
@@ -1,4 +1,4 @@
-from modules import extra_networks, shared, sd_hijack
+from modules import extra_networks, shared
import networks
--
cgit v1.2.3
From d9cc27cb29926c9cc5dce331da8fbaf996cf4973 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Mon, 14 Aug 2023 13:32:51 +0800
Subject: Fix MHA updown err and support ex-bias for no-bias layer
---
extensions-builtin/Lora/networks.py | 37 +++++++++++++++++++++++++++++--------
1 file changed, 29 insertions(+), 8 deletions(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index ba621139..1645b822 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -277,7 +277,15 @@ def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Li
self.weight.copy_(weights_backup)
if bias_backup is not None:
- self.bias.copy_(bias_backup)
+ if isinstance(self, torch.nn.MultiheadAttention):
+ self.out_proj.bias.copy_(bias_backup)
+ else:
+ self.bias.copy_(bias_backup)
+ else:
+ if isinstance(self, torch.nn.MultiheadAttention):
+ self.out_proj.bias = None
+ else:
+ self.bias = None
def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
@@ -305,7 +313,12 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
bias_backup = getattr(self, "network_bias_backup", None)
if bias_backup is None and getattr(self, 'bias', None) is not None:
- bias_backup = self.bias.to(devices.cpu, copy=True)
+ if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None:
+ bias_backup = self.out_proj.bias.to(devices.cpu, copy=True)
+ elif getattr(self, 'bias', None) is not None:
+ bias_backup = self.bias.to(devices.cpu, copy=True)
+ else:
+ bias_backup = None
self.network_bias_backup = bias_backup
if current_names != wanted_names:
@@ -322,8 +335,11 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))
self.weight += updown
- if ex_bias is not None and getattr(self, 'bias', None) is not None:
- self.bias += ex_bias
+ if ex_bias is not None and hasattr(self, 'bias'):
+ if self.bias is None:
+ self.bias = torch.nn.Parameter(ex_bias)
+ else:
+ self.bias += ex_bias
continue
module_q = net.modules.get(network_layer_name + "_q_proj", None)
@@ -333,14 +349,19 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out:
with torch.no_grad():
- updown_q = module_q.calc_updown(self.in_proj_weight)
- updown_k = module_k.calc_updown(self.in_proj_weight)
- updown_v = module_v.calc_updown(self.in_proj_weight)
+ updown_q, _ = module_q.calc_updown(self.in_proj_weight)
+ updown_k, _ = module_k.calc_updown(self.in_proj_weight)
+ updown_v, _ = module_v.calc_updown(self.in_proj_weight)
updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
- updown_out = module_out.calc_updown(self.out_proj.weight)
+ updown_out, ex_bias = module_out.calc_updown(self.out_proj.weight)
self.in_proj_weight += updown_qkv
self.out_proj.weight += updown_out
+ if ex_bias is not None:
+ if self.out_proj.bias is None:
+ self.out_proj.bias = torch.nn.Parameter(ex_bias)
+ else:
+ self.out_proj.bias += ex_bias
continue
if module is None:
--
cgit v1.2.3
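The MultiheadAttention special case handled above comes down to the output bias living on out_proj rather than on the module itself, plus creating a bias parameter on the fly when the layer was built without one. An isolated sketch of that rule, assuming plain torch modules:

import torch

def add_ex_bias(module: torch.nn.Module, ex_bias: torch.Tensor):
    # MHA keeps its output bias on out_proj; other layers keep it on themselves.
    target = module.out_proj if isinstance(module, torch.nn.MultiheadAttention) else module
    with torch.no_grad():
        if target.bias is None:
            # Layer had no bias: materialize one from the network's delta.
            target.bias = torch.nn.Parameter(ex_bias)
        else:
            target.bias += ex_bias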
From f70ded89365f71d42b6a60a561e8fccfdd25c159 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Mon, 14 Aug 2023 13:53:40 +0800
Subject: remove "if bias exist" check
---
extensions-builtin/Lora/networks.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 96d14344..22fdff4a 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -312,7 +312,7 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
self.network_weights_backup = weights_backup
bias_backup = getattr(self, "network_bias_backup", None)
- if bias_backup is None and getattr(self, 'bias', None) is not None:
+ if bias_backup is None:
if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None:
bias_backup = self.out_proj.bias.to(devices.cpu, copy=True)
elif getattr(self, 'bias', None) is not None:
--
cgit v1.2.3
From a2e213bc7ba29495559c9e53b734047ea879b343 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Mon, 14 Aug 2023 18:50:22 +0900
Subject: separate Extra options
---
.../extra-options-section/scripts/extra_options_section.py | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
index 588b64d2..983f87ff 100644
--- a/extensions-builtin/extra-options-section/scripts/extra_options_section.py
+++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
@@ -22,22 +22,23 @@ class ExtraOptionsSection(scripts.Script):
self.comps = []
self.setting_names = []
self.infotext_fields = []
+ extra_options = shared.opts.extra_options_img2img if is_img2img else shared.opts.extra_options_txt2img
mapping = {k: v for v, k in generation_parameters_copypaste.infotext_to_setting_name_mapping}
with gr.Blocks() as interface:
- with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and shared.opts.extra_options else gr.Group():
+ with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and extra_options else gr.Group():
- row_count = math.ceil(len(shared.opts.extra_options) / shared.opts.extra_options_cols)
+ row_count = math.ceil(len(extra_options) / shared.opts.extra_options_cols)
for row in range(row_count):
with gr.Row():
for col in range(shared.opts.extra_options_cols):
index = row * shared.opts.extra_options_cols + col
- if index >= len(shared.opts.extra_options):
+ if index >= len(extra_options):
break
- setting_name = shared.opts.extra_options[index]
+ setting_name = extra_options[index]
with FormColumn():
comp = ui_settings.create_setting_component(setting_name)
@@ -64,7 +65,8 @@ class ExtraOptionsSection(scripts.Script):
shared.options_templates.update(shared.options_section(('ui', "User interface"), {
- "extra_options": shared.OptionInfo([], "Options in main UI", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img/img2img interfaces").needs_reload_ui(),
+ "extra_options_txt2img": shared.OptionInfo([], "Options in main UI - txt2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img interfaces").needs_reload_ui(),
+ "extra_options_img2img": shared.OptionInfo([], "Options in main UI - img2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in img2img interfaces").needs_reload_ui(),
"extra_options_cols": shared.OptionInfo(1, "Options in main UI - number of columns", gr.Number, {"precision": 0}).needs_reload_ui(),
"extra_options_accordion": shared.OptionInfo(False, "Options in main UI - place into an accordion").needs_reload_ui()
}))
--
cgit v1.2.3
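The gist of the change: the single extra_options list splits into per-tab lists, and the script picks one based on is_img2img before computing its row/column grid. A gradio-free sketch of the selection and layout math (option names mirror those above; the function itself is illustrative):

import math

def layout_options(extra_options_txt2img, extra_options_img2img, is_img2img, cols=1):
    extra_options = extra_options_img2img if is_img2img else extra_options_txt2img
    # Ceil-divide into rows of `cols` settings each, as the script does.
    rows = math.ceil(len(extra_options) / cols) if extra_options else 0
    return [extra_options[r * cols:(r + 1) * cols] for r in range(rows)]

print(layout_options(["CLIP_stop_at_last_layers"], ["img2img_color_correction"], is_img2img=True))
# [['img2img_color_correction']]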
From aa57a89a21d7c0e59a2006a642c3838264c35051 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Tue, 15 Aug 2023 23:41:46 +0800
Subject: full module with ex_bias
---
extensions-builtin/Lora/network_full.py | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/network_full.py b/extensions-builtin/Lora/network_full.py
index 109b4c2c..bf6930e9 100644
--- a/extensions-builtin/Lora/network_full.py
+++ b/extensions-builtin/Lora/network_full.py
@@ -14,9 +14,14 @@ class NetworkModuleFull(network.NetworkModule):
super().__init__(net, weights)
self.weight = weights.w.get("diff")
+ self.ex_bias = weights.w.get("diff_b")
def calc_updown(self, orig_weight):
output_shape = self.weight.shape
updown = self.weight.to(orig_weight.device, dtype=orig_weight.dtype)
+ if self.ex_bias is not None:
+ ex_bias = self.ex_bias.to(orig_weight.device, dtype=orig_weight.dtype)
+ else:
+ ex_bias = None
- return self.finalize_updown(updown, orig_weight, output_shape)
+ return self.finalize_updown(updown, orig_weight, output_shape, ex_bias)
--
cgit v1.2.3
From f01682ee01e81e8ef84fd6fffe8f7aa17233285d Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 15 Aug 2023 19:23:27 +0300
Subject: store patches for Lora in a specialized module
---
extensions-builtin/Lora/lora_patches.py | 31 +++++++++++++
extensions-builtin/Lora/networks.py | 32 +++++++------
extensions-builtin/Lora/scripts/lora_script.py | 52 ++-------------------
modules/patches.py | 64 ++++++++++++++++++++++++++
4 files changed, 118 insertions(+), 61 deletions(-)
create mode 100644 extensions-builtin/Lora/lora_patches.py
create mode 100644 modules/patches.py
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/lora_patches.py b/extensions-builtin/Lora/lora_patches.py
new file mode 100644
index 00000000..b394d8e9
--- /dev/null
+++ b/extensions-builtin/Lora/lora_patches.py
@@ -0,0 +1,31 @@
+import torch
+
+import networks
+from modules import patches
+
+
+class LoraPatches:
+ def __init__(self):
+ self.Linear_forward = patches.patch(__name__, torch.nn.Linear, 'forward', networks.network_Linear_forward)
+ self.Linear_load_state_dict = patches.patch(__name__, torch.nn.Linear, '_load_from_state_dict', networks.network_Linear_load_state_dict)
+ self.Conv2d_forward = patches.patch(__name__, torch.nn.Conv2d, 'forward', networks.network_Conv2d_forward)
+ self.Conv2d_load_state_dict = patches.patch(__name__, torch.nn.Conv2d, '_load_from_state_dict', networks.network_Conv2d_load_state_dict)
+ self.GroupNorm_forward = patches.patch(__name__, torch.nn.GroupNorm, 'forward', networks.network_GroupNorm_forward)
+ self.GroupNorm_load_state_dict = patches.patch(__name__, torch.nn.GroupNorm, '_load_from_state_dict', networks.network_GroupNorm_load_state_dict)
+ self.LayerNorm_forward = patches.patch(__name__, torch.nn.LayerNorm, 'forward', networks.network_LayerNorm_forward)
+ self.LayerNorm_load_state_dict = patches.patch(__name__, torch.nn.LayerNorm, '_load_from_state_dict', networks.network_LayerNorm_load_state_dict)
+ self.MultiheadAttention_forward = patches.patch(__name__, torch.nn.MultiheadAttention, 'forward', networks.network_MultiheadAttention_forward)
+ self.MultiheadAttention_load_state_dict = patches.patch(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict', networks.network_MultiheadAttention_load_state_dict)
+
+ def undo(self):
+ self.Linear_forward = patches.undo(__name__, torch.nn.Linear, 'forward')
+ self.Linear_load_state_dict = patches.undo(__name__, torch.nn.Linear, '_load_from_state_dict')
+ self.Conv2d_forward = patches.undo(__name__, torch.nn.Conv2d, 'forward')
+ self.Conv2d_load_state_dict = patches.undo(__name__, torch.nn.Conv2d, '_load_from_state_dict')
+ self.GroupNorm_forward = patches.undo(__name__, torch.nn.GroupNorm, 'forward')
+ self.GroupNorm_load_state_dict = patches.undo(__name__, torch.nn.GroupNorm, '_load_from_state_dict')
+ self.LayerNorm_forward = patches.undo(__name__, torch.nn.LayerNorm, 'forward')
+ self.LayerNorm_load_state_dict = patches.undo(__name__, torch.nn.LayerNorm, '_load_from_state_dict')
+ self.MultiheadAttention_forward = patches.undo(__name__, torch.nn.MultiheadAttention, 'forward')
+ self.MultiheadAttention_load_state_dict = patches.undo(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict')
+
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 22fdff4a..9fca36b6 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -2,6 +2,7 @@ import logging
import os
import re
+import lora_patches
import network
import network_lora
import network_hada
@@ -418,74 +419,74 @@ def network_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
def network_Linear_forward(self, input):
if shared.opts.lora_functional:
- return network_forward(self, input, torch.nn.Linear_forward_before_network)
+ return network_forward(self, input, originals.Linear_forward)
network_apply_weights(self)
- return torch.nn.Linear_forward_before_network(self, input)
+ return originals.Linear_forward(self, input)
def network_Linear_load_state_dict(self, *args, **kwargs):
network_reset_cached_weight(self)
- return torch.nn.Linear_load_state_dict_before_network(self, *args, **kwargs)
+ return originals.Linear_load_state_dict(self, *args, **kwargs)
def network_Conv2d_forward(self, input):
if shared.opts.lora_functional:
- return network_forward(self, input, torch.nn.Conv2d_forward_before_network)
+ return network_forward(self, input, originals.Conv2d_forward)
network_apply_weights(self)
- return torch.nn.Conv2d_forward_before_network(self, input)
+ return originals.Conv2d_forward(self, input)
def network_Conv2d_load_state_dict(self, *args, **kwargs):
network_reset_cached_weight(self)
- return torch.nn.Conv2d_load_state_dict_before_network(self, *args, **kwargs)
+ return originals.Conv2d_load_state_dict(self, *args, **kwargs)
def network_GroupNorm_forward(self, input):
if shared.opts.lora_functional:
- return network_forward(self, input, torch.nn.GroupNorm_forward_before_network)
+ return network_forward(self, input, originals.GroupNorm_forward)
network_apply_weights(self)
- return torch.nn.GroupNorm_forward_before_network(self, input)
+ return originals.GroupNorm_forward(self, input)
def network_GroupNorm_load_state_dict(self, *args, **kwargs):
network_reset_cached_weight(self)
- return torch.nn.GroupNorm_load_state_dict_before_network(self, *args, **kwargs)
+ return originals.GroupNorm_load_state_dict(self, *args, **kwargs)
def network_LayerNorm_forward(self, input):
if shared.opts.lora_functional:
- return network_forward(self, input, torch.nn.LayerNorm_forward_before_network)
+ return network_forward(self, input, originals.LayerNorm_forward)
network_apply_weights(self)
- return torch.nn.LayerNorm_forward_before_network(self, input)
+ return originals.LayerNorm_forward(self, input)
def network_LayerNorm_load_state_dict(self, *args, **kwargs):
network_reset_cached_weight(self)
- return torch.nn.LayerNorm_load_state_dict_before_network(self, *args, **kwargs)
+ return originals.LayerNorm_load_state_dict(self, *args, **kwargs)
def network_MultiheadAttention_forward(self, *args, **kwargs):
network_apply_weights(self)
- return torch.nn.MultiheadAttention_forward_before_network(self, *args, **kwargs)
+ return originals.MultiheadAttention_forward(self, *args, **kwargs)
def network_MultiheadAttention_load_state_dict(self, *args, **kwargs):
network_reset_cached_weight(self)
- return torch.nn.MultiheadAttention_load_state_dict_before_network(self, *args, **kwargs)
+ return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs)
def list_available_networks():
@@ -552,6 +553,9 @@ def infotext_pasted(infotext, params):
if added:
params["Prompt"] += "\n" + "".join(added)
+
+originals: lora_patches.LoraPatches = None
+
extra_network_lora = None
available_networks = {}
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 4c6e774a..546fb55e 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -7,17 +7,14 @@ from fastapi import FastAPI
import network
import networks
import lora # noqa:F401
+import lora_patches
import extra_networks_lora
import ui_extra_networks_lora
-from modules import script_callbacks, ui_extra_networks, extra_networks, shared
+from modules import script_callbacks, ui_extra_networks, extra_networks, shared, patches
+
def unload():
- torch.nn.Linear.forward = torch.nn.Linear_forward_before_network
- torch.nn.Linear._load_from_state_dict = torch.nn.Linear_load_state_dict_before_network
- torch.nn.Conv2d.forward = torch.nn.Conv2d_forward_before_network
- torch.nn.Conv2d._load_from_state_dict = torch.nn.Conv2d_load_state_dict_before_network
- torch.nn.MultiheadAttention.forward = torch.nn.MultiheadAttention_forward_before_network
- torch.nn.MultiheadAttention._load_from_state_dict = torch.nn.MultiheadAttention_load_state_dict_before_network
+ networks.originals.undo()
def before_ui():
@@ -28,46 +25,7 @@ def before_ui():
extra_networks.register_extra_network_alias(networks.extra_network_lora, "lyco")
-if not hasattr(torch.nn, 'Linear_forward_before_network'):
- torch.nn.Linear_forward_before_network = torch.nn.Linear.forward
-
-if not hasattr(torch.nn, 'Linear_load_state_dict_before_network'):
- torch.nn.Linear_load_state_dict_before_network = torch.nn.Linear._load_from_state_dict
-
-if not hasattr(torch.nn, 'Conv2d_forward_before_network'):
- torch.nn.Conv2d_forward_before_network = torch.nn.Conv2d.forward
-
-if not hasattr(torch.nn, 'Conv2d_load_state_dict_before_network'):
- torch.nn.Conv2d_load_state_dict_before_network = torch.nn.Conv2d._load_from_state_dict
-
-if not hasattr(torch.nn, 'GroupNorm_forward_before_network'):
- torch.nn.GroupNorm_forward_before_network = torch.nn.GroupNorm.forward
-
-if not hasattr(torch.nn, 'GroupNorm_load_state_dict_before_network'):
- torch.nn.GroupNorm_load_state_dict_before_network = torch.nn.GroupNorm._load_from_state_dict
-
-if not hasattr(torch.nn, 'LayerNorm_forward_before_network'):
- torch.nn.LayerNorm_forward_before_network = torch.nn.LayerNorm.forward
-
-if not hasattr(torch.nn, 'LayerNorm_load_state_dict_before_network'):
- torch.nn.LayerNorm_load_state_dict_before_network = torch.nn.LayerNorm._load_from_state_dict
-
-if not hasattr(torch.nn, 'MultiheadAttention_forward_before_network'):
- torch.nn.MultiheadAttention_forward_before_network = torch.nn.MultiheadAttention.forward
-
-if not hasattr(torch.nn, 'MultiheadAttention_load_state_dict_before_network'):
- torch.nn.MultiheadAttention_load_state_dict_before_network = torch.nn.MultiheadAttention._load_from_state_dict
-
-torch.nn.Linear.forward = networks.network_Linear_forward
-torch.nn.Linear._load_from_state_dict = networks.network_Linear_load_state_dict
-torch.nn.Conv2d.forward = networks.network_Conv2d_forward
-torch.nn.Conv2d._load_from_state_dict = networks.network_Conv2d_load_state_dict
-torch.nn.GroupNorm.forward = networks.network_GroupNorm_forward
-torch.nn.GroupNorm._load_from_state_dict = networks.network_GroupNorm_load_state_dict
-torch.nn.LayerNorm.forward = networks.network_LayerNorm_forward
-torch.nn.LayerNorm._load_from_state_dict = networks.network_LayerNorm_load_state_dict
-torch.nn.MultiheadAttention.forward = networks.network_MultiheadAttention_forward
-torch.nn.MultiheadAttention._load_from_state_dict = networks.network_MultiheadAttention_load_state_dict
+networks.originals = lora_patches.LoraPatches()
script_callbacks.on_model_loaded(networks.assign_network_names_to_compvis_modules)
script_callbacks.on_script_unloaded(unload)
diff --git a/modules/patches.py b/modules/patches.py
new file mode 100644
index 00000000..348235e7
--- /dev/null
+++ b/modules/patches.py
@@ -0,0 +1,64 @@
+from collections import defaultdict
+
+
+def patch(key, obj, field, replacement):
+ """Replaces a function in a module or a class.
+
+ Also stores the original function in this module, so it can be retrieved later via original(key, obj, field).
+ If the function is already replaced by this caller (key), an exception is raised -- use undo() first.
+
+ Arguments:
+ key: identifying information for who is doing the replacement. You can use __name__.
+ obj: the module or the class
+ field: name of the function as a string
+ replacement: the new function
+
+ Returns:
+ the original function
+ """
+
+ patch_key = (obj, field)
+ if patch_key in originals[key]:
+ raise RuntimeError(f"patch for {field} is already applied")
+
+ original_func = getattr(obj, field)
+ originals[key][patch_key] = original_func
+
+ setattr(obj, field, replacement)
+
+ return original_func
+
+
+def undo(key, obj, field):
+ """Undoes the peplacement by the patch().
+
+ If the function is not replaced, raises an exception.
+
+ Arguments:
+ key: identifying information for who is doing the replacement. You can use __name__.
+ obj: the module or the class
+ field: name of the function as a string
+
+ Returns:
+ Always None
+ """
+
+ patch_key = (obj, field)
+
+ if patch_key not in originals[key]:
+ raise RuntimeError(f"there is no patch for {field} to undo")
+
+ original_func = originals[key].pop(patch_key)
+ setattr(obj, field, original_func)
+
+ return None
+
+
+def original(key, obj, field):
+ """Returns the original function for the patch created by the patch() function"""
+ patch_key = (obj, field)
+
+ return originals[key].get(patch_key, None)
+
+
+originals = defaultdict(dict)
--
cgit v1.2.3
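Based on the functions shown in the diff, a usage sketch for the new modules/patches.py helper: patch() installs a replacement and records the original under a caller key, original() retrieves it, and undo() restores it (patching the same target twice under one key raises). Assumes the webui's modules package is importable:

import torch
from modules import patches

def logged_forward(self, input):
    # Delegate to the stored original after logging; __name__ is the caller key.
    original = patches.original(__name__, torch.nn.Linear, 'forward')
    print("Linear.forward called")
    return original(self, input)

patches.patch(__name__, torch.nn.Linear, 'forward', logged_forward)
layer = torch.nn.Linear(4, 4)
layer(torch.zeros(1, 4))                              # prints, then runs the real forward
patches.undo(__name__, torch.nn.Linear, 'forward')    # restore the unpatched method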
From 85fcb7b8dfe7b3dd06931943f095c77f1043dc25 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 15 Aug 2023 19:24:55 +0300
Subject: lint
---
extensions-builtin/Lora/scripts/lora_script.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 546fb55e..ef23968c 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -1,6 +1,5 @@
import re
-import torch
import gradio as gr
from fastapi import FastAPI
@@ -10,7 +9,7 @@ import lora # noqa:F401
import lora_patches
import extra_networks_lora
import ui_extra_networks_lora
-from modules import script_callbacks, ui_extra_networks, extra_networks, shared, patches
+from modules import script_callbacks, ui_extra_networks, extra_networks, shared
def unload():
--
cgit v1.2.3
From 86221269f98ef9b21a6e6c9d04b86e2fb5cb33d3 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 16 Aug 2023 09:55:35 +0300
Subject: RAM optimization round 2
---
extensions-builtin/Lora/networks.py | 5 +++-
modules/sd_disable_initialization.py | 51 +++++++++++++++++++++++++++++++-----
2 files changed, 48 insertions(+), 8 deletions(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 9fca36b6..96f935b2 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -304,7 +304,10 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
wanted_names = tuple((x.name, x.te_multiplier, x.unet_multiplier, x.dyn_dim) for x in loaded_networks)
weights_backup = getattr(self, "network_weights_backup", None)
- if weights_backup is None:
+ if weights_backup is None and wanted_names != ():
+ if current_names != ():
+ raise RuntimeError("no backup weights found and current weights are not unchanged")
+
if isinstance(self, torch.nn.MultiheadAttention):
weights_backup = (self.in_proj_weight.to(devices.cpu, copy=True), self.out_proj.weight.to(devices.cpu, copy=True))
else:
diff --git a/modules/sd_disable_initialization.py b/modules/sd_disable_initialization.py
index 695c5736..719eeb93 100644
--- a/modules/sd_disable_initialization.py
+++ b/modules/sd_disable_initialization.py
@@ -168,22 +168,59 @@ class LoadStateDictOnMeta(ReplaceHelper):
device = self.device
def load_from_state_dict(original, self, state_dict, prefix, *args, **kwargs):
- params = [(name, param) for name, param in self._parameters.items() if param is not None and param.is_meta]
+ used_param_keys = []
+
+ for name, param in self._parameters.items():
+ if param is None:
+ continue
+
+ key = prefix + name
+ sd_param = sd.pop(key, None)
+ if sd_param is not None:
+ state_dict[key] = sd_param
+ used_param_keys.append(key)
- for name, param in params:
if param.is_meta:
- self._parameters[name] = torch.nn.parameter.Parameter(torch.zeros_like(param, device=device), requires_grad=param.requires_grad)
+ dtype = sd_param.dtype if sd_param is not None else param.dtype
+ self._parameters[name] = torch.nn.parameter.Parameter(torch.zeros_like(param, device=device, dtype=dtype), requires_grad=param.requires_grad)
+
+ for name in self._buffers:
+ key = prefix + name
+
+ sd_param = sd.pop(key, None)
+ if sd_param is not None:
+ state_dict[key] = sd_param
+ used_param_keys.append(key)
original(self, state_dict, prefix, *args, **kwargs)
- for name, _ in params:
- key = prefix + name
- if key in sd:
- del sd[key]
+ for key in used_param_keys:
+ state_dict.pop(key, None)
+
+ def load_state_dict(original, self, state_dict, strict=True):
+ """torch makes a lot of copies of the dictionary with weights, so just deleting entries from state_dict does not help
+ because the same values are stored in multiple copies of the dict. The trick used here is to give torch a dict with
+ all weights on meta device, i.e. deleted, and then it doesn't matter how many copies torch makes.
+
+ In _load_from_state_dict, the correct weight will be obtained from a single dict with the right weights (sd).
+
+ The dangerous thing about this is that if _load_from_state_dict is not called (because some exotic module overrides
+ the function and does not call the original), the state dict will simply fail to load, since the weights
+ would still be on the meta device.
+ """
+
+ if state_dict == sd:
+ state_dict = {k: v.to(device="meta", dtype=v.dtype) for k, v in state_dict.items()}
+
+ original(self, state_dict, strict=strict)
+ module_load_state_dict = self.replace(torch.nn.Module, 'load_state_dict', lambda *args, **kwargs: load_state_dict(module_load_state_dict, *args, **kwargs))
+ module_load_from_state_dict = self.replace(torch.nn.Module, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(module_load_from_state_dict, *args, **kwargs))
linear_load_from_state_dict = self.replace(torch.nn.Linear, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(linear_load_from_state_dict, *args, **kwargs))
conv2d_load_from_state_dict = self.replace(torch.nn.Conv2d, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(conv2d_load_from_state_dict, *args, **kwargs))
mha_load_from_state_dict = self.replace(torch.nn.MultiheadAttention, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(mha_load_from_state_dict, *args, **kwargs))
+ layer_norm_load_from_state_dict = self.replace(torch.nn.LayerNorm, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(layer_norm_load_from_state_dict, *args, **kwargs))
+ group_norm_load_from_state_dict = self.replace(torch.nn.GroupNorm, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(group_norm_load_from_state_dict, *args, **kwargs))
def __exit__(self, exc_type, exc_val, exc_tb):
self.restore()
--
cgit v1.2.3
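The docstring's meta-device trick is easy to see in isolation: moving a tensor to device="meta" keeps its shape and dtype but drops the storage, so however many copies torch makes of such a state dict, no RAM is spent on weight data; the real values come from the single held dict (sd) inside _load_from_state_dict. A minimal demonstration:

import torch

sd = {"weight": torch.randn(1024, 1024)}
# Same keys/shapes/dtypes, but no backing storage for the data.
meta_sd = {k: v.to(device="meta") for k, v in sd.items()}

print(meta_sd["weight"].shape)    # torch.Size([1024, 1024])
print(meta_sd["weight"].is_meta)  # True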
From a3c8510c053b4f5f09f4230949271054591814df Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Sun, 20 Aug 2023 00:37:39 -0400
Subject: Add resize-handler extension
---
.../resize-handle/javascript/resize-handle.js | 106 +++++++++++++++++++++
extensions-builtin/resize-handle/style.css | 10 ++
2 files changed, 116 insertions(+)
create mode 100644 extensions-builtin/resize-handle/javascript/resize-handle.js
create mode 100644 extensions-builtin/resize-handle/style.css
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/resize-handle/javascript/resize-handle.js b/extensions-builtin/resize-handle/javascript/resize-handle.js
new file mode 100644
index 00000000..a07a01d2
--- /dev/null
+++ b/extensions-builtin/resize-handle/javascript/resize-handle.js
@@ -0,0 +1,106 @@
+onUiLoaded(async() => {
+ const GRADIO_MIN_WIDTH = 320;
+ const GRID_TEMPLATE_COLUMNS = '1fr 16px 1fr';
+ const PAD = 16;
+ const DEBOUNCE_TIME = 100;
+
+ const R = {
+ tracking: false,
+ parent: null,
+ parentWidth: null,
+ leftCol: null,
+ leftColStartWidth: null,
+ screenX: null,
+ };
+
+ let resizeTimer;
+
+ const leftCols = [
+ gradioApp().querySelector('#txt2img_settings'),
+ gradioApp().querySelector('#img2img_settings'),
+ ];
+
+ function setLeftColGridTemplate(el, width) {
+ el.style.gridTemplateColumns = `${width}px 16px 1fr`;
+ }
+
+ function displayResizeHandle(parent) {
+ if (window.innerWidth < GRADIO_MIN_WIDTH * 2 + PAD * 4) {
+ parent.style.display = 'flex';
+ if (R.handle != null) {
+ R.handle.style.opacity = '0';
+ }
+ return false;
+ } else {
+ parent.style.display = 'grid';
+ if (R.handle != null) {
+ R.handle.style.opacity = '100';
+ }
+ return true;
+ }
+ }
+
+ function setup() {
+ for (const leftCol of leftCols) {
+ const parent = leftCol.parentElement;
+ const rightCol = parent.lastElementChild;
+
+ if (!displayResizeHandle(parent)) {
+ return;
+ }
+
+ parent.style.display = 'grid';
+ parent.style.gap = '0';
+ parent.style.gridTemplateColumns = GRID_TEMPLATE_COLUMNS;
+
+ const resizeHandle = document.createElement('div');
+ resizeHandle.classList.add('resize-handle');
+ parent.insertBefore(resizeHandle, rightCol);
+
+ resizeHandle.addEventListener('mousedown', (evt) => {
+ R.tracking = true;
+ R.parent = parent;
+ R.parentWidth = parent.offsetWidth;
+ R.handle = resizeHandle;
+ R.leftCol = leftCol;
+ R.leftColStartWidth = leftCol.offsetWidth;
+ R.screenX = evt.screenX;
+ });
+ }
+ }
+
+ window.addEventListener('mousemove', (evt) => {
+ if (R.tracking) {
+ const delta = R.screenX - evt.screenX;
+ const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), GRADIO_MIN_WIDTH);
+ setLeftColGridTemplate(R.parent, leftColWidth);
+ }
+ });
+
+ window.addEventListener('mouseup', () => R.tracking = false);
+
+ window.addEventListener('resize', () => {
+ clearTimeout(resizeTimer);
+
+ resizeTimer = setTimeout(() => {
+ for (const leftCol of leftCols) {
+ const parent = leftCol.parentElement;
+
+ if (displayResizeHandle(parent) && parent.style.gridTemplateColumns != GRID_TEMPLATE_COLUMNS) {
+ const oldParentWidth = R.parentWidth;
+ const newParentWidth = parent.offsetWidth;
+ const widthL = parseInt(parent.style.gridTemplateColumns.split(' ')[0]);
+
+ const ratio = newParentWidth / oldParentWidth;
+
+ const newWidthL = Math.max(Math.floor(ratio * widthL), GRADIO_MIN_WIDTH);
+ setLeftColGridTemplate(parent, newWidthL);
+
+ R.parentWidth = newParentWidth;
+ }
+ }
+ }, DEBOUNCE_TIME);
+ });
+
+ setup();
+});
diff --git a/extensions-builtin/resize-handle/style.css b/extensions-builtin/resize-handle/style.css
new file mode 100644
index 00000000..0e18267a
--- /dev/null
+++ b/extensions-builtin/resize-handle/style.css
@@ -0,0 +1,10 @@
+.resize-handle{
+ cursor: col-resize;
+ grid-column: 2 / 3;
+ min-width: 8px !important;
+ max-width: 8px !important;
+ height: 100%;
+ border-left: 1px dashed var(--border-color-primary);
+ user-select: none;
+ margin-left: 8px;
+}
\ No newline at end of file
--
cgit v1.2.3
From db5c304e2968b5c16810900b9a63cfcf7e205e20 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 20 Aug 2023 13:38:35 +0300
Subject: make live previews play nice with window/slider resizes
---
extensions-builtin/mobile/javascript/mobile.js | 2 ++
javascript/progressbar.js | 21 ++++-----------------
style.css | 17 +++++++++--------
3 files changed, 15 insertions(+), 25 deletions(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/mobile/javascript/mobile.js b/extensions-builtin/mobile/javascript/mobile.js
index 12cae4b7..1b835a51 100644
--- a/extensions-builtin/mobile/javascript/mobile.js
+++ b/extensions-builtin/mobile/javascript/mobile.js
@@ -20,6 +20,8 @@ function reportWindowSize() {
var button = gradioApp().getElementById(tab + '_generate_box');
var target = gradioApp().getElementById(currentlyMobile ? tab + '_results' : tab + '_actions_column');
target.insertBefore(button, target.firstElementChild);
+
+ gradioApp().getElementById(tab + '_results').classList.toggle('mobile', currentlyMobile);
}
}
diff --git a/javascript/progressbar.js b/javascript/progressbar.js
index 8a339982..a7c69937 100644
--- a/javascript/progressbar.js
+++ b/javascript/progressbar.js
@@ -69,7 +69,6 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
var dateStart = new Date();
var wasEverActive = false;
var parentProgressbar = progressbarContainer.parentNode;
- var parentGallery = gallery ? gallery.parentNode : null;
var divProgress = document.createElement('div');
divProgress.className = 'progressDiv';
@@ -80,16 +79,16 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
divProgress.appendChild(divInner);
parentProgressbar.insertBefore(divProgress, progressbarContainer);
- if (parentGallery) {
+ if (gallery) {
var livePreview = document.createElement('div');
livePreview.className = 'livePreview';
- parentGallery.insertBefore(livePreview, gallery);
+ gallery.insertBefore(livePreview, gallery.firstElementChild);
}
var removeProgressBar = function() {
setTitle("");
parentProgressbar.removeChild(divProgress);
- if (parentGallery) parentGallery.removeChild(livePreview);
+ if (gallery) gallery.removeChild(livePreview);
atEnd();
};
@@ -100,12 +99,6 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
return;
}
- var rect = progressbarContainer.getBoundingClientRect();
-
- if (rect.width) {
- divProgress.style.width = rect.width + "px";
- }
-
let progressText = "";
divInner.style.width = ((res.progress || 0) * 100.0) + '%';
@@ -151,17 +144,11 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
}, function() {
removeProgressBar();
});
- }
+ };
var funLivePreview = function(id_task, id_live_preview) {
request("./internal/progress", {id_task: id_task, id_live_preview: id_live_preview}, function(res) {
if (res.live_preview && gallery) {
- rect = gallery.getBoundingClientRect();
- if (rect.width) {
- livePreview.style.width = rect.width + "px";
- livePreview.style.height = rect.height + "px";
- }
-
var img = new Image();
img.onload = function() {
livePreview.appendChild(img);
diff --git a/style.css b/style.css
index 38a01e72..5cd9f9c2 100644
--- a/style.css
+++ b/style.css
@@ -499,11 +499,15 @@ table.popup-table .link{
/* live preview */
.progressDiv{
- position: relative;
+ position: absolute;
height: 20px;
background: #b4c0cc;
border-radius: 3px !important;
- margin-bottom: -3px;
+ top: -20px;
+}
+
+[id$=_results].mobile{
+ margin-top: 28px;
}
.dark .progressDiv{
@@ -528,12 +532,9 @@ table.popup-table .link{
.livePreview{
position: absolute;
z-index: 300;
- background-color: white;
- margin: -4px;
-}
-
-.dark .livePreview{
- background-color: rgb(17 24 39 / var(--tw-bg-opacity));
+ background: var(--background-fill-primary);
+ width: 100%;
+ height: 100%;
}
.livePreview img{
--
cgit v1.2.3
From 4a2bf65fea0a6d2c389c47161ce0960991e6f56b Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 20 Aug 2023 13:40:11 +0300
Subject: make mobile built-in extension actually do something
---
extensions-builtin/mobile/javascript/mobile.js | 4 ++++
1 file changed, 4 insertions(+)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/mobile/javascript/mobile.js b/extensions-builtin/mobile/javascript/mobile.js
index 1b835a51..652f07ac 100644
--- a/extensions-builtin/mobile/javascript/mobile.js
+++ b/extensions-builtin/mobile/javascript/mobile.js
@@ -26,3 +26,7 @@ function reportWindowSize() {
}
window.addEventListener("resize", reportWindowSize);
+
+onUiLoaded(function() {
+ reportWindowSize();
+});
--
cgit v1.2.3
From df595ae3135ef12c135f43ef2a0f792708aab4b3 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 21 Aug 2023 08:48:22 +0300
Subject: make resize handle available to extensions
---
.eslintrc.js | 4 +-
.../resize-handle/javascript/resize-handle.js | 106 --------------------
extensions-builtin/resize-handle/style.css | 10 --
javascript/resizeHandle.js | 109 +++++++++++++++++++++
modules/ui.py | 6 +-
modules/ui_components.py | 12 +++
style.css | 12 +++
7 files changed, 139 insertions(+), 120 deletions(-)
delete mode 100644 extensions-builtin/resize-handle/javascript/resize-handle.js
delete mode 100644 extensions-builtin/resize-handle/style.css
create mode 100644 javascript/resizeHandle.js
(limited to 'extensions-builtin')
diff --git a/.eslintrc.js b/.eslintrc.js
index e3b4fb76..4777c276 100644
--- a/.eslintrc.js
+++ b/.eslintrc.js
@@ -90,6 +90,8 @@ module.exports = {
// localStorage.js
localSet: "readonly",
localGet: "readonly",
- localRemove: "readonly"
+ localRemove: "readonly",
+ // resizeHandle.js
+ setupResizeHandle: "writable"
}
};
diff --git a/extensions-builtin/resize-handle/javascript/resize-handle.js b/extensions-builtin/resize-handle/javascript/resize-handle.js
deleted file mode 100644
index a07a01d2..00000000
--- a/extensions-builtin/resize-handle/javascript/resize-handle.js
+++ /dev/null
@@ -1,106 +0,0 @@
-onUiLoaded(async() => {
- const GRADIO_MIN_WIDTH = 320;
- const GRID_TEMPLATE_COLUMNS = '1fr 16px 1fr';
- const PAD = 16;
- const DEBOUNCE_TIME = 100;
-
- const R = {
- tracking: false,
- parent: null,
- parentWidth: null,
- leftCol: null,
- leftColStartWidth: null,
- screenX: null,
- };
-
- let resizeTimer;
-
- const leftCols = [
- gradioApp().querySelector('#txt2img_settings'),
- gradioApp().querySelector('#img2img_settings'),
- ];
-
- function setLeftColGridTemplate(el, width) {
- el.style.gridTemplateColumns = `${width}px 16px 1fr`;
- }
-
- function displayResizeHandle(parent) {
- if (window.innerWidth < GRADIO_MIN_WIDTH * 2 + PAD * 4) {
- parent.style.display = 'flex';
- if (R.handle != null) {
- R.handle.style.opacity = '0';
- }
- return false;
- } else {
- parent.style.display = 'grid';
- if (R.handle != null) {
- R.handle.style.opacity = '100';
- }
- return true;
- }
- }
-
- function setup() {
- for (const leftCol of leftCols) {
- const parent = leftCol.parentElement;
- const rightCol = parent.lastElementChild;
-
- if (!displayResizeHandle(parent)) {
- return;
- }
-
- parent.style.display = 'grid';
- parent.style.gap = '0';
- parent.style.gridTemplateColumns = GRID_TEMPLATE_COLUMNS;
-
- const resizeHandle = document.createElement('div');
- resizeHandle.classList.add('resize-handle');
- parent.insertBefore(resizeHandle, rightCol);
-
- resizeHandle.addEventListener('mousedown', (evt) => {
- R.tracking = true;
- R.parent = parent;
- R.parentWidth = parent.offsetWidth;
- R.handle = resizeHandle;
- R.leftCol = leftCol;
- R.leftColStartWidth = leftCol.offsetWidth;
- R.screenX = evt.screenX;
- });
- }
- }
-
- window.addEventListener('mousemove', (evt) => {
- if (R.tracking) {
- const delta = R.screenX - evt.screenX;
- const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), GRADIO_MIN_WIDTH);
- setLeftColGridTemplate(R.parent, leftColWidth);
- }
- });
-
- window.addEventListener('mouseup', () => R.tracking = false);
-
- window.addEventListener('resize', () => {
- clearTimeout(resizeTimer);
-
- resizeTimer = setTimeout(() => {
- for (const leftCol of leftCols) {
- const parent = leftCol.parentElement;
-
- if (displayResizeHandle(parent) && parent.style.gridTemplateColumns != GRID_TEMPLATE_COLUMNS) {
- const oldParentWidth = R.parentWidth;
- const newParentWidth = parent.offsetWidth;
- const widthL = parseInt(parent.style.gridTemplateColumns.split(' ')[0]);
-
- const ratio = newParentWidth / oldParentWidth;
-
- const newWidthL = Math.max(Math.floor(ratio * widthL), GRADIO_MIN_WIDTH);
- setLeftColGridTemplate(parent, newWidthL);
-
- R.parentWidth = newParentWidth;
- }
- }
- }, DEBOUNCE_TIME);
- });
-
- setup();
-});
diff --git a/extensions-builtin/resize-handle/style.css b/extensions-builtin/resize-handle/style.css
deleted file mode 100644
index 0e18267a..00000000
--- a/extensions-builtin/resize-handle/style.css
+++ /dev/null
@@ -1,10 +0,0 @@
-.resize-handle{
- cursor: col-resize;
- grid-column: 2 / 3;
- min-width: 8px !important;
- max-width: 8px !important;
- height: 100%;
- border-left: 1px dashed var(--border-color-primary);
- user-select: none;
- margin-left: 8px;
-}
\ No newline at end of file
diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js
new file mode 100644
index 00000000..71023f9f
--- /dev/null
+++ b/javascript/resizeHandle.js
@@ -0,0 +1,109 @@
+(function() {
+ const GRADIO_MIN_WIDTH = 320;
+ const GRID_TEMPLATE_COLUMNS = '1fr 16px 1fr';
+ const PAD = 16;
+ const DEBOUNCE_TIME = 100;
+
+ const R = {
+ tracking: false,
+ parent: null,
+ parentWidth: null,
+ leftCol: null,
+ leftColStartWidth: null,
+ screenX: null,
+ };
+
+ let resizeTimer;
+ let parents = [];
+
+ function setLeftColGridTemplate(el, width) {
+ el.style.gridTemplateColumns = `${width}px 16px 1fr`;
+ }
+
+ function displayResizeHandle(parent) {
+ if (window.innerWidth < GRADIO_MIN_WIDTH * 2 + PAD * 4) {
+ parent.style.display = 'flex';
+ if (R.handle != null) {
+ R.handle.style.opacity = '0';
+ }
+ return false;
+ } else {
+ parent.style.display = 'grid';
+ if (R.handle != null) {
+ R.handle.style.opacity = '100';
+ }
+ return true;
+ }
+ }
+
+ function afterResize(parent) {
+ if (displayResizeHandle(parent) && parent.style.gridTemplateColumns != GRID_TEMPLATE_COLUMNS) {
+ const oldParentWidth = R.parentWidth;
+ const newParentWidth = parent.offsetWidth;
+ const widthL = parseInt(parent.style.gridTemplateColumns.split(' ')[0]);
+
+ const ratio = newParentWidth / oldParentWidth;
+
+ const newWidthL = Math.max(Math.floor(ratio * widthL), GRADIO_MIN_WIDTH);
+ setLeftColGridTemplate(parent, newWidthL);
+
+ R.parentWidth = newParentWidth;
+ }
+ }
+
+ function setup(parent) {
+ const leftCol = parent.firstElementChild;
+ const rightCol = parent.lastElementChild;
+
+ parents.push(parent);
+
+ parent.style.display = 'grid';
+ parent.style.gap = '0';
+ parent.style.gridTemplateColumns = GRID_TEMPLATE_COLUMNS;
+
+ const resizeHandle = document.createElement('div');
+ resizeHandle.classList.add('resize-handle');
+ parent.insertBefore(resizeHandle, rightCol);
+
+ resizeHandle.addEventListener('mousedown', (evt) => {
+ R.tracking = true;
+ R.parent = parent;
+ R.parentWidth = parent.offsetWidth;
+ R.handle = resizeHandle;
+ R.leftCol = leftCol;
+ R.leftColStartWidth = leftCol.offsetWidth;
+ R.screenX = evt.screenX;
+ });
+
+ afterResize(parent);
+ }
+
+ window.addEventListener('mousemove', (evt) => {
+ if (R.tracking) {
+ const delta = R.screenX - evt.screenX;
+ const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), GRADIO_MIN_WIDTH);
+ setLeftColGridTemplate(R.parent, leftColWidth);
+ }
+ });
+
+ window.addEventListener('mouseup', () => R.tracking = false);
+
+
+ window.addEventListener('resize', () => {
+ clearTimeout(resizeTimer);
+
+ resizeTimer = setTimeout(function() {
+ for (const parent of parents) {
+ afterResize(parent);
+ }
+ }, DEBOUNCE_TIME);
+ });
+
+ setupResizeHandle = setup;
+})();
+
+onUiLoaded(function() {
+ for (var elem of gradioApp().querySelectorAll('.resize-handle-row')) {
+ setupResizeHandle(elem);
+ }
+});
diff --git a/modules/ui.py b/modules/ui.py
index 01f77849..2b6a13cb 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -13,7 +13,7 @@ from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_grad
from modules import gradio_extensons # noqa: F401
from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, ui_extra_networks
-from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion
+from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion, ResizeHandleRow
from modules.paths import script_path
from modules.ui_common import create_refresh_button
from modules.ui_gradio_extensions import reload_javascript
@@ -333,7 +333,7 @@ def create_ui():
extra_tabs = gr.Tabs(elem_id="txt2img_extra_tabs")
extra_tabs.__enter__()
- with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, gr.Row(equal_height=False):
+ with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, ResizeHandleRow(equal_height=False):
with gr.Column(variant='compact', elem_id="txt2img_settings"):
scripts.scripts_txt2img.prepare_ui()
@@ -549,7 +549,7 @@ def create_ui():
extra_tabs = gr.Tabs(elem_id="img2img_extra_tabs")
extra_tabs.__enter__()
- with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, FormRow(equal_height=False):
+ with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, ResizeHandleRow(equal_height=False):
with gr.Column(variant='compact', elem_id="img2img_settings"):
copy_image_buttons = []
copy_image_destinations = {}
diff --git a/modules/ui_components.py b/modules/ui_components.py
index d08b2b99..55979f62 100644
--- a/modules/ui_components.py
+++ b/modules/ui_components.py
@@ -20,6 +20,18 @@ class ToolButton(FormComponent, gr.Button):
return "button"
+class ResizeHandleRow(gr.Row):
+ """Same as gr.Row but fits inside gradio forms"""
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ self.elem_classes.append("resize-handle-row")
+
+ def get_block_name(self):
+ return "row"
+
+
class FormRow(FormComponent, gr.Row):
"""Same as gr.Row but fits inside gradio forms"""
diff --git a/style.css b/style.css
index 46125864..4166a3df 100644
--- a/style.css
+++ b/style.css
@@ -1055,3 +1055,15 @@ div.accordions > div.input-accordion.input-accordion-open{
position: sticky;
top: 0.5em;
}
+
+
+.resize-handle{
+ cursor: col-resize;
+ grid-column: 2 / 3;
+ min-width: 8px !important;
+ max-width: 8px !important;
+ height: 100%;
+ border-left: 1px dashed var(--border-color-primary);
+ user-select: none;
+ margin-left: 8px;
+}
--
cgit v1.2.3
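With this patch the divider is no longer private to the built-in extension: an extension can
opt in by laying out its two columns inside a ResizeHandleRow, and resizeHandle.js will find
the resulting "resize-handle-row" element on UI load and insert the draggable handle between
the row's first and last child. A minimal sketch of what that might look like (hypothetical
extension tab; assumes it runs inside the webui, where modules.ui_components and the bundled
javascript are available):

    import gradio as gr

    from modules.ui_components import ResizeHandleRow

    def create_my_tab():
        with gr.Blocks() as tab:
            # ResizeHandleRow renders as a plain gr.Row that carries the extra
            # "resize-handle-row" class; javascript/resizeHandle.js calls
            # setupResizeHandle() on every such element via onUiLoaded
            with ResizeHandleRow(equal_height=False):
                with gr.Column():
                    gr.Textbox(label="settings")  # left, resizable column
                with gr.Column():
                    gr.Gallery(label="results")   # right column
        return tab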
From 54fbdcf467254d21fa787384047c992eec788fba Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Tue, 22 Aug 2023 16:43:23 +0300
Subject: Improve integration, fix for new gradio
---
.../canvas-zoom-and-pan/javascript/zoom.js | 67 ++++++++++++++++++++--
extensions-builtin/canvas-zoom-and-pan/style.css | 3 +
2 files changed, 65 insertions(+), 5 deletions(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index 72c8ba87..0e17bb15 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -15,6 +15,19 @@ onUiLoaded(async() => {
// Helper functions
// Get active tab
+
+ /**
+ * Waits for an element to be present in the DOM.
+ */
+ const waitForElement = (id) => new Promise(resolve => {
+ const checkForElement = () => {
+ const element = document.querySelector(id);
+ if (element) return resolve(element);
+ setTimeout(checkForElement, 100);
+ };
+ checkForElement();
+ });
+
function getActiveTab(elements, all = false) {
const tabs = elements.img2imgTabs.querySelectorAll("button");
@@ -393,9 +406,6 @@ onUiLoaded(async() => {
}
targetElement.style.width = "";
- if (canvas) {
- targetElement.style.height = canvas.style.height;
- }
}
// Toggle the zIndex of the target element between two values, allowing it to overlap or be overlapped by other elements
@@ -558,7 +568,7 @@ onUiLoaded(async() => {
if (!canvas) return;
if (canvas.offsetWidth > 862) {
- targetElement.style.width = canvas.offsetWidth + "px";
+ targetElement.style.width = (canvas.offsetWidth + 2) + "px";
}
if (fullScreenMode) {
@@ -826,5 +836,52 @@ onUiLoaded(async() => {
applyZoomAndPan(elementIDs.inpaintSketch);
// Make the function global so that other extensions can take advantage of this solution
- window.applyZoomAndPan = applyZoomAndPan;
+ const applyZoomAndPanIntegration = async(id, elementIDs) => {
+ const mainEl = document.querySelector(id);
+ if (id.toLocaleLowerCase() === "none") {
+ for (const elementID of elementIDs) {
+ const el = await waitForElement(elementID);
+ if (!el) break;
+ applyZoomAndPan(elementID);
+ }
+ return;
+ }
+
+ if (!mainEl) return;
+ mainEl.addEventListener("click", async() => {
+ for (const elementID of elementIDs) {
+ const el = await waitForElement(elementID);
+ if (!el) break;
+ applyZoomAndPan(elementID);
+ }
+ }, {once: true});
+ };
+
+ window.applyZoomAndPan = applyZoomAndPan; // takes a single element; the argument is an element ID, for example applyZoomAndPan("#txt2img_controlnet_ControlNet_input_image")
+
+ window.applyZoomAndPanIntegration = applyZoomAndPanIntegration; // for any extension
+
+ /*
+ The function `applyZoomAndPanIntegration` takes two arguments:
+
+ 1. `id`: A string identifier for the element to which zoom and pan functionality will be applied on click.
+ If the `id` value is "none", the functionality will be applied to all elements specified in the second argument without a click event.
+
+ 2. `elementIDs`: An array of string identifiers for elements. Zoom and pan functionality will be applied to each of these elements on click of the element specified by the first argument.
+ If "none" is specified in the first argument, the functionality will be applied to each of these elements without a click event.
+
+ Example usage:
+ applyZoomAndPanIntegration("#txt2img_controlnet", ["#txt2img_controlnet_ControlNet_input_image"]);
+ In this example, zoom and pan functionality will be applied to the element with the identifier "txt2img_controlnet_ControlNet_input_image" upon clicking the element with the identifier "txt2img_controlnet".
+ */
+
+ // More examples
+ // Add integration with ControlNet txt2img One TAB
+ // applyZoomAndPanIntegration("#txt2img_controlnet", ["#txt2img_controlnet_ControlNet_input_image"]);
+
+ // Add integration with ControlNet txt2img Tabs
+ // applyZoomAndPanIntegration("#txt2img_controlnet",Array.from({ length: 10 }, (_, i) => `#txt2img_controlnet_ControlNet-${i}_input_image`));
+
+ // Add integration with Inpaint Anything
+ // applyZoomAndPanIntegration("None", ["#ia_sam_image", "#ia_sel_mask"]);
});
diff --git a/extensions-builtin/canvas-zoom-and-pan/style.css b/extensions-builtin/canvas-zoom-and-pan/style.css
index 6bcc9570..5d8054e6 100644
--- a/extensions-builtin/canvas-zoom-and-pan/style.css
+++ b/extensions-builtin/canvas-zoom-and-pan/style.css
@@ -61,3 +61,6 @@
to {opacity: 1;}
}
+.styler {
+ overflow:inherit !important;
+}
\ No newline at end of file
--
cgit v1.2.3
From 8fd1558179913902bb43dd30f1270bb290f6ee83 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Wed, 23 Aug 2023 03:21:28 +0300
Subject: Removed the old code
---
extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index 0e17bb15..60057891 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -677,9 +677,8 @@ onUiLoaded(async() => {
targetElement.isExpanded = false;
function autoExpand() {
const canvas = document.querySelector(`${elemId} canvas[key="interface"]`);
- const isMainTab = activeElement === elementIDs.inpaint || activeElement === elementIDs.inpaintSketch || activeElement === elementIDs.sketch;
-
- if (canvas && isMainTab) {
+
+ if (canvas) {
if (hasHorizontalScrollbar(targetElement) && targetElement.isExpanded === false) {
targetElement.style.visibility = "hidden";
setTimeout(() => {
--
cgit v1.2.3
From 6a87e35bef2be9dabf17e1f58cfc5b4a218dd487 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Wed, 23 Aug 2023 03:35:09 +0300
Subject: lint
---
extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js | 1 -
1 file changed, 1 deletion(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index 60057891..71a81af1 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -677,7 +677,6 @@ onUiLoaded(async() => {
targetElement.isExpanded = false;
function autoExpand() {
const canvas = document.querySelector(`${elemId} canvas[key="interface"]`);
-
if (canvas) {
if (hasHorizontalScrollbar(targetElement) && targetElement.isExpanded === false) {
targetElement.style.visibility = "hidden";
--
cgit v1.2.3
From 32e790a47e135461e9d95eb00ff7f53d5b4367a8 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Thu, 24 Aug 2023 01:40:06 +0300
Subject: Fixing and improving integration
---
.../canvas-zoom-and-pan/javascript/zoom.js | 58 +++++++++++++++++++---
1 file changed, 50 insertions(+), 8 deletions(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index 71a81af1..ba10ff7b 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -48,7 +48,7 @@ onUiLoaded(async() => {
// Wait until opts loaded
async function waitForOpts() {
- for (;;) {
+ for (; ;) {
if (window.opts && Object.keys(window.opts).length) {
return window.opts;
}
@@ -269,7 +269,7 @@ onUiLoaded(async() => {
input?.addEventListener("input", () => restoreImgRedMask(elements));
}
- function applyZoomAndPan(elemId) {
+ function applyZoomAndPan(elemId, isExtension = true) {
const targetElement = gradioApp().querySelector(elemId);
if (!targetElement) {
@@ -381,6 +381,10 @@ onUiLoaded(async() => {
panY: 0
};
+ if (isExtension) {
+ targetElement.style.overflow = "hidden";
+ }
+
fixCanvas();
targetElement.style.transform = `scale(${elemData[elemId].zoomLevel}) translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px)`;
@@ -396,11 +400,26 @@ onUiLoaded(async() => {
closeBtn.addEventListener("click", resetZoom);
}
+ if (canvas && isExtension) {
+ const parentElement = targetElement.closest('[id^="component-"]');
+ if (
+ canvas &&
+ parseFloat(canvas.style.width) > parentElement.offsetWidth &&
+ parseFloat(targetElement.style.width) > parentElement.offsetWidth
+ ) {
+ fitToElement();
+ return;
+ }
+
+ }
+
if (
canvas &&
+ !isExtension &&
parseFloat(canvas.style.width) > 865 &&
parseFloat(targetElement.style.width) > 865
) {
+ console.log(parseFloat(canvas.style.width), parseFloat(targetElement.style.width));
fitToElement();
return;
}
@@ -472,6 +491,10 @@ onUiLoaded(async() => {
targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${newZoomLevel})`;
toggleOverlap("on");
+ if (isExtension) {
+ targetElement.style.overflow = "visible";
+ }
+
return newZoomLevel;
}
@@ -494,7 +517,7 @@ onUiLoaded(async() => {
fullScreenMode = false;
elemData[elemId].zoomLevel = updateZoom(
elemData[elemId].zoomLevel +
- (operation === "+" ? delta : -delta),
+ (operation === "+" ? delta : -delta),
zoomPosX - targetElement.getBoundingClientRect().left,
zoomPosY - targetElement.getBoundingClientRect().top
);
@@ -511,10 +534,19 @@ onUiLoaded(async() => {
//Reset Zoom
targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
+ let parentElement;
+
+ if (isExtension) {
+ parentElement = targetElement.closest('[id^="component-"]');
+ } else {
+ parentElement = targetElement.parentElement;
+ }
+
+
// Get element and screen dimensions
const elementWidth = targetElement.offsetWidth;
const elementHeight = targetElement.offsetHeight;
- const parentElement = targetElement.parentElement;
+
const screenWidth = parentElement.clientWidth;
const screenHeight = parentElement.clientHeight;
@@ -565,9 +597,14 @@ onUiLoaded(async() => {
`${elemId} canvas[key="interface"]`
);
+ if (isExtension) {
+ targetElement.style.overflow = "visible";
+ }
+
+
if (!canvas) return;
- if (canvas.offsetWidth > 862) {
+ if (canvas.offsetWidth > 862 || isExtension) {
targetElement.style.width = (canvas.offsetWidth + 2) + "px";
}
@@ -816,6 +853,11 @@ onUiLoaded(async() => {
if (isMoving && elemId === activeElement) {
updatePanPosition(e.movementX, e.movementY);
targetElement.style.pointerEvents = "none";
+
+ if (isExtension) {
+ targetElement.style.overflow = "visible";
+ }
+
} else {
targetElement.style.pointerEvents = "auto";
}
@@ -829,9 +871,9 @@ onUiLoaded(async() => {
gradioApp().addEventListener("mousemove", handleMoveByKey);
}
- applyZoomAndPan(elementIDs.sketch);
- applyZoomAndPan(elementIDs.inpaint);
- applyZoomAndPan(elementIDs.inpaintSketch);
+ applyZoomAndPan(elementIDs.sketch, false);
+ applyZoomAndPan(elementIDs.inpaint, false);
+ applyZoomAndPan(elementIDs.inpaintSketch, false);
// Make the function global so that other extensions can take advantage of this solution
const applyZoomAndPanIntegration = async(id, elementIDs) => {
--
cgit v1.2.3
From fa68d66c982436c066487b3bb82b3a2e6c329fe4 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Thu, 24 Aug 2023 01:42:37 +0300
Subject: remove console.log
---
extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js | 1 -
1 file changed, 1 deletion(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index ba10ff7b..23423891 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -419,7 +419,6 @@ onUiLoaded(async() => {
parseFloat(canvas.style.width) > 865 &&
parseFloat(targetElement.style.width) > 865
) {
- console.log(parseFloat(canvas.style.width), parseFloat(targetElement.style.width));
fitToElement();
return;
}
--
cgit v1.2.3
From c39efa6ba643cb8f13444197933fc65db46dfe71 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Thu, 24 Aug 2023 17:30:35 +0300
Subject: Zoom and Pan: Resize handler
---
.../canvas-zoom-and-pan/javascript/zoom.js | 48 +++++++++++++++++++---
1 file changed, 42 insertions(+), 6 deletions(-)
(limited to 'extensions-builtin')
diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index 23423891..45c7600a 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -385,6 +385,8 @@ onUiLoaded(async() => {
targetElement.style.overflow = "hidden";
}
+ targetElement.isZoomed = false;
+
fixCanvas();
targetElement.style.transform = `scale(${elemData[elemId].zoomLevel}) translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px)`;
@@ -479,7 +481,7 @@ onUiLoaded(async() => {
// Update the zoom level and pan position of the target element based on the values of the zoomLevel, panX and panY variables
function updateZoom(newZoomLevel, mouseX, mouseY) {
- newZoomLevel = Math.max(0.5, Math.min(newZoomLevel, 15));
+ newZoomLevel = Math.max(0.1, Math.min(newZoomLevel, 15));
elemData[elemId].panX +=
mouseX - (mouseX * newZoomLevel) / elemData[elemId].zoomLevel;
@@ -520,6 +522,8 @@ onUiLoaded(async() => {
zoomPosX - targetElement.getBoundingClientRect().left,
zoomPosY - targetElement.getBoundingClientRect().top
);
+
+ targetElement.isZoomed = true;
}
}
@@ -596,17 +600,16 @@ onUiLoaded(async() => {
`${elemId} canvas[key="interface"]`
);
- if (isExtension) {
- targetElement.style.overflow = "visible";
- }
-
-
if (!canvas) return;
if (canvas.offsetWidth > 862 || isExtension) {
targetElement.style.width = (canvas.offsetWidth + 2) + "px";
}
+ if (isExtension) {
+ targetElement.style.overflow = "visible";
+ }
+
if (fullScreenMode) {
resetZoom();
fullScreenMode = false;
@@ -867,7 +870,40 @@ onUiLoaded(async() => {
isMoving = false;
};
+ // For extension integrations: reset zoom when the canvas outgrows its parent container
+ function checkForOutBox() {
+ const parentElement = targetElement.closest('[id^="component-"]');
+ if (parentElement.offsetWidth < targetElement.offsetWidth && !targetElement.isExpanded) {
+ resetZoom();
+ targetElement.isExpanded = true;
+ }
+
+ if (parentElement.offsetWidth < targetElement.offsetWidth && elemData[elemId].zoomLevel == 1) {
+ resetZoom();
+ }
+
+ if (parentElement.offsetWidth < targetElement.offsetWidth && targetElement.offsetWidth * elemData[elemId].zoomLevel > parentElement.offsetWidth && elemData[elemId].zoomLevel < 1 && !targetElement.isZoomed) {
+ resetZoom();
+ }
+ }
+
+ if (isExtension) {
+ targetElement.addEventListener("mousemove", checkForOutBox);
+ }
+
+
+ window.addEventListener('resize', (e) => {
+ resetZoom();
+
+ if (isExtension) {
+ targetElement.isExpanded = false;
+ targetElement.isZoomed = false;
+ }
+ });
+
gradioApp().addEventListener("mousemove", handleMoveByKey);
+
+
}
applyZoomAndPan(elementIDs.sketch, false);
--
cgit v1.2.3