From 4130e5db3d5c2fa7cbfe9e09e5eadba1ed958ab0 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 25 Aug 2023 10:12:19 +0900 Subject: img2img batch PNG info model hash --- modules/img2img.py | 12 +++++++++++- modules/ui.py | 2 +- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index 1519e132..c81c7ab9 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -10,6 +10,7 @@ from modules import images as imgutil from modules.generation_parameters_copypaste import create_override_settings_dict, parse_generation_parameters from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images from modules.shared import opts, state +from modules.sd_models import get_closet_checkpoint_match import modules.shared as shared import modules.processing as processing from modules.ui import plaintext_to_html @@ -41,7 +42,8 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal cfg_scale = p.cfg_scale sampler_name = p.sampler_name steps = p.steps - + override_settings = p.override_settings + sd_model_checkpoint_override = get_closet_checkpoint_match(override_settings.get("sd_model_checkpoint", None)) for i, image in enumerate(images): state.job = f"{i+1} out of {len(images)}" if state.skipped: @@ -104,6 +106,14 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal p.sampler_name = parsed_parameters.get("Sampler", sampler_name) p.steps = int(parsed_parameters.get("Steps", steps)) + model_info = get_closet_checkpoint_match(parsed_parameters.get("Model hash", None)) + if model_info is not None: + p.override_settings['sd_model_checkpoint'] = model_info.name + elif sd_model_checkpoint_override: + p.override_settings['sd_model_checkpoint'] = sd_model_checkpoint_override + else: + p.override_settings.pop("sd_model_checkpoint", None) + proc = modules.scripts.scripts_img2img.run(p, *args) if proc is None: if output_dir: diff --git a/modules/ui.py b/modules/ui.py index 2b6a13cb..9c5082c3 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -614,7 +614,7 @@ def create_ui(): with gr.Accordion("PNG info", open=False): img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") - img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") + img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps", "Model hash"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] -- cgit v1.2.3 From 3369fb27df6c1badd39bcb59b3f71c61a47d3d91 Mon Sep 17 00:00:00 2001 From: SpenserCai Date: Fri, 25 Aug 2023 22:15:35 +0800 Subject: support installed extensions list api --- modules/api/api.py | 20 ++++++++++++++++++++ modules/api/models.py | 9 +++++++++ 2 files changed, 29 insertions(+) diff --git a/modules/api/api.py b/modules/api/api.py index e6edffe7..0bcf5497 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -243,6 +243,7 @@ 
class Api: self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"]) self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList) self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=List[models.ScriptInfo]) + self.add_api_route("/sdapi/v1/extensions", self.get_extensions_list, methods=["GET"], response_model=List[models.ExtensionItem]) if shared.cmd_opts.api_server_stop: self.add_api_route("/sdapi/v1/server-kill", self.kill_webui, methods=["POST"]) @@ -769,6 +770,25 @@ class Api: except Exception as err: cuda = {'error': f'{err}'} return models.MemoryResponse(ram=ram, cuda=cuda) + + def get_extensions_list(self): + from modules import extensions + extensions.list_extensions() + ext_list = [] + for ext in extensions.extensions: + ext: extensions.Extension + ext.read_info_from_repo() + if ext.remote is not None: + ext_list.append({ + "name": ext.name, + "remote": ext.remote, + "branch": ext.branch, + "commit_hash":ext.commit_hash, + "commit_date":ext.commit_date, + "version":ext.version, + "enabled":ext.enabled + }) + return ext_list def launch(self, server_name, port, root_path): self.app.include_router(self.router) diff --git a/modules/api/models.py b/modules/api/models.py index 6a574771..731ab03d 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -311,3 +311,12 @@ class ScriptInfo(BaseModel): is_alwayson: bool = Field(default=None, title="IsAlwayson", description="Flag specifying whether this script is an alwayson script") is_img2img: bool = Field(default=None, title="IsImg2img", description="Flag specifying whether this script is an img2img script") args: List[ScriptArg] = Field(title="Arguments", description="List of script's arguments") + +class ExtensionItem(BaseModel): + name: str = Field(title="Name", description="Extension name") + remote: str = Field(title="Remote", description="Extension Repository URL") + branch: str = Field(title="Branch", description="Extension Repository Branch") + commit_hash: str = Field(title="Commit Hash", description="Extension Repository Commit Hash") + version: str = Field(title="Version", description="Extension Version") + commit_date: str = Field(title="Commit Date", description="Extension Repository Commit Date") + enabled: bool = Field(title="Enabled", description="Flag specifying whether this extension is enabled") -- cgit v1.2.3
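For reference, the new endpoint can be exercised with any HTTP client once the server is running. A minimal sketch, assuming a default local install listening on port 7860 (the host, port and printed summary are illustrative, not part of the patch; note that only extensions with a remote are returned):

import requests

# query the installed-extensions endpoint added by the patch above
resp = requests.get("http://127.0.0.1:7860/sdapi/v1/extensions")
resp.raise_for_status()

for ext in resp.json():
    # each entry mirrors models.ExtensionItem: name, remote, branch,
    # commit_hash, version, commit_date, enabled
    state = "enabled" if ext["enabled"] else "disabled"
    print(f"{ext['name']} ({state}): {ext['remote']} @ {ext['commit_hash'][:8]}")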
From dd07b5193efa547929629b310ef5c9ff0fc83a19 Mon Sep 17 00:00:00 2001 From: SpenserCai Date: Fri, 25 Aug 2023 22:23:17 +0800 Subject: fix format error --- modules/api/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/api/api.py b/modules/api/api.py index 0bcf5497..785ee828 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -770,7 +770,7 @@ class Api: except Exception as err: cuda = {'error': f'{err}'} return models.MemoryResponse(ram=ram, cuda=cuda) - + def get_extensions_list(self): from modules import extensions extensions.list_extensions() -- cgit v1.2.3 From db56bdce33264aab5e6b565a41df503d785bbbff Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 25 Aug 2023 16:04:06 -0400 Subject: Don't show hidden samplers in dropdown for XYZ script --- scripts/xyz_grid.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index daaf761f..517d6332 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -238,9 +238,9 @@ axis_options = [ AxisOptionImg2Img("Image CFG Scale", float, apply_field("image_cfg_scale")), AxisOption("Prompt S/R", str, apply_prompt, format_value=format_value), AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list), - AxisOptionTxt2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), - AxisOptionTxt2Img("Hires sampler", str, apply_field("hr_sampler_name"), confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]), - AxisOptionImg2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]), + AxisOptionTxt2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers if x.name not in opts.hide_samplers]), + AxisOptionTxt2Img("Hires sampler", str, apply_field("hr_sampler_name"), confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img if x.name not in opts.hide_samplers]), + AxisOptionImg2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img if x.name not in opts.hide_samplers]), AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_remove_path, confirm=confirm_checkpoints, cost=1.0, choices=lambda: sorted(sd_models.checkpoints_list, key=str.casefold)), AxisOption("Negative Guidance minimum sigma", float, apply_field("s_min_uncond")), AxisOption("Sigma Churn", float, apply_field("s_churn")), -- cgit v1.2.3 From bb90b0ff42ea55cbc73df15ea1ef8fd79af2e026 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 26 Aug 2023 06:33:48 +0300 Subject: fix defaults settings page breaking when any of main UI tabs are hidden --- modules/ui.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 9c5082c3..f4028475 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1279,11 +1279,8 @@ def create_ui(): with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): interface.render() - for interface, _label, ifid in interfaces: - if ifid in ["extensions", "settings"]: - continue - - loadsave.add_block(interface, ifid) + if ifid not in ["extensions", "settings"]: + loadsave.add_block(interface, ifid) loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs) -- cgit v1.2.3 From 72ee347eabf04d1a238a738a03e7973cc2a46ca3 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 26 Aug 2023 06:52:18 +0300 Subject: update pnginfo checkpoint to return dict with parsed values --- modules/api/api.py | 10 ++++------ modules/api/models.py | 3 ++- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 785ee828..844e31ee 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -17,7 +17,7 @@ from fastapi.encoders import jsonable_encoder from secrets import compare_digest import modules.shared as shared -from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart, shared_items +from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart, shared_items, script_callbacks, generation_parameters_copypaste from modules.api import models from modules.shared 
import opts from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images @@ -474,9 +474,6 @@ class Api: return models.ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) def pnginfoapi(self, req: models.PNGInfoRequest): - if(not req.image.strip()): - return models.PNGInfoResponse(info="") - image = decode_base64_to_image(req.image.strip()) if image is None: return models.PNGInfoResponse(info="") @@ -485,9 +482,10 @@ class Api: if geninfo is None: geninfo = "" - items = {**{'parameters': geninfo}, **items} + params = generation_parameters_copypaste.parse_generation_parameters(geninfo) + script_callbacks.infotext_pasted_callback(geninfo, params) - return models.PNGInfoResponse(info=geninfo, items=items) + return models.PNGInfoResponse(info=geninfo, items=items, parameters=params) def progressapi(self, req: models.ProgressRequest = Depends()): # copy from check_progress_call of ui.py diff --git a/modules/api/models.py b/modules/api/models.py index 731ab03d..94eca97d 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -178,7 +178,8 @@ class PNGInfoRequest(BaseModel): class PNGInfoResponse(BaseModel): info: str = Field(title="Image info", description="A string with the parameters used to generate the image") - items: dict = Field(title="Items", description="An object containing all the info the image had") + items: dict = Field(title="Items", description="A dictionary containing all the other fields the image had") + parameters: dict = Field(title="Parameters", description="A dictionary with parsed generation info fields") -- cgit v1.2.3
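With this change a client no longer needs to parse the raw infotext itself; the parsed fields come back alongside it. A minimal sketch of a call to the png-info endpoint (the file name, host and inspected key are illustrative):

import base64
import requests

# send a generated PNG to the png-info endpoint and read back parsed fields
with open("example.png", "rb") as f:
    payload = {"image": base64.b64encode(f.read()).decode()}

resp = requests.post("http://127.0.0.1:7860/sdapi/v1/png-info", json=payload)
resp.raise_for_status()
data = resp.json()

print(data["info"])                    # raw infotext string, as before
print(data["parameters"].get("Seed"))  # new: already-parsed generation parameters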
From ec54257cb21bacd6281a5f9c6f74c2529fe446c5 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sat, 26 Aug 2023 07:00:09 -0400 Subject: Hide broken image crop tool for now --- style.css | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/style.css b/style.css index d67b6336..5090f289 100644 --- a/style.css +++ b/style.css @@ -2,6 +2,14 @@ @import url('https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap'); +/* temporary fix to hide gradio crop tool until it's fixed https://github.com/gradio-app/gradio/issues/3810 */ + + +div.gradio-image button[aria-label="Edit"] { + display: none; +} + + /* general gradio fixes */ :root, .dark{ -- cgit v1.2.3 From 73f69a74534be17c020fd1a5e64dfce71981fc31 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sat, 26 Aug 2023 07:04:11 -0400 Subject: Fix CSS whitespace --- style.css | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/style.css b/style.css index 5090f289..e336e79d 100644 --- a/style.css +++ b/style.css @@ -2,8 +2,8 @@ @import url('https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap'); -/* temporary fix to hide gradio crop tool until it's fixed https://github.com/gradio-app/gradio/issues/3810 */ + +/* temporary fix to hide gradio crop tool until it's fixed https://github.com/gradio-app/gradio/issues/3810 */ div.gradio-image button[aria-label="Edit"] { display: none; -- cgit v1.2.3 From 168eac319d0f45c778d5b9d35dd5ce280f8d5094 Mon Sep 17 00:00:00 2001 From: Daniel Dengler Date: Sat, 26 Aug 2023 23:22:57 +0200 Subject: is_automatic is missing () for call --- modules/sd_vae.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 669097da..31306d8b 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -159,7 +159,7 @@ def resolve_vae_from_user_metadata(checkpoint_file) -> VaeResolution: def resolve_vae_near_checkpoint(checkpoint_file) -> VaeResolution: vae_near_checkpoint = find_vae_near_checkpoint(checkpoint_file) - if vae_near_checkpoint is not None and (not shared.opts.sd_vae_overrides_per_model_preferences or is_automatic): + if vae_near_checkpoint is not None and (not shared.opts.sd_vae_overrides_per_model_preferences or is_automatic()): return VaeResolution(vae_near_checkpoint, 'found near the checkpoint') return VaeResolution(resolved=False) -- cgit v1.2.3 From 9d8d279d0d4d103e1b7d0bad21a3eb835dbab9aa Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sat, 26 Aug 2023 15:56:17 -0400 Subject: Prevent duplicate resize handler --- javascript/resizeHandle.js | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 2fd3c4d2..8c5c5169 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -134,6 +134,8 @@ onUiLoaded(function() { for (var elem of gradioApp().querySelectorAll('.resize-handle-row')) { - setupResizeHandle(elem); + if (!elem.querySelector('.resize-handle')) { + setupResizeHandle(elem); + } } }); -- cgit v1.2.3 From b7f0e815624dab182aff406c8f227b39ec17452f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 27 Aug 2023 08:41:26 +0300 Subject: fix error that causes some extra networks to be disabled if both <lora:...> and <lyco:...> are present in the prompt --- modules/extra_networks.py | 58 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 42 insertions(+), 16 deletions(-) diff --git a/modules/extra_networks.py b/modules/extra_networks.py index fa28ac75..b9533677 100644 --- a/modules/extra_networks.py +++ b/modules/extra_networks.py @@ -1,6 +1,7 @@ import json import os import re +import logging from collections import defaultdict from modules import errors @@ -86,27 +87,55 @@ class ExtraNetwork: raise NotImplementedError -def activate(p, extra_network_data): - """call activate for extra networks in extra_network_data in specified order, then call - activate for all remaining registered networks with an empty argument list""" +def lookup_extra_networks(extra_network_data): + """returns a dict mapping ExtraNetwork objects to lists of arguments for those extra networks. 
- activated = [] + Example input: + { + 'lora': [<ExtraNetworkParams object>], + 'lyco': [<ExtraNetworkParams object>], + 'hypernet': [<ExtraNetworkParams object>] + } + + Example output: + + { + <ExtraNetworkLora object>: [<ExtraNetworkParams object>, <ExtraNetworkParams object>], + <ExtraNetworkHypernet object>: [<ExtraNetworkParams object>] + } + """ - for extra_network_name, extra_network_args in extra_network_data.items(): + res = {} + + for extra_network_name, extra_network_args in list(extra_network_data.items()): extra_network = extra_network_registry.get(extra_network_name, None) + alias = extra_network_aliases.get(extra_network_name, None) - if extra_network is None: - extra_network = extra_network_aliases.get(extra_network_name, None) + if alias is not None and extra_network is None: + extra_network = alias if extra_network is None: - print(f"Skipping unknown extra network: {extra_network_name}") + logging.info(f"Skipping unknown extra network: {extra_network_name}") continue + res.setdefault(extra_network, []).extend(extra_network_args) + + return res + + +def activate(p, extra_network_data): + """call activate for extra networks in extra_network_data in specified order, then call + activate for all remaining registered networks with an empty argument list""" + + activated = [] + + for extra_network, extra_network_args in lookup_extra_networks(extra_network_data).items(): + try: extra_network.activate(p, extra_network_args) activated.append(extra_network) except Exception as e: - errors.display(e, f"activating extra network {extra_network_name} with arguments {extra_network_args}") + errors.display(e, f"activating extra network {extra_network.name} with arguments {extra_network_args}") for extra_network_name, extra_network in extra_network_registry.items(): if extra_network in activated: @@ -125,19 +154,16 @@ def deactivate(p, extra_network_data): """call deactivate for extra networks in extra_network_data in specified order, then call deactivate for all remaining registered networks""" - for extra_network_name in extra_network_data: - extra_network = extra_network_registry.get(extra_network_name, None) - if extra_network is None: - continue + data = lookup_extra_networks(extra_network_data) + for extra_network in data: try: extra_network.deactivate(p) except Exception as e: - errors.display(e, f"deactivating extra network {extra_network_name}") + errors.display(e, f"deactivating extra network {extra_network.name}") for extra_network_name, extra_network in extra_network_registry.items(): - args = extra_network_data.get(extra_network_name, None) - if args is not None: + if extra_network in data: continue try: -- cgit v1.2.3
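The point of the new lookup is that 'lora' and its alias 'lyco' resolve to the same handler object, so their argument lists must be merged instead of the second key knocking the first one out. A standalone toy sketch of that merge, with plain strings standing in for the ExtraNetwork and parameter objects:

# toy model of lookup_extra_networks: two prompt keys ('lora' and its
# alias 'lyco') map to one handler, so their arguments are combined
handlers = {"lora": "lora_handler", "lyco": "lora_handler", "hypernet": "hypernet_handler"}

extra_network_data = {"lora": ["detail:0.8"], "lyco": ["detail:0.4"], "hypernet": ["style:1.0"]}

res = {}
for name, args in extra_network_data.items():
    handler = handlers.get(name)
    if handler is None:
        continue  # unknown network type; the real code logs and skips it
    res.setdefault(handler, []).extend(args)

print(res)
# {'lora_handler': ['detail:0.8', 'detail:0.4'], 'hypernet_handler': ['style:1.0']}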
From cb5f0823c6f7fadb5eb81b93e5a587a11856b478 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 27 Aug 2023 08:45:16 +0300 Subject: update gradio to 3.41.2 --- modules/errors.py | 2 +- requirements.txt | 2 +- requirements_versions.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/errors.py b/modules/errors.py index a56fd30c..8c339464 100644 --- a/modules/errors.py +++ b/modules/errors.py @@ -95,7 +95,7 @@ def check_versions(): expected_torch_version = "2.0.0" expected_xformers_version = "0.0.20" - expected_gradio_version = "3.41.0" + expected_gradio_version = "3.41.2" if version.parse(torch.__version__) < version.parse(expected_torch_version): print_error_explanation(f""" diff --git a/requirements.txt b/requirements.txt index 960fa0bd..80b43845 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ clean-fid einops fastapi>=0.90.1 gfpgan -gradio==3.41.0 +gradio==3.41.2 inflection jsonmerge kornia diff --git a/requirements_versions.txt b/requirements_versions.txt index 6c679e24..f8ae1f38 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -7,7 +7,7 @@ clean-fid==0.1.35 einops==0.4.1 fastapi==0.94.0 gfpgan==1.3.8 -gradio==3.41.0 +gradio==3.41.2 httpcore==0.15 inflection==0.5.1 jsonmerge==1.8.0 -- cgit v1.2.3 From f2c55523c0cde3dca5cc154c45046316396b14a6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 27 Aug 2023 08:45:25 +0300 Subject: update changelog --- CHANGELOG.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0c5e0f11..5e78b3d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,7 +67,7 @@ * make it possible to localize tooltips and placeholders ### Extensions and API: - * gradio 3.41.0 + * gradio 3.41.2 * also bump versions for packages: transformers, GitPython, accelerate, scikit-image, timm, tomesd * support tooltip kwarg for gradio elements: gr.Textbox(label='hello', tooltip='world') * properly clear the total console progressbar when using txt2img and img2img from API @@ -127,6 +127,9 @@ * set devices.dtype_unet correctly * run RealESRGAN on GPU for non-CUDA devices ([#12737](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12737)) * prevent extra network buttons being obscured by description for very small card sizes ([#12745](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12745)) + * fix error that causes some extra networks to be disabled if both <lora:...> and <lyco:...> are present in the prompt + * fix defaults settings page breaking when any of main UI tabs are hidden + * fix incorrect save/display of new values in Defaults page in settings ## 1.5.2 -- cgit v1.2.3 From bd5c16e8da5837b2b08fe6e329694553dd688a5f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 27 Aug 2023 09:19:02 +0300 Subject: fix for Reload UI function: if you reload UI on one tab, other opened tabs will no longer stop working --- CHANGELOG.md | 1 + modules/generation_parameters_copypaste.py | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5e78b3d2..1bbde234 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -130,6 +130,7 @@ * fix error that causes some extra networks to be disabled if both <lora:...> and <lyco:...> are present in the prompt * fix defaults settings page breaking when any of main UI tabs are hidden * fix incorrect save/display of new values in Defaults page in settings + * fix for Reload UI function: if you reload UI on one tab, other opened tabs will no longer stop working ## 1.5.2 diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 386517ac..2ca16055 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -32,6 +32,7 @@ class ParamBinding: def reset(): paste_fields.clear() + registered_param_bindings.clear() def quote(text): -- cgit v1.2.3 From 23c6b5f1242f89c37a094d7a9237491c1ca1da34 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 27 Aug 2023 09:39:37 +0300 Subject: fix style editing dialog breaking if it's opened in both img2img and txt2img tabs --- javascript/extraNetworks.js | 9 +++++++++ modules/ui_common.py | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index 3bc723d3..ad1a4e00 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -249,6 +249,15 @@ function popup(contents) { globalPopup.style.display = "flex"; } +var storedPopupIds = {}; +function popupId(id) { + if(! 
storedPopupIds[id]){ + storedPopupIds[id] = gradioApp().getElementById(id); + } + + popup(storedPopupIds[id]); +} + function extraNetworksShowMetadata(text) { var elem = document.createElement('pre'); elem.classList.add('popup-metadata'); diff --git a/modules/ui_common.py b/modules/ui_common.py index eddc4bc8..84a7d7f2 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -261,7 +261,7 @@ def setup_dialog(button_show, dialog, *, button_close=None): fn=lambda: gr.update(visible=True), inputs=[], outputs=[dialog], - ).then(fn=None, _js="function(){ popup(gradioApp().getElementById('" + dialog.elem_id + "')); }") + ).then(fn=None, _js="function(){ popupId('" + dialog.elem_id + "'); }") if button_close: button_close.click(fn=None, _js="closePopup") -- cgit v1.2.3 From 897312de46352c39d03b6811844c128426fbae80 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 27 Aug 2023 09:44:13 +0300 Subject: update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1bbde234..07798b5a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -131,6 +131,10 @@ * fix defaults settings page breaking when any of main UI tabs are hidden * fix incorrect save/display of new values in Defaults page in settings * fix for Reload UI function: if you reload UI on one tab, other opened tabs will no longer stop working + * fix an error that prevents VAE being reloaded after an option change if a VAE near the checkpoint exists ([#12797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12797)) + * hide broken image crop tool ([#12792](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12792)) + * don't show hidden samplers in dropdown for XYZ script ([#12780](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12780)) + * fix style editing dialog breaking if it's opened in both img2img and txt2img tabs ## 1.5.2 -- cgit v1.2.3 From 63d3150dc4f5c4452a4a385329eb8954f53d6451 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 27 Aug 2023 10:11:14 +0300 Subject: lint --- javascript/extraNetworks.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index ad1a4e00..493f31af 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -251,7 +251,7 @@ function popup(contents) { var storedPopupIds = {}; function popupId(id) { - if(! 
storedPopupIds[id]){ + if (!storedPopupIds[id]) { storedPopupIds[id] = gradioApp().getElementById(id); } -- cgit v1.2.3 From 896fde789ee69bd5dd2a829def0878793aa28079 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 27 Aug 2023 20:16:50 +0300 Subject: hide --gradio-auth and --api-auth values from /internal/sysinfo report --- modules/sysinfo.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/modules/sysinfo.py b/modules/sysinfo.py index 058e66ce..2db7551d 100644 --- a/modules/sysinfo.py +++ b/modules/sysinfo.py @@ -82,7 +82,7 @@ def get_dict(): "Data path": paths_internal.data_path, "Extensions dir": paths_internal.extensions_dir, "Checksum": checksum_token, - "Commandline": sys.argv, + "Commandline": get_argv(), "Torch env info": get_torch_sysinfo(), "Exceptions": get_exceptions(), "CPU": { @@ -123,6 +123,22 @@ def get_environment(): return {k: os.environ[k] for k in sorted(os.environ) if k in environment_whitelist} +def get_argv(): + res = [] + + for v in sys.argv: + if shared.cmd_opts.gradio_auth and shared.cmd_opts.gradio_auth == v: + res.append("<hidden>") + continue + + if shared.cmd_opts.api_auth and shared.cmd_opts.api_auth == v: + res.append("<hidden>") + continue + + res.append(v) + + return res + re_newline = re.compile(r"\r*\n") -- cgit v1.2.3 From e422f19ee9f78066ec023d6b3b0a307a677c6ad9 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 28 Aug 2023 03:27:07 +0900 Subject: non-local condition --- modules/shared_cmd_options.py | 2 +- webui.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/shared_cmd_options.py b/modules/shared_cmd_options.py index af24938b..dd93f520 100644 --- a/modules/shared_cmd_options.py +++ b/modules/shared_cmd_options.py @@ -15,4 +15,4 @@ else: cmd_opts, _ = parser.parse_known_args() -cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access +cmd_opts.disable_extension_access = any([cmd_opts.share, cmd_opts.listen, cmd_opts.ngrok, cmd_opts.server_name]) and not cmd_opts.enable_insecure_extension_access diff --git a/webui.py b/webui.py index 5c827dae..12328423 100644 --- a/webui.py +++ b/webui.py @@ -74,7 +74,7 @@ def webui(): if shared.opts.auto_launch_browser == "Remote" or cmd_opts.autolaunch: auto_launch_browser = True elif shared.opts.auto_launch_browser == "Local": - auto_launch_browser = not any([cmd_opts.listen, cmd_opts.share, cmd_opts.ngrok]) + auto_launch_browser = not any([cmd_opts.listen, cmd_opts.share, cmd_opts.ngrok, cmd_opts.server_name]) app, local_url, share_url = shared.demo.launch( share=cmd_opts.share, -- cgit v1.2.3 From 18e3e6d6abfc084324cc8ae13f70ba4af5ddc35f Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 28 Aug 2023 03:42:02 +0900 Subject: consolidate local check --- modules/shared_cmd_options.py | 4 ++-- webui.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/shared_cmd_options.py b/modules/shared_cmd_options.py index dd93f520..c9626667 100644 --- a/modules/shared_cmd_options.py +++ b/modules/shared_cmd_options.py @@ -14,5 +14,5 @@ if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None: else: cmd_opts, _ = parser.parse_known_args() - -cmd_opts.disable_extension_access = any([cmd_opts.share, cmd_opts.listen, cmd_opts.ngrok, cmd_opts.server_name]) and not cmd_opts.enable_insecure_extension_access +cmd_opts.webui_is_non_local = any([cmd_opts.share, 
cmd_opts.listen, cmd_opts.ngrok, cmd_opts.server_name]) +cmd_opts.disable_extension_access = cmd_opts.webui_is_non_local and not cmd_opts.enable_insecure_extension_access diff --git a/webui.py b/webui.py index 12328423..9ed20b30 100644 --- a/webui.py +++ b/webui.py @@ -74,7 +74,7 @@ def webui(): if shared.opts.auto_launch_browser == "Remote" or cmd_opts.autolaunch: auto_launch_browser = True elif shared.opts.auto_launch_browser == "Local": - auto_launch_browser = not any([cmd_opts.listen, cmd_opts.share, cmd_opts.ngrok, cmd_opts.server_name]) + auto_launch_browser = not cmd_opts.webui_is_non_local app, local_url, share_url = shared.demo.launch( share=cmd_opts.share, -- cgit v1.2.3 From 2b8484a29d7d1dbfd69d97616e4617cb02006192 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 27 Aug 2023 16:25:26 -0400 Subject: Add missing infotext for RNG --- modules/shared_options.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 83f56314..0f054f47 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -144,7 +144,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}, infotext="Clip skip").link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), - "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), + "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}, infotext="RNG").info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), "tiling": OptionInfo(False, "Tiling", infotext='Tiling').info("produce a tileable picture"), })) -- cgit v1.2.3 From f3d1631aab82d559294126a9230c979ef4c4e1d6 Mon Sep 17 00:00:00 2001 From: JaredTherriault Date: Sun, 27 Aug 2023 21:54:05 -0700 Subject: Offloading custom work -custom_statics works to do mass replace strings, intended for copy-pasting gen info from internet generations and replacing unsavory prompts with safer prompts for my own sanity -tried to implement this into generation_parameters_copypaste but it didn't work out this iteration, presumably because we return a string and the calling method is looking for an object type -updated webui-user.bat to set a custom temp directory (for disk space concerns) and to apply xformers (for generation speed) I probably won't be merging any of this work into the main repo since I don't want to mess with anyone else's prompts, this is just intended to keep my workspace safe from anything I don't want to see. 
Eventually this should be done in an extension which I could then publish, but I need to learn a lot more about the extension and callback systems in the main repo first. just uploading this to my fork for now so i don't lose the current progress. --- modules/custom_statics.py | 29 +++++++++++++++++++++++++++++ modules/generation_parameters_copypaste.py | 8 ++++++++ webui-user.bat | 4 ++-- 3 files changed, 39 insertions(+), 2 deletions(-) create mode 100644 modules/custom_statics.py diff --git a/modules/custom_statics.py b/modules/custom_statics.py new file mode 100644 index 00000000..207bd5fb --- /dev/null +++ b/modules/custom_statics.py @@ -0,0 +1,29 @@ +import os +import gc +import re + +import modules.paths as paths + +class CustomStatics: + + @staticmethod + # loads a file with strings structured as below, on each line with a : between the search and replace strings, into a list + # search0:replace0 + # search string:replace string + # + # Then replaces all occurrences of the list's search strings with the list's replace strings in one go + def mass_replace_strings(input_string): + with open(os.path.join(paths.data_path, "custom_statics/Replacements.txt"), "r", encoding="utf8") as file: + replacements = file.readlines() + + replacement_dict = {} + for line in replacements: + search, replace = line.strip().split(":") + replacement_dict[search] = replace + + def replace(match_text): + return replacement_dict[match_text.group(0)] + + return re.sub('|'.join(r'\b%s\b' % re.escape(s) for s in replacement_dict.keys()), replace, str(input_string)) + + return str(geninfo) \ No newline at end of file diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index a3448be9..bd7b0018 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -370,6 +370,14 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component, prompt = file.read() params = parse_generation_parameters(prompt) + + # This sanitizes unsavory prompt words when copying from another image + # for my own sanity. This is not intended to be contributed to the main repo, + # it's just so I don't have to see anything I'm not interested in when batch + # reproducing images from civit.ai or elsewhere when working on loras + # todo: make this work with the callback instead of forcing it here, this can be an extension when I feel like putting it together :D + from modules import custom_statics + params = custom_statics.CustomStatics.mass_replace_strings(params) script_callbacks.infotext_pasted_callback(prompt, params) res = [] diff --git a/webui-user.bat b/webui-user.bat index e5a257be..1ba2116d 100644 --- a/webui-user.bat +++ b/webui-user.bat @@ -1,8 +1,8 @@ @echo off - +set TEMP=G:\SD-temp set PYTHON= set GIT= set VENV_DIR= -set COMMANDLINE_ARGS= +set COMMANDLINE_ARGS= --xformers call webui.bat -- cgit v1.2.3 From f898833ea38718e87b39ab090b2a2325638559cb Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Mon, 28 Aug 2023 10:43:13 +0200 Subject: fix typos --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b796d150..007d6fde 100644 --- a/README.md +++ b/README.md @@ -88,7 +88,7 @@ A browser interface based on Gradio library for Stable Diffusion. 
- [Alt-Diffusion](https://arxiv.org/abs/2211.06679) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#alt-diffusion) for instructions - Now without any bad letters! - Load checkpoints in safetensors format -- Eased resolution restriction: generated image's domension must be a multiple of 8 rather than 64 +- Eased resolution restriction: generated image's dimensions must be a multiple of 8 rather than 64 - Now with a license! - Reorder elements in the UI from settings screen @@ -100,7 +100,7 @@ Alternatively, use online services (like Google Colab): - [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services) ### Installation on Windows 10/11 with NVidia-GPUs using release package -1. Download `sd.webui.zip` from [v1.0.0-pre](https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre) and extract it's contents. +1. Download `sd.webui.zip` from [v1.0.0-pre](https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre) and extract its contents. 2. Run `update.bat`. 3. Run `run.bat`. > For more details see [Install-and-Run-on-NVidia-GPUs](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) -- cgit v1.2.3 From 99acbd5ebe3e43e7d07905c7fc274b321cc905be Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Mon, 28 Aug 2023 11:17:47 -0400 Subject: Don't print blank stdout in extension installers --- modules/launch_utils.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 7e4d5a61..43b986c6 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -228,7 +228,9 @@ def run_extension_installer(extension_dir): env = os.environ.copy() env['PYTHONPATH'] = f"{os.path.abspath('.')}{os.pathsep}{env.get('PYTHONPATH', '')}" - print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env)) + stdout = run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env) + if stdout: + print(stdout) except Exception as e: errors.report(str(e)) -- cgit v1.2.3 From 20df81b0cc146c117c8a8c002997941063b32f13 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Mon, 28 Aug 2023 11:26:50 -0400 Subject: Honor `--skip-install` for extension installers --- modules/launch_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 7e4d5a61..27d7d617 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -404,7 +404,8 @@ def prepare_environment(): run_pip(f"install -r \"{requirements_file}\"", "requirements") startup_timer.record("install requirements") - run_extensions_installers(settings_file=args.ui_settings_file) + if not args.skip_install: + run_extensions_installers(settings_file=args.ui_settings_file) if args.update_check: version_check(commit) -- cgit v1.2.3 From 592b0dcfa705c42654eff48a40a51d8d6924f987 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Mon, 28 Aug 2023 12:09:37 -0400 Subject: Fix notification not playing when built-in webui tab is inactive --- javascript/notification.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/notification.js b/javascript/notification.js index 76c5715d..6d799561 100644 
--- a/javascript/notification.js +++ b/javascript/notification.js @@ -15,7 +15,7 @@ onAfterUiUpdate(function() { } } - const galleryPreviews = gradioApp().querySelectorAll('div[id^="tab_"][style*="display: block"] div[id$="_results"] .thumbnail-item > img'); + const galleryPreviews = gradioApp().querySelectorAll('div[id^="tab_"] div[id$="_results"] .thumbnail-item > img'); if (galleryPreviews == null) return; -- cgit v1.2.3 From cd48308a2a37b1e838b1b0cc5e8e507a174b14fb Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 28 Aug 2023 22:22:35 +0300 Subject: always show NV as RNG source in infotext --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 7dc931ba..0138e5ac 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -689,7 +689,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Token merging ratio": None if token_merging_ratio == 0 else token_merging_ratio, "Token merging ratio hr": None if not enable_hr or token_merging_ratio_hr == 0 else token_merging_ratio_hr, "Init image hash": getattr(p, 'init_img_hash', None), - "RNG": opts.randn_source if opts.randn_source != "GPU" and opts.randn_source != "NV" else None, + "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, "Tiling": "True" if p.tiling else None, **p.extra_generation_params, -- cgit v1.2.3 From 739686b1c521a77b0d3ce43c1f3b123df0317504 Mon Sep 17 00:00:00 2001 From: bluelovers Date: Tue, 29 Aug 2023 06:19:22 +0800 Subject: style: file-metadata word-break --- style.css | 2 ++ 1 file changed, 2 insertions(+) diff --git a/style.css b/style.css index e336e79d..bbfb7d39 100644 --- a/style.css +++ b/style.css @@ -1009,6 +1009,8 @@ div.block.gradio-box.edit-user-metadata { .edit-user-metadata .file-metadata th, .edit-user-metadata .file-metadata td{ padding: 0.3em 1em; + overflow-wrap: anywhere; + word-break: break-word; } .edit-user-metadata .wrap.translucent{ -- cgit v1.2.3 From 1bb21f35102326da28e1360750a879386c015716 Mon Sep 17 00:00:00 2001 From: bluelovers Date: Tue, 29 Aug 2023 06:25:16 +0800 Subject: feat: display file metadata path https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/12289 --- modules/ui_extra_networks_user_metadata.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/modules/ui_extra_networks_user_metadata.py b/modules/ui_extra_networks_user_metadata.py index b11622a1..877d0285 100644 --- a/modules/ui_extra_networks_user_metadata.py +++ b/modules/ui_extra_networks_user_metadata.py @@ -2,10 +2,29 @@ import datetime import html import json import os.path +from pathlib import Path import gradio as gr from modules import generation_parameters_copypaste, images, sysinfo, errors +from modules.paths_internal import models_path + + +def windows_to_unix_style(path): + return Path(path).as_posix() + + +def exclude_root_path(root_path, path_to_exclude): + try: + relative_path = os.path.relpath(path_to_exclude, root_path) + # if the path is already outside root_path, relpath will return an absolute path, + # so we need to check whether the path is actually inside root_path + if not relative_path.startswith('..'): + return windows_to_unix_style(relative_path) + except ValueError: + pass + # if the path cannot be made relative, or lies outside root_path, return the original path + return windows_to_unix_style(path_to_exclude) class UserMetadataEditor: @@ -98,6 +117,7 @@ class UserMetadataEditor: stats = os.stat(filename) params = [ ('Filename: ', os.path.basename(filename)), ('Path: ', 
exclude_root_path(models_path, filename)), ('File size: ', sysinfo.pretty_bytes(stats.st_size)), ('Hash: ', shorthash), ('Modified: ', datetime.datetime.fromtimestamp(stats.st_mtime).strftime('%Y-%m-%d %H:%M')), -- cgit v1.2.3 From d83a1ba65be1b0fbdba8f10212193c52dc8f5e90 Mon Sep 17 00:00:00 2001 From: bluelovers Date: Tue, 29 Aug 2023 06:33:00 +0800 Subject: feat: display file metadata ss_output_name https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/12289 --- extensions-builtin/Lora/ui_edit_user_metadata.py | 1 + 1 file changed, 1 insertion(+) diff --git a/extensions-builtin/Lora/ui_edit_user_metadata.py b/extensions-builtin/Lora/ui_edit_user_metadata.py index 390d9dde..c7011909 100644 --- a/extensions-builtin/Lora/ui_edit_user_metadata.py +++ b/extensions-builtin/Lora/ui_edit_user_metadata.py @@ -70,6 +70,7 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor) metadata = item.get("metadata") or {} keys = { + 'ss_output_name': "Output name:", 'ss_sd_model_name': "Model:", 'ss_clip_skip': "Clip skip:", 'ss_network_module': "Kohya module:", -- cgit v1.2.3 From 02e7824e6a9c3d74af7b383dd66a3a1c231ef082 Mon Sep 17 00:00:00 2001 From: ibrainventures Date: Tue, 29 Aug 2023 02:04:07 +0200 Subject: [RC 1.6.1 - zoom is partly hidden] Update style.css If a image / batch result image is higher or wider than the current viewport, and is zoomed (left corner zoom icon) it is cutted off on the top and also to the left. This new rule seems to be the culprit. --- style.css | 7 ------- 1 file changed, 7 deletions(-) diff --git a/style.css b/style.css index e336e79d..56e2cb4c 100644 --- a/style.css +++ b/style.css @@ -660,13 +660,6 @@ table.popup-table .link{ min-height: 0; } -#modalImage{ - position: absolute; - top: 50%; - left: 50%; - transform: translateX(-50%) translateY(-50%); -} - .modalPrev, .modalNext { cursor: pointer; -- cgit v1.2.3 From 5070ab80042e0cafb1eb97f5da9d1d871cb85de6 Mon Sep 17 00:00:00 2001 From: dhwz Date: Tue, 29 Aug 2023 07:16:32 +0200 Subject: remove xformers Python version check --- modules/launch_utils.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 7e4d5a61..14252c3a 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -366,17 +366,7 @@ def prepare_environment(): startup_timer.record("install open_clip") if (not is_installed("xformers") or args.reinstall_xformers) and args.xformers: - if platform.system() == "Windows": - if platform.python_version().startswith("3.10"): - run_pip(f"install -U -I --no-deps {xformers_package}", "xformers", live=True) - else: - print("Installation of xformers is not supported in this version of Python.") - print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness") - if not is_installed("xformers"): - exit(0) - elif platform.system() == "Linux": - run_pip(f"install -U -I --no-deps {xformers_package}", "xformers") - + run_pip(f"install -U -I --no-deps {xformers_package}", "xformers") startup_timer.record("install xformers") if not is_installed("ngrok") and args.ngrok: -- cgit v1.2.3 From 7ab16e99eedf3b5da7e596218a585f6966aee4d8 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Tue, 29 Aug 2023 01:51:13 -0400 Subject: Add option to align with sgm repo sampling implementation --- modules/sd_samplers_kdiffusion.py | 14 ++++++++++++-- modules/shared_options.py 
| 1 + scripts/xyz_grid.py | 1 + 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index b9e0d577..a8a2735f 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -144,7 +144,13 @@ class KDiffusionSampler(sd_samplers_common.Sampler): sigmas = self.get_sigmas(p, steps) sigma_sched = sigmas[steps - t_enc - 1:] - xi = x + noise * sigma_sched[0] + if opts.sgm_noise_multiplier: + p.extra_generation_params["SGM noise multiplier"] = True + noise_multiplier = torch.sqrt(1.0 + sigma_sched[0] ** 2.0) + else: + noise_multiplier = sigma_sched[0] + + xi = x + noise * noise_multiplier if opts.img2img_extra_noise > 0: p.extra_generation_params["Extra noise"] = opts.img2img_extra_noise @@ -197,7 +203,11 @@ class KDiffusionSampler(sd_samplers_common.Sampler): sigmas = self.get_sigmas(p, steps) - x = x * sigmas[0] + if opts.sgm_noise_multiplier: + p.extra_generation_params["SGM noise multiplier"] = True + x = x * torch.sqrt(1.0 + sigmas[0] ** 2.0) + else: + x = x * sigmas[0] extra_params_kwargs = self.initialize(p) parameters = inspect.signature(self.func).parameters diff --git a/modules/shared_options.py b/modules/shared_options.py index 83f56314..67f7a8df 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -309,6 +309,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" 'rho': OptionInfo(0.0, "rho", gr.Number, infotext='Schedule rho').info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"), 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}, infotext='ENSD').info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"), 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma", infotext='Discard penultimate sigma').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"), + 'sgm_noise_multiplier': OptionInfo(False, "SGM noise multiplier", infotext='SGM noise multiplier').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12818").info("Match initial noise to official SDXL implementation - only useful for reproducing images"), 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}, infotext='UniPC variant'), 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}, infotext='UniPC skip type'), 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}, infotext='UniPC order').info("must be < sampling steps"), diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 517d6332..939d8605 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -265,6 +265,7 @@ axis_options = [ AxisOption("Token merging ratio", float, apply_override('token_merging_ratio')), AxisOption("Token merging ratio high-res", float, apply_override('token_merging_ratio_hr')), AxisOption("Always discard next-to-last sigma", str, apply_override('always_discard_next_to_last_sigma', boolean=True), choices=boolean_choice(reverse=True)), + AxisOption("SGM noise multiplier", str, apply_override('sgm_noise_multiplier', boolean=True), choices=boolean_choice(reverse=True)), AxisOption("Refiner checkpoint", str, apply_field('refiner_checkpoint'), format_value=format_remove_path, confirm=confirm_checkpoints_or_none, cost=1.0, choices=lambda: ['None'] + sorted(sd_models.checkpoints_list, key=str.casefold)), AxisOption("Refiner switch at", float, apply_field('refiner_switch_at')), AxisOption("RNG source", str, apply_override("randn_source"), choices=lambda: ["GPU", "CPU", "NV"]), -- cgit v1.2.3
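The only thing the new option changes is the scale of the initial noise: k-diffusion multiplies unit-variance noise by sigma_max, while the official SGM (SDXL) code multiplies by sqrt(sigma_max^2 + 1), which follows from its denoiser parameterization. A toy comparison of the two scale factors (the sigma value is illustrative):

import torch

sigma_max = 14.6  # illustrative; the sampler takes this from its noise schedule

torch.manual_seed(0)
x = torch.randn(4)  # unit-variance initial latent noise

x_default = x * sigma_max                                     # previous behaviour
x_sgm = x * torch.sqrt(torch.tensor(1.0 + sigma_max ** 2.0))  # with the option enabled

# the two scalings differ by sqrt(1 + 1/sigma_max**2), about 0.23% here;
# tiny, but enough to change the image produced by a fixed seed
print(torch.sqrt(torch.tensor(1.0 + sigma_max ** 2.0)).item() / sigma_max)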
From b6c1a1bbbf29a3041025aa336f6f843ffd7c7d46 Mon Sep 17 00:00:00 2001 From: a666 <19142162+a666@users.noreply.github.com> Date: Fri, 25 Aug 2023 01:58:19 -0600 Subject: Fix some deprecated types --- modules/api/api.py | 26 +++++++++++++------------- modules/api/models.py | 24 +++++++++++------------- modules/gitpython_hack.py | 2 +- modules/prompt_parser.py | 7 +++---- modules/script_callbacks.py | 6 +++--- modules/sub_quadratic_attention.py | 4 ++-- modules/ui.py | 3 +-- 7 files changed, 34 insertions(+), 38 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 844e31ee..905ef9c9 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -29,7 +29,7 @@ from modules.sd_models import unload_model_weights, reload_model_weights, checkp from modules.sd_models_config import find_checkpoint_config_near_filename from modules.realesrgan_model import get_realesrgan_models from modules import devices -from typing import Dict, List, Any +from typing import Any import piexif import piexif.helper from contextlib import closing @@ -221,15 +221,15 @@ class Api: self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=models.OptionsModel) self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"]) self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel) - self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[models.SamplerItem]) - self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[models.UpscalerItem]) - self.add_api_route("/sdapi/v1/latent-upscale-modes", self.get_latent_upscale_modes, methods=["GET"], response_model=List[models.LatentUpscalerModeItem]) - self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[models.SDModelItem]) - self.add_api_route("/sdapi/v1/sd-vae", self.get_sd_vaes, methods=["GET"], response_model=List[models.SDVaeItem]) - self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[models.HypernetworkItem]) - self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[models.FaceRestorerItem]) - self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[models.RealesrganItem]) - self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[models.PromptStyleItem]) + self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=list[models.SamplerItem]) + self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=list[models.UpscalerItem]) + self.add_api_route("/sdapi/v1/latent-upscale-modes", self.get_latent_upscale_modes, methods=["GET"], response_model=list[models.LatentUpscalerModeItem]) + self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=list[models.SDModelItem]) + self.add_api_route("/sdapi/v1/sd-vae", self.get_sd_vaes, methods=["GET"], response_model=list[models.SDVaeItem]) + 
self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=list[models.HypernetworkItem]) + self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=list[models.FaceRestorerItem]) + self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=list[models.RealesrganItem]) + self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=list[models.PromptStyleItem]) self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=models.EmbeddingsResponse) self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"]) self.add_api_route("/sdapi/v1/refresh-vae", self.refresh_vae, methods=["POST"]) @@ -242,8 +242,8 @@ class Api: self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"]) self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"]) self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList) - self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=List[models.ScriptInfo]) - self.add_api_route("/sdapi/v1/extensions", self.get_extensions_list, methods=["GET"], response_model=List[models.ExtensionItem]) + self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=list[models.ScriptInfo]) + self.add_api_route("/sdapi/v1/extensions", self.get_extensions_list, methods=["GET"], response_model=list[models.ExtensionItem]) if shared.cmd_opts.api_server_stop: self.add_api_route("/sdapi/v1/server-kill", self.kill_webui, methods=["POST"]) @@ -563,7 +563,7 @@ class Api: return options - def set_config(self, req: Dict[str, Any]): + def set_config(self, req: dict[str, Any]): checkpoint_name = req.get("sd_model_checkpoint", None) if checkpoint_name is not None and checkpoint_name not in checkpoint_aliases: raise RuntimeError(f"model {checkpoint_name!r} not found") diff --git a/modules/api/models.py b/modules/api/models.py index 94eca97d..a0d80af8 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -1,12 +1,10 @@ import inspect from pydantic import BaseModel, Field, create_model -from typing import Any, Optional -from typing_extensions import Literal +from typing import Any, Optional, Literal from inflection import underscore from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img from modules.shared import sd_upscalers, opts, parser -from typing import Dict, List API_NOT_ALLOWED = [ "self", @@ -130,12 +128,12 @@ StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( ).generate_model() class TextToImageResponse(BaseModel): - images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.") + images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") parameters: dict info: str class ImageToImageResponse(BaseModel): - images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.") + images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") parameters: dict info: str @@ -168,10 +166,10 @@ class FileData(BaseModel): name: str = Field(title="File name") class ExtrasBatchImagesRequest(ExtrasBaseRequest): - imageList: List[FileData] = 
Field(title="Images", description="List of images to work on. Must be Base64 strings") + imageList: list[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings") class ExtrasBatchImagesResponse(ExtraBaseResponse): - images: List[str] = Field(title="Images", description="The generated images in base64 format.") + images: list[str] = Field(title="Images", description="The generated images in base64 format.") class PNGInfoRequest(BaseModel): image: str = Field(title="Image", description="The base64 encoded PNG image") @@ -233,8 +231,8 @@ FlagsModel = create_model("Flags", **flags) class SamplerItem(BaseModel): name: str = Field(title="Name") - aliases: List[str] = Field(title="Aliases") - options: Dict[str, str] = Field(title="Options") + aliases: list[str] = Field(title="Aliases") + options: dict[str, str] = Field(title="Options") class UpscalerItem(BaseModel): name: str = Field(title="Name") @@ -285,8 +283,8 @@ class EmbeddingItem(BaseModel): vectors: int = Field(title="Vectors", description="The number of vectors in the embedding") class EmbeddingsResponse(BaseModel): - loaded: Dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model") - skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)") + loaded: dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model") + skipped: dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)") class MemoryResponse(BaseModel): ram: dict = Field(title="RAM", description="System memory stats") @@ -304,14 +302,14 @@ class ScriptArg(BaseModel): minimum: Optional[Any] = Field(default=None, title="Minimum", description="Minimum allowed value for the argumentin UI") maximum: Optional[Any] = Field(default=None, title="Minimum", description="Maximum allowed value for the argumentin UI") step: Optional[Any] = Field(default=None, title="Minimum", description="Step for changing value of the argumentin UI") - choices: Optional[List[str]] = Field(default=None, title="Choices", description="Possible values for the argument") + choices: Optional[list[str]] = Field(default=None, title="Choices", description="Possible values for the argument") class ScriptInfo(BaseModel): name: str = Field(default=None, title="Name", description="Script name") is_alwayson: bool = Field(default=None, title="IsAlwayson", description="Flag specifying whether this script is an alwayson script") is_img2img: bool = Field(default=None, title="IsImg2img", description="Flag specifying whether this script is an img2img script") - args: List[ScriptArg] = Field(title="Arguments", description="List of script's arguments") + args: list[ScriptArg] = Field(title="Arguments", description="List of script's arguments") class ExtensionItem(BaseModel): name: str = Field(title="Name", description="Extension name") diff --git a/modules/gitpython_hack.py b/modules/gitpython_hack.py index e537c1df..b55f0640 100644 --- a/modules/gitpython_hack.py +++ b/modules/gitpython_hack.py @@ -23,7 +23,7 @@ class Git(git.Git): ) return self._parse_object_header(ret) - def stream_object_data(self, ref: str) -> tuple[str, str, int, "Git.CatFileContentStream"]: + def stream_object_data(self, ref: str) -> tuple[str, str, int, Git.CatFileContentStream]: # Not really streaming, per se; this buffers the entire 
object in memory. # Shouldn't be a problem for our use case, since we're only using this for # object headers (commit objects). diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 334efeef..ddf4d2dd 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -2,7 +2,6 @@ from __future__ import annotations import re from collections import namedtuple -from typing import List import lark # a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]" @@ -240,14 +239,14 @@ def get_multicond_prompt_list(prompts: SdConditioning | list[str]): class ComposableScheduledPromptConditioning: def __init__(self, schedules, weight=1.0): - self.schedules: List[ScheduledPromptConditioning] = schedules + self.schedules: list[ScheduledPromptConditioning] = schedules self.weight: float = weight class MulticondLearnedConditioning: def __init__(self, shape, batch): self.shape: tuple = shape # the shape field is needed to send this object to DDIM/PLMS - self.batch: List[List[ComposableScheduledPromptConditioning]] = batch + self.batch: list[list[ComposableScheduledPromptConditioning]] = batch def get_multicond_learned_conditioning(model, prompts, steps, hires_steps=None, use_old_scheduling=False) -> MulticondLearnedConditioning: @@ -278,7 +277,7 @@ class DictWithShape(dict): return self["crossattn"].shape -def reconstruct_cond_batch(c: List[List[ScheduledPromptConditioning]], current_step): +def reconstruct_cond_batch(c: list[list[ScheduledPromptConditioning]], current_step): param = c[0][0].cond is_dict = isinstance(param, dict) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index fab23551..9c2a6541 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -1,7 +1,7 @@ import inspect import os from collections import namedtuple -from typing import Optional, Dict, Any +from typing import Optional, Any from fastapi import FastAPI from gradio import Blocks @@ -255,7 +255,7 @@ def image_grid_callback(params: ImageGridLoopParams): report_exception(c, 'image_grid') -def infotext_pasted_callback(infotext: str, params: Dict[str, Any]): +def infotext_pasted_callback(infotext: str, params: dict[str, Any]): for c in callback_map['callbacks_infotext_pasted']: try: c.callback(infotext, params) @@ -446,7 +446,7 @@ def on_infotext_pasted(callback): """register a function to be called before applying an infotext. The callback is called with two arguments: - infotext: str - raw infotext. - - result: Dict[str, any] - parsed infotext parameters. + - result: dict[str, any] - parsed infotext parameters. 
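The change in this commit is mechanical throughout: typing.List and typing.Dict become the built-in list and dict, which accept subscripted type parameters natively since Python 3.9 (PEP 585), while Optional, Any and Literal still come from typing. A minimal before/after sketch, assuming Python 3.9+:

    from typing import Any, Optional

    # deprecated spelling, needs "from typing import Dict, List":
    #     def set_config(req: Dict[str, Any]) -> List[str]: ...

    # modern spelling, no extra imports required on Python 3.9+:
    def set_config(req: dict[str, Any]) -> list[str]:
        return [key for key, value in req.items() if value is not None]

    choices: Optional[list[str]] = None  # Optional[...] still lives in typing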
""" add_callback(callback_map['callbacks_infotext_pasted'], callback) diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py index ae4ee4bb..4cb561ef 100644 --- a/modules/sub_quadratic_attention.py +++ b/modules/sub_quadratic_attention.py @@ -15,7 +15,7 @@ import torch from torch import Tensor from torch.utils.checkpoint import checkpoint import math -from typing import Optional, NamedTuple, List +from typing import Optional, NamedTuple def narrow_trunc( @@ -97,7 +97,7 @@ def _query_chunk_attention( ) return summarize_chunk(query, key_chunk, value_chunk) - chunks: List[AttnChunk] = [ + chunks: list[AttnChunk] = [ chunk_scanner(chunk) for chunk in torch.arange(0, k_tokens, kv_chunk_size) ] acc_chunk = AttnChunk(*map(torch.stack, zip(*chunks))) diff --git a/modules/ui.py b/modules/ui.py index f4028475..9a569182 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1338,7 +1338,6 @@ checkpoint: N/A def setup_ui_api(app): from pydantic import BaseModel, Field - from typing import List class QuicksettingsHint(BaseModel): name: str = Field(title="Name of the quicksettings field") @@ -1347,7 +1346,7 @@ def setup_ui_api(app): def quicksettings_hint(): return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()] - app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint]) + app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=list[QuicksettingsHint]) app.add_api_route("/internal/ping", lambda: {}, methods=["GET"]) -- cgit v1.2.3 From 04b90328c0aa86670cfe5d31612d341e29b5a258 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 29 Aug 2023 15:38:05 +0300 Subject: revert SGM noise multiplier change for img2img because it breaks hires fix --- modules/sd_samplers_kdiffusion.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index a8a2735f..72c352a6 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -144,13 +144,7 @@ class KDiffusionSampler(sd_samplers_common.Sampler): sigmas = self.get_sigmas(p, steps) sigma_sched = sigmas[steps - t_enc - 1:] - if opts.sgm_noise_multiplier: - p.extra_generation_params["SGM noise multiplier"] = True - noise_multiplier = torch.sqrt(1.0 + sigma_sched[0] ** 2.0) - else: - noise_multiplier = sigma_sched[0] - - xi = x + noise * noise_multiplier + xi = x + noise * sigma_sched[0] if opts.img2img_extra_noise > 0: p.extra_generation_params["Extra noise"] = opts.img2img_extra_noise -- cgit v1.2.3 From ba7d0d225a97a7d79a150d1f649011f54552b3bf Mon Sep 17 00:00:00 2001 From: ibrainventures Date: Tue, 29 Aug 2023 15:31:01 +0200 Subject: Update style.css --- style.css | 3 +++ 1 file changed, 3 insertions(+) diff --git a/style.css b/style.css index 56e2cb4c..34869b89 100644 --- a/style.css +++ b/style.css @@ -621,6 +621,9 @@ table.popup-table .link{ .modalControls { display: flex; + position: absolute; + right: 0px; + left: 0px; gap: 1em; padding: 1em; background-color:rgba(0,0,0,0); -- cgit v1.2.3 From f564d8ed2c5c5644101c5670d2cec15b03ccb51b Mon Sep 17 00:00:00 2001 From: bluelovers Date: Tue, 29 Aug 2023 22:11:18 +0800 Subject: refactor: refactor function --- modules/ui_extra_networks_user_metadata.py | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/modules/ui_extra_networks_user_metadata.py 
b/modules/ui_extra_networks_user_metadata.py
index 877d0285..588f84c7 100644
--- a/modules/ui_extra_networks_user_metadata.py
+++ b/modules/ui_extra_networks_user_metadata.py
@@ -10,21 +10,12 @@ from modules import generation_parameters_copypaste, images, sysinfo, errors
 from modules.paths_internal import models_path
 
 
-def windows_to_unix_style(path):
-    return Path(path).as_posix()
-
-
-def exclude_root_path(root_path, path_to_exclude):
-    try:
-        relative_path = os.path.relpath(path_to_exclude, root_path)
-        # if the path is already outside root_path, relpath will return an absolute path
-        # so we need to check whether the path is inside root_path
-        if not relative_path.startswith('..'):
-            return windows_to_unix_style(relative_path)
-    except ValueError:
-        pass
-    # if the path cannot be made relative, or lies outside root_path, return the original path
-    return windows_to_unix_style(path_to_exclude)
+def exclude_root_path(root_path, path):
+    path_object = Path(path)
+    if path_object.is_relative_to(root_path):
+        path_object = path_object.relative_to(root_path)
+
+    return path_object.as_posix()
 
 
 class UserMetadataEditor:
-- 
cgit v1.2.3


From cb2a4f24247c6159740813935a973a6fe1ccc30e Mon Sep 17 00:00:00 2001
From: bluelovers
Date: Tue, 29 Aug 2023 22:47:10 +0800
Subject: chore: change extension time format

---
 modules/ui_extensions.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index e0138267..83dcd303 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -177,7 +177,7 @@ def extension_table():
             {remote}
             {ext.branch}
             {version_link}
-            {time.asctime(time.gmtime(ext.commit_date))}
+            {time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(ext.commit_date))}
             {ext_status}
 
 """
-- 
cgit v1.2.3


From e3939f33394de31594f7c459a7bd352d206f7669 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Tue, 29 Aug 2023 12:19:10 -0400
Subject: Do not change quicksettings value when value returned is `None`

---
 modules/ui_settings.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/ui_settings.py b/modules/ui_settings.py
index 6dde4b6a..8ff9c074 100644
--- a/modules/ui_settings.py
+++ b/modules/ui_settings.py
@@ -87,7 +87,7 @@ class UiSettings:
         if not opts.same_type(value, opts.data_labels[key].default):
             return gr.update(visible=True), opts.dumpjson()
 
-        if not opts.set(key, value):
+        if value is None or not opts.set(key, value):
             return gr.update(value=getattr(opts, key)), opts.dumpjson()
 
         opts.save(shared.config_filename)
-- 
cgit v1.2.3


From 7e5fcdaf694343bd66d58af6644e47d5b8f8b879 Mon Sep 17 00:00:00 2001
From: dhwz
Date: Tue, 29 Aug 2023 18:49:42 +0200
Subject: don't print empty lines

---
 modules/launch_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 9aa0f071..05488fe6 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -228,7 +228,7 @@ def run_extension_installer(extension_dir):
         env = os.environ.copy()
         env['PYTHONPATH'] = f"{os.path.abspath('.')}{os.pathsep}{env.get('PYTHONPATH', '')}"
 
-        stdout = run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env)
+        stdout = run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env).strip()
         if stdout:
             print(stdout)
     except Exception as e:
-- 
cgit v1.2.3


From 549b475be9b5cf1dca0a7bad2b6a6381e50e2b37 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Tue, 29 Aug 2023 14:22:04 -0400
Subject: Add 
noisy latent to ExtraNoiseParams for callback --- modules/script_callbacks.py | 7 +++++-- modules/sd_samplers_kdiffusion.py | 2 +- modules/sd_samplers_timesteps.py | 2 +- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index fab23551..c99695eb 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -29,12 +29,15 @@ class ImageSaveParams: class ExtraNoiseParams: - def __init__(self, noise, x): + def __init__(self, noise, x, xi): self.noise = noise """Random noise generated by the seed""" self.x = x - """Latent image representation of the image""" + """Latent representation of the image""" + + self.xi = xi + """Noisy latent representation of the image""" class CFGDenoiserParams: diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 72c352a6..8a8c87e0 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -148,7 +148,7 @@ class KDiffusionSampler(sd_samplers_common.Sampler): if opts.img2img_extra_noise > 0: p.extra_generation_params["Extra noise"] = opts.img2img_extra_noise - extra_noise_params = ExtraNoiseParams(noise, x) + extra_noise_params = ExtraNoiseParams(noise, x, xi) extra_noise_callback(extra_noise_params) noise = extra_noise_params.noise xi += noise * opts.img2img_extra_noise diff --git a/modules/sd_samplers_timesteps.py b/modules/sd_samplers_timesteps.py index 7a6cbd46..b17a8f93 100644 --- a/modules/sd_samplers_timesteps.py +++ b/modules/sd_samplers_timesteps.py @@ -107,7 +107,7 @@ class CompVisSampler(sd_samplers_common.Sampler): if opts.img2img_extra_noise > 0: p.extra_generation_params["Extra noise"] = opts.img2img_extra_noise - extra_noise_params = ExtraNoiseParams(noise, x) + extra_noise_params = ExtraNoiseParams(noise, x, xi) extra_noise_callback(extra_noise_params) noise = extra_noise_params.noise xi += noise * opts.img2img_extra_noise * sqrt_alpha_cumprod -- cgit v1.2.3 From 9a4a1aac81df8c29d6dcce1abbf1b58fb7e4fc75 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 30 Aug 2023 08:05:18 +0300 Subject: get progressbar to display correctly in extensions tab --- javascript/extensions.js | 2 +- modules/ui_extensions.py | 8 ++++++-- style.css | 5 +++++ 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/javascript/extensions.js b/javascript/extensions.js index 1f7254c5..312131b7 100644 --- a/javascript/extensions.js +++ b/javascript/extensions.js @@ -33,7 +33,7 @@ function extensions_check() { var id = randomId(); - requestProgress(id, gradioApp().getElementById('extensions_installed_top'), null, function() { + requestProgress(id, gradioApp().getElementById('extensions_installed_html'), null, function() { }); diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index e0138267..67a243c3 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -557,8 +557,12 @@ def create_ui(): msg = '"--disable-extra-extensions" was used, remove it to load all extensions again' html = f'{msg}' - info = gr.HTML(html) - extensions_table = gr.HTML('Loading...') + with gr.Row(): + info = gr.HTML(html) + + with gr.Row(elem_classes="progress-container"): + extensions_table = gr.HTML('Loading...', elem_id="extensions_installed_html") + ui.load(fn=extension_table, inputs=[], outputs=[extensions_table]) apply.click( diff --git a/style.css b/style.css index 92d3030e..fb4e2f1f 100644 --- a/style.css +++ b/style.css @@ -517,6 +517,11 @@ table.popup-table .link{ background: #b4c0cc; 
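With the xi field added to ExtraNoiseParams above, an extra-noise callback can now inspect both the clean latent and the already-noised one. A minimal sketch of an extension script registering such a callback, assuming the on_extra_noise registration helper that accompanies extra_noise_callback in script_callbacks:

    from modules import script_callbacks

    def adjust_extra_noise(params):
        # params.noise - random noise generated from the seed
        # params.x     - clean latent representation of the image
        # params.xi    - noisy latent (x plus scheduler-scaled noise), new in this commit
        print(f"extra noise over latents {tuple(params.x.shape)} -> {tuple(params.xi.shape)}")
        params.noise = params.noise * 0.8  # hypothetical: dampen the extra noise slightly

    script_callbacks.on_extra_noise(adjust_extra_noise)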
border-radius: 3px !important; top: -20px; + width: 100%; +} + +.progress-container{ + position: relative; } [id$=_results].mobile{ -- cgit v1.2.3 From edf3ad5aed9435e2ff3cc0f98895be6056f1f950 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 30 Aug 2023 08:22:06 +0300 Subject: go back to single path for filenames in extra networks metadata dialog --- modules/ui_extra_networks_user_metadata.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/modules/ui_extra_networks_user_metadata.py b/modules/ui_extra_networks_user_metadata.py index 588f84c7..ae972fbb 100644 --- a/modules/ui_extra_networks_user_metadata.py +++ b/modules/ui_extra_networks_user_metadata.py @@ -2,22 +2,13 @@ import datetime import html import json import os.path -from pathlib import Path import gradio as gr -from modules import generation_parameters_copypaste, images, sysinfo, errors +from modules import generation_parameters_copypaste, images, sysinfo, errors, ui_extra_networks from modules.paths_internal import models_path -def exclude_root_path(root_path, path): - path_object = Path(path) - if path_object.is_relative_to(root_path): - path_object = path_object.relative_to(root_path) - - return path_object.as_posix() - - class UserMetadataEditor: def __init__(self, ui, tabname, page): @@ -99,6 +90,13 @@ class UserMetadataEditor: return preview + def relative_path(self, path): + for parent_path in self.page.allowed_directories_for_previews(): + if ui_extra_networks.path_is_parent(parent_path, path): + return os.path.relpath(path, parent_path) + + return os.path.basename(path) + def get_metadata_table(self, name): item = self.page.items.get(name, {}) try: @@ -107,8 +105,7 @@ class UserMetadataEditor: stats = os.stat(filename) params = [ - ('Filename: ', os.path.basename(filename)), - ('Path: ', exclude_root_path(models_path, filename)), + ('Filename: ', self.relative_path(filename)), ('File size: ', sysinfo.pretty_bytes(stats.st_size)), ('Hash: ', shorthash), ('Modified: ', datetime.datetime.fromtimestamp(stats.st_mtime).strftime('%Y-%m-%d %H:%M')), -- cgit v1.2.3 From f874b1bcad05d7ea4c3cc28df82904ac7c12e64f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 30 Aug 2023 08:54:31 +0300 Subject: keep order in list of checkpoints when loading model that doesn't have a checksum --- modules/sd_models.py | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 547e93c4..930d0bee 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -27,6 +27,24 @@ checkpoint_alisases = checkpoint_aliases # for compatibility with old name checkpoints_loaded = collections.OrderedDict() +def replace_key(d, key, new_key, value): + keys = list(d.keys()) + + d[new_key] = value + + if key not in keys: + return d + + index = keys.index(key) + keys[index] = new_key + + new_d = {k: d[k] for k in keys} + + d.clear() + d.update(new_d) + return d + + class CheckpointInfo: def __init__(self, filename): self.filename = filename @@ -91,9 +109,11 @@ class CheckpointInfo: if self.shorthash not in self.ids: self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]', f'{self.name_for_extra} [{self.shorthash}]'] - checkpoints_list.pop(self.title, None) + old_title = self.title self.title = f'{self.name} [{self.shorthash}]' self.short_title = f'{self.name_for_extra} [{self.shorthash}]' + + replace_key(checkpoints_list, old_title, self.title, self) self.register() 
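The replace_key helper above exists because the obvious pop-and-reassign would move the renamed checkpoint to the end of checkpoints_list: Python dicts keep insertion order, and the checkpoint dropdown reflects it. A condensed restatement of the helper with a usage example (the checkpoint titles here are hypothetical):

    def replace_key(d, key, new_key, value):
        # rename key to new_key in place, keeping its position in insertion order
        keys = list(d.keys())
        d[new_key] = value
        if key not in keys:
            return d
        keys[keys.index(key)] = new_key
        new_d = {k: d[k] for k in keys}
        d.clear()
        d.update(new_d)
        return d

    checkpoints = {"a.ckpt": 1, "b.ckpt [deadbeef]": 2, "c.ckpt": 3}
    replace_key(checkpoints, "b.ckpt [deadbeef]", "b.ckpt [cafebabe]", 2)
    print(list(checkpoints))  # ['a.ckpt', 'b.ckpt [cafebabe]', 'c.ckpt']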
return self.shorthash -- cgit v1.2.3 From 28b084ca25387340ba07a5ffed8403d8d289cb70 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 30 Aug 2023 15:28:46 +0900 Subject: extension time format in system time zone --- modules/ui_extensions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 83557d7a..fa831f57 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -177,7 +177,7 @@ def extension_table(): {remote} {ext.branch} {version_link} - {time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(ext.commit_date))} + {datetime.fromtimestamp(ext.commit_date) if ext.commit_date else ""} {ext_status} """ -- cgit v1.2.3 From 67cd4ec0aabb69d1133dfb18543ab080be855323 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 30 Aug 2023 15:37:13 +0900 Subject: lint --- modules/ui_extra_networks_user_metadata.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/ui_extra_networks_user_metadata.py b/modules/ui_extra_networks_user_metadata.py index ae972fbb..bfec140c 100644 --- a/modules/ui_extra_networks_user_metadata.py +++ b/modules/ui_extra_networks_user_metadata.py @@ -6,7 +6,6 @@ import os.path import gradio as gr from modules import generation_parameters_copypaste, images, sysinfo, errors, ui_extra_networks -from modules.paths_internal import models_path class UserMetadataEditor: -- cgit v1.2.3 From c985d23c52b541e5a5d0dcf2c3f3a0629cee23f9 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 30 Aug 2023 16:18:31 +0900 Subject: extension update time, convert to system time zone --- modules/ui_extensions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index fa831f57..2e8c1d6d 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -2,7 +2,7 @@ import json import os import threading import time -from datetime import datetime +from datetime import datetime, timezone import git @@ -442,7 +442,7 @@ sort_ordering = [ def get_date(info: dict, key): try: - return datetime.strptime(info.get(key), "%Y-%m-%dT%H:%M:%SZ").strftime("%Y-%m-%d") + return datetime.strptime(info.get(key), "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc).astimezone().strftime("%Y-%m-%d") except (ValueError, TypeError): return '' -- cgit v1.2.3 From ae0b2cc1964486ba847290ad752d9a284b6d63ba Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 30 Aug 2023 18:22:50 +0300 Subject: add an option to choose how to combine hires fix and refiner --- modules/processing.py | 18 ++++++------------ modules/sd_samplers_common.py | 13 +++++++++++-- modules/shared_options.py | 1 + 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 0138e5ac..f696e925 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -1148,18 +1148,12 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): else: decoded_samples = None - current = shared.sd_model.sd_checkpoint_info - try: - if self.hr_checkpoint_info is not None: - self.sampler = None - sd_models.reload_model_weights(info=self.hr_checkpoint_info) - devices.torch_gc() - - return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts) - finally: - self.sampler = None - sd_models.reload_model_weights(info=current) - devices.torch_gc() + with sd_models.SkipWritingToConfig(): + 
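The SkipWritingToConfig guard used here replaces the earlier try/finally restore dance: the hires checkpoint can be loaded for the second pass without being persisted as the user's configured sd_model_checkpoint. The guard itself is not part of this hunk; what follows is only a sketch of the usual shape of such a context manager, which may differ in detail from the real one in modules/sd_models.py:

    class SkipWritingToConfig:
        """While active, model loads should not write the checkpoint name to config."""
        skip = False

        def __enter__(self):
            self.previous = SkipWritingToConfig.skip
            SkipWritingToConfig.skip = True

        def __exit__(self, exc_type, exc_value, exc_tb):
            SkipWritingToConfig.skip = self.previous

    def reload_model_weights(info=None):  # hypothetical stand-in for the real loader
        if not SkipWritingToConfig.skip:
            print(f"persisting sd_model_checkpoint = {info}")
        print(f"loading weights from {info}")

    with SkipWritingToConfig():
        reload_model_weights(info="hires-checkpoint.safetensors")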
sd_models.reload_model_weights(info=self.hr_checkpoint_info) + + devices.torch_gc() + + return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts) def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, subseed_strength, prompts): if shared.state.interrupted: diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 60fa161c..6c935a38 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -164,8 +164,17 @@ def apply_refiner(cfg_denoiser): if refiner_checkpoint_info is None or shared.sd_model.sd_checkpoint_info == refiner_checkpoint_info: return False - if getattr(cfg_denoiser.p, "enable_hr", False) and not cfg_denoiser.p.is_hr_pass: - return False + if getattr(cfg_denoiser.p, "enable_hr", False): + is_second_pass = cfg_denoiser.p.is_hr_pass + + if opts.hires_fix_refiner_pass == "first pass" and is_second_pass: + return False + + if opts.hires_fix_refiner_pass == "second pass" and not is_second_pass: + return False + + if opts.hires_fix_refiner_pass != "second pass": + cfg_denoiser.p.extra_generation_params['Hires refiner'] = opts.hires_fix_refiner_pass cfg_denoiser.p.extra_generation_params['Refiner'] = refiner_checkpoint_info.short_title cfg_denoiser.p.extra_generation_params['Refiner switch at'] = refiner_switch_at diff --git a/modules/shared_options.py b/modules/shared_options.py index 78652ea2..00b273fa 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -146,6 +146,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}, infotext="RNG").info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), "tiling": OptionInfo(False, "Tiling", infotext='Tiling').info("produce a tileable picture"), + "hires_fix_refiner_pass": OptionInfo("second pass", "Hires fix: which pass to enable refiner for", gr.Radio, {"choices": ["first pass", "second pass", "both passes"]}, infotext="Hires refiner"), })) options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { -- cgit v1.2.3 From 6adf2b71c2c89f84d4aee1e230276dcd1a3fab62 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 30 Aug 2023 19:08:04 +0300 Subject: fix inpainting models in txt2img creating black pictures --- modules/processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index f696e925..e08b6305 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -91,8 +91,8 @@ def create_binary_mask(image): def txt2img_image_conditioning(sd_model, x, width, height): if sd_model.model.conditioning_key in {'hybrid', 'concat'}: # Inpainting models - # The "masked-image" in this case will just be all zeros since the entire image is masked. - image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device) + # The "masked-image" in this case will just be all 0.5 since the entire image is masked. + image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5 image_conditioning = images_tensor_to_samples(image_conditioning, approximation_indexes.get(opts.sd_vae_encode_method)) # Add the fake full 1s mask to the first dimension. 
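A note on the one-line inpainting fix above: the conditioning image is built in the 0..1 pixel range before being VAE-encoded, so a tensor of zeros encodes a solid black "masked image", while 0.5 is neutral gray (zero once rescaled to the usual -1..1 range), which is what a fully masked input should look like to the model. A minimal sketch of the fully-masked conditioning txt2img builds for inpainting checkpoints, assuming 512x512 output and the usual 8x latent downscale:

    import torch

    batch, height, width = 1, 512, 512

    # "masked image": everything is masked, so neutral gray (0.5), not black (0.0)
    masked_image = torch.ones(batch, 3, height, width) * 0.5

    # stand-in for the VAE encode of masked_image (4 latent channels at 1/8 resolution)
    image_latents = torch.zeros(batch, 4, height // 8, width // 8)

    # full 1s mask concatenated onto the latents, as the comment in the hunk describes
    mask = torch.ones(batch, 1, height // 8, width // 8)
    image_conditioning = torch.cat([mask, image_latents], dim=1)
    print(image_conditioning.shape)  # torch.Size([1, 5, 64, 64])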
-- cgit v1.2.3 From 541a3db05ba7241b466f7370533e2bef24dbe9de Mon Sep 17 00:00:00 2001 From: ljleb Date: Wed, 30 Aug 2023 21:38:21 -0400 Subject: fix generation params regex --- modules/generation_parameters_copypaste.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 2ca16055..d39f2eba 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -9,7 +9,7 @@ from modules.paths import data_path from modules import shared, ui_tempdir, script_callbacks, processing from PIL import Image -re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)' +re_param_code = r'\s*([\w ]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)' re_param = re.compile(re_param_code) re_imagesize = re.compile(r"^(\d+)x(\d+)$") re_hypernet_hash = re.compile("\(([0-9a-f]+)\)$") -- cgit v1.2.3 From 41196ccbf7552274cf111de24a43ebfa836175a6 Mon Sep 17 00:00:00 2001 From: zixaphir Date: Wed, 30 Aug 2023 20:20:19 -0700 Subject: account for customizable extra network separators in remove code previous behavior only searched for leading spaces --- javascript/extraNetworks.js | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index 493f31af..eb2b9ebd 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -141,9 +141,12 @@ function setupExtraNetworks() { onUiLoaded(setupExtraNetworks); var re_extranet = /<([^:]+:[^:]+):[\d.]+>(.*)/; -var re_extranet_g = /\s+<([^:]+:[^:]+):[\d.]+>/g; +var re_extranet_str = '<([^:]+:[^:]+):[\\d.]+>'; function tryToRemoveExtraNetworkFromPrompt(textarea, text) { + function reEscape(s) { + return s.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); + } var m = text.match(re_extranet); var replaced = false; var newTextareaText; @@ -151,7 +154,9 @@ function tryToRemoveExtraNetworkFromPrompt(textarea, text) { var extraTextAfterNet = m[2]; var partToSearch = m[1]; var foundAtPosition = -1; - newTextareaText = textarea.value.replaceAll(re_extranet_g, function(found, net, pos) { + var escapedSeparator = reEscape(opts.extra_networks_add_text_separator); + var re = new RegExp(escapedSeparator + re_extranet_str, 'g'); + newTextareaText = textarea.value.replaceAll(re, function(found, net, pos) { m = found.match(re_extranet); if (m[1] == partToSearch) { replaced = true; -- cgit v1.2.3 From 76b1ad7daf35f8667e07ff9cff9ef42b828b1b83 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Wed, 30 Aug 2023 23:07:18 -0600 Subject: Use default dropdown padding on mobile --- style.css | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/style.css b/style.css index fb4e2f1f..58eb29c1 100644 --- a/style.css +++ b/style.css @@ -83,8 +83,10 @@ div.compact{ white-space: nowrap; } -.gradio-dropdown ul.options li.item { - padding: 0.05em 0; +@media (pointer:fine) { + .gradio-dropdown ul.options li.item { + padding: 0.05em 0; + } } .gradio-dropdown ul.options li.item.selected { -- cgit v1.2.3 From 348c6022f330c6e64a6a0fb40fd2b3e65bf0ce6a Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 1 Sep 2023 00:55:56 +0900 Subject: Action to calculate all SD checkpoint hashes --- modules/ui_settings.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/modules/ui_settings.py b/modules/ui_settings.py index 8ff9c074..c6fe3604 100644 --- a/modules/ui_settings.py +++ b/modules/ui_settings.py @@ -5,6 +5,7 @@ from 
modules.call_queue import wrap_gradio_call
 from modules.shared import opts
 from modules.ui_components import FormRow
 from modules.ui_gradio_extensions import reload_javascript
+from concurrent.futures import ThreadPoolExecutor, as_completed
 
 
 def get_value_for_setting(key):
@@ -175,6 +176,9 @@ class UiSettings:
                 with gr.Row():
                     unload_sd_model = gr.Button(value='Unload SD checkpoint to free VRAM', elem_id="sett_unload_sd_model")
                     reload_sd_model = gr.Button(value='Reload the last SD checkpoint back into VRAM', elem_id="sett_reload_sd_model")
+                with gr.Row():
+                    calculate_all_checkpoint_hash = gr.Button(value='Calculate hash for all checkpoints', elem_id="calculate_all_checkpoint_hash")
+                    calculate_all_checkpoint_hash_threads = gr.Number(value=1, label="Number of parallel calculations", elem_id="calculate_all_checkpoint_hash_threads", precision=0, minimum=1)
 
             with gr.TabItem("Licenses", id="licenses", elem_id="settings_tab_licenses"):
                 gr.HTML(shared.html("licenses.html"), elem_id="licenses")
@@ -241,6 +245,21 @@ class UiSettings:
             outputs=[sysinfo_check_output],
         )
 
+        def calculate_all_checkpoint_hash_fn(max_thread):
+            checkpoints_list = sd_models.checkpoints_list.values()
+            with ThreadPoolExecutor(max_workers=max_thread) as executor:
+                futures = [executor.submit(checkpoint.calculate_shorthash) for checkpoint in checkpoints_list]
+                completed = 0
+                for _ in as_completed(futures):
+                    completed += 1
+                    print(f"{completed} / {len(checkpoints_list)} ")
+            print("Finished calculating hashes for all checkpoints")
+
+        calculate_all_checkpoint_hash.click(
+            fn=calculate_all_checkpoint_hash_fn,
+            inputs=[calculate_all_checkpoint_hash_threads],
+        )
+
         self.interface = settings_interface
 
     def add_quicksettings(self):
-- 
cgit v1.2.3


From 5681bf801664aa09fa02ab8b4e73f780d9563440 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Thu, 31 Aug 2023 14:57:16 -0400
Subject: More accurate check for enabling cuDNN benchmark on 16XX cards

---
 modules/devices.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/modules/devices.py b/modules/devices.py
index c01f0602..63c38eff 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -60,7 +60,8 @@ def enable_tf32():
 
         # enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't
         # see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407
-        if any(torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())):
+        device_id = (int(shared.cmd_opts.device_id) if shared.cmd_opts.device_id.isdigit() else 0) or torch.cuda.current_device()
+        if torch.cuda.get_device_capability(device_id) == (7, 5) and torch.cuda.get_device_name(device_id).startswith("NVIDIA GeForce GTX 16"):
             torch.backends.cudnn.benchmark = True
 
         torch.backends.cuda.matmul.allow_tf32 = True
-- 
cgit v1.2.3


From bd9b3d15e8f631f9475d14a5fd07560c177dc2f3 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Fri, 1 Sep 2023 04:05:58 +0900
Subject: fix batch img2img output dir with script

---
 modules/img2img.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/modules/img2img.py b/modules/img2img.py
index c81c7ab9..0c6d1af5 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -114,11 +114,14 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal
             else:
                 p.override_settings.pop("sd_model_checkpoint", None)
 
+            if output_dir:
+                p.outpath_samples = output_dir
+                p.override_settings['save_to_dirs'] = False
+
         proc = modules.scripts.scripts_img2img.run(p, *args)
+
         if proc is None:
             if output_dir:
-                p.outpath_samples = output_dir
-                p.override_settings['save_to_dirs'] = False
                 if p.n_iter > 1 or p.batch_size > 1:
                     p.override_settings['samples_filename_pattern'] = f'{image_path.stem}-[generation_number]'
                 else:
-- 
cgit v1.2.3


From 78c1a74660a2e25b3960beb42e3a6f8419c8b3c3 Mon Sep 17 00:00:00 2001
From: zixaphir
Date: Thu, 31 Aug 2023 14:18:35 -0700
Subject: Account for edge case where user deleted leading separator.

---
 javascript/extraNetworks.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js
index eb2b9ebd..ca87bead 100644
--- a/javascript/extraNetworks.js
+++ b/javascript/extraNetworks.js
@@ -154,7 +154,7 @@ function tryToRemoveExtraNetworkFromPrompt(textarea, text) {
         var extraTextAfterNet = m[2];
         var partToSearch = m[1];
         var foundAtPosition = -1;
-        var escapedSeparator = reEscape(opts.extra_networks_add_text_separator);
+        var escapedSeparator = `(?:${reEscape(opts.extra_networks_add_text_separator)})?`;
         var re = new RegExp(escapedSeparator + re_extranet_str, 'g');
         newTextareaText = textarea.value.replaceAll(re, function(found, net, pos) {
             m = found.match(re_extranet);
-- 
cgit v1.2.3


From 737a013377dd698e620f39e405594f7688656af0 Mon Sep 17 00:00:00 2001
From: Beinsezii
Date: Thu, 31 Aug 2023 15:03:08 -0700
Subject: WEBUI.SH Navi 3 torch 2.1.0 rc instead of nightly

With the release candidates being out for both torch and vision, webui
should default to these over nightly for a more stable experience.
Stable release isn't expected until October 4th:
https://dev-discuss.pytorch.org/c/release-announcements/27
---
 webui.sh | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/webui.sh b/webui.sh
index 3d0f87ee..29a6d311 100755
--- a/webui.sh
+++ b/webui.sh
@@ -141,9 +141,8 @@ case "$gpu_info" in
     *"Navi 2"*) export HSA_OVERRIDE_GFX_VERSION=10.3.0
     ;;
     *"Navi 3"*) [[ -z "${TORCH_COMMAND}" ]] && \
-        export TORCH_COMMAND="pip install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.6"
-        # Navi 3 needs at least 5.5 which is only on the nightly chain, previous versions are no longer online (torch==2.1.0.dev-20230614+rocm5.5 torchvision==0.16.0.dev-20230614+rocm5.5 torchaudio==2.1.0.dev-20230614+rocm5.5)
-        # so switch to nightly rocm5.6 without explicit versions this time
+        export TORCH_COMMAND="pip install torch torchvision --index-url https://download.pytorch.org/whl/test/rocm5.6"
+        # Navi 3 needs at least 5.5 which is only on the torch 2.1.0 release candidates right now
     ;;
     *"Renoir"*) export HSA_OVERRIDE_GFX_VERSION=9.0.0
         printf "\n%s\n" "${delimiter}"
-- 
cgit v1.2.3


From 317d00b2a6f81eb58e33487abf05a8b84ef01dd0 Mon Sep 17 00:00:00 2001
From: AnyISalIn
Date: Fri, 1 Sep 2023 21:45:11 +0800
Subject: fix: update shared.opts.data when add_option

Signed-off-by: AnyISalIn
---
 modules/options.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/modules/options.py b/modules/options.py
index 758b1ce5..e75916d2 100644
--- a/modules/options.py
+++ b/modules/options.py
@@ -210,6 +210,7 @@ class Options:
 
     def add_option(self, key, info):
         self.data_labels[key] = info
+        self.data[key] = info.default
 
     def reorder(self):
         """reorder settings so that all items related to section always go together"""
-- 
cgit v1.2.3


From bf0b08321688f65905168b6444d6d13b1a1d9d91 Mon Sep 17 00:00:00 2001
From: missionfloyd
Date: Fri, 1 Sep 2023 16:14:33 -0600
Subject: Add button to copy prompt to style editor

---
 modules/ui_prompt_styles.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/modules/ui_prompt_styles.py b/modules/ui_prompt_styles.py
index 85eb3a64..46a26573 100644
--- a/modules/ui_prompt_styles.py
+++ b/modules/ui_prompt_styles.py
@@ -4,6 +4,7 @@ from modules import shared, ui_common, ui_components, styles
 
 styles_edit_symbol = '\U0001f58c\uFE0F'  # 🖌️
 styles_materialize_symbol = '\U0001f4cb'  # 📋
+styles_copy_symbol = '\U0001f4dd'  # 📝
 
 
 def select_style(name):
@@ -62,6 +63,7 @@ class UiPromptStyles:
                 self.selection = gr.Dropdown(label="Styles", elem_id=f"{tabname}_styles_edit_select", choices=list(shared.prompt_styles.styles), value=[], allow_custom_value=True, info="Styles allow you to add custom text to prompt. Use the {prompt} token in style text, and it will be replaced with user's prompt when applying style. Otherwise, style's text will be added to the end of the prompt.")
                 ui_common.create_refresh_button([self.dropdown, self.selection], shared.prompt_styles.reload, lambda: {"choices": list(shared.prompt_styles.styles)}, f"refresh_{tabname}_styles")
                 self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f"{tabname}_style_apply", tooltip="Apply all selected styles from the style selection dropdown in main UI to the prompt.")
+                self.copy = ui_components.ToolButton(value=styles_copy_symbol, elem_id=f"{tabname}_style_copy", tooltip="Copy main UI prompt to style.")
 
         with gr.Row():
             self.prompt = gr.Textbox(label="Prompt", show_label=True, elem_id=f"{tabname}_edit_style_prompt", lines=3)
@@ -102,6 +104,13 @@ class UiPromptStyles:
             outputs=[main_ui_prompt, main_ui_negative_prompt, self.dropdown],
             show_progress=False,
         ).then(fn=None, _js="function(){update_"+tabname+"_tokens(); closePopup();}", show_progress=False)
+
+        self.copy.click(
+            fn=lambda p, n: (p, n),
+            inputs=[main_ui_prompt, main_ui_negative_prompt],
+            outputs=[self.prompt, self.neg_prompt],
+            show_progress=False,
+        )
 
         ui_common.setup_dialog(button_show=edit_button, dialog=styles_dialog, button_close=self.close)
-- 
cgit v1.2.3


From d7e3ea68b3604aaec6607aad3272e999657e6331 Mon Sep 17 00:00:00 2001
From: missionfloyd
Date: Fri, 1 Sep 2023 16:24:35 -0600
Subject: Remove whitespace

---
 modules/ui_prompt_styles.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/ui_prompt_styles.py b/modules/ui_prompt_styles.py
index 46a26573..64d379ef 100644
--- a/modules/ui_prompt_styles.py
+++ b/modules/ui_prompt_styles.py
@@ -104,7 +104,7 @@ class UiPromptStyles:
             outputs=[main_ui_prompt, main_ui_negative_prompt, self.dropdown],
             show_progress=False,
         ).then(fn=None, _js="function(){update_"+tabname+"_tokens(); closePopup();}", show_progress=False)
-
+
         self.copy.click(
             fn=lambda p, n: (p, n),
             inputs=[main_ui_prompt, main_ui_negative_prompt],
-- 
cgit v1.2.3


From 3e67017dfb767f18f599f13e62fff9355ea14160 Mon Sep 17 00:00:00 2001
From: missionfloyd
Date: Fri, 1 Sep 2023 17:01:08 -0600
Subject: Restore missing tooltips

---
 modules/processing_scripts/seed.py |  4 ++--
 modules/ui.py                      | 12 ++++++------
 modules/ui_extra_networks.py       |  2 +-
 scripts/postprocessing_upscale.py  |  2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/modules/processing_scripts/seed.py b/modules/processing_scripts/seed.py
index 6b6ff987..dc9c2da5 100644
--- a/modules/processing_scripts/seed.py
+++ b/modules/processing_scripts/seed.py
@@ -29,8 +29,8 @@ class ScriptSeed(scripts.ScriptBuiltinUI):
             else:
                 self.seed = gr.Number(label='Seed', value=-1, elem_id=self.elem_id("seed"), min_width=100, precision=0)
 
-            random_seed 
= ToolButton(ui.random_symbol, elem_id=self.elem_id("random_seed"), label='Random seed') - reuse_seed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_seed"), label='Reuse seed') + random_seed = ToolButton(ui.random_symbol, elem_id=self.elem_id("random_seed"), tooltip="Set seed to -1, which will cause a new random number to be used every time") + reuse_seed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_seed"), tooltip="Reuse seed from last generation, mostly useful if it was randomized") seed_checkbox = gr.Checkbox(label='Extra', elem_id=self.elem_id("subseed_show"), value=False) diff --git a/modules/ui.py b/modules/ui.py index 579bab98..89173053 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -215,9 +215,9 @@ class Toprow: ) with gr.Row(elem_id=f"{id_part}_tools"): - self.paste = ToolButton(value=paste_symbol, elem_id="paste") - self.clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") - self.restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) + self.paste = ToolButton(value=paste_symbol, elem_id="paste", tooltip="Read generation parameters from prompt or last generation if prompt is empty into user interface.") + self.clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt", tooltip="Clear prompt") + self.restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False, tooltip="Restore progress") self.token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) self.token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") @@ -348,7 +348,7 @@ def create_ui(): height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", label="Switch dims") + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", tooltip="Switch width/height") if opts.dimensions_and_batch_together: with gr.Column(elem_id="txt2img_column_batch"): @@ -661,8 +661,8 @@ def create_ui(): width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") - detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn") + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn", tooltip="Switch width/height") + detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn", tooltip="Auto detect size from img2img") with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 063bd7b8..21eed6a1 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -374,7 +374,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, 
tabname): edit_search = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", elem_classes="search", placeholder="Search...", visible=False, interactive=True) dropdown_sort = gr.Dropdown(choices=['Default Sort', 'Date Created', 'Date Modified', 'Name'], value='Default Sort', elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order") - button_sortorder = ToolButton(switch_values_symbol, elem_id=tabname+"_extra_sortorder", elem_classes="sortorder", visible=False) + button_sortorder = ToolButton(switch_values_symbol, elem_id=tabname+"_extra_sortorder", elem_classes="sortorder", visible=False, tooltip="Invert sort order") button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh", visible=False) checkbox_show_dirs = gr.Checkbox(True, label='Show dirs', elem_id=tabname+"_extra_show_dirs", elem_classes="show-dirs", visible=False) diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index edb70ac0..eb42a29e 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -29,7 +29,7 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): upscaling_resize_w = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="extras_upscaling_resize_w") upscaling_resize_h = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="extras_upscaling_resize_h") with gr.Column(elem_id="upscaling_dimensions_row", scale=1, elem_classes="dimensions-tools"): - upscaling_res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="upscaling_res_switch_btn") + upscaling_res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="upscaling_res_switch_btn", tooltip="Switch width/height") upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop") with FormRow(): -- cgit v1.2.3 From ba05e327896898eb73caec3ed710fe45d1e38732 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 2 Sep 2023 14:12:59 +0900 Subject: update cmd arg description --- modules/cmd_args.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/cmd_args.py b/modules/cmd_args.py index aab62286..a77c7e77 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -112,8 +112,8 @@ parser.add_argument("--skip-version-check", action='store_true', help="Do not ch parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False) parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False) parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy') -parser.add_argument('--add-stop-route', action='store_true', help='add /_stop route to stop server') +parser.add_argument('--add-stop-route', action='store_true', help='does not do anything') parser.add_argument('--api-server-stop', action='store_true', help='enable server stop/restart/kill via api') parser.add_argument('--timeout-keep-alive', type=int, default=30, help='set timeout_keep_alive for uvicorn') parser.add_argument("--disable-all-extensions", action='store_true', help="prevent all extensions from running regardless of any other settings", default=False) -parser.add_argument("--disable-extra-extensions", action='store_true', help=" 
prevent all extensions except built-in from running regardless of any other settings", default=False)
-- 
cgit v1.2.3


From 061a4a295dd53f089ac460bc2c1585491ef26f24 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Sat, 2 Sep 2023 18:11:08 +0900
Subject: Update bug_report.yml

---
 .github/ISSUE_TEMPLATE/bug_report.yml | 90 ++++++++++++++++++++++++++++-------
 1 file changed, 74 insertions(+), 16 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index cf6a2be8..70c2e160 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -4,22 +4,45 @@ title: "[Bug]: "
 labels: ["bug-report"]
 
 body:
+  - type: markdown
+    attributes:
+      value: |
+        > The title of the bug report should be short and descriptive
+        > Use relevant keywords for searchability
+        > Don't leave it blank, but also don't put the entire error log in it
   - type: checkboxes
     attributes:
-      label: Is there an existing issue for this?
-      description: Please search to see if an issue already exists for the bug you encountered, and that it hasn't been fixed in a recent build/commit.
+      label: Checklist
+      description: |
+        Please perform basic debugging to see if extensions or configuration is the cause of the issue.
+        Basic debug procedure:
+         1. Disable all third-party extensions - check if an extension is the cause
+         2. Update extensions and webui - sometimes things just need to be updated
+         3. Backup and remove your config.json and ui-config.json - check if the issue is caused by bad configuration
+         4. Delete venv with third-party extensions disabled - sometimes extensions might cause wrong libraries to be installed
+         5. Try a fresh installation of webui in a different directory - see if a clean installation solves the issue
+        Before making an issue report, please check that the issue hasn't been reported recently.
       options:
-        - label: I have searched the existing issues and checked the recent builds/commits
-          required: true
+        - label: The issue exists after disabling all extensions
+        - label: The issue exists on a clean installation of webui
+        - label: The issue is caused by an extension, but I believe it is caused by a bug in webui
+        - label: The issue exists in the current version of webui
+        - label: The issue hasn't been reported before recently
+        - label: The issue has been reported before but hasn't been fixed yet
  - type: markdown
     attributes:
       value: |
-        *Please fill this form with as much information as possible, don't forget to fill "What OS..." and "What browsers" and *provide screenshots if possible**
+        > Please fill this form with as much information as possible; don't forget to upload the Sysinfo file and fill in "What browsers", and provide screenshots if possible
   - type: textarea
     id: what-did
     attributes:
       label: What happened?
       description: Tell us what happened in a very clear and simple way
+      placeholder: |
+        I tried to use txt2img with an XYZ grid over samplers DPM++ SDE and DPM++ 2M SDE.
+        It should generate a grid of 2 images, but I only got 1.
+
+        Add a screenshot or screen recording if necessary.
     validations:
       required: true
   - type: textarea
@@ -27,10 +50,10 @@ body:
     attributes:
       label: Steps to reproduce the problem
       description: Please provide us with precise step by step instructions on how to reproduce the bug
-      value: |
-        1. Go to ....
-        2. Press ....
-        3. ...
+      placeholder: |
+        1. 
Go to the txt2img tab and select the XYZ grid script
+        2. Set the axis type to Sampler and select DPM++ 2M SDE, DPM++ 3M SDE
+        3. Set Sampling steps to 1 and click the Generate button
     validations:
       required: true
   - type: textarea
@@ -38,13 +61,9 @@ body:
     attributes:
       label: What should have happened?
       description: Tell us what you think the normal behavior should be
-    validations:
-      required: true
-  - type: textarea
-    id: sysinfo
-    attributes:
-      label: Sysinfo
-      description: System info file, generated by WebUI. You can generate it in settings, on the Sysinfo page. Drag the file into the field to upload it. If you submit your report without including the sysinfo file, the report will be closed. If needed, review the report to make sure it includes no personal information you don't want to share. If you can't start WebUI, you can use --dump-sysinfo commandline argument to generate the file.
+      placeholder: |
+        It should generate a grid of 2 images.
+        This was working in webui version 1.x.x.
     validations:
       required: true
   - type: dropdown
@@ -58,13 +77,47 @@ body:
         - Brave
         - Apple Safari
         - Microsoft Edge
+        - Android
+        - iOS
         - Other
+  - type: textarea
+    id: sysinfo
+    attributes:
+      label: Sysinfo
+      description: System info file, generated by WebUI. You can generate it in settings, on the Sysinfo page. Drag the file into the field to upload it. If you submit your report without including the sysinfo file, the report will be closed. If needed, review the report to make sure it includes no personal information you don't want to share. If you can't start WebUI, you can use the --dump-sysinfo command-line argument to generate the file.
+      placeholder: |
+        Upload the Sysinfo as an attached file.
+        Don't paste it in as text.
+    validations:
+      required: true
   - type: textarea
     id: logs
     attributes:
       label: Console logs
       description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after your bug happened. If it's very long, provide a link to pastebin or similar service.
       render: Shell
render: Shell + placeholder: | + generating image for xyz plot: UnboundLocalError + Traceback (most recent call last): + File "B:\GitHub\stable-diffusion-webui\scripts\xyz_grid.py", line 698, in cell + res = process_images(pc) + File "B:\GitHub\stable-diffusion-webui\modules\processing.py", line 732, in process_images + res = process_images_inner(p) + File "B:\GitHub\stable-diffusion-webui\modules\processing.py", line 867, in process_images_inner + samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) + File "B:\GitHub\stable-diffusion-webui\modules\processing.py", line 1140, in sample + samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) + File "B:\GitHub\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 235, in sample + samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) + File "B:\GitHub\stable-diffusion-webui\modules\sd_samplers_common.py", line 261, in launch_sampling + return func() + File "B:\GitHub\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 235, in + samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) + File "B:\GitHub\stable-diffusion-webui\venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context + return func(*args, **kwargs) + File "B:\GitHub\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\sampling.py", line 651, in sample_dpmpp_2m_sde + h_last = h + UnboundLocalError: local variable 'h' referenced before assignment validations: required: true - type: textarea @@ -72,3 +125,8 @@ body: attributes: label: Additional information description: Please provide us with any relevant additional info or context. + placeholder: | + Examples + I have updated the GPU driver recently + I suspect the issue is caused by XXXXX + I am using a VPN -- cgit v1.2.3 From a51721cb09aa8dc68beb08cf8f0a2602b41d052c Mon Sep 17 00:00:00 2001 From: uservar <63248296+uservar@users.noreply.github.com> Date: Sat, 2 Sep 2023 11:35:30 +0000 Subject: Fix bug with sigma min/max overrides. 
--- modules/shared_options.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 00b273fa..73588a22 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -305,8 +305,8 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}, infotext='Sigma tmax').info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}, infotext='Sigma noise').info('amount of additional noise to counteract loss of detail during sampling'), 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}, infotext='Schedule type').info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), - 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number, infotext='Schedule max sigma').info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), - 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number, infotext='Schedule min sigma').info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"), + 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number, infotext='Schedule min sigma').info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), + 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number, infotext='Schedule max sigma').info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"), 'rho': OptionInfo(0.0, "rho", gr.Number, infotext='Schedule rho').info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"), 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}, infotext='ENSD').info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"), 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma", infotext='Discard penultimate sigma').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"), -- cgit v1.2.3 From aab385d01b4311726127397552d791f4d71b7147 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 3 Sep 2023 11:56:02 +0900 Subject: thread safe extra network list_items --- extensions-builtin/Lora/ui_extra_networks_lora.py | 10 +++++----- modules/ui_extra_networks.py | 2 ++ modules/ui_extra_networks_checkpoints.py | 6 +++--- modules/ui_extra_networks_hypernets.py | 5 +++-- modules/ui_extra_networks_textual_inversion.py | 5 +++-- 5 files changed, 16 insertions(+), 12 deletions(-) diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py index 55409a78..e9f30062 100644 --- a/extensions-builtin/Lora/ui_extra_networks_lora.py +++ b/extensions-builtin/Lora/ui_extra_networks_lora.py @@ -66,11 +66,11 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage): return item def list_items(self): - for index, name in enumerate(networks.available_networks): - item = self.create_item(name, index) - - if item is not None: - yield item + with self.thread_lock: + for index, name in enumerate(networks.available_networks): + item = 
self.create_item(name, index) + if item is not None: + yield item def allowed_directories_for_previews(self): return [shared.cmd_opts.lora_dir, shared.cmd_opts.lyco_dir_backcompat] diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 063bd7b8..564bab7f 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -1,6 +1,7 @@ import os.path import urllib.parse from pathlib import Path +from threading import Lock from modules import shared, ui_extra_networks_user_metadata, errors, extra_networks from modules.images import read_info_from_image, save_image_with_geninfo @@ -94,6 +95,7 @@ class ExtraNetworksPage: self.allow_negative_prompt = False self.metadata = {} self.items = {} + self.thread_lock = Lock() def refresh(self): pass diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py index ca6c2607..2753214f 100644 --- a/modules/ui_extra_networks_checkpoints.py +++ b/modules/ui_extra_networks_checkpoints.py @@ -30,9 +30,9 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage): } def list_items(self): - names = list(sd_models.checkpoints_list) - for index, name in enumerate(names): - yield self.create_item(name, index) + with self.thread_lock: + for index, name in enumerate(sd_models.checkpoints_list): + yield self.create_item(name, index) def allowed_directories_for_previews(self): return [v for v in [shared.cmd_opts.ckpt_dir, sd_models.model_path] if v is not None] diff --git a/modules/ui_extra_networks_hypernets.py b/modules/ui_extra_networks_hypernets.py index 4cedf085..411b4f11 100644 --- a/modules/ui_extra_networks_hypernets.py +++ b/modules/ui_extra_networks_hypernets.py @@ -31,8 +31,9 @@ class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage): } def list_items(self): - for index, name in enumerate(shared.hypernetworks): - yield self.create_item(name, index) + with self.thread_lock: + for index, name in enumerate(shared.hypernetworks): + yield self.create_item(name, index) def allowed_directories_for_previews(self): return [shared.cmd_opts.hypernetwork_dir] diff --git a/modules/ui_extra_networks_textual_inversion.py b/modules/ui_extra_networks_textual_inversion.py index 55ef0ea7..d25b45d6 100644 --- a/modules/ui_extra_networks_textual_inversion.py +++ b/modules/ui_extra_networks_textual_inversion.py @@ -29,8 +29,9 @@ class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage): } def list_items(self): - for index, name in enumerate(sd_hijack.model_hijack.embedding_db.word_embeddings): - yield self.create_item(name, index) + with self.thread_lock: + for index, name in enumerate(sd_hijack.model_hijack.embedding_db.word_embeddings): + yield self.create_item(name, index) def allowed_directories_for_previews(self): return list(sd_hijack.model_hijack.embedding_db.embedding_dirs) -- cgit v1.2.3 From f593cbfec417a3ea40589fe64e7f8806c9a81e5a Mon Sep 17 00:00:00 2001 From: AngelBottomless Date: Sun, 3 Sep 2023 21:07:36 +0900 Subject: fallback if exif data was invalid --- modules/images.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/images.py b/modules/images.py index eb644733..8512a46e 100644 --- a/modules/images.py +++ b/modules/images.py @@ -718,7 +718,12 @@ def read_info_from_image(image: Image.Image) -> tuple[str | None, dict]: geninfo = items.pop('parameters', None) if "exif" in items: - exif = piexif.load(items["exif"]) + exif_data = items["exif"] + try: + exif = piexif.load(exif_data) + except OSError: + # memory / 
exif was not valid so piexif tried to read from a file + exif = None exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b'') try: exif_comment = piexif.helper.UserComment.load(exif_comment) -- cgit v1.2.3 From 8f3b02f09535f55d3673aa9ea589396b8614f799 Mon Sep 17 00:00:00 2001 From: JaredTherriault Date: Sun, 3 Sep 2023 13:31:42 -0700 Subject: Revert "Offloading custom work" This reverts commit f3d1631aab82d559294126a9230c979ef4c4e1d6. This work has been offloaded now into an extension called Prompt Control. --- modules/custom_statics.py | 29 ----------------------------- modules/generation_parameters_copypaste.py | 8 -------- webui-user.bat | 4 ++-- 3 files changed, 2 insertions(+), 39 deletions(-) delete mode 100644 modules/custom_statics.py diff --git a/modules/custom_statics.py b/modules/custom_statics.py deleted file mode 100644 index 207bd5fb..00000000 --- a/modules/custom_statics.py +++ /dev/null @@ -1,29 +0,0 @@ -import os -import gc -import re - -import modules.paths as paths - -class CustomStatics: - - @staticmethod - # loads a file with strings structured as below, on each line with a : between the search and replace strings, into a list - # search0:replace0 - # search string:replace string - # - # Then replaces all occurrences of the list's search strings with the list's replace strings in one go - def mass_replace_strings(input_string): - with open(os.path.join(paths.data_path, "custom_statics/Replacements.txt"), "r", encoding="utf8") as file: - replacements = file.readlines() - - replacement_dict = {} - for line in replacements: - search, replace = line.strip().split(":") - replacement_dict[search] = replace - - def replace(match_text): - return replacement_dict[match_text.group(0)] - - return re.sub('|'.join(r'\b%s\b' % re.escape(s) for s in replacement_dict.keys()), replace, str(input_string)) - - return str(geninfo) \ No newline at end of file diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index bd7b0018..a3448be9 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -370,14 +370,6 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component, prompt = file.read() params = parse_generation_parameters(prompt) - - # This sanitizes unsavory prompt words when copying from another image - # for my own sanity. 
This is not intended to be contributed to the main repo, - # it's just so I don't have to see anything I'm not interested in when batch - # reproducing images from civit.ai or elsewhere when working on loras - # todo: make this work with the callback instead of forcing it here, this can be an extension when I feel like putting it together :D - from modules import custom_statics - params = custom_statics.CustomStatics.mass_replace_strings(params) script_callbacks.infotext_pasted_callback(prompt, params) res = [] diff --git a/webui-user.bat b/webui-user.bat index 1ba2116d..e5a257be 100644 --- a/webui-user.bat +++ b/webui-user.bat @@ -1,8 +1,8 @@ @echo off -set TEMP=G:\SD-temp + set PYTHON= set GIT= set VENV_DIR= -set COMMANDLINE_ARGS= --xformers +set COMMANDLINE_ARGS= call webui.bat -- cgit v1.2.3 From 022639a145751d61db1c144e5e657aa6481e2bc0 Mon Sep 17 00:00:00 2001 From: JaredTherriault Date: Mon, 4 Sep 2023 17:37:48 -0700 Subject: Load comments from gif images to gather geninfo from gif outputs --- modules/images.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/images.py b/modules/images.py index eb644733..8c6e862f 100644 --- a/modules/images.py +++ b/modules/images.py @@ -728,6 +728,8 @@ def read_info_from_image(image: Image.Image) -> tuple[str | None, dict]: if exif_comment: items['exif comment'] = exif_comment geninfo = exif_comment + elif "comment" in items: # for gif + geninfo = items["comment"].decode('utf8', errors="ignore") for field in IGNORED_INFO_KEYS: items.pop(field, None) -- cgit v1.2.3 From 0c1c9e74cda6637ab1305b4c294b7719eb141927 Mon Sep 17 00:00:00 2001 From: liubo0902 <38622806+liubo0902@users.noreply.github.com> Date: Tue, 5 Sep 2023 15:06:47 +0800 Subject: Update localization.py --- modules/localization.py | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/modules/localization.py b/modules/localization.py index c1320288..3392b055 100644 --- a/modules/localization.py +++ b/modules/localization.py @@ -1,7 +1,7 @@ import json import os -from modules import errors, scripts +from modules import errors localizations = {} @@ -14,21 +14,27 @@ def list_localizations(dirname): if ext.lower() != ".json": continue - localizations[fn] = os.path.join(dirname, file) + fn = fn.replace(" ", "").replace("(", "_").replace(")","") + localizations[fn] = [os.path.join(dirname, file)] + from modules import scripts for file in scripts.list_scripts("localizations", ".json"): fn, ext = os.path.splitext(file.filename) - localizations[fn] = file.path + fn = fn.replace(" ", "").replace("(", "_").replace(")","") + if fn not in localizations: + localizations[fn] = [] + localizations[fn].append(file.path) def localization_js(current_localization_name: str) -> str: - fn = localizations.get(current_localization_name, None) + fns = localizations.get(current_localization_name, None) data = {} - if fn is not None: - try: - with open(fn, "r", encoding="utf8") as file: - data = json.load(file) - except Exception: - errors.report(f"Error loading localization from {fn}", exc_info=True) + if fns is not None: + for fn in fns: + try: + with open(fn, "r", encoding="utf8") as file: + data.update(json.load(file)) + except Exception: + errors.report(f"Error loading localization from {fn}", exc_info=True) return f"window.localization = {json.dumps(data)}" -- cgit v1.2.3 From ff7027ffc075ae44ddaa56014f900d392cf53ca8 Mon Sep 17 00:00:00 2001 From: liubo0902 <38622806+liubo0902@users.noreply.github.com> Date: Tue, 5 Sep 2023 15:08:59 +0800 Subject: Update 
localization.py --- modules/localization.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/localization.py b/modules/localization.py index 3392b055..262d49ee 100644 --- a/modules/localization.py +++ b/modules/localization.py @@ -1,7 +1,7 @@ import json import os -from modules import errors +from modules import errors, scripts localizations = {} @@ -17,7 +17,6 @@ def list_localizations(dirname): fn = fn.replace(" ", "").replace("(", "_").replace(")","") localizations[fn] = [os.path.join(dirname, file)] - from modules import scripts for file in scripts.list_scripts("localizations", ".json"): fn, ext = os.path.splitext(file.filename) fn = fn.replace(" ", "").replace("(", "_").replace(")","") -- cgit v1.2.3 From de5bb4ca88df44362c9263de7334b30156540e21 Mon Sep 17 00:00:00 2001 From: AngelBottomless Date: Tue, 5 Sep 2023 22:35:17 +0900 Subject: Fix #13080 - Hypernetwork/TI preview generation Fixes sampler name reference Same patch will be done for TI. --- modules/hypernetworks/hypernetwork.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 70f1cbd2..65b63f2f 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -468,7 +468,7 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, shared.reload_hypernetworks() -def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): +def train_hypernetwork(id_task, hypernetwork_name:str, learn_rate:float, batch_size:int, gradient_step:int, data_root:str, log_directory:str, training_width:int, training_height:int, varsize:bool, steps:int, clip_grad_mode:str, clip_grad_value:float, shuffle_tags:bool, tag_drop_out:bool, latent_sampling_method:str, use_weight:bool, create_image_every:int, save_hypernetwork_every:int, template_filename:str, preview_from_txt2img:bool, preview_prompt:str, preview_negative_prompt:str, preview_steps:int, preview_sampler_name:str, preview_cfg_scale:float, preview_seed:int, preview_width:int, preview_height:int): from modules import images, processing save_hypernetwork_every = save_hypernetwork_every or 0 @@ -698,7 +698,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi p.prompt = preview_prompt p.negative_prompt = preview_negative_prompt p.steps = preview_steps - p.sampler_name = sd_samplers.samplers[preview_sampler_index].name + p.sampler_name = sd_samplers.samplers_map[preview_sampler_name.lower()] p.cfg_scale = preview_cfg_scale p.seed = preview_seed p.width = preview_width -- cgit v1.2.3 From 47033afa5c08e72b622348b0bcfd71fd1a66e2cb Mon Sep 17 00:00:00 2001 From: AngelBottomless Date: Tue, 5 Sep 2023 22:38:02 +0900 Subject: Fix preview for textual inversion training --- modules/textual_inversion/textual_inversion.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index aa79dc09..401a0a2a 100644 --- a/modules/textual_inversion/textual_inversion.py +++ 
b/modules/textual_inversion/textual_inversion.py @@ -386,7 +386,7 @@ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, dat assert log_directory, "Log directory is empty" -def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): +def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_name, preview_cfg_scale, preview_seed, preview_width, preview_height): from modules import processing save_embedding_every = save_embedding_every or 0 @@ -590,7 +590,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st p.prompt = preview_prompt p.negative_prompt = preview_negative_prompt p.steps = preview_steps - p.sampler_name = sd_samplers.samplers[preview_sampler_index].name + p.sampler_name = sd_samplers.samplers_map[preview_sampler_name.lower()] p.cfg_scale = preview_cfg_scale p.seed = preview_seed p.width = preview_width -- cgit v1.2.3 From 25189b29afb04ff0c203e7f666c8acaead09dcde Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Tue, 5 Sep 2023 22:13:36 -0400 Subject: Grammar fixes --- .github/ISSUE_TEMPLATE/bug_report.yml | 62 +++++++++++++++++------------------ 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 70c2e160..a53db9f0 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -1,5 +1,5 @@ name: Bug Report -description: You think somethings is broken in the UI +description: You think something is broken in the UI title: "[Bug]: " labels: ["bug-report"] @@ -7,9 +7,9 @@ body: - type: markdown attributes: value: | - > The title the bug report should be short and descriptive - > Use relevant keywords for searchability - > Don't leave it blank but also don't put the entire error log in it + > The title of the bug report should be short and descriptive. + > Use relevant keywords for searchability. + > Do not leave it blank, but also do not put an entire error log in it. - type: checkboxes attributes: label: Checklist @@ -17,32 +17,32 @@ body: Please perform basic debugging to see if extensions or configuration is the cause of the issue. Basic debug procedure  1. Disable all third-party extensions - check if extension is the cause -  2. Update extensions and webui - sometimes thing just need to be updated -  3. Backup and remove your config.json and ui-config.json - check if the issue is caused bed configuration -  4. delete venv with third-party extensions disable - sometimes extensions might cause wrong libraries to be installed -  5. 
try a fresh installation webui in a different directory - see if a clean installation solves the issue - Before making a issue report please check that the issue hasn't been reported recently +  2. Update extensions and webui - sometimes things just need to be updated +  3. Backup and remove your config.json and ui-config.json - check if the issue is caused by bad configuration +  4. Delete venv with third-party extensions disabled - sometimes extensions might cause wrong libraries to be installed +  5. Try a fresh installation webui in a different directory - see if a clean installation solves the issue + Before making a issue report please, check that the issue hasn't been reported recently. options: - - label: The issue exist after disabling all extensions - - label: The issue exist on a clean installation of webui - - label: The issue is caused by an extension but it is caused by a bug in webui - - label: The issue exist in current version of webui - - label: The issue haven't been reported before recently - - label: The issue has been reported before but hasn't been fixed yet + - label: The issue exists after disabling all extensions + - label: The issue exists on a clean installation of webui + - label: The issue is caused by an extension, but I believe it is caused by a bug in the webui + - label: The issue exists in the current version of the webui + - label: The issue has not been reported before recently + - label: The issue has been reported before but has not been fixed yet - type: markdown attributes: value: | - > Please fill this form with as much information as possible, don't forget to "Upload Sysinfo" and "What browsers" and provide screenshots if possible + > Please fill this form with as much information as possible. Don't forget to "Upload Sysinfo" and "What browsers" and provide screenshots if possible - type: textarea id: what-did attributes: label: What happened? description: Tell us what happened in a very clear and simple way placeholder: | - I tried to use txt2img with XYZ grid with Sampler DPM++ SDE,DPM++ 2M SDE - it should generate a grid of 2 images but I only got 1 + I tried to use txt2img with the XYZ grid script, with DPM++ SDE, DPM++ 2M SDE samplers. + It should generate a grid of 2 images but I only got 1. - add screenshot or screen recording if necessary + (add screenshot or screen recording if necessary) validations: required: true - type: textarea @@ -51,9 +51,9 @@ body: label: Steps to reproduce the problem description: Please provide us with precise step by step instructions on how to reproduce the bug placeholder: | - 1. Go to txt2img tab Select XYZ grid - 2. Set axis type Sampler and select DPM++ 2M SDE, DPM++ 3M SDE - 3. Set Sampling steps to 1 and click Generate button + 1. Go to txt2img tab, select XYZ grid script + 2. Set axis type to `Sampler`, and select DPM++ 2M SDE, DPM++ 3M SDE + 3. Set `Sampling steps` to 1, click generate button validations: required: true - type: textarea @@ -62,8 +62,8 @@ body: label: What should have happened? description: Tell us what you think the normal behavior should be placeholder: | - It should generate a grid of 2 images - this was working in webui version 1.x.x + It should generate a grid of 2 images. + This was working in webui version 1.x.x validations: required: true - type: dropdown @@ -86,15 +86,15 @@ body: label: Sysinfo description: System info file, generated by WebUI. You can generate it in settings, on the Sysinfo page. Drag the file into the field to upload it. 
If you submit your report without including the sysinfo file, the report will be closed. If needed, review the report to make sure it includes no personal information you don't want to share. If you can't start WebUI, you can use --dump-sysinfo commandline argument to generate the file. placeholder: | - Upload the Sysinfo as a attached file - Don't paste it in as text + Upload the Sysinfo as a attached file. + Do not paste it in as text. validations: required: true - type: textarea id: logs attributes: label: Console logs - description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after your bug happened. If it's very long, provide a link to pastebin or similar service. + description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after the bug occured. If it's very long, provide a link to pastebin or similar service. render: Shell placeholder: | generating image for xyz plot: UnboundLocalError @@ -126,7 +126,7 @@ body: label: Additional information description: Please provide us with any relevant additional info or context. placeholder: | - Examples - I have updated the GPU driver recently - I suspect the issue is caused by XXXXX - I am using a VPN + Examples: + I have updated the GPU driver recently. + I suspect the issue is caused by XXXXX. + I am using a VPN. -- cgit v1.2.3 From 35d1c94549cf75e7e312372d90fee0acc2806426 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 6 Sep 2023 20:24:26 +0900 Subject: save_images_add_number_suffix --- modules/images.py | 10 +++++++++- modules/shared_options.py | 2 +- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/modules/images.py b/modules/images.py index eb644733..10dcd9ab 100644 --- a/modules/images.py +++ b/modules/images.py @@ -661,7 +661,15 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i save_image_with_geninfo(image_to_save, info, temp_file_path, extension, existing_pnginfo=params.pnginfo, pnginfo_section_name=pnginfo_section_name) - os.replace(temp_file_path, filename_without_extension + extension) + full_file_name = filename_without_extension + extension + if shared.opts.save_images_add_number_suffix and os.path.exists(full_file_name): + count = 1 + while True: + full_file_name = f"{filename_without_extension}_{count}{extension}" + if not os.path.exists(full_file_name): + break + count += 1 + os.replace(temp_file_path, full_file_name) fullfn_without_extension, extension = os.path.splitext(params.filename) if hasattr(os, 'statvfs'): diff --git a/modules/shared_options.py b/modules/shared_options.py index 00b273fa..2f4caa9d 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -26,7 +26,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "samples_format": OptionInfo('png', 'File format for images'), "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs), - + "save_images_add_number_suffix": OptionInfo(True, "Add number suffix when necessary", component_args=hide_dirs).info("prevent existing image from being override"), "grid_save": OptionInfo(True, "Always save all generated image grids"), "grid_format": OptionInfo('png', 'File format for grids'), 
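An aside on the images.py hunk above (the shared_options.py hunk resumes below): saving writes to a temporary file and then moves it into place, and the new option avoids clobbering an existing image by probing for a free numbered name first. A minimal sketch of that probe, extracted so it can be tried in isolation; it assumes ordinary filesystem semantics, and the check-then-act window between `os.path.exists` and the final rename still exists if two processes save concurrently:

```python
import os

def unique_filename(filename_without_extension: str, extension: str) -> str:
    # Return the first free path in the sequence name.ext, name_1.ext, name_2.ext, ...
    candidate = filename_without_extension + extension
    count = 1
    while os.path.exists(candidate):
        candidate = f"{filename_without_extension}_{count}{extension}"
        count += 1
    return candidate

print(unique_filename("outputs/txt2img-images/00001", ".png"))
```

A later commit in this series ("update for #12926") reworks this into a "Saving the image to an existing file" choice between Replace and Add number suffix, using a `-n` suffix instead of `_n`.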
"grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"), -- cgit v1.2.3 From 657404b75b2f214a97281afbec1adcb4313d24eb Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 6 Sep 2023 20:33:43 +0900 Subject: use original filename batch img2img with scripts --- modules/img2img.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index 0c6d1af5..c1cae22f 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -117,15 +117,14 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal if output_dir: p.outpath_samples = output_dir p.override_settings['save_to_dirs'] = False + if p.n_iter > 1 or p.batch_size > 1: + p.override_settings['samples_filename_pattern'] = f'{image_path.stem}-[generation_number]' + else: + p.override_settings['samples_filename_pattern'] = f'{image_path.stem}' proc = modules.scripts.scripts_img2img.run(p, *args) if proc is None: - if output_dir: - if p.n_iter > 1 or p.batch_size > 1: - p.override_settings['samples_filename_pattern'] = f'{image_path.stem}-[generation_number]' - else: - p.override_settings['samples_filename_pattern'] = f'{image_path.stem}' process_images(p) -- cgit v1.2.3 From 340fce2113b6d68f06f5bb8c897be998f03b4c8c Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 7 Sep 2023 10:01:16 +0900 Subject: enable console prompts in settings --- modules/img2img.py | 2 +- modules/shared_options.py | 1 + modules/txt2img.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index c81c7ab9..cbd80bac 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -199,7 +199,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s p.user = request.username - if shared.cmd_opts.enable_console_prompts: + if shared.opts.enable_console_prompts or shared.cmd_opts.enable_console_prompts: print(f"\nimg2img: {prompt}", file=shared.progress_print_out) if mask: diff --git a/modules/shared_options.py b/modules/shared_options.py index 00b273fa..44fb1670 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -100,6 +100,7 @@ options_templates.update(options_section(('face-restoration', "Face restoration" options_templates.update(options_section(('system', "System"), { "auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}), + "enable_console_prompts": OptionInfo(False, "Print prompts to console when generating with txt2img and img2img."), "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(), "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_reload_ui(), "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"), diff --git a/modules/txt2img.py b/modules/txt2img.py index 1ee592ad..379ef859 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -45,7 +45,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step p.user = request.username - if cmd_opts.enable_console_prompts: + if shared.opts.enable_console_prompts or cmd_opts.enable_console_prompts: print(f"\ntxt2img: {prompt}", file=shared.progress_print_out) with closing(p): -- cgit v1.2.3 From 
45881703c5b1c0499406a76fa49ec7bd408a4898 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 7 Sep 2023 12:11:36 +0900 Subject: consolidated allowed preview formats --- modules/ui_extra_networks.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 063bd7b8..2e816254 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -15,6 +15,11 @@ from modules.ui_components import ToolButton extra_pages = [] allowed_dirs = set() +allowed_preview_extensions = ["png", "jpg", "jpeg", "webp", "gif"] +if shared.opts.samples_format not in allowed_preview_extensions: + allowed_preview_extensions.append(shared.opts.samples_format) +allowed_preview_extensions_dot = ['.' + extension for extension in allowed_preview_extensions] + def register_page(page): """registers extra networks page for the UI; recommend doing it in on_before_ui() callback for extensions""" @@ -34,7 +39,7 @@ def fetch_file(filename: str = ""): raise ValueError(f"File cannot be fetched: {filename}. Must be in one of directories registered by extra pages.") ext = os.path.splitext(filename)[1].lower() - if ext not in (".png", ".jpg", ".jpeg", ".webp", ".gif"): + if ext not in allowed_preview_extensions_dot: raise ValueError(f"File cannot be fetched: {filename}. Only png, jpg, webp, and gif.") # would profit from returning 304 @@ -273,11 +278,7 @@ class ExtraNetworksPage: Find a preview PNG for a given path (without extension) and call link_preview on it. """ - preview_extensions = ["png", "jpg", "jpeg", "webp"] - if shared.opts.samples_format not in preview_extensions: - preview_extensions.append(shared.opts.samples_format) - - potential_files = sum([[path + "." + ext, path + ".preview." + ext] for ext in preview_extensions], []) + potential_files = sum([[path + "." + ext, path + ".preview." + ext] for ext in allowed_preview_extensions], []) for file in potential_files: if os.path.isfile(file): -- cgit v1.2.3 From c3d51fc696bbe5f9ea2de63933234c90c55afbbd Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 7 Sep 2023 19:35:55 +0900 Subject: Update bug_report.yml --- .github/ISSUE_TEMPLATE/bug_report.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index a53db9f0..a423f052 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -86,8 +86,9 @@ body: label: Sysinfo description: System info file, generated by WebUI. You can generate it in settings, on the Sysinfo page. Drag the file into the field to upload it. If you submit your report without including the sysinfo file, the report will be closed. If needed, review the report to make sure it includes no personal information you don't want to share. If you can't start WebUI, you can use --dump-sysinfo commandline argument to generate the file. placeholder: | - Upload the Sysinfo as a attached file. - Do not paste it in as text. + 1. Go to WebUI Settings -> Sysinfo -> Download system info. + If WebUI fails to launch, use --dump-sysinfo commandline argument to generate the file + 2. Upload the Sysinfo as a attached file, Do NOT paste it in as plain text. 
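An aside on the "consolidated allowed preview formats" commit above (the bug-report template diff resumes at `validations:` below): the fetch endpoint and the preview lookup previously kept separate extension lists, and the patch merges them into one module-level whitelist that also admits the user's configured sample format, plus a dotted variant for `os.path.splitext` checks. A stripped-down sketch, with a hard-coded value standing in for `shared.opts.samples_format`:

```python
import os

allowed_preview_extensions = ["png", "jpg", "jpeg", "webp", "gif"]
samples_format = "avif"  # stand-in for shared.opts.samples_format
if samples_format not in allowed_preview_extensions:
    allowed_preview_extensions.append(samples_format)
allowed_preview_extensions_dot = ['.' + extension for extension in allowed_preview_extensions]

def can_fetch(filename: str) -> bool:
    # Mirrors the endpoint's extension check on the lowercased suffix.
    return os.path.splitext(filename)[1].lower() in allowed_preview_extensions_dot

print(can_fetch("model.preview.webp"), can_fetch("model.ckpt"))  # True False
```

Since the whitelist is built at module import time, a sample format changed after startup is only picked up on the next launch.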
validations: required: true - type: textarea -- cgit v1.2.3 From f11eec81e31bfc9195bbacda13b2a3ce7b98fd92 Mon Sep 17 00:00:00 2001 From: ibrainventures Date: Thu, 7 Sep 2023 23:19:52 +0200 Subject: (feat) Include Program Version in info response. Update processing.py This would help to organize / memorize the program version for the creation process. (as it is also included, unformatted, inside the infotext). --- modules/processing.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/processing.py b/modules/processing.py index e124e7f0..0c191428 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -533,6 +533,7 @@ class Processed: self.all_seeds = all_seeds or p.all_seeds or [self.seed] self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed] self.infotexts = infotexts or [info] + self.version = program_version() def js(self): obj = { @@ -567,6 +568,7 @@ "job_timestamp": self.job_timestamp, "clip_skip": self.clip_skip, "is_using_inpainting_conditioning": self.is_using_inpainting_conditioning, + "version": self.version, } return json.dumps(obj) -- cgit v1.2.3 From e4726cccf960257e1b456db84a59f28cea019c8f Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 8 Sep 2023 09:46:34 +0900 Subject: parsing string to path --- modules/sd_models.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 930d0bee..9b0923de 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -49,11 +49,12 @@ class CheckpointInfo: def __init__(self, filename): self.filename = filename abspath = os.path.abspath(filename) + abs_ckpt_dir = os.path.abspath(shared.cmd_opts.ckpt_dir) if shared.cmd_opts.ckpt_dir is not None else None self.is_safetensors = os.path.splitext(filename)[1].lower() == ".safetensors" - if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir): - name = abspath.replace(shared.cmd_opts.ckpt_dir, '') + if abs_ckpt_dir and abspath.startswith(abs_ckpt_dir): + name = abspath.replace(abs_ckpt_dir, '') elif abspath.startswith(model_path): name = abspath.replace(model_path, '') else: -- cgit v1.2.3 From 63485b2c55d2e5d1d5fc64d3964120a7305a9aee Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 8 Sep 2023 10:00:27 +0900 Subject: option use short name for checkpoint dropdown --- modules/shared_items.py | 4 ++-- modules/shared_options.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/modules/shared_items.py b/modules/shared_items.py index 84d69c8d..b1459f8c 100644 --- a/modules/shared_items.py +++ b/modules/shared_items.py @@ -44,9 +44,9 @@ def refresh_unet_list(): modules.sd_unet.list_unets() -def list_checkpoint_tiles(): +def list_checkpoint_tiles(use_short=False): import modules.sd_models - return modules.sd_models.checkpoint_tiles() + return modules.sd_models.checkpoint_tiles(use_short) def refresh_checkpoints(): diff --git a/modules/shared_options.py b/modules/shared_options.py index 00b273fa..7f71c517 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -133,7 +133,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
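An aside on the "parsing string to path" commit above, before the checkpoint-dropdown diff continues: `--ckpt-dir` may be supplied as a relative path, and a bare `str.startswith` comparison against an absolute path then never matches, so such checkpoints fell through to the wrong naming branch. A minimal sketch of the failure mode, using made-up paths:

```python
import os

ckpt_dir = "models/Stable-diffusion"  # hypothetical relative --ckpt-dir value
filename = os.path.abspath("models/Stable-diffusion/v1-5-pruned.safetensors")

print(filename.startswith(ckpt_dir))      # False: absolute path vs relative prefix

abs_ckpt_dir = os.path.abspath(ckpt_dir)  # normalize both sides first
print(filename.startswith(abs_ckpt_dir))  # True: the prefix check now works
```

(The shared_options.py hunk of the checkpoint-dropdown commit resumes below.)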
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": shared_items.list_checkpoint_tiles(shared.opts.sd_checkpoint_dropdown_use_short)}, refresh=shared_items.refresh_checkpoints, infotext='Model hash'), "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}), "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"), "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"), @@ -261,6 +261,7 @@ options_templates.update(options_section(('ui', "User interface"), { "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(), "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(), "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(), + "sd_checkpoint_dropdown_use_short": OptionInfo(False, "Use short name for Stable Diffusion checkpoint dropdown"), "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(), "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(), "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), -- cgit v1.2.3 From 259768f27fc4da61000610bc81a16f0152b36550 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 9 Sep 2023 08:38:42 +0300 Subject: fix the bug in script-info API --- modules/scripts.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/scripts.py b/modules/scripts.py index e8518ad0..f1f17a5f 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -491,11 +491,15 @@ class ScriptRunner: arg_info = api_models.ScriptArg(label=control.label or "") - for field in ("value", "minimum", "maximum", "step", "choices"): + for field in ("value", "minimum", "maximum", "step"): v = getattr(control, field, None) if v is not None: setattr(arg_info, field, v) + choices = getattr(control, 'choices', None) # as of gradio 3.41, some items in choices are strings, and some are tuples where the first elem is the string + if choices is not None: + setattr(arg_info, 'choices', [x[0] if isinstance(x, tuple) else x for x in choices]) + api_args.append(arg_info) script.api_info = api_models.ScriptInfo( -- cgit v1.2.3 From 7b44b85730d392733a285fe7e5c9e077f7bbccd3 Mon Sep 17 00:00:00 2001 From: ljleb Date: Sat, 9 Sep 2023 02:01:12 -0400 Subject: refact --- modules/ui.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index f4028475..0e78b6e1 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1,4 +1,5 @@ import datetime +import functools import mimetypes import os import sys @@ -151,11 +152,14 @@ def connect_clear_prompt(button): ) -def update_token_counter(text, steps): +def update_token_counter(text, steps, is_positive=True): try: text, _ = extra_networks.parse_prompt(text) - _, prompt_flat_list, _ = 
prompt_parser.get_multicond_prompt_list([text]) + if is_positive: + _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text]) + else: + prompt_flat_list = [text] prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps) except Exception: @@ -533,7 +537,7 @@ def create_ui(): ] toprow.token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.prompt, steps], outputs=[toprow.token_counter]) - toprow.negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter]) + toprow.negative_token_button.click(fn=wrap_queued_call(functools.partial(update_token_counter, is_positive=False)), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter]) extra_networks_ui = ui_extra_networks.create_ui(txt2img_interface, [txt2img_generation_tab], 'txt2img') ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) -- cgit v1.2.3 From 3ca4655a18eb80cca5f806412f2cb2d56cc536e5 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 9 Sep 2023 09:08:31 +0300 Subject: update for #12926 --- modules/images.py | 16 +++++++--------- modules/shared_options.py | 2 +- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/modules/images.py b/modules/images.py index 10dcd9ab..5cf3c825 100644 --- a/modules/images.py +++ b/modules/images.py @@ -661,15 +661,13 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i save_image_with_geninfo(image_to_save, info, temp_file_path, extension, existing_pnginfo=params.pnginfo, pnginfo_section_name=pnginfo_section_name) - full_file_name = filename_without_extension + extension - if shared.opts.save_images_add_number_suffix and os.path.exists(full_file_name): - count = 1 - while True: - full_file_name = f"{filename_without_extension}_{count}{extension}" - if not os.path.exists(full_file_name): - break - count += 1 - os.replace(temp_file_path, full_file_name) + filename = filename_without_extension + extension + if shared.opts.save_images_replace_action != "Replace": + n = 0 + while os.path.exists(filename): + n += 1 + filename = f"{filename_without_extension}-{n}{extension}" + os.replace(temp_file_path, filename) fullfn_without_extension, extension = os.path.splitext(params.filename) if hasattr(os, 'statvfs'): diff --git a/modules/shared_options.py b/modules/shared_options.py index 2f4caa9d..1befb6ea 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -26,7 +26,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "samples_format": OptionInfo('png', 'File format for images'), "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs), - "save_images_add_number_suffix": OptionInfo(True, "Add number suffix when necessary", component_args=hide_dirs).info("prevent existing image from being override"), + "save_images_replace_action": OptionInfo("Replace", "Saving the image to an existing file", gr.Radio, {"choices": ["Replace", "Add number suffix"], **hide_dirs}), "grid_save": OptionInfo(True, "Always save all generated image grids"), "grid_format": OptionInfo('png', 'File format for grids'), "grid_extended_filename": OptionInfo(False, 
"Add extended info (seed, prompt) to filename when saving grid"), -- cgit v1.2.3 From 4c4d7dd01f77f021381a09cb18b4ca8a8b7734b1 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 9 Sep 2023 09:15:09 +0300 Subject: fix whitespace for #13084 --- modules/hypernetworks/hypernetwork.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 65b63f2f..be3e4648 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -468,7 +468,7 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, shared.reload_hypernetworks() -def train_hypernetwork(id_task, hypernetwork_name:str, learn_rate:float, batch_size:int, gradient_step:int, data_root:str, log_directory:str, training_width:int, training_height:int, varsize:bool, steps:int, clip_grad_mode:str, clip_grad_value:float, shuffle_tags:bool, tag_drop_out:bool, latent_sampling_method:str, use_weight:bool, create_image_every:int, save_hypernetwork_every:int, template_filename:str, preview_from_txt2img:bool, preview_prompt:str, preview_negative_prompt:str, preview_steps:int, preview_sampler_name:str, preview_cfg_scale:float, preview_seed:int, preview_width:int, preview_height:int): +def train_hypernetwork(id_task, hypernetwork_name: str, learn_rate: float, batch_size: int, gradient_step: int, data_root: str, log_directory: str, training_width: int, training_height: int, varsize: bool, steps: int, clip_grad_mode: str, clip_grad_value: float, shuffle_tags: bool, tag_drop_out: bool, latent_sampling_method: str, use_weight: bool, create_image_every: int, save_hypernetwork_every: int, template_filename: str, preview_from_txt2img: bool, preview_prompt: str, preview_negative_prompt: str, preview_steps: int, preview_sampler_name: str, preview_cfg_scale: float, preview_seed: int, preview_width: int, preview_height: int): from modules import images, processing save_hypernetwork_every = save_hypernetwork_every or 0 -- cgit v1.2.3 From 46375f059276cb2d4d1e47bf65f984c6466dc2a0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 9 Sep 2023 09:39:37 +0300 Subject: fix for crash when running #12924 without --device-id --- modules/devices.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/devices.py b/modules/devices.py index 63c38eff..1d4eb563 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -60,7 +60,7 @@ def enable_tf32(): # enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't # see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407 - device_id = (int(shared.cmd_opts.device_id) if shared.cmd_opts.device_id.isdigit() else 0) or torch.cuda.current_device() + device_id = (int(shared.cmd_opts.device_id) if shared.cmd_opts.device_id is not None and shared.cmd_opts.device_id.isdigit() else 0) or torch.cuda.current_device() if torch.cuda.get_device_capability(device_id) == (7, 5) and torch.cuda.get_device_name(device_id).startswith("NVIDIA GeForce GTX 16"): torch.backends.cudnn.benchmark = True -- cgit v1.2.3 From 46ef1857098df7610c36e73903731e486feca927 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 9 Sep 2023 15:53:10 +0900 Subject: deprecate --enable-console-prompts use --enable-console-prompts as the default value for shared.opts.enable_console_prompts --- modules/cmd_args.py | 2 +- modules/img2img.py | 2 +- 
modules/shared_options.py | 2 +- modules/txt2img.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/cmd_args.py b/modules/cmd_args.py index aab62286..fe4d4ecc 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -90,7 +90,7 @@ parser.add_argument("--autolaunch", action='store_true', help="open the webui UR parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None) parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False) parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False) -parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False) +parser.add_argument("--enable-console-prompts", action='store_true', help="does not do anything", default=False) # Legacy compatibility, use as default value shared.opts.enable_console_prompts parser.add_argument('--vae-path', type=str, help='Checkpoint to use as VAE; setting this argument disables all settings related to VAE', default=None) parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False) parser.add_argument("--api", action='store_true', help="use api=True to launch the API together with the webui (use --nowebui instead for only the API)") diff --git a/modules/img2img.py b/modules/img2img.py index cbd80bac..72ee7bc2 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -199,7 +199,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s p.user = request.username - if shared.opts.enable_console_prompts or shared.cmd_opts.enable_console_prompts: + if shared.opts.enable_console_prompts: print(f"\nimg2img: {prompt}", file=shared.progress_print_out) if mask: diff --git a/modules/shared_options.py b/modules/shared_options.py index 44fb1670..1ea5c8f8 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -100,7 +100,7 @@ options_templates.update(options_section(('face-restoration', "Face restoration" options_templates.update(options_section(('system', "System"), { "auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}), - "enable_console_prompts": OptionInfo(False, "Print prompts to console when generating with txt2img and img2img."), + "enable_console_prompts": OptionInfo(shared.cmd_opts.enable_console_prompts, "Print prompts to console when generating with txt2img and img2img."), "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(), "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_reload_ui(), "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"), diff --git a/modules/txt2img.py b/modules/txt2img.py index 379ef859..721206dd 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -45,7 +45,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step p.user = request.username - if shared.opts.enable_console_prompts or cmd_opts.enable_console_prompts: + if shared.opts.enable_console_prompts: print(f"\ntxt2img: {prompt}", 
file=shared.progress_print_out) with closing(p): -- cgit v1.2.3 From c68aabc852151633016d3d5c84b433041f09d96e Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 9 Sep 2023 15:59:22 +0900 Subject: lint --- modules/txt2img.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/txt2img.py b/modules/txt2img.py index 721206dd..e4e18ceb 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -3,7 +3,7 @@ from contextlib import closing import modules.scripts from modules import processing from modules.generation_parameters_copypaste import create_override_settings_dict -from modules.shared import opts, cmd_opts +from modules.shared import opts import modules.shared as shared from modules.ui import plaintext_to_html import gradio as gr -- cgit v1.2.3 From 9cebe308e9f30f2a9555cf9fc43bf20652c4a619 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 9 Sep 2023 10:20:06 +0300 Subject: return apply styles to main UI --- modules/ui.py | 2 ++ modules/ui_prompt_styles.py | 21 +++++++++++---------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index b2aed7db..06aa509b 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -217,6 +217,7 @@ class Toprow: with gr.Row(elem_id=f"{id_part}_tools"): self.paste = ToolButton(value=paste_symbol, elem_id="paste", tooltip="Read generation parameters from prompt or last generation if prompt is empty into user interface.") self.clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt", tooltip="Clear prompt") + self.apply_styles = ToolButton(value=ui_prompt_styles.styles_materialize_symbol, elem_id=f"{id_part}_style_apply", tooltip="Apply all selected styles to prompts.") self.restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False, tooltip="Restore progress") self.token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) @@ -232,6 +233,7 @@ class Toprow: ) self.ui_styles = ui_prompt_styles.UiPromptStyles(id_part, self.prompt, self.negative_prompt) + self.ui_styles.setup_apply_button(self.apply_styles) self.prompt_img.change( fn=modules.images.image_data, diff --git a/modules/ui_prompt_styles.py b/modules/ui_prompt_styles.py index 64d379ef..3bcf092f 100644 --- a/modules/ui_prompt_styles.py +++ b/modules/ui_prompt_styles.py @@ -53,6 +53,8 @@ def refresh_styles(): class UiPromptStyles: def __init__(self, tabname, main_ui_prompt, main_ui_negative_prompt): self.tabname = tabname + self.main_ui_prompt = main_ui_prompt + self.main_ui_negative_prompt = main_ui_negative_prompt with gr.Row(elem_id=f"{tabname}_styles_row"): self.dropdown = gr.Dropdown(label="Styles", show_label=False, elem_id=f"{tabname}_styles", choices=list(shared.prompt_styles.styles), value=[], multiselect=True, tooltip="Styles") @@ -62,7 +64,7 @@ class UiPromptStyles: with gr.Row(): self.selection = gr.Dropdown(label="Styles", elem_id=f"{tabname}_styles_edit_select", choices=list(shared.prompt_styles.styles), value=[], allow_custom_value=True, info="Styles allow you to add custom text to prompt. Use the {prompt} token in style text, and it will be replaced with user's prompt when applying style. 
Otherwise, style's text will be added to the end of the prompt.") ui_common.create_refresh_button([self.dropdown, self.selection], shared.prompt_styles.reload, lambda: {"choices": list(shared.prompt_styles.styles)}, f"refresh_{tabname}_styles") - self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f"{tabname}_style_apply", tooltip="Apply all selected styles from the style selction dropdown in main UI to the prompt.") + self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f"{tabname}_style_apply_dialog", tooltip="Apply all selected styles from the style selction dropdown in main UI to the prompt.") self.copy = ui_components.ToolButton(value=styles_copy_symbol, elem_id=f"{tabname}_style_copy", tooltip="Copy main UI prompt to style.") with gr.Row(): @@ -98,12 +100,7 @@ class UiPromptStyles: show_progress=False, ).then(refresh_styles, outputs=[self.dropdown, self.selection], show_progress=False) - self.materialize.click( - fn=materialize_styles, - inputs=[main_ui_prompt, main_ui_negative_prompt, self.dropdown], - outputs=[main_ui_prompt, main_ui_negative_prompt, self.dropdown], - show_progress=False, - ).then(fn=None, _js="function(){update_"+tabname+"_tokens(); closePopup();}", show_progress=False) + self.setup_apply_button(self.materialize) self.copy.click( fn=lambda p, n: (p, n), @@ -114,6 +111,10 @@ class UiPromptStyles: ui_common.setup_dialog(button_show=edit_button, dialog=styles_dialog, button_close=self.close) - - - + def setup_apply_button(self, button): + button.click( + fn=materialize_styles, + inputs=[self.main_ui_prompt, self.main_ui_negative_prompt, self.dropdown], + outputs=[self.main_ui_prompt, self.main_ui_negative_prompt, self.dropdown], + show_progress=False, + ).then(fn=None, _js="function(){update_"+self.tabname+"_tokens(); closePopup();}", show_progress=False) -- cgit v1.2.3 From 06af73bd1d17b417f234746c9a2f8e8e92cf6149 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 9 Sep 2023 10:23:53 +0300 Subject: linter --- modules/scripts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/scripts.py b/modules/scripts.py index f1f17a5f..5c6e0226 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -498,7 +498,7 @@ class ScriptRunner: choices = getattr(control, 'choices', None) # as of gradio 3.41, some items in choices are strings, and some are tuples where the first elem is the string if choices is not None: - setattr(arg_info, 'choices', [x[0] if isinstance(x, tuple) else x for x in choices]) + arg_info.choices = [x[0] if isinstance(x, tuple) else x for x in choices] api_args.append(arg_info) -- cgit v1.2.3 From c9c457eda8dec414dc38e874691d1e2736d6dcbb Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 9 Sep 2023 10:27:16 +0300 Subject: stylistic changes for #13118 --- modules/ui.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 5f0f1cd1..569dc807 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1,5 +1,4 @@ import datetime -import functools import mimetypes import os import sys @@ -152,7 +151,7 @@ def connect_clear_prompt(button): ) -def update_token_counter(text, steps, is_positive=True): +def update_token_counter(text, steps, *, is_positive=True): try: text, _ = extra_networks.parse_prompt(text) @@ -160,6 +159,7 @@ def update_token_counter(text, steps, is_positive=True): _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text]) else: 
prompt_flat_list = [text] + prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps) except Exception: @@ -173,6 +173,10 @@ def update_token_counter(text, steps, is_positive=True): return f"{token_count}/{max_length}" +def update_negative_prompt_token_counter(text, steps): + return update_token_counter(text, steps, is_positive=False) + + class Toprow: """Creates a top row UI with prompts, generate button, styles, extra little buttons for things, and enables some functionality related to their operation""" @@ -539,7 +543,7 @@ def create_ui(): ] toprow.token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.prompt, steps], outputs=[toprow.token_counter]) - toprow.negative_token_button.click(fn=wrap_queued_call(functools.partial(update_token_counter, is_positive=False)), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter]) + toprow.negative_token_button.click(fn=wrap_queued_call(update_negative_prompt_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter]) extra_networks_ui = ui_extra_networks.create_ui(txt2img_interface, [txt2img_generation_tab], 'txt2img') ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) -- cgit v1.2.3 From 25de9a785cc9e93c16626db6ab5b16824443de53 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 9 Sep 2023 16:56:19 +0900 Subject: Revert "thread safe extra network list_items" This reverts commit aab385d01b4311726127397552d791f4d71b7147. --- extensions-builtin/Lora/ui_extra_networks_lora.py | 10 +++++----- modules/ui_extra_networks.py | 2 -- modules/ui_extra_networks_checkpoints.py | 6 +++--- modules/ui_extra_networks_hypernets.py | 5 ++--- modules/ui_extra_networks_textual_inversion.py | 5 ++--- 5 files changed, 12 insertions(+), 16 deletions(-) diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py index e9f30062..55409a78 100644 --- a/extensions-builtin/Lora/ui_extra_networks_lora.py +++ b/extensions-builtin/Lora/ui_extra_networks_lora.py @@ -66,11 +66,11 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage): return item def list_items(self): - with self.thread_lock: - for index, name in enumerate(networks.available_networks): - item = self.create_item(name, index) - if item is not None: - yield item + for index, name in enumerate(networks.available_networks): + item = self.create_item(name, index) + + if item is not None: + yield item def allowed_directories_for_previews(self): return [shared.cmd_opts.lora_dir, shared.cmd_opts.lyco_dir_backcompat] diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 564bab7f..063bd7b8 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -1,7 +1,6 @@ import os.path import urllib.parse from pathlib import Path -from threading import Lock from modules import shared, ui_extra_networks_user_metadata, errors, extra_networks from modules.images import read_info_from_image, save_image_with_geninfo @@ -95,7 +94,6 @@ class ExtraNetworksPage: self.allow_negative_prompt = False self.metadata = {} self.items = {} - self.thread_lock = Lock() def refresh(self): pass diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py index 2753214f..ca6c2607 100644 --- a/modules/ui_extra_networks_checkpoints.py +++ b/modules/ui_extra_networks_checkpoints.py @@ -30,9 +30,9 @@ class 
ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage): } def list_items(self): - with self.thread_lock: - for index, name in enumerate(sd_models.checkpoints_list): - yield self.create_item(name, index) + names = list(sd_models.checkpoints_list) + for index, name in enumerate(names): + yield self.create_item(name, index) def allowed_directories_for_previews(self): return [v for v in [shared.cmd_opts.ckpt_dir, sd_models.model_path] if v is not None] diff --git a/modules/ui_extra_networks_hypernets.py b/modules/ui_extra_networks_hypernets.py index 411b4f11..4cedf085 100644 --- a/modules/ui_extra_networks_hypernets.py +++ b/modules/ui_extra_networks_hypernets.py @@ -31,9 +31,8 @@ class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage): } def list_items(self): - with self.thread_lock: - for index, name in enumerate(shared.hypernetworks): - yield self.create_item(name, index) + for index, name in enumerate(shared.hypernetworks): + yield self.create_item(name, index) def allowed_directories_for_previews(self): return [shared.cmd_opts.hypernetwork_dir] diff --git a/modules/ui_extra_networks_textual_inversion.py b/modules/ui_extra_networks_textual_inversion.py index d25b45d6..55ef0ea7 100644 --- a/modules/ui_extra_networks_textual_inversion.py +++ b/modules/ui_extra_networks_textual_inversion.py @@ -29,9 +29,8 @@ class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage): } def list_items(self): - with self.thread_lock: - for index, name in enumerate(sd_hijack.model_hijack.embedding_db.word_embeddings): - yield self.create_item(name, index) + for index, name in enumerate(sd_hijack.model_hijack.embedding_db.word_embeddings): + yield self.create_item(name, index) def allowed_directories_for_previews(self): return list(sd_hijack.model_hijack.embedding_db.embedding_dirs) -- cgit v1.2.3 From f5959c1c3022c454de22fab749d0f06ab3219868 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 9 Sep 2023 17:05:50 +0900 Subject: thread safe extra network using list --- extensions-builtin/Lora/ui_extra_networks_lora.py | 3 ++- modules/ui_extra_networks_hypernets.py | 3 ++- modules/ui_extra_networks_textual_inversion.py | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py index 55409a78..e74daa77 100644 --- a/extensions-builtin/Lora/ui_extra_networks_lora.py +++ b/extensions-builtin/Lora/ui_extra_networks_lora.py @@ -66,7 +66,8 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage): return item def list_items(self): - for index, name in enumerate(networks.available_networks): + names = list(networks.available_networks) + for index, name in enumerate(names): item = self.create_item(name, index) if item is not None: diff --git a/modules/ui_extra_networks_hypernets.py b/modules/ui_extra_networks_hypernets.py index 4cedf085..5f590491 100644 --- a/modules/ui_extra_networks_hypernets.py +++ b/modules/ui_extra_networks_hypernets.py @@ -31,7 +31,8 @@ class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage): } def list_items(self): - for index, name in enumerate(shared.hypernetworks): + names = list(shared.hypernetworks) + for index, name in enumerate(names): yield self.create_item(name, index) def allowed_directories_for_previews(self): diff --git a/modules/ui_extra_networks_textual_inversion.py b/modules/ui_extra_networks_textual_inversion.py index 55ef0ea7..40ab0aca 100644 --- 
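# Why the revert above is safe to follow up with list(): iterating a dict
# that another thread mutates raises RuntimeError, while iterating a
# one-time snapshot of its keys does not. Minimal illustration:
d = {"a": 1, "b": 2, "c": 3}
names = list(d)                # snapshot taken once
for index, name in enumerate(names):
    d.pop(name, None)          # concurrent-style mutation is now harmless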
a/modules/ui_extra_networks_textual_inversion.py +++ b/modules/ui_extra_networks_textual_inversion.py @@ -29,7 +29,8 @@ class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage): } def list_items(self): - for index, name in enumerate(sd_hijack.model_hijack.embedding_db.word_embeddings): + names = list(sd_hijack.model_hijack.embedding_db.word_embeddings) + for index, name in enumerate(names): yield self.create_item(name, index) def allowed_directories_for_previews(self): -- cgit v1.2.3 From f8042cb323f0b581e3a49880dd0023e39d7dcc2c Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 9 Sep 2023 22:35:07 +0900 Subject: Ensure not override images with script enabled --- modules/img2img.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/img2img.py b/modules/img2img.py index 7ca10cf0..52cb577a 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -117,6 +117,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal if output_dir: p.outpath_samples = output_dir p.override_settings['save_to_dirs'] = False + p.override_settings['save_images_replace_action'] = "Add number suffix" if p.n_iter > 1 or p.batch_size > 1: p.override_settings['samples_filename_pattern'] = f'{image_path.stem}-[generation_number]' else: @@ -125,6 +126,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal proc = modules.scripts.scripts_img2img.run(p, *args) if proc is None: + p.override_settings.pop('save_images_replace_action', None) process_images(p) -- cgit v1.2.3 From ab5741717546758c57cf6c2a040645ec2b44690a Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 9 Sep 2023 22:35:50 +0900 Subject: prevent accessing non-existing keys --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 0c191428..618f8abe 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -711,7 +711,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if p.scripts is not None: p.scripts.before_process(p) - stored_opts = {k: opts.data[k] for k in p.override_settings.keys()} + stored_opts = {k: opts.data[k] for k in p.override_settings.keys() if k in opts.data} try: # if no checkpoint override or the override checkpoint can't be found, remove override entry and load opts checkpoint -- cgit v1.2.3 From d6478a60aa7c6f96a959ca6e3b9e8d51ad22d895 Mon Sep 17 00:00:00 2001 From: zixaphir Date: Sat, 9 Sep 2023 17:22:10 -0700 Subject: Remove extra network separator without regex --- javascript/extraNetworks.js | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index ca87bead..ff58d3dc 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -140,23 +140,19 @@ function setupExtraNetworks() { onUiLoaded(setupExtraNetworks); -var re_extranet = /<([^:]+:[^:]+):[\d.]+>(.*)/; -var re_extranet_str = '<([^:]+:[^:]+):[\\d.]+>'; +var re_extranet = /<([^:^>]+:[^:]+):[\d.]+>(.*)/; +var re_extranet_g = /<([^:^>]+:[^:]+):[\d.]+>/g; function tryToRemoveExtraNetworkFromPrompt(textarea, text) { - function reEscape(s) { - return s.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); - } var m = text.match(re_extranet); var replaced = false; var newTextareaText; if (m) { + var extraTextBeforeNet = opts.extra_networks_add_text_separator; var extraTextAfterNet = m[2]; var partToSearch = m[1]; var 
foundAtPosition = -1; - var escapedSeparator = `(?:${reEscape(opts.extra_networks_add_text_separator)})?`; - var re = new RegExp(escapedSeparator + re_extranet_str, 'g'); - newTextareaText = textarea.value.replaceAll(re, function(found, net, pos) { + newTextareaText = textarea.value.replaceAll(re_extranet_g, function(found, net, pos) { m = found.match(re_extranet); if (m[1] == partToSearch) { replaced = true; @@ -166,8 +162,13 @@ function tryToRemoveExtraNetworkFromPrompt(textarea, text) { return found; }); - if (foundAtPosition >= 0 && newTextareaText.substr(foundAtPosition, extraTextAfterNet.length) == extraTextAfterNet) { - newTextareaText = newTextareaText.substr(0, foundAtPosition) + newTextareaText.substr(foundAtPosition + extraTextAfterNet.length); + if (foundAtPosition >= 0) { + if (newTextareaText.substr(foundAtPosition, extraTextAfterNet.length) == extraTextAfterNet) { + newTextareaText = newTextareaText.substr(0, foundAtPosition) + newTextareaText.substr(foundAtPosition + extraTextAfterNet.length); + } + if (newTextareaText.substr(foundAtPosition - extraTextBeforeNet.length, extraTextBeforeNet.length) == extraTextBeforeNet) { + newTextareaText = newTextareaText.substr(0, foundAtPosition - extraTextBeforeNet.length) + newTextareaText.substr(foundAtPosition); + } } } else { newTextareaText = textarea.value.replaceAll(new RegExp(text, "g"), function(found) { -- cgit v1.2.3 From 26d0d87f5b05c345abfec8eb0f8bd703cacd9619 Mon Sep 17 00:00:00 2001 From: zixaphir Date: Sat, 9 Sep 2023 17:26:46 -0700 Subject: Remove extra spaces --- javascript/extraNetworks.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index ff58d3dc..158b5b64 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -140,7 +140,7 @@ function setupExtraNetworks() { onUiLoaded(setupExtraNetworks); -var re_extranet = /<([^:^>]+:[^:]+):[\d.]+>(.*)/; +var re_extranet = /<([^:^>]+:[^:]+):[\d.]+>(.*)/; var re_extranet_g = /<([^:^>]+:[^:]+):[\d.]+>/g; function tryToRemoveExtraNetworkFromPrompt(textarea, text) { -- cgit v1.2.3 From 7d4d871d4679b5b78ff67b501da5367413542984 Mon Sep 17 00:00:00 2001 From: dongwenpu Date: Sun, 10 Sep 2023 17:53:42 +0800 Subject: fix: lora-bias-backup don't reset cache --- extensions-builtin/Lora/networks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 96f935b2..315682b3 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -418,6 +418,7 @@ def network_forward(module, input, original_forward): def network_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]): self.network_current_names = () self.network_weights_backup = None + self.network_bias_backup = None def network_Linear_forward(self, input): -- cgit v1.2.3 From 413123f08a745e9417fd384d2c1bee1e0e5e5730 Mon Sep 17 00:00:00 2001 From: liubo0902 <38622806+liubo0902@users.noreply.github.com> Date: Mon, 11 Sep 2023 09:22:27 +0800 Subject: Update localization.py --- modules/localization.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/modules/localization.py b/modules/localization.py index 262d49ee..108f792e 100644 --- a/modules/localization.py +++ b/modules/localization.py @@ -14,12 +14,10 @@ def list_localizations(dirname): if ext.lower() != ".json": continue - fn = fn.replace(" ", "").replace("(", "_").replace(")","") localizations[fn] = [os.path.join(dirname, file)] for file in 
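# A small sketch of the backup-cache rule behind the networks.py fix above,
# with a hypothetical stand-in class: every cached field (names, weights,
# bias) must be cleared together, or a stale bias backup survives the reset
# and gets restored on the next network switch.
class WeightCache:
    def __init__(self):
        self.current_names = ()
        self.weights_backup = None
        self.bias_backup = None

    def reset(self):
        self.current_names = ()
        self.weights_backup = None
        self.bias_backup = None   # the field the fix adds to the reset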
scripts.list_scripts("localizations", ".json"): fn, ext = os.path.splitext(file.filename) - fn = fn.replace(" ", "").replace("(", "_").replace(")","") if fn not in localizations: localizations[fn] = [] localizations[fn].append(file.path) -- cgit v1.2.3 From c485a7d12e26df1497309023d09cb3c106d7ae2b Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 11 Sep 2023 13:47:44 +0900 Subject: make InputAccordion work with ui-config --- modules/ui_loadsave.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/modules/ui_loadsave.py b/modules/ui_loadsave.py index ec8fa8e8..eb20ff25 100644 --- a/modules/ui_loadsave.py +++ b/modules/ui_loadsave.py @@ -4,7 +4,7 @@ import os import gradio as gr from modules import errors -from modules.ui_components import ToolButton +from modules.ui_components import ToolButton, InputAccordion def radio_choices(comp): # gradio 3.41 changes choices from list of values to list of pairs @@ -32,8 +32,6 @@ class UiLoadsave: self.error_loading = True errors.display(e, "loading settings") - - def add_component(self, path, x): """adds component to the registry of tracked components""" @@ -43,20 +41,24 @@ class UiLoadsave: key = f"{path}/{field}" if getattr(obj, 'custom_script_source', None) is not None: - key = f"customscript/{obj.custom_script_source}/{key}" + key = f"customscript/{obj.custom_script_source}/{key}" if getattr(obj, 'do_not_save_to_config', False): return saved_value = self.ui_settings.get(key, None) + + if isinstance(obj, gr.Accordion) and isinstance(x, InputAccordion) and field == 'value': + field = 'open' + if saved_value is None: self.ui_settings[key] = getattr(obj, field) elif condition and not condition(saved_value): pass else: - if isinstance(x, gr.Textbox) and field == 'value': # due to an undesirable behavior of gr.Textbox, if you give it an int value instead of str, everything dies + if isinstance(obj, gr.Textbox) and field == 'value': # due to an undesirable behavior of gr.Textbox, if you give it an int value instead of str, everything dies saved_value = str(saved_value) - elif isinstance(x, gr.Number) and field == 'value': + elif isinstance(obj, gr.Number) and field == 'value': try: saved_value = float(saved_value) except ValueError: @@ -67,7 +69,7 @@ class UiLoadsave: init_field(saved_value) if field == 'value' and key not in self.component_mapping: - self.component_mapping[key] = x + self.component_mapping[key] = obj if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number, gr.Dropdown, ToolButton, gr.Button] and x.visible: apply_field(x, 'visible') @@ -100,6 +102,12 @@ class UiLoadsave: apply_field(x, 'value', check_dropdown, getattr(x, 'init_field', None)) + if type(x) == InputAccordion: + if x.accordion.visible: + apply_field(x.accordion, 'visible') + apply_field(x, 'value') + apply_field(x.accordion, 'value') + def check_tab_id(tab_id): tab_items = list(filter(lambda e: isinstance(e, gr.TabItem), x.children)) if type(tab_id) == str: -- cgit v1.2.3 From e785402b6acca12108e15224ff80d58817ab3c27 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 9 Sep 2023 17:28:06 +0900 Subject: return nothing if not found --- extensions-builtin/Lora/ui_extra_networks_lora.py | 3 ++- modules/ui_extra_networks_checkpoints.py | 7 ++++++- modules/ui_extra_networks_hypernets.py | 9 +++++++-- modules/ui_extra_networks_textual_inversion.py | 6 +++++- 4 files changed, 20 insertions(+), 5 deletions(-) diff --git 
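# Condensed sketch of the value coercion in the ui-config loader above,
# assuming gradio is importable; saved values can come back as the wrong
# primitive type, so they are coerced based on the component class.
import gradio as gr

def coerce_saved_value(component, saved_value):
    if isinstance(component, gr.Textbox):
        return str(saved_value)    # gr.Textbox misbehaves when given an int
    if isinstance(component, gr.Number):
        try:
            return float(saved_value)
        except ValueError:
            return None            # caller falls back to the default
    return saved_value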
a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py index e74daa77..dac90a86 100644 --- a/extensions-builtin/Lora/ui_extra_networks_lora.py +++ b/extensions-builtin/Lora/ui_extra_networks_lora.py @@ -17,6 +17,8 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage): def create_item(self, name, index=None, enable_filter=True): lora_on_disk = networks.available_networks.get(name) + if lora_on_disk is None: + return path, ext = os.path.splitext(lora_on_disk.filename) @@ -69,7 +71,6 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage): names = list(networks.available_networks) for index, name in enumerate(names): item = self.create_item(name, index) - if item is not None: yield item diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py index ca6c2607..35e958a0 100644 --- a/modules/ui_extra_networks_checkpoints.py +++ b/modules/ui_extra_networks_checkpoints.py @@ -15,6 +15,9 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage): def create_item(self, name, index=None, enable_filter=True): checkpoint: sd_models.CheckpointInfo = sd_models.checkpoint_aliases.get(name) + if checkpoint is None: + return + path, ext = os.path.splitext(checkpoint.filename) return { "name": checkpoint.name_for_extra, @@ -32,7 +35,9 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage): def list_items(self): names = list(sd_models.checkpoints_list) for index, name in enumerate(names): - yield self.create_item(name, index) + item = self.create_item(name, index) + if item is not None: + yield item def allowed_directories_for_previews(self): return [v for v in [shared.cmd_opts.ckpt_dir, sd_models.model_path] if v is not None] diff --git a/modules/ui_extra_networks_hypernets.py b/modules/ui_extra_networks_hypernets.py index 5f590491..74f7d847 100644 --- a/modules/ui_extra_networks_hypernets.py +++ b/modules/ui_extra_networks_hypernets.py @@ -13,7 +13,10 @@ class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage): shared.reload_hypernetworks() def create_item(self, name, index=None, enable_filter=True): - full_path = shared.hypernetworks[name] + full_path = shared.hypernetworks.get(name) + if full_path is None: + return + path, ext = os.path.splitext(full_path) sha256 = sha256_from_cache(full_path, f'hypernet/{name}') shorthash = sha256[0:10] if sha256 else None @@ -33,7 +36,9 @@ class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage): def list_items(self): names = list(shared.hypernetworks) for index, name in enumerate(names): - yield self.create_item(name, index) + item = self.create_item(name, index) + if item is not None: + yield item def allowed_directories_for_previews(self): return [shared.cmd_opts.hypernetwork_dir] diff --git a/modules/ui_extra_networks_textual_inversion.py b/modules/ui_extra_networks_textual_inversion.py index 40ab0aca..71c38fab 100644 --- a/modules/ui_extra_networks_textual_inversion.py +++ b/modules/ui_extra_networks_textual_inversion.py @@ -14,6 +14,8 @@ class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage): def create_item(self, name, index=None, enable_filter=True): embedding = sd_hijack.model_hijack.embedding_db.word_embeddings.get(name) + if embedding is None: + return path, ext = os.path.splitext(embedding.filename) return { @@ -31,7 +33,9 @@ class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage): def list_items(self): names = 
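# The common shape of the list_items implementations in this patch, with
# self.registry standing in for the page-specific collection: snapshot the
# keys, then skip entries whose backing object disappeared in the meantime.
def list_items(self):
    names = list(self.registry)            # guard against concurrent edits
    for index, name in enumerate(names):
        item = self.create_item(name, index)
        if item is not None:               # entry vanished between snapshot
            yield item                     # and lookup; just skip it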
list(sd_hijack.model_hijack.embedding_db.word_embeddings) for index, name in enumerate(names): - yield self.create_item(name, index) + item = self.create_item(name, index) + if item is not None: + yield item def allowed_directories_for_previews(self): return list(sd_hijack.model_hijack.embedding_db.embedding_dirs) -- cgit v1.2.3 From 59544321aa019d71d220b1da1eec703aa44fa8eb Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 11 Sep 2023 21:17:28 +0300 Subject: initial work on sd_unet for SDXL --- modules/sd_hijack.py | 17 ++++++++++++----- modules/sd_unet.py | 4 ++-- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 592f0055..22a1eb5c 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -2,7 +2,7 @@ import torch from torch.nn.functional import silu from types import MethodType -from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors, sd_unet +from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors, sd_unet, patches from modules.hypernetworks import hypernetwork from modules.shared import cmd_opts from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr @@ -10,6 +10,7 @@ from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hija import ldm.modules.attention import ldm.modules.diffusionmodules.model import ldm.modules.diffusionmodules.openaimodel +import ldm.models.diffusion.ddpm import ldm.models.diffusion.ddim import ldm.models.diffusion.plms import ldm.modules.encoders.modules @@ -37,6 +38,8 @@ ldm.models.diffusion.ddpm.print = shared.ldm_print optimizers = [] current_optimizer: sd_hijack_optimizations.SdOptimization = None +ldm_original_forward = patches.patch(__file__, ldm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sd_unet.UNetModel_forward) +sgm_original_forward = patches.patch(__file__, sgm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sd_unet.UNetModel_forward) def list_optimizers(): new_optimizers = script_callbacks.list_optimizers_callback() @@ -239,10 +242,13 @@ class StableDiffusionModelHijack: self.layers = flatten(m) - if not hasattr(ldm.modules.diffusionmodules.openaimodel, 'copy_of_UNetModel_forward_for_webui'): - ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui = ldm.modules.diffusionmodules.openaimodel.UNetModel.forward + if isinstance(m, ldm.models.diffusion.ddpm.LatentDiffusion): + sd_unet.original_forward = ldm_original_forward + elif isinstance(m, sgm.models.diffusion.DiffusionEngine): + sd_unet.original_forward = sgm_original_forward + else: + sd_unet.original_forward = None - ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = sd_unet.UNetModel_forward def undo_hijack(self, m): conditioner = getattr(m, 'conditioner', None) @@ -279,7 +285,8 @@ class StableDiffusionModelHijack: self.layers = None self.clip = None - ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui + sd_unet.original_forward = None + def apply_circular(self, enable): if self.circular_enabled == enable: diff --git a/modules/sd_unet.py b/modules/sd_unet.py index 5525cfbc..6a7bc9e2 100644 --- a/modules/sd_unet.py +++ b/modules/sd_unet.py @@ -1,11 +1,11 @@ import torch.nn -import ldm.modules.diffusionmodules.openaimodel from modules import script_callbacks, shared, devices unet_options = [] current_unet_option = None current_unet = None 
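# The save-then-delegate idea behind the patches.patch calls above, reduced
# to a hypothetical helper (not the webui API): keep the original callable
# so the replacement can fall back to it or be undone later.
def patch_method(cls, name, replacement):
    original = getattr(cls, name)
    setattr(cls, name, replacement)
    return original   # stored, e.g. as ldm_original_forward above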
+original_forward = None def list_unets(): @@ -88,5 +88,5 @@ def UNetModel_forward(self, x, timesteps=None, context=None, *args, **kwargs): if current_unet is not None: return current_unet.forward(x, timesteps, context, *args, **kwargs) - return ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui(self, x, timesteps, context, *args, **kwargs) + return original_forward(self, x, timesteps, context, *args, **kwargs) -- cgit v1.2.3 From 74b80e72115af46bf1c04167a30f9ec5025cb464 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 12 Sep 2023 09:29:07 +0900 Subject: add comment --- extensions-builtin/Lora/ui_extra_networks_lora.py | 1 + modules/ui_extra_networks_checkpoints.py | 1 + modules/ui_extra_networks_hypernets.py | 1 + modules/ui_extra_networks_textual_inversion.py | 1 + 4 files changed, 4 insertions(+) diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py index dac90a86..df02c663 100644 --- a/extensions-builtin/Lora/ui_extra_networks_lora.py +++ b/extensions-builtin/Lora/ui_extra_networks_lora.py @@ -68,6 +68,7 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage): return item def list_items(self): + # instantiate a list to protect against concurrent modification names = list(networks.available_networks) for index, name in enumerate(names): item = self.create_item(name, index) diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py index 35e958a0..df7efb2e 100644 --- a/modules/ui_extra_networks_checkpoints.py +++ b/modules/ui_extra_networks_checkpoints.py @@ -33,6 +33,7 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage): } def list_items(self): + # instantiate a list to protect against concurrent modification names = list(sd_models.checkpoints_list) for index, name in enumerate(names): item = self.create_item(name, index) diff --git a/modules/ui_extra_networks_hypernets.py b/modules/ui_extra_networks_hypernets.py index 74f7d847..c96c4fa3 100644 --- a/modules/ui_extra_networks_hypernets.py +++ b/modules/ui_extra_networks_hypernets.py @@ -34,6 +34,7 @@ class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage): } def list_items(self): + # instantiate a list to protect against concurrent modification names = list(shared.hypernetworks) for index, name in enumerate(names): item = self.create_item(name, index) diff --git a/modules/ui_extra_networks_textual_inversion.py b/modules/ui_extra_networks_textual_inversion.py index 71c38fab..1b334fda 100644 --- a/modules/ui_extra_networks_textual_inversion.py +++ b/modules/ui_extra_networks_textual_inversion.py @@ -31,6 +31,7 @@ class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage): } def list_items(self): + # instantiate a list to protect against concurrent modification names = list(sd_hijack.model_hijack.embedding_db.word_embeddings) for index, name in enumerate(names): item = self.create_item(name, index) -- cgit v1.2.3 From 6fb2194d9cc2c9b52bc2006117d592283e00b7d6 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 12 Sep 2023 16:50:56 +0900 Subject: fetch version info when webui_dir is not work_dir --- modules/launch_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 6e54d063..8cdbafa5 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -64,7 +64,7 @@ Use 
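# Self-contained sketch of the version lookup after this change: passing -C
# pins git to the webui checkout, so the result no longer depends on the
# process's current working directory.
import subprocess
from functools import lru_cache

@lru_cache()
def commit_hash(repo_dir):
    try:
        return subprocess.check_output(
            ["git", "-C", repo_dir, "rev-parse", "HEAD"],
            shell=False, encoding="utf8").strip()
    except Exception:
        return ""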
--skip-python-version-check to suppress this warning. @lru_cache() def commit_hash(): try: - return subprocess.check_output([git, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip() + return subprocess.check_output([git, "-C", script_path, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip() except Exception: return "" @@ -72,7 +72,7 @@ def commit_hash(): @lru_cache() def git_tag(): try: - return subprocess.check_output([git, "describe", "--tags"], shell=False, encoding='utf8').strip() + return subprocess.check_output([git, "-C", script_path, "describe", "--tags"], shell=False, encoding='utf8').strip() except Exception: try: -- cgit v1.2.3 From 93015964c7c920fbb834bf99977ab8e16296efac Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 12 Sep 2023 22:43:35 +0900 Subject: fix add_option overriding config with default --- modules/options.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/options.py b/modules/options.py index e75916d2..ab40aff7 100644 --- a/modules/options.py +++ b/modules/options.py @@ -210,7 +210,8 @@ class Options: def add_option(self, key, info): self.data_labels[key] = info - self.data[key] = info.default + if key not in self.data: + self.data[key] = info.default def reorder(self): """reorder settings so that all items related to section always go together""" -- cgit v1.2.3 From 5b761b49ade392eb8ff4c54ab1841b63475b5dd0 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 13 Sep 2023 16:05:55 +0900 Subject: correct webpath when webui_dir is not work_dir --- modules/paths.py | 2 +- modules/paths_internal.py | 1 + modules/ui_gradio_extensions.py | 6 +++--- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/modules/paths.py b/modules/paths.py index 25052339..187b9496 100644 --- a/modules/paths.py +++ b/modules/paths.py @@ -1,6 +1,6 @@ import os import sys -from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir # noqa: F401 +from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, cwd # noqa: F401 import modules.safe # noqa: F401 diff --git a/modules/paths_internal.py b/modules/paths_internal.py index 005a9b0a..89131a54 100644 --- a/modules/paths_internal.py +++ b/modules/paths_internal.py @@ -8,6 +8,7 @@ import shlex commandline_args = os.environ.get('COMMANDLINE_ARGS', "") sys.argv += shlex.split(commandline_args) +cwd = os.getcwd() modules_path = os.path.dirname(os.path.realpath(__file__)) script_path = os.path.dirname(modules_path) diff --git a/modules/ui_gradio_extensions.py b/modules/ui_gradio_extensions.py index b824b113..0d368f8b 100644 --- a/modules/ui_gradio_extensions.py +++ b/modules/ui_gradio_extensions.py @@ -2,12 +2,12 @@ import os import gradio as gr from modules import localization, shared, scripts -from modules.paths import script_path, data_path +from modules.paths import script_path, data_path, cwd def webpath(fn): - if fn.startswith(script_path): - web_path = os.path.relpath(fn, script_path).replace('\\', '/') + if fn.startswith(cwd): + web_path = os.path.relpath(fn, cwd) else: web_path = os.path.abspath(fn) -- cgit v1.2.3 From cf1edc2b54d7ae8acec45ddba098957a6caa7867 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 13 Sep 2023 16:27:02 +0900 Subject: initialize state.time_start befroe state.job_count --- modules/shared_state.py | 2 +- 1 file changed, 1 insertion(+), 1 
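# The registration rule from the add_option fix above, with a namedtuple as
# a stand-in for the real OptionInfo: registering an option records its
# metadata and default, but never clobbers a value the user already saved.
from collections import namedtuple

OptionInfo = namedtuple("OptionInfo", "default")

class Options:
    def __init__(self, saved):
        self.data = dict(saved)   # values loaded from the user's config file
        self.data_labels = {}     # option metadata, including defaults

    def add_option(self, key, info):
        self.data_labels[key] = info
        if key not in self.data:  # only fill in the default when unset
            self.data[key] = info.default

opts = Options({"grid_format": "jpg"})
opts.add_option("grid_format", OptionInfo("png"))
assert opts.data["grid_format"] == "jpg"   # user value survives registration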
deletion(-) diff --git a/modules/shared_state.py b/modules/shared_state.py index d272ee5b..a68789cc 100644 --- a/modules/shared_state.py +++ b/modules/shared_state.py @@ -103,6 +103,7 @@ class State: def begin(self, job: str = "(unknown)"): self.sampling_step = 0 + self.time_start = time.time() self.job_count = -1 self.processing_has_refined_job_count = False self.job_no = 0 @@ -114,7 +115,6 @@ class State: self.skipped = False self.interrupted = False self.textinfo = None - self.time_start = time.time() self.job = job devices.torch_gc() log.info("Starting job %s", job) -- cgit v1.2.3 From 0ad38a9b87b7781315ea6324a8aa6c924d1275de Mon Sep 17 00:00:00 2001 From: Der Chien Date: Wed, 13 Sep 2023 20:20:01 +0800 Subject: 20230913 setup GIT_PYTHON_GIT_EXECUTABLE for GitPython --- webui.bat | 1 + webui.sh | 2 ++ 2 files changed, 3 insertions(+) diff --git a/webui.bat b/webui.bat index 42e7d517..a630ea4d 100644 --- a/webui.bat +++ b/webui.bat @@ -1,6 +1,7 @@ @echo off if not defined PYTHON (set PYTHON=python) +if defined GIT (set "GIT_PYTHON_GIT_EXECUTABLE=%GIT%") if not defined VENV_DIR (set "VENV_DIR=%~dp0%venv") set SD_WEBUI_RESTART=tmp/restart diff --git a/webui.sh b/webui.sh index 3d0f87ee..bdab3f05 100755 --- a/webui.sh +++ b/webui.sh @@ -51,6 +51,8 @@ fi if [[ -z "${GIT}" ]] then export GIT="git" +else + export GIT_PYTHON_GIT_EXECUTABLE="${GIT}" fi # python3 venv without trailing slash (defaults to ${install_dir}/${clone_dir}/venv) -- cgit v1.2.3 From ab3d3528a18ea1a81f1af22ea71bfc0d8c710dde Mon Sep 17 00:00:00 2001 From: Leon Date: Thu, 14 Sep 2023 18:42:56 +0800 Subject: add --skip-load-model-at-start --- modules/cmd_args.py | 1 + modules/initialize.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 5be879dd..4e602a84 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -117,3 +117,4 @@ parser.add_argument('--api-server-stop', action='store_true', help='enable serve parser.add_argument('--timeout-keep-alive', type=int, default=30, help='set timeout_keep_alive for uvicorn') parser.add_argument("--disable-all-extensions", action='store_true', help="prevent all extensions from running regardless of any other settings", default=False) parser.add_argument("--disable-extra-extensions", action='store_true', help="prevent all extensions except built-in from running regardless of any other settings", default=False) +parser.add_argument("--skip-load-model-at-start", action='store_true', help="if load a model at web start, only take effect when --nowebui", ) diff --git a/modules/initialize.py b/modules/initialize.py index f24f7637..ac95fc6f 100644 --- a/modules/initialize.py +++ b/modules/initialize.py @@ -151,8 +151,8 @@ def initialize_rest(*, reload_script_modules=False): from modules import devices devices.first_time_calculation() - - Thread(target=load_model).start() + if not shared.cmd_opts.skip_load_model_at_start: + Thread(target=load_model).start() from modules import shared_items shared_items.reload_hypernetworks() -- cgit v1.2.3 From afd06245876004710007fa1abd0a1b4b2564c181 Mon Sep 17 00:00:00 2001 From: Won-Kyu Park Date: Fri, 15 Sep 2023 17:10:01 +0900 Subject: xyz_grid: add prepare option to AxisOption --- scripts/xyz_grid.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 939d8605..ce5a1a19 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -205,13 +205,14 @@ def csv_string_to_list_strip(data_str): class AxisOption: - def 
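# How the new prepare hook is consumed, simplified from the xyz_grid change
# (csv parsing reduced to a plain split): an axis may preprocess its raw
# text itself instead of getting the default csv handling.
def csv_string_to_list_strip(data_str):
    return [s.strip() for s in data_str.split(",")]

def axis_values(opt, vals, vals_dropdown, csv_mode):
    if opt.choices is not None and not csv_mode:
        return vals_dropdown
    if opt.prepare is not None:
        return opt.prepare(vals)            # axis-specific preprocessing
    return csv_string_to_list_strip(vals)   # default behaviour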
__init__(self, label, type, apply, format_value=format_value_add_label, confirm=None, cost=0.0, choices=None): + def __init__(self, label, type, apply, format_value=format_value_add_label, confirm=None, cost=0.0, choices=None, prepare=None): self.label = label self.type = type self.apply = apply self.format_value = format_value self.confirm = confirm self.cost = cost + self.prepare = prepare self.choices = choices @@ -536,6 +537,8 @@ class Script(scripts.Script): if opt.choices is not None and not csv_mode: valslist = vals_dropdown + elif opt.prepare is not None: + valslist = opt.prepare(vals) else: valslist = csv_string_to_list_strip(vals) -- cgit v1.2.3 From 813535d38bbcdd8ccc51d0618a7d9fd353677bb9 Mon Sep 17 00:00:00 2001 From: "qiuwen.wang" Date: Fri, 15 Sep 2023 18:23:23 +0800 Subject: use dict[key]=model; did not update orderdict order, should use move to end --- modules/sd_models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/sd_models.py b/modules/sd_models.py index 930d0bee..6d17dd3c 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -309,6 +309,7 @@ def get_checkpoint_state_dict(checkpoint_info: CheckpointInfo, timer): if checkpoint_info in checkpoints_loaded: # use checkpoint cache print(f"Loading weights [{sd_model_hash}] from cache") + checkpoints_loaded.move_to_end(checkpoint_info) return checkpoints_loaded[checkpoint_info] print(f"Loading weights [{sd_model_hash}] from {checkpoint_info.filename}") -- cgit v1.2.3 From d9d94141dcfc1a84e98370bc137ffd888509b65e Mon Sep 17 00:00:00 2001 From: woweenie <145132974+woweenie@users.noreply.github.com> Date: Fri, 15 Sep 2023 18:59:44 +0200 Subject: patch DDPM.register_betas so that users can put given_betas in model yaml --- modules/sd_models.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 930d0bee..8e4983a4 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -7,7 +7,7 @@ import threading import torch import re import safetensors.torch -from omegaconf import OmegaConf +from omegaconf import OmegaConf, ListConfig from os import mkdir from urllib import request import ldm.modules.midas as midas @@ -17,6 +17,7 @@ from ldm.util import instantiate_from_config from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack from modules.timer import Timer import tomesd +import numpy as np model_dir = "Stable-diffusion" model_path = os.path.abspath(os.path.join(paths.models_path, model_dir)) @@ -132,6 +133,7 @@ def setup_model(): os.makedirs(model_path, exist_ok=True) enable_midas_autodownload() + patch_given_betas() def checkpoint_tiles(use_short=False): @@ -453,6 +455,17 @@ def enable_midas_autodownload(): midas.api.load_model = load_model_wrapper +def patch_given_betas(): + original_register_schedule = ldm.models.diffusion.ddpm.DDPM.register_schedule + def patched_register_schedule(*args, **kwargs): + if args[1] is not None and isinstance(args[1], ListConfig): + modified_args = list(args) # Convert args tuple to a list + modified_args[1] = np.array(args[1]) # Modify the desired element + args = tuple(modified_args) # Convert the list back to a tuple + original_register_schedule(*args, **kwargs) + ldm.models.diffusion.ddpm.DDPM.register_schedule = patched_register_schedule + + def repair_config(sd_config): if not hasattr(sd_config.model.params, "use_ema"): -- cgit 
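# Why move_to_end matters in the checkpoint-cache change above:
# checkpoints_loaded is an OrderedDict used as an LRU cache, and a plain
# dict lookup does not refresh an entry's position.
from collections import OrderedDict

cache = OrderedDict()
cache["a"] = 1
cache["b"] = 2
_ = cache["a"]            # a cache hit...
cache.move_to_end("a")    # ...must be moved manually to count as "recent"
assert next(iter(cache)) == "b"   # "b" is now the oldest, evicted first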
v1.2.3 From 663fb8797612b863b0f0b94496039ec2ac18701c Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 16 Sep 2023 09:05:42 +0900 Subject: Config states time ISO in system time zone --- modules/config_states.py | 3 +-- modules/ui_extensions.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/modules/config_states.py b/modules/config_states.py index b766aef1..651793c7 100644 --- a/modules/config_states.py +++ b/modules/config_states.py @@ -4,7 +4,6 @@ Supports saving and restoring webui and extensions from a known working set of c import os import json -import time import tqdm from datetime import datetime @@ -38,7 +37,7 @@ def list_config_states(): config_states = sorted(config_states, key=lambda cs: cs["created_at"], reverse=True) for cs in config_states: - timestamp = time.asctime(time.gmtime(cs["created_at"])) + timestamp = datetime.fromtimestamp(cs["created_at"]).strftime('%Y-%m-%d %H:%M:%S') name = cs.get("name", "Config") full_name = f"{name}: {timestamp}" all_config_states[full_name] = cs diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 2e8c1d6d..c0a73b57 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -197,7 +197,7 @@ def update_config_states_table(state_name): config_state = config_states.all_config_states[state_name] config_name = config_state.get("name", "Config") - created_date = time.asctime(time.gmtime(config_state["created_at"])) + created_date = datetime.fromtimestamp(config_state["created_at"]).strftime('%Y-%m-%d %H:%M:%S') filepath = config_state.get("filepath", "") try: -- cgit v1.2.3 From d2878a8b0b952c08d832e0308e575e352a1bc3f1 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 16 Sep 2023 09:49:53 +0900 Subject: XYZ if not Include Sub Grids do not save Sub Grid --- scripts/xyz_grid.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 939d8605..99ad96be 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -773,6 +773,8 @@ class Script(scripts.Script): # TODO: See previous comment about intentional data misalignment. 
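# The timestamp change above in isolation: datetime.fromtimestamp renders
# the stored creation time in the system time zone, where time.gmtime
# always produced UTC (the timestamp below is a hypothetical example).
from datetime import datetime

created_at = 1694246400.0
label = datetime.fromtimestamp(created_at).strftime('%Y-%m-%d %H:%M:%S')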
adj_g = g-1 if g > 0 else g images.save_image(processed.images[g], p.outpath_grids, "xyz_grid", info=processed.infotexts[g], extension=opts.grid_format, prompt=processed.all_prompts[adj_g], seed=processed.all_seeds[adj_g], grid=True, p=processed) + if not include_sub_grids: # if not include_sub_grids then skip saving after the first grid + break if not include_sub_grids: # Done with sub-grids, drop all related information: -- cgit v1.2.3 From 701feabf496b7ce0327ccdb1ef1dc942deab25ea Mon Sep 17 00:00:00 2001 From: Zolxys Date: Sun, 17 Sep 2023 11:37:15 -0500 Subject: Fix: --sd_model in "Promts from file or textbox" script is not working Fix for bug report #8079 --- scripts/prompts_from_file.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 50320d55..ca73b2a5 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -5,11 +5,17 @@ import shlex import modules.scripts as scripts import gradio as gr -from modules import sd_samplers, errors +from modules import sd_samplers, errors, sd_models from modules.processing import Processed, process_images from modules.shared import state +def process_model_tag(tag): + info = sd_models.get_closet_checkpoint_match(tag) + assert info is not None, f'Unknown checkpoint: {tag}' + return info.name + + def process_string_tag(tag): return tag @@ -27,7 +33,7 @@ def process_boolean_tag(tag): prompt_tags = { - "sd_model": None, + "sd_model": process_model_tag, "outpath_samples": process_string_tag, "outpath_grids": process_string_tag, "prompt_for_display": process_string_tag, @@ -156,7 +162,10 @@ class Script(scripts.Script): copy_p = copy.copy(p) for k, v in args.items(): - setattr(copy_p, k, v) + if k == "sd_model": + copy_p.override_settings['sd_model_checkpoint'] = v + else: + setattr(copy_p, k, v) proc = process_images(copy_p) images += proc.images -- cgit v1.2.3 From 8e355fbd7552f1a7f5124c4685d6fa36f3d0ede1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E7=A7=8B=E6=96=87/qwwang?= Date: Mon, 18 Sep 2023 16:45:42 +0800 Subject: fix --- modules/sd_models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/sd_models.py b/modules/sd_models.py index 6d17dd3c..eedb38c6 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -309,6 +309,7 @@ def get_checkpoint_state_dict(checkpoint_info: CheckpointInfo, timer): if checkpoint_info in checkpoints_loaded: # use checkpoint cache print(f"Loading weights [{sd_model_hash}] from cache") + # move to end as latest checkpoints_loaded.move_to_end(checkpoint_info) return checkpoints_loaded[checkpoint_info] -- cgit v1.2.3 From 702a1e1cc70240f2adbcfb707a644a5a98b5443c Mon Sep 17 00:00:00 2001 From: superhero-7 <537093830@qq.com> Date: Sat, 23 Sep 2023 17:51:41 +0800 Subject: support m18 --- configs/alt-diffusion-m18-inference.yaml | 73 ++++++++++++++ modules/sd_hijack.py | 6 +- modules/sd_models_config.py | 6 +- modules/xlmr_m18.py | 164 +++++++++++++++++++++++++++++++ 4 files changed, 244 insertions(+), 5 deletions(-) create mode 100644 configs/alt-diffusion-m18-inference.yaml create mode 100644 modules/xlmr_m18.py diff --git a/configs/alt-diffusion-m18-inference.yaml b/configs/alt-diffusion-m18-inference.yaml new file mode 100644 index 00000000..41a031d5 --- /dev/null +++ b/configs/alt-diffusion-m18-inference.yaml @@ -0,0 +1,73 @@ +model: + base_learning_rate: 1.0e-04 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + 
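# The --sd_model routing from the prompts-from-file fix above, as a small
# sketch: the tag is resolved to a known checkpoint name and applied through
# override_settings, so the processing pipeline switches the model and
# restores it afterwards, instead of a bare setattr on the processing object.
def apply_prompt_file_tag(copy_p, key, value):
    if key == "sd_model":
        copy_p.override_settings['sd_model_checkpoint'] = value
    else:
        setattr(copy_p, key, value)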
num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false # Note: different from the one we trained before + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + + scheduler_config: # 10000 warmup steps + target: ldm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [ 10000 ] + cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases + f_start: [ 1.e-6 ] + f_max: [ 1. ] + f_min: [ 1. ] + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_head_channels: 64 + use_spatial_transformer: True + use_linear_in_transformer: True + transformer_depth: 1 + context_dim: 1024 + use_checkpoint: True + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: modules.xlmr_m18.BertSeriesModelWithTransformation + params: + name: "XLMR-Large" diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 592f0055..ae9b2a65 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -5,7 +5,7 @@ from types import MethodType from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors, sd_unet from modules.hypernetworks import hypernetwork from modules.shared import cmd_opts -from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr +from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr, xlmr_m18 import ldm.modules.attention import ldm.modules.diffusionmodules.model @@ -208,11 +208,10 @@ class StableDiffusionModelHijack: else: m.cond_stage_model = conditioner - if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation: + if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation or type(m.cond_stage_model) == xlmr_m18.BertSeriesModelWithTransformation: model_embeddings = m.cond_stage_model.roberta.embeddings model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.word_embeddings, self) m.cond_stage_model = sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords(m.cond_stage_model, self) - elif type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenCLIPEmbedder: model_embeddings = m.cond_stage_model.transformer.text_model.embeddings model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self) @@ -258,7 +257,6 @@ class StableDiffusionModelHijack: if hasattr(m, 'cond_stage_model'): delattr(m, 'cond_stage_model') - elif type(m.cond_stage_model) == sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords: m.cond_stage_model = m.cond_stage_model.wrapped diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py index 08dd03f1..9ba89dfc 100644 --- a/modules/sd_models_config.py +++ b/modules/sd_models_config.py @@ -21,7 +21,7 @@ config_unopenclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-h-inf config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml") 
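# Condensed sketch of the dispatch rule the m18 support adds to the config
# guesser below: AltDiffusion checkpoints are recognized by their roberta
# embedding key, and the m18 variant by its 1024-wide projection (the key
# names follow the patch; shapes are read straight from the state dict).
def guess_alt_diffusion_config(sd):
    if sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight') is None:
        return None   # not an AltDiffusion checkpoint
    if sd['cond_stage_model.transformation.weight'].size()[0] == 1024:
        return "alt-diffusion-m18-inference.yaml"
    return "alt-diffusion-inference.yaml"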
config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml") config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml") - +config_alt_diffusion_m18 = os.path.join(sd_configs_path, "alt-diffusion-m18-inference.yaml") def is_using_v_parameterization_for_sd2(state_dict): """ @@ -95,7 +95,11 @@ def guess_model_config_from_state_dict(sd, filename): if diffusion_model_input.shape[1] == 8: return config_instruct_pix2pix + + # import pdb; pdb.set_trace() if sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight', None) is not None: + if sd.get('cond_stage_model.transformation.weight').size()[0] == 1024: + return config_alt_diffusion_m18 return config_alt_diffusion return config_default diff --git a/modules/xlmr_m18.py b/modules/xlmr_m18.py new file mode 100644 index 00000000..18785692 --- /dev/null +++ b/modules/xlmr_m18.py @@ -0,0 +1,164 @@ +from transformers import BertPreTrainedModel,BertModel,BertConfig +import torch.nn as nn +import torch +from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaConfig +from transformers import XLMRobertaModel,XLMRobertaTokenizer +from typing import Optional + +class BertSeriesConfig(BertConfig): + def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None,project_dim=512, pooler_fn="average",learn_encoder=False,model_type='bert',**kwargs): + + super().__init__(vocab_size, hidden_size, num_hidden_layers, num_attention_heads, intermediate_size, hidden_act, hidden_dropout_prob, attention_probs_dropout_prob, max_position_embeddings, type_vocab_size, initializer_range, layer_norm_eps, pad_token_id, position_embedding_type, use_cache, classifier_dropout, **kwargs) + self.project_dim = project_dim + self.pooler_fn = pooler_fn + self.learn_encoder = learn_encoder + +class RobertaSeriesConfig(XLMRobertaConfig): + def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2,project_dim=512,pooler_fn='cls',learn_encoder=False, **kwargs): + super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + self.project_dim = project_dim + self.pooler_fn = pooler_fn + self.learn_encoder = learn_encoder + + +class BertSeriesModelWithTransformation(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + config_class = BertSeriesConfig + + def __init__(self, config=None, **kargs): + # modify initialization for autoloading + if config is None: + config = XLMRobertaConfig() + config.attention_probs_dropout_prob= 0.1 + config.bos_token_id=0 + config.eos_token_id=2 + config.hidden_act='gelu' + config.hidden_dropout_prob=0.1 + config.hidden_size=1024 + config.initializer_range=0.02 + config.intermediate_size=4096 + config.layer_norm_eps=1e-05 + config.max_position_embeddings=514 + + config.num_attention_heads=16 + config.num_hidden_layers=24 + config.output_past=True + config.pad_token_id=1 + config.position_embedding_type= "absolute" + + config.type_vocab_size= 1 + config.use_cache=True + config.vocab_size= 250002 + config.project_dim = 1024 + config.learn_encoder = False + super().__init__(config) + self.roberta = 
XLMRobertaModel(config) + self.transformation = nn.Linear(config.hidden_size,config.project_dim) + # self.pre_LN=nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-large') + # self.pooler = lambda x: x[:,0] + # self.post_init() + + self.has_pre_transformation = True + if self.has_pre_transformation: + self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim) + self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.post_init() + + def encode(self,c): + device = next(self.parameters()).device + text = self.tokenizer(c, + truncation=True, + max_length=77, + return_length=False, + return_overflowing_tokens=False, + padding="max_length", + return_tensors="pt") + text["input_ids"] = torch.tensor(text["input_ids"]).to(device) + text["attention_mask"] = torch.tensor( + text['attention_mask']).to(device) + features = self(**text) + return features['projection_state'] + + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + ) : + r""" + """ + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + + outputs = self.roberta( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=True, + return_dict=return_dict, + ) + + # # last module outputs + # sequence_output = outputs[0] + + + # # project every module + # sequence_output_ln = self.pre_LN(sequence_output) + + # # pooler + # pooler_output = self.pooler(sequence_output_ln) + # pooler_output = self.transformation(pooler_output) + # projection_state = self.transformation(outputs.last_hidden_state) + + if self.has_pre_transformation: + sequence_output2 = outputs["hidden_states"][-2] + sequence_output2 = self.pre_LN(sequence_output2) + projection_state2 = self.transformation_pre(sequence_output2) + + return { + "projection_state": projection_state2, + "last_hidden_state": outputs.last_hidden_state, + "hidden_states": outputs.hidden_states, + "attentions": outputs.attentions, + } + else: + projection_state = self.transformation(outputs.last_hidden_state) + return { + "projection_state": projection_state, + "last_hidden_state": outputs.last_hidden_state, + "hidden_states": outputs.hidden_states, + "attentions": outputs.attentions, + } + + + # return { + # 'pooler_output':pooler_output, + # 'last_hidden_state':outputs.last_hidden_state, + # 'hidden_states':outputs.hidden_states, + # 'attentions':outputs.attentions, + # 'projection_state':projection_state, + # 'sequence_out': sequence_output + # } + + +class RobertaSeriesModelWithTransformation(BertSeriesModelWithTransformation): + base_model_prefix = 'roberta' + config_class= RobertaSeriesConfig \ No newline at end of file -- cgit v1.2.3 From f8f4ff2bb8f56877dede466934dd8ddf25c21063 Mon Sep 17 
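# The projection branch of the m18 encoder above, in brief: when
# has_pre_transformation is set, the model projects the penultimate hidden
# state (after a dedicated LayerNorm) rather than the last layer's output.
def project_m18(outputs, pre_LN, transformation_pre):
    hidden = outputs["hidden_states"][-2]    # second-to-last layer
    return transformation_pre(pre_LN(hidden))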
00:00:00 2001 From: superhero-7 <537093830@qq.com> Date: Sat, 23 Sep 2023 17:55:19 +0800 Subject: support altdiffusion-m18 --- modules/sd_hijack.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index ae9b2a65..4b36c0e9 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -212,6 +212,7 @@ class StableDiffusionModelHijack: model_embeddings = m.cond_stage_model.roberta.embeddings model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.word_embeddings, self) m.cond_stage_model = sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords(m.cond_stage_model, self) + elif type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenCLIPEmbedder: model_embeddings = m.cond_stage_model.transformer.text_model.embeddings model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self) @@ -257,6 +258,7 @@ class StableDiffusionModelHijack: if hasattr(m, 'cond_stage_model'): delattr(m, 'cond_stage_model') + elif type(m.cond_stage_model) == sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords: m.cond_stage_model = m.cond_stage_model.wrapped -- cgit v1.2.3 From fdecf813b63db4a4e49b92ebfdf705a0d4047287 Mon Sep 17 00:00:00 2001 From: ezt19 <136929737+ezt19@users.noreply.github.com> Date: Sat, 23 Sep 2023 20:41:28 +0000 Subject: Update dragdrop.js Fixing a problem when u cannot put two images and they are going into two different places for images. --- javascript/dragdrop.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js index 5803daea..d680daf5 100644 --- a/javascript/dragdrop.js +++ b/javascript/dragdrop.js @@ -119,7 +119,7 @@ window.addEventListener('paste', e => { } const firstFreeImageField = visibleImageFields - .filter(el => el.querySelector('input[type=file]'))?.[0]; + .filter(el => !el.querySelector('img'))?.[0]; dropReplaceImage( firstFreeImageField ? -- cgit v1.2.3 From d00f6dca2825c2a73bbc1a5c707be276d62acc6b Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Mon, 25 Sep 2023 22:08:24 -0600 Subject: Escape item names --- modules/ui_extra_networks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 063bd7b8..60b95f21 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -213,9 +213,9 @@ class ExtraNetworksPage: metadata_button = "" metadata = item.get("metadata") if metadata: - metadata_button = f"" + metadata_button = f"" - edit_button = f"
" + edit_button = f"
" local_path = "" filename = item.get("filename", "") -- cgit v1.2.3 From 99aa702015b2a7c6707081cc975cfd12c40d55c4 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Tue, 26 Sep 2023 21:08:55 -0600 Subject: Update card on correct tab --- javascript/extraNetworks.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index 493f31af..e927346c 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -335,7 +335,7 @@ function extraNetworksEditUserMetadata(event, tabname, extraPage, cardName) { function extraNetworksRefreshSingleCard(page, tabname, name) { requestGet("./sd_extra_networks/get-single-card", {page: page, tabname: tabname, name: name}, function(data) { if (data && data.html) { - var card = gradioApp().querySelector('.card[data-name=' + JSON.stringify(name) + ']'); // likely using the wrong stringify function + var card = gradioApp().querySelector(`#${tabname}_${page.replace(" ", "_")}_cards > .card[data-name="${name}"]`); var newDiv = document.createElement('DIV'); newDiv.innerHTML = data.html; -- cgit v1.2.3 From a69daae012458bbd3d2cc472dc757fd78090ae05 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Tue, 26 Sep 2023 22:02:52 -0600 Subject: Fix data-sort-name containing spaces --- modules/ui_extra_networks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 063bd7b8..799bd174 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -235,7 +235,7 @@ class ExtraNetworksPage: if search_only and shared.opts.extra_networks_hidden_models == "Never": return "" - sort_keys = " ".join([html.escape(f'data-sort-{k}={v}') for k, v in item.get("sort_keys", {}).items()]).strip() + sort_keys = " ".join([f'data-sort-{k}="{html.escape(str(v))}"' for k, v in item.get("sort_keys", {}).items()]).strip() args = { "background_image": background_image, -- cgit v1.2.3 From 30f4f25b2ed77464104f31c35b40f5994ffce67b Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Wed, 27 Sep 2023 10:21:14 +0300 Subject: Bump to torchsde==0.2.6 --- requirements_versions.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements_versions.txt b/requirements_versions.txt index f8ae1f38..7d27f2be 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -27,5 +27,5 @@ timm==0.9.2 tomesd==0.1.3 torch torchdiffeq==0.2.3 -torchsde==0.2.5 +torchsde==0.2.6 transformers==4.30.2 -- cgit v1.2.3 From ad3b8a1c41b8ab98b8b98c6364d13cb8b6d5fa88 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 30 Sep 2023 08:23:12 +0300 Subject: alternative solution to #13434 --- modules/restart.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/restart.py b/modules/restart.py index 18eacaf3..2dd6493b 100644 --- a/modules/restart.py +++ b/modules/restart.py @@ -14,7 +14,9 @@ def is_restartable() -> bool: def restart_program() -> None: """creates file tmp/restart and immediately stops the process, which webui.bat/webui.sh interpret as a command to start webui again""" - (Path(script_path) / "tmp" / "restart").touch() + tmpdir = Path(script_path) / "tmp" + tmpdir.mkdir(parents=True, exist_ok=True) + (tmpdir / "restart").touch() stop_program() -- cgit v1.2.3 From 87b50397a6da273fe0160016a209e4eb0cbf4a89 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 30 Sep 2023 09:11:31 +0300 Subject: add missing import, simplify code, use patches 
module for #13276 --- modules/sd_models.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index e3755253..5ef7aa13 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -14,7 +14,7 @@ import ldm.modules.midas as midas from ldm.util import instantiate_from_config -from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack +from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack, patches from modules.timer import Timer import tomesd import numpy as np @@ -130,6 +130,8 @@ except Exception: def setup_model(): + """called once at startup to do various one-time tasks related to SD models""" + os.makedirs(model_path, exist_ok=True) enable_midas_autodownload() @@ -458,14 +460,17 @@ def enable_midas_autodownload(): def patch_given_betas(): - original_register_schedule = ldm.models.diffusion.ddpm.DDPM.register_schedule + import ldm.models.diffusion.ddpm + def patched_register_schedule(*args, **kwargs): - if args[1] is not None and isinstance(args[1], ListConfig): - modified_args = list(args) # Convert args tuple to a list - modified_args[1] = np.array(args[1]) # Modify the desired element - args = tuple(modified_args) # Convert the list back to a tuple + """a modified version of register_schedule function that converts plain list from Omegaconf into numpy""" + + if isinstance(args[1], ListConfig): + args = (args[0], np.array(args[1]), *args[2:]) + original_register_schedule(*args, **kwargs) - ldm.models.diffusion.ddpm.DDPM.register_schedule = patched_register_schedule + + original_register_schedule = patches.patch(__name__, ldm.models.diffusion.ddpm.DDPM, 'register_schedule', patched_register_schedule) def repair_config(sd_config): -- cgit v1.2.3 From ab63054f95a7b7dfb2f0ffa88e805c8d2f950605 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 30 Sep 2023 09:34:50 +0300 Subject: write infotext to gif image as comment --- modules/images.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/images.py b/modules/images.py index 1e63118e..daf4eebe 100644 --- a/modules/images.py +++ b/modules/images.py @@ -561,6 +561,8 @@ def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_p }) piexif.insert(exif_bytes, filename) + elif extension.lower() == ".gif": + image.save(filename, format=image_format, comment=geninfo) else: image.save(filename, format=image_format, quality=opts.jpeg_quality) -- cgit v1.2.3 From 1cc7c4bfb31b80b6667154145f1455541951db18 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sat, 30 Sep 2023 01:09:09 -0600 Subject: Allow editing whitespace delimiters --- javascript/edit-attention.js | 3 ++- modules/shared_options.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index 8906c892..bc4ebed4 100644 --- a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -46,7 +46,8 @@ function keyupEditAttention(event) { function selectCurrentWord() { if (selectionStart !== selectionEnd) return false; - const delimiters = opts.keyedit_delimiters + " \r\n\t"; + let delimiters = opts.keyedit_delimiters.replace(/(^|[^\\])(\\\\)*\\$/, 
"$&\\").replace(/(^|[^\\])((\\\\)*")/g, "$1\\$2"); + delimiters = JSON.parse(`"${delimiters}"`); // seek backward until to find beggining while (!delimiters.includes(text[selectionStart - 1]) && selectionStart > 0) { diff --git a/modules/shared_options.py b/modules/shared_options.py index 00b273fa..a1f157c6 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -255,7 +255,7 @@ options_templates.update(options_section(('ui', "User interface"), { "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(), "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), + "keyedit_delimiters": OptionInfo(r".,\\/!?%^*;:{}=`~() \r\n\t", "Ctrl+up/down word delimiters"), "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(), -- cgit v1.2.3 From 5cc7bf387661354e5c268c0e8198cc44328b3282 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 30 Sep 2023 10:10:57 +0300 Subject: reword sd_checkpoint_dropdown_use_short setting and add explanation --- modules/shared_options.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 8e8d402d..61b93d47 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -262,7 +262,7 @@ options_templates.update(options_section(('ui', "User interface"), { "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(), "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(), "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(), - "sd_checkpoint_dropdown_use_short": OptionInfo(False, "Use short name for Stable Diffusion checkpoint dropdown"), + "sd_checkpoint_dropdown_use_short": OptionInfo(False, "Checkpoint dropdown: use filenames without paths").info("models in subdirectories like photo/sd15.ckpt will be listed as just sd15.ckpt"), "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(), "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(), "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), -- cgit v1.2.3 From b2f9709538ee40c6bbf11e3f17f7e3ea4b9cb78a Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 30 Sep 2023 10:29:10 +0300 Subject: get #13121 to work without restart --- 
modules/ui_extra_networks.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index d8c31142..59d6ecc6 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -1,3 +1,4 @@ +import functools import os.path import urllib.parse from pathlib import Path @@ -15,10 +16,16 @@ from modules.ui_components import ToolButton extra_pages = [] allowed_dirs = set() -allowed_preview_extensions = ["png", "jpg", "jpeg", "webp", "gif"] -if shared.opts.samples_format not in allowed_preview_extensions: - allowed_preview_extensions.append(shared.opts.samples_format) -allowed_preview_extensions_dot = ['.' + extension for extension in allowed_preview_extensions] +default_allowed_preview_extensions = ["png", "jpg", "jpeg", "webp", "gif"] + + +@functools.cache +def allowed_preview_extensions_with_extra(extra_extensions=None): + return set(default_allowed_preview_extensions) | set(extra_extensions or []) + + +def allowed_preview_extensions(): + return allowed_preview_extensions_with_extra((shared.opts.samples_format, )) def register_page(page): @@ -38,9 +45,9 @@ def fetch_file(filename: str = ""): if not any(Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs): raise ValueError(f"File cannot be fetched: {filename}. Must be in one of directories registered by extra pages.") - ext = os.path.splitext(filename)[1].lower() - if ext not in allowed_preview_extensions_dot: - raise ValueError(f"File cannot be fetched: {filename}. Only png, jpg, webp, and gif.") + ext = os.path.splitext(filename)[1].lower()[1:] + if ext not in allowed_preview_extensions(): + raise ValueError(f"File cannot be fetched: {filename}. Extensions allowed: {allowed_preview_extensions()}.") # would profit from returning 304 return FileResponse(filename, headers={"Accept-Ranges": "bytes"}) @@ -278,7 +285,7 @@ class ExtraNetworksPage: Find a preview PNG for a given path (without extension) and call link_preview on it. """ - potential_files = sum([[path + "." + ext, path + ".preview." + ext] for ext in allowed_preview_extensions], []) + potential_files = sum([[path + "." + ext, path + ".preview." 
+ ext] for ext in allowed_preview_extensions()], []) for file in potential_files: if os.path.isfile(file): -- cgit v1.2.3 From 0935d2c3047210b799cbc6f8ce15d3dffca95af7 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sat, 30 Sep 2023 18:37:44 -0600 Subject: Use checkboxes for whitespace delimiters --- javascript/edit-attention.js | 8 ++++++-- modules/shared_options.py | 3 ++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index bc4ebed4..943d81b0 100644 --- a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -46,8 +46,12 @@ function keyupEditAttention(event) { function selectCurrentWord() { if (selectionStart !== selectionEnd) return false; - let delimiters = opts.keyedit_delimiters.replace(/(^|[^\\])(\\\\)*\\$/, "$&\\").replace(/(^|[^\\])((\\\\)*")/g, "$1\\$2"); - delimiters = JSON.parse(`"${delimiters}"`); + const whitespace_delimiters = {"Tab": "\t", "Carriage Return": "\r", "Line Feed": "\n"}; + let delimiters = opts.keyedit_delimiters; + + for (let i of opts.keyedit_delimiters_whitespace) { + delimiters += whitespace_delimiters[i]; + } // seek backward until to find beggining while (!delimiters.includes(text[selectionStart - 1]) && selectionStart > 0) { diff --git a/modules/shared_options.py b/modules/shared_options.py index a1f157c6..717c948b 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -255,7 +255,8 @@ options_templates.update(options_section(('ui', "User interface"), { "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(), "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_delimiters": OptionInfo(r".,\\/!?%^*;:{}=`~() \r\n\t", "Ctrl+up/down word delimiters"), + "keyedit_delimiters": OptionInfo(r".,\/!?%^*;:{}=`~() ", "Ctrl+up/down word delimiters"), + "keyedit_delimiters_whitespace": OptionInfo(["Tab", "Carriage Return", "Line Feed"], "Ctrl+up/down whitespace delimiters", gr.CheckboxGroup, lambda: {"choices": ["Tab", "Carriage Return", "Line Feed"]}), "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(), -- cgit v1.2.3 From 0eb5fde2fd42184f431ccba5f25d714272e3e6b0 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sat, 30 Sep 2023 21:20:58 -0600 Subject: Remove unneeded code --- javascript/edit-attention.js | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index 943d81b0..df218c1e 100644 --- a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -18,22 +18,11 @@ function keyupEditAttention(event) { const before = text.substring(0, selectionStart); let beforeParen = before.lastIndexOf(OPEN); if (beforeParen == -1) return false; - let beforeParenClose = 
before.lastIndexOf(CLOSE); - while (beforeParenClose !== -1 && beforeParenClose > beforeParen) { - beforeParen = before.lastIndexOf(OPEN, beforeParen - 1); - beforeParenClose = before.lastIndexOf(CLOSE, beforeParenClose - 1); - } // Find closing parenthesis around current cursor const after = text.substring(selectionStart); let afterParen = after.indexOf(CLOSE); if (afterParen == -1) return false; - let afterParenOpen = after.indexOf(OPEN); - while (afterParenOpen !== -1 && afterParen > afterParenOpen) { - afterParen = after.indexOf(CLOSE, afterParen + 1); - afterParenOpen = after.indexOf(OPEN, afterParenOpen + 1); - } - if (beforeParen === -1 || afterParen === -1) return false; // Set the selection to the text between the parenthesis const parenContent = text.substring(beforeParen + 1, selectionStart + afterParen); @@ -53,7 +42,7 @@ function keyupEditAttention(event) { delimiters += whitespace_delimiters[i]; } - // seek backward until to find beggining + // seek backward to find beginning while (!delimiters.includes(text[selectionStart - 1]) && selectionStart > 0) { selectionStart--; } -- cgit v1.2.3 From 56ef5e9d48750fd43f9faba31ff67e64368153b7 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sat, 30 Sep 2023 21:44:05 -0600 Subject: Remove end parenthesis from weight --- javascript/edit-attention.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index df218c1e..794453bf 100644 --- a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -86,7 +86,7 @@ function keyupEditAttention(event) { } var end = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1; - var weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + 1 + end)); + var weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + end)); if (isNaN(weight)) return; weight += isPlus ? 
delta : -delta; -- cgit v1.2.3 From 2d947175b902d6838c803036d9757e7d3226b41d Mon Sep 17 00:00:00 2001 From: superhero-7 <537093830@qq.com> Date: Sun, 1 Oct 2023 12:25:19 +0800 Subject: fix linter issues --- modules/sd_hijack.py | 4 ++-- modules/sd_models_config.py | 3 +-- modules/xlmr_m18.py | 12 ++++++------ 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 4b36c0e9..0689699c 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -212,7 +212,7 @@ class StableDiffusionModelHijack: model_embeddings = m.cond_stage_model.roberta.embeddings model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.word_embeddings, self) m.cond_stage_model = sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords(m.cond_stage_model, self) - + elif type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenCLIPEmbedder: model_embeddings = m.cond_stage_model.transformer.text_model.embeddings model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self) @@ -258,7 +258,7 @@ class StableDiffusionModelHijack: if hasattr(m, 'cond_stage_model'): delattr(m, 'cond_stage_model') - + elif type(m.cond_stage_model) == sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords: m.cond_stage_model = m.cond_stage_model.wrapped diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py index 9ba89dfc..deab2f6e 100644 --- a/modules/sd_models_config.py +++ b/modules/sd_models_config.py @@ -95,8 +95,7 @@ def guess_model_config_from_state_dict(sd, filename): if diffusion_model_input.shape[1] == 8: return config_instruct_pix2pix - - # import pdb; pdb.set_trace() + if sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight', None) is not None: if sd.get('cond_stage_model.transformation.weight').size()[0] == 1024: return config_alt_diffusion_m18 diff --git a/modules/xlmr_m18.py b/modules/xlmr_m18.py index 18785692..a727e865 100644 --- a/modules/xlmr_m18.py +++ b/modules/xlmr_m18.py @@ -1,4 +1,4 @@ -from transformers import BertPreTrainedModel,BertModel,BertConfig +from transformers import BertPreTrainedModel,BertConfig import torch.nn as nn import torch from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaConfig @@ -28,7 +28,7 @@ class BertSeriesModelWithTransformation(BertPreTrainedModel): config_class = BertSeriesConfig def __init__(self, config=None, **kargs): - # modify initialization for autoloading + # modify initialization for autoloading if config is None: config = XLMRobertaConfig() config.attention_probs_dropout_prob= 0.1 @@ -80,7 +80,7 @@ class BertSeriesModelWithTransformation(BertPreTrainedModel): text["attention_mask"] = torch.tensor( text['attention_mask']).to(device) features = self(**text) - return features['projection_state'] + return features['projection_state'] def forward( self, @@ -147,8 +147,8 @@ class BertSeriesModelWithTransformation(BertPreTrainedModel): "hidden_states": outputs.hidden_states, "attentions": outputs.attentions, } - - + + # return { # 'pooler_output':pooler_output, # 'last_hidden_state':outputs.last_hidden_state, @@ -161,4 +161,4 @@ class BertSeriesModelWithTransformation(BertPreTrainedModel): class RobertaSeriesModelWithTransformation(BertSeriesModelWithTransformation): base_model_prefix = 'roberta' - config_class= RobertaSeriesConfig \ No newline at end of file + config_class= RobertaSeriesConfig -- cgit v1.2.3 From c7e810a9856641fbaf520976fde24c5536a4fd56 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 1 Oct 
2023 10:15:23 +0300 Subject: add onEdit function for js and rework token-counter.js to use it --- .eslintrc.js | 1 + javascript/token-counters.js | 26 +++++++++----------------- javascript/ui.js | 17 +++++++++++++++++ 3 files changed, 27 insertions(+), 17 deletions(-) diff --git a/.eslintrc.js b/.eslintrc.js index 4777c276..cf839769 100644 --- a/.eslintrc.js +++ b/.eslintrc.js @@ -74,6 +74,7 @@ module.exports = { create_submit_args: "readonly", restart_reload: "readonly", updateInput: "readonly", + onEdit: "readonly", //extraNetworks.js requestGet: "readonly", popup: "readonly", diff --git a/javascript/token-counters.js b/javascript/token-counters.js index 9d81a723..710345eb 100644 --- a/javascript/token-counters.js +++ b/javascript/token-counters.js @@ -1,10 +1,9 @@ -let promptTokenCountDebounceTime = 800; -let promptTokenCountTimeouts = {}; -var promptTokenCountUpdateFunctions = {}; +let promptTokenCountUpdateFunctions = {}; function update_txt2img_tokens(...args) { // Called from Gradio update_token_counter("txt2img_token_button"); + update_token_counter("txt2img_negative_token_button"); if (args.length == 2) { return args[0]; } @@ -14,6 +13,7 @@ function update_txt2img_tokens(...args) { function update_img2img_tokens(...args) { // Called from Gradio update_token_counter("img2img_token_button"); + update_token_counter("img2img_negative_token_button"); if (args.length == 2) { return args[0]; } @@ -21,16 +21,7 @@ function update_img2img_tokens(...args) { } function update_token_counter(button_id) { - if (opts.disable_token_counters) { - return; - } - if (promptTokenCountTimeouts[button_id]) { - clearTimeout(promptTokenCountTimeouts[button_id]); - } - promptTokenCountTimeouts[button_id] = setTimeout( - () => gradioApp().getElementById(button_id)?.click(), - promptTokenCountDebounceTime, - ); + promptTokenCountUpdateFunctions[button_id]?.(); } @@ -69,10 +60,11 @@ function setupTokenCounting(id, id_counter, id_button) { prompt.parentElement.insertBefore(counter, prompt); prompt.parentElement.style.position = "relative"; - promptTokenCountUpdateFunctions[id] = function() { - update_token_counter(id_button); - }; - textarea.addEventListener("input", promptTokenCountUpdateFunctions[id]); + func = onEdit(id, textarea, 800, function() { + gradioApp().getElementById(id_button)?.click(); + }); + promptTokenCountUpdateFunctions[id] = func; + promptTokenCountUpdateFunctions[id_button] = func; } function setupTokenCounters() { diff --git a/javascript/ui.js b/javascript/ui.js index bedcbf3e..aee0d1da 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -366,3 +366,20 @@ function switchWidthHeight(tabname) { updateInput(height); return []; } + + +var onEditTimers = {}; + +// calls func after afterMs milliseconds has passed since the input elem has been edited by user +function onEdit(editId, elem, afterMs, func) { + var edited = function() { + var existingTimer = onEditTimers[editId]; + if (existingTimer) clearTimeout(existingTimer); + + onEditTimers[editId] = setTimeout(func, afterMs); + }; + + elem.addEventListener("input", edited); + + return edited; +} -- cgit v1.2.3 From deeec0b34359b0cc4c93ee01d3c7bca9c2f609b8 Mon Sep 17 00:00:00 2001 From: Won-Kyu Park Date: Sun, 1 Oct 2023 00:25:53 +0900 Subject: fix fieldname regex to accept additional [-/] chars --- modules/generation_parameters_copypaste.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index d39f2eba..f1b0225e 100644 ---
a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -9,7 +9,7 @@ from modules.paths import data_path from modules import shared, ui_tempdir, script_callbacks, processing from PIL import Image -re_param_code = r'\s*([\w ]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)' +re_param_code = r'\s*(\w[\w -/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)' re_param = re.compile(re_param_code) re_imagesize = re.compile(r"^(\d+)x(\d+)$") re_hypernet_hash = re.compile("\(([0-9a-f]+)\)$") -- cgit v1.2.3 From c0113872c5f814cf8cf96deca541bffaf1af2568 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 1 Oct 2023 11:48:41 +0300 Subject: add search field to settings --- javascript/settings.js | 46 ++++++++++++++++++++++++++++++++++++++++++++ javascript/token-counters.js | 2 +- javascript/ui.js | 15 --------------- modules/ui_settings.py | 16 +++++++++++++-- style.css | 2 ++ 5 files changed, 63 insertions(+), 18 deletions(-) create mode 100644 javascript/settings.js diff --git a/javascript/settings.js b/javascript/settings.js new file mode 100644 index 00000000..7889cf1c --- /dev/null +++ b/javascript/settings.js @@ -0,0 +1,46 @@ +let settingsExcludeTabsFromShowAll = { + settings_tab_defaults: 1, + settings_tab_sysinfo: 1, + settings_tab_actions: 1, + settings_tab_licenses: 1, +}; + +function settingsShowAllTabs() { + gradioApp().querySelectorAll('#settings > div').forEach(function(elem) { + if (settingsExcludeTabsFromShowAll[elem.id]) return; + + elem.style.display = "block"; + }); +} + +function settingsShowOneTab() { + gradioApp().querySelector('#settings_show_one_page').click(); +} + +onUiLoaded(function() { + var edit = gradioApp().querySelector('#settings_search'); + var editTextarea = gradioApp().querySelector('#settings_search > label > input'); + var buttonShowAllPages = gradioApp().getElementById('settings_show_all_pages'); + var settings_tabs = gradioApp().querySelector('#settings div'); + + onEdit('settingsSearch', editTextarea, 250, function() { + var searchText = (editTextarea.value || "").trim(); + + gradioApp().querySelectorAll('#settings > div[id^=settings_] div[id^=column_settings_] > *').forEach(function(elem) { + var visible = elem.textContent.trim().indexOf(searchText) != -1; + elem.style.display = visible ? 
"" : "none"; + }); + + if (searchText != "") { + settingsShowAllTabs(); + } else { + settingsShowOneTab(); + } + }); + + settings_tabs.insertBefore(edit, settings_tabs.firstChild); + settings_tabs.appendChild(buttonShowAllPages); + + + buttonShowAllPages.addEventListener("click", settingsShowAllTabs); +}); diff --git a/javascript/token-counters.js b/javascript/token-counters.js index 710345eb..2ecc7d91 100644 --- a/javascript/token-counters.js +++ b/javascript/token-counters.js @@ -60,7 +60,7 @@ function setupTokenCounting(id, id_counter, id_button) { prompt.parentElement.insertBefore(counter, prompt); prompt.parentElement.style.position = "relative"; - func = onEdit(id, textarea, 800, function() { + var func = onEdit(id, textarea, 800, function() { gradioApp().getElementById(id_button)?.click(); }); promptTokenCountUpdateFunctions[id] = func; diff --git a/javascript/ui.js b/javascript/ui.js index aee0d1da..2e262602 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -263,21 +263,6 @@ onAfterUiUpdate(function() { json_elem.parentElement.style.display = "none"; setupTokenCounters(); - - var show_all_pages = gradioApp().getElementById('settings_show_all_pages'); - var settings_tabs = gradioApp().querySelector('#settings div'); - if (show_all_pages && settings_tabs) { - settings_tabs.appendChild(show_all_pages); - show_all_pages.onclick = function() { - gradioApp().querySelectorAll('#settings > div').forEach(function(elem) { - if (elem.id == "settings_tab_licenses") { - return; - } - - elem.style.display = "block"; - }); - }; - } }); onOptionsChanged(function() { diff --git a/modules/ui_settings.py b/modules/ui_settings.py index c6fe3604..74a3aef3 100644 --- a/modules/ui_settings.py +++ b/modules/ui_settings.py @@ -64,6 +64,9 @@ class UiSettings: quicksettings_list = None quicksettings_names = None text_settings = None + show_all_pages = None + show_one_page = None + search_input = None def run_settings(self, *args): changed = [] @@ -136,7 +139,7 @@ class UiSettings: gr.Group() current_tab = gr.TabItem(elem_id=f"settings_{elem_id}", label=text) current_tab.__enter__() - current_row = gr.Column(variant='compact') + current_row = gr.Column(elem_id=f"column_settings_{elem_id}", variant='compact') current_row.__enter__() previous_section = item.section @@ -183,7 +186,11 @@ class UiSettings: with gr.TabItem("Licenses", id="licenses", elem_id="settings_tab_licenses"): gr.HTML(shared.html("licenses.html"), elem_id="licenses") - gr.Button(value="Show all pages", elem_id="settings_show_all_pages") + self.show_all_pages = gr.Button(value="Show all pages", elem_id="settings_show_all_pages") + self.show_one_page = gr.Button(value="Show only one page", elem_id="settings_show_one_page", visible=False) + self.show_one_page.click(lambda: None) + + self.search_input = gr.Textbox(value="", elem_id="settings_search", max_lines=1, placeholder="Search...", show_label=False) self.text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False) @@ -313,3 +320,8 @@ class UiSettings: outputs=[self.component_dict[k] for k in component_keys], queue=False, ) + + def search(self, text): + print(text) + + return [gr.update(visible=text in (comp.label or "")) for comp in self.components] diff --git a/style.css b/style.css index 58eb29c1..eee50552 100644 --- a/style.css +++ b/style.css @@ -423,6 +423,7 @@ div#extras_scale_to_tab div.form{ #settings > div{ border: none; margin-left: 10em; + padding: 0 var(--spacing-xl); } #settings > div.tab-nav{ @@ -437,6 +438,7 @@ 
div#extras_scale_to_tab div.form{ border: none; text-align: left; white-space: initial; + padding: 4px; } #settings_result{ -- cgit v1.2.3 From f71e919ecb001c4d191b76a87477d6118de7be12 Mon Sep 17 00:00:00 2001 From: FluttyProger Date: Sun, 1 Oct 2023 18:06:48 +0300 Subject: Ability for extensions to return custom data via api in response.images --- modules/api/api.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/api/api.py b/modules/api/api.py index 905ef9c9..efedafa4 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -103,7 +103,8 @@ def decode_base64_to_image(encoding): def encode_pil_to_base64(image): with io.BytesIO() as output_bytes: - + if isinstance(image, str): + return image if opts.samples_format.lower() == 'png': use_metadata = False metadata = PngImagePlugin.PngInfo() -- cgit v1.2.3 From 3f763d41e8ff7f09f89adb00eec440f18566d260 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sun, 1 Oct 2023 22:38:27 -0600 Subject: Change denoising_strength default to None. --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index e124e7f0..061d9955 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -142,7 +142,7 @@ class StableDiffusionProcessing: overlay_images: list = None eta: float = None do_not_reload_embeddings: bool = False - denoising_strength: float = 0 + denoising_strength: float = None ddim_discretize: str = None s_min_uncond: float = None s_churn: float = None -- cgit v1.2.3 From 6ab0b65ed15730f590a3e530f00a2047f582b488 Mon Sep 17 00:00:00 2001 From: PermissionDenied7335 Date: Mon, 2 Oct 2023 15:43:59 +0800 Subject: Added an option not to enable venv --- webui.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/webui.sh b/webui.sh index 3d0f87ee..f3621903 100755 --- a/webui.sh +++ b/webui.sh @@ -4,12 +4,6 @@ # change the variables in webui-user.sh instead # ################################################# - -use_venv=1 -if [[ $venv_dir == "-" ]]; then - use_venv=0 -fi - SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) @@ -28,6 +22,12 @@ then source "$SCRIPT_DIR"/webui-user.sh fi +# If $venv_dir is "-", then disable venv support +use_venv=1 +if [[ $venv_dir == "-" ]]; then + use_venv=0 +fi + # Set defaults # Install directory without trailing slash if [[ -z "${install_dir}" ]] -- cgit v1.2.3 From c2279da52260410ac2b4f64b7de7b2866f3a07a1 Mon Sep 17 00:00:00 2001 From: Won-Kyu Park Date: Tue, 3 Oct 2023 01:16:41 +0900 Subject: fix re_param_code (regression bug PR #13458) --- modules/generation_parameters_copypaste.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index f1b0225e..0a606515 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -9,7 +9,7 @@ from modules.paths import data_path from modules import shared, ui_tempdir, script_callbacks, processing from PIL import Image -re_param_code = r'\s*(\w[\w -/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)' +re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)' re_param = re.compile(re_param_code) re_imagesize = re.compile(r"^(\d+)x(\d+)$") re_hypernet_hash = re.compile("\(([0-9a-f]+)\)$") -- cgit v1.2.3 From 86a46e81892c72cc50f9a37dfb1ca285f189134d Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Mon, 2 Oct 2023 22:22:15 -0600 Subject: Fix accidentally closing popup 
dialogs --- javascript/extraNetworks.js | 11 ++++------- style.css | 14 ++++++++++---- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index 493f31af..b1b85669 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -216,27 +216,24 @@ function extraNetworksSearchButton(tabs_id, event) { var globalPopup = null; var globalPopupInner = null; + function closePopup() { if (!globalPopup) return; - globalPopup.style.display = "none"; } + function popup(contents) { if (!globalPopup) { globalPopup = document.createElement('div'); - globalPopup.onclick = closePopup; globalPopup.classList.add('global-popup'); - + var close = document.createElement('div'); close.classList.add('global-popup-close'); - close.onclick = closePopup; + close.addEventListener("click", closePopup); close.title = "Close"; globalPopup.appendChild(close); globalPopupInner = document.createElement('div'); - globalPopupInner.onclick = function(event) { - event.stopPropagation(); return false; - }; globalPopupInner.classList.add('global-popup-inner'); globalPopup.appendChild(globalPopupInner); diff --git a/style.css b/style.css index fb4e2f1f..e034ecfd 100644 --- a/style.css +++ b/style.css @@ -581,7 +581,6 @@ table.popup-table .link{ width: 100%; height: 100%; overflow: auto; - background-color: rgba(20, 20, 20, 0.95); } .global-popup *{ @@ -590,9 +589,6 @@ table.popup-table .link{ .global-popup-close:before { content: "×"; -} - -.global-popup-close{ position: fixed; right: 0.25em; top: 0; @@ -601,10 +597,20 @@ table.popup-table .link{ font-size: 32pt; } +.global-popup-close{ + position: fixed; + left: 0; + top: 0; + width: 100%; + height: 100%; + background-color: rgba(20, 20, 20, 0.95); +} + .global-popup-inner{ display: inline-block; margin: auto; padding: 2em; + z-index: 1001; } /* fullpage image viewer */ -- cgit v1.2.3 From e5381320b99f657aadad0bf8f414108a96567b3c Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Mon, 2 Oct 2023 22:33:03 -0600 Subject: Lint --- javascript/extraNetworks.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index b1b85669..439e76a3 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -226,7 +226,7 @@ function popup(contents) { if (!globalPopup) { globalPopup = document.createElement('div'); globalPopup.classList.add('global-popup'); - + var close = document.createElement('div'); close.classList.add('global-popup-close'); close.addEventListener("click", closePopup); -- cgit v1.2.3 From 7d60076b8b275771a1aa98f017aff845ef68d964 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 3 Oct 2023 16:22:32 +0300 Subject: case-insensitive search for settings --- javascript/settings.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/javascript/settings.js b/javascript/settings.js index 7889cf1c..4e79ec00 100644 --- a/javascript/settings.js +++ b/javascript/settings.js @@ -24,10 +24,10 @@ onUiLoaded(function() { var settings_tabs = gradioApp().querySelector('#settings div'); onEdit('settingsSearch', editTextarea, 250, function() { - var searchText = (editTextarea.value || "").trim(); + var searchText = (editTextarea.value || "").trim().toLowerCase(); gradioApp().querySelectorAll('#settings > div[id^=settings_] div[id^=column_settings_] > *').forEach(function(elem) { - var visible = elem.textContent.trim().indexOf(searchText) != -1; + var visible = 
elem.textContent.trim().toLowerCase().indexOf(searchText) != -1; elem.style.display = visible ? "" : "none"; }); -- cgit v1.2.3 From 35fd24e857e625c090e9af3fdcef145b88bef436 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 3 Oct 2023 23:05:48 +0900 Subject: Less placeholder bug_report template --- .github/ISSUE_TEMPLATE/bug_report.yml | 44 +++++++---------------------------- 1 file changed, 8 insertions(+), 36 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index a423f052..5876e941 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -39,10 +39,7 @@ body: label: What happened? description: Tell us what happened in a very clear and simple way placeholder: | - I tried to use txt2img with the XYZ grid script, with DPM++ SDE, DPM++ 2M SDE samplers. - It should generate a grid of 2 images but I only got 1. - - (add screenshot or screen recording if necessary) + txt2img is not working as intended. validations: required: true - type: textarea @@ -51,9 +48,9 @@ body: label: Steps to reproduce the problem description: Please provide us with precise step by step instructions on how to reproduce the bug placeholder: | - 1. Go to txt2img tab, select XYZ grid script - 2. Set axis type to `Sampler`, and select DPM++ 2M SDE, DPM++ 3M SDE - 3. Set `Sampling steps` to 1, click generate button + 1. Go to ... + 2. Press ... + 3. ... validations: required: true - type: textarea @@ -62,8 +59,7 @@ body: label: What should have happened? description: Tell us what you think the normal behavior should be placeholder: | - It should generate a grid of 2 images. - This was working in webui version 1.x.x + WebUI should ... validations: required: true - type: dropdown @@ -97,37 +93,13 @@ body: label: Console logs description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after the bug occured. If it's very long, provide a link to pastebin or similar service. 
render: Shell - placeholder: | - generating image for xyz plot: UnboundLocalError - Traceback (most recent call last): - File "B:\GitHub\stable-diffusion-webui\scripts\xyz_grid.py", line 698, in cell - res = process_images(pc) - File "B:\GitHub\stable-diffusion-webui\modules\processing.py", line 732, in process_images - res = process_images_inner(p) - File "B:\GitHub\stable-diffusion-webui\modules\processing.py", line 867, in process_images_inner - samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) - File "B:\GitHub\stable-diffusion-webui\modules\processing.py", line 1140, in sample - samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) - File "B:\GitHub\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 235, in sample - samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) - File "B:\GitHub\stable-diffusion-webui\modules\sd_samplers_common.py", line 261, in launch_sampling - return func() - File "B:\GitHub\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 235, in - samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) - File "B:\GitHub\stable-diffusion-webui\venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context - return func(*args, **kwargs) - File "B:\GitHub\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\sampling.py", line 651, in sample_dpmpp_2m_sde - h_last = h - UnboundLocalError: local variable 'h' referenced before assignment validations: required: true - type: textarea id: misc attributes: label: Additional information - description: Please provide us with any relevant additional info or context. - placeholder: | + description: | + Please provide us with any relevant additional info or context. Examples: - I have updated the GPU driver recently. - I suspect the issue is caused by XXXXX. - I am using a VPN. +  I have updated my GPU driver recently. 
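The settings search a few commits above runs its filter through the onEdit helper introduced earlier in this series: every keystroke cancels the pending timer and re-arms it, so the DOM is only touched once typing pauses for the given interval. The same debouncing idea expressed in Python, as an illustrative sketch only (threading.Timer stands in for JavaScript's setTimeout, and the names are made up):

    import threading

    timers = {}

    def on_edit(edit_id, after_ms, func):
        # cancel any pending timer for this id, then re-arm it, so func
        # fires only after the input has been quiet for after_ms milliseconds
        if edit_id in timers:
            timers[edit_id].cancel()
        timers[edit_id] = threading.Timer(after_ms / 1000, func)
        timers[edit_id].start()

    on_edit("settingsSearch", 250, lambda: print("filter settings now"))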
-- cgit v1.2.3 From e34949be52a89af21b2bcb0c18ca8d834e9cb562 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Fri, 6 Oct 2023 22:49:33 -0600 Subject: Edit-attention fixes --- javascript/edit-attention.js | 56 +++++++++++++++++++++++++++----------------- 1 file changed, 35 insertions(+), 21 deletions(-) diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index 794453bf..b7f3215d 100644 --- a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -26,6 +26,7 @@ function keyupEditAttention(event) { // Set the selection to the text between the parenthesis const parenContent = text.substring(beforeParen + 1, selectionStart + afterParen); + if (!/.*:[\d.]+/.test(parenContent)) return false; const lastColon = parenContent.lastIndexOf(":"); selectionStart = beforeParen + 1; selectionEnd = selectionStart + lastColon; @@ -66,40 +67,53 @@ function keyupEditAttention(event) { var closeCharacter = ')'; var delta = opts.keyedit_precision_attention; - if (selectionStart > 0 && text[selectionStart - 1] == '<') { + if (selectionStart > 0 && /<.*:[\d.]+>/.test(text.slice(selectionStart - 1, selectionEnd + text.slice(selectionEnd).indexOf(">") + 1))) { closeCharacter = '>'; delta = opts.keyedit_precision_extra; - } else if (selectionStart == 0 || text[selectionStart - 1] != "(") { - + } else if (selectionStart > 0 && /\(.*\)|\[.*\]/.test(text.slice(selectionStart - 1, selectionEnd + 1))) { + closeCharacter = null; + if (isPlus) { + text = text.slice(0, selectionStart) + text[selectionStart - 1] + text.slice(selectionStart, selectionEnd) + text[selectionEnd] + text.slice(selectionEnd); + selectionStart++; + selectionEnd++; + } else { + text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + 1); + selectionStart--; + selectionEnd--; + } + } else if (selectionStart == 0 || !/\(.*:[\d.]+\)/.test(text.slice(selectionStart - 1, selectionEnd + text.slice(selectionEnd).indexOf(")") + 1))) { // do not include spaces at the end while (selectionEnd > selectionStart && text[selectionEnd - 1] == ' ') { - selectionEnd -= 1; + selectionEnd--; } + if (selectionStart == selectionEnd) { return; } text = text.slice(0, selectionStart) + "(" + text.slice(selectionStart, selectionEnd) + ":1.0)" + text.slice(selectionEnd); - selectionStart += 1; - selectionEnd += 1; + selectionStart++; + selectionEnd++; } - var end = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1; - var weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + end)); - if (isNaN(weight)) return; - - weight += isPlus ? delta : -delta; - weight = parseFloat(weight.toPrecision(12)); - if (String(weight).length == 1) weight += ".0"; - - if (closeCharacter == ')' && weight == 1) { - var endParenPos = text.substring(selectionEnd).indexOf(')'); - text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + endParenPos + 1); - selectionStart--; - selectionEnd--; - } else { - text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + end); + if (closeCharacter) { + var end = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1; + var weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + end)); + if (isNaN(weight)) return; + + weight += isPlus ? 
delta : -delta; + weight = parseFloat(weight.toPrecision(12)); + if (String(weight).length == 1) weight += ".0"; + + if (closeCharacter == ')' && weight == 1) { + var endParenPos = text.substring(selectionEnd).indexOf(')'); + text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + endParenPos + 1); + selectionStart--; + selectionEnd--; + } else { + text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + end); + } } target.focus(); -- cgit v1.2.3 From 76010a51ef1f3805a7487723599035bc2356c3fb Mon Sep 17 00:00:00 2001 From: wangqiuwen Date: Sat, 7 Oct 2023 15:36:01 +0800 Subject: up --- modules/sd_models.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index eedb38c6..3a060ab6 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -1,4 +1,5 @@ import collections +import copy import os.path import sys import gc @@ -309,8 +310,6 @@ def get_checkpoint_state_dict(checkpoint_info: CheckpointInfo, timer): if checkpoint_info in checkpoints_loaded: # use checkpoint cache print(f"Loading weights [{sd_model_hash}] from cache") - # move to end as latest - checkpoints_loaded.move_to_end(checkpoint_info) return checkpoints_loaded[checkpoint_info] print(f"Loading weights [{sd_model_hash}] from {checkpoint_info.filename}") @@ -352,12 +351,12 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer if model.is_sdxl: sd_models_xl.extend_sdxl(model) - model.load_state_dict(state_dict, strict=False) - timer.record("apply weights to model") - if shared.opts.sd_checkpoint_cache > 0: # cache newly loaded model - checkpoints_loaded[checkpoint_info] = state_dict + checkpoints_loaded[checkpoint_info] = copy.deepcopy(state_dict) + + model.load_state_dict(state_dict, strict=False) + timer.record("apply weights to model") del state_dict -- cgit v1.2.3 From 770ee23f18d12fb3b5627c636aa420f481e292ee Mon Sep 17 00:00:00 2001 From: wangqiuwen Date: Sat, 7 Oct 2023 15:38:50 +0800 Subject: reverst --- modules/sd_models.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/sd_models.py b/modules/sd_models.py index 3a060ab6..8d63e7f1 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -310,6 +310,8 @@ def get_checkpoint_state_dict(checkpoint_info: CheckpointInfo, timer): if checkpoint_info in checkpoints_loaded: # use checkpoint cache print(f"Loading weights [{sd_model_hash}] from cache") + # move to end as latest + checkpoints_loaded.move_to_end(checkpoint_info) return checkpoints_loaded[checkpoint_info] print(f"Loading weights [{sd_model_hash}] from {checkpoint_info.filename}") -- cgit v1.2.3 From 09a2da835ef9c3dd12f3d842fbd5dd9e19ded859 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sat, 7 Oct 2023 14:48:43 -0600 Subject: Add brackets, vertical bar to default delimiters --- modules/shared_options.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index ab9b0072..48243234 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -256,7 +256,7 @@ options_templates.update(options_section(('ui', "User interface"), { "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(), "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_precision_extra": 
OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_delimiters": OptionInfo(r".,\/!?%^*;:{}=`~() ", "Ctrl+up/down word delimiters"), + "keyedit_delimiters": OptionInfo(r".,\/!?%^*;:{}=`~()[]<>| ", "Ctrl+up/down word delimiters"), "keyedit_delimiters_whitespace": OptionInfo(["Tab", "Carriage Return", "Line Feed"], "Ctrl+up/down whitespace delimiters", gr.CheckboxGroup, lambda: {"choices": ["Tab", "Carriage Return", "Line Feed"]}), "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), -- cgit v1.2.3 From fd51b8501e1d9f9380c948cbf665c7708baef5d6 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sat, 7 Oct 2023 15:28:25 -0600 Subject: Fix multi-line selections --- javascript/edit-attention.js | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index b7f3215d..d7b6001b 100644 --- a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -26,7 +26,7 @@ function keyupEditAttention(event) { // Set the selection to the text between the parenthesis const parenContent = text.substring(beforeParen + 1, selectionStart + afterParen); - if (!/.*:[\d.]+/.test(parenContent)) return false; + if (!/.*:[\d.]+/s.test(parenContent)) return false; const lastColon = parenContent.lastIndexOf(":"); selectionStart = beforeParen + 1; selectionEnd = selectionStart + lastColon; @@ -67,10 +67,10 @@ function keyupEditAttention(event) { var closeCharacter = ')'; var delta = opts.keyedit_precision_attention; - if (selectionStart > 0 && /<.*:[\d.]+>/.test(text.slice(selectionStart - 1, selectionEnd + text.slice(selectionEnd).indexOf(">") + 1))) { + if (selectionStart > 0 && /<.*:[\d.]+>/s.test(text.slice(selectionStart - 1, selectionEnd + text.slice(selectionEnd).indexOf(">") + 1))) { closeCharacter = '>'; delta = opts.keyedit_precision_extra; - } else if (selectionStart > 0 && /\(.*\)|\[.*\]/.test(text.slice(selectionStart - 1, selectionEnd + 1))) { + } else if (selectionStart > 0 && /\(.*\)|\[.*\]/s.test(text.slice(selectionStart - 1, selectionEnd + 1))) { closeCharacter = null; if (isPlus) { text = text.slice(0, selectionStart) + text[selectionStart - 1] + text.slice(selectionStart, selectionEnd) + text[selectionEnd] + text.slice(selectionEnd); @@ -81,7 +81,7 @@ function keyupEditAttention(event) { selectionStart--; selectionEnd--; } - } else if (selectionStart == 0 || !/\(.*:[\d.]+\)/.test(text.slice(selectionStart - 1, selectionEnd + text.slice(selectionEnd).indexOf(")") + 1))) { + } else if (selectionStart == 0 || !/\(.*:[\d.]+\)/s.test(text.slice(selectionStart - 1, selectionEnd + text.slice(selectionEnd).indexOf(")") + 1))) { // do not include spaces at the end while (selectionEnd > selectionStart && text[selectionEnd - 1] == ' ') { selectionEnd--; -- cgit v1.2.3 From 3562b0dc7427e92828001cd713ff47a83255ccc8 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sat, 7 Oct 2023 15:52:16 -0600 Subject: Fix negative values --- javascript/edit-attention.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index d7b6001b..89b37aaf 100644 --- 
a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -26,7 +26,7 @@ function keyupEditAttention(event) { // Set the selection to the text between the parenthesis const parenContent = text.substring(beforeParen + 1, selectionStart + afterParen); - if (!/.*:[\d.]+/s.test(parenContent)) return false; + if (!/.*:-?[\d.]+/s.test(parenContent)) return false; const lastColon = parenContent.lastIndexOf(":"); selectionStart = beforeParen + 1; selectionEnd = selectionStart + lastColon; @@ -67,7 +67,7 @@ function keyupEditAttention(event) { var closeCharacter = ')'; var delta = opts.keyedit_precision_attention; - if (selectionStart > 0 && /<.*:[\d.]+>/s.test(text.slice(selectionStart - 1, selectionEnd + text.slice(selectionEnd).indexOf(">") + 1))) { + if (selectionStart > 0 && /<.*:-?[\d.]+>/s.test(text.slice(selectionStart - 1, selectionEnd + text.slice(selectionEnd).indexOf(">") + 1))) { closeCharacter = '>'; delta = opts.keyedit_precision_extra; } else if (selectionStart > 0 && /\(.*\)|\[.*\]/s.test(text.slice(selectionStart - 1, selectionEnd + 1))) { @@ -81,7 +81,7 @@ function keyupEditAttention(event) { selectionStart--; selectionEnd--; } - } else if (selectionStart == 0 || !/\(.*:[\d.]+\)/s.test(text.slice(selectionStart - 1, selectionEnd + text.slice(selectionEnd).indexOf(")") + 1))) { + } else if (selectionStart == 0 || !/\(.*:-?[\d.]+\)/s.test(text.slice(selectionStart - 1, selectionEnd + text.slice(selectionEnd).indexOf(")") + 1))) { // do not include spaces at the end while (selectionEnd > selectionStart && text[selectionEnd - 1] == ' ') { selectionEnd--; -- cgit v1.2.3 From 9821625a76177ebc8b62a1ee6d8ef39cf4805f99 Mon Sep 17 00:00:00 2001 From: Leon Date: Mon, 9 Oct 2023 18:36:48 +0800 Subject: fix the key error exception when adding an overwriting key which is defined in the extensions --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 36bc94f7..fee2440f 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -711,7 +711,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if p.scripts is not None: p.scripts.before_process(p) - stored_opts = {k: opts.data[k] for k in p.override_settings.keys() if k in opts.data} + stored_opts = {k: opts.data[k] if k in opts.data else opts.get_default(k) for k in p.override_settings.keys() if k in opts.data} try: # if no checkpoint override or the override checkpoint can't be found, remove override entry and load opts checkpoint -- cgit v1.2.3 From 2aa485b5afb13fd6aab79777e4dfc488591b2f1c Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Mon, 9 Oct 2023 22:52:09 +0800 Subject: add lora bundle system --- extensions-builtin/Lora/network.py | 1 + extensions-builtin/Lora/networks.py | 48 +++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py index d8e8dfb7..6021fd8d 100644 --- a/extensions-builtin/Lora/network.py +++ b/extensions-builtin/Lora/network.py @@ -93,6 +93,7 @@ class Network: # LoraModule self.unet_multiplier = 1.0 self.dyn_dim = None self.modules = {} + self.bundle_embeddings = {} self.mtime = None self.mentioned_name = None diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 315682b3..652b8ebe 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -15,6 +15,7 @@ import torch from 
typing import Union from modules import shared, devices, sd_models, errors, scripts, sd_hijack +from modules.textual_inversion.textual_inversion import Embedding module_types = [ network_lora.ModuleTypeLora(), @@ -149,9 +150,15 @@ def load_network(name, network_on_disk): is_sd2 = 'model_transformer_resblocks' in shared.sd_model.network_layer_mapping matched_networks = {} + bundle_embeddings = {} for key_network, weight in sd.items(): key_network_without_network_parts, network_part = key_network.split(".", 1) + if key_network_without_network_parts == "bundle_emb": + emb_name, vec_name = network_part.split(".", 1) + emb_dict = bundle_embeddings.get(emb_name, {}) + emb_dict[vec_name] = weight + bundle_embeddings[emb_name] = emb_dict key = convert_diffusers_name_to_compvis(key_network_without_network_parts, is_sd2) sd_module = shared.sd_model.network_layer_mapping.get(key, None) @@ -195,6 +202,8 @@ def load_network(name, network_on_disk): net.modules[key] = net_module + net.bundle_embeddings = bundle_embeddings + if keys_failed_to_match: logging.debug(f"Network {network_on_disk.filename} didn't match keys: {keys_failed_to_match}") @@ -210,11 +219,14 @@ def purge_networks_from_memory(): def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None): + emb_db = sd_hijack.model_hijack.embedding_db already_loaded = {} for net in loaded_networks: if net.name in names: already_loaded[net.name] = net + for emb_name in net.bundle_embeddings: + emb_db.register_embedding_by_name(None, shared.sd_model, emb_name) loaded_networks.clear() @@ -257,6 +269,41 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No net.dyn_dim = dyn_dims[i] if dyn_dims else 1.0 loaded_networks.append(net) + for emb_name, data in net.bundle_embeddings.items(): + # textual inversion embeddings + if 'string_to_param' in data: + param_dict = data['string_to_param'] + param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11 + assert len(param_dict) == 1, 'embedding file has multiple terms in it' + emb = next(iter(param_dict.items()))[1] + vec = emb.detach().to(devices.device, dtype=torch.float32) + shape = vec.shape[-1] + vectors = vec.shape[0] + elif type(data) == dict and 'clip_g' in data and 'clip_l' in data: # SDXL embedding + vec = {k: v.detach().to(devices.device, dtype=torch.float32) for k, v in data.items()} + shape = data['clip_g'].shape[-1] + data['clip_l'].shape[-1] + vectors = data['clip_g'].shape[0] + elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor: # diffuser concepts + assert len(data.keys()) == 1, 'embedding file has multiple terms in it' + + emb = next(iter(data.values())) + if len(emb.shape) == 1: + emb = emb.unsqueeze(0) + vec = emb.detach().to(devices.device, dtype=torch.float32) + shape = vec.shape[-1] + vectors = vec.shape[0] + else: + raise Exception(f"Couldn't identify {emb_name} in lora: {name} as neither textual inversion embedding nor diffuser concept.") + + embedding = Embedding(vec, emb_name) + embedding.vectors = vectors + embedding.shape = shape + + if emb_db.expected_shape == -1 or emb_db.expected_shape == embedding.shape: + emb_db.register_embedding(embedding, shared.sd_model) + else: + emb_db.skipped_embeddings[name] = embedding + if failed_to_load_networks: sd_hijack.model_hijack.comments.append("Networks not found: " + ", ".join(failed_to_load_networks)) @@ -565,6 +612,7 @@ extra_network_lora = None available_networks = {} available_network_aliases = {} 
loaded_networks = [] +loaded_bundle_embeddings = {} networks_in_memory = {} available_network_hash_lookup = {} forbidden_network_aliases = {} -- cgit v1.2.3 From 3d8b1af6beb9015f6b3573661d8ed00275f6129f Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Tue, 10 Oct 2023 12:09:33 +0800 Subject: Support string_to_param nested dict format: bundle_emb.EMBNAME.string_to_param.KEYNAME --- extensions-builtin/Lora/networks.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 652b8ebe..ab3517d8 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -157,7 +157,11 @@ def load_network(name, network_on_disk): if key_network_without_network_parts == "bundle_emb": emb_name, vec_name = network_part.split(".", 1) emb_dict = bundle_embeddings.get(emb_name, {}) - emb_dict[vec_name] = weight + if vec_name.split('.')[0] == 'string_to_param': + _, k2 = vec_name.split('.', 1) + emb_dict['string_to_param'] = {k2: weight} + else: + emb_dict[vec_name] = weight bundle_embeddings[emb_name] = emb_dict key = convert_diffusers_name_to_compvis(key_network_without_network_parts, is_sd2) @@ -301,6 +305,7 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No if emb_db.expected_shape == -1 or emb_db.expected_shape == embedding.shape: emb_db.register_embedding(embedding, shared.sd_model) + print(f'registered bundle embedding: {embedding.name}') else: emb_db.skipped_embeddings[name] = embedding -- cgit v1.2.3 From 2282eb8dd5905e8ed71231a0b8fc77599d10c12f Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Tue, 10 Oct 2023 12:11:00 +0800 Subject: Remove dev debug print --- extensions-builtin/Lora/networks.py | 1 - 1 file changed, 1 deletion(-) diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index ab3517d8..465e24c8 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -305,7 +305,6 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No if emb_db.expected_shape == -1 or emb_db.expected_shape == embedding.shape: emb_db.register_embedding(embedding, shared.sd_model) - print(f'registered bundle embedding: {embedding.name}') else: emb_db.skipped_embeddings[name] = embedding -- cgit v1.2.3 From 81e94de3185d42dba4e7bb72cf836f683f28b03f Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Tue, 10 Oct 2023 14:44:20 +0800 Subject: Add warning when meet emb name conflicting Choose standalone embedding (in /embeddings folder) first --- extensions-builtin/Lora/lora_logger.py | 33 ++++++++++++++ extensions-builtin/Lora/networks.py | 80 ++++++++++++++++++++-------------- 2 files changed, 81 insertions(+), 32 deletions(-) create mode 100644 extensions-builtin/Lora/lora_logger.py diff --git a/extensions-builtin/Lora/lora_logger.py b/extensions-builtin/Lora/lora_logger.py new file mode 100644 index 00000000..d50e90f0 --- /dev/null +++ b/extensions-builtin/Lora/lora_logger.py @@ -0,0 +1,33 @@ +import sys +import copy +import logging + + +class ColoredFormatter(logging.Formatter): + COLORS = { + "DEBUG": "\033[0;36m", # CYAN + "INFO": "\033[0;32m", # GREEN + "WARNING": "\033[0;33m", # YELLOW + "ERROR": "\033[0;31m", # RED + "CRITICAL": "\033[0;37;41m", # WHITE ON RED + "RESET": "\033[0m", # RESET COLOR + } + + def 
format(self, record): + colored_record = copy.copy(record) + levelname = colored_record.levelname + seq = self.COLORS.get(levelname, self.COLORS["RESET"]) + colored_record.levelname = f"{seq}{levelname}{self.COLORS['RESET']}" + return super().format(colored_record) + + +logger = logging.getLogger("lora") +logger.propagate = False + + +if not logger.handlers: + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter( + ColoredFormatter("[%(name)s]-%(levelname)s: %(message)s") + ) + logger.addHandler(handler) \ No newline at end of file diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 465e24c8..12f70576 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -17,6 +17,8 @@ from typing import Union from modules import shared, devices, sd_models, errors, scripts, sd_hijack from modules.textual_inversion.textual_inversion import Embedding +from lora_logger import logger + module_types = [ network_lora.ModuleTypeLora(), network_hada.ModuleTypeHada(), @@ -206,7 +208,40 @@ def load_network(name, network_on_disk): net.modules[key] = net_module - net.bundle_embeddings = bundle_embeddings + embeddings = {} + for emb_name, data in bundle_embeddings.items(): + # textual inversion embeddings + if 'string_to_param' in data: + param_dict = data['string_to_param'] + param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11 + assert len(param_dict) == 1, 'embedding file has multiple terms in it' + emb = next(iter(param_dict.items()))[1] + vec = emb.detach().to(devices.device, dtype=torch.float32) + shape = vec.shape[-1] + vectors = vec.shape[0] + elif type(data) == dict and 'clip_g' in data and 'clip_l' in data: # SDXL embedding + vec = {k: v.detach().to(devices.device, dtype=torch.float32) for k, v in data.items()} + shape = data['clip_g'].shape[-1] + data['clip_l'].shape[-1] + vectors = data['clip_g'].shape[0] + elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor: # diffuser concepts + assert len(data.keys()) == 1, 'embedding file has multiple terms in it' + + emb = next(iter(data.values())) + if len(emb.shape) == 1: + emb = emb.unsqueeze(0) + vec = emb.detach().to(devices.device, dtype=torch.float32) + shape = vec.shape[-1] + vectors = vec.shape[0] + else: + raise Exception(f"Couldn't identify {emb_name} in lora: {name} as neither textual inversion embedding nor diffuser concept.") + + embedding = Embedding(vec, emb_name) + embedding.vectors = vectors + embedding.shape = shape + embedding.loaded = None + embeddings[emb_name] = embedding + + net.bundle_embeddings = embeddings if keys_failed_to_match: logging.debug(f"Network {network_on_disk.filename} didn't match keys: {keys_failed_to_match}") @@ -229,8 +264,9 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No for net in loaded_networks: if net.name in names: already_loaded[net.name] = net - for emb_name in net.bundle_embeddings: - emb_db.register_embedding_by_name(None, shared.sd_model, emb_name) + for emb_name, embedding in net.bundle_embeddings.items(): + if embedding.loaded: + emb_db.register_embedding_by_name(None, shared.sd_model, emb_name) loaded_networks.clear() @@ -273,37 +309,17 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No net.dyn_dim = dyn_dims[i] if dyn_dims else 1.0 loaded_networks.append(net) - for emb_name, data in net.bundle_embeddings.items(): - # textual inversion embeddings - if 
'string_to_param' in data: - param_dict = data['string_to_param'] - param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11 - assert len(param_dict) == 1, 'embedding file has multiple terms in it' - emb = next(iter(param_dict.items()))[1] - vec = emb.detach().to(devices.device, dtype=torch.float32) - shape = vec.shape[-1] - vectors = vec.shape[0] - elif type(data) == dict and 'clip_g' in data and 'clip_l' in data: # SDXL embedding - vec = {k: v.detach().to(devices.device, dtype=torch.float32) for k, v in data.items()} - shape = data['clip_g'].shape[-1] + data['clip_l'].shape[-1] - vectors = data['clip_g'].shape[0] - elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor: # diffuser concepts - assert len(data.keys()) == 1, 'embedding file has multiple terms in it' - - emb = next(iter(data.values())) - if len(emb.shape) == 1: - emb = emb.unsqueeze(0) - vec = emb.detach().to(devices.device, dtype=torch.float32) - shape = vec.shape[-1] - vectors = vec.shape[0] - else: - raise Exception(f"Couldn't identify {emb_name} in lora: {name} as neither textual inversion embedding nor diffuser concept.") - - embedding = Embedding(vec, emb_name) - embedding.vectors = vectors - embedding.shape = shape + for emb_name, embedding in net.bundle_embeddings.items(): + if embedding.loaded is None and emb_name in emb_db.word_embeddings: + logger.warning( + f'Skip bundle embedding: "{emb_name}"' + ' as it was already loaded from embeddings folder' + ) + continue + embedding.loaded = False if emb_db.expected_shape == -1 or emb_db.expected_shape == embedding.shape: + embedding.loaded = True emb_db.register_embedding(embedding, shared.sd_model) else: emb_db.skipped_embeddings[name] = embedding -- cgit v1.2.3 From 891ccb767c3815db48a124677d1cd0f204018ad4 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Tue, 10 Oct 2023 15:07:25 +0800 Subject: Fix lint --- extensions-builtin/Lora/lora_logger.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extensions-builtin/Lora/lora_logger.py b/extensions-builtin/Lora/lora_logger.py index d50e90f0..d51de297 100644 --- a/extensions-builtin/Lora/lora_logger.py +++ b/extensions-builtin/Lora/lora_logger.py @@ -30,4 +30,4 @@ if not logger.handlers: handler.setFormatter( ColoredFormatter("[%(name)s]-%(levelname)s: %(message)s") ) - logger.addHandler(handler) \ No newline at end of file + logger.addHandler(handler) -- cgit v1.2.3 From dbb10fbd8c2dd4f3ca83a1d2e15e188799074ce4 Mon Sep 17 00:00:00 2001 From: Won-Kyu Park Date: Sun, 1 Oct 2023 20:18:25 +0900 Subject: show the preview image in the modalview if available --- javascript/imageviewer.js | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js index c21d396e..e4dae91b 100644 --- a/javascript/imageviewer.js +++ b/javascript/imageviewer.js @@ -33,8 +33,11 @@ function updateOnBackgroundChange() { const modalImage = gradioApp().getElementById("modalImage"); if (modalImage && modalImage.offsetParent) { let currentButton = selected_gallery_button(); - - if (currentButton?.children?.length > 0 && modalImage.src != currentButton.children[0].src) { + let preview = gradioApp().querySelectorAll('.livePreview > img'); + if (preview.length > 0) { + // show preview image if available + modalImage.src = preview[preview.length - 1].src; + } else if (currentButton?.children?.length > 0 && modalImage.src != 
currentButton.children[0].src) { modalImage.src = currentButton.children[0].src; if (modalImage.style.display === 'none') { const modal = gradioApp().getElementById("lightboxModal"); -- cgit v1.2.3 From 906d1179e9a333eeb0f12a95b592dd5b44eb0aaa Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Wed, 11 Oct 2023 21:26:58 -0700 Subject: support inference with LyCORIS GLora networks --- extensions-builtin/Lora/network_glora.py | 33 ++++++++++++++++++++++++++++++++ extensions-builtin/Lora/networks.py | 2 ++ 2 files changed, 35 insertions(+) create mode 100644 extensions-builtin/Lora/network_glora.py diff --git a/extensions-builtin/Lora/network_glora.py b/extensions-builtin/Lora/network_glora.py new file mode 100644 index 00000000..492d4870 --- /dev/null +++ b/extensions-builtin/Lora/network_glora.py @@ -0,0 +1,33 @@ + +import network + +class ModuleTypeGLora(network.ModuleType): + def create_module(self, net: network.Network, weights: network.NetworkWeights): + if all(x in weights.w for x in ["a1.weight", "a2.weight", "alpha", "b1.weight", "b2.weight"]): + return NetworkModuleGLora(net, weights) + + return None + +# adapted from https://github.com/KohakuBlueleaf/LyCORIS +class NetworkModuleGLora(network.NetworkModule): + def __init__(self, net: network.Network, weights: network.NetworkWeights): + super().__init__(net, weights) + + if hasattr(self.sd_module, 'weight'): + self.shape = self.sd_module.weight.shape + + self.w1a = weights.w["a1.weight"] + self.w1b = weights.w["b1.weight"] + self.w2a = weights.w["a2.weight"] + self.w2b = weights.w["b2.weight"] + + def calc_updown(self, orig_weight): + w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype) + w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype) + w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype) + w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype) + + output_shape = [w1a.size(0), w1b.size(1)] + updown = ((w2b @ w1b) + ((orig_weight @ w2a) @ w1a)) + + return self.finalize_updown(updown, orig_weight, output_shape) diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 315682b3..ddab3c55 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -5,6 +5,7 @@ import re import lora_patches import network import network_lora +import network_glora import network_hada import network_ia3 import network_lokr @@ -23,6 +24,7 @@ module_types = [ network_lokr.ModuleTypeLokr(), network_full.ModuleTypeFull(), network_norm.ModuleTypeNorm(), + network_glora.ModuleTypeGLora(), ] -- cgit v1.2.3 From fbc8d213546047d8970b92809e15b33e8a1301be Mon Sep 17 00:00:00 2001 From: Won-Kyu Park Date: Sat, 14 Oct 2023 02:39:04 +0900 Subject: fix "IndexError: list index out of range" when interrupted during postprocess --- modules/processing.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/processing.py b/modules/processing.py index 36bc94f7..df037fb0 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -820,6 +820,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: state.skipped = False if state.interrupted: + infotexts.append(Processed(p, []).infotext(p, 0)) break sd_models.reload_model_weights() # model can be changed for example by refiner -- cgit v1.2.3 From 44d14bc32e4a5501df04f844a00f9e18b777f7eb Mon Sep 17 00:00:00 2001 From: Gleb Alekseev Date: Fri, 13 Oct 2023 15:08:59 -0300 Subject: added option to play notification sound or not --- modules/shared_options.py | 1 + 
modules/ui.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 00b273fa..afcbf9b8 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -22,6 +22,7 @@ restricted_opts = { } options_templates.update(options_section(('saving-images', "Saving images/grids"), { + "notification_audio": OptionInfo(True, "Play notification sound after image generation", comment_after="(notification.mp3 should be present in the root directory)").needs_reload_ui(), "samples_save": OptionInfo(True, "Always save all generated images"), "samples_format": OptionInfo('png', 'File format for images'), "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), diff --git a/modules/ui.py b/modules/ui.py index 579bab98..df327891 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1286,7 +1286,7 @@ def create_ui(): loadsave.setup_ui() - if os.path.exists(os.path.join(script_path, "notification.mp3")): + if os.path.exists(os.path.join(script_path, "notification.mp3")) and shared.opts.notification_audio: gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) footer = shared.html("footer.html") -- cgit v1.2.3 From 954499a49409582085ed288b94b837ecae7ff86c Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Fri, 13 Oct 2023 16:44:01 -0600 Subject: Convert (emphasis) to (emphasis:1.1) per @SirVeggie's suggestion --- javascript/edit-attention.js | 55 ++++++++++++++++++++++++-------------------- 1 file changed, 30 insertions(+), 25 deletions(-) diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index 89b37aaf..ee48eb99 100644 --- a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -71,16 +71,23 @@ function keyupEditAttention(event) { closeCharacter = '>'; delta = opts.keyedit_precision_extra; } else if (selectionStart > 0 && /\(.*\)|\[.*\]/s.test(text.slice(selectionStart - 1, selectionEnd + 1))) { - closeCharacter = null; - if (isPlus) { - text = text.slice(0, selectionStart) + text[selectionStart - 1] + text.slice(selectionStart, selectionEnd) + text[selectionEnd] + text.slice(selectionEnd); - selectionStart++; - selectionEnd++; + let start = text[selectionStart - 1]; + let end = text[selectionEnd]; + let numParen = 0; + + while (text[selectionStart - numParen - 1] == start && text[selectionEnd + numParen] == end) { + numParen++; + } + + if (start == "(") { + weight = 1.1 ** numParen; } else { - text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + 1); - selectionStart--; - selectionEnd--; + weight = 0.9 ** numParen; } + + text = text.slice(0, selectionStart - numParen) + "(" + text.slice(selectionStart, selectionEnd) + ":" + weight + ")" + text.slice(selectionEnd + numParen); + selectionStart -= numParen - 1; + selectionEnd -= numParen - 1; } else if (selectionStart == 0 || !/\(.*:-?[\d.]+\)/s.test(text.slice(selectionStart - 1, selectionEnd + text.slice(selectionEnd).indexOf(")") + 1))) { // do not include spaces at the end while (selectionEnd > selectionStart && text[selectionEnd - 1] == ' ') { @@ -97,23 +104,21 @@ function keyupEditAttention(event) { selectionEnd++; } - if (closeCharacter) { - var end = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1; - var weight = parseFloat(text.slice(selectionEnd + 
1, selectionEnd + end)); - if (isNaN(weight)) return; - - weight += isPlus ? delta : -delta; - weight = parseFloat(weight.toPrecision(12)); - if (String(weight).length == 1) weight += ".0"; - - if (closeCharacter == ')' && weight == 1) { - var endParenPos = text.substring(selectionEnd).indexOf(')'); - text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + endParenPos + 1); - selectionStart--; - selectionEnd--; - } else { - text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + end); - } + var end = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1; + var weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + end)); + if (isNaN(weight)) return; + + weight += isPlus ? delta : -delta; + weight = parseFloat(weight.toPrecision(12)); + if (Number.isInteger(weight)) weight += ".0"; + + if (closeCharacter == ')' && weight == 1) { + var endParenPos = text.substring(selectionEnd).indexOf(')'); + text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + endParenPos + 1); + selectionStart--; + selectionEnd--; + } else { + text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + end); } target.focus(); -- cgit v1.2.3 From fff1a0c74fb769701ade393cda005bf5975ec5f4 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Fri, 13 Oct 2023 17:18:02 -0600 Subject: Make attention conversion optional Fix square brackets multiplier --- javascript/edit-attention.js | 71 +++++++++++++++++++++++++++----------------- modules/shared_options.py | 2 +- 2 files changed, 44 insertions(+), 29 deletions(-) diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index ee48eb99..8b6dd240 100644 --- a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -73,21 +73,34 @@ function keyupEditAttention(event) { } else if (selectionStart > 0 && /\(.*\)|\[.*\]/s.test(text.slice(selectionStart - 1, selectionEnd + 1))) { let start = text[selectionStart - 1]; let end = text[selectionEnd]; - let numParen = 0; - - while (text[selectionStart - numParen - 1] == start && text[selectionEnd + numParen] == end) { - numParen++; - } - - if (start == "(") { - weight = 1.1 ** numParen; + if (opts.keyedit_convert) { + let numParen = 0; + + while (text[selectionStart - numParen - 1] == start && text[selectionEnd + numParen] == end) { + numParen++; + } + + if (start == "(") { + weight = 1.1 ** numParen; + } else { + weight = (1 / 1.1) ** numParen; + } + + text = text.slice(0, selectionStart - numParen) + "(" + text.slice(selectionStart, selectionEnd) + ":" + weight + ")" + text.slice(selectionEnd + numParen); + selectionStart -= numParen - 1; + selectionEnd -= numParen - 1; } else { - weight = 0.9 ** numParen; + closeCharacter = null; + if (isPlus) { + text = text.slice(0, selectionStart) + start + text.slice(selectionStart, selectionEnd) + end + text.slice(selectionEnd); + selectionStart++; + selectionEnd++; + } else { + text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + 1); + selectionStart--; + selectionEnd--; + } } - - text = text.slice(0, selectionStart - numParen) + "(" + text.slice(selectionStart, selectionEnd) + ":" + weight + ")" + text.slice(selectionEnd + numParen); - selectionStart -= numParen - 1; - selectionEnd -= numParen - 1; } else if (selectionStart == 0 || !/\(.*:-?[\d.]+\)/s.test(text.slice(selectionStart - 1, selectionEnd + text.slice(selectionEnd).indexOf(")") + 1))) { // do not 
include spaces at the end while (selectionEnd > selectionStart && text[selectionEnd - 1] == ' ') { @@ -104,21 +117,23 @@ function keyupEditAttention(event) { selectionEnd++; } - var end = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1; - var weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + end)); - if (isNaN(weight)) return; - - weight += isPlus ? delta : -delta; - weight = parseFloat(weight.toPrecision(12)); - if (Number.isInteger(weight)) weight += ".0"; - - if (closeCharacter == ')' && weight == 1) { - var endParenPos = text.substring(selectionEnd).indexOf(')'); - text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + endParenPos + 1); - selectionStart--; - selectionEnd--; - } else { - text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + end); + if (closeCharacter) { + var end = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1; + var weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + end)); + if (isNaN(weight)) return; + + weight += isPlus ? delta : -delta; + weight = parseFloat(weight.toPrecision(12)); + if (Number.isInteger(weight)) weight += ".0"; + + if (closeCharacter == ')' && weight == 1) { + var endParenPos = text.substring(selectionEnd).indexOf(')'); + text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + endParenPos + 1); + selectionStart--; + selectionEnd--; + } else { + text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + end); + } } target.focus(); diff --git a/modules/shared_options.py b/modules/shared_options.py index 48243234..a674d3da 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -258,6 +258,7 @@ options_templates.update(options_section(('ui', "User interface"), { "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_delimiters": OptionInfo(r".,\/!?%^*;:{}=`~()[]<>| ", "Ctrl+up/down word delimiters"), "keyedit_delimiters_whitespace": OptionInfo(["Tab", "Carriage Return", "Line Feed"], "Ctrl+up/down whitespace delimiters", gr.CheckboxGroup, lambda: {"choices": ["Tab", "Carriage Return", "Line Feed"]}), + "keyedit_convert": OptionInfo(True, "Convert (attention) to (attention:1.1)"), "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(), @@ -332,4 +333,3 @@ options_templates.update(options_section((None, "Hidden options"), { "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"), "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), })) - -- cgit v1.2.3 From 3a66c3c9e1cecab5095f37964d40a5f4cde317af Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 14 Oct 2023 07:35:06 +0300 Subject: put notification.mp3 option at the end of the page --- modules/shared_options.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/shared_options.py 
b/modules/shared_options.py index cb356638..ce395302 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -22,7 +22,6 @@ restricted_opts = { } options_templates.update(options_section(('saving-images', "Saving images/grids"), { - "notification_audio": OptionInfo(True, "Play notification sound after image generation", comment_after="(notification.mp3 should be present in the root directory)").needs_reload_ui(), "samples_save": OptionInfo(True, "Always save all generated images"), "samples_format": OptionInfo('png', 'File format for images'), "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), @@ -63,6 +62,8 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), "save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that has been interrupted in mid-generation; even if not saved, they will still show up in webui output."), + + "notification_audio": OptionInfo(True, "Play notification sound after image generation").info("notification.mp3 should be present in the root directory").needs_reload_ui(), })) options_templates.update(options_section(('saving-paths', "Paths for saving"), { -- cgit v1.2.3 From a109c7aeb8871fe0ae201794f140f8f2e9b5c3ac Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 14 Oct 2023 07:49:03 +0300 Subject: more general case of adding an infotext when no images have been generated --- modules/processing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index df037fb0..816f5fc7 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -820,7 +820,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: state.skipped = False if state.interrupted: - infotexts.append(Processed(p, []).infotext(p, 0)) break sd_models.reload_model_weights() # model can be changed for example by refiner @@ -961,6 +960,9 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: state.nextjob() + if not infotexts: + infotexts.append(Processed(p, []).infotext(p, 0)) + p.color_corrections = None index_of_first_image = 0 -- cgit v1.2.3 From 0619df9835833079f8ba5cb5a510b55c4433acaf Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 14 Oct 2023 08:01:04 +0300 Subject: use shallow copy for #13535 --- modules/sd_models.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 2b43868e..c8efeedc 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -1,5 +1,4 @@ import collections -import copy import os.path import sys import gc @@ -360,7 +359,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer if shared.opts.sd_checkpoint_cache > 0: # cache newly loaded model - checkpoints_loaded[checkpoint_info] = copy.deepcopy(state_dict) + checkpoints_loaded[checkpoint_info] = state_dict.copy() model.load_state_dict(state_dict, strict=False) timer.record("apply weights to model") -- cgit v1.2.3 From a8cbe50c9fa324ed887089e4333452ecc4355c92 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 14 Oct 2023 12:14:56 +0300 Subject: remove duplicated code --- 
extensions-builtin/Lora/networks.py | 31 +---------- modules/textual_inversion/textual_inversion.py | 74 ++++++++++++++------------ 2 files changed, 42 insertions(+), 63 deletions(-) diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 12f70576..d5f0f9f1 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -15,7 +15,7 @@ import torch from typing import Union from modules import shared, devices, sd_models, errors, scripts, sd_hijack -from modules.textual_inversion.textual_inversion import Embedding +import modules.textual_inversion.textual_inversion as textual_inversion from lora_logger import logger @@ -210,34 +210,7 @@ def load_network(name, network_on_disk): embeddings = {} for emb_name, data in bundle_embeddings.items(): - # textual inversion embeddings - if 'string_to_param' in data: - param_dict = data['string_to_param'] - param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11 - assert len(param_dict) == 1, 'embedding file has multiple terms in it' - emb = next(iter(param_dict.items()))[1] - vec = emb.detach().to(devices.device, dtype=torch.float32) - shape = vec.shape[-1] - vectors = vec.shape[0] - elif type(data) == dict and 'clip_g' in data and 'clip_l' in data: # SDXL embedding - vec = {k: v.detach().to(devices.device, dtype=torch.float32) for k, v in data.items()} - shape = data['clip_g'].shape[-1] + data['clip_l'].shape[-1] - vectors = data['clip_g'].shape[0] - elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor: # diffuser concepts - assert len(data.keys()) == 1, 'embedding file has multiple terms in it' - - emb = next(iter(data.values())) - if len(emb.shape) == 1: - emb = emb.unsqueeze(0) - vec = emb.detach().to(devices.device, dtype=torch.float32) - shape = vec.shape[-1] - vectors = vec.shape[0] - else: - raise Exception(f"Couldn't identify {emb_name} in lora: {name} as neither textual inversion embedding nor diffuser concept.") - - embedding = Embedding(vec, emb_name) - embedding.vectors = vectors - embedding.shape = shape + embedding = textual_inversion.create_embedding_from_data(data, emb_name, filename=network_on_disk.filename + "/" + emb_name) embedding.loaded = None embeddings[emb_name] = embedding diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 401a0a2a..04dda585 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -181,40 +181,7 @@ class EmbeddingDatabase: else: return - - # textual inversion embeddings - if 'string_to_param' in data: - param_dict = data['string_to_param'] - param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11 - assert len(param_dict) == 1, 'embedding file has multiple terms in it' - emb = next(iter(param_dict.items()))[1] - vec = emb.detach().to(devices.device, dtype=torch.float32) - shape = vec.shape[-1] - vectors = vec.shape[0] - elif type(data) == dict and 'clip_g' in data and 'clip_l' in data: # SDXL embedding - vec = {k: v.detach().to(devices.device, dtype=torch.float32) for k, v in data.items()} - shape = data['clip_g'].shape[-1] + data['clip_l'].shape[-1] - vectors = data['clip_g'].shape[0] - elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor: # diffuser concepts - assert len(data.keys()) == 1, 'embedding file has multiple terms in it' - - emb = 
next(iter(data.values())) - if len(emb.shape) == 1: - emb = emb.unsqueeze(0) - vec = emb.detach().to(devices.device, dtype=torch.float32) - shape = vec.shape[-1] - vectors = vec.shape[0] - else: - raise Exception(f"Couldn't identify {filename} as neither textual inversion embedding nor diffuser concept.") - - embedding = Embedding(vec, name) - embedding.step = data.get('step', None) - embedding.sd_checkpoint = data.get('sd_checkpoint', None) - embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None) - embedding.vectors = vectors - embedding.shape = shape - embedding.filename = path - embedding.set_hash(hashes.sha256(embedding.filename, "textual_inversion/" + name) or '') + embedding = create_embedding_from_data(data, name, filename=filename, filepath=path) if self.expected_shape == -1 or self.expected_shape == embedding.shape: self.register_embedding(embedding, shared.sd_model) @@ -313,6 +280,45 @@ def create_embedding(name, num_vectors_per_token, overwrite_old, init_text='*'): return fn +def create_embedding_from_data(data, name, filename='unknown embedding file', filepath=None): + if 'string_to_param' in data: # textual inversion embeddings + param_dict = data['string_to_param'] + param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11 + assert len(param_dict) == 1, 'embedding file has multiple terms in it' + emb = next(iter(param_dict.items()))[1] + vec = emb.detach().to(devices.device, dtype=torch.float32) + shape = vec.shape[-1] + vectors = vec.shape[0] + elif type(data) == dict and 'clip_g' in data and 'clip_l' in data: # SDXL embedding + vec = {k: v.detach().to(devices.device, dtype=torch.float32) for k, v in data.items()} + shape = data['clip_g'].shape[-1] + data['clip_l'].shape[-1] + vectors = data['clip_g'].shape[0] + elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor: # diffuser concepts + assert len(data.keys()) == 1, 'embedding file has multiple terms in it' + + emb = next(iter(data.values())) + if len(emb.shape) == 1: + emb = emb.unsqueeze(0) + vec = emb.detach().to(devices.device, dtype=torch.float32) + shape = vec.shape[-1] + vectors = vec.shape[0] + else: + raise Exception(f"Couldn't identify {filename} as neither textual inversion embedding nor diffuser concept.") + + embedding = Embedding(vec, name) + embedding.step = data.get('step', None) + embedding.sd_checkpoint = data.get('sd_checkpoint', None) + embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None) + embedding.vectors = vectors + embedding.shape = shape + + if filepath: + embedding.filename = filepath + embedding.set_hash(hashes.sha256(filepath, "textual_inversion/" + name) or '') + + return embedding + + def write_loss(log_directory, filename, step, epoch_len, values): if shared.opts.training_write_csv_every == 0: return -- cgit v1.2.3 From 117ec7199416b56a16cbc5591f13fe6cce521d9e Mon Sep 17 00:00:00 2001 From: Won-Kyu Park Date: Sat, 14 Oct 2023 21:58:28 +0900 Subject: support webui.settings.bat --- webui.bat | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/webui.bat b/webui.bat index a630ea4d..e2c9079d 100644 --- a/webui.bat +++ b/webui.bat @@ -1,5 +1,9 @@ @echo off +if exist webui.settings.bat ( + call webui.settings.bat +) + if not defined PYTHON (set PYTHON=python) if defined GIT (set "GIT_PYTHON_GIT_EXECUTABLE=%GIT%") if not defined VENV_DIR (set "VENV_DIR=%~dp0%venv") -- cgit v1.2.3 From f00eaa4d000856964f810aa4b651200861bafc6f Mon Sep 17 00:00:00 2001 From: Khachatur Avanesian Date: Sun, 15 
Oct 2023 02:34:03 +0300 Subject: Start / Restart generation by Ctrl (Alt) + Enter Add ability to interrupt current generation and start generation again by Ctrl (Alt) + Enter --- script.js | 328 +++++++++++++++++++++++++++++++------------------------------- 1 file changed, 165 insertions(+), 163 deletions(-) diff --git a/script.js b/script.js index 34cca765..8bf1257d 100644 --- a/script.js +++ b/script.js @@ -1,163 +1,165 @@ -function gradioApp() { - const elems = document.getElementsByTagName('gradio-app'); - const elem = elems.length == 0 ? document : elems[0]; - - if (elem !== document) { - elem.getElementById = function(id) { - return document.getElementById(id); - }; - } - return elem.shadowRoot ? elem.shadowRoot : elem; -} - -/** - * Get the currently selected top-level UI tab button (e.g. the button that says "Extras"). - */ -function get_uiCurrentTab() { - return gradioApp().querySelector('#tabs > .tab-nav > button.selected'); -} - -/** - * Get the first currently visible top-level UI tab content (e.g. the div hosting the "txt2img" UI). - */ -function get_uiCurrentTabContent() { - return gradioApp().querySelector('#tabs > .tabitem[id^=tab_]:not([style*="display: none"])'); -} - -var uiUpdateCallbacks = []; -var uiAfterUpdateCallbacks = []; -var uiLoadedCallbacks = []; -var uiTabChangeCallbacks = []; -var optionsChangedCallbacks = []; -var uiAfterUpdateTimeout = null; -var uiCurrentTab = null; - -/** - * Register callback to be called at each UI update. - * The callback receives an array of MutationRecords as an argument. - */ -function onUiUpdate(callback) { - uiUpdateCallbacks.push(callback); -} - -/** - * Register callback to be called soon after UI updates. - * The callback receives no arguments. - * - * This is preferred over `onUiUpdate` if you don't need - * access to the MutationRecords, as your function will - * not be called quite as often. - */ -function onAfterUiUpdate(callback) { - uiAfterUpdateCallbacks.push(callback); -} - -/** - * Register callback to be called when the UI is loaded. - * The callback receives no arguments. - */ -function onUiLoaded(callback) { - uiLoadedCallbacks.push(callback); -} - -/** - * Register callback to be called when the UI tab is changed. - * The callback receives no arguments. - */ -function onUiTabChange(callback) { - uiTabChangeCallbacks.push(callback); -} - -/** - * Register callback to be called when the options are changed. - * The callback receives no arguments. - * @param callback - */ -function onOptionsChanged(callback) { - optionsChangedCallbacks.push(callback); -} - -function executeCallbacks(queue, arg) { - for (const callback of queue) { - try { - callback(arg); - } catch (e) { - console.error("error running callback", callback, ":", e); - } - } -} - -/** - * Schedule the execution of the callbacks registered with onAfterUiUpdate. - * The callbacks are executed after a short while, unless another call to this function - * is made before that time. IOW, the callbacks are executed only once, even - * when there are multiple mutations observed. 
- */ -function scheduleAfterUiUpdateCallbacks() { - clearTimeout(uiAfterUpdateTimeout); - uiAfterUpdateTimeout = setTimeout(function() { - executeCallbacks(uiAfterUpdateCallbacks); - }, 200); -} - -var executedOnLoaded = false; - -document.addEventListener("DOMContentLoaded", function() { - var mutationObserver = new MutationObserver(function(m) { - if (!executedOnLoaded && gradioApp().querySelector('#txt2img_prompt')) { - executedOnLoaded = true; - executeCallbacks(uiLoadedCallbacks); - } - - executeCallbacks(uiUpdateCallbacks, m); - scheduleAfterUiUpdateCallbacks(); - const newTab = get_uiCurrentTab(); - if (newTab && (newTab !== uiCurrentTab)) { - uiCurrentTab = newTab; - executeCallbacks(uiTabChangeCallbacks); - } - }); - mutationObserver.observe(gradioApp(), {childList: true, subtree: true}); -}); - -/** - * Add a ctrl+enter as a shortcut to start a generation - */ -document.addEventListener('keydown', function(e) { - var handled = false; - if (e.key !== undefined) { - if ((e.key == "Enter" && (e.metaKey || e.ctrlKey || e.altKey))) handled = true; - } else if (e.keyCode !== undefined) { - if ((e.keyCode == 13 && (e.metaKey || e.ctrlKey || e.altKey))) handled = true; - } - if (handled) { - var button = get_uiCurrentTabContent().querySelector('button[id$=_generate]'); - if (button) { - button.click(); - } - e.preventDefault(); - } -}); - -/** - * checks that a UI element is not in another hidden element or tab content - */ -function uiElementIsVisible(el) { - if (el === document) { - return true; - } - - const computedStyle = getComputedStyle(el); - const isVisible = computedStyle.display !== 'none'; - - if (!isVisible) return false; - return uiElementIsVisible(el.parentNode); -} - -function uiElementInSight(el) { - const clRect = el.getBoundingClientRect(); - const windowHeight = window.innerHeight; - const isOnScreen = clRect.bottom > 0 && clRect.top < windowHeight; - - return isOnScreen; -} +function gradioApp() { + const elems = document.getElementsByTagName('gradio-app'); + const elem = elems.length == 0 ? document : elems[0]; + + if (elem !== document) { + elem.getElementById = function(id) { + return document.getElementById(id); + }; + } + return elem.shadowRoot ? elem.shadowRoot : elem; +} + +/** + * Get the currently selected top-level UI tab button (e.g. the button that says "Extras"). + */ +function get_uiCurrentTab() { + return gradioApp().querySelector('#tabs > .tab-nav > button.selected'); +} + +/** + * Get the first currently visible top-level UI tab content (e.g. the div hosting the "txt2img" UI). + */ +function get_uiCurrentTabContent() { + return gradioApp().querySelector('#tabs > .tabitem[id^=tab_]:not([style*="display: none"])'); +} + +var uiUpdateCallbacks = []; +var uiAfterUpdateCallbacks = []; +var uiLoadedCallbacks = []; +var uiTabChangeCallbacks = []; +var optionsChangedCallbacks = []; +var uiAfterUpdateTimeout = null; +var uiCurrentTab = null; + +/** + * Register callback to be called at each UI update. + * The callback receives an array of MutationRecords as an argument. + */ +function onUiUpdate(callback) { + uiUpdateCallbacks.push(callback); +} + +/** + * Register callback to be called soon after UI updates. + * The callback receives no arguments. + * + * This is preferred over `onUiUpdate` if you don't need + * access to the MutationRecords, as your function will + * not be called quite as often. + */ +function onAfterUiUpdate(callback) { + uiAfterUpdateCallbacks.push(callback); +} + +/** + * Register callback to be called when the UI is loaded. 
+ * The callback receives no arguments. + */ +function onUiLoaded(callback) { + uiLoadedCallbacks.push(callback); +} + +/** + * Register callback to be called when the UI tab is changed. + * The callback receives no arguments. + */ +function onUiTabChange(callback) { + uiTabChangeCallbacks.push(callback); +} + +/** + * Register callback to be called when the options are changed. + * The callback receives no arguments. + * @param callback + */ +function onOptionsChanged(callback) { + optionsChangedCallbacks.push(callback); +} + +function executeCallbacks(queue, arg) { + for (const callback of queue) { + try { + callback(arg); + } catch (e) { + console.error("error running callback", callback, ":", e); + } + } +} + +/** + * Schedule the execution of the callbacks registered with onAfterUiUpdate. + * The callbacks are executed after a short while, unless another call to this function + * is made before that time. IOW, the callbacks are executed only once, even + * when there are multiple mutations observed. + */ +function scheduleAfterUiUpdateCallbacks() { + clearTimeout(uiAfterUpdateTimeout); + uiAfterUpdateTimeout = setTimeout(function() { + executeCallbacks(uiAfterUpdateCallbacks); + }, 200); +} + +var executedOnLoaded = false; + +document.addEventListener("DOMContentLoaded", function() { + var mutationObserver = new MutationObserver(function(m) { + if (!executedOnLoaded && gradioApp().querySelector('#txt2img_prompt')) { + executedOnLoaded = true; + executeCallbacks(uiLoadedCallbacks); + } + + executeCallbacks(uiUpdateCallbacks, m); + scheduleAfterUiUpdateCallbacks(); + const newTab = get_uiCurrentTab(); + if (newTab && (newTab !== uiCurrentTab)) { + uiCurrentTab = newTab; + executeCallbacks(uiTabChangeCallbacks); + } + }); + mutationObserver.observe(gradioApp(), {childList: true, subtree: true}); +}); + +/** + * Add a Ctrl (Alt) + Enter as a shortcut to start / restart a generation + */ +document.addEventListener('keydown', (e) => { + const isEnter = e.key === 'Enter' || e.keyCode === 13 + const isModifierKey = e.metaKey || e.ctrlKey || e.altKey + + const interruptButton = get_uiCurrentTabContent().querySelector('button[id$=_interrupt]') + const generateButton = get_uiCurrentTabContent().querySelector('button[id$=_generate]') + + if (isEnter && isModifierKey) { + if (interruptButton.style.display === 'block') { + interruptButton.click() + setTimeout(() => generateButton.click(), 500) + } else { + generateButton.click() + } + e.preventDefault() + } +}) + +/** + * checks that a UI element is not in another hidden element or tab content + */ +function uiElementIsVisible(el) { + if (el === document) { + return true; + } + + const computedStyle = getComputedStyle(el); + const isVisible = computedStyle.display !== 'none'; + + if (!isVisible) return false; + return uiElementIsVisible(el.parentNode); +} + +function uiElementInSight(el) { + const clRect = el.getBoundingClientRect(); + const windowHeight = window.innerHeight; + const isOnScreen = clRect.bottom > 0 && clRect.top < windowHeight; + + return isOnScreen; +} -- cgit v1.2.3 From 0d65d0eabded4a79c3168ed14599db3970d414c5 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 15 Oct 2023 08:45:38 +0300 Subject: add an option to not print stack traces on ctrl+c. 
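The handler keeps the fast-exit behavior and only dumps stacks when the new setting is enabled. As a rough standalone sketch of the same idea (the `dump_stacks_on_signal` flag and the traceback call below are stand-ins for shared.opts and the webui's dumpstacks() helper, not a verbatim excerpt):

import os
import signal
import traceback

dump_stacks_on_signal = False  # in the webui this is read from shared.opts

def sigint_handler(sig, frame):
    print(f'Interrupted with signal {sig} in {frame}')
    if dump_stacks_on_signal:
        traceback.print_stack(frame)  # stand-in for the webui's dumpstacks()
    os._exit(0)  # exit immediately, without waiting for worker threads

signal.signal(signal.SIGINT, sigint_handler)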
--- modules/initialize_util.py | 6 +++++- modules/shared_options.py | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/initialize_util.py b/modules/initialize_util.py index 2894eee4..2e9b6d89 100644 --- a/modules/initialize_util.py +++ b/modules/initialize_util.py @@ -150,10 +150,14 @@ def dumpstacks(): def configure_sigint_handler(): # make the program just exit at ctrl+c without waiting for anything + + from modules import shared + def sigint_handler(sig, frame): print(f'Interrupted with signal {sig} in {frame}') - dumpstacks() + if shared.opts.dump_stacks_on_signal: + dumpstacks() os._exit(0) diff --git a/modules/shared_options.py b/modules/shared_options.py index ce395302..6ef9fdd5 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -112,6 +112,7 @@ options_templates.update(options_section(('system', "System"), { "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name starts with \".\""), "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"), "hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."), + "dump_stacks_on_signal": OptionInfo(False, "Print stack traces before exiting the program with ctrl+c."), })) options_templates.update(options_section(('API', "API"), { -- cgit v1.2.3 From 282903bb6798f49af66f6935ee4dc0015895cf7c Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 15 Oct 2023 09:41:02 +0300 Subject: repair unload sd checkpoint button --- modules/api/api.py | 11 +++++------ modules/sd_models.py | 13 +------------ modules/ui_settings.py | 24 +++++++++++++++++------- 3 files changed, 23 insertions(+), 25 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index efedafa4..09083874 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -17,15 +17,14 @@ from fastapi.encoders import jsonable_encoder from secrets import compare_digest import modules.shared as shared -from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart, shared_items, script_callbacks, generation_parameters_copypaste +from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart, shared_items, script_callbacks, generation_parameters_copypaste, sd_models from modules.api import models from modules.shared import opts from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.textual_inversion.textual_inversion import create_embedding, train_embedding from modules.textual_inversion.preprocess import preprocess from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork -from PIL import PngImagePlugin,Image -from modules.sd_models import unload_model_weights, reload_model_weights, checkpoint_aliases +from PIL import PngImagePlugin, Image from modules.sd_models_config import find_checkpoint_config_near_filename from modules.realesrgan_model import get_realesrgan_models from modules import devices @@ -541,12 +540,12 @@ class Api: return {} def unloadapi(self): - unload_model_weights() + sd_models.unload_model_weights() return {} def reloadapi(self): - reload_model_weights() + sd_models.send_model_to_device(shared.sd_model) return {} @@ -566,7 +565,7 @@ class Api: def set_config(self, req: dict[str, Any]): checkpoint_name = 
req.get("sd_model_checkpoint", None) - if checkpoint_name is not None and checkpoint_name not in checkpoint_aliases: + if checkpoint_name is not None and checkpoint_name not in sd_models.checkpoint_aliases: raise RuntimeError(f"model {checkpoint_name!r} not found") for k, v in req.items(): diff --git a/modules/sd_models.py b/modules/sd_models.py index c8efeedc..3b6cdea1 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -1,7 +1,6 @@ import collections import os.path import sys -import gc import threading import torch @@ -798,17 +797,7 @@ def reload_model_weights(sd_model=None, info=None): def unload_model_weights(sd_model=None, info=None): - timer = Timer() - - if model_data.sd_model: - model_data.sd_model.to(devices.cpu) - sd_hijack.model_hijack.undo_hijack(model_data.sd_model) - model_data.sd_model = None - sd_model = None - gc.collect() - devices.torch_gc() - - print(f"Unloaded weights {timer.summary()}.") + send_model_to_cpu(sd_model or shared.sd_model) return sd_model diff --git a/modules/ui_settings.py b/modules/ui_settings.py index 74a3aef3..e054d00a 100644 --- a/modules/ui_settings.py +++ b/modules/ui_settings.py @@ -1,6 +1,6 @@ import gradio as gr -from modules import ui_common, shared, script_callbacks, scripts, sd_models, sysinfo +from modules import ui_common, shared, script_callbacks, scripts, sd_models, sysinfo, timer from modules.call_queue import wrap_gradio_call from modules.shared import opts from modules.ui_components import FormRow @@ -177,8 +177,8 @@ class UiSettings: download_localization = gr.Button(value='Download localization template', elem_id="download_localization") reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies") with gr.Row(): - unload_sd_model = gr.Button(value='Unload SD checkpoint to free VRAM', elem_id="sett_unload_sd_model") - reload_sd_model = gr.Button(value='Reload the last SD checkpoint back into VRAM', elem_id="sett_reload_sd_model") + unload_sd_model = gr.Button(value='Unload SD checkpoint to RAM', elem_id="sett_unload_sd_model") + reload_sd_model = gr.Button(value='Load SD checkpoint to VRAM from RAM', elem_id="sett_reload_sd_model") with gr.Row(): calculate_all_checkpoint_hash = gr.Button(value='Calculate hash for all checkpoint', elem_id="calculate_all_checkpoint_hash") calculate_all_checkpoint_hash_threads = gr.Number(value=1, label="Number of parallel calculations", elem_id="calculate_all_checkpoint_hash_threads", precision=0, minimum=1) @@ -194,16 +194,26 @@ class UiSettings: self.text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False) + def call_func_and_return_text(func, text): + def handler(): + t = timer.Timer() + func() + t.record(text) + + return f'{text} in {t.total:.1f}s' + + return handler + unload_sd_model.click( - fn=sd_models.unload_model_weights, + fn=call_func_and_return_text(sd_models.unload_model_weights, 'Unloaded the checkpoint'), inputs=[], - outputs=[] + outputs=[self.result] ) reload_sd_model.click( - fn=sd_models.reload_model_weights, + fn=call_func_and_return_text(lambda: sd_models.send_model_to_device(shared.sd_model), 'Loaded the checkpoint'), inputs=[], - outputs=[] + outputs=[self.result] ) request_notifications.click( -- cgit v1.2.3 From 2f6ea8b10312e700f8d01f90dff15d17690ce49c Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 15 Oct 2023 10:12:38 +0300 Subject: respect keyedit_precision_attention setting when converting from 
old (((attention))) syntax --- javascript/edit-attention.js | 2 ++ 1 file changed, 2 insertions(+) diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index 8b6dd240..45d9a788 100644 --- a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -86,6 +86,8 @@ function keyupEditAttention(event) { weight = (1 / 1.1) ** numParen; } + weight = Math.round(weight / opts.keyedit_precision_attention) * opts.keyedit_precision_attention; + text = text.slice(0, selectionStart - numParen) + "(" + text.slice(selectionStart, selectionEnd) + ":" + weight + ")" + text.slice(selectionEnd + numParen); selectionStart -= numParen - 1; selectionEnd -= numParen - 1; -- cgit v1.2.3 From 77bd953da2f96845b196f242f7ba51138cd54e3b Mon Sep 17 00:00:00 2001 From: Khachatur Avanesian Date: Sun, 15 Oct 2023 10:25:36 +0300 Subject: Update script.js Exclude lambda --- script.js | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/script.js b/script.js index 8bf1257d..2fab1554 100644 --- a/script.js +++ b/script.js @@ -123,23 +123,25 @@ document.addEventListener("DOMContentLoaded", function() { /** * Add a Ctrl (Alt) + Enter as a shortcut to start / restart a generation */ -document.addEventListener('keydown', (e) => { - const isEnter = e.key === 'Enter' || e.keyCode === 13 - const isModifierKey = e.metaKey || e.ctrlKey || e.altKey +document.addEventListener('keydown', function(e) { + const isEnter = e.key === 'Enter' || e.keyCode === 13; + const isModifierKey = e.metaKey || e.ctrlKey || e.altKey; - const interruptButton = get_uiCurrentTabContent().querySelector('button[id$=_interrupt]') - const generateButton = get_uiCurrentTabContent().querySelector('button[id$=_generate]') + const interruptButton = get_uiCurrentTabContent().querySelector('button[id$=_interrupt]'); + const generateButton = get_uiCurrentTabContent().querySelector('button[id$=_generate]'); if (isEnter && isModifierKey) { if (interruptButton.style.display === 'block') { - interruptButton.click() - setTimeout(() => generateButton.click(), 500) + interruptButton.click(); + setTimeout(function() { + generateButton.click(); + }, 500); } else { - generateButton.click() + generateButton.click(); } - e.preventDefault() + e.preventDefault(); } -}) +}); /** * checks that a UI element is not in another hidden element or tab content -- cgit v1.2.3 From d295e97a0d7ca985ab21e935fd933ce629fba2bf Mon Sep 17 00:00:00 2001 From: Khachatur Avanesian Date: Sun, 15 Oct 2023 10:37:48 +0300 Subject: Update script.js LF instead CRLF --- script.js | 130 ++++++++++++++++++++++++++++++++------------------------------ 1 file changed, 68 insertions(+), 62 deletions(-) diff --git a/script.js b/script.js index 2fab1554..8af9773f 100644 --- a/script.js +++ b/script.js @@ -1,43 +1,45 @@ function gradioApp() { - const elems = document.getElementsByTagName('gradio-app'); - const elem = elems.length == 0 ? document : elems[0]; + const elems = document.getElementsByTagName('gradio-app') + const elem = elems.length == 0 ? document : elems[0] if (elem !== document) { - elem.getElementById = function(id) { - return document.getElementById(id); - }; + elem.getElementById = function (id) { + return document.getElementById(id) + } } - return elem.shadowRoot ? elem.shadowRoot : elem; + return elem.shadowRoot ? elem.shadowRoot : elem } /** * Get the currently selected top-level UI tab button (e.g. the button that says "Extras"). 
*/ function get_uiCurrentTab() { - return gradioApp().querySelector('#tabs > .tab-nav > button.selected'); + return gradioApp().querySelector('#tabs > .tab-nav > button.selected') } /** * Get the first currently visible top-level UI tab content (e.g. the div hosting the "txt2img" UI). */ function get_uiCurrentTabContent() { - return gradioApp().querySelector('#tabs > .tabitem[id^=tab_]:not([style*="display: none"])'); + return gradioApp().querySelector( + '#tabs > .tabitem[id^=tab_]:not([style*="display: none"])' + ) } -var uiUpdateCallbacks = []; -var uiAfterUpdateCallbacks = []; -var uiLoadedCallbacks = []; -var uiTabChangeCallbacks = []; -var optionsChangedCallbacks = []; -var uiAfterUpdateTimeout = null; -var uiCurrentTab = null; +var uiUpdateCallbacks = [] +var uiAfterUpdateCallbacks = [] +var uiLoadedCallbacks = [] +var uiTabChangeCallbacks = [] +var optionsChangedCallbacks = [] +var uiAfterUpdateTimeout = null +var uiCurrentTab = null /** * Register callback to be called at each UI update. * The callback receives an array of MutationRecords as an argument. */ function onUiUpdate(callback) { - uiUpdateCallbacks.push(callback); + uiUpdateCallbacks.push(callback) } /** @@ -49,7 +51,7 @@ function onUiUpdate(callback) { * not be called quite as often. */ function onAfterUiUpdate(callback) { - uiAfterUpdateCallbacks.push(callback); + uiAfterUpdateCallbacks.push(callback) } /** @@ -57,7 +59,7 @@ function onAfterUiUpdate(callback) { * The callback receives no arguments. */ function onUiLoaded(callback) { - uiLoadedCallbacks.push(callback); + uiLoadedCallbacks.push(callback) } /** @@ -65,7 +67,7 @@ function onUiLoaded(callback) { * The callback receives no arguments. */ function onUiTabChange(callback) { - uiTabChangeCallbacks.push(callback); + uiTabChangeCallbacks.push(callback) } /** @@ -74,15 +76,15 @@ function onUiTabChange(callback) { * @param callback */ function onOptionsChanged(callback) { - optionsChangedCallbacks.push(callback); + optionsChangedCallbacks.push(callback) } function executeCallbacks(queue, arg) { for (const callback of queue) { try { - callback(arg); + callback(arg) } catch (e) { - console.error("error running callback", callback, ":", e); + console.error('error running callback', callback, ':', e) } } } @@ -94,74 +96,78 @@ function executeCallbacks(queue, arg) { * when there are multiple mutations observed. 
*/ function scheduleAfterUiUpdateCallbacks() { - clearTimeout(uiAfterUpdateTimeout); - uiAfterUpdateTimeout = setTimeout(function() { - executeCallbacks(uiAfterUpdateCallbacks); - }, 200); + clearTimeout(uiAfterUpdateTimeout) + uiAfterUpdateTimeout = setTimeout(function () { + executeCallbacks(uiAfterUpdateCallbacks) + }, 200) } -var executedOnLoaded = false; +var executedOnLoaded = false -document.addEventListener("DOMContentLoaded", function() { - var mutationObserver = new MutationObserver(function(m) { +document.addEventListener('DOMContentLoaded', function () { + var mutationObserver = new MutationObserver(function (m) { if (!executedOnLoaded && gradioApp().querySelector('#txt2img_prompt')) { - executedOnLoaded = true; - executeCallbacks(uiLoadedCallbacks); + executedOnLoaded = true + executeCallbacks(uiLoadedCallbacks) } - executeCallbacks(uiUpdateCallbacks, m); - scheduleAfterUiUpdateCallbacks(); - const newTab = get_uiCurrentTab(); - if (newTab && (newTab !== uiCurrentTab)) { - uiCurrentTab = newTab; - executeCallbacks(uiTabChangeCallbacks); + executeCallbacks(uiUpdateCallbacks, m) + scheduleAfterUiUpdateCallbacks() + const newTab = get_uiCurrentTab() + if (newTab && newTab !== uiCurrentTab) { + uiCurrentTab = newTab + executeCallbacks(uiTabChangeCallbacks) } - }); - mutationObserver.observe(gradioApp(), {childList: true, subtree: true}); -}); + }) + mutationObserver.observe(gradioApp(), {childList: true, subtree: true}) +}) /** - * Add a Ctrl (Alt) + Enter as a shortcut to start / restart a generation + * Add a ctrl+enter as a shortcut to start a generation */ -document.addEventListener('keydown', function(e) { - const isEnter = e.key === 'Enter' || e.keyCode === 13; - const isModifierKey = e.metaKey || e.ctrlKey || e.altKey; +document.addEventListener('keydown', function (e) { + const isEnter = e.key === 'Enter' || e.keyCode === 13 + const isModifierKey = e.metaKey || e.ctrlKey || e.altKey - const interruptButton = get_uiCurrentTabContent().querySelector('button[id$=_interrupt]'); - const generateButton = get_uiCurrentTabContent().querySelector('button[id$=_generate]'); + const interruptButton = get_uiCurrentTabContent().querySelector( + 'button[id$=_interrupt]' + ) + const generateButton = get_uiCurrentTabContent().querySelector( + 'button[id$=_generate]' + ) if (isEnter && isModifierKey) { if (interruptButton.style.display === 'block') { - interruptButton.click(); - setTimeout(function() { - generateButton.click(); - }, 500); + interruptButton.click() + setTimeout(function () { + generateButton.click() + }, 500) } else { - generateButton.click(); + generateButton.click() } - e.preventDefault(); + e.preventDefault() } -}); +}) /** * checks that a UI element is not in another hidden element or tab content */ function uiElementIsVisible(el) { if (el === document) { - return true; + return true } - const computedStyle = getComputedStyle(el); - const isVisible = computedStyle.display !== 'none'; + const computedStyle = getComputedStyle(el) + const isVisible = computedStyle.display !== 'none' - if (!isVisible) return false; - return uiElementIsVisible(el.parentNode); + if (!isVisible) return false + return uiElementIsVisible(el.parentNode) } function uiElementInSight(el) { - const clRect = el.getBoundingClientRect(); - const windowHeight = window.innerHeight; - const isOnScreen = clRect.bottom > 0 && clRect.top < windowHeight; + const clRect = el.getBoundingClientRect() + const windowHeight = window.innerHeight + const isOnScreen = clRect.bottom > 0 && clRect.top < windowHeight - 
return isOnScreen; + return isOnScreen } -- cgit v1.2.3 From 3e223523ced2a19347d0b42b662129947152dc49 Mon Sep 17 00:00:00 2001 From: Khachatur Avanesian Date: Sun, 15 Oct 2023 10:48:50 +0300 Subject: Update script.js --- script.js | 128 ++++++++++++++++++++++++++++++-------------------------------- 1 file changed, 61 insertions(+), 67 deletions(-) diff --git a/script.js b/script.js index 8af9773f..0f63ee93 100644 --- a/script.js +++ b/script.js @@ -1,45 +1,43 @@ function gradioApp() { - const elems = document.getElementsByTagName('gradio-app') - const elem = elems.length == 0 ? document : elems[0] + const elems = document.getElementsByTagName('gradio-app'); + const elem = elems.length == 0 ? document : elems[0]; if (elem !== document) { - elem.getElementById = function (id) { - return document.getElementById(id) - } + elem.getElementById = function(id) { + return document.getElementById(id); + }; } - return elem.shadowRoot ? elem.shadowRoot : elem + return elem.shadowRoot ? elem.shadowRoot : elem; } /** * Get the currently selected top-level UI tab button (e.g. the button that says "Extras"). */ function get_uiCurrentTab() { - return gradioApp().querySelector('#tabs > .tab-nav > button.selected') + return gradioApp().querySelector('#tabs > .tab-nav > button.selected'); } /** * Get the first currently visible top-level UI tab content (e.g. the div hosting the "txt2img" UI). */ function get_uiCurrentTabContent() { - return gradioApp().querySelector( - '#tabs > .tabitem[id^=tab_]:not([style*="display: none"])' - ) + return gradioApp().querySelector('#tabs > .tabitem[id^=tab_]:not([style*="display: none"])'); } -var uiUpdateCallbacks = [] -var uiAfterUpdateCallbacks = [] -var uiLoadedCallbacks = [] -var uiTabChangeCallbacks = [] -var optionsChangedCallbacks = [] -var uiAfterUpdateTimeout = null -var uiCurrentTab = null +var uiUpdateCallbacks = []; +var uiAfterUpdateCallbacks = []; +var uiLoadedCallbacks = []; +var uiTabChangeCallbacks = []; +var optionsChangedCallbacks = []; +var uiAfterUpdateTimeout = null; +var uiCurrentTab = null; /** * Register callback to be called at each UI update. * The callback receives an array of MutationRecords as an argument. */ function onUiUpdate(callback) { - uiUpdateCallbacks.push(callback) + uiUpdateCallbacks.push(callback); } /** @@ -51,7 +49,7 @@ function onUiUpdate(callback) { * not be called quite as often. */ function onAfterUiUpdate(callback) { - uiAfterUpdateCallbacks.push(callback) + uiAfterUpdateCallbacks.push(callback); } /** @@ -59,7 +57,7 @@ function onAfterUiUpdate(callback) { * The callback receives no arguments. */ function onUiLoaded(callback) { - uiLoadedCallbacks.push(callback) + uiLoadedCallbacks.push(callback); } /** @@ -67,7 +65,7 @@ function onUiLoaded(callback) { * The callback receives no arguments. */ function onUiTabChange(callback) { - uiTabChangeCallbacks.push(callback) + uiTabChangeCallbacks.push(callback); } /** @@ -76,15 +74,15 @@ function onUiTabChange(callback) { * @param callback */ function onOptionsChanged(callback) { - optionsChangedCallbacks.push(callback) + optionsChangedCallbacks.push(callback); } function executeCallbacks(queue, arg) { for (const callback of queue) { try { - callback(arg) + callback(arg); } catch (e) { - console.error('error running callback', callback, ':', e) + console.error("error running callback", callback, ":", e); } } } @@ -96,78 +94,74 @@ function executeCallbacks(queue, arg) { * when there are multiple mutations observed. 
*/ function scheduleAfterUiUpdateCallbacks() { - clearTimeout(uiAfterUpdateTimeout) - uiAfterUpdateTimeout = setTimeout(function () { - executeCallbacks(uiAfterUpdateCallbacks) - }, 200) + clearTimeout(uiAfterUpdateTimeout); + uiAfterUpdateTimeout = setTimeout(function() { + executeCallbacks(uiAfterUpdateCallbacks); + }, 200); } -var executedOnLoaded = false +var executedOnLoaded = false; -document.addEventListener('DOMContentLoaded', function () { - var mutationObserver = new MutationObserver(function (m) { +document.addEventListener("DOMContentLoaded", function() { + var mutationObserver = new MutationObserver(function(m) { if (!executedOnLoaded && gradioApp().querySelector('#txt2img_prompt')) { - executedOnLoaded = true - executeCallbacks(uiLoadedCallbacks) + executedOnLoaded = true; + executeCallbacks(uiLoadedCallbacks); } - executeCallbacks(uiUpdateCallbacks, m) - scheduleAfterUiUpdateCallbacks() - const newTab = get_uiCurrentTab() - if (newTab && newTab !== uiCurrentTab) { - uiCurrentTab = newTab - executeCallbacks(uiTabChangeCallbacks) + executeCallbacks(uiUpdateCallbacks, m); + scheduleAfterUiUpdateCallbacks(); + const newTab = get_uiCurrentTab(); + if (newTab && (newTab !== uiCurrentTab)) { + uiCurrentTab = newTab; + executeCallbacks(uiTabChangeCallbacks); } - }) - mutationObserver.observe(gradioApp(), {childList: true, subtree: true}) -}) + }); + mutationObserver.observe(gradioApp(), {childList: true, subtree: true}); +}); /** * Add a ctrl+enter as a shortcut to start a generation */ -document.addEventListener('keydown', function (e) { - const isEnter = e.key === 'Enter' || e.keyCode === 13 - const isModifierKey = e.metaKey || e.ctrlKey || e.altKey +document.addEventListener('keydown', function(e) { + const isEnter = e.key === 'Enter' || e.keyCode === 13; + const isModifierKey = e.metaKey || e.ctrlKey || e.altKey; - const interruptButton = get_uiCurrentTabContent().querySelector( - 'button[id$=_interrupt]' - ) - const generateButton = get_uiCurrentTabContent().querySelector( - 'button[id$=_generate]' - ) + const interruptButton = get_uiCurrentTabContent().querySelector('button[id$=_interrupt]'); + const generateButton = get_uiCurrentTabContent().querySelector('button[id$=_generate]'); if (isEnter && isModifierKey) { if (interruptButton.style.display === 'block') { - interruptButton.click() - setTimeout(function () { - generateButton.click() - }, 500) + interruptButton.click(); + setTimeout(function() { + generateButton.click(); + }, 500); } else { - generateButton.click() + generateButton.click(); } - e.preventDefault() + e.preventDefault(); } -}) +}); /** * checks that a UI element is not in another hidden element or tab content */ function uiElementIsVisible(el) { if (el === document) { - return true + return true; } - const computedStyle = getComputedStyle(el) - const isVisible = computedStyle.display !== 'none' + const computedStyle = getComputedStyle(el); + const isVisible = computedStyle.display !== 'none'; - if (!isVisible) return false - return uiElementIsVisible(el.parentNode) + if (!isVisible) return false; + return uiElementIsVisible(el.parentNode); } function uiElementInSight(el) { - const clRect = el.getBoundingClientRect() - const windowHeight = window.innerHeight - const isOnScreen = clRect.bottom > 0 && clRect.top < windowHeight + const clRect = el.getBoundingClientRect(); + const windowHeight = window.innerHeight; + const isOnScreen = clRect.bottom > 0 && clRect.top < windowHeight; - return isOnScreen + return isOnScreen; } -- cgit v1.2.3 From 
d33cb2b8122f259002ce6ef2e7f5cf30dbe069b5 Mon Sep 17 00:00:00 2001 From: Khachatur Avanesian Date: Sun, 15 Oct 2023 11:01:45 +0300 Subject: Add files via upload LF --- script.js | 334 +++++++++++++++++++++++++++++++------------------------------- 1 file changed, 167 insertions(+), 167 deletions(-) diff --git a/script.js b/script.js index 0f63ee93..5f6ee354 100644 --- a/script.js +++ b/script.js @@ -1,167 +1,167 @@ -function gradioApp() { - const elems = document.getElementsByTagName('gradio-app'); - const elem = elems.length == 0 ? document : elems[0]; - - if (elem !== document) { - elem.getElementById = function(id) { - return document.getElementById(id); - }; - } - return elem.shadowRoot ? elem.shadowRoot : elem; -} - -/** - * Get the currently selected top-level UI tab button (e.g. the button that says "Extras"). - */ -function get_uiCurrentTab() { - return gradioApp().querySelector('#tabs > .tab-nav > button.selected'); -} - -/** - * Get the first currently visible top-level UI tab content (e.g. the div hosting the "txt2img" UI). - */ -function get_uiCurrentTabContent() { - return gradioApp().querySelector('#tabs > .tabitem[id^=tab_]:not([style*="display: none"])'); -} - -var uiUpdateCallbacks = []; -var uiAfterUpdateCallbacks = []; -var uiLoadedCallbacks = []; -var uiTabChangeCallbacks = []; -var optionsChangedCallbacks = []; -var uiAfterUpdateTimeout = null; -var uiCurrentTab = null; - -/** - * Register callback to be called at each UI update. - * The callback receives an array of MutationRecords as an argument. - */ -function onUiUpdate(callback) { - uiUpdateCallbacks.push(callback); -} - -/** - * Register callback to be called soon after UI updates. - * The callback receives no arguments. - * - * This is preferred over `onUiUpdate` if you don't need - * access to the MutationRecords, as your function will - * not be called quite as often. - */ -function onAfterUiUpdate(callback) { - uiAfterUpdateCallbacks.push(callback); -} - -/** - * Register callback to be called when the UI is loaded. - * The callback receives no arguments. - */ -function onUiLoaded(callback) { - uiLoadedCallbacks.push(callback); -} - -/** - * Register callback to be called when the UI tab is changed. - * The callback receives no arguments. - */ -function onUiTabChange(callback) { - uiTabChangeCallbacks.push(callback); -} - -/** - * Register callback to be called when the options are changed. - * The callback receives no arguments. - * @param callback - */ -function onOptionsChanged(callback) { - optionsChangedCallbacks.push(callback); -} - -function executeCallbacks(queue, arg) { - for (const callback of queue) { - try { - callback(arg); - } catch (e) { - console.error("error running callback", callback, ":", e); - } - } -} - -/** - * Schedule the execution of the callbacks registered with onAfterUiUpdate. - * The callbacks are executed after a short while, unless another call to this function - * is made before that time. IOW, the callbacks are executed only once, even - * when there are multiple mutations observed. 
- */ -function scheduleAfterUiUpdateCallbacks() { - clearTimeout(uiAfterUpdateTimeout); - uiAfterUpdateTimeout = setTimeout(function() { - executeCallbacks(uiAfterUpdateCallbacks); - }, 200); -} - -var executedOnLoaded = false; - -document.addEventListener("DOMContentLoaded", function() { - var mutationObserver = new MutationObserver(function(m) { - if (!executedOnLoaded && gradioApp().querySelector('#txt2img_prompt')) { - executedOnLoaded = true; - executeCallbacks(uiLoadedCallbacks); - } - - executeCallbacks(uiUpdateCallbacks, m); - scheduleAfterUiUpdateCallbacks(); - const newTab = get_uiCurrentTab(); - if (newTab && (newTab !== uiCurrentTab)) { - uiCurrentTab = newTab; - executeCallbacks(uiTabChangeCallbacks); - } - }); - mutationObserver.observe(gradioApp(), {childList: true, subtree: true}); -}); - -/** - * Add a ctrl+enter as a shortcut to start a generation - */ -document.addEventListener('keydown', function(e) { - const isEnter = e.key === 'Enter' || e.keyCode === 13; - const isModifierKey = e.metaKey || e.ctrlKey || e.altKey; - - const interruptButton = get_uiCurrentTabContent().querySelector('button[id$=_interrupt]'); - const generateButton = get_uiCurrentTabContent().querySelector('button[id$=_generate]'); - - if (isEnter && isModifierKey) { - if (interruptButton.style.display === 'block') { - interruptButton.click(); - setTimeout(function() { - generateButton.click(); - }, 500); - } else { - generateButton.click(); - } - e.preventDefault(); - } -}); - -/** - * checks that a UI element is not in another hidden element or tab content - */ -function uiElementIsVisible(el) { - if (el === document) { - return true; - } - - const computedStyle = getComputedStyle(el); - const isVisible = computedStyle.display !== 'none'; - - if (!isVisible) return false; - return uiElementIsVisible(el.parentNode); -} - -function uiElementInSight(el) { - const clRect = el.getBoundingClientRect(); - const windowHeight = window.innerHeight; - const isOnScreen = clRect.bottom > 0 && clRect.top < windowHeight; - - return isOnScreen; -} +function gradioApp() { + const elems = document.getElementsByTagName('gradio-app'); + const elem = elems.length == 0 ? document : elems[0]; + + if (elem !== document) { + elem.getElementById = function(id) { + return document.getElementById(id); + }; + } + return elem.shadowRoot ? elem.shadowRoot : elem; +} + +/** + * Get the currently selected top-level UI tab button (e.g. the button that says "Extras"). + */ +function get_uiCurrentTab() { + return gradioApp().querySelector('#tabs > .tab-nav > button.selected'); +} + +/** + * Get the first currently visible top-level UI tab content (e.g. the div hosting the "txt2img" UI). + */ +function get_uiCurrentTabContent() { + return gradioApp().querySelector('#tabs > .tabitem[id^=tab_]:not([style*="display: none"])'); +} + +var uiUpdateCallbacks = []; +var uiAfterUpdateCallbacks = []; +var uiLoadedCallbacks = []; +var uiTabChangeCallbacks = []; +var optionsChangedCallbacks = []; +var uiAfterUpdateTimeout = null; +var uiCurrentTab = null; + +/** + * Register callback to be called at each UI update. + * The callback receives an array of MutationRecords as an argument. + */ +function onUiUpdate(callback) { + uiUpdateCallbacks.push(callback); +} + +/** + * Register callback to be called soon after UI updates. + * The callback receives no arguments. + * + * This is preferred over `onUiUpdate` if you don't need + * access to the MutationRecords, as your function will + * not be called quite as often. 
+ */ +function onAfterUiUpdate(callback) { + uiAfterUpdateCallbacks.push(callback); +} + +/** + * Register callback to be called when the UI is loaded. + * The callback receives no arguments. + */ +function onUiLoaded(callback) { + uiLoadedCallbacks.push(callback); +} + +/** + * Register callback to be called when the UI tab is changed. + * The callback receives no arguments. + */ +function onUiTabChange(callback) { + uiTabChangeCallbacks.push(callback); +} + +/** + * Register callback to be called when the options are changed. + * The callback receives no arguments. + * @param callback + */ +function onOptionsChanged(callback) { + optionsChangedCallbacks.push(callback); +} + +function executeCallbacks(queue, arg) { + for (const callback of queue) { + try { + callback(arg); + } catch (e) { + console.error("error running callback", callback, ":", e); + } + } +} + +/** + * Schedule the execution of the callbacks registered with onAfterUiUpdate. + * The callbacks are executed after a short while, unless another call to this function + * is made before that time. IOW, the callbacks are executed only once, even + * when there are multiple mutations observed. + */ +function scheduleAfterUiUpdateCallbacks() { + clearTimeout(uiAfterUpdateTimeout); + uiAfterUpdateTimeout = setTimeout(function() { + executeCallbacks(uiAfterUpdateCallbacks); + }, 200); +} + +var executedOnLoaded = false; + +document.addEventListener("DOMContentLoaded", function() { + var mutationObserver = new MutationObserver(function(m) { + if (!executedOnLoaded && gradioApp().querySelector('#txt2img_prompt')) { + executedOnLoaded = true; + executeCallbacks(uiLoadedCallbacks); + } + + executeCallbacks(uiUpdateCallbacks, m); + scheduleAfterUiUpdateCallbacks(); + const newTab = get_uiCurrentTab(); + if (newTab && (newTab !== uiCurrentTab)) { + uiCurrentTab = newTab; + executeCallbacks(uiTabChangeCallbacks); + } + }); + mutationObserver.observe(gradioApp(), {childList: true, subtree: true}); +}); + +/** + * Add a ctrl+enter as a shortcut to start a generation + */ +document.addEventListener('keydown', function(e) { + const isEnter = e.key === 'Enter' || e.keyCode === 13; + const isModifierKey = e.metaKey || e.ctrlKey || e.altKey; + + const interruptButton = get_uiCurrentTabContent().querySelector('button[id$=_interrupt]'); + const generateButton = get_uiCurrentTabContent().querySelector('button[id$=_generate]'); + + if (isEnter && isModifierKey) { + if (interruptButton.style.display === 'block') { + interruptButton.click(); + setTimeout(function() { + generateButton.click(); + }, 500); + } else { + generateButton.click(); + } + e.preventDefault(); + } +}); + +/** + * checks that a UI element is not in another hidden element or tab content + */ +function uiElementIsVisible(el) { + if (el === document) { + return true; + } + + const computedStyle = getComputedStyle(el); + const isVisible = computedStyle.display !== 'none'; + + if (!isVisible) return false; + return uiElementIsVisible(el.parentNode); +} + +function uiElementInSight(el) { + const clRect = el.getBoundingClientRect(); + const windowHeight = window.innerHeight; + const isOnScreen = clRect.bottom > 0 && clRect.top < windowHeight; + + return isOnScreen; +} -- cgit v1.2.3 From ec718f76b58b183859ed732e11ec748c41a13f76 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Tue, 17 Oct 2023 23:35:50 -0700 Subject: wip incorrect OFT implementation --- extensions-builtin/Lora/network_oft.py | 82 ++++++++++++++++++++++++++++++++++ 
extensions-builtin/Lora/networks.py | 5 +++ 2 files changed, 87 insertions(+) create mode 100644 extensions-builtin/Lora/network_oft.py diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py new file mode 100644 index 00000000..9ddb175c --- /dev/null +++ b/extensions-builtin/Lora/network_oft.py @@ -0,0 +1,82 @@ +import torch +import network + + +class ModuleTypeOFT(network.ModuleType): + def create_module(self, net: network.Network, weights: network.NetworkWeights): + if all(x in weights.w for x in ["oft_blocks"]): + return NetworkModuleOFT(net, weights) + + return None + +# adapted from https://github.com/kohya-ss/sd-scripts/blob/main/networks/oft.py +class NetworkModuleOFT(network.NetworkModule): + def __init__(self, net: network.Network, weights: network.NetworkWeights): + super().__init__(net, weights) + + self.oft_blocks = weights.w["oft_blocks"] + self.alpha = weights.w["alpha"] + + self.dim = self.oft_blocks.shape[0] + self.num_blocks = self.dim + + #if type(self.alpha) == torch.Tensor: + # self.alpha = self.alpha.detach().numpy() + + if "Linear" in self.sd_module.__class__.__name__: + self.out_dim = self.sd_module.out_features + elif "Conv" in self.sd_module.__class__.__name__: + self.out_dim = self.sd_module.out_channels + + self.constraint = self.alpha * self.out_dim + self.block_size = self.out_dim // self.num_blocks + + self.oft_multiplier = self.multiplier() + + # replace forward method of original linear rather than replacing the module + # self.org_forward = self.sd_module.forward + # self.sd_module.forward = self.forward + + def get_weight(self): + block_Q = self.oft_blocks - self.oft_blocks.transpose(1, 2) + norm_Q = torch.norm(block_Q.flatten()) + new_norm_Q = torch.clamp(norm_Q, max=self.constraint) + block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) + I = torch.eye(self.block_size, device=self.oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) + block_R = torch.matmul(I + block_Q, (I - block_Q).inverse()) + + block_R_weighted = self.oft_multiplier * block_R + (1 - self.oft_multiplier) * I + R = torch.block_diag(*block_R_weighted) + + return R + + def calc_updown(self, orig_weight): + oft_blocks = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) + block_Q = oft_blocks - oft_blocks.transpose(1, 2) + norm_Q = torch.norm(block_Q.flatten()) + new_norm_Q = torch.clamp(norm_Q, max=self.constraint) + block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) + I = torch.eye(self.block_size, device=oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) + block_R = torch.matmul(I + block_Q, (I - block_Q).inverse()) + + block_R_weighted = self.oft_multiplier * block_R + (1 - self.oft_multiplier) * I + R = torch.block_diag(*block_R_weighted) + #R = self.get_weight().to(orig_weight.device, dtype=orig_weight.dtype) + # W = R*W_0 + updown = orig_weight + R + output_shape = [R.size(0), orig_weight.size(1)] + return self.finalize_updown(updown, orig_weight, output_shape) + + # def forward(self, x, y=None): + # x = self.org_forward(x) + # if self.oft_multiplier == 0.0: + # return x + + # R = self.get_weight().to(x.device, dtype=x.dtype) + # if x.dim() == 4: + # x = x.permute(0, 2, 3, 1) + # x = torch.matmul(x, R) + # x = x.permute(0, 3, 1, 2) + # else: + # x = torch.matmul(x, R) + # return x diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 60d8dec4..bd1f1b75 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -11,6 +11,7 
@@ import network_ia3 import network_lokr import network_full import network_norm +import network_oft import torch from typing import Union @@ -28,6 +29,7 @@ module_types = [ network_full.ModuleTypeFull(), network_norm.ModuleTypeNorm(), network_glora.ModuleTypeGLora(), + network_oft.ModuleTypeOFT(), ] @@ -183,6 +185,9 @@ def load_network(name, network_on_disk): elif sd_module is None and "lora_te1_text_model" in key_network_without_network_parts: key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model") sd_module = shared.sd_model.network_layer_mapping.get(key, None) + elif sd_module is None and "oft_unet" in key_network_without_network_parts: + key = key_network_without_network_parts.replace("oft_unet", "diffusion_model") + sd_module = shared.sd_model.network_layer_mapping.get(key, None) # some SD1 Loras also have correct compvis keys if sd_module is None: -- cgit v1.2.3 From 1c6efdbba774d603c592debaccd6f5ad827bd1b2 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Wed, 18 Oct 2023 04:16:01 -0700 Subject: inference working but SLOW --- extensions-builtin/Lora/network_oft.py | 73 +++++++++++++++++----------------- extensions-builtin/Lora/networks.py | 42 +++++++++++++++++-- 2 files changed, 75 insertions(+), 40 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 9ddb175c..f085eca5 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -12,6 +12,7 @@ class ModuleTypeOFT(network.ModuleType): # adapted from https://github.com/kohya-ss/sd-scripts/blob/main/networks/oft.py class NetworkModuleOFT(network.NetworkModule): def __init__(self, net: network.Network, weights: network.NetworkWeights): + super().__init__(net, weights) self.oft_blocks = weights.w["oft_blocks"] @@ -20,24 +21,29 @@ class NetworkModuleOFT(network.NetworkModule): self.dim = self.oft_blocks.shape[0] self.num_blocks = self.dim - #if type(self.alpha) == torch.Tensor: - # self.alpha = self.alpha.detach().numpy() - if "Linear" in self.sd_module.__class__.__name__: self.out_dim = self.sd_module.out_features elif "Conv" in self.sd_module.__class__.__name__: self.out_dim = self.sd_module.out_channels - self.constraint = self.alpha * self.out_dim + self.constraint = self.alpha + #self.constraint = self.alpha * self.out_dim self.block_size = self.out_dim // self.num_blocks - self.oft_multiplier = self.multiplier() + self.org_module: list[torch.Module] = [self.sd_module] + + self.R = self.get_weight() - # replace forward method of original linear rather than replacing the module - # self.org_forward = self.sd_module.forward - # self.sd_module.forward = self.forward + self.apply_to() + + # replace forward method of original linear rather than replacing the module + def apply_to(self): + self.org_forward = self.org_module[0].forward + self.org_module[0].forward = self.forward - def get_weight(self): + def get_weight(self, multiplier=None): + if not multiplier: + multiplier = self.multiplier() block_Q = self.oft_blocks - self.oft_blocks.transpose(1, 2) norm_Q = torch.norm(block_Q.flatten()) new_norm_Q = torch.clamp(norm_Q, max=self.constraint) @@ -45,38 +51,31 @@ class NetworkModuleOFT(network.NetworkModule): I = torch.eye(self.block_size, device=self.oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) block_R = torch.matmul(I + block_Q, (I - block_Q).inverse()) - block_R_weighted = self.oft_multiplier * block_R + (1 - self.oft_multiplier) * I + 
block_R_weighted = multiplier * block_R + (1 - multiplier) * I R = torch.block_diag(*block_R_weighted) return R def calc_updown(self, orig_weight): - oft_blocks = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) - block_Q = oft_blocks - oft_blocks.transpose(1, 2) - norm_Q = torch.norm(block_Q.flatten()) - new_norm_Q = torch.clamp(norm_Q, max=self.constraint) - block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) - I = torch.eye(self.block_size, device=oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) - block_R = torch.matmul(I + block_Q, (I - block_Q).inverse()) - - block_R_weighted = self.oft_multiplier * block_R + (1 - self.oft_multiplier) * I - R = torch.block_diag(*block_R_weighted) - #R = self.get_weight().to(orig_weight.device, dtype=orig_weight.dtype) - # W = R*W_0 - updown = orig_weight + R - output_shape = [R.size(0), orig_weight.size(1)] + R = self.R + if orig_weight.dim() == 4: + weight = torch.einsum("oihw, op -> pihw", orig_weight, R) + else: + weight = torch.einsum("oi, op -> pi", orig_weight, R) + updown = orig_weight @ R + output_shape = [orig_weight.size(0), R.size(1)] + #output_shape = [R.size(0), orig_weight.size(1)] return self.finalize_updown(updown, orig_weight, output_shape) - # def forward(self, x, y=None): - # x = self.org_forward(x) - # if self.oft_multiplier == 0.0: - # return x - - # R = self.get_weight().to(x.device, dtype=x.dtype) - # if x.dim() == 4: - # x = x.permute(0, 2, 3, 1) - # x = torch.matmul(x, R) - # x = x.permute(0, 3, 1, 2) - # else: - # x = torch.matmul(x, R) - # return x + def forward(self, x, y=None): + x = self.org_forward(x) + if self.multiplier() == 0.0: + return x + R = self.get_weight().to(x.device, dtype=x.dtype) + if x.dim() == 4: + x = x.permute(0, 2, 3, 1) + x = torch.matmul(x, R) + x = x.permute(0, 3, 1, 2) + else: + x = torch.matmul(x, R) + return x diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index bd1f1b75..e5e73450 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -169,6 +169,10 @@ def load_network(name, network_on_disk): else: emb_dict[vec_name] = weight bundle_embeddings[emb_name] = emb_dict + + #if key_network_without_network_parts == "oft_unet": + # print(key_network_without_network_parts) + # pass key = convert_diffusers_name_to_compvis(key_network_without_network_parts, is_sd2) sd_module = shared.sd_model.network_layer_mapping.get(key, None) @@ -185,15 +189,39 @@ def load_network(name, network_on_disk): elif sd_module is None and "lora_te1_text_model" in key_network_without_network_parts: key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model") sd_module = shared.sd_model.network_layer_mapping.get(key, None) - elif sd_module is None and "oft_unet" in key_network_without_network_parts: - key = key_network_without_network_parts.replace("oft_unet", "diffusion_model") - sd_module = shared.sd_model.network_layer_mapping.get(key, None) # some SD1 Loras also have correct compvis keys if sd_module is None: key = key_network_without_network_parts.replace("lora_te1_text_model", "transformer_text_model") sd_module = shared.sd_model.network_layer_mapping.get(key, None) + elif sd_module is None and "oft_unet" in key_network_without_network_parts: + # UNET_TARGET_REPLACE_MODULE_ALL_LINEAR = ["Transformer2DModel"] + # UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 = ["ResnetBlock2D", "Downsample2D", "Upsample2D"] + UNET_TARGET_REPLACE_MODULE_ATTN_ONLY = ["CrossAttention"] + # TODO: Change 
matched modules based on whether all linear, conv, etc + + key = key_network_without_network_parts.replace("oft_unet", "diffusion_model") + sd_module = shared.sd_model.network_layer_mapping.get(key, None) + #key_no_suffix = key.rsplit("_to_", 1)[0] + ## Match all modules of class CrossAttention + #replace_module_list = [] + #for module_type in UNET_TARGET_REPLACE_MODULE_ATTN_ONLY: + # replace_module_list += [module for k, module in shared.sd_model.network_layer_mapping.items() if module_type in module.__class__.__name__] + + #matched_module = replace_module_list.get(key_no_suffix, None) + #if key.endswith('to_q'): + # sd_module = matched_module.to_q or None + #if key.endswith('to_k'): + # sd_module = matched_module.to_k or None + #if key.endswith('to_v'): + # sd_module = matched_module.to_v or None + #if key.endswith('to_out_0'): + # sd_module = matched_module.to_out[0] or None + #if key.endswith('to_out_1'): + # sd_module = matched_module.to_out[1] or None + + if sd_module is None: keys_failed_to_match[key_network] = key continue @@ -214,6 +242,14 @@ def load_network(name, network_on_disk): raise AssertionError(f"Could not find a module type (out of {', '.join([x.__class__.__name__ for x in module_types])}) that would accept those keys: {', '.join(weights.w)}") net.modules[key] = net_module + + # replaces forward method of original Linear + # applied_to_count = 0 + #for key, created_module in net.modules.items(): + # if isinstance(created_module, network_oft.NetworkModuleOFT): + # net_module.apply_to() + #applied_to_count += 1 + # print(f'Applied OFT modules: {applied_to_count}') embeddings = {} for emb_name, data in bundle_embeddings.items(): -- cgit v1.2.3 From 853e21d98eada4db9a9fd1ae8eda90cf763e2818 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Wed, 18 Oct 2023 04:27:44 -0700 Subject: faster by using cached R in forward --- extensions-builtin/Lora/network_oft.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index f085eca5..68efb1db 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -57,21 +57,32 @@ class NetworkModuleOFT(network.NetworkModule): return R def calc_updown(self, orig_weight): + # this works R = self.R + + # this causes major deepfrying i.e. 
just doesn't work + # R = self.R.to(orig_weight.device, dtype=orig_weight.dtype) + if orig_weight.dim() == 4: weight = torch.einsum("oihw, op -> pihw", orig_weight, R) else: weight = torch.einsum("oi, op -> pi", orig_weight, R) + updown = orig_weight @ R - output_shape = [orig_weight.size(0), R.size(1)] - #output_shape = [R.size(0), orig_weight.size(1)] + output_shape = self.oft_blocks.shape + + ## this works + # updown = orig_weight @ R + # output_shape = [orig_weight.size(0), R.size(1)] + return self.finalize_updown(updown, orig_weight, output_shape) def forward(self, x, y=None): x = self.org_forward(x) if self.multiplier() == 0.0: return x - R = self.get_weight().to(x.device, dtype=x.dtype) + #R = self.get_weight().to(x.device, dtype=x.dtype) + R = self.R.to(x.device, dtype=x.dtype) if x.dim() == 4: x = x.permute(0, 2, 3, 1) x = torch.matmul(x, R) -- cgit v1.2.3 From eb01d7f0e0fb46285985803296a25715165fb3f9 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Wed, 18 Oct 2023 04:56:53 -0700 Subject: faster by calculating R in updown and using cached R in forward --- extensions-builtin/Lora/network_oft.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 68efb1db..fd5b0c0f 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -58,17 +58,18 @@ class NetworkModuleOFT(network.NetworkModule): def calc_updown(self, orig_weight): # this works - R = self.R + # R = self.R + self.R = self.get_weight(self.multiplier()) - # this causes major deepfrying i.e. just doesn't work + # sending R to device causes major deepfrying i.e. just doesn't work # R = self.R.to(orig_weight.device, dtype=orig_weight.dtype) - if orig_weight.dim() == 4: - weight = torch.einsum("oihw, op -> pihw", orig_weight, R) - else: - weight = torch.einsum("oi, op -> pi", orig_weight, R) + # if orig_weight.dim() == 4: + # weight = torch.einsum("oihw, op -> pihw", orig_weight, R) + # else: + # weight = torch.einsum("oi, op -> pi", orig_weight, R) - updown = orig_weight @ R + updown = orig_weight @ self.R output_shape = self.oft_blocks.shape ## this works -- cgit v1.2.3 From 321680ccd0e0404223fbdf4f26498f7d0317fb75 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Thu, 19 Oct 2023 12:41:17 -0700 Subject: refactor: fix constraint, re-use get_weight --- extensions-builtin/Lora/network_oft.py | 40 ++++++++++++++-------------------- 1 file changed, 16 insertions(+), 24 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index fd5b0c0f..2af1bc4c 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -9,7 +9,7 @@ class ModuleTypeOFT(network.ModuleType): return None -# adapted from https://github.com/kohya-ss/sd-scripts/blob/main/networks/oft.py +# adapted from kohya's implementation https://github.com/kohya-ss/sd-scripts/blob/main/networks/oft.py class NetworkModuleOFT(network.NetworkModule): def __init__(self, net: network.Network, weights: network.NetworkWeights): @@ -17,7 +17,6 @@ class NetworkModuleOFT(network.NetworkModule): self.oft_blocks = weights.w["oft_blocks"] self.alpha = weights.w["alpha"] - self.dim = self.oft_blocks.shape[0] self.num_blocks = self.dim @@ -26,64 +25,57 @@ class NetworkModuleOFT(network.NetworkModule): elif "Conv" in self.sd_module.__class__.__name__: self.out_dim = 
self.sd_module.out_channels - self.constraint = self.alpha - #self.constraint = self.alpha * self.out_dim + self.constraint = self.alpha * self.out_dim self.block_size = self.out_dim // self.num_blocks self.org_module: list[torch.Module] = [self.sd_module] - - self.R = self.get_weight() - + self.R = self.get_weight(self.oft_blocks) self.apply_to() # replace forward method of original linear rather than replacing the module + # how do we revert this to unload the weights? def apply_to(self): self.org_forward = self.org_module[0].forward self.org_module[0].forward = self.forward - def get_weight(self, multiplier=None): - if not multiplier: - multiplier = self.multiplier() - block_Q = self.oft_blocks - self.oft_blocks.transpose(1, 2) + def get_weight(self, oft_blocks, multiplier=None): + block_Q = oft_blocks - oft_blocks.transpose(1, 2) norm_Q = torch.norm(block_Q.flatten()) new_norm_Q = torch.clamp(norm_Q, max=self.constraint) block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) I = torch.eye(self.block_size, device=self.oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) block_R = torch.matmul(I + block_Q, (I - block_Q).inverse()) - - block_R_weighted = multiplier * block_R + (1 - multiplier) * I - R = torch.block_diag(*block_R_weighted) + #block_R_weighted = multiplier * block_R + (1 - multiplier) * I + #R = torch.block_diag(*block_R_weighted) + R = torch.block_diag(*block_R) return R def calc_updown(self, orig_weight): - # this works - # R = self.R - self.R = self.get_weight(self.multiplier()) + oft_blocks = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) - # sending R to device causes major deepfrying i.e. just doesn't work - # R = self.R.to(orig_weight.device, dtype=orig_weight.dtype) + R = self.get_weight(oft_blocks) + self.R = R # if orig_weight.dim() == 4: # weight = torch.einsum("oihw, op -> pihw", orig_weight, R) # else: # weight = torch.einsum("oi, op -> pi", orig_weight, R) - updown = orig_weight @ self.R + updown = orig_weight @ R output_shape = self.oft_blocks.shape - ## this works - # updown = orig_weight @ R - # output_shape = [orig_weight.size(0), R.size(1)] - return self.finalize_updown(updown, orig_weight, output_shape) def forward(self, x, y=None): x = self.org_forward(x) if self.multiplier() == 0.0: return x + + # calculating R here is excruciatingly slow #R = self.get_weight().to(x.device, dtype=x.dtype) R = self.R.to(x.device, dtype=x.dtype) + if x.dim() == 4: x = x.permute(0, 2, 3, 1) x = torch.matmul(x, R) -- cgit v1.2.3 From d10c4db57ed08234a7aed5f530f269ff78544ab0 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Thu, 19 Oct 2023 12:52:14 -0700 Subject: style: formatting --- extensions-builtin/Lora/network_oft.py | 4 ++-- extensions-builtin/Lora/networks.py | 35 ---------------------------------- 2 files changed, 2 insertions(+), 37 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 2af1bc4c..0a87958e 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -37,7 +37,7 @@ class NetworkModuleOFT(network.NetworkModule): def apply_to(self): self.org_forward = self.org_module[0].forward self.org_module[0].forward = self.forward - + def get_weight(self, oft_blocks, multiplier=None): block_Q = oft_blocks - oft_blocks.transpose(1, 2) norm_Q = torch.norm(block_Q.flatten()) @@ -66,7 +66,7 @@ class NetworkModuleOFT(network.NetworkModule): output_shape = self.oft_blocks.shape return 
self.finalize_updown(updown, orig_weight, output_shape) - + def forward(self, x, y=None): x = self.org_forward(x) if self.multiplier() == 0.0: diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index e5e73450..78a97033 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -169,10 +169,6 @@ def load_network(name, network_on_disk): else: emb_dict[vec_name] = weight bundle_embeddings[emb_name] = emb_dict - - #if key_network_without_network_parts == "oft_unet": - # print(key_network_without_network_parts) - # pass key = convert_diffusers_name_to_compvis(key_network_without_network_parts, is_sd2) sd_module = shared.sd_model.network_layer_mapping.get(key, None) @@ -196,31 +192,8 @@ def load_network(name, network_on_disk): sd_module = shared.sd_model.network_layer_mapping.get(key, None) elif sd_module is None and "oft_unet" in key_network_without_network_parts: - # UNET_TARGET_REPLACE_MODULE_ALL_LINEAR = ["Transformer2DModel"] - # UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 = ["ResnetBlock2D", "Downsample2D", "Upsample2D"] - UNET_TARGET_REPLACE_MODULE_ATTN_ONLY = ["CrossAttention"] - # TODO: Change matched modules based on whether all linear, conv, etc - key = key_network_without_network_parts.replace("oft_unet", "diffusion_model") sd_module = shared.sd_model.network_layer_mapping.get(key, None) - #key_no_suffix = key.rsplit("_to_", 1)[0] - ## Match all modules of class CrossAttention - #replace_module_list = [] - #for module_type in UNET_TARGET_REPLACE_MODULE_ATTN_ONLY: - # replace_module_list += [module for k, module in shared.sd_model.network_layer_mapping.items() if module_type in module.__class__.__name__] - - #matched_module = replace_module_list.get(key_no_suffix, None) - #if key.endswith('to_q'): - # sd_module = matched_module.to_q or None - #if key.endswith('to_k'): - # sd_module = matched_module.to_k or None - #if key.endswith('to_v'): - # sd_module = matched_module.to_v or None - #if key.endswith('to_out_0'): - # sd_module = matched_module.to_out[0] or None - #if key.endswith('to_out_1'): - # sd_module = matched_module.to_out[1] or None - if sd_module is None: keys_failed_to_match[key_network] = key @@ -242,14 +215,6 @@ def load_network(name, network_on_disk): raise AssertionError(f"Could not find a module type (out of {', '.join([x.__class__.__name__ for x in module_types])}) that would accept those keys: {', '.join(weights.w)}") net.modules[key] = net_module - - # replaces forward method of original Linear - # applied_to_count = 0 - #for key, created_module in net.modules.items(): - # if isinstance(created_module, network_oft.NetworkModuleOFT): - # net_module.apply_to() - #applied_to_count += 1 - # print(f'Applied OFT modules: {applied_to_count}') embeddings = {} for emb_name, data in bundle_embeddings.items(): -- cgit v1.2.3 From 0550659ce6e1c37d1ab05cb8a2cb31d499fa552f Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Thu, 19 Oct 2023 13:13:02 -0700 Subject: style: fix ambiguous variable name --- extensions-builtin/Lora/network_oft.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 2af1bc4c..4e8382c1 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -43,8 +43,8 @@ class NetworkModuleOFT(network.NetworkModule): def get_weight(self, oft_blocks, multiplier=None): block_Q = oft_blocks - oft_blocks.transpose(1, 2) norm_Q = torch.norm(block_Q.flatten()) new_norm_Q = torch.clamp(norm_Q, max=self.constraint) block_Q = block_Q * 
((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) - I = torch.eye(self.block_size, device=self.oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) - block_R = torch.matmul(I + block_Q, (I - block_Q).inverse()) + m_I = torch.eye(self.block_size, device=self.oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) + block_R = torch.matmul(m_I + block_Q, (m_I - block_Q).inverse()) #block_R_weighted = multiplier * block_R + (1 - multiplier) * I #R = torch.block_diag(*block_R_weighted) R = torch.block_diag(*block_R) -- cgit v1.2.3 From 384fab9627942dc7a5771368180bab9cfe0c2877 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 21 Oct 2023 08:45:51 +0300 Subject: rework some of changes for emphasis editing keys, force conversion of old-style emphasis --- javascript/edit-attention.js | 101 ++++++++++++++++++++----------------------- modules/shared_options.py | 3 +- 2 files changed, 47 insertions(+), 57 deletions(-) diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index 45d9a788..f3af9a4c 100644 --- a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -26,10 +26,15 @@ function keyupEditAttention(event) { // Set the selection to the text between the parenthesis const parenContent = text.substring(beforeParen + 1, selectionStart + afterParen); - if (!/.*:-?[\d.]+/s.test(parenContent)) return false; - const lastColon = parenContent.lastIndexOf(":"); - selectionStart = beforeParen + 1; - selectionEnd = selectionStart + lastColon; + if (/.*:-?[\d.]+/s.test(parenContent)) { + const lastColon = parenContent.lastIndexOf(":"); + selectionStart = beforeParen + 1; + selectionEnd = selectionStart + lastColon; + } else { + selectionStart = beforeParen + 1; + selectionEnd = selectionStart + parenContent.length; + } + target.setSelectionRange(selectionStart, selectionEnd); return true; } @@ -58,7 +63,7 @@ function keyupEditAttention(event) { } // If the user hasn't selected anything, let's select their current parenthesis block or word - if (!selectCurrentParenthesisBlock('<', '>') && !selectCurrentParenthesisBlock('(', ')')) { + if (!selectCurrentParenthesisBlock('<', '>') && !selectCurrentParenthesisBlock('(', ')') && !selectCurrentParenthesisBlock('[', ']')) { selectCurrentWord(); } @@ -66,44 +71,31 @@ function keyupEditAttention(event) { var closeCharacter = ')'; var delta = opts.keyedit_precision_attention; + var start = selectionStart > 0 ? 
text[selectionStart - 1] : ""; + var end = text[selectionEnd]; - if (selectionStart > 0 && /<.*:-?[\d.]+>/s.test(text.slice(selectionStart - 1, selectionEnd + text.slice(selectionEnd).indexOf(">") + 1))) { + if (start == '<') { closeCharacter = '>'; delta = opts.keyedit_precision_extra; - } else if (selectionStart > 0 && /\(.*\)|\[.*\]/s.test(text.slice(selectionStart - 1, selectionEnd + 1))) { - let start = text[selectionStart - 1]; - let end = text[selectionEnd]; - if (opts.keyedit_convert) { - let numParen = 0; - - while (text[selectionStart - numParen - 1] == start && text[selectionEnd + numParen] == end) { - numParen++; - } - - if (start == "(") { - weight = 1.1 ** numParen; - } else { - weight = (1 / 1.1) ** numParen; - } - - weight = Math.round(weight / opts.keyedit_precision_attention) * opts.keyedit_precision_attention; - - text = text.slice(0, selectionStart - numParen) + "(" + text.slice(selectionStart, selectionEnd) + ":" + weight + ")" + text.slice(selectionEnd + numParen); - selectionStart -= numParen - 1; - selectionEnd -= numParen - 1; + } else if (start == '(' && end == ')' || start == '[' && end == ']') { // convert old-style (((emphasis))) + let numParen = 0; + + while (text[selectionStart - numParen - 1] == start && text[selectionEnd + numParen] == end) { + numParen++; + } + + if (start == "[") { + weight = (1 / 1.1) ** numParen; } else { - closeCharacter = null; - if (isPlus) { - text = text.slice(0, selectionStart) + start + text.slice(selectionStart, selectionEnd) + end + text.slice(selectionEnd); - selectionStart++; - selectionEnd++; - } else { - text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + 1); - selectionStart--; - selectionEnd--; - } + weight = 1.1 ** numParen; } - } else if (selectionStart == 0 || !/\(.*:-?[\d.]+\)/s.test(text.slice(selectionStart - 1, selectionEnd + text.slice(selectionEnd).indexOf(")") + 1))) { + + weight = Math.round(weight / opts.keyedit_precision_attention) * opts.keyedit_precision_attention; + + text = text.slice(0, selectionStart - numParen) + "(" + text.slice(selectionStart, selectionEnd) + ":" + weight + ")" + text.slice(selectionEnd + numParen); + selectionStart -= numParen - 1; + selectionEnd -= numParen - 1; + } else if (start != '(') { // do not include spaces at the end while (selectionEnd > selectionStart && text[selectionEnd - 1] == ' ') { selectionEnd--; @@ -119,23 +111,22 @@ function keyupEditAttention(event) { selectionEnd++; } - if (closeCharacter) { - var end = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1; - var weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + end)); - if (isNaN(weight)) return; - - weight += isPlus ? delta : -delta; - weight = parseFloat(weight.toPrecision(12)); - if (Number.isInteger(weight)) weight += ".0"; - - if (closeCharacter == ')' && weight == 1) { - var endParenPos = text.substring(selectionEnd).indexOf(')'); - text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + endParenPos + 1); - selectionStart--; - selectionEnd--; - } else { - text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + end); - } + if (text[selectionEnd] != ':') return; + var weightLength = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1; + var weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + weightLength)); + if (isNaN(weight)) return; + + weight += isPlus ? 
delta : -delta; + weight = parseFloat(weight.toPrecision(12)); + if (Number.isInteger(weight)) weight += ".0"; + + if (closeCharacter == ')' && weight == 1) { + var endParenPos = text.substring(selectionEnd).indexOf(')'); + text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + endParenPos + 1); + selectionStart--; + selectionEnd--; + } else { + text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + weightLength); } target.focus(); diff --git a/modules/shared_options.py b/modules/shared_options.py index 32bf7353..0a82216f 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -259,9 +259,8 @@ options_templates.update(options_section(('ui', "User interface"), { "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(), "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_delimiters": OptionInfo(r".,\/!?%^*;:{}=`~()[]<>| ", "Ctrl+up/down word delimiters"), + "keyedit_delimiters": OptionInfo(r".,\/!?%^*;:{}=`~() ", "Ctrl+up/down word delimiters"), "keyedit_delimiters_whitespace": OptionInfo(["Tab", "Carriage Return", "Line Feed"], "Ctrl+up/down whitespace delimiters", gr.CheckboxGroup, lambda: {"choices": ["Tab", "Carriage Return", "Line Feed"]}), - "keyedit_convert": OptionInfo(True, "Convert (attention) to (attention:1.1)"), "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(), -- cgit v1.2.3 From 464fbcd92118bf00173b9982325fe6348201313e Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 21 Oct 2023 09:09:32 +0300 Subject: fix the situation with emphasis editing (aaaa:1.1) bbbb (cccc:1.1) --- javascript/edit-attention.js | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index f3af9a4c..04464100 100644 --- a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -19,11 +19,17 @@ function keyupEditAttention(event) { let beforeParen = before.lastIndexOf(OPEN); if (beforeParen == -1) return false; + let beforeClosingParen = before.lastIndexOf(CLOSE); + if (beforeClosingParen != -1 && beforeClosingParen > beforeParen) return false; + // Find closing parenthesis around current cursor const after = text.substring(selectionStart); let afterParen = after.indexOf(CLOSE); if (afterParen == -1) return false; + let afterOpeningParen = after.indexOf(OPEN); + if (afterOpeningParen != -1 && afterOpeningParen < beforeParen) return false; + // Set the selection to the text between the parenthesis const parenContent = text.substring(beforeParen + 1, selectionStart + afterParen); if (/.*:-?[\d.]+/s.test(parenContent)) { -- cgit v1.2.3 From 443ca983ade333721930ea2f18f80b45762e2aea Mon Sep 17 00:00:00 2001 From: avantcontra 
Date: Sun, 22 Oct 2023 03:21:23 +0800 Subject: fix bug when using --gfpgan-models-path --- modules/gfpgan_model.py | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py index 8e0f13bd..93567253 100644 --- a/modules/gfpgan_model.py +++ b/modules/gfpgan_model.py @@ -9,6 +9,7 @@ from modules import paths, shared, devices, modelloader, errors model_dir = "GFPGAN" user_path = None model_path = os.path.join(paths.models_path, model_dir) +model_file_path = None model_url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth" have_gfpgan = False loaded_gfpgan_model = None @@ -17,24 +18,32 @@ loaded_gfpgan_model = None def gfpgann(): global loaded_gfpgan_model global model_path + global model_file_path if loaded_gfpgan_model is not None: loaded_gfpgan_model.gfpgan.to(devices.device_gfpgan) return loaded_gfpgan_model if gfpgan_constructor is None: return None + + models = modelloader.load_models(model_path, model_url, user_path, ext_filter=['.pth']) - models = modelloader.load_models(model_path, model_url, user_path, ext_filter="GFPGAN") if len(models) == 1 and models[0].startswith("http"): model_file = models[0] elif len(models) != 0: - latest_file = max(models, key=os.path.getctime) + gfp_models = [] + for item in models: + if 'GFPGAN' in os.path.basename(item): + gfp_models.append(item) + latest_file = max(gfp_models, key=os.path.getctime) model_file = latest_file else: print("Unable to load gfpgan model!") return None + if hasattr(facexlib.detection.retinaface, 'device'): facexlib.detection.retinaface.device = devices.device_gfpgan + model_file_path = model_file model = gfpgan_constructor(model_path=model_file, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=devices.device_gfpgan) loaded_gfpgan_model = model @@ -77,19 +86,25 @@ def setup_model(dirname): global user_path global have_gfpgan global gfpgan_constructor + global model_file_path + + facexlib_path = model_path + + if dirname is not None: + facexlib_path = dirname load_file_from_url_orig = gfpgan.utils.load_file_from_url facex_load_file_from_url_orig = facexlib.detection.load_file_from_url facex_load_file_from_url_orig2 = facexlib.parsing.load_file_from_url def my_load_file_from_url(**kwargs): - return load_file_from_url_orig(**dict(kwargs, model_dir=model_path)) + return load_file_from_url_orig(**dict(kwargs, model_dir=model_file_path)) def facex_load_file_from_url(**kwargs): - return facex_load_file_from_url_orig(**dict(kwargs, save_dir=model_path, model_dir=None)) + return facex_load_file_from_url_orig(**dict(kwargs, save_dir=facexlib_path, model_dir=None)) def facex_load_file_from_url2(**kwargs): - return facex_load_file_from_url_orig2(**dict(kwargs, save_dir=model_path, model_dir=None)) + return facex_load_file_from_url_orig2(**dict(kwargs, save_dir=facexlib_path, model_dir=None)) gfpgan.utils.load_file_from_url = my_load_file_from_url facexlib.detection.load_file_from_url = facex_load_file_from_url -- cgit v1.2.3 From 236dd55dbe895ba72a64567482ee67ab680c5344 Mon Sep 17 00:00:00 2001 From: avantcontra Date: Sun, 22 Oct 2023 04:32:13 +0800 Subject: fix Blank line contains whitespace --- modules/gfpgan_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py index 93567253..01d668ec 100644 --- a/modules/gfpgan_model.py +++ b/modules/gfpgan_model.py @@ -25,7 +25,7 @@ def gfpgann(): if gfpgan_constructor is None: return None - + 
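    # The load_models call below returns every .pth candidate (or a download URL);
    # the selection that follows keeps files whose basename contains "GFPGAN" and
    # picks the newest. A rough standalone sketch of that step, assuming `models`
    # is a plain list of local paths (illustration only, not repo code):
    #
    #     import os
    #     gfp_models = [m for m in models if 'GFPGAN' in os.path.basename(m)]
    #     model_file = max(gfp_models, key=os.path.getctime) if gfp_models else None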
models = modelloader.load_models(model_path, model_url, user_path, ext_filter=['.pth']) if len(models) == 1 and models[0].startswith("http"): -- cgit v1.2.3 From 2d8c894b274d60a3e3563a2ace23c4ebcea9e652 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Sat, 21 Oct 2023 13:43:31 -0700 Subject: refactor: use forward hook instead of custom forward --- extensions-builtin/Lora/network_oft.py | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 4e8382c1..8e561ab0 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -36,9 +36,11 @@ class NetworkModuleOFT(network.NetworkModule): # how do we revert this to unload the weights? def apply_to(self): self.org_forward = self.org_module[0].forward - self.org_module[0].forward = self.forward + #self.org_module[0].forward = self.forward + self.org_module[0].register_forward_hook(self.forward_hook) def get_weight(self, oft_blocks, multiplier=None): + self.constraint = self.constraint.to(oft_blocks.device, dtype=oft_blocks.dtype) block_Q = oft_blocks - oft_blocks.transpose(1, 2) norm_Q = torch.norm(block_Q.flatten()) new_norm_Q = torch.clamp(norm_Q, max=self.constraint) @@ -66,14 +68,10 @@ class NetworkModuleOFT(network.NetworkModule): output_shape = self.oft_blocks.shape return self.finalize_updown(updown, orig_weight, output_shape) - - def forward(self, x, y=None): - x = self.org_forward(x) - if self.multiplier() == 0.0: - return x - - # calculating R here is excruciatingly slow - #R = self.get_weight().to(x.device, dtype=x.dtype) + + def forward_hook(self, module, args, output): + #print(f'Forward hook in {self.network_key} called') + x = output R = self.R.to(x.device, dtype=x.dtype) if x.dim() == 4: @@ -83,3 +81,20 @@ class NetworkModuleOFT(network.NetworkModule): else: x = torch.matmul(x, R) return x + + # def forward(self, x, y=None): + # x = self.org_forward(x) + # if self.multiplier() == 0.0: + # return x + + # # calculating R here is excruciatingly slow + # #R = self.get_weight().to(x.device, dtype=x.dtype) + # R = self.R.to(x.device, dtype=x.dtype) + + # if x.dim() == 4: + # x = x.permute(0, 2, 3, 1) + # x = torch.matmul(x, R) + # x = x.permute(0, 3, 1, 2) + # else: + # x = torch.matmul(x, R) + # return x -- cgit v1.2.3 From 768354772853a1d27a9bf7e41bd6a6e4eac7a9c7 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Sat, 21 Oct 2023 14:42:24 -0700 Subject: fix: return orig weights during updown, merge weights before forward --- extensions-builtin/Lora/network_oft.py | 90 ++++++++++++++++++++++++++-------- 1 file changed, 69 insertions(+), 21 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 8e561ab0..f5f32c23 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -1,5 +1,6 @@ import torch import network +from modules import devices class ModuleTypeOFT(network.ModuleType): @@ -29,23 +30,56 @@ class NetworkModuleOFT(network.NetworkModule): self.block_size = self.out_dim // self.num_blocks self.org_module: list[torch.Module] = [self.sd_module] + self.org_weight = self.org_module[0].weight.to(self.org_module[0].weight.device, copy=True) + #self.org_weight = self.org_module[0].weight.to(devices.cpu, copy=True) self.R = self.get_weight(self.oft_blocks) + + self.merged_weight = self.merge_weight() 
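+        # get_weight() builds R with a Cayley transform: Q = B - B.T is skew-symmetric,
+        # so (I + Q)(I - Q)^-1 is orthogonal, and torch.block_diag keeps the rotation
+        # block-diagonal. A small sketch of that identity (illustration only):
+        #
+        #     B = torch.randn(4, 4)
+        #     Q = B - B.T                                   # skew-symmetric
+        #     I = torch.eye(4)
+        #     R = (I + Q) @ torch.linalg.inv(I - Q)         # Cayley transform
+        #     assert torch.allclose(R @ R.T, I, atol=1e-5)  # R is orthogonal
+        #
+        # Merging R into the base weight once up front means the forward pre-hook
+        # below only swaps tensors instead of paying an extra matmul per call.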
self.apply_to() + self.merged = False + + + def merge_weight(self): + org_sd = self.org_module[0].state_dict() + R = self.R.to(self.org_weight.device, dtype=self.org_weight.dtype) + if self.org_weight.dim() == 4: + weight = torch.einsum("oihw, op -> pihw", self.org_weight, R) + else: + weight = torch.einsum("oi, op -> pi", self.org_weight, R) + org_sd['weight'] = weight + # replace weight + #self.org_module[0].load_state_dict(org_sd) + return weight + pass + + def replace_weight(self, new_weight): + org_sd = self.org_module[0].state_dict() + org_sd['weight'] = new_weight + self.org_module[0].load_state_dict(org_sd) + self.merged = True + + def restore_weight(self): + org_sd = self.org_module[0].state_dict() + org_sd['weight'] = self.org_weight + self.org_module[0].load_state_dict(org_sd) + self.merged = False + # replace forward method of original linear rather than replacing the module # how do we revert this to unload the weights? def apply_to(self): self.org_forward = self.org_module[0].forward #self.org_module[0].forward = self.forward + self.org_module[0].register_forward_pre_hook(self.pre_forward_hook) self.org_module[0].register_forward_hook(self.forward_hook) def get_weight(self, oft_blocks, multiplier=None): - self.constraint = self.constraint.to(oft_blocks.device, dtype=oft_blocks.dtype) + constraint = self.constraint.to(oft_blocks.device, dtype=oft_blocks.dtype) block_Q = oft_blocks - oft_blocks.transpose(1, 2) norm_Q = torch.norm(block_Q.flatten()) - new_norm_Q = torch.clamp(norm_Q, max=self.constraint) + new_norm_Q = torch.clamp(norm_Q, max=constraint) block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) - m_I = torch.eye(self.block_size, device=self.oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) + m_I = torch.eye(self.block_size, device=oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) block_R = torch.matmul(m_I + block_Q, (m_I - block_Q).inverse()) #block_R_weighted = multiplier * block_R + (1 - multiplier) * I #R = torch.block_diag(*block_R_weighted) @@ -54,33 +88,47 @@ class NetworkModuleOFT(network.NetworkModule): return R def calc_updown(self, orig_weight): - oft_blocks = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) + #oft_blocks = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) - R = self.get_weight(oft_blocks) - self.R = R + #R = self.R.to(orig_weight.device, dtype=orig_weight.dtype) + ##self.R = R - # if orig_weight.dim() == 4: - # weight = torch.einsum("oihw, op -> pihw", orig_weight, R) - # else: - # weight = torch.einsum("oi, op -> pi", orig_weight, R) + #if orig_weight.dim() == 4: + # weight = torch.einsum("oihw, op -> pihw", orig_weight, R) + #else: + # weight = torch.einsum("oi, op -> pi", orig_weight, R) - updown = orig_weight @ R - output_shape = self.oft_blocks.shape + #updown = orig_weight @ R + #updown = weight + updown = torch.zeros_like(orig_weight, device=orig_weight.device, dtype=orig_weight.dtype) + #updown = orig_weight + output_shape = orig_weight.shape + #orig_weight = self.merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) + #output_shape = self.oft_blocks.shape return self.finalize_updown(updown, orig_weight, output_shape) + def pre_forward_hook(self, module, input): + if not self.merged: + self.replace_weight(self.merged_weight) + + def forward_hook(self, module, args, output): + if self.merged: + pass + #self.restore_weight() #print(f'Forward hook in {self.network_key} called') - x = output - R = self.R.to(x.device, dtype=x.dtype) - if x.dim() == 4: - x = 
x.permute(0, 2, 3, 1) - x = torch.matmul(x, R) - x = x.permute(0, 3, 1, 2) - else: - x = torch.matmul(x, R) - return x + #x = output + #R = self.R.to(x.device, dtype=x.dtype) + + #if x.dim() == 4: + # x = x.permute(0, 2, 3, 1) + # x = torch.matmul(x, R) + # x = x.permute(0, 3, 1, 2) + #else: + # x = torch.matmul(x, R) + #return x # def forward(self, x, y=None): # x = self.org_forward(x) -- cgit v1.2.3 From fce86ab7d75690785f0f5b496f1b3aee922c0ae3 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Sat, 21 Oct 2023 16:03:54 -0700 Subject: fix: support multiplier, no forward pass hook --- extensions-builtin/Lora/network_oft.py | 43 ++++++++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 10 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index f5f32c23..e0672ba6 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -32,21 +32,27 @@ class NetworkModuleOFT(network.NetworkModule): self.org_module: list[torch.Module] = [self.sd_module] self.org_weight = self.org_module[0].weight.to(self.org_module[0].weight.device, copy=True) #self.org_weight = self.org_module[0].weight.to(devices.cpu, copy=True) - self.R = self.get_weight(self.oft_blocks) + init_multiplier = self.multiplier() * self.calc_scale() + self.last_multiplier = init_multiplier + self.R = self.get_weight(self.oft_blocks, init_multiplier) self.merged_weight = self.merge_weight() self.apply_to() self.merged = False + # weights_backup = getattr(self.org_module[0], 'network_weights_backup', None) + # if weights_backup is None: + # self.org_module[0].network_weights_backup = self.org_weight + def merge_weight(self): - org_sd = self.org_module[0].state_dict() + #org_sd = self.org_module[0].state_dict() R = self.R.to(self.org_weight.device, dtype=self.org_weight.dtype) if self.org_weight.dim() == 4: weight = torch.einsum("oihw, op -> pihw", self.org_weight, R) else: weight = torch.einsum("oi, op -> pi", self.org_weight, R) - org_sd['weight'] = weight + #org_sd['weight'] = weight # replace weight #self.org_module[0].load_state_dict(org_sd) return weight @@ -74,6 +80,7 @@ class NetworkModuleOFT(network.NetworkModule): self.org_module[0].register_forward_hook(self.forward_hook) def get_weight(self, oft_blocks, multiplier=None): + multiplier = multiplier.to(oft_blocks.device, dtype=oft_blocks.dtype) constraint = self.constraint.to(oft_blocks.device, dtype=oft_blocks.dtype) block_Q = oft_blocks - oft_blocks.transpose(1, 2) norm_Q = torch.norm(block_Q.flatten()) @@ -81,9 +88,9 @@ class NetworkModuleOFT(network.NetworkModule): block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) m_I = torch.eye(self.block_size, device=oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) block_R = torch.matmul(m_I + block_Q, (m_I - block_Q).inverse()) - #block_R_weighted = multiplier * block_R + (1 - multiplier) * I - #R = torch.block_diag(*block_R_weighted) - R = torch.block_diag(*block_R) + block_R_weighted = multiplier * block_R + (1 - multiplier) * m_I + R = torch.block_diag(*block_R_weighted) + #R = torch.block_diag(*block_R) return R @@ -93,6 +100,8 @@ class NetworkModuleOFT(network.NetworkModule): #R = self.R.to(orig_weight.device, dtype=orig_weight.dtype) ##self.R = R + #R = self.R.to(orig_weight.device, dtype=orig_weight.dtype) + ##self.R = R #if orig_weight.dim() == 4: # weight = torch.einsum("oihw, op -> pihw", orig_weight, R) #else: @@ -103,19 +112,33 @@ class 
NetworkModuleOFT(network.NetworkModule): updown = torch.zeros_like(orig_weight, device=orig_weight.device, dtype=orig_weight.dtype) #updown = orig_weight output_shape = orig_weight.shape - #orig_weight = self.merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) + orig_weight = self.merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) #output_shape = self.oft_blocks.shape return self.finalize_updown(updown, orig_weight, output_shape) def pre_forward_hook(self, module, input): - if not self.merged: + multiplier = self.multiplier() * self.calc_scale() + if not multiplier==self.last_multiplier or not self.merged: + + #if multiplier != self.last_multiplier or not self.merged: + self.R = self.get_weight(self.oft_blocks, multiplier) + self.last_multiplier = multiplier + self.merged_weight = self.merge_weight() self.replace_weight(self.merged_weight) + #elif not self.merged: + # self.replace_weight(self.merged_weight) def forward_hook(self, module, args, output): - if self.merged: - pass + pass + #output = output * self.multiplier() * self.calc_scale() + #if len(args) > 0: + # y = args[0] + # output = output + y + #return output + #if self.merged: + # pass #self.restore_weight() #print(f'Forward hook in {self.network_key} called') -- cgit v1.2.3 From 76f5abdbdb739133eff2ccefa36eac62bea3fa08 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Sat, 21 Oct 2023 16:07:45 -0700 Subject: style: cleanup oft --- extensions-builtin/Lora/network_oft.py | 82 +++------------------------------- 1 file changed, 7 insertions(+), 75 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index e0672ba6..e462ccb1 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -1,6 +1,5 @@ import torch import network -from modules import devices class ModuleTypeOFT(network.ModuleType): @@ -31,33 +30,24 @@ class NetworkModuleOFT(network.NetworkModule): self.org_module: list[torch.Module] = [self.sd_module] self.org_weight = self.org_module[0].weight.to(self.org_module[0].weight.device, copy=True) - #self.org_weight = self.org_module[0].weight.to(devices.cpu, copy=True) + init_multiplier = self.multiplier() * self.calc_scale() self.last_multiplier = init_multiplier + self.R = self.get_weight(self.oft_blocks, init_multiplier) self.merged_weight = self.merge_weight() self.apply_to() self.merged = False - # weights_backup = getattr(self.org_module[0], 'network_weights_backup', None) - # if weights_backup is None: - # self.org_module[0].network_weights_backup = self.org_weight - - def merge_weight(self): - #org_sd = self.org_module[0].state_dict() R = self.R.to(self.org_weight.device, dtype=self.org_weight.dtype) if self.org_weight.dim() == 4: weight = torch.einsum("oihw, op -> pihw", self.org_weight, R) else: weight = torch.einsum("oi, op -> pi", self.org_weight, R) - #org_sd['weight'] = weight - # replace weight - #self.org_module[0].load_state_dict(org_sd) return weight - pass - + def replace_weight(self, new_weight): org_sd = self.org_module[0].state_dict() org_sd['weight'] = new_weight @@ -70,9 +60,7 @@ class NetworkModuleOFT(network.NetworkModule): self.org_module[0].load_state_dict(org_sd) self.merged = False - - # replace forward method of original linear rather than replacing the module - # how do we revert this to unload the weights? + # FIXME: hook forward method of original linear, but how do we undo the hook when we are done? 
def apply_to(self): self.org_forward = self.org_module[0].forward #self.org_module[0].forward = self.forward @@ -90,82 +78,26 @@ class NetworkModuleOFT(network.NetworkModule): block_R = torch.matmul(m_I + block_Q, (m_I - block_Q).inverse()) block_R_weighted = multiplier * block_R + (1 - multiplier) * m_I R = torch.block_diag(*block_R_weighted) - #R = torch.block_diag(*block_R) return R def calc_updown(self, orig_weight): - #oft_blocks = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) - - #R = self.R.to(orig_weight.device, dtype=orig_weight.dtype) - ##self.R = R - - #R = self.R.to(orig_weight.device, dtype=orig_weight.dtype) - ##self.R = R - #if orig_weight.dim() == 4: - # weight = torch.einsum("oihw, op -> pihw", orig_weight, R) - #else: - # weight = torch.einsum("oi, op -> pi", orig_weight, R) - - #updown = orig_weight @ R - #updown = weight updown = torch.zeros_like(orig_weight, device=orig_weight.device, dtype=orig_weight.dtype) - #updown = orig_weight output_shape = orig_weight.shape orig_weight = self.merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) #output_shape = self.oft_blocks.shape return self.finalize_updown(updown, orig_weight, output_shape) - + def pre_forward_hook(self, module, input): multiplier = self.multiplier() * self.calc_scale() - if not multiplier==self.last_multiplier or not self.merged: - #if multiplier != self.last_multiplier or not self.merged: + if not multiplier==self.last_multiplier or not self.merged: self.R = self.get_weight(self.oft_blocks, multiplier) self.last_multiplier = multiplier self.merged_weight = self.merge_weight() self.replace_weight(self.merged_weight) - #elif not self.merged: - # self.replace_weight(self.merged_weight) - + def forward_hook(self, module, args, output): pass - #output = output * self.multiplier() * self.calc_scale() - #if len(args) > 0: - # y = args[0] - # output = output + y - #return output - #if self.merged: - # pass - #self.restore_weight() - #print(f'Forward hook in {self.network_key} called') - - #x = output - #R = self.R.to(x.device, dtype=x.dtype) - - #if x.dim() == 4: - # x = x.permute(0, 2, 3, 1) - # x = torch.matmul(x, R) - # x = x.permute(0, 3, 1, 2) - #else: - # x = torch.matmul(x, R) - #return x - - # def forward(self, x, y=None): - # x = self.org_forward(x) - # if self.multiplier() == 0.0: - # return x - - # # calculating R here is excruciatingly slow - # #R = self.get_weight().to(x.device, dtype=x.dtype) - # R = self.R.to(x.device, dtype=x.dtype) - - # if x.dim() == 4: - # x = x.permute(0, 2, 3, 1) - # x = torch.matmul(x, R) - # x = x.permute(0, 3, 1, 2) - # else: - # x = torch.matmul(x, R) - # return x -- cgit v1.2.3 From de8ee92ed88b855098e273f576a27f4789f0693d Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Sat, 21 Oct 2023 17:37:17 -0700 Subject: fix: use merge_weight to cache value --- extensions-builtin/Lora/network_oft.py | 57 ++++++++++++++++++++++++---------- 1 file changed, 40 insertions(+), 17 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index e462ccb1..ebe6740c 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -29,23 +29,27 @@ class NetworkModuleOFT(network.NetworkModule): self.block_size = self.out_dim // self.num_blocks self.org_module: list[torch.Module] = [self.sd_module] - self.org_weight = self.org_module[0].weight.to(self.org_module[0].weight.device, copy=True) + #self.org_weight = 
self.org_module[0].weight.to(self.org_module[0].weight.device, copy=True) init_multiplier = self.multiplier() * self.calc_scale() self.last_multiplier = init_multiplier self.R = self.get_weight(self.oft_blocks, init_multiplier) + self.hooks = [] self.merged_weight = self.merge_weight() - self.apply_to() + + #self.apply_to() + self.applied = False self.merged = False def merge_weight(self): - R = self.R.to(self.org_weight.device, dtype=self.org_weight.dtype) - if self.org_weight.dim() == 4: - weight = torch.einsum("oihw, op -> pihw", self.org_weight, R) + org_weight = self.org_module[0].weight + R = self.R.to(org_weight.device, dtype=org_weight.dtype) + if org_weight.dim() == 4: + weight = torch.einsum("oihw, op -> pihw", org_weight, R) else: - weight = torch.einsum("oi, op -> pi", self.org_weight, R) + weight = torch.einsum("oi, op -> pi", org_weight, R) return weight def replace_weight(self, new_weight): @@ -55,17 +59,29 @@ class NetworkModuleOFT(network.NetworkModule): self.merged = True def restore_weight(self): - org_sd = self.org_module[0].state_dict() - org_sd['weight'] = self.org_weight - self.org_module[0].load_state_dict(org_sd) - self.merged = False + pass + #org_sd = self.org_module[0].state_dict() + #org_sd['weight'] = self.org_weight + #self.org_module[0].load_state_dict(org_sd) + #self.merged = False # FIXME: hook forward method of original linear, but how do we undo the hook when we are done? def apply_to(self): - self.org_forward = self.org_module[0].forward - #self.org_module[0].forward = self.forward - self.org_module[0].register_forward_pre_hook(self.pre_forward_hook) - self.org_module[0].register_forward_hook(self.forward_hook) + if not self.applied: + self.org_forward = self.org_module[0].forward + #self.org_module[0].forward = self.forward + prehook = self.org_module[0].register_forward_pre_hook(self.pre_forward_hook) + hook = self.org_module[0].register_forward_hook(self.forward_hook) + self.hooks.append(prehook) + self.hooks.append(hook) + self.applied = True + + def remove_from(self): + if self.applied: + for hook in self.hooks: + hook.remove() + self.hooks = [] + self.applied = False def get_weight(self, oft_blocks, multiplier=None): multiplier = multiplier.to(oft_blocks.device, dtype=oft_blocks.dtype) @@ -82,14 +98,22 @@ class NetworkModuleOFT(network.NetworkModule): return R def calc_updown(self, orig_weight): + if not self.applied: + self.apply_to() + + self.merged_weight = self.merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) + updown = torch.zeros_like(orig_weight, device=orig_weight.device, dtype=orig_weight.dtype) output_shape = orig_weight.shape - orig_weight = self.merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) + orig_weight = self.merged_weight #output_shape = self.oft_blocks.shape return self.finalize_updown(updown, orig_weight, output_shape) def pre_forward_hook(self, module, input): + #if not self.applied: + # self.apply_to() + multiplier = self.multiplier() * self.calc_scale() if not multiplier==self.last_multiplier or not self.merged: @@ -98,6 +122,5 @@ class NetworkModuleOFT(network.NetworkModule): self.merged_weight = self.merge_weight() self.replace_weight(self.merged_weight) - def forward_hook(self, module, args, output): - pass + pass \ No newline at end of file -- cgit v1.2.3 From 4a50c9638c3eac860fb05ae603cd61aabf4cd1a9 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Sun, 22 Oct 2023 08:54:24 -0700 Subject: refactor: remove used OFT functions --- 
extensions-builtin/Lora/network_oft.py | 82 +++++----------------------------- 1 file changed, 10 insertions(+), 72 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index ebe6740c..3034a407 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -29,98 +29,36 @@ class NetworkModuleOFT(network.NetworkModule): self.block_size = self.out_dim // self.num_blocks self.org_module: list[torch.Module] = [self.sd_module] - #self.org_weight = self.org_module[0].weight.to(self.org_module[0].weight.device, copy=True) - init_multiplier = self.multiplier() * self.calc_scale() - self.last_multiplier = init_multiplier - - self.R = self.get_weight(self.oft_blocks, init_multiplier) - - self.hooks = [] - self.merged_weight = self.merge_weight() - - #self.apply_to() - self.applied = False - self.merged = False - - def merge_weight(self): - org_weight = self.org_module[0].weight - R = self.R.to(org_weight.device, dtype=org_weight.dtype) + def merge_weight(self, R_weight, org_weight): + R_weight = R_weight.to(org_weight.device, dtype=org_weight.dtype) if org_weight.dim() == 4: - weight = torch.einsum("oihw, op -> pihw", org_weight, R) + weight = torch.einsum("oihw, op -> pihw", org_weight, R_weight) else: - weight = torch.einsum("oi, op -> pi", org_weight, R) + weight = torch.einsum("oi, op -> pi", org_weight, R_weight) return weight - def replace_weight(self, new_weight): - org_sd = self.org_module[0].state_dict() - org_sd['weight'] = new_weight - self.org_module[0].load_state_dict(org_sd) - self.merged = True - - def restore_weight(self): - pass - #org_sd = self.org_module[0].state_dict() - #org_sd['weight'] = self.org_weight - #self.org_module[0].load_state_dict(org_sd) - #self.merged = False - - # FIXME: hook forward method of original linear, but how do we undo the hook when we are done? 
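The math behind get_weight above is the Cayley transform: oft_blocks is antisymmetrized into Q (so Q^T = -Q), optionally norm-clamped against the constraint, and R = (I + Q)(I - Q)^-1 is then guaranteed orthogonal; the blend multiplier * R + (1 - multiplier) * I interpolates between the learned rotation and the identity. A quick standalone check of the orthogonality property:

    import torch

    q = torch.randn(4, 4)
    Q = q - q.T                                   # skew-symmetric, as in get_weight
    I = torch.eye(4)
    R = (I + Q) @ torch.linalg.inv(I - Q)         # Cayley transform
    print(torch.allclose(R @ R.T, I, atol=1e-5))  # True: R is orthogonal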
- def apply_to(self): - if not self.applied: - self.org_forward = self.org_module[0].forward - #self.org_module[0].forward = self.forward - prehook = self.org_module[0].register_forward_pre_hook(self.pre_forward_hook) - hook = self.org_module[0].register_forward_hook(self.forward_hook) - self.hooks.append(prehook) - self.hooks.append(hook) - self.applied = True - - def remove_from(self): - if self.applied: - for hook in self.hooks: - hook.remove() - self.hooks = [] - self.applied = False - def get_weight(self, oft_blocks, multiplier=None): - multiplier = multiplier.to(oft_blocks.device, dtype=oft_blocks.dtype) constraint = self.constraint.to(oft_blocks.device, dtype=oft_blocks.dtype) + block_Q = oft_blocks - oft_blocks.transpose(1, 2) norm_Q = torch.norm(block_Q.flatten()) new_norm_Q = torch.clamp(norm_Q, max=constraint) block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) m_I = torch.eye(self.block_size, device=oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) block_R = torch.matmul(m_I + block_Q, (m_I - block_Q).inverse()) + block_R_weighted = multiplier * block_R + (1 - multiplier) * m_I R = torch.block_diag(*block_R_weighted) return R def calc_updown(self, orig_weight): - if not self.applied: - self.apply_to() - - self.merged_weight = self.merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) + R = self.get_weight(self.oft_blocks, self.multiplier()) + merged_weight = self.merge_weight(R, orig_weight) - updown = torch.zeros_like(orig_weight, device=orig_weight.device, dtype=orig_weight.dtype) + updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight output_shape = orig_weight.shape - orig_weight = self.merged_weight - #output_shape = self.oft_blocks.shape + orig_weight = orig_weight return self.finalize_updown(updown, orig_weight, output_shape) - - def pre_forward_hook(self, module, input): - #if not self.applied: - # self.apply_to() - - multiplier = self.multiplier() * self.calc_scale() - - if not multiplier==self.last_multiplier or not self.merged: - self.R = self.get_weight(self.oft_blocks, multiplier) - self.last_multiplier = multiplier - self.merged_weight = self.merge_weight() - self.replace_weight(self.merged_weight) - - def forward_hook(self, module, args, output): - pass \ No newline at end of file -- cgit v1.2.3 From 3b8515d2c9abad7f0ccaac0215803716e861ee0e Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Sun, 22 Oct 2023 09:27:48 -0700 Subject: fix: multiplier applied twice in finalize_updown --- extensions-builtin/Lora/network_oft.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 3034a407..efbdd296 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -54,7 +54,8 @@ class NetworkModuleOFT(network.NetworkModule): return R def calc_updown(self, orig_weight): - R = self.get_weight(self.oft_blocks, self.multiplier()) + multiplier = self.multiplier() * self.calc_scale() + R = self.get_weight(self.oft_blocks, multiplier) merged_weight = self.merge_weight(R, orig_weight) updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight @@ -62,3 +63,23 @@ class NetworkModuleOFT(network.NetworkModule): orig_weight = orig_weight return self.finalize_updown(updown, orig_weight, output_shape) + + # override to remove the multiplier/scale factor; it's already multiplied in get_weight + def 
finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None): + #return super().finalize_updown(updown, orig_weight, output_shape, ex_bias) + + if self.bias is not None: + updown = updown.reshape(self.bias.shape) + updown += self.bias.to(orig_weight.device, dtype=orig_weight.dtype) + updown = updown.reshape(output_shape) + + if len(output_shape) == 4: + updown = updown.reshape(output_shape) + + if orig_weight.size().numel() == updown.size().numel(): + updown = updown.reshape(orig_weight.shape) + + if ex_bias is not None: + ex_bias = ex_bias * self.multiplier() + + return updown, ex_bias -- cgit v1.2.3 From 6523edb8a45d4e09f11f3b4e1d133afa6fb65e53 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Sun, 22 Oct 2023 09:31:15 -0700 Subject: style: conform style --- extensions-builtin/Lora/network_oft.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index efbdd296..e43c9a1d 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -63,7 +63,7 @@ class NetworkModuleOFT(network.NetworkModule): orig_weight = orig_weight return self.finalize_updown(updown, orig_weight, output_shape) - + # override to remove the multiplier/scale factor; it's already multiplied in get_weight def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None): #return super().finalize_updown(updown, orig_weight, output_shape, ex_bias) -- cgit v1.2.3 From 88b2ef3b04c37ec068fdfea9ba2596645e981b46 Mon Sep 17 00:00:00 2001 From: David Benson Date: Mon, 23 Oct 2023 08:16:26 -0400 Subject: Update prompts_from_file script to allow concatenating entries with the general prompt. --- scripts/prompts_from_file.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 50320d55..1aadf113 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -108,6 +108,7 @@ class Script(scripts.Script): def ui(self, is_img2img): checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=self.elem_id("checkbox_iterate")) checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=self.elem_id("checkbox_iterate_batch")) + prompt_position = gr.Radio(["start", "end"], label="Insert prompts at the", elem_id=self.elem_id("prompt_position"), value="start") prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1, elem_id=self.elem_id("prompt_txt")) file = gr.File(label="Upload prompt inputs", type='binary', elem_id=self.elem_id("file")) @@ -118,9 +119,9 @@ class Script(scripts.Script): # We don't shrink back to 1, because that causes the control to ignore [enter], and it may # be unclear to the user that shift-enter is needed. 
prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt], show_progress=False) - return [checkbox_iterate, checkbox_iterate_batch, prompt_txt] + return [checkbox_iterate, checkbox_iterate_batch, prompt_position, prompt_txt] - def run(self, p, checkbox_iterate, checkbox_iterate_batch, prompt_txt: str): + def run(self, p, checkbox_iterate, checkbox_iterate_batch, prompt_position, prompt_txt: str): lines = [x for x in (x.strip() for x in prompt_txt.splitlines()) if x] p.do_not_save_grid = True @@ -158,6 +159,18 @@ class Script(scripts.Script): for k, v in args.items(): setattr(copy_p, k, v) + if args.get("prompt") and p.prompt: + if prompt_position == "start": + copy_p.prompt = args.get("prompt") + " " + p.prompt + else: + copy_p.prompt = p.prompt + " " + args.get("prompt") + + if args.get("negative_prompt") and p.negative_prompt: + if prompt_position == "start": + copy_p.negative_prompt = args.get("negative_prompt") + " " + p.negative_prompt + else: + copy_p.negative_prompt = p.negative_prompt + " " + args.get("negative_prompt") + proc = process_images(copy_p) images += proc.images -- cgit v1.2.3 From dfc4c27b2402a35a1820ffa549e74bb79873aaaa Mon Sep 17 00:00:00 2001 From: David Benson Date: Mon, 23 Oct 2023 08:26:40 -0400 Subject: linting issue --- scripts/prompts_from_file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 1aadf113..3c09bb97 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -164,7 +164,7 @@ class Script(scripts.Script): copy_p.prompt = args.get("prompt") + " " + p.prompt else: copy_p.prompt = p.prompt + " " + args.get("prompt") - + if args.get("negative_prompt") and p.negative_prompt: if prompt_position == "start": copy_p.negative_prompt = args.get("negative_prompt") + " " + p.negative_prompt -- cgit v1.2.3 From 5121846d34d74aee9b55d48d35c1559a710051b0 Mon Sep 17 00:00:00 2001 From: Won-Kyu Park Date: Wed, 25 Oct 2023 21:37:55 +0900 Subject: call state.jobnext() before postproces*() --- modules/processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 40598f5c..70ad1ebe 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -886,6 +886,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: devices.torch_gc() + state.nextjob() + if p.scripts is not None: p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n) @@ -958,8 +960,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: devices.torch_gc() - state.nextjob() - if not infotexts: infotexts.append(Processed(p, []).infotext(p, 0)) -- cgit v1.2.3 From fbc5c531b9cfa949d60dae19420d01f8af186b55 Mon Sep 17 00:00:00 2001 From: Meerkov Date: Sun, 29 Oct 2023 15:37:08 -0700 Subject: Fix #13796 Fix comment error that makes understanding scheduling more confusing. 
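For reference, the scheduling syntax the corrected comment below documents: [from:to:when] renders "from" until fraction "when" of the steps and "to" afterwards, and a leading colon as in [: in background:0.25] means the text is absent at first and appended later. A sketch of how a schedule expands, assuming the two-argument form of the schedule builder in this module:

    from modules.prompt_parser import get_learned_conditioning_prompt_schedules

    schedules = get_learned_conditioning_prompt_schedules(["a [mountain:lake:0.25]"], 100)
    # expected, following the module's conventions: [[[25, 'a mountain'], [100, 'a lake']]]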
--- modules/prompt_parser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 334efeef..86b7acb5 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -5,7 +5,7 @@ from collections import namedtuple from typing import List import lark -# a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]" +# a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][: in background:0.25] [shoddy:masterful:0.5]" # will be represented with prompt_schedule like this (assuming steps=100): # [25, 'fantasy landscape with a mountain and an oak in foreground shoddy'] # [50, 'fantasy landscape with a lake and an oak in foreground in background shoddy'] -- cgit v1.2.3 From a2fad6ee055f3f4e98e46b6c2d912776fe608214 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Wed, 1 Nov 2023 22:34:27 -0700 Subject: test implementation based on kohaku diag-oft implementation --- extensions-builtin/Lora/network_oft.py | 59 ++++++++++++++++++++++------------ 1 file changed, 38 insertions(+), 21 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index e43c9a1d..ff61b369 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -1,5 +1,6 @@ import torch import network +from einops import rearrange class ModuleTypeOFT(network.ModuleType): @@ -30,35 +31,51 @@ class NetworkModuleOFT(network.NetworkModule): self.org_module: list[torch.Module] = [self.sd_module] - def merge_weight(self, R_weight, org_weight): - R_weight = R_weight.to(org_weight.device, dtype=org_weight.dtype) - if org_weight.dim() == 4: - weight = torch.einsum("oihw, op -> pihw", org_weight, R_weight) - else: - weight = torch.einsum("oi, op -> pi", org_weight, R_weight) - return weight + # def merge_weight(self, R_weight, org_weight): + # R_weight = R_weight.to(org_weight.device, dtype=org_weight.dtype) + # if org_weight.dim() == 4: + # weight = torch.einsum("oihw, op -> pihw", org_weight, R_weight) + # else: + # weight = torch.einsum("oi, op -> pi", org_weight, R_weight) + # weight = torch.einsum( + # "k n m, k n ... 
-> k m ...", + # self.oft_diag * scale + torch.eye(self.block_size, device=device), + # org_weight + # ) + # return weight def get_weight(self, oft_blocks, multiplier=None): - constraint = self.constraint.to(oft_blocks.device, dtype=oft_blocks.dtype) + # constraint = self.constraint.to(oft_blocks.device, dtype=oft_blocks.dtype) - block_Q = oft_blocks - oft_blocks.transpose(1, 2) - norm_Q = torch.norm(block_Q.flatten()) - new_norm_Q = torch.clamp(norm_Q, max=constraint) - block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) - m_I = torch.eye(self.block_size, device=oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) - block_R = torch.matmul(m_I + block_Q, (m_I - block_Q).inverse()) + # block_Q = oft_blocks - oft_blocks.transpose(1, 2) + # norm_Q = torch.norm(block_Q.flatten()) + # new_norm_Q = torch.clamp(norm_Q, max=constraint) + # block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) + # m_I = torch.eye(self.block_size, device=oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) + # block_R = torch.matmul(m_I + block_Q, (m_I - block_Q).inverse()) - block_R_weighted = multiplier * block_R + (1 - multiplier) * m_I - R = torch.block_diag(*block_R_weighted) + # block_R_weighted = multiplier * block_R + (1 - multiplier) * m_I + # R = torch.block_diag(*block_R_weighted) + #return R + return self.oft_blocks - return R def calc_updown(self, orig_weight): multiplier = self.multiplier() * self.calc_scale() - R = self.get_weight(self.oft_blocks, multiplier) - merged_weight = self.merge_weight(R, orig_weight) - - updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight + #R = self.get_weight(self.oft_blocks, multiplier) + R = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) + #merged_weight = self.merge_weight(R, orig_weight) + + orig_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size) + weight = torch.einsum( + 'k n m, k n ... -> k m ...', + R * multiplier + torch.eye(self.block_size, device=orig_weight.device), + orig_weight + ) + weight = rearrange(weight, 'k m ... 
-> (k m) ...') + + #updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight + updown = weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight output_shape = orig_weight.shape orig_weight = orig_weight -- cgit v1.2.3 From 65ccd6305fcf72347d5ed68f03095dced865ef6e Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Thu, 2 Nov 2023 00:11:32 -0700 Subject: detect diag_oft type --- extensions-builtin/Lora/networks.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 78a97033..7f814706 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -191,10 +191,17 @@ def load_network(name, network_on_disk): key = key_network_without_network_parts.replace("lora_te1_text_model", "transformer_text_model") sd_module = shared.sd_model.network_layer_mapping.get(key, None) + # kohya_ss OFT module elif sd_module is None and "oft_unet" in key_network_without_network_parts: key = key_network_without_network_parts.replace("oft_unet", "diffusion_model") sd_module = shared.sd_model.network_layer_mapping.get(key, None) + # KohakuBlueLeaf OFT module + if sd_module is None and "oft_diag" in key: + key = key_network_without_network_parts.replace("lora_unet", "diffusion_model") + key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model") + sd_module = shared.sd_model.network_layer_mapping.get(key, None) + if sd_module is None: keys_failed_to_match[key_network] = key continue -- cgit v1.2.3 From d727ddfccdc6d474767be9dc3bf504150e81a8a5 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Thu, 2 Nov 2023 00:13:11 -0700 Subject: no idea what i'm doing, trying to support both type of OFT, kblueleaf diag_oft has MultiheadAttn which kohya's doesn't?, attempt create new module based off network_lora.py, errors about tensor dim mismatch --- extensions-builtin/Lora/network_oft.py | 192 +++++++++++++++++++++++++-------- 1 file changed, 145 insertions(+), 47 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index ff61b369..e102eafc 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -1,11 +1,12 @@ import torch import network from einops import rearrange +from modules import devices class ModuleTypeOFT(network.ModuleType): def create_module(self, net: network.Network, weights: network.NetworkWeights): - if all(x in weights.w for x in ["oft_blocks"]): + if all(x in weights.w for x in ["oft_blocks"]) or all(x in weights.w for x in ["oft_diag"]): return NetworkModuleOFT(net, weights) return None @@ -16,66 +17,117 @@ class NetworkModuleOFT(network.NetworkModule): super().__init__(net, weights) - self.oft_blocks = weights.w["oft_blocks"] - self.alpha = weights.w["alpha"] - self.dim = self.oft_blocks.shape[0] - self.num_blocks = self.dim - - if "Linear" in self.sd_module.__class__.__name__: + self.lin_module = None + # kohya-ss + if "oft_blocks" in weights.w.keys(): + self.is_kohya = True + self.oft_blocks = weights.w["oft_blocks"] + self.alpha = weights.w["alpha"] + self.dim = self.oft_blocks.shape[0] + elif "oft_diag" in weights.w.keys(): + self.is_kohya = False + self.oft_blocks = weights.w["oft_diag"] + # alpha is rank if alpha is 0 or None + if self.alpha is None: + pass + self.dim = self.oft_blocks.shape[0] # FIXME: almost certainly incorrect, assumes tensor is 
shape [*, m, n] + else: + raise ValueError("oft_blocks or oft_diag must be in weights dict") + + is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear] + is_conv = type(self.sd_module) in [torch.nn.Conv2d] + is_other_linear = type(self.sd_module) in [ torch.nn.MultiheadAttention] + #if "Linear" in self.sd_module.__class__.__name__ or is_linear: + if is_linear: self.out_dim = self.sd_module.out_features - elif "Conv" in self.sd_module.__class__.__name__: + #elif hasattr(self.sd_module, "embed_dim"): + # self.out_dim = self.sd_module.embed_dim + #else: + # raise ValueError("Linear sd_module must have out_features or embed_dim") + elif is_other_linear: + self.out_dim = self.sd_module.embed_dim + elif is_conv: self.out_dim = self.sd_module.out_channels + else: + raise ValueError("sd_module must be Linear or Conv") + - self.constraint = self.alpha * self.out_dim - self.block_size = self.out_dim // self.num_blocks + if self.is_kohya: + self.num_blocks = self.dim + self.block_size = self.out_dim // self.num_blocks + self.constraint = self.alpha * self.out_dim + #elif is_linear or is_conv: + else: + self.num_blocks, self.block_size = factorization(self.out_dim, self.dim) + self.constraint = None self.org_module: list[torch.Module] = [self.sd_module] - # def merge_weight(self, R_weight, org_weight): - # R_weight = R_weight.to(org_weight.device, dtype=org_weight.dtype) - # if org_weight.dim() == 4: - # weight = torch.einsum("oihw, op -> pihw", org_weight, R_weight) - # else: - # weight = torch.einsum("oi, op -> pi", org_weight, R_weight) - # weight = torch.einsum( - # "k n m, k n ... -> k m ...", - # self.oft_diag * scale + torch.eye(self.block_size, device=device), - # org_weight - # ) - # return weight + # if is_other_linear: + # weight = self.oft_blocks.reshape(self.oft_blocks.shape[0], -1) + # module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False) + # with torch.no_grad(): + # if weight.shape != module.weight.shape: + # weight = weight.reshape(module.weight.shape) + # module.weight.copy_(weight) + # module.to(device=devices.cpu, dtype=devices.dtype) + # module.weight.requires_grad_(False) + # self.lin_module = module + #return module + + def merge_weight(self, R_weight, org_weight): + R_weight = R_weight.to(org_weight.device, dtype=org_weight.dtype) + if org_weight.dim() == 4: + weight = torch.einsum("oihw, op -> pihw", org_weight, R_weight) + else: + weight = torch.einsum("oi, op -> pi", org_weight, R_weight) + #weight = torch.einsum( + # "k n m, k n ... 
-> k m ...", + # self.oft_diag * scale + torch.eye(self.block_size, device=device), + # org_weight + #) + return weight def get_weight(self, oft_blocks, multiplier=None): - # constraint = self.constraint.to(oft_blocks.device, dtype=oft_blocks.dtype) + if self.constraint is not None: + constraint = self.constraint.to(oft_blocks.device, dtype=oft_blocks.dtype) - # block_Q = oft_blocks - oft_blocks.transpose(1, 2) - # norm_Q = torch.norm(block_Q.flatten()) - # new_norm_Q = torch.clamp(norm_Q, max=constraint) - # block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) - # m_I = torch.eye(self.block_size, device=oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) - # block_R = torch.matmul(m_I + block_Q, (m_I - block_Q).inverse()) + block_Q = oft_blocks - oft_blocks.transpose(1, 2) + norm_Q = torch.norm(block_Q.flatten()) + if self.constraint is not None: + new_norm_Q = torch.clamp(norm_Q, max=constraint) + else: + new_norm_Q = norm_Q + block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) + m_I = torch.eye(self.block_size, device=oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) + block_R = torch.matmul(m_I + block_Q, (m_I - block_Q).inverse()) - # block_R_weighted = multiplier * block_R + (1 - multiplier) * m_I - # R = torch.block_diag(*block_R_weighted) - #return R - return self.oft_blocks + block_R_weighted = multiplier * block_R + (1 - multiplier) * m_I + R = torch.block_diag(*block_R_weighted) + return R + #return self.oft_blocks def calc_updown(self, orig_weight): multiplier = self.multiplier() * self.calc_scale() - #R = self.get_weight(self.oft_blocks, multiplier) - R = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) - #merged_weight = self.merge_weight(R, orig_weight) - - orig_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size) - weight = torch.einsum( - 'k n m, k n ... -> k m ...', - R * multiplier + torch.eye(self.block_size, device=orig_weight.device), - orig_weight - ) - weight = rearrange(weight, 'k m ... -> (k m) ...') - - #updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight - updown = weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight + R = self.get_weight(self.oft_blocks, multiplier) + #R = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) + merged_weight = self.merge_weight(R, orig_weight) + + #if self.lin_module is not None: + # R = self.lin_module.weight.to(orig_weight.device, dtype=orig_weight.dtype) + # weight = torch.mul(torch.mul(R, multiplier), orig_weight) + #else: + # orig_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size) + # weight = torch.einsum( + # 'k n m, k n ... -> k m ...', + # R * multiplier + torch.eye(self.block_size, device=orig_weight.device), + # orig_weight + # ) + # weight = rearrange(weight, 'k m ... 
-> (k m) ...') + + updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight + #updown = weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight output_shape = orig_weight.shape orig_weight = orig_weight @@ -100,3 +152,49 @@ class NetworkModuleOFT(network.NetworkModule): ex_bias = ex_bias * self.multiplier() return updown, ex_bias + +# copied from https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/lokr.py +def factorization(dimension: int, factor:int=-1) -> tuple[int, int]: + ''' + return a tuple of two values of the input dimension decomposed by the number closest to factor + the second value is greater than or equal to the first value. + + In LoRA with Kronecker product, the first value is the weight scale + and the second value is the weight. + + Because of the non-commutative property, A⊗B ≠ B⊗A, the meaning of the two matrices is slightly different. + + examples) + factor + -1 2 4 8 16 ... + 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 + 128 -> 8, 16 128 -> 2, 64 128 -> 4, 32 128 -> 8, 16 128 -> 8, 16 + 250 -> 10, 25 250 -> 2, 125 250 -> 2, 125 250 -> 5, 50 250 -> 10, 25 + 360 -> 8, 45 360 -> 2, 180 360 -> 4, 90 360 -> 8, 45 360 -> 12, 30 + 512 -> 16, 32 512 -> 2, 256 512 -> 4, 128 512 -> 8, 64 512 -> 16, 32 + 1024 -> 32, 32 1024 -> 2, 512 1024 -> 4, 256 1024 -> 8, 128 1024 -> 16, 64 + ''' + + if factor > 0 and (dimension % factor) == 0: + m = factor + n = dimension // factor + if m > n: + n, m = m, n + return m, n + if factor < 0: + factor = dimension + m, n = 1, dimension + length = m + n + while m<n: + new_m = m + 1 + while dimension%new_m != 0: + new_m += 1 + new_n = dimension // new_m + if new_m + new_n > length or new_m>factor: + break + else: + m, n = new_m, new_n + if m > n: + n, m = m, n + return m, n + -- cgit v1.2.3 From 759515316e8ec536f34fad616e8c6a33674a164b Mon Sep 17 00:00:00 2001 From: Emily Zeng Date: Thu, 2 Nov 2023 21:54:48 -0400 Subject: added accordion settings options --- modules/shared_options.py | 2 + modules/ui.py | 502 +++++++++++++++++++++++----------------------- 2 files changed, 254 insertions(+), 250 deletions(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 0a82216f..5b07dd04 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -270,6 +270,8 @@ options_templates.update(options_section(('ui', "User interface"), { "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(), "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(), "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), + "txt2img_settings_accordion": OptionInfo(False, "Settings in txt2img hidden under Accordion").needs_reload_ui(), + "img2img_settings_accordion": OptionInfo(False, "Settings in img2img hidden under Accordion").needs_reload_ui(), })) diff --git a/modules/ui.py b/modules/ui.py index bcf39199..d05b9f55 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -344,84 +344,85 @@ def create_ui(): extra_tabs.__enter__() with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, ResizeHandleRow(equal_height=False): - with gr.Column(variant='compact', elem_id="txt2img_settings"): - scripts.scripts_txt2img.prepare_ui() + with gr.Accordion("Open for Settings", open=False) if shared.opts.img2img_settings_accordion else gr.Group(): + with gr.Column(variant='compact', elem_id="txt2img_settings"): + scripts.scripts_txt2img.prepare_ui() for category in ordered_ui_categories(): if category == "sampler": steps, sampler_name = 
create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "txt2img") + for category in ordered_ui_categories(): + if category == "sampler": + steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "txt2img") - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="txt2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="txt2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", tooltip="Switch width/height") + with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", tooltip="Switch width/height") - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - elif category == "cfg": - with gr.Row(): - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") + elif category == "cfg": + with gr.Row(): + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - pass + elif category == "checkboxes": + with FormRow(elem_classes="checkboxes-row", variant="compact"): + pass - elif category == "accordions": - with gr.Row(elem_id="txt2img_accordions", elem_classes="accordions"): - with InputAccordion(False, label="Hires. fix", elem_id="txt2img_hr") as enable_hr: - with enable_hr.extra(): - hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False, min_width=0) + elif category == "accordions": + with gr.Row(elem_id="txt2img_accordions", elem_classes="accordions"): + with InputAccordion(False, label="Hires. 
fix", elem_id="txt2img_hr") as enable_hr: + with enable_hr.extra(): + hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False, min_width=0) - with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): - hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): + hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") - with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): - hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") - hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") - hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") + with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): + hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") + hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") + hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") - with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: + with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: - hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint") - create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)}, "hr_checkpoint_refresh") + hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint") + create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)}, "hr_checkpoint_refresh") - hr_sampler_name = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + sd_samplers.visible_sampler_names(), value="Use same sampler") + hr_sampler_name = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + sd_samplers.visible_sampler_names(), 
value="Use same sampler") - with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: - with gr.Column(scale=80): - with gr.Row(): - hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) - with gr.Column(scale=80): - with gr.Row(): - hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) + with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: + with gr.Column(scale=80): + with gr.Row(): + hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) + with gr.Column(scale=80): + with gr.Row(): + hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) - scripts.scripts_txt2img.setup_ui_for_section(category) + scripts.scripts_txt2img.setup_ui_for_section(category) - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - elif category == "override_settings": - with FormRow(elem_id="txt2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('txt2img', row) + elif category == "override_settings": + with FormRow(elem_id="txt2img_override_settings_row") as row: + override_settings = create_override_settings_dropdown('txt2img', row) - elif category == "scripts": - with FormGroup(elem_id="txt2img_script_container"): - custom_inputs = scripts.scripts_txt2img.setup_ui() + elif category == "scripts": + with FormGroup(elem_id="txt2img_script_container"): + custom_inputs = scripts.scripts_txt2img.setup_ui() - if category not in {"accordions"}: - scripts.scripts_txt2img.setup_ui_for_section(category) + if category not in {"accordions"}: + scripts.scripts_txt2img.setup_ui_for_section(category) hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] @@ -560,214 +561,215 @@ def create_ui(): extra_tabs.__enter__() with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, ResizeHandleRow(equal_height=False): - with gr.Column(variant='compact', elem_id="img2img_settings"): - copy_image_buttons = [] - copy_image_destinations = {} - - def add_copy_image_controls(tab_name, elem): - with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"): - 
gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}") - - for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']): - if name == tab_name: - gr.Button(title, interactive=False) - copy_image_destinations[name] = elem - continue - - button = gr.Button(title) - copy_image_buttons.append((button, name, elem)) - - with gr.Tabs(elem_id="mode_img2img"): - img2img_selected_tab = gr.State(0) - - with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: - init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA", height=opts.img2img_editor_height) - add_copy_image_controls('img2img', init_img) - - with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: - sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color) - add_copy_image_controls('sketch', sketch) - - with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: - init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_mask_brush_color) - add_copy_image_controls('inpaint', init_img_with_mask) - - with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color) - inpaint_color_sketch_orig = gr.State(None) - add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) - - def update_orig(image, state): - if image is not None: - same_size = state is not None and state.size == image.size - has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) - edited = same_size and has_exact_match - return image if not edited or state is None else state - - inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) - - with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: - init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") - init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask") - - with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: - hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
-                        gr.HTML(
-                            "<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
-                            "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
-                            f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
-                            f"{hidden}</p>
" + with gr.Accordion("Open for Settings", open=False) if shared.opts.img2img_settings_accordion else gr.Group(): + with gr.Column(variant='compact', elem_id="img2img_settings"): + copy_image_buttons = [] + copy_image_destinations = {} + + def add_copy_image_controls(tab_name, elem): + with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"): + gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}") + + for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']): + if name == tab_name: + gr.Button(title, interactive=False) + copy_image_destinations[name] = elem + continue + + button = gr.Button(title) + copy_image_buttons.append((button, name, elem)) + + with gr.Tabs(elem_id="mode_img2img"): + img2img_selected_tab = gr.State(0) + + with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: + init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA", height=opts.img2img_editor_height) + add_copy_image_controls('img2img', init_img) + + with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: + sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color) + add_copy_image_controls('sketch', sketch) + + with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: + init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_mask_brush_color) + add_copy_image_controls('inpaint', init_img_with_mask) + + with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: + inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color) + inpaint_color_sketch_orig = gr.State(None) + add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) + + def update_orig(image, state): + if image is not None: + same_size = state is not None and state.size == image.size + has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) + edited = same_size and has_exact_match + return image if not edited or state is None else state + + inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) + + with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: + init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") + init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask") + + with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: + hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
+                            gr.HTML(
+                                "<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
+                                "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
+                                f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
+                                f"{hidden}</p>
" + ) + img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") + img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") + img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") + with gr.Accordion("PNG info", open=False): + img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") + img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") + img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps", "Model hash"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") + + img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] + + for i, tab in enumerate(img2img_tabs): + tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) + + def copy_image(img): + if isinstance(img, dict) and 'image' in img: + return img['image'] + + return img + + for button, name, elem in copy_image_buttons: + button.click( + fn=copy_image, + inputs=[elem], + outputs=[copy_image_destinations[name]], + ) + button.click( + fn=lambda: None, + _js=f"switch_to_{name.replace(' ', '_')}", + inputs=[], + outputs=[], ) - img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") - img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") - img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") - with gr.Accordion("PNG info", open=False): - img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") - img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") - img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps", "Model hash"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") - - img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] - - for i, tab in enumerate(img2img_tabs): - tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) - - def copy_image(img): - if isinstance(img, dict) and 'image' in img: - return img['image'] - - return img - - for button, name, elem in copy_image_buttons: - button.click( - fn=copy_image, - inputs=[elem], - outputs=[copy_image_destinations[name]], - ) - button.click( - fn=lambda: None, - _js=f"switch_to_{name.replace(' ', '_')}", - inputs=[], - outputs=[], - ) - - with FormRow(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") - - scripts.scripts_img2img.prepare_ui() - - for 
category in ordered_ui_categories(): - if category == "sampler": - steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img") - - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - selected_scale_tab = gr.State(value=0) - - with gr.Tabs(): - with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn", tooltip="Switch width/height") - detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn", tooltip="Auto detect size from img2img") - - with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: - scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") - - with FormRow(): - scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") - gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") - button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") - - on_change_args = dict( - fn=resize_from_to_html, - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, scale_by], - outputs=scale_by_html, - show_progress=False, - ) - - scale_by.release(**on_change_args) - button_update_resize_to.click(**on_change_args) - - # the code below is meant to update the resolution label after the image in the image selection UI has changed. - # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. - # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. 
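The on_change_args pattern above is worth pausing on: a single kwargs dict is built once and then splatted into every event binding that must refresh the resolution preview, so the function, inputs and outputs live in one place. A minimal self-contained sketch of the same idea, assuming a Gradio 3.x install where Slider.release is available; the component names and preview function are illustrative, not taken from the webui codebase:

    import gradio as gr

    def preview(scale):
        # recompute the preview label from the current slider value
        return f"target scale: {scale:.2f}x"

    with gr.Blocks() as demo:
        scale = gr.Slider(minimum=0.05, maximum=4.0, value=1.0, step=0.05, label="Scale")
        label = gr.HTML(preview(1.0))
        refresh = gr.Button("Refresh")

        # one kwargs dict drives every listener that must recompute the label
        on_change_args = dict(fn=preview, inputs=[scale], outputs=[label], show_progress=False)
        scale.release(**on_change_args)
        refresh.click(**on_change_args)

    demo.launch()

Adding another trigger later (for example a hidden button clicked from JavaScript, as the diff does with button_update_resize_to) is then a single extra line.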
- for component in [init_img, sketch]: - component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) - tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) - tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) + with FormRow(): + resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + scripts.scripts_img2img.prepare_ui() - elif category == "denoising": - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") + for category in ordered_ui_categories(): + if category == "sampler": + steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img") - elif category == "cfg": - with gr.Row(): - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") - image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + selected_scale_tab = gr.State(value=0) + + with gr.Tabs(): + with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn", tooltip="Switch width/height") + detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn", tooltip="Auto detect size from img2img") + + with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: + scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") + + with FormRow(): + scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") + gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") + button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") + + on_change_args = dict( + fn=resize_from_to_html, + _js="currentImg2imgSourceResolution", + inputs=[dummy_component, dummy_component, scale_by], + outputs=scale_by_html, + show_progress=False, + ) + + scale_by.release(**on_change_args) + button_update_resize_to.click(**on_change_args) + + # the code below is meant to update the resolution label after the image in the image selection UI has changed. + # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. 
+ # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. + for component in [init_img, sketch]: + component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) + + tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) + tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) + + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + + elif category == "denoising": + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") + + elif category == "cfg": + with gr.Row(): + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") + image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - pass + elif category == "checkboxes": + with FormRow(elem_classes="checkboxes-row", variant="compact"): + pass - elif category == "accordions": - with gr.Row(elem_id="img2img_accordions", elem_classes="accordions"): - scripts.scripts_img2img.setup_ui_for_section(category) + elif category == "accordions": + with gr.Row(elem_id="img2img_accordions", elem_classes="accordions"): + scripts.scripts_img2img.setup_ui_for_section(category) - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - elif category == "override_settings": - with FormRow(elem_id="img2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('img2img', row) + elif category == "override_settings": + with FormRow(elem_id="img2img_override_settings_row") as row: + override_settings = create_override_settings_dropdown('img2img', row) - elif category == "scripts": - with FormGroup(elem_id="img2img_script_container"): - custom_inputs = scripts.scripts_img2img.setup_ui() + elif category == "scripts": + with FormGroup(elem_id="img2img_script_container"): + custom_inputs = scripts.scripts_img2img.setup_ui() - elif category == "inpaint": - with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: - with FormRow(): - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") - mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha") + elif category == "inpaint": + with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: + with FormRow(): + mask_blur = 
gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") + mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha") - with FormRow(): - inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") + with FormRow(): + inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") - with FormRow(): - inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") + with FormRow(): + inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") - with FormRow(): - with gr.Column(): - inpaint_full_res = gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") + with FormRow(): + with gr.Column(): + inpaint_full_res = gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") - with gr.Column(scale=4): - inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") + with gr.Column(scale=4): + inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") - def select_img2img_tab(tab): - return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), + def select_img2img_tab(tab): + return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), - for i, elem in enumerate(img2img_tabs): - elem.select( - fn=lambda tab=i: select_img2img_tab(tab), - inputs=[], - outputs=[inpaint_controls, mask_alpha], - ) + for i, elem in enumerate(img2img_tabs): + elem.select( + fn=lambda tab=i: select_img2img_tab(tab), + inputs=[], + outputs=[inpaint_controls, mask_alpha], + ) - if category not in {"accordions"}: - scripts.scripts_img2img.setup_ui_for_section(category) + if category not in {"accordions"}: + scripts.scripts_img2img.setup_ui_for_section(category) img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) -- cgit v1.2.3 From 8052a4971e1be48e1df2535284a7791cd1ad39ae Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Fri, 3 Nov 2023 00:59:19 -0600 Subject: Fix parenthesis auto selection Fixes #13813 --- javascript/edit-attention.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index 04464100..688c2f11 100644 --- a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -28,7 +28,7 @@ function keyupEditAttention(event) { if (afterParen == -1) return false; let afterOpeningParen = after.indexOf(OPEN); - if (afterOpeningParen != -1 && afterOpeningParen < beforeParen) return false; + if (afterOpeningParen != -1 && afterOpeningParen < afterParen) return false; // Set the selection to the text between the parenthesis const parenContent = text.substring(beforeParen + 1, selectionStart + afterParen); -- cgit v1.2.3 From cc80a09d82afae793800a033a1f525f5dc797cff 
Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 4 Nov 2023 00:50:30 +0900 Subject: Update requirements_versions.txt --- requirements_versions.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements_versions.txt b/requirements_versions.txt index 7d27f2be..cb7403a9 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -29,3 +29,4 @@ torch torchdiffeq==0.2.3 torchsde==0.2.6 transformers==4.30.2 +httpx==0.24.1 -- cgit v1.2.3 From fe1967a4c4a02eccfa45b65ee19a5b0773ced31c Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Fri, 3 Nov 2023 17:52:55 -0700 Subject: skip multihead attn for now --- extensions-builtin/Lora/network_oft.py | 54 +++++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 17 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index e102eafc..979a2047 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -18,6 +18,7 @@ class NetworkModuleOFT(network.NetworkModule): super().__init__(net, weights) self.lin_module = None + self.org_module: list[torch.Module] = [self.sd_module] # kohya-ss if "oft_blocks" in weights.w.keys(): self.is_kohya = True @@ -30,7 +31,7 @@ class NetworkModuleOFT(network.NetworkModule): # alpha is rank if alpha is 0 or None if self.alpha is None: pass - self.dim = self.oft_blocks.shape[0] # FIXME: almost certainly incorrect, assumes tensor is shape [*, m, n] + self.dim = self.oft_blocks.shape[1] # FIXME: almost certainly incorrect, assumes tensor is shape [*, m, n] else: raise ValueError("oft_blocks or oft_diag must be in weights dict") @@ -46,6 +47,12 @@ class NetworkModuleOFT(network.NetworkModule): # raise ValueError("Linear sd_module must have out_features or embed_dim") elif is_other_linear: self.out_dim = self.sd_module.embed_dim + #self.org_weight = self.org_module[0].weight +# if hasattr(self.sd_module, "in_proj_weight"): +# self.in_proj_dim = self.sd_module.in_proj_weight.shape[1] +# if hasattr(self.sd_module, "out_proj_weight"): +# self.out_proj_dim = self.sd_module.out_proj_weight.shape[0] +# self.in_proj_dim = self.sd_module.in_proj_weight.shape[1] elif is_conv: self.out_dim = self.sd_module.out_channels else: @@ -58,10 +65,9 @@ class NetworkModuleOFT(network.NetworkModule): self.constraint = self.alpha * self.out_dim #elif is_linear or is_conv: else: - self.num_blocks, self.block_size = factorization(self.out_dim, self.dim) + self.block_size, self.num_blocks = factorization(self.out_dim, self.dim) self.constraint = None - self.org_module: list[torch.Module] = [self.sd_module] # if is_other_linear: # weight = self.oft_blocks.reshape(self.oft_blocks.shape[0], -1) @@ -110,25 +116,39 @@ class NetworkModuleOFT(network.NetworkModule): def calc_updown(self, orig_weight): multiplier = self.multiplier() * self.calc_scale() - R = self.get_weight(self.oft_blocks, multiplier) - #R = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) - merged_weight = self.merge_weight(R, orig_weight) + is_other_linear = type(self.sd_module) in [ torch.nn.MultiheadAttention] + if self.is_kohya and not is_other_linear: + R = self.get_weight(self.oft_blocks, multiplier) + #R = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) + merged_weight = self.merge_weight(R, orig_weight) + elif not self.is_kohya and not is_other_linear: + if is_other_linear and orig_weight.shape[0] != orig_weight.shape[1]: + orig_weight=orig_weight.permute(1, 0) 
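Before the einsum branch that follows, it helps to see the block-diagonal merge in isolation: oft_blocks stores one small square matrix per block, and the weight's output rows are rotated group by group, so the full out_dim x out_dim rotation matrix is never materialized. A toy sketch of the operation the next few lines perform, assuming torch and einops are installed; the shapes and the multiplier value are illustrative:

    import torch
    from einops import rearrange

    num_blocks, block_size, in_dim = 4, 8, 320
    weight = torch.randn(num_blocks * block_size, in_dim)       # e.g. a Linear weight
    oft_blocks = torch.randn(num_blocks, block_size, block_size) * 0.01
    multiplier = 1.0                                            # network strength

    # group output rows as (block, row-in-block), rotate each group, regroup
    w = rearrange(weight, '(k n) ... -> k n ...', k=num_blocks, n=block_size)
    w = torch.einsum('k n m, k n ... -> k m ...',
                     oft_blocks * multiplier + torch.eye(block_size), w)
    merged = rearrange(w, 'k m ... -> (k m) ...')
    assert merged.shape == weight.shape

The added identity term makes multiplier = 0 a no-op, so the merge interpolates between the original weight and the fully rotated one.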
+ R = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) + merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size) + #orig_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.block_size, n=self.num_blocks) + merged_weight = torch.einsum( + 'k n m, k n ... -> k m ...', + R * multiplier + torch.eye(self.block_size, device=orig_weight.device), + merged_weight + ) + merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...') + if is_other_linear and orig_weight.shape[0] != orig_weight.shape[1]: + orig_weight=orig_weight.permute(1, 0) + #merged_weight=merged_weight.permute(1, 0) + updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight + #updown = weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight + output_shape = orig_weight.shape + else: + # skip for now + updown = torch.zeros([orig_weight.shape[1], orig_weight.shape[1]], device=orig_weight.device, dtype=orig_weight.dtype) + output_shape = (orig_weight.shape[1], orig_weight.shape[1]) #if self.lin_module is not None: # R = self.lin_module.weight.to(orig_weight.device, dtype=orig_weight.dtype) # weight = torch.mul(torch.mul(R, multiplier), orig_weight) #else: - # orig_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size) - # weight = torch.einsum( - # 'k n m, k n ... -> k m ...', - # R * multiplier + torch.eye(self.block_size, device=orig_weight.device), - # orig_weight - # ) - # weight = rearrange(weight, 'k m ... -> (k m) ...') - - updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight - #updown = weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight - output_shape = orig_weight.shape + orig_weight = orig_weight return self.finalize_updown(updown, orig_weight, output_shape) -- cgit v1.2.3 From f6c8201e5663ca2182a66c8eca63ce4801d52849 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Fri, 3 Nov 2023 19:35:15 -0700 Subject: refactor: move factorization to lyco_helpers, separate calc_updown for kohya and kb --- extensions-builtin/Lora/lyco_helpers.py | 47 ++++++++++++ extensions-builtin/Lora/network_oft.py | 131 ++++++++------------------------ 2 files changed, 77 insertions(+), 101 deletions(-) diff --git a/extensions-builtin/Lora/lyco_helpers.py b/extensions-builtin/Lora/lyco_helpers.py index 279b34bc..1679a0ce 100644 --- a/extensions-builtin/Lora/lyco_helpers.py +++ b/extensions-builtin/Lora/lyco_helpers.py @@ -19,3 +19,50 @@ def rebuild_cp_decomposition(up, down, mid): up = up.reshape(up.size(0), -1) down = down.reshape(down.size(0), -1) return torch.einsum('n m k l, i n, m j -> i j k l', mid, up, down) + + +# copied from https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/lokr.py +def factorization(dimension: int, factor:int=-1) -> tuple[int, int]: + ''' + return a tuple of two value of input dimension decomposed by the number closest to factor + second value is higher or equal than first value. + + In LoRA with Kroneckor Product, first value is a value for weight scale. + secon value is a value for weight. + + Becuase of non-commutative property, A⊗B ≠ B⊗A. Meaning of two matrices is slightly different. + + examples) + factor + -1 2 4 8 16 ... 
+ 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 + 128 -> 8, 16 128 -> 2, 64 128 -> 4, 32 128 -> 8, 16 128 -> 8, 16 + 250 -> 10, 25 250 -> 2, 125 250 -> 2, 125 250 -> 5, 50 250 -> 10, 25 + 360 -> 8, 45 360 -> 2, 180 360 -> 4, 90 360 -> 8, 45 360 -> 12, 30 + 512 -> 16, 32 512 -> 2, 256 512 -> 4, 128 512 -> 8, 64 512 -> 16, 32 + 1024 -> 32, 32 1024 -> 2, 512 1024 -> 4, 256 1024 -> 8, 128 1024 -> 16, 64 + ''' + + if factor > 0 and (dimension % factor) == 0: + m = factor + n = dimension // factor + if m > n: + n, m = m, n + return m, n + if factor < 0: + factor = dimension + m, n = 1, dimension + length = m + n + while m length or new_m>factor: + break + else: + m, n = new_m, new_n + if m > n: + n, m = m, n + return m, n + diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 979a2047..2be67fe5 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -1,7 +1,7 @@ import torch import network +from lyco_helpers import factorization from einops import rearrange -from modules import devices class ModuleTypeOFT(network.ModuleType): @@ -11,7 +11,8 @@ class ModuleTypeOFT(network.ModuleType): return None -# adapted from kohya's implementation https://github.com/kohya-ss/sd-scripts/blob/main/networks/oft.py +# adapted from kohya-ss' implementation https://github.com/kohya-ss/sd-scripts/blob/main/networks/oft.py +# and KohakuBlueleaf's implementation https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/diag_oft.py class NetworkModuleOFT(network.NetworkModule): def __init__(self, net: network.Network, weights: network.NetworkWeights): @@ -19,6 +20,7 @@ class NetworkModuleOFT(network.NetworkModule): self.lin_module = None self.org_module: list[torch.Module] = [self.sd_module] + # kohya-ss if "oft_blocks" in weights.w.keys(): self.is_kohya = True @@ -37,61 +39,31 @@ class NetworkModuleOFT(network.NetworkModule): is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear] is_conv = type(self.sd_module) in [torch.nn.Conv2d] - is_other_linear = type(self.sd_module) in [ torch.nn.MultiheadAttention] - #if "Linear" in self.sd_module.__class__.__name__ or is_linear: + is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention] + if is_linear: self.out_dim = self.sd_module.out_features - #elif hasattr(self.sd_module, "embed_dim"): - # self.out_dim = self.sd_module.embed_dim - #else: - # raise ValueError("Linear sd_module must have out_features or embed_dim") elif is_other_linear: self.out_dim = self.sd_module.embed_dim - #self.org_weight = self.org_module[0].weight -# if hasattr(self.sd_module, "in_proj_weight"): -# self.in_proj_dim = self.sd_module.in_proj_weight.shape[1] -# if hasattr(self.sd_module, "out_proj_weight"): -# self.out_proj_dim = self.sd_module.out_proj_weight.shape[0] -# self.in_proj_dim = self.sd_module.in_proj_weight.shape[1] elif is_conv: self.out_dim = self.sd_module.out_channels else: raise ValueError("sd_module must be Linear or Conv") - if self.is_kohya: self.num_blocks = self.dim self.block_size = self.out_dim // self.num_blocks self.constraint = self.alpha * self.out_dim - #elif is_linear or is_conv: else: self.block_size, self.num_blocks = factorization(self.out_dim, self.dim) self.constraint = None - - # if is_other_linear: - # weight = self.oft_blocks.reshape(self.oft_blocks.shape[0], -1) - # module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False) - # with torch.no_grad(): - # if 
weight.shape != module.weight.shape: - # weight = weight.reshape(module.weight.shape) - # module.weight.copy_(weight) - # module.to(device=devices.cpu, dtype=devices.dtype) - # module.weight.requires_grad_(False) - # self.lin_module = module - #return module - def merge_weight(self, R_weight, org_weight): R_weight = R_weight.to(org_weight.device, dtype=org_weight.dtype) if org_weight.dim() == 4: weight = torch.einsum("oihw, op -> pihw", org_weight, R_weight) else: weight = torch.einsum("oi, op -> pi", org_weight, R_weight) - #weight = torch.einsum( - # "k n m, k n ... -> k m ...", - # self.oft_diag * scale + torch.eye(self.block_size, device=device), - # org_weight - #) return weight def get_weight(self, oft_blocks, multiplier=None): @@ -111,48 +83,51 @@ class NetworkModuleOFT(network.NetworkModule): block_R_weighted = multiplier * block_R + (1 - multiplier) * m_I R = torch.block_diag(*block_R_weighted) return R - #return self.oft_blocks + def calc_updown_kohya(self, orig_weight, multiplier): + R = self.get_weight(self.oft_blocks, multiplier) + merged_weight = self.merge_weight(R, orig_weight) - def calc_updown(self, orig_weight): - multiplier = self.multiplier() * self.calc_scale() - is_other_linear = type(self.sd_module) in [ torch.nn.MultiheadAttention] - if self.is_kohya and not is_other_linear: - R = self.get_weight(self.oft_blocks, multiplier) - #R = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) - merged_weight = self.merge_weight(R, orig_weight) - elif not self.is_kohya and not is_other_linear: + updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight + output_shape = orig_weight.shape + orig_weight = orig_weight + return self.finalize_updown(updown, orig_weight, output_shape) + + def calc_updown_kb(self, orig_weight, multiplier): + is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention] + + if not is_other_linear: if is_other_linear and orig_weight.shape[0] != orig_weight.shape[1]: orig_weight=orig_weight.permute(1, 0) + R = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size) - #orig_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.block_size, n=self.num_blocks) merged_weight = torch.einsum( 'k n m, k n ... -> k m ...', R * multiplier + torch.eye(self.block_size, device=orig_weight.device), - merged_weight + merged_weight ) merged_weight = rearrange(merged_weight, 'k m ... 
-> (k m) ...') + if is_other_linear and orig_weight.shape[0] != orig_weight.shape[1]: orig_weight=orig_weight.permute(1, 0) - #merged_weight=merged_weight.permute(1, 0) + updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight - #updown = weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight output_shape = orig_weight.shape else: - # skip for now + # FIXME: skip MultiheadAttention for now updown = torch.zeros([orig_weight.shape[1], orig_weight.shape[1]], device=orig_weight.device, dtype=orig_weight.dtype) output_shape = (orig_weight.shape[1], orig_weight.shape[1]) - #if self.lin_module is not None: - # R = self.lin_module.weight.to(orig_weight.device, dtype=orig_weight.dtype) - # weight = torch.mul(torch.mul(R, multiplier), orig_weight) - #else: - - orig_weight = orig_weight - return self.finalize_updown(updown, orig_weight, output_shape) + def calc_updown(self, orig_weight): + multiplier = self.multiplier() * self.calc_scale() + if self.is_kohya: + return self.calc_updown_kohya(orig_weight, multiplier) + else: + return self.calc_updown_kb(orig_weight, multiplier) + # override to remove the multiplier/scale factor; it's already multiplied in get_weight def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None): #return super().finalize_updown(updown, orig_weight, output_shape, ex_bias) @@ -172,49 +147,3 @@ class NetworkModuleOFT(network.NetworkModule): ex_bias = ex_bias * self.multiplier() return updown, ex_bias - -# copied from https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/lokr.py -def factorization(dimension: int, factor:int=-1) -> tuple[int, int]: - ''' - return a tuple of two value of input dimension decomposed by the number closest to factor - second value is higher or equal than first value. - - In LoRA with Kroneckor Product, first value is a value for weight scale. - secon value is a value for weight. - - Becuase of non-commutative property, A⊗B ≠ B⊗A. Meaning of two matrices is slightly different. - - examples) - factor - -1 2 4 8 16 ... 
- 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 - 128 -> 8, 16 128 -> 2, 64 128 -> 4, 32 128 -> 8, 16 128 -> 8, 16 - 250 -> 10, 25 250 -> 2, 125 250 -> 2, 125 250 -> 5, 50 250 -> 10, 25 - 360 -> 8, 45 360 -> 2, 180 360 -> 4, 90 360 -> 8, 45 360 -> 12, 30 - 512 -> 16, 32 512 -> 2, 256 512 -> 4, 128 512 -> 8, 64 512 -> 16, 32 - 1024 -> 32, 32 1024 -> 2, 512 1024 -> 4, 256 1024 -> 8, 128 1024 -> 16, 64 - ''' - - if factor > 0 and (dimension % factor) == 0: - m = factor - n = dimension // factor - if m > n: - n, m = m, n - return m, n - if factor < 0: - factor = dimension - m, n = 1, dimension - length = m + n - while m length or new_m>factor: - break - else: - m, n = new_m, new_n - if m > n: - n, m = m, n - return m, n - -- cgit v1.2.3 From 329c8bacce706811776e1c1c6a0d39b46886a268 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Sat, 4 Nov 2023 14:54:36 -0700 Subject: refactor: use same updown for both kohya OFT and LyCORIS diag-oft --- extensions-builtin/Lora/network_oft.py | 91 +++++++++++++++++++++++++++------- 1 file changed, 74 insertions(+), 17 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 2be67fe5..e4aa082b 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -2,6 +2,7 @@ import torch import network from lyco_helpers import factorization from einops import rearrange +from modules import devices class ModuleTypeOFT(network.ModuleType): @@ -24,12 +25,14 @@ class NetworkModuleOFT(network.NetworkModule): # kohya-ss if "oft_blocks" in weights.w.keys(): self.is_kohya = True - self.oft_blocks = weights.w["oft_blocks"] + self.oft_blocks = weights.w["oft_blocks"] # (num_blocks, block_size, block_size) self.alpha = weights.w["alpha"] - self.dim = self.oft_blocks.shape[0] + self.dim = self.oft_blocks.shape[0] # lora dim + #self.oft_blocks = rearrange(self.oft_blocks, 'k m ... 
-> (k m) ...') elif "oft_diag" in weights.w.keys(): self.is_kohya = False - self.oft_blocks = weights.w["oft_diag"] + self.oft_blocks = weights.w["oft_diag"] # (num_blocks, block_size, block_size) + # alpha is rank if alpha is 0 or None if self.alpha is None: pass @@ -51,12 +54,57 @@ class NetworkModuleOFT(network.NetworkModule): raise ValueError("sd_module must be Linear or Conv") if self.is_kohya: - self.num_blocks = self.dim - self.block_size = self.out_dim // self.num_blocks + #self.num_blocks = self.dim + #self.block_size = self.out_dim // self.num_blocks + #self.block_size = self.dim + #self.num_blocks = self.out_dim // self.block_size self.constraint = self.alpha * self.out_dim + self.num_blocks, self.block_size = factorization(self.out_dim, self.dim) else: - self.block_size, self.num_blocks = factorization(self.out_dim, self.dim) self.constraint = None + self.block_size, self.num_blocks = factorization(self.out_dim, self.dim) + + if is_other_linear: + self.lin_module = self.create_module(weights.w, "oft_diag", none_ok=True) + + + def create_module(self, weights, key, none_ok=False): + weight = weights.get(key) + + if weight is None and none_ok: + return None + + is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.nn.MultiheadAttention] + is_conv = type(self.sd_module) in [torch.nn.Conv2d] + + if is_linear: + weight = weight.reshape(weight.shape[0], -1) + module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False) + elif is_conv and key == "lora_down.weight" or key == "dyn_up": + if len(weight.shape) == 2: + weight = weight.reshape(weight.shape[0], -1, 1, 1) + + if weight.shape[2] != 1 or weight.shape[3] != 1: + module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False) + else: + module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False) + elif is_conv and key == "lora_mid.weight": + module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False) + elif is_conv and key == "lora_up.weight" or key == "dyn_down": + module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False) + else: + raise AssertionError(f'Lora layer {self.network_key} matched a layer with unsupported type: {type(self.sd_module).__name__}') + + with torch.no_grad(): + if weight.shape != module.weight.shape: + weight = weight.reshape(module.weight.shape) + module.weight.copy_(weight) + + module.to(device=devices.cpu, dtype=devices.dtype) + module.weight.requires_grad_(False) + + return module + def merge_weight(self, R_weight, org_weight): R_weight = R_weight.to(org_weight.device, dtype=org_weight.dtype) @@ -77,7 +125,8 @@ class NetworkModuleOFT(network.NetworkModule): else: new_norm_Q = norm_Q block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) - m_I = torch.eye(self.block_size, device=oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) + m_I = torch.eye(self.num_blocks, device=oft_blocks.device).unsqueeze(0).repeat(self.block_size, 1, 1) + #m_I = torch.eye(self.block_size, device=oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) block_R = torch.matmul(m_I + block_Q, (m_I - block_Q).inverse()) block_R_weighted = multiplier * block_R + (1 - multiplier) * m_I @@ -97,25 +146,33 @@ class NetworkModuleOFT(network.NetworkModule): is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention] if not 
is_other_linear: - if is_other_linear and orig_weight.shape[0] != orig_weight.shape[1]: - orig_weight=orig_weight.permute(1, 0) + #if is_other_linear and orig_weight.shape[0] != orig_weight.shape[1]: + # orig_weight=orig_weight.permute(1, 0) + + oft_blocks = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) + + # without this line the results are significantly worse / less accurate + oft_blocks = oft_blocks - oft_blocks.transpose(1, 2) + + R = oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) + R = R * multiplier + torch.eye(self.block_size, device=orig_weight.device) - R = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size) merged_weight = torch.einsum( 'k n m, k n ... -> k m ...', - R * multiplier + torch.eye(self.block_size, device=orig_weight.device), + R, merged_weight ) merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...') - if is_other_linear and orig_weight.shape[0] != orig_weight.shape[1]: - orig_weight=orig_weight.permute(1, 0) + #if is_other_linear and orig_weight.shape[0] != orig_weight.shape[1]: + # orig_weight=orig_weight.permute(1, 0) updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight output_shape = orig_weight.shape else: # FIXME: skip MultiheadAttention for now + #up = self.lin_module.weight.to(orig_weight.device, dtype=orig_weight.dtype) updown = torch.zeros([orig_weight.shape[1], orig_weight.shape[1]], device=orig_weight.device, dtype=orig_weight.dtype) output_shape = (orig_weight.shape[1], orig_weight.shape[1]) @@ -123,10 +180,10 @@ class NetworkModuleOFT(network.NetworkModule): def calc_updown(self, orig_weight): multiplier = self.multiplier() * self.calc_scale() - if self.is_kohya: - return self.calc_updown_kohya(orig_weight, multiplier) - else: - return self.calc_updown_kb(orig_weight, multiplier) + #if self.is_kohya: + # return self.calc_updown_kohya(orig_weight, multiplier) + #else: + return self.calc_updown_kb(orig_weight, multiplier) # override to remove the multiplier/scale factor; it's already multiplied in get_weight def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None): -- cgit v1.2.3 From bbf00a96afb2215f13cc72a7908225ae300c423d Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Sat, 4 Nov 2023 14:56:47 -0700 Subject: refactor: remove unused function --- extensions-builtin/Lora/network_oft.py | 47 ---------------------------------- 1 file changed, 47 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index e4aa082b..93402bb2 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -2,7 +2,6 @@ import torch import network from lyco_helpers import factorization from einops import rearrange -from modules import devices class ModuleTypeOFT(network.ModuleType): @@ -54,58 +53,12 @@ class NetworkModuleOFT(network.NetworkModule): raise ValueError("sd_module must be Linear or Conv") if self.is_kohya: - #self.num_blocks = self.dim - #self.block_size = self.out_dim // self.num_blocks - #self.block_size = self.dim - #self.num_blocks = self.out_dim // self.block_size self.constraint = self.alpha * self.out_dim self.num_blocks, self.block_size = factorization(self.out_dim, self.dim) else: self.constraint = None self.block_size, self.num_blocks = factorization(self.out_dim, self.dim) - if is_other_linear: - self.lin_module = 
self.create_module(weights.w, "oft_diag", none_ok=True) - - - def create_module(self, weights, key, none_ok=False): - weight = weights.get(key) - - if weight is None and none_ok: - return None - - is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.nn.MultiheadAttention] - is_conv = type(self.sd_module) in [torch.nn.Conv2d] - - if is_linear: - weight = weight.reshape(weight.shape[0], -1) - module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False) - elif is_conv and key == "lora_down.weight" or key == "dyn_up": - if len(weight.shape) == 2: - weight = weight.reshape(weight.shape[0], -1, 1, 1) - - if weight.shape[2] != 1 or weight.shape[3] != 1: - module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False) - else: - module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False) - elif is_conv and key == "lora_mid.weight": - module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False) - elif is_conv and key == "lora_up.weight" or key == "dyn_down": - module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False) - else: - raise AssertionError(f'Lora layer {self.network_key} matched a layer with unsupported type: {type(self.sd_module).__name__}') - - with torch.no_grad(): - if weight.shape != module.weight.shape: - weight = weight.reshape(module.weight.shape) - module.weight.copy_(weight) - - module.to(device=devices.cpu, dtype=devices.dtype) - module.weight.requires_grad_(False) - - return module - - def merge_weight(self, R_weight, org_weight): R_weight = R_weight.to(org_weight.device, dtype=org_weight.dtype) if org_weight.dim() == 4: -- cgit v1.2.3 From 2b06cefe66684ed2648d3221efbc36aeaae99a2f Mon Sep 17 00:00:00 2001 From: gibiee <37574274+gibiee@users.noreply.github.com> Date: Sun, 5 Nov 2023 11:37:23 +0900 Subject: correct a typo modify "defaul" to "default" --- modules/cmd_args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 4e602a84..a9fb9bfa 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -107,7 +107,7 @@ parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, req parser.add_argument("--disable-tls-verify", action="store_false", help="When passed, enables the use of self-signed certificates.", default=None) parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None) parser.add_argument("--gradio-queue", action='store_true', help="does not do anything", default=True) -parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gradio queue; causes the webpage to use http requests instead of websockets; was the defaul in earlier versions") +parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gradio queue; causes the webpage to use http requests instead of websockets; was the default in earlier versions") parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers") parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False) parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False) -- cgit v1.2.3 From 
6b8c661c49796bba093ca8a8301e81d28afb9832 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 5 Nov 2023 08:55:54 +0300 Subject: add a visible checkbox to input accordion --- javascript/inputAccordion.js | 79 ++++++++++++++++++++++++++++++-------------- style.css | 5 +++ 2 files changed, 59 insertions(+), 25 deletions(-) diff --git a/javascript/inputAccordion.js b/javascript/inputAccordion.js index f2839852..8fc01230 100644 --- a/javascript/inputAccordion.js +++ b/javascript/inputAccordion.js @@ -1,37 +1,66 @@ -var observerAccordionOpen = new MutationObserver(function(mutations) { - mutations.forEach(function(mutationRecord) { - var elem = mutationRecord.target; - var open = elem.classList.contains('open'); +function inputAccordionChecked(id, checked) { + var accordion = gradioApp().getElementById(id); + accordion.visibleCheckbox.checked = checked; + accordion.onVisibleCheckboxChange(); +} - var accordion = elem.parentNode; - accordion.classList.toggle('input-accordion-open', open); +function setupAccordion(accordion){ + var labelWrap = accordion.querySelector('.label-wrap'); + var gradioCheckbox = gradioApp().querySelector('#' + accordion.id + "-checkbox input"); + var extra = gradioApp().querySelector('#' + accordion.id + "-extra"); + var span = labelWrap.querySelector('span'); + var linked = true; - var checkbox = gradioApp().querySelector('#' + accordion.id + "-checkbox input"); - checkbox.checked = open; - updateInput(checkbox); + var isOpen = function(){ return labelWrap.classList.contains('open'); } - var extra = gradioApp().querySelector('#' + accordion.id + "-extra"); - if (extra) { - extra.style.display = open ? "" : "none"; - } + var observerAccordionOpen = new MutationObserver(function(mutations) { + mutations.forEach(function(mutationRecord) { + accordion.classList.toggle('input-accordion-open', isOpen()); + + if(linked){ + accordion.visibleCheckbox.checked = isOpen(); + accordion.onVisibleCheckboxChange(); + } + }); }); -}); + observerAccordionOpen.observe(labelWrap, {attributes: true, attributeFilter: ['class']}); -function inputAccordionChecked(id, checked) { - var label = gradioApp().querySelector('#' + id + " .label-wrap"); - if (label.classList.contains('open') != checked) { - label.click(); + if (extra) { + labelWrap.insertBefore(extra, labelWrap.lastElementChild); + } + + accordion.onChecked = function(checked){ + if (isOpen() != checked) { + labelWrap.click(); + } } + + var visibleCheckbox = document.createElement('INPUT'); + visibleCheckbox.type = 'checkbox'; + visibleCheckbox.checked = isOpen(); + visibleCheckbox.id = accordion.id + "-visible-checkbox"; + visibleCheckbox.className = gradioCheckbox.className + " input-accordion-checkbox"; + span.insertBefore(visibleCheckbox, span.firstChild); + + accordion.visibleCheckbox = visibleCheckbox; + accordion.onVisibleCheckboxChange = function(){ + if(linked && isOpen() != visibleCheckbox.checked) { + labelWrap.click(); + } + + gradioCheckbox.checked = visibleCheckbox.checked; + updateInput(gradioCheckbox); + }; + + visibleCheckbox.addEventListener('click', function(event){ + linked = false; + event.stopPropagation(); + }); + visibleCheckbox.addEventListener('input', accordion.onVisibleCheckboxChange); } onUiLoaded(function() { for (var accordion of gradioApp().querySelectorAll('.input-accordion')) { - var labelWrap = accordion.querySelector('.label-wrap'); - observerAccordionOpen.observe(labelWrap, {attributes: true, attributeFilter: ['class']}); - - var extra = gradioApp().querySelector('#' + 
accordion.id + "-extra"); - if (extra) { - labelWrap.insertBefore(extra, labelWrap.lastElementChild); - } + setupAccordion(accordion); } }); diff --git a/style.css b/style.css index 115626cd..9a1181e8 100644 --- a/style.css +++ b/style.css @@ -204,6 +204,11 @@ div.block.gradio-accordion { padding: 8px 8px; } +input[type="checkbox"].input-accordion-checkbox{ + vertical-align: sub; + margin-right: 0.5em; +} + /* txt2img/img2img specific */ -- cgit v1.2.3 From 16ab17429016a1154b9aa83244cdbfc7ba463d72 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 5 Nov 2023 09:20:05 +0300 Subject: eslint --- javascript/inputAccordion.js | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/javascript/inputAccordion.js b/javascript/inputAccordion.js index 8fc01230..7570309a 100644 --- a/javascript/inputAccordion.js +++ b/javascript/inputAccordion.js @@ -4,20 +4,22 @@ function inputAccordionChecked(id, checked) { accordion.onVisibleCheckboxChange(); } -function setupAccordion(accordion){ +function setupAccordion(accordion) { var labelWrap = accordion.querySelector('.label-wrap'); var gradioCheckbox = gradioApp().querySelector('#' + accordion.id + "-checkbox input"); var extra = gradioApp().querySelector('#' + accordion.id + "-extra"); var span = labelWrap.querySelector('span'); var linked = true; - var isOpen = function(){ return labelWrap.classList.contains('open'); } + var isOpen = function() { + return labelWrap.classList.contains('open'); + }; var observerAccordionOpen = new MutationObserver(function(mutations) { mutations.forEach(function(mutationRecord) { accordion.classList.toggle('input-accordion-open', isOpen()); - if(linked){ + if (linked) { accordion.visibleCheckbox.checked = isOpen(); accordion.onVisibleCheckboxChange(); } @@ -29,22 +31,22 @@ function setupAccordion(accordion){ labelWrap.insertBefore(extra, labelWrap.lastElementChild); } - accordion.onChecked = function(checked){ + accordion.onChecked = function(checked) { if (isOpen() != checked) { labelWrap.click(); } - } + }; var visibleCheckbox = document.createElement('INPUT'); visibleCheckbox.type = 'checkbox'; visibleCheckbox.checked = isOpen(); - visibleCheckbox.id = accordion.id + "-visible-checkbox"; + visibleCheckbox.id = accordion.id + "-visible-checkbox"; visibleCheckbox.className = gradioCheckbox.className + " input-accordion-checkbox"; span.insertBefore(visibleCheckbox, span.firstChild); accordion.visibleCheckbox = visibleCheckbox; - accordion.onVisibleCheckboxChange = function(){ - if(linked && isOpen() != visibleCheckbox.checked) { + accordion.onVisibleCheckboxChange = function() { + if (linked && isOpen() != visibleCheckbox.checked) { labelWrap.click(); } @@ -52,7 +54,7 @@ function setupAccordion(accordion){ updateInput(gradioCheckbox); }; - visibleCheckbox.addEventListener('click', function(event){ + visibleCheckbox.addEventListener('click', function(event) { linked = false; event.stopPropagation(); }); -- cgit v1.2.3 From d9499f4301018ebd2977685d098381aa4111d2ae Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 5 Nov 2023 10:12:50 +0300 Subject: properly apply sort order for extra network cards when selected from dropdown allow selection of default sort order in settings remove 'Default' sort order, replace with 'Name' --- javascript/extraNetworks.js | 27 ++++++++++++++++----------- modules/shared_options.py | 2 ++ modules/ui_extra_networks.py | 6 ++++-- 3 files changed, 22 insertions(+), 13 deletions(-) diff --git 
a/javascript/extraNetworks.js b/javascript/extraNetworks.js index ac26718f..a4d1d9d9 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -27,7 +27,6 @@ function setupExtraNetworksForTab(tabname) { var showDirsDiv = gradioApp().getElementById(tabname + '_extra_show_dirs'); var showDirs = gradioApp().querySelector('#' + tabname + '_extra_show_dirs input'); - sort.dataset.sortkey = 'sortDefault'; tabs.appendChild(searchDiv); tabs.appendChild(sort); tabs.appendChild(sortOrder); @@ -49,20 +48,23 @@ function setupExtraNetworksForTab(tabname) { elem.style.display = visible ? "" : "none"; }); + + applySort(); }; var applySort = function() { + var cards = gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card'); + var reverse = sortOrder.classList.contains("sortReverse"); - var sortKey = sort.querySelector("input").value.toLowerCase().replace("sort", "").replaceAll(" ", "_").replace(/_+$/, "").trim(); - sortKey = sortKey ? "sort" + sortKey.charAt(0).toUpperCase() + sortKey.slice(1) : ""; - var sortKeyStore = sortKey ? sortKey + (reverse ? "Reverse" : "") : ""; - if (!sortKey || sortKeyStore == sort.dataset.sortkey) { + var sortKey = sort.querySelector("input").value.toLowerCase().replace("sort", "").replaceAll(" ", "_").replace(/_+$/, "").trim() || "name"; + sortKey = "sort" + sortKey.charAt(0).toUpperCase() + sortKey.slice(1); + var sortKeyStore = sortKey + "-" + (reverse ? "Descending" : "Ascending") + "-" + cards.length; + + if (sortKeyStore == sort.dataset.sortkey) { return; } - sort.dataset.sortkey = sortKeyStore; - var cards = gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card'); cards.forEach(function(card) { card.originalParentElement = card.parentElement; }); @@ -88,15 +90,13 @@ function setupExtraNetworksForTab(tabname) { }; search.addEventListener("input", applyFilter); - applyFilter(); - ["change", "blur", "click"].forEach(function(evt) { - sort.querySelector("input").addEventListener(evt, applySort); - }); sortOrder.addEventListener("click", function() { sortOrder.classList.toggle("sortReverse"); applySort(); }); + applyFilter(); + extraNetworksApplySort[tabname] = applySort; extraNetworksApplyFilter[tabname] = applyFilter; var showDirsUpdate = function() { @@ -113,7 +113,12 @@ function applyExtraNetworkFilter(tabname) { setTimeout(extraNetworksApplyFilter[tabname], 1); } +function applyExtraNetworkSort(tabname) { + setTimeout(extraNetworksApplySort[tabname], 1); +} + var extraNetworksApplyFilter = {}; +var extraNetworksApplySort = {}; var activePromptTextarea = {}; function setupExtraNetworks() { diff --git a/modules/shared_options.py b/modules/shared_options.py index 0a82216f..6543e440 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -234,6 +234,8 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), { "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"), "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"), "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"), + "extra_networks_card_order_field": OptionInfo("Name", "Default order field for Extra Networks cards", gr.Dropdown, {"choices": ['Name', 'Date Created', 'Date Modified']}).needs_reload_ui(), + "extra_networks_card_order": OptionInfo("Ascending", "Default order for Extra Networks cards", gr.Dropdown, {"choices": ['Ascending', 
'Descending']}).needs_reload_ui(), "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 59d6ecc6..fc729917 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -381,8 +381,8 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): related_tabs.append(tab) edit_search = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", elem_classes="search", placeholder="Search...", visible=False, interactive=True) - dropdown_sort = gr.Dropdown(choices=['Default Sort', 'Date Created', 'Date Modified', 'Name'], value='Default Sort', elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order") - button_sortorder = ToolButton(switch_values_symbol, elem_id=tabname+"_extra_sortorder", elem_classes="sortorder", visible=False, tooltip="Invert sort order") + dropdown_sort = gr.Dropdown(choices=['Name', 'Date Created', 'Date Modified', ], value=shared.opts.extra_networks_card_order_field, elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order") + button_sortorder = ToolButton(switch_values_symbol, elem_id=tabname+"_extra_sortorder", elem_classes=["sortorder"] + ([] if shared.opts.extra_networks_card_order == "Ascending" else ["sortReverse"]), visible=False, tooltip="Invert sort order") button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh", visible=False) checkbox_show_dirs = gr.Checkbox(True, label='Show dirs', elem_id=tabname+"_extra_show_dirs", elem_classes="show-dirs", visible=False) @@ -395,6 +395,8 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): for tab in related_tabs: tab.select(fn=lambda: [gr.update(visible=True) for _ in range(5)], inputs=[], outputs=[edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs], show_progress=False) + dropdown_sort.change(fn=lambda: None, _js="function(){ applyExtraNetworkSort('" + tabname + "'); }") + def pages_html(): if not ui.pages_contents: return refresh() -- cgit v1.2.3 From ff1609f91ea0e9a90ba7b6ecc6d794c39c1f8c8f Mon Sep 17 00:00:00 2001 From: Ritesh Gangnani Date: Sun, 5 Nov 2023 19:13:49 +0530 Subject: Add SSD-1B as a supported model --- modules/sd_hijack.py | 11 +++++++++++ modules/sd_models.py | 8 ++++++-- modules/sd_models_types.py | 5 ++++- 3 files changed, 21 insertions(+), 3 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 592f0055..d19f853e 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -180,6 +180,17 @@ class StableDiffusionModelHijack: except Exception as e: errors.display(e, "applying cross attention optimization") undo_optimizations() + + def conv_ssd(self, m): + delattr(m.model.diffusion_model.middle_block, '1') + delattr(m.model.diffusion_model.middle_block, '2') + for i in ['9','8','7','6','5','4']: + delattr(m.model.diffusion_model.input_blocks[7][1].transformer_blocks,i) + delattr(m.model.diffusion_model.input_blocks[8][1].transformer_blocks,i) + 
delattr(m.model.diffusion_model.output_blocks[0][1].transformer_blocks,i) + delattr(m.model.diffusion_model.output_blocks[1][1].transformer_blocks,i) + delattr(m.model.diffusion_model.output_blocks[4][1].transformer_blocks,'1') + delattr(m.model.diffusion_model.output_blocks[5][1].transformer_blocks,'1') def hijack(self, m): conditioner = getattr(m, 'conditioner', None) diff --git a/modules/sd_models.py b/modules/sd_models.py index 930d0bee..ef96d29d 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -346,10 +346,14 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer model.is_sdxl = hasattr(model, 'conditioner') model.is_sd2 = not model.is_sdxl and hasattr(model.cond_stage_model, 'model') model.is_sd1 = not model.is_sdxl and not model.is_sd2 - + model.is_ssd = model.is_sdxl and 'model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight' not in state_dict.keys() + if model.is_sdxl: sd_models_xl.extend_sdxl(model) - + + if model.is_ssd: + sd_hijack.model_hijack.conv_ssd(model) + model.load_state_dict(state_dict, strict=False) timer.record("apply weights to model") diff --git a/modules/sd_models_types.py b/modules/sd_models_types.py index 5ffd2f4f..1f28942a 100644 --- a/modules/sd_models_types.py +++ b/modules/sd_models_types.py @@ -22,7 +22,10 @@ class WebuiSdModel(LatentDiffusion): """structure with additional information about the file with model's weights""" is_sdxl: bool - """True if the model's architecture is SDXL""" + """True if the model's architecture is SDXL or SSD""" + + is_ssd: bool + """True if the model is SSD""" is_sd2: bool """True if the model's architecture is SD 2.x""" -- cgit v1.2.3 From 44db35fb1ad5d07837e890a0fd3c00addfb0402c Mon Sep 17 00:00:00 2001 From: Ritesh Gangnani Date: Sun, 5 Nov 2023 19:15:38 +0530 Subject: Added memory clearance after deletion --- modules/sd_hijack.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index d19f853e..059ffe8f 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -1,3 +1,5 @@ +import gc + import torch from torch.nn.functional import silu from types import MethodType @@ -190,7 +192,9 @@ class StableDiffusionModelHijack: delattr(m.model.diffusion_model.output_blocks[0][1].transformer_blocks,i) delattr(m.model.diffusion_model.output_blocks[1][1].transformer_blocks,i) delattr(m.model.diffusion_model.output_blocks[4][1].transformer_blocks,'1') - delattr(m.model.diffusion_model.output_blocks[5][1].transformer_blocks,'1') + delattr(m.model.diffusion_model.output_blocks[5][1].transformer_blocks,'1') + torch.cuda.empty_cache() + gc.collect() def hijack(self, m): conditioner = getattr(m, 'conditioner', None) -- cgit v1.2.3 From 44c5097375ae4cf40300c09473bb46cf6c5d6cb7 Mon Sep 17 00:00:00 2001 From: Ritesh Gangnani Date: Sun, 5 Nov 2023 20:31:57 +0530 Subject: Use devices.torch_gc() instead of empty_cache() --- modules/sd_hijack.py | 5 +---- modules/sd_models.py | 1 - 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 059ffe8f..0a7e5135 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -1,5 +1,3 @@ -import gc - import torch from torch.nn.functional import silu from types import MethodType @@ -193,8 +191,7 @@ class StableDiffusionModelHijack: delattr(m.model.diffusion_model.output_blocks[1][1].transformer_blocks,i) delattr(m.model.diffusion_model.output_blocks[4][1].transformer_blocks,'1') 
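For context on the SSD-1B hunks above: SSD-1B is a distilled SDXL, so the loader can classify a checkpoint purely from which tensors its state dict contains, and then delete the UNet submodules the distilled weights no longer cover. A minimal sketch of the detection idea (the probe key is the one used above; the helper itself is illustrative):

    SSD_PROBE_KEY = "model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight"

    def looks_like_ssd(state_dict, is_sdxl):
        # SSD-1B ships SDXL-shaped weights minus several attention blocks,
        # so this probe tensor is absent from its checkpoints
        return is_sdxl and SSD_PROBE_KEY not in state_dict

Since delattr() on a torch.nn.Module removes the child from the module tree, the pruned blocks are simply gone by the time load_state_dict(..., strict=False) runs, and the remaining keys line up with the distilled checkpoint.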
delattr(m.model.diffusion_model.output_blocks[5][1].transformer_blocks,'1') - torch.cuda.empty_cache() - gc.collect() + devices.torch_gc() def hijack(self, m): conditioner = getattr(m, 'conditioner', None) diff --git a/modules/sd_models.py b/modules/sd_models.py index ef96d29d..2242c363 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -347,7 +347,6 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer model.is_sd2 = not model.is_sdxl and hasattr(model.cond_stage_model, 'model') model.is_sd1 = not model.is_sdxl and not model.is_sd2 model.is_ssd = model.is_sdxl and 'model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight' not in state_dict.keys() - if model.is_sdxl: sd_models_xl.extend_sdxl(model) -- cgit v1.2.3 From 4d4a9e733219f8c065a4ab6c5ab42836db7330fe Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 5 Nov 2023 19:19:55 +0300 Subject: added compact prompt option --- extensions-builtin/mobile/javascript/mobile.js | 2 + javascript/extraNetworks.js | 33 ++++ modules/shared_items.py | 2 + modules/shared_options.py | 1 + modules/ui.py | 247 +++++++++---------------- modules/ui_common.py | 15 +- modules/ui_extra_networks.py | 16 +- modules/ui_extra_networks_checkpoints.py | 2 + modules/ui_toprow.py | 141 ++++++++++++++ style.css | 23 ++- 10 files changed, 314 insertions(+), 168 deletions(-) create mode 100644 modules/ui_toprow.py diff --git a/extensions-builtin/mobile/javascript/mobile.js b/extensions-builtin/mobile/javascript/mobile.js index 652f07ac..bff1aced 100644 --- a/extensions-builtin/mobile/javascript/mobile.js +++ b/extensions-builtin/mobile/javascript/mobile.js @@ -12,6 +12,8 @@ function isMobile() { } function reportWindowSize() { + if (gradioApp().querySelector('.toprow-compact-tools')) return; // not applicable for compact prompt layout + var currentlyMobile = isMobile(); if (currentlyMobile == isSetupForMobile) return; isSetupForMobile = currentlyMobile; diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index a4d1d9d9..a1bf29a8 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -26,6 +26,8 @@ function setupExtraNetworksForTab(tabname) { var refresh = gradioApp().getElementById(tabname + '_extra_refresh'); var showDirsDiv = gradioApp().getElementById(tabname + '_extra_show_dirs'); var showDirs = gradioApp().querySelector('#' + tabname + '_extra_show_dirs input'); + var promptContainer = gradioApp().querySelector('.prompt-container-compact#' + tabname + '_prompt_container'); + var negativePrompt = gradioApp().querySelector('#' + tabname + '_neg_prompt'); tabs.appendChild(searchDiv); tabs.appendChild(sort); @@ -109,6 +111,37 @@ function setupExtraNetworksForTab(tabname) { showDirsUpdate(); } +function extraNetworksMovePromptToTab(tabname, id, showPrompt, showNegativePrompt) { + if (!gradioApp().querySelector('.toprow-compact-tools')) return; // only applicable for compact prompt layout + + var promptContainer = gradioApp().getElementById(tabname + '_prompt_container'); + var prompt = gradioApp().getElementById(tabname + '_prompt_row'); + var negPrompt = gradioApp().getElementById(tabname + '_neg_prompt_row'); + var elem = id ? 
gradioApp().getElementById(id) : null; + + if (showNegativePrompt && elem) { + elem.insertBefore(negPrompt, elem.firstChild); + } else { + promptContainer.insertBefore(negPrompt, promptContainer.firstChild); + } + + if (showPrompt && elem) { + elem.insertBefore(prompt, elem.firstChild); + } else { + promptContainer.insertBefore(prompt, promptContainer.firstChild); + } +} + + +function extraNetworksUrelatedTabSelected(tabname) { // called from python when user selects an unrelated tab (generate) + extraNetworksMovePromptToTab(tabname, '', false, false); +} + +function extraNetworksTabSelected(tabname, id, showPrompt, showNegativePrompt) { // called from python when user selects an extra networks tab + extraNetworksMovePromptToTab(tabname, id, showPrompt, showNegativePrompt); + +} + function applyExtraNetworkFilter(tabname) { setTimeout(extraNetworksApplyFilter[tabname], 1); } diff --git a/modules/shared_items.py b/modules/shared_items.py index b1459f8c..5024b426 100644 --- a/modules/shared_items.py +++ b/modules/shared_items.py @@ -67,6 +67,8 @@ def reload_hypernetworks(): ui_reorder_categories_builtin_items = [ + "prompt", + "image", "inpaint", "sampler", "accordions", diff --git a/modules/shared_options.py b/modules/shared_options.py index 6543e440..4e3d7541 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -272,6 +272,7 @@ options_templates.update(options_section(('ui', "User interface"), { "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(), "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(), "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), + "compact_prompt_box": OptionInfo(True, "Compact prompt layout").info("puts prompt and negative prompt inside the Generate tab, leaving more vertical space for the image on the right").needs_reload_ui(), })) diff --git a/modules/ui.py b/modules/ui.py index bcf39199..2454eb36 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -12,7 +12,7 @@ from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import gradio_extensons # noqa: F401 -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, ui_extra_networks +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, scripts, sd_samplers, processing, ui_extra_networks, ui_toprow from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion, ResizeHandleRow from modules.paths import script_path from modules.ui_common import create_refresh_button @@ -25,7 +25,6 @@ import modules.hypernetworks.ui as hypernetworks_ui import modules.textual_inversion.ui as textual_inversion_ui import modules.textual_inversion.textual_inversion as textual_inversion import modules.shared as shared -import modules.images from modules import prompt_parser from modules.sd_hijack import model_hijack from modules.generation_parameters_copypaste import image_from_url_text @@ -177,79 +176,6 @@ def update_negative_prompt_token_counter(text, 
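The new "prompt" and "image" entries in ui_reorder_categories_builtin_items are what let the compact layout inline the toprow: each tab's builder walks the ordered categories and renders a piece wherever its name comes up. Sketched roughly below, where build_image_source_tabs is a hypothetical stand-in for the img2img source-tab block that appears later in this patch:

    for category in ordered_ui_categories():
        if category == "prompt":
            toprow.create_inline_toprow_prompts()  # no-op for the classic (non-compact) layout
        elif category == "image":
            build_image_source_tabs()  # hypothetical: the img2img/sketch/inpaint tab group
        elif category == "sampler":
            steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img")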
steps): return update_token_counter(text, steps, is_positive=False) -class Toprow: - """Creates a top row UI with prompts, generate button, styles, extra little buttons for things, and enables some functionality related to their operation""" - - def __init__(self, is_img2img): - id_part = "img2img" if is_img2img else "txt2img" - self.id_part = id_part - - with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): - with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - self.prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - self.prompt_img = gr.File(label="", elem_id=f"{id_part}_prompt_image", file_count="single", type="binary", visible=False) - - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - self.negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - self.button_interrogate = None - self.button_deepbooru = None - if is_img2img: - with gr.Column(scale=1, elem_classes="interrogate-col"): - self.button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") - self.button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") - - with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): - with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): - self.interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") - self.skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") - self.submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') - - self.skip.click( - fn=lambda: shared.state.skip(), - inputs=[], - outputs=[], - ) - - self.interrupt.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - with gr.Row(elem_id=f"{id_part}_tools"): - self.paste = ToolButton(value=paste_symbol, elem_id="paste", tooltip="Read generation parameters from prompt or last generation if prompt is empty into user interface.") - self.clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt", tooltip="Clear prompt") - self.apply_styles = ToolButton(value=ui_prompt_styles.styles_materialize_symbol, elem_id=f"{id_part}_style_apply", tooltip="Apply all selected styles to prompts.") - self.restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False, tooltip="Restore progress") - - self.token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) - self.token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") - self.negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) - self.negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") - - self.clear_prompt_button.click( - fn=lambda *x: x, - _js="confirm_clear_prompt", - inputs=[self.prompt, self.negative_prompt], - outputs=[self.prompt, self.negative_prompt], - ) - - self.ui_styles = ui_prompt_styles.UiPromptStyles(id_part, self.prompt, self.negative_prompt) - self.ui_styles.setup_apply_button(self.apply_styles) - - self.prompt_img.change( - 
fn=modules.images.image_data, - inputs=[self.prompt_img], - outputs=[self.prompt, self.prompt_img], - show_progress=False, - ) - def setup_progressbar(*args, **kwargs): pass @@ -288,8 +214,8 @@ def apply_setting(key, value): return getattr(opts, key) -def create_output_panel(tabname, outdir): - return ui_common.create_output_panel(tabname, outdir) +def create_output_panel(tabname, outdir, toprow=None): + return ui_common.create_output_panel(tabname, outdir, toprow) def create_sampler_and_steps_selection(choices, tabname): @@ -336,7 +262,7 @@ def create_ui(): scripts.scripts_txt2img.initialize_scripts(is_img2img=False) with gr.Blocks(analytics_enabled=False) as txt2img_interface: - toprow = Toprow(is_img2img=False) + toprow = ui_toprow.Toprow(is_img2img=False, is_compact=shared.opts.compact_prompt_box) dummy_component = gr.Label(visible=False) @@ -348,6 +274,9 @@ def create_ui(): scripts.scripts_txt2img.prepare_ui() for category in ordered_ui_categories(): + if category == "prompt": + toprow.create_inline_toprow_prompts() + if category == "sampler": steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "txt2img") @@ -442,7 +371,7 @@ def create_ui(): show_progress=False, ) - txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) + txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples, toprow) txt2img_args = dict( fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), @@ -554,7 +483,7 @@ def create_ui(): scripts.scripts_img2img.initialize_scripts(is_img2img=True) with gr.Blocks(analytics_enabled=False) as img2img_interface: - toprow = Toprow(is_img2img=True) + toprow = ui_toprow.Toprow(is_img2img=True, is_compact=shared.opts.compact_prompt_box) extra_tabs = gr.Tabs(elem_id="img2img_extra_tabs") extra_tabs.__enter__() @@ -577,85 +506,89 @@ def create_ui(): button = gr.Button(title) copy_image_buttons.append((button, name, elem)) - with gr.Tabs(elem_id="mode_img2img"): - img2img_selected_tab = gr.State(0) - - with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: - init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA", height=opts.img2img_editor_height) - add_copy_image_controls('img2img', init_img) - - with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: - sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color) - add_copy_image_controls('sketch', sketch) - - with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: - init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_mask_brush_color) - add_copy_image_controls('inpaint', init_img_with_mask) - - with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", 
source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color) - inpaint_color_sketch_orig = gr.State(None) - add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) - - def update_orig(image, state): - if image is not None: - same_size = state is not None and state.size == image.size - has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) - edited = same_size and has_exact_match - return image if not edited or state is None else state - - inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) - - with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: - init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") - init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask") - - with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: - hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' - gr.HTML( - "<p style=\"margin-bottom:0.75em\">Process images in a directory on the same machine where the server is running." + - "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." + - f"<br>Add inpaint batch mask directory to enable inpaint batch processing." - f"{hidden}</p>
" - ) - img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") - img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") - img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") - with gr.Accordion("PNG info", open=False): - img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") - img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") - img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps", "Model hash"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") - - img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] - - for i, tab in enumerate(img2img_tabs): - tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) - - def copy_image(img): - if isinstance(img, dict) and 'image' in img: - return img['image'] - - return img - - for button, name, elem in copy_image_buttons: - button.click( - fn=copy_image, - inputs=[elem], - outputs=[copy_image_destinations[name]], - ) - button.click( - fn=lambda: None, - _js=f"switch_to_{name.replace(' ', '_')}", - inputs=[], - outputs=[], - ) - - with FormRow(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") - scripts.scripts_img2img.prepare_ui() for category in ordered_ui_categories(): + if category == "prompt": + toprow.create_inline_toprow_prompts() + + if category == "image": + with gr.Tabs(elem_id="mode_img2img"): + img2img_selected_tab = gr.State(0) + + with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: + init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA", height=opts.img2img_editor_height) + add_copy_image_controls('img2img', init_img) + + with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: + sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color) + add_copy_image_controls('sketch', sketch) + + with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: + init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_mask_brush_color) + add_copy_image_controls('inpaint', init_img_with_mask) + + with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: + inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", 
interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color) + inpaint_color_sketch_orig = gr.State(None) + add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) + + def update_orig(image, state): + if image is not None: + same_size = state is not None and state.size == image.size + has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) + edited = same_size and has_exact_match + return image if not edited or state is None else state + + inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) + + with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: + init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") + init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask") + + with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: + hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' + gr.HTML( + "<p style=\"margin-bottom:0.75em\">Process images in a directory on the same machine where the server is running." + + "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." + + f"<br>Add inpaint batch mask directory to enable inpaint batch processing." + f"{hidden}</p>
" + ) + img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") + img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") + img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") + with gr.Accordion("PNG info", open=False): + img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") + img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") + img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps", "Model hash"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") + + img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] + + for i, tab in enumerate(img2img_tabs): + tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) + + def copy_image(img): + if isinstance(img, dict) and 'image' in img: + return img['image'] + + return img + + for button, name, elem in copy_image_buttons: + button.click( + fn=copy_image, + inputs=[elem], + outputs=[copy_image_destinations[name]], + ) + button.click( + fn=lambda: None, + _js=f"switch_to_{name.replace(' ', '_')}", + inputs=[], + outputs=[], + ) + + with FormRow(): + resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") + if category == "sampler": steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img") @@ -769,7 +702,7 @@ def create_ui(): if category not in {"accordions"}: scripts.scripts_img2img.setup_ui_for_section(category) - img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) + img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples, toprow) img2img_args = dict( fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), diff --git a/modules/ui_common.py b/modules/ui_common.py index 84a7d7f2..032ec4af 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -104,7 +104,7 @@ def save_files(js_data, images, do_make_zip, index): return gr.File.update(value=fullfns, visible=True), plaintext_to_html(f"Saved: {filenames[0]}") -def create_output_panel(tabname, outdir): +def create_output_panel(tabname, outdir, toprow=None): def open_folder(f): if not os.path.exists(f): @@ -130,12 +130,15 @@ Requested path was: {f} else: sp.Popen(["xdg-open", path]) - with gr.Column(variant='panel', elem_id=f"{tabname}_results"): - with gr.Group(elem_id=f"{tabname}_gallery_container"): - result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery", columns=4, preview=True, height=shared.opts.gallery_height or None) + with gr.Column(elem_id=f"{tabname}_results"): + if toprow: + toprow.create_inline_toprow_image() - generation_info = None - with gr.Column(): + with gr.Column(variant='panel', elem_id=f"{tabname}_results_panel"): + with 
gr.Group(elem_id=f"{tabname}_gallery_container"): + result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery", columns=4, preview=True, height=shared.opts.gallery_height or None) + + generation_info = None with gr.Row(elem_id=f"image_buttons_{tabname}", elem_classes="image-buttons"): open_folder_button = ToolButton(folder_symbol, elem_id=f'{tabname}_open_folder', visible=not shared.cmd_opts.hide_ui_dir_config, tooltip="Open images output directory.") diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index fc729917..7907cd63 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -103,6 +103,7 @@ class ExtraNetworksPage: self.name = title.lower() self.id_page = self.name.replace(" ", "_") self.card_page = shared.html("extra-networks-card.html") + self.allow_prompt = True self.allow_negative_prompt = False self.metadata = {} self.items = {} @@ -367,7 +368,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): related_tabs = [] for page in ui.stored_extra_pages: - with gr.Tab(page.title, id=page.id_page) as tab: + with gr.Tab(page.title, elem_id=f"{tabname}_{page.id_page}", elem_classes=["extra-page"]) as tab: elem_id = f"{tabname}_{page.id_page}_cards_html" page_elem = gr.HTML('Loading...', elem_id=elem_id) ui.pages.append(page_elem) @@ -389,11 +390,18 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): ui.button_save_preview = gr.Button('Save preview', elem_id=tabname+"_save_preview", visible=False) ui.preview_target_filename = gr.Textbox('Preview save filename', elem_id=tabname+"_preview_filename", visible=False) + tab_controls = [edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs] + for tab in unrelated_tabs: - tab.select(fn=lambda: [gr.update(visible=False) for _ in range(5)], inputs=[], outputs=[edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs], show_progress=False) + tab.select(fn=lambda: [gr.update(visible=False) for _ in tab_controls], _js='function(){ extraNetworksUrelatedTabSelected("' + tabname + '"); }', inputs=[], outputs=tab_controls, show_progress=False) + + for page, tab in zip(ui.stored_extra_pages, related_tabs): + allow_prompt = "true" if page.allow_prompt else "false" + allow_negative_prompt = "true" if page.allow_negative_prompt else "false" + + jscode = 'extraNetworksTabSelected("' + tabname + '", "' + f"{tabname}_{page.id_page}" + '", ' + allow_prompt + ', ' + allow_negative_prompt + ');' - for tab in related_tabs: - tab.select(fn=lambda: [gr.update(visible=True) for _ in range(5)], inputs=[], outputs=[edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs], show_progress=False) + tab.select(fn=lambda: [gr.update(visible=True) for _ in tab_controls], _js='function(){ ' + jscode + ' }', inputs=[], outputs=tab_controls, show_progress=False) dropdown_sort.change(fn=lambda: None, _js="function(){ applyExtraNetworkSort('" + tabname + "'); }") diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py index ca6c2607..2fc0ed43 100644 --- a/modules/ui_extra_networks_checkpoints.py +++ b/modules/ui_extra_networks_checkpoints.py @@ -10,6 +10,8 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage): def __init__(self): super().__init__('Checkpoints') + self.allow_prompt = False + def refresh(self): shared.refresh_checkpoints() diff --git a/modules/ui_toprow.py b/modules/ui_toprow.py new file mode 100644 index 00000000..985b5a2d --- 
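Regarding the tab-select wiring above: each extra-networks page now advertises whether it wants the prompt boxes via allow_prompt / allow_negative_prompt (the Checkpoints page opts out), and those Python booleans are serialized into the inline JS callback as lowercase literals. The string concatenation could equally be written as one f-string; a condensed sketch of the same wiring:

    for page, tab in zip(ui.stored_extra_pages, related_tabs):
        js = (f'extraNetworksTabSelected("{tabname}", "{tabname}_{page.id_page}", '
              f'{str(page.allow_prompt).lower()}, {str(page.allow_negative_prompt).lower()});')
        tab.select(fn=lambda: [gr.update(visible=True) for _ in tab_controls],
                   _js='function(){ ' + js + ' }', inputs=[], outputs=tab_controls, show_progress=False)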
/dev/null +++ b/modules/ui_toprow.py @@ -0,0 +1,141 @@ +import gradio as gr + +from modules import shared, ui_prompt_styles +import modules.images + +from modules.ui_components import ToolButton + + +class Toprow: + """Creates a top row UI with prompts, generate button, styles, extra little buttons for things, and enables some functionality related to their operation""" + + prompt = None + prompt_img = None + negative_prompt = None + + button_interrogate = None + button_deepbooru = None + + interrupt = None + skip = None + submit = None + + paste = None + clear_prompt_button = None + apply_styles = None + restore_progress_button = None + + token_counter = None + token_button = None + negative_token_counter = None + negative_token_button = None + + ui_styles = None + + submit_box = None + + def __init__(self, is_img2img, is_compact=False): + id_part = "img2img" if is_img2img else "txt2img" + self.id_part = id_part + self.is_img2img = is_img2img + self.is_compact = is_compact + + if not is_compact: + with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): + self.create_classic_toprow() + else: + self.create_submit_box() + + def create_classic_toprow(self): + self.create_prompts() + + with gr.Column(scale=1, elem_id=f"{self.id_part}_actions_column"): + self.create_submit_box() + + self.create_tools_row() + + self.create_styles_ui() + + def create_inline_toprow_prompts(self): + if not self.is_compact: + return + + self.create_prompts() + + with gr.Row(elem_classes=["toprow-compact-stylerow"]): + with gr.Column(elem_classes=["toprow-compact-tools"]): + self.create_tools_row() + with gr.Column(): + self.create_styles_ui() + + def create_inline_toprow_image(self): + if not self.is_compact: + return + + self.submit_box.render() + + def create_prompts(self): + with gr.Column(elem_id=f"{self.id_part}_prompt_container", elem_classes=["prompt-container-compact"] if self.is_compact else [], scale=6): + with gr.Row(elem_id=f"{self.id_part}_prompt_row", elem_classes=["prompt-row"]): + self.prompt = gr.Textbox(label="Prompt", elem_id=f"{self.id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + self.prompt_img = gr.File(label="", elem_id=f"{self.id_part}_prompt_image", file_count="single", type="binary", visible=False) + + with gr.Row(elem_id=f"{self.id_part}_neg_prompt_row", elem_classes=["prompt-row"]): + self.negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{self.id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + + self.prompt_img.change( + fn=modules.images.image_data, + inputs=[self.prompt_img], + outputs=[self.prompt, self.prompt_img], + show_progress=False, + ) + + def create_submit_box(self): + with gr.Row(elem_id=f"{self.id_part}_generate_box", elem_classes=["generate-box"] + (["generate-box-compact"] if self.is_compact else []), render=not self.is_compact) as submit_box: + self.submit_box = submit_box + + self.interrupt = gr.Button('Interrupt', elem_id=f"{self.id_part}_interrupt", elem_classes="generate-box-interrupt") + self.skip = gr.Button('Skip', elem_id=f"{self.id_part}_skip", elem_classes="generate-box-skip") + self.submit = gr.Button('Generate', elem_id=f"{self.id_part}_generate", variant='primary') + + self.skip.click( + fn=lambda: shared.state.skip(), + inputs=[], + outputs=[], + ) + + self.interrupt.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + + def 
create_tools_row(self): + with gr.Row(elem_id=f"{self.id_part}_tools"): + from modules.ui import paste_symbol, clear_prompt_symbol, restore_progress_symbol + + self.paste = ToolButton(value=paste_symbol, elem_id="paste", tooltip="Read generation parameters from prompt or last generation if prompt is empty into user interface.") + self.clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{self.id_part}_clear_prompt", tooltip="Clear prompt") + self.apply_styles = ToolButton(value=ui_prompt_styles.styles_materialize_symbol, elem_id=f"{self.id_part}_style_apply", tooltip="Apply all selected styles to prompts.") + + if self.is_img2img: + self.button_interrogate = ToolButton('📎', tooltip='Interrogate CLIP - use CLIP neural network to create a text describing the image, and put it into the prompt field', elem_id="interrogate") + self.button_deepbooru = ToolButton('📦', tooltip='Interrogate DeepBooru - use DeepBooru neural network to create a text describing the image, and put it into the prompt field', elem_id="deepbooru") + + self.restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{self.id_part}_restore_progress", visible=False, tooltip="Restore progress") + + self.token_counter = gr.HTML(value="0/75", elem_id=f"{self.id_part}_token_counter", elem_classes=["token-counter"]) + self.token_button = gr.Button(visible=False, elem_id=f"{self.id_part}_token_button") + self.negative_token_counter = gr.HTML(value="0/75", elem_id=f"{self.id_part}_negative_token_counter", elem_classes=["token-counter"]) + self.negative_token_button = gr.Button(visible=False, elem_id=f"{self.id_part}_negative_token_button") + + self.clear_prompt_button.click( + fn=lambda *x: x, + _js="confirm_clear_prompt", + inputs=[self.prompt, self.negative_prompt], + outputs=[self.prompt, self.negative_prompt], + ) + + def create_styles_ui(self): + self.ui_styles = ui_prompt_styles.UiPromptStyles(self.id_part, self.prompt, self.negative_prompt) + self.ui_styles.setup_apply_button(self.apply_styles) diff --git a/style.css b/style.css index 9a1181e8..73162022 100644 --- a/style.css +++ b/style.css @@ -296,6 +296,13 @@ input[type="checkbox"].input-accordion-checkbox{ min-height: 4.5em; } +#txt2img_generate, #img2img_generate { + min-height: 4.5em; +} +.generate-box-compact #txt2img_generate, .generate-box-compact #img2img_generate { + min-height: 3em; +} + @media screen and (min-width: 2500px) { #txt2img_gallery, #img2img_gallery { min-height: 768px; @@ -403,6 +410,15 @@ div#extras_scale_to_tab div.form{ min-width: 0.5em; } +div.toprow-compact-stylerow{ + margin: 0.5em 0; +} + +div.toprow-compact-tools{ + min-width: fit-content !important; + max-width: fit-content; +} + /* settings */ #quicksettings { align-items: end; @@ -525,7 +541,8 @@ table.popup-table .link{ height: 20px; background: #b4c0cc; border-radius: 3px !important; - top: -20px; + top: -14px; + left: 0px; width: 100%; } @@ -823,6 +840,10 @@ footer { /* extra networks UI */ +.extra-page .prompt{ + margin: 0 0 0.5em 0; +} + .extra-network-cards{ height: calc(100vh - 24rem); overflow: clip scroll; -- cgit v1.2.3 From c3699d4fd185d5a7285c5519f9bb4b6fec236d9f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 5 Nov 2023 19:23:48 +0300 Subject: compact prompt option disabled by default --- modules/shared_options.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 4e3d7541..a9964fcb 100644 --- a/modules/shared_options.py +++ 
b/modules/shared_options.py @@ -272,7 +272,7 @@ options_templates.update(options_section(('ui', "User interface"), { "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(), "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(), "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), - "compact_prompt_box": OptionInfo(True, "Compact prompt layout").info("puts prompt and negative prompt inside the Generate tab, leaving more vertical space for the image on the right").needs_reload_ui(), + "compact_prompt_box": OptionInfo(False, "Compact prompt layout").info("puts prompt and negative prompt inside the Generate tab, leaving more vertical space for the image on the right").needs_reload_ui(), })) -- cgit v1.2.3 From 80d639a440929e9effe4620ce74333de283e7efc Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 5 Nov 2023 19:32:21 +0300 Subject: linter --- modules/sd_hijack.py | 2 +- modules/sd_models.py | 4 ++-- modules/sd_models_types.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 4fff418d..c6d17764 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -183,7 +183,7 @@ class StableDiffusionModelHijack: except Exception as e: errors.display(e, "applying cross attention optimization") undo_optimizations() - + def conv_ssd(self, m): delattr(m.model.diffusion_model.middle_block, '1') delattr(m.model.diffusion_model.middle_block, '2') diff --git a/modules/sd_models.py b/modules/sd_models.py index d76dc580..1036a3b1 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -355,10 +355,10 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer model.is_ssd = model.is_sdxl and 'model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight' not in state_dict.keys() if model.is_sdxl: sd_models_xl.extend_sdxl(model) - + if model.is_ssd: sd_hijack.model_hijack.conv_ssd(model) - + if shared.opts.sd_checkpoint_cache > 0: # cache newly loaded model checkpoints_loaded[checkpoint_info] = state_dict.copy() diff --git a/modules/sd_models_types.py b/modules/sd_models_types.py index 1f28942a..f911fbb6 100644 --- a/modules/sd_models_types.py +++ b/modules/sd_models_types.py @@ -23,7 +23,7 @@ class WebuiSdModel(LatentDiffusion): is_sdxl: bool """True if the model's architecture is SDXL or SSD""" - + is_ssd: bool """True if the model is SSD""" -- cgit v1.2.3 From 6ad666e4794a57dd65790dd6a259d5d4330d45ed Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 5 Nov 2023 19:46:20 +0300 Subject: more changes for #13865: fix formatting, rename the function, add comment and add a readme entry --- README.md | 1 + modules/sd_hijack.py | 24 +++++++++++++----------- modules/sd_models.py | 2 +- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index c7a4e363..25ba070e 100644 --- a/README.md +++ b/README.md @@ -91,6 +91,7 @@ A browser interface based on Gradio library for Stable Diffusion. - Eased resolution restriction: generated image's dimensions must be a multiple of 8 rather than 64 - Now with a license! 
- Reorder elements in the UI from settings screen +- [Segmind Stable Diffusion](https://huggingface.co/segmind/SSD-1B) support ## Installation and Running Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for: diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index c6d17764..fba23c38 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -184,17 +184,19 @@ class StableDiffusionModelHijack: errors.display(e, "applying cross attention optimization") undo_optimizations() - def conv_ssd(self, m): - delattr(m.model.diffusion_model.middle_block, '1') - delattr(m.model.diffusion_model.middle_block, '2') - for i in ['9','8','7','6','5','4']: - delattr(m.model.diffusion_model.input_blocks[7][1].transformer_blocks,i) - delattr(m.model.diffusion_model.input_blocks[8][1].transformer_blocks,i) - delattr(m.model.diffusion_model.output_blocks[0][1].transformer_blocks,i) - delattr(m.model.diffusion_model.output_blocks[1][1].transformer_blocks,i) - delattr(m.model.diffusion_model.output_blocks[4][1].transformer_blocks,'1') - delattr(m.model.diffusion_model.output_blocks[5][1].transformer_blocks,'1') - devices.torch_gc() + def convert_sdxl_to_ssd(self, m): + """Converts an SDXL model to a Segmind Stable Diffusion model (see https://huggingface.co/segmind/SSD-1B)""" + + delattr(m.model.diffusion_model.middle_block, '1') + delattr(m.model.diffusion_model.middle_block, '2') + for i in ['9', '8', '7', '6', '5', '4']: + delattr(m.model.diffusion_model.input_blocks[7][1].transformer_blocks, i) + delattr(m.model.diffusion_model.input_blocks[8][1].transformer_blocks, i) + delattr(m.model.diffusion_model.output_blocks[0][1].transformer_blocks, i) + delattr(m.model.diffusion_model.output_blocks[1][1].transformer_blocks, i) + delattr(m.model.diffusion_model.output_blocks[4][1].transformer_blocks, '1') + delattr(m.model.diffusion_model.output_blocks[5][1].transformer_blocks, '1') + devices.torch_gc() def hijack(self, m): conditioner = getattr(m, 'conditioner', None) diff --git a/modules/sd_models.py b/modules/sd_models.py index 1036a3b1..841402e8 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -357,7 +357,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer sd_models_xl.extend_sdxl(model) if model.is_ssd: - sd_hijack.model_hijack.conv_ssd(model) + sd_hijack.model_hijack.convert_sdxl_to_ssd(model) if shared.opts.sd_checkpoint_cache > 0: # cache newly loaded model -- cgit v1.2.3 From 656437e0a50212778746c67785d23b0ea14a8837 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 6 Nov 2023 10:32:21 +0300 Subject: fix img2img_tabs error --- modules/ui.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 2454eb36..accdb457 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -689,19 +689,19 @@ def create_ui(): with gr.Column(scale=4): inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") - def select_img2img_tab(tab): - return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), - - for i, elem in enumerate(img2img_tabs): - elem.select( - fn=lambda tab=i: select_img2img_tab(tab), - inputs=[], - outputs=[inpaint_controls, mask_alpha], - ) - if category not in {"accordions"}: scripts.scripts_img2img.setup_ui_for_section(category) + def 
select_img2img_tab(tab): + return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), + + for i, elem in enumerate(img2img_tabs): + elem.select( + fn=lambda tab=i: select_img2img_tab(tab), + inputs=[], + outputs=[inpaint_controls, mask_alpha], + ) + img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples, toprow) img2img_args = dict( -- cgit v1.2.3 From 9c1c0da026cb7ef091a0f3fa24b14ae8634f6de5 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 6 Nov 2023 11:17:36 +0300 Subject: fix exception related to the pix2pix --- modules/sd_hijack.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index fba23c38..0157e19f 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -256,8 +256,12 @@ class StableDiffusionModelHijack: self.layers = flatten(m) + import modules.models.diffusion.ddpm_edit + if isinstance(m, ldm.models.diffusion.ddpm.LatentDiffusion): sd_unet.original_forward = ldm_original_forward + elif isinstance(m, modules.models.diffusion.ddpm_edit.LatentDiffusion): + sd_unet.original_forward = ldm_original_forward elif isinstance(m, sgm.models.diffusion.DiffusionEngine): sd_unet.original_forward = sgm_original_forward else: -- cgit v1.2.3 From 9ba991cad8a15a99f71f5b2ec5feff7dd9d270d7 Mon Sep 17 00:00:00 2001 From: GerryDE Date: Tue, 7 Nov 2023 03:09:08 +0100 Subject: Add option to set notification sound volume --- javascript/notification.js | 6 +++++- modules/shared_options.py | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/javascript/notification.js b/javascript/notification.js index 6d799561..3ee972ae 100644 --- a/javascript/notification.js +++ b/javascript/notification.js @@ -26,7 +26,11 @@ onAfterUiUpdate(function() { lastHeadImg = headImg; // play notification sound if available - gradioApp().querySelector('#audio_notification audio')?.play(); + const notificationAudio = gradioApp().querySelector('#audio_notification audio'); + if (notificationAudio) { + notificationAudio.volume = opts.notification_volume / 100.0 || 1.0; + notificationAudio.play(); + } if (document.hasFocus()) return; diff --git a/modules/shared_options.py b/modules/shared_options.py index a9964fcb..d40db530 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -64,6 +64,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that has been interrupted in mid-generation; even if not saved, they will still show up in webui output."), "notification_audio": OptionInfo(True, "Play notification sound after image generation").info("notification.mp3 should be present in the root directory").needs_reload_ui(), + "notification_volume": OptionInfo(100, "Notification sound volume", gr.Slider, {"minimum": 0, "maximum": 100, "step": 1}).info("in %"), })) options_templates.update(options_section(('saving-paths', "Paths for saving"), { -- cgit v1.2.3 From 5e80d9ee99c5899e5e2b130408ffb65a0585a62a Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 7 Nov 2023 11:33:16 +0300 Subject: fix pix2pix producing bad results --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 70ad1ebe..b0e240a4 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -296,7 +296,7 @@ class StableDiffusionProcessing: return 
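One quirk worth flagging in the notification.js hunk above: opts.notification_volume / 100.0 || 1.0 relies on ||, and 0 is falsy in JavaScript, so a volume slider set to 0 falls back to 1.0 (full volume) instead of muting. A null-coalescing variant would honor an explicit zero (a sketch, not the committed code):

    const volume = (opts.notification_volume ?? 100) / 100.0;
    notificationAudio.volume = Math.min(Math.max(volume, 0), 1);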
conditioning def edit_image_conditioning(self, source_image): - conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method)) + conditioning_image = shared.sd_model.encode_first_stage(source_image).mode() return conditioning_image -- cgit v1.2.3 From a625a7bb817cbf6a97d2030dc3a8015a046bd388 Mon Sep 17 00:00:00 2001 From: Emily Zeng Date: Thu, 9 Nov 2023 13:15:06 -0500 Subject: moved nested with to single line to remove extra tabs --- modules/ui.py | 591 +++++++++++++++++++++++++++++----------------------------- 1 file changed, 295 insertions(+), 296 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 4a3e60d1..0faccbd3 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -270,88 +270,87 @@ def create_ui(): extra_tabs.__enter__() with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, ResizeHandleRow(equal_height=False): - with gr.Accordion("Open for Settings", open=False) if shared.opts.img2img_settings_accordion else gr.Group(): - with gr.Column(variant='compact', elem_id="txt2img_settings"): - scripts.scripts_txt2img.prepare_ui() + with gr.Accordion("Open for Settings", open=False), gr.Column(variant='compact', elem_id="txt2img_settings") if shared.opts.img2img_settings_accordion else gr.Column(variant='compact', elem_id="txt2img_settings"): + scripts.scripts_txt2img.prepare_ui() - for category in ordered_ui_categories(): - if category == "prompt": - toprow.create_inline_toprow_prompts() + for category in ordered_ui_categories(): + if category == "prompt": + toprow.create_inline_toprow_prompts() - if category == "sampler": - steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "txt2img") + if category == "sampler": + steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "txt2img") - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="txt2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="txt2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", tooltip="Switch width/height") + with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", tooltip="Switch width/height") - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, 
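On the pix2pix fix above: for SD1.x models, encode_first_stage() returns the VAE posterior (a DiagonalGaussianDistribution), and instruct-pix2pix conditions on that distribution's mode, without the latent scale factor that the generic images_tensor_to_samples() helper applies; feeding it scaled, sampled latents is what produced the bad results. Roughly (a sketch of the intent, using only the calls the hunk itself shows):

    posterior = shared.sd_model.encode_first_stage(source_image)  # distribution over latents
    conditioning_image = posterior.mode()  # deterministic and unscaled, as ip2p expects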
elem_id="txt2img_batch_size") - elif category == "cfg": - with gr.Row(): - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") + elif category == "cfg": + with gr.Row(): + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - pass + elif category == "checkboxes": + with FormRow(elem_classes="checkboxes-row", variant="compact"): + pass - elif category == "accordions": - with gr.Row(elem_id="txt2img_accordions", elem_classes="accordions"): - with InputAccordion(False, label="Hires. fix", elem_id="txt2img_hr") as enable_hr: - with enable_hr.extra(): - hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False, min_width=0) + elif category == "accordions": + with gr.Row(elem_id="txt2img_accordions", elem_classes="accordions"): + with InputAccordion(False, label="Hires. fix", elem_id="txt2img_hr") as enable_hr: + with enable_hr.extra(): + hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False, min_width=0) - with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): - hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): + hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") - with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): - hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") - hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") - hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") + with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): + hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") + hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") + hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") - with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: + with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: - hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', 
elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint") - create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)}, "hr_checkpoint_refresh") + hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint") + create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)}, "hr_checkpoint_refresh") - hr_sampler_name = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + sd_samplers.visible_sampler_names(), value="Use same sampler") + hr_sampler_name = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + sd_samplers.visible_sampler_names(), value="Use same sampler") - with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: - with gr.Column(scale=80): - with gr.Row(): - hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) - with gr.Column(scale=80): - with gr.Row(): - hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) + with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: + with gr.Column(scale=80): + with gr.Row(): + hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) + with gr.Column(scale=80): + with gr.Row(): + hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) - scripts.scripts_txt2img.setup_ui_for_section(category) + scripts.scripts_txt2img.setup_ui_for_section(category) - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - elif category == "override_settings": - with FormRow(elem_id="txt2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('txt2img', row) + elif category == "override_settings": + with 
FormRow(elem_id="txt2img_override_settings_row") as row: + override_settings = create_override_settings_dropdown('txt2img', row) - elif category == "scripts": - with FormGroup(elem_id="txt2img_script_container"): - custom_inputs = scripts.scripts_txt2img.setup_ui() + elif category == "scripts": + with FormGroup(elem_id="txt2img_script_container"): + custom_inputs = scripts.scripts_txt2img.setup_ui() - if category not in {"accordions"}: - scripts.scripts_txt2img.setup_ui_for_section(category) + if category not in {"accordions"}: + scripts.scripts_txt2img.setup_ui_for_section(category) hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] @@ -490,258 +489,258 @@ def create_ui(): extra_tabs.__enter__() with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, ResizeHandleRow(equal_height=False): - with gr.Accordion("Open for Settings", open=False) if shared.opts.img2img_settings_accordion else gr.Group(): - with gr.Column(variant='compact', elem_id="img2img_settings"): - copy_image_buttons = [] - copy_image_destinations = {} - - def add_copy_image_controls(tab_name, elem): - with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"): - gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}") - - for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']): - if name == tab_name: - gr.Button(title, interactive=False) - copy_image_destinations[name] = elem - continue - - button = gr.Button(title) - copy_image_buttons.append((button, name, elem)) - - scripts.scripts_img2img.prepare_ui() - - for category in ordered_ui_categories(): - if category == "prompt": - toprow.create_inline_toprow_prompts() - - if category == "image": - with gr.Tabs(elem_id="mode_img2img"): - img2img_selected_tab = gr.State(0) - - with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: - init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA", height=opts.img2img_editor_height) - add_copy_image_controls('img2img', init_img) - - with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: - sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color) - add_copy_image_controls('sketch', sketch) - - with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: - init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_mask_brush_color) - add_copy_image_controls('inpaint', init_img_with_mask) - - with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color) - inpaint_color_sketch_orig = gr.State(None) - 
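# A quick aside on the mechanics of this reindent: Python's multi-manager
# `with` statement (`with A() as a, B() as b:`) enters several context
# managers on one line, which is what lets the nested blocks above lose a
# level of indentation. A minimal self-contained sketch, with nullcontext
# standing in for the gradio containers:
from contextlib import nullcontext

with nullcontext("tab") as tab, nullcontext("row") as row:
    print(tab, row)  # -> tab row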
add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) - - def update_orig(image, state): - if image is not None: - same_size = state is not None and state.size == image.size - has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) - edited = same_size and has_exact_match - return image if not edited or state is None else state - - inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) - - with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: - init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") - init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask") - - with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: - hidden = '
Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' - gr.HTML( - "

Process images in a directory on the same machine where the server is running." + - "
Use an empty output directory to save pictures normally instead of writing to the output directory." + - f"
Add inpaint batch mask directory to enable inpaint batch processing." - f"{hidden}

" - ) - img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") - img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") - img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") - with gr.Accordion("PNG info", open=False): - img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") - img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") - img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps", "Model hash"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") - - img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] - - for i, tab in enumerate(img2img_tabs): - tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) - - def copy_image(img): - if isinstance(img, dict) and 'image' in img: - return img['image'] - - return img - - for button, name, elem in copy_image_buttons: - button.click( - fn=copy_image, - inputs=[elem], - outputs=[copy_image_destinations[name]], - ) - button.click( - fn=lambda: None, - _js=f"switch_to_{name.replace(' ', '_')}", - inputs=[], - outputs=[], + with gr.Accordion("Open for Settings", open=False), gr.Column(variant='compact', elem_id="img2img_settings") if shared.opts.img2img_settings_accordion else gr.Column(variant='compact', elem_id="img2img_settings"): + copy_image_buttons = [] + copy_image_destinations = {} + + def add_copy_image_controls(tab_name, elem): + with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"): + gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}") + + for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']): + if name == tab_name: + gr.Button(title, interactive=False) + copy_image_destinations[name] = elem + continue + + button = gr.Button(title) + copy_image_buttons.append((button, name, elem)) + + scripts.scripts_img2img.prepare_ui() + + for category in ordered_ui_categories(): + if category == "prompt": + toprow.create_inline_toprow_prompts() + + if category == "image": + with gr.Tabs(elem_id="mode_img2img"): + img2img_selected_tab = gr.State(0) + + with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: + init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA", height=opts.img2img_editor_height) + add_copy_image_controls('img2img', init_img) + + with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: + sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color) + add_copy_image_controls('sketch', sketch) + + with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: + 
init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_mask_brush_color) + add_copy_image_controls('inpaint', init_img_with_mask) + + with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: + inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color) + inpaint_color_sketch_orig = gr.State(None) + add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) + + def update_orig(image, state): + if image is not None: + same_size = state is not None and state.size == image.size + has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) + edited = same_size and has_exact_match + return image if not edited or state is None else state + + inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) + + with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: + init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") + init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask") + + with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: + hidden = '
Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' + gr.HTML( + "

Process images in a directory on the same machine where the server is running." + + "
Use an empty output directory to save pictures normally instead of writing to the output directory." + + f"
Add inpaint batch mask directory to enable inpaint batch processing." + f"{hidden}

" ) + img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") + img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") + img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") + with gr.Accordion("PNG info", open=False): + img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") + img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") + img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps", "Model hash"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") + + img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] + + for i, tab in enumerate(img2img_tabs): + tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) + + def copy_image(img): + if isinstance(img, dict) and 'image' in img: + return img['image'] + + return img + + for button, name, elem in copy_image_buttons: + button.click( + fn=copy_image, + inputs=[elem], + outputs=[copy_image_destinations[name]], + ) + button.click( + fn=lambda: None, + _js=f"switch_to_{name.replace(' ', '_')}", + inputs=[], + outputs=[], + ) + + with FormRow(): + resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") + + if category == "sampler": + steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img") + + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + selected_scale_tab = gr.State(value=0) + + with gr.Tabs(): + with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn", tooltip="Switch width/height") + detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn", tooltip="Auto detect size from img2img") + + with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: + scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") + + with FormRow(): + scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") + gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") + button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") + + on_change_args = dict( + fn=resize_from_to_html, + _js="currentImg2imgSourceResolution", + inputs=[dummy_component, dummy_component, scale_by], + 
outputs=scale_by_html, + show_progress=False, + ) - with FormRow(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") + scale_by.release(**on_change_args) + button_update_resize_to.click(**on_change_args) + + # the code below is meant to update the resolution label after the image in the image selection UI has changed. + # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. + # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. + for component in [init_img, sketch]: + component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) + + with FormRow(): + resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") + + scripts.scripts_img2img.prepare_ui() + + for category in ordered_ui_categories(): + if category == "sampler": + steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img") + + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + selected_scale_tab = gr.State(value=0) + + with gr.Tabs(): + with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn", tooltip="Switch width/height") + detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn", tooltip="Auto detect size from img2img") + + with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: + scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") + + with FormRow(): + scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") + gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") + button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") + + on_change_args = dict( + fn=resize_from_to_html, + _js="currentImg2imgSourceResolution", + inputs=[dummy_component, dummy_component, scale_by], + outputs=scale_by_html, + show_progress=False, + ) - if category == "sampler": - steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img") + scale_by.release(**on_change_args) + button_update_resize_to.click(**on_change_args) - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - selected_scale_tab = gr.State(value=0) - - with gr.Tabs(): - with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = 
gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn", tooltip="Switch width/height") - detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn", tooltip="Auto detect size from img2img") - - with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: - scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") - - with FormRow(): - scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") - gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") - button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") - - on_change_args = dict( - fn=resize_from_to_html, - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, scale_by], - outputs=scale_by_html, - show_progress=False, - ) - - scale_by.release(**on_change_args) - button_update_resize_to.click(**on_change_args) - - # the code below is meant to update the resolution label after the image in the image selection UI has changed. - # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. - # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. - for component in [init_img, sketch]: - component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) + # the code below is meant to update the resolution label after the image in the image selection UI has changed. + # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. + # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. 
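# For reference, resize_from_to_html above renders the "resize: from WxH to
# W'xH'" preview label. A rough standalone sketch of what such a helper
# computes; the exact return markup in modules/ui.py may differ:
def resize_from_to_html_sketch(width: int, height: int, scale_by: float) -> str:
    target_width, target_height = int(width * scale_by), int(height * scale_by)
    if not target_width or not target_height:
        return "no image selected"
    return f"resize: from {width}x{height} to {target_width}x{target_height}"

print(resize_from_to_html_sketch(512, 512, 2.0))  # resize: from 512x512 to 1024x1024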
+ for component in [init_img, sketch]: + component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) - with FormRow(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") + tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) + tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) - scripts.scripts_img2img.prepare_ui() + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - for category in ordered_ui_categories(): - if category == "sampler": - steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img") + elif category == "denoising": + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - selected_scale_tab = gr.State(value=0) - - with gr.Tabs(): - with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn", tooltip="Switch width/height") - detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn", tooltip="Auto detect size from img2img") - - with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: - scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") - - with FormRow(): - scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") - gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") - button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") - - on_change_args = dict( - fn=resize_from_to_html, - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, scale_by], - outputs=scale_by_html, - show_progress=False, - ) - - scale_by.release(**on_change_args) - button_update_resize_to.click(**on_change_args) - - # the code below is meant to update the resolution label after the image in the image selection UI has changed. - # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. - # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. 
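# Also worth noting: on_change_args is a plain dict of event parameters built
# once and splatted into both registrations, keeping the slider's .release()
# and the hidden button's .click() in sync. A tiny illustration of the
# pattern, with stand-in names:
def register_event(fn, inputs, outputs, show_progress=True):
    print("registered:", fn.__name__, inputs, outputs, show_progress)

demo_args = dict(fn=print, inputs=["width", "height"], outputs=["label"], show_progress=False)
register_event(**demo_args)  # scale_by.release(**on_change_args)
register_event(**demo_args)  # button_update_resize_to.click(**on_change_args)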
- for component in [init_img, sketch]: - component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) - - tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) - tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) - - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - - elif category == "denoising": - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") - - elif category == "cfg": - with gr.Row(): - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") - image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) + elif category == "cfg": + with gr.Row(): + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") + image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - pass + elif category == "checkboxes": + with FormRow(elem_classes="checkboxes-row", variant="compact"): + pass - elif category == "accordions": - with gr.Row(elem_id="img2img_accordions", elem_classes="accordions"): - scripts.scripts_img2img.setup_ui_for_section(category) + elif category == "accordions": + with gr.Row(elem_id="img2img_accordions", elem_classes="accordions"): + scripts.scripts_img2img.setup_ui_for_section(category) - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - elif category == "override_settings": - with FormRow(elem_id="img2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('img2img', row) + elif category == "override_settings": + with FormRow(elem_id="img2img_override_settings_row") as row: + override_settings = create_override_settings_dropdown('img2img', row) - elif category == "scripts": - with FormGroup(elem_id="img2img_script_container"): - custom_inputs = scripts.scripts_img2img.setup_ui() + elif category == "scripts": + with FormGroup(elem_id="img2img_script_container"): + custom_inputs = scripts.scripts_img2img.setup_ui() - elif category == "inpaint": - with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: - with FormRow(): - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") - mask_alpha = 
gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha") + elif category == "inpaint": + with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: + with FormRow(): + mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") + mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha") - with FormRow(): - inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") + with FormRow(): + inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") - with FormRow(): - inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") + with FormRow(): + inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") - with FormRow(): - with gr.Column(): - inpaint_full_res = gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") + with FormRow(): + with gr.Column(): + inpaint_full_res = gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") - with gr.Column(scale=4): - inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") + with gr.Column(scale=4): + inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") - if category not in {"accordions"}: - scripts.scripts_img2img.setup_ui_for_section(category) + if category not in {"accordions"}: + scripts.scripts_img2img.setup_ui_for_section(category) + def select_img2img_tab(tab): return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), -- cgit v1.2.3 From 9aa4d098f07655d99cd16e8e9c984d043dbf9006 Mon Sep 17 00:00:00 2001 From: Emily Zeng Date: Thu, 9 Nov 2023 13:25:24 -0500 Subject: removed changes that weren't merged properly --- modules/ui.py | 51 +-------------------------------------------------- 1 file changed, 1 insertion(+), 50 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 0faccbd3..3eec7839 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -592,55 +592,6 @@ def create_ui(): if category == "sampler": steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img") - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - selected_scale_tab = gr.State(value=0) - - with gr.Tabs(): - with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = 
ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn", tooltip="Switch width/height") - detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn", tooltip="Auto detect size from img2img") - - with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: - scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") - - with FormRow(): - scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") - gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") - button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") - - on_change_args = dict( - fn=resize_from_to_html, - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, scale_by], - outputs=scale_by_html, - show_progress=False, - ) - - scale_by.release(**on_change_args) - button_update_resize_to.click(**on_change_args) - - # the code below is meant to update the resolution label after the image in the image selection UI has changed. - # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. - # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. - for component in [init_img, sketch]: - component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) - - with FormRow(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") - - scripts.scripts_img2img.prepare_ui() - - for category in ordered_ui_categories(): - if category == "sampler": - steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img") - elif category == "dimensions": with FormRow(): with gr.Column(elem_id="img2img_column_size", scale=4): @@ -740,7 +691,7 @@ def create_ui(): if category not in {"accordions"}: scripts.scripts_img2img.setup_ui_for_section(category) - + def select_img2img_tab(tab): return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), -- cgit v1.2.3 From ff2952f10551aab2000002079d5f862af979e964 Mon Sep 17 00:00:00 2001 From: Emily Zeng Date: Thu, 9 Nov 2023 13:35:52 -0500 Subject: multiline with statement for readability --- modules/ui.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 3eec7839..bf06776e 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -270,7 +270,9 @@ def create_ui(): extra_tabs.__enter__() with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, ResizeHandleRow(equal_height=False): - with gr.Accordion("Open for Settings", open=False), gr.Column(variant='compact', elem_id="txt2img_settings") if shared.opts.img2img_settings_accordion else gr.Column(variant='compact', elem_id="txt2img_settings"): + with gr.Accordion("Open for Settings", open=False), gr.Column(variant='compact', elem_id="txt2img_settings") \ + if shared.opts.img2img_settings_accordion else gr.Column(variant='compact', elem_id="txt2img_settings"): + scripts.scripts_txt2img.prepare_ui() for category in ordered_ui_categories(): @@ -489,7 +491,9 @@ def create_ui(): extra_tabs.__enter__() with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, ResizeHandleRow(equal_height=False): - with 
gr.Accordion("Open for Settings", open=False), gr.Column(variant='compact', elem_id="img2img_settings") if shared.opts.img2img_settings_accordion else gr.Column(variant='compact', elem_id="img2img_settings"): + with gr.Accordion("Open for Settings", open=False), gr.Column(variant='compact', elem_id="img2img_settings") \ + if shared.opts.img2img_settings_accordion else gr.Column(variant='compact', elem_id="img2img_settings"): + copy_image_buttons = [] copy_image_destinations = {} -- cgit v1.2.3 -- cgit v1.2.3 From 6d77a6e1c6b27ae82b2186cfc36cc4ad2a5e9ecf Mon Sep 17 00:00:00 2001 From: "fuchen.ljl" Date: Fri, 10 Nov 2023 14:40:39 +0800 Subject: Update README.md Modify the stablediffusion dependency address --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4e083440..1c97ecbb 100644 --- a/README.md +++ b/README.md @@ -146,7 +146,7 @@ For the purposes of getting Google and other search engines to crawl the wiki, h ## Credits Licenses for borrowed code can be found in `Settings -> Licenses` screen, and also in `html/licenses.html` file. -- Stable Diffusion - https://github.com/CompVis/stable-diffusion, https://github.com/CompVis/taming-transformers +- Stable Diffusion - https://github.com/Stability-AI/stablediffusion, https://github.com/CompVis/taming-transformers - k-diffusion - https://github.com/crowsonkb/k-diffusion.git - GFPGAN - https://github.com/TencentARC/GFPGAN.git - CodeFormer - https://github.com/sczhou/CodeFormer -- cgit v1.2.3 From 66767e3876dde8d0ef27ce00254cd6b75332f036 Mon Sep 17 00:00:00 2001 From: "Alessandro de Oliveira Faria (A.K.A. CABELO)" Date: Fri, 10 Nov 2023 03:45:44 -0300 Subject: - opensuse compatibility --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 4e083440..89e54a61 100644 --- a/README.md +++ b/README.md @@ -121,6 +121,8 @@ Alternatively, use online services (like Google Colab): sudo apt install wget git python3 python3-venv libgl1 libglib2.0-0 # Red Hat-based: sudo dnf install wget git python3 +# openSUSE-based: +sudo zypper install wget git python3 # Arch-based: sudo pacman -S wget git python3 ``` -- cgit v1.2.3 From 7ff54005fee46ce188544db75c27de61ae279001 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Thu, 9 Nov 2023 23:47:53 -0700 Subject: Enable prompt hotkeys in style editor --- modules/ui_prompt_styles.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui_prompt_styles.py b/modules/ui_prompt_styles.py index 85eb3a64..d6f8d4c7 100644 --- a/modules/ui_prompt_styles.py +++ b/modules/ui_prompt_styles.py @@ -64,10 +64,10 @@ class UiPromptStyles: self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f"{tabname}_style_apply", tooltip="Apply all selected styles from the style selction dropdown in main UI to the prompt.") with gr.Row(): - self.prompt = gr.Textbox(label="Prompt", show_label=True, elem_id=f"{tabname}_edit_style_prompt", lines=3) + self.prompt = gr.Textbox(label="Prompt", show_label=True, elem_id=f"{tabname}_edit_style_prompt", lines=3, elem_classes=["prompt"]) with gr.Row(): - self.neg_prompt = gr.Textbox(label="Negative prompt", show_label=True, elem_id=f"{tabname}_edit_style_neg_prompt", lines=3) + self.neg_prompt = gr.Textbox(label="Negative prompt", show_label=True, elem_id=f"{tabname}_edit_style_neg_prompt", lines=3, elem_classes=["prompt"]) with gr.Row(): self.save = gr.Button('Save', variant='primary', elem_id=f'{tabname}_edit_style_save', visible=False) -- cgit v1.2.3 From 
6a86b3ad9bc7bb9a58dc4228ecf93a3a511ed122 Mon Sep 17 00:00:00 2001 From: "Alessandro de Oliveira Faria (A.K.A. CABELO)" Date: Fri, 10 Nov 2023 14:15:34 -0300 Subject: Compatibility with Debian 11, Fedora 34+ and openSUSE 15.4+ --- README.md | 4 ++-- webui.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 89e54a61..d4aa376b 100644 --- a/README.md +++ b/README.md @@ -120,9 +120,9 @@ Alternatively, use online services (like Google Colab): # Debian-based: sudo apt install wget git python3 python3-venv libgl1 libglib2.0-0 # Red Hat-based: -sudo dnf install wget git python3 +sudo dnf install wget git python3 gperftools-libs libglvnd-glx # openSUSE-based: -sudo zypper install wget git python3 +sudo zypper install wget git python3 libtcmalloc4 libglvnd # Arch-based: sudo pacman -S wget git python3 ``` diff --git a/webui.sh b/webui.sh index 3d0f87ee..5c23c1d8 100755 --- a/webui.sh +++ b/webui.sh @@ -87,7 +87,7 @@ delimiter="################################################################" printf "\n%s\n" "${delimiter}" printf "\e[1m\e[32mInstall script for stable-diffusion + Web UI\n" -printf "\e[1m\e[34mTested on Debian 11 (Bullseye)\e[0m" +printf "\e[1m\e[34mTested on Debian 11 (Bullseye), Fedora 34+ and openSUSE Leap 15.4 or newer.\e[0m" printf "\n%s\n" "${delimiter}" # Do not run as root @@ -222,7 +222,7 @@ fi # Try using TCMalloc on Linux prepare_tcmalloc() { if [[ "${OSTYPE}" == "linux"* ]] && [[ -z "${NO_TCMALLOC}" ]] && [[ -z "${LD_PRELOAD}" ]]; then - TCMALLOC="$(PATH=/usr/sbin:$PATH ldconfig -p | grep -Po "libtcmalloc(_minimal|)\.so\.\d" | head -n 1)" + TCMALLOC="$(PATH=/sbin:$PATH ldconfig -p | grep -Po "libtcmalloc(_minimal|)\.so\.\d" | head -n 1)" if [[ ! -z "${TCMALLOC}" ]]; then echo "Using TCMalloc: ${TCMALLOC}" export LD_PRELOAD="${TCMALLOC}" -- cgit v1.2.3 From 5432d9301359945b595d5e6649c7a64b4bb0bfca Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 11 Nov 2023 03:38:55 +0900 Subject: fix added accordion settings options --- modules/ui.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index bf06776e..f28de354 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -4,6 +4,7 @@ import os import sys from functools import reduce import warnings +from contextlib import suppress import gradio as gr import gradio.utils @@ -270,9 +271,7 @@ def create_ui(): extra_tabs.__enter__() with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, ResizeHandleRow(equal_height=False): - with gr.Accordion("Open for Settings", open=False), gr.Column(variant='compact', elem_id="txt2img_settings") \ - if shared.opts.img2img_settings_accordion else gr.Column(variant='compact', elem_id="txt2img_settings"): - + with gr.Accordion("Open for Settings", open=False) if shared.opts.txt2img_settings_accordion else suppress(), gr.Column(variant='compact', elem_id="txt2img_settings"): scripts.scripts_txt2img.prepare_ui() for category in ordered_ui_categories(): @@ -491,8 +490,7 @@ def create_ui(): extra_tabs.__enter__() with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, ResizeHandleRow(equal_height=False): - with gr.Accordion("Open for Settings", open=False), gr.Column(variant='compact', elem_id="img2img_settings") \ - if shared.opts.img2img_settings_accordion else gr.Column(variant='compact', elem_id="img2img_settings"): + with gr.Accordion("Open for Settings", open=False) if shared.opts.img2img_settings_accordion else 
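# Context for the one-line fix below: `with A() if cond else B():` needs some
# context manager in the else branch, and a bare contextlib.suppress() serves
# as a no-op one. A self-contained sketch of the idiom -- Accordion is a
# stand-in for gr.Accordion, use_accordion for the webui option:
from contextlib import suppress, ExitStack

class Accordion:
    def __enter__(self): return self
    def __exit__(self, *exc): return False

use_accordion = True
with Accordion() if use_accordion else suppress():
    pass  # settings column is built here

# The follow-up commit swaps this for ExitStack, which composes any number of
# optionally-entered managers:
with ExitStack() as stack:
    if use_accordion:
        stack.enter_context(Accordion())
    pass  # settings column is built here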
suppress(), gr.Column(variant='compact', elem_id="img2img_settings"): copy_image_buttons = [] copy_image_destinations = {} -- cgit v1.2.3 From 3a4a6c43a4ca31056d5c09bb54e3eef24e6cf864 Mon Sep 17 00:00:00 2001 From: Emily Zeng Date: Fri, 10 Nov 2023 16:06:01 -0500 Subject: ExitStack as alternative to suppress --- modules/ui.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index f28de354..ba0d8542 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -4,7 +4,7 @@ import os import sys from functools import reduce import warnings -from contextlib import suppress +from contextlib import ExitStack import gradio as gr import gradio.utils @@ -271,7 +271,11 @@ def create_ui(): extra_tabs.__enter__() with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, ResizeHandleRow(equal_height=False): - with gr.Accordion("Open for Settings", open=False) if shared.opts.txt2img_settings_accordion else suppress(), gr.Column(variant='compact', elem_id="txt2img_settings"): + with ExitStack() as stack: + if shared.opts.txt2img_settings_accordion: + stack.enter_context(gr.Accordion("Open for Settings", open=False)) + stack.enter_context(gr.Column(variant='compact', elem_id="txt2img_settings")) + scripts.scripts_txt2img.prepare_ui() for category in ordered_ui_categories(): @@ -490,7 +494,10 @@ def create_ui(): extra_tabs.__enter__() with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, ResizeHandleRow(equal_height=False): - with gr.Accordion("Open for Settings", open=False) if shared.opts.img2img_settings_accordion else suppress(), gr.Column(variant='compact', elem_id="img2img_settings"): + with ExitStack() as stack: + if shared.opts.img2img_settings_accordion: + stack.enter_context(gr.Accordion("Open for Settings", open=False)) + stack.enter_context(gr.Column(variant='compact', elem_id="img2img_settings")) copy_image_buttons = [] copy_image_destinations = {} -- cgit v1.2.3 From 0fc7dc1c04a046d95588651ffc4e71a7d40378d3 Mon Sep 17 00:00:00 2001 From: wfjsw Date: Sat, 11 Nov 2023 04:01:13 -0600 Subject: implementing script metadata and DAG sorting mechanism --- modules/extensions.py | 80 ++++++++++++++++++++++++---- modules/scripts.py | 141 ++++++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 197 insertions(+), 24 deletions(-) diff --git a/modules/extensions.py b/modules/extensions.py index bf9a1878..e317a404 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -1,3 +1,5 @@ +import configparser +import functools import os import threading @@ -23,8 +25,9 @@ class Extension: lock = threading.Lock() cached_fields = ['remote', 'commit_date', 'branch', 'commit_hash', 'version'] - def __init__(self, name, path, enabled=True, is_builtin=False): + def __init__(self, name, path, enabled=True, is_builtin=False, canonical_name=None): self.name = name + self.canonical_name = canonical_name or name.lower() self.path = path self.enabled = enabled self.status = '' @@ -37,6 +40,17 @@ class Extension: self.remote = None self.have_info_from_repo = False + @functools.cached_property + def metadata(self): + if os.path.isfile(os.path.join(self.path, "sd_webui_metadata.ini")): + try: + config = configparser.ConfigParser() + config.read(os.path.join(self.path, "sd_webui_metadata.ini")) + return config + except Exception: + errors.report(f"Error reading sd_webui_metadata.ini for extension {self.canonical_name}.", exc_info=True) + return None + def to_dict(self): return {x: getattr(self, x) for x in 
self.cached_fields} @@ -136,9 +150,6 @@ class Extension: def list_extensions(): extensions.clear() - if not os.path.isdir(extensions_dir): - return - if shared.cmd_opts.disable_all_extensions: print("*** \"--disable-all-extensions\" arg was used, will not load any extensions ***") elif shared.opts.disable_all_extensions == "all": @@ -148,18 +159,69 @@ def list_extensions(): elif shared.opts.disable_all_extensions == "extra": print("*** \"Disable all extensions\" option was set, will only load built-in extensions ***") - extension_paths = [] + extension_dependency_map = {} + + # scan through extensions directory and load metadata for dirname in [extensions_dir, extensions_builtin_dir]: if not os.path.isdir(dirname): - return + continue for extension_dirname in sorted(os.listdir(dirname)): path = os.path.join(dirname, extension_dirname) if not os.path.isdir(path): continue - extension_paths.append((extension_dirname, path, dirname == extensions_builtin_dir)) + canonical_name = extension_dirname + requires = None + + if os.path.isfile(os.path.join(path, "sd_webui_metadata.ini")): + try: + config = configparser.ConfigParser() + config.read(os.path.join(path, "sd_webui_metadata.ini")) + canonical_name = config.get("Extension", "Name", fallback=canonical_name) + requires = config.get("Extension", "Requires", fallback=None) + continue + except Exception: + errors.report(f"Error reading sd_webui_metadata.ini for extension {extension_dirname}. " + f"Will load regardless.", exc_info=True) + + canonical_name = canonical_name.lower().strip() + + # check for duplicated canonical names + if canonical_name in extension_dependency_map: + errors.report(f"Duplicate canonical name \"{canonical_name}\" found in extensions " + f"\"{extension_dirname}\" and \"{extension_dependency_map[canonical_name]['dirname']}\". " + f"The current loading extension will be discarded.", exc_info=False) + continue - for dirname, path, is_builtin in extension_paths: - extension = Extension(name=dirname, path=path, enabled=dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin) + # we want to wash the data to lowercase and remove whitespaces just in case + requires = [x.strip() for x in requires.lower().split(',')] if requires else [] + + extension_dependency_map[canonical_name] = { + "dirname": extension_dirname, + "path": path, + "requires": requires, + } + + # check for requirements + for (_, extension_data) in extension_dependency_map.items(): + dirname, path, requires = extension_data['dirname'], extension_data['path'], extension_data['requires'] + requirement_met = True + for req in requires: + if req not in extension_dependency_map: + errors.report(f"Extension \"{dirname}\" requires \"{req}\" which is not installed. " + f"The current loading extension will be discarded.", exc_info=False) + requirement_met = False + break + dep_dirname = extension_dependency_map[req]['dirname'] + if dep_dirname in shared.opts.disabled_extensions: + errors.report(f"Extension \"{dirname}\" requires \"{dep_dirname}\" which is disabled. 
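# The metadata file read above is plain INI handled by configparser; a
# self-contained sketch of parsing the keys this patch defines (the path and
# values here are illustrative):
import configparser

config = configparser.ConfigParser()
config.read("extensions/example-extension/sd_webui_metadata.ini")  # a missing file just yields an empty config
canonical_name = config.get("Extension", "Name", fallback="example-extension").lower().strip()
requires_raw = config.get("Extension", "Requires", fallback=None)
requires = [x.strip() for x in requires_raw.lower().split(",")] if requires_raw else []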
" + f"The current loading extension will be discarded.", exc_info=False) + requirement_met = False + break + + is_builtin = dirname == extensions_builtin_dir + extension = Extension(name=dirname, path=path, + enabled=dirname not in shared.opts.disabled_extensions and requirement_met, + is_builtin=is_builtin) extensions.append(extension) diff --git a/modules/scripts.py b/modules/scripts.py index 5c6e0226..e92a34a0 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -2,6 +2,7 @@ import os import re import sys import inspect +from graphlib import TopologicalSorter, CycleError from collections import namedtuple from dataclasses import dataclass @@ -314,15 +315,131 @@ ScriptClassData = namedtuple("ScriptClassData", ["script_class", "path", "basedi def list_scripts(scriptdirname, extension, *, include_extensions=True): scripts_list = [] - - basedir = os.path.join(paths.script_path, scriptdirname) - if os.path.exists(basedir): - for filename in sorted(os.listdir(basedir)): - scripts_list.append(ScriptFile(paths.script_path, filename, os.path.join(basedir, filename))) + script_dependency_map = {} + + # build script dependency map + + root_script_basedir = os.path.join(paths.script_path, scriptdirname) + if os.path.exists(root_script_basedir): + for filename in sorted(os.listdir(root_script_basedir)): + script_dependency_map[filename] = { + "extension": None, + "extension_dirname": None, + "script_file": ScriptFile(paths.script_path, filename, os.path.join(root_script_basedir, filename)), + "requires": [], + "load_before": [], + "load_after": [], + } if include_extensions: for ext in extensions.active(): - scripts_list += ext.list_files(scriptdirname, extension) + extension_scripts_list = ext.list_files(scriptdirname, extension) + for extension_script in extension_scripts_list: + # this is built on the assumption that script name is unique. + # I think bad thing is gonna happen if name collide in the current implementation anyway, but we + # will need to refactor here if this assumption is broken later on. + if extension_script.filename in script_dependency_map: + errors.report(f"Duplicate script name \"{extension_script.filename}\" found in extensions " + f"\"{ext.name}\" and \"{script_dependency_map[extension_script.filename]['extension_dirname'] or 'builtin'}\". 
" + f"The current loading file will be discarded.", exc_info=False) + continue + + relative_path = scriptdirname + "/" + extension_script.filename + + requires = None + load_before = None + load_after = None + + if ext.metadata is not None: + requires = ext.metadata.get(relative_path, "Requires", fallback=None) + load_before = ext.metadata.get(relative_path, "Before", fallback=None) + load_after = ext.metadata.get(relative_path, "After", fallback=None) + + requires = [x.strip() for x in requires.split(',')] if requires else [] + load_after = [x.strip() for x in load_after.split(',')] if load_after else [] + load_before = [x.strip() for x in load_before.split(',')] if load_before else [] + + script_dependency_map[extension_script.filename] = { + "extension": ext.canonical_name, + "extension_dirname": ext.name, + "script_file": extension_script, + "requires": requires, + "load_before": load_before, + "load_after": load_after, + } + + # resolve dependencies + + loaded_extensions = set() + for _, script_data in script_dependency_map.items(): + if script_data['extension'] is not None: + loaded_extensions.add(script_data['extension']) + + for script_filename, script_data in script_dependency_map.items(): + # load before requires inverse dependency + # in this case, append the script name into the load_after list of the specified script + for load_before_script in script_data['load_before']: + if load_before_script.startswith('ext:'): + # if this requires an extension to be loaded before + required_extension = load_before_script[4:] + for _, script_data2 in script_dependency_map.items(): + if script_data2['extension'] == required_extension: + script_data2['load_after'].append(script_filename) + break + else: + # if this requires an individual script to be loaded before + if load_before_script in script_dependency_map: + script_dependency_map[load_before_script]['load_after'].append(script_filename) + + # resolve extension name in load_after lists + for load_after_script in script_data['load_after']: + if load_after_script.startswith('ext:'): + # if this requires an extension to be loaded after + required_extension = load_after_script[4:] + for script_file_name2, script_data2 in script_dependency_map.items(): + if script_data2['extension'] == required_extension: + script_data['load_after'].append(script_file_name2) + + # remove all extension names in load_after lists + script_data['load_after'] = [x for x in script_data['load_after'] if not x.startswith('ext:')] + + # build the DAG + sorter = TopologicalSorter() + for script_filename, script_data in script_dependency_map.items(): + requirement_met = True + for required_script in script_data['requires']: + if required_script.startswith('ext:'): + # if this requires an extension to be installed + required_extension = required_script[4:] + if required_extension not in loaded_extensions: + errors.report(f"Script \"{script_filename}\" requires extension \"{required_extension}\" to " + f"be loaded, but it is not. Skipping.", + exc_info=False) + requirement_met = False + break + else: + # if this requires an individual script to be loaded + if required_script not in script_dependency_map: + errors.report(f"Script \"{script_filename}\" requires script \"{required_script}\" to " + f"be loaded, but it is not. 
Skipping.", + exc_info=False) + requirement_met = False + break + if not requirement_met: + continue + + sorter.add(script_filename, *script_data['load_after']) + + # sort the scripts + try: + ordered_script = sorter.static_order() + except CycleError: + errors.report("Cycle detected in script dependencies. Scripts will load in ascending order.", exc_info=True) + ordered_script = script_dependency_map.keys() + + for script_filename in ordered_script: + script_data = script_dependency_map[script_filename] + scripts_list.append(script_data['script_file']) scripts_list = [x for x in scripts_list if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)] @@ -365,15 +482,9 @@ def load_scripts(): elif issubclass(script_class, scripts_postprocessing.ScriptPostprocessing): postprocessing_scripts_data.append(ScriptClassData(script_class, scriptfile.path, scriptfile.basedir, module)) - def orderby(basedir): - # 1st webui, 2nd extensions-builtin, 3rd extensions - priority = {os.path.join(paths.script_path, "extensions-builtin"):1, paths.script_path:0} - for key in priority: - if basedir.startswith(key): - return priority[key] - return 9999 - - for scriptfile in sorted(scripts_list, key=lambda x: [orderby(x.basedir), x]): + # here the scripts_list is already ordered + # processing_script is not considered though + for scriptfile in scripts_list: try: if scriptfile.basedir != paths.script_path: sys.path = [scriptfile.basedir] + sys.path -- cgit v1.2.3 From 0d1924c48be3d02650e87b12a4f53165a8b4a599 Mon Sep 17 00:00:00 2001 From: wfjsw Date: Sat, 11 Nov 2023 04:03:55 -0600 Subject: populate loaded_extensions from extension list instead --- modules/scripts.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index e92a34a0..7cdf288d 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -371,9 +371,8 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): # resolve dependencies loaded_extensions = set() - for _, script_data in script_dependency_map.items(): - if script_data['extension'] is not None: - loaded_extensions.add(script_data['extension']) + for ext in extensions.active(): + loaded_extensions.add(ext.canonical_name) for script_filename, script_data in script_dependency_map.items(): # load before requires inverse dependency -- cgit v1.2.3 From bc1a450124ab643fc0c3ea7630d875afb4b84b84 Mon Sep 17 00:00:00 2001 From: wfjsw Date: Sat, 11 Nov 2023 04:08:45 -0600 Subject: reverse the extension load order so builtin extensions load earlier natively --- modules/extensions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/extensions.py b/modules/extensions.py index e317a404..7583a3b0 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -162,7 +162,7 @@ def list_extensions(): extension_dependency_map = {} # scan through extensions directory and load metadata - for dirname in [extensions_dir, extensions_builtin_dir]: + for dirname in [extensions_builtin_dir, extensions_dir]: if not os.path.isdir(dirname): continue -- cgit v1.2.3 From 294f8a514f982248cda1cafda30d35566f3a0321 Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Sat, 11 Nov 2023 23:28:12 +0900 Subject: add hyperTile https://github.com/tfernd/HyperTile --- modules/processing.py | 27 ++++++++++++++++++++++++--- modules/shared_options.py | 2 ++ 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 
From 294f8a514f982248cda1cafda30d35566f3a0321 Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Sat, 11 Nov 2023 23:28:12 +0900 Subject: add hyperTile https://github.com/tfernd/HyperTile --- modules/processing.py | 27 ++++++++++++++++++++++++--- modules/shared_options.py | 2 ++ 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index b0e240a4..e2309534 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -799,6 +799,16 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: infotexts = [] output_images = [] + unet_object = p.sd_model.model + vae_model = p.sd_model.first_stage_model + try: + from hyper_tile import split_attention, flush + except (ImportError, ModuleNotFoundError): # pip install git+https://github.com/tfernd/HyperTile@2ef64b2800d007d305755c33550537410310d7df + split_attention = lambda *args, **kwargs: lambda x: x # return a no-op context manager + flush = lambda: None + import random + saved_rng_state = random.getstate() + random.seed(p.seed) # hyper_tile uses random, so we need to seed it with torch.no_grad(), p.sd_model.ema_scope(): with devices.autocast(): @@ -866,15 +876,25 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: shared.state.job = f"Batch {n+1} out of {p.n_iter}" with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast(): - samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) + # get largest tile size available, which is 2^x which is factor of gcd of p.width and p.height + gcd = math.gcd(p.width, p.height) + largest_tile_size_available = 1 + while gcd % (largest_tile_size_available * 2) == 0: + largest_tile_size_available *= 2 + aspect_ratio = p.width / p.height + with split_attention(vae_model, aspect_ratio=aspect_ratio, tile_size=min(largest_tile_size_available, 128), disable=not shared.opts.hypertile_split_vae_attn): + with split_attention(unet_object, aspect_ratio=aspect_ratio, tile_size=min(largest_tile_size_available, 256), swap_size=2, disable=not shared.opts.hypertile_split_unet_attn): + flush() + samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) if getattr(samples_ddim, 'already_decoded', False): x_samples_ddim = samples_ddim else: if opts.sd_vae_decode_method != 'Full': p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method - - x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) + with split_attention(vae_model, aspect_ratio=aspect_ratio, tile_size=min(largest_tile_size_available, 128), disable=not shared.opts.hypertile_split_vae_attn): + flush() + x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) x_samples_ddim = torch.stack(x_samples_ddim).float() x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) @@ -980,6 +1000,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if opts.grid_save: images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(use_main_prompt=True), short_filename=not opts.grid_extended_filename, p=p, grid=True) + random.setstate(saved_rng_state) if not p.disable_extra_networks and p.extra_network_data: extra_networks.deactivate(p, p.extra_network_data) diff --git a/modules/shared_options.py b/modules/shared_options.py index d40db530..d9650265 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -200,6 +200,8 @@ options_templates.update(options_section(('optimizations', "Optimizations"), { "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length", infotext='Pad 
conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"), "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("do not recalculate conds from prompts if prompts have not changed since previous calculation"), "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"), + "hypertile_split_unet_attn" : OptionInfo(False, "Split attention in Unet with HyperTile").link("Github", "https://github.com/tfernd/HyperTile").info("improves performance; changes behavior, but deterministic"), + "hypertile_split_vae_attn": OptionInfo(False, "Split attention in VAE with HyperTile").link("Github", "https://github.com/tfernd/HyperTile").info("improves performance; changes behavior, but deterministic"), })) options_templates.update(options_section(('compatibility', "Compatibility"), { -- cgit v1.2.3 From 7af576e745c79a9539e40bc158e695192ae79f25 Mon Sep 17 00:00:00 2001 From: wfjsw Date: Sat, 11 Nov 2023 10:46:47 -0600 Subject: remove the assumption of same name --- modules/scripts.py | 81 ++++++++++++++++++++---------------------------------- 1 file changed, 30 insertions(+), 51 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index 7cdf288d..7ad22245 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -335,15 +335,9 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): for ext in extensions.active(): extension_scripts_list = ext.list_files(scriptdirname, extension) for extension_script in extension_scripts_list: - # this is built on the assumption that script name is unique. - # I think bad thing is gonna happen if name collide in the current implementation anyway, but we - # will need to refactor here if this assumption is broken later on. - if extension_script.filename in script_dependency_map: - errors.report(f"Duplicate script name \"{extension_script.filename}\" found in extensions " - f"\"{ext.name}\" and \"{script_dependency_map[extension_script.filename]['extension_dirname'] or 'builtin'}\". 
" - f"The current loading file will be discarded.", exc_info=False) - continue - + script_canonical_name = ext.canonical_name + "/" + extension_script.filename + if ext.is_builtin: + script_canonical_name = "builtin/" + script_canonical_name relative_path = scriptdirname + "/" + extension_script.filename requires = None @@ -359,7 +353,7 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): load_after = [x.strip() for x in load_after.split(',')] if load_after else [] load_before = [x.strip() for x in load_before.split(',')] if load_before else [] - script_dependency_map[extension_script.filename] = { + script_dependency_map[script_canonical_name] = { "extension": ext.canonical_name, "extension_dirname": ext.name, "script_file": extension_script, @@ -374,60 +368,45 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): for ext in extensions.active(): loaded_extensions.add(ext.canonical_name) - for script_filename, script_data in script_dependency_map.items(): + for script_canonical_name, script_data in script_dependency_map.items(): # load before requires inverse dependency # in this case, append the script name into the load_after list of the specified script for load_before_script in script_data['load_before']: - if load_before_script.startswith('ext:'): - # if this requires an extension to be loaded before - required_extension = load_before_script[4:] + # if this requires an individual script to be loaded before + if load_before_script in script_dependency_map: + script_dependency_map[load_before_script]['load_after'].append(script_canonical_name) + elif load_before_script in loaded_extensions: for _, script_data2 in script_dependency_map.items(): - if script_data2['extension'] == required_extension: - script_data2['load_after'].append(script_filename) + if script_data2['extension'] == load_before_script: + script_data2['load_after'].append(script_canonical_name) break - else: - # if this requires an individual script to be loaded before - if load_before_script in script_dependency_map: - script_dependency_map[load_before_script]['load_after'].append(script_filename) # resolve extension name in load_after lists - for load_after_script in script_data['load_after']: - if load_after_script.startswith('ext:'): - # if this requires an extension to be loaded after - required_extension = load_after_script[4:] - for script_file_name2, script_data2 in script_dependency_map.items(): - if script_data2['extension'] == required_extension: - script_data['load_after'].append(script_file_name2) - - # remove all extension names in load_after lists - script_data['load_after'] = [x for x in script_data['load_after'] if not x.startswith('ext:')] + for load_after_script in list(script_data['load_after']): + if load_after_script not in script_dependency_map and load_after_script in loaded_extensions: + script_data['load_after'].remove(load_after_script) + for script_canonical_name2, script_data2 in script_dependency_map.items(): + if script_data2['extension'] == load_after_script: + script_data['load_after'].remove(script_canonical_name2) + break # build the DAG sorter = TopologicalSorter() - for script_filename, script_data in script_dependency_map.items(): + for script_canonical_name, script_data in script_dependency_map.items(): requirement_met = True for required_script in script_data['requires']: - if required_script.startswith('ext:'): - # if this requires an extension to be installed - required_extension = required_script[4:] - if required_extension not in 
loaded_extensions: - errors.report(f"Script \"{script_filename}\" requires extension \"{required_extension}\" to " - f"be loaded, but it is not. Skipping.", - exc_info=False) - requirement_met = False - break - else: - # if this requires an individual script to be loaded - if required_script not in script_dependency_map: - errors.report(f"Script \"{script_filename}\" requires script \"{required_script}\" to " - f"be loaded, but it is not. Skipping.", - exc_info=False) - requirement_met = False - break + # if this requires an individual script to be loaded + if required_script not in script_dependency_map and required_script not in loaded_extensions: + errors.report(f"Script \"{script_canonical_name}\" " + f"requires \"{required_script}\" to " + f"be loaded, but it is not. Skipping.", + exc_info=False) + requirement_met = False + break if not requirement_met: continue - sorter.add(script_filename, *script_data['load_after']) + sorter.add(script_canonical_name, *script_data['load_after']) # sort the scripts try: @@ -436,8 +415,8 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): errors.report("Cycle detected in script dependencies. Scripts will load in ascending order.", exc_info=True) ordered_script = script_dependency_map.keys() - for script_filename in ordered_script: - script_data = script_dependency_map[script_filename] + for script_canonical_name in ordered_script: + script_data = script_dependency_map[script_canonical_name] scripts_list.append(script_data['script_file']) scripts_list = [x for x in scripts_list if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)] -- cgit v1.2.3 From 520e52f846892cc2b207b738b4180fa863c7b38f Mon Sep 17 00:00:00 2001 From: wfjsw Date: Sat, 11 Nov 2023 10:58:26 -0600 Subject: allow comma and whitespace as separator --- modules/extensions.py | 9 ++++++--- modules/scripts.py | 6 +++--- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/modules/extensions.py b/modules/extensions.py index 7583a3b0..795af996 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -2,6 +2,7 @@ import configparser import functools import os import threading +import re from modules import shared, errors, cache, scripts from modules.gitpython_hack import Repo @@ -48,7 +49,8 @@ class Extension: config.read(os.path.join(self.path, "sd_webui_metadata.ini")) return config except Exception: - errors.report(f"Error reading sd_webui_metadata.ini for extension {self.canonical_name}.", exc_info=True) + errors.report(f"Error reading sd_webui_metadata.ini for extension {self.canonical_name}.", + exc_info=True) return None def to_dict(self): @@ -70,6 +72,7 @@ class Extension: self.do_read_info_from_repo() return self.to_dict() + try: d = cache.cached_data_for_file('extensions-git', self.name, os.path.join(self.path, ".git"), read_from_repo) self.from_dict(d) @@ -194,8 +197,8 @@ def list_extensions(): f"The current loading extension will be discarded.", exc_info=False) continue - # we want to wash the data to lowercase and remove whitespaces just in case - requires = [x.strip() for x in requires.lower().split(',')] if requires else [] + # both "," and " " are accepted as separator + requires = list(filter(None, re.split(r"[,\s]+", requires.lower()))) if requires else [] extension_dependency_map[canonical_name] = { "dirname": extension_dirname, diff --git a/modules/scripts.py b/modules/scripts.py index 7ad22245..5dd0555d 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -349,9 +349,9 @@ def 
list_scripts(scriptdirname, extension, *, include_extensions=True): load_before = ext.metadata.get(relative_path, "Before", fallback=None) load_after = ext.metadata.get(relative_path, "After", fallback=None) - requires = [x.strip() for x in requires.split(',')] if requires else [] - load_after = [x.strip() for x in load_after.split(',')] if load_after else [] - load_before = [x.strip() for x in load_before.split(',')] if load_before else [] + requires = list(filter(None, re.split(r"[,\s]+", requires.lower()))) if requires else [] + load_after = list(filter(None, re.split(r"[,\s]+", load_after.lower()))) if load_after else [] + load_before = list(filter(None, re.split(r"[,\s]+", load_before.lower()))) if load_before else [] script_dependency_map[script_canonical_name] = { "extension": ext.canonical_name, -- cgit v1.2.3 From 48d6102b3105bb0179c8eab14ec7930945aca326 Mon Sep 17 00:00:00 2001 From: wfjsw Date: Sat, 11 Nov 2023 11:17:26 -0600 Subject: fix --- modules/extensions.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/extensions.py b/modules/extensions.py index 795af996..5536db3e 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -183,7 +183,6 @@ def list_extensions(): config.read(os.path.join(path, "sd_webui_metadata.ini")) canonical_name = config.get("Extension", "Name", fallback=canonical_name) requires = config.get("Extension", "Requires", fallback=None) - continue except Exception: errors.report(f"Error reading sd_webui_metadata.ini for extension {extension_dirname}. " f"Will load regardless.", exc_info=True) -- cgit v1.2.3 From 3bb32befe9523a6acefbab7fe099f91660f41ea9 Mon Sep 17 00:00:00 2001 From: wfjsw Date: Sat, 11 Nov 2023 11:58:19 -0600 Subject: bug fix --- modules/scripts.py | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index 5dd0555d..b1f4504a 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -322,6 +322,9 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): root_script_basedir = os.path.join(paths.script_path, scriptdirname) if os.path.exists(root_script_basedir): for filename in sorted(os.listdir(root_script_basedir)): + if not os.path.isfile(os.path.join(root_script_basedir, filename)): + continue + script_dependency_map[filename] = { "extension": None, "extension_dirname": None, @@ -335,19 +338,27 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): for ext in extensions.active(): extension_scripts_list = ext.list_files(scriptdirname, extension) for extension_script in extension_scripts_list: + if not os.path.isfile(extension_script.path): + continue + script_canonical_name = ext.canonical_name + "/" + extension_script.filename if ext.is_builtin: script_canonical_name = "builtin/" + script_canonical_name relative_path = scriptdirname + "/" + extension_script.filename - requires = None - load_before = None - load_after = None + requires = '' + load_before = '' + load_after = '' if ext.metadata is not None: - requires = ext.metadata.get(relative_path, "Requires", fallback=None) - load_before = ext.metadata.get(relative_path, "Before", fallback=None) - load_after = ext.metadata.get(relative_path, "After", fallback=None) + requires = ext.metadata.get(relative_path, "Requires", fallback='') + load_before = ext.metadata.get(relative_path, "Before", fallback='') + load_after = ext.metadata.get(relative_path, "After", fallback='') + + # propagate directory level metadata + requires = requires + ',' + 
ext.metadata.get(scriptdirname, "Requires", fallback='') + load_before = load_before + ',' + ext.metadata.get(scriptdirname, "Before", fallback='') + load_after = load_after + ',' + ext.metadata.get(scriptdirname, "After", fallback='') requires = list(filter(None, re.split(r"[,\s]+", requires.lower()))) if requires else [] load_after = list(filter(None, re.split(r"[,\s]+", load_after.lower()))) if load_after else [] @@ -387,7 +398,7 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): script_data['load_after'].remove(load_after_script) for script_canonical_name2, script_data2 in script_dependency_map.items(): if script_data2['extension'] == load_after_script: - script_data['load_after'].remove(script_canonical_name2) + script_data['load_after'].append(script_canonical_name2) break # build the DAG -- cgit v1.2.3 From f6762d2ad95e3de39fc900b3fd528310e512831f Mon Sep 17 00:00:00 2001 From: Tom Haelbich <65122811+h43lb1t0@users.noreply.github.com> Date: Sun, 12 Nov 2023 14:14:16 +0100 Subject: dir buttons start with / so only the correct dir is shown, not other dirs whose names merely share the same prefix --- modules/ui_extra_networks.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 063bd7b8..43a94b74 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -138,8 +138,9 @@ class ExtraNetworksPage: continue subdir = os.path.abspath(x)[len(parentdir):].replace("\\", "/") - while subdir.startswith("/"): - subdir = subdir[1:] + + if not subdir.startswith("/"): + subdir = "/" + subdir is_empty = len(os.listdir(x)) == 0 if not is_empty and not subdir.endswith("/"): -- cgit v1.2.3
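Note: a small, runnable sketch of the prefix collision the patch above fixes. The directory names are invented and the real comparison happens in the extra networks filtering logic, so this only illustrates the string behaviour that motivates the leading "/".

subdirs = ["models", "models-old", "models/lora"]

def matches_bare(subdir, selected):
    # old behaviour: a bare prefix test, so "models" also matched "models-old"
    return subdir.startswith(selected)

def matches_anchored(subdir, selected):
    # with a leading "/", only the directory itself or its children can match
    return ("/" + subdir + "/").startswith("/" + selected + "/")

assert matches_bare("models-old", "models")           # the false positive
assert not matches_anchored("models-old", "models")   # gone once anchored
assert matches_anchored("models/lora", "models")      # children still match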
From 8048f36072c8a281b8c8c79235df63a748ab7361 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sun, 12 Nov 2023 17:12:50 -0700 Subject: Lint --- modules/ui_extra_networks.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 43a94b74..bd673285 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -138,7 +138,6 @@ class ExtraNetworksPage: continue subdir = os.path.abspath(x)[len(parentdir):].replace("\\", "/") - if not subdir.startswith("/"): subdir = "/" + subdir is_empty = len(os.listdir(x)) == 0 -- cgit v1.2.3 From 94e966956666ba13b368aaf781628085e3d4f7e3 Mon Sep 17 00:00:00 2001 From: kaalibro Date: Mon, 13 Nov 2023 14:51:06 +0600 Subject: Fixes generation restart not working for some users when 'Ctrl+Enter' is pressed --- script.js | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/script.js b/script.js index 5f6ee354..c0e678ea 100644 --- a/script.js +++ b/script.js @@ -133,9 +133,18 @@ document.addEventListener('keydown', function(e) { if (isEnter && isModifierKey) { if (interruptButton.style.display === 'block') { interruptButton.click(); - setTimeout(function() { - generateButton.click(); - }, 500); + const callback = (mutationList) => { + for (const mutation of mutationList) { + if (mutation.type === 'attributes' && mutation.attributeName === 'style') { + if (interruptButton.style.display === 'none') { + generateButton.click(); + observer.disconnect(); + } + } + } + }; + const observer = new MutationObserver(callback); + observer.observe(interruptButton, {attributes: true}); } else { generateButton.click(); } -- cgit v1.2.3 From c1c816006e47f3b7dcf1512594fd31817242e7fa Mon Sep 17 00:00:00 2001 From: kaalibro Date: Mon, 13 Nov 2023 22:01:52 +0600 Subject: Adds 'Path' sorting for Extra network cards --- modules/shared_options.py | 2 +- modules/ui_extra_networks.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index d40db530..8fc7ef1d 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -235,7 +235,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), { "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"), "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"), "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"), - "extra_networks_card_order_field": OptionInfo("Name", "Default order field for Extra Networks cards", gr.Dropdown, {"choices": ['Name', 'Date Created', 'Date Modified']}).needs_reload_ui(), + "extra_networks_card_order_field": OptionInfo("Path", "Default order field for Extra Networks cards", gr.Dropdown, {"choices": ['Path', 'Name', 'Date Created', 'Date Modified']}).needs_reload_ui(), "extra_networks_card_order": OptionInfo("Ascending", "Default order for Extra Networks cards", gr.Dropdown, {"choices": ['Ascending', 'Descending']}).needs_reload_ui(), "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 7907cd63..f03e2033 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -279,6 +279,7 @@ class ExtraNetworksPage: "date_created": int(stat.st_ctime or 0), "date_modified": int(stat.st_mtime or 0), "name": pth.name.lower(), + "path": str(pth.parent).lower(), } def find_preview(self, path): @@ -382,7 +383,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): related_tabs.append(tab) edit_search = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", elem_classes="search", placeholder="Search...", visible=False, interactive=True) - dropdown_sort = gr.Dropdown(choices=['Name', 'Date Created', 'Date Modified', ], value=shared.opts.extra_networks_card_order_field, elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order") + dropdown_sort = gr.Dropdown(choices=['Path', 'Name', 'Date Created', 'Date Modified', ], value=shared.opts.extra_networks_card_order_field, elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order") button_sortorder = ToolButton(switch_values_symbol, elem_id=tabname+"_extra_sortorder", elem_classes=["sortorder"] + ([] if shared.opts.extra_networks_card_order == "Ascending" else ["sortReverse"]), visible=False, tooltip="Invert sort order") button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh", visible=False) checkbox_show_dirs = gr.Checkbox(True, label='Show dirs', elem_id=tabname+"_extra_show_dirs", elem_classes="show-dirs", visible=False) -- cgit v1.2.3 From a292d2c47f51fc71cc186709bdf3706f0944b7d6 Mon Sep 17 00:00:00 2001 From: AngelBottomless Date: Wed, 15 Nov 2023 14:26:37 +0900 Subject: hotfix: call shared.state.end() after postprocessing done --- modules/postprocessing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
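Note: a minimal sketch of the begin/end pairing this hotfix restores. The _State class and the process callback below are stand-ins for modules.shared.state and the per-image work, not the real run_postprocessing body; the point is that end() runs exactly once, after the whole batch, even if per-image work raises.

class _State:
    def begin(self):
        print("job started")

    def end(self):
        print("job ended")

state = _State()

def run_postprocessing_sketch(images, process_one):
    state.begin()
    try:
        for image in images:
            process_one(image)  # per-image work, may raise
    finally:
        state.end()  # reported done only after the loop and cleanup

run_postprocessing_sketch(["a.png", "b.png"], lambda path: print("processed", path))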
diff --git a/modules/postprocessing.py b/modules/postprocessing.py index cf04d38b..fd0c0cc9 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -78,7 +78,7 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, image_data.close() devices.torch_gc() - + shared.state.end() return outputs, ui_common.plaintext_to_html(infotext), '' -- cgit v1.2.3 From b29fc6d4de8812b25c520a46676cda13c3fe64ca Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Sat, 11 Nov 2023 23:43:13 +0900 Subject: Implement Hypertile Co-Authored-By: Kieran Hunt --- modules/hypertile.py | 333 ++++++++++++++++++++++++++++++++++++++++++++++++++ modules/processing.py | 65 ++++------ 2 files changed, 358 insertions(+), 40 deletions(-) create mode 100644 modules/hypertile.py diff --git a/modules/hypertile.py b/modules/hypertile.py new file mode 100644 index 00000000..ab1c74c0 --- /dev/null +++ b/modules/hypertile.py @@ -0,0 +1,333 @@ +""" +Hypertile module for splitting attention layers in SD-1.5 U-Net and SD-1.5 VAE +Warn : The patch works well only if the input image has a width and height that are multiples of 128 +Author : @tfernd Github : https://github.com/tfernd/HyperTile +""" + +from __future__ import annotations +from typing import Callable +from typing_extensions import Literal + +import logging +from functools import wraps, cache +from contextlib import contextmanager + +import math +import torch.nn as nn +import random + +from einops import rearrange + +# TODO add SD-XL layers +DEPTH_LAYERS = { + 0: [ + # SD 1.5 U-Net (diffusers) + "down_blocks.0.attentions.0.transformer_blocks.0.attn1", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "input_blocks.1.1.transformer_blocks.0.attn1", + "input_blocks.2.1.transformer_blocks.0.attn1", + "output_blocks.9.1.transformer_blocks.0.attn1", + "output_blocks.10.1.transformer_blocks.0.attn1", + "output_blocks.11.1.transformer_blocks.0.attn1", + # SD 1.5 VAE + "decoder.mid_block.attentions.0", + ], + 1: [ + # SD 1.5 U-Net (diffusers) + "down_blocks.1.attentions.0.transformer_blocks.0.attn1", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "input_blocks.4.1.transformer_blocks.0.attn1", + "input_blocks.5.1.transformer_blocks.0.attn1", + "output_blocks.6.1.transformer_blocks.0.attn1", + "output_blocks.7.1.transformer_blocks.0.attn1", + "output_blocks.8.1.transformer_blocks.0.attn1", + ], + 2: [ + # SD 1.5 U-Net (diffusers) + "down_blocks.2.attentions.0.transformer_blocks.0.attn1", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "input_blocks.7.1.transformer_blocks.0.attn1", + "input_blocks.8.1.transformer_blocks.0.attn1", + "output_blocks.3.1.transformer_blocks.0.attn1", + "output_blocks.4.1.transformer_blocks.0.attn1", + "output_blocks.5.1.transformer_blocks.0.attn1", + ], + 3: [ + # SD 1.5 U-Net (diffusers) + "mid_block.attentions.0.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + 
"middle_block.1.transformer_blocks.0.attn1", + ], +} +# XL layers, thanks for GitHub@gel-crabs for the help +DEPTH_LAYERS_XL = { + 0: [ + # SD 1.5 U-Net (diffusers) + "down_blocks.0.attentions.0.transformer_blocks.0.attn1", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "input_blocks.4.1.transformer_blocks.0.attn1", + "input_blocks.5.1.transformer_blocks.0.attn1", + "output_blocks.3.1.transformer_blocks.0.attn1", + "output_blocks.4.1.transformer_blocks.0.attn1", + "output_blocks.5.1.transformer_blocks.0.attn1", + # SD 1.5 VAE + "decoder.mid_block.attentions.0", + "decoder.mid.attn_1", + ], + 1: [ + # SD 1.5 U-Net (diffusers) + #"down_blocks.1.attentions.0.transformer_blocks.0.attn1", + #"down_blocks.1.attentions.1.transformer_blocks.0.attn1", + #"up_blocks.2.attentions.0.transformer_blocks.0.attn1", + #"up_blocks.2.attentions.1.transformer_blocks.0.attn1", + #"up_blocks.2.attentions.2.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "input_blocks.4.1.transformer_blocks.1.attn1", + "input_blocks.5.1.transformer_blocks.1.attn1", + "output_blocks.3.1.transformer_blocks.1.attn1", + "output_blocks.4.1.transformer_blocks.1.attn1", + "output_blocks.5.1.transformer_blocks.1.attn1", + "input_blocks.7.1.transformer_blocks.0.attn1", + "input_blocks.8.1.transformer_blocks.0.attn1", + "output_blocks.0.1.transformer_blocks.0.attn1", + "output_blocks.1.1.transformer_blocks.0.attn1", + "output_blocks.2.1.transformer_blocks.0.attn1", + "input_blocks.7.1.transformer_blocks.1.attn1", + "input_blocks.8.1.transformer_blocks.1.attn1", + "output_blocks.0.1.transformer_blocks.1.attn1", + "output_blocks.1.1.transformer_blocks.1.attn1", + "output_blocks.2.1.transformer_blocks.1.attn1", + "input_blocks.7.1.transformer_blocks.2.attn1", + "input_blocks.8.1.transformer_blocks.2.attn1", + "output_blocks.0.1.transformer_blocks.2.attn1", + "output_blocks.1.1.transformer_blocks.2.attn1", + "output_blocks.2.1.transformer_blocks.2.attn1", + "input_blocks.7.1.transformer_blocks.3.attn1", + "input_blocks.8.1.transformer_blocks.3.attn1", + "output_blocks.0.1.transformer_blocks.3.attn1", + "output_blocks.1.1.transformer_blocks.3.attn1", + "output_blocks.2.1.transformer_blocks.3.attn1", + "input_blocks.7.1.transformer_blocks.4.attn1", + "input_blocks.8.1.transformer_blocks.4.attn1", + "output_blocks.0.1.transformer_blocks.4.attn1", + "output_blocks.1.1.transformer_blocks.4.attn1", + "output_blocks.2.1.transformer_blocks.4.attn1", + "input_blocks.7.1.transformer_blocks.5.attn1", + "input_blocks.8.1.transformer_blocks.5.attn1", + "output_blocks.0.1.transformer_blocks.5.attn1", + "output_blocks.1.1.transformer_blocks.5.attn1", + "output_blocks.2.1.transformer_blocks.5.attn1", + "input_blocks.7.1.transformer_blocks.6.attn1", + "input_blocks.8.1.transformer_blocks.6.attn1", + "output_blocks.0.1.transformer_blocks.6.attn1", + "output_blocks.1.1.transformer_blocks.6.attn1", + "output_blocks.2.1.transformer_blocks.6.attn1", + "input_blocks.7.1.transformer_blocks.7.attn1", + "input_blocks.8.1.transformer_blocks.7.attn1", + "output_blocks.0.1.transformer_blocks.7.attn1", + "output_blocks.1.1.transformer_blocks.7.attn1", + "output_blocks.2.1.transformer_blocks.7.attn1", + "input_blocks.7.1.transformer_blocks.8.attn1", + "input_blocks.8.1.transformer_blocks.8.attn1", + "output_blocks.0.1.transformer_blocks.8.attn1", + 
"output_blocks.1.1.transformer_blocks.8.attn1", + "output_blocks.2.1.transformer_blocks.8.attn1", + "input_blocks.7.1.transformer_blocks.9.attn1", + "input_blocks.8.1.transformer_blocks.9.attn1", + "output_blocks.0.1.transformer_blocks.9.attn1", + "output_blocks.1.1.transformer_blocks.9.attn1", + "output_blocks.2.1.transformer_blocks.9.attn1", + ], + 2: [ + # SD 1.5 U-Net (diffusers) + "mid_block.attentions.0.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "middle_block.1.transformer_blocks.0.attn1", + "middle_block.1.transformer_blocks.1.attn1", + "middle_block.1.transformer_blocks.2.attn1", + "middle_block.1.transformer_blocks.3.attn1", + "middle_block.1.transformer_blocks.4.attn1", + "middle_block.1.transformer_blocks.5.attn1", + "middle_block.1.transformer_blocks.6.attn1", + "middle_block.1.transformer_blocks.7.attn1", + "middle_block.1.transformer_blocks.8.attn1", + "middle_block.1.transformer_blocks.9.attn1", + ], +} + + +RNG_INSTANCE = random.Random() + +def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int: + """ + Returns a random divisor of value that + x * min_value <= value + if max_options is 1, the behavior is deterministic + """ + min_value = min(min_value, value) + + # All big divisors of value (inclusive) + divisors = [i for i in range(min_value, value + 1) if value % i == 0] # divisors in small -> big order + + ns = [value // i for i in divisors[:max_options]] # has at least 1 element # big -> small order + + idx = RNG_INSTANCE.randint(0, len(ns) - 1) + + return ns[idx] + +def set_hypertile_seed(seed: int) -> None: + RNG_INSTANCE.seed(seed) + +def largest_tile_size_available(width:int, height:int) -> int: + """ + Calculates the largest tile size available for a given width and height + Tile size is always a power of 2 + """ + gcd = math.gcd(width, height) + largest_tile_size_available = 1 + while gcd % (largest_tile_size_available * 2) == 0: + largest_tile_size_available *= 2 + return largest_tile_size_available + +def iterative_closest_divisors(hw:int, aspect_ratio:float) -> tuple[int, int]: + """ + Finds h and w such that h*w = hw and h/w = aspect_ratio + We check all possible divisors of hw and return the closest to the aspect ratio + """ + divisors = [i for i in range(2, hw + 1) if hw % i == 0] # all divisors of hw + pairs = [(i, hw // i) for i in divisors] # all pairs of divisors of hw + ratios = [w/h for h, w in pairs] # all ratios of pairs of divisors of hw + closest_ratio = min(ratios, key=lambda x: abs(x - aspect_ratio)) # closest ratio to aspect_ratio + closest_pair = pairs[ratios.index(closest_ratio)] # closest pair of divisors to aspect_ratio + return closest_pair + +@cache +def find_hw_candidates(hw:int, aspect_ratio:float) -> tuple[int, int]: + """ + Finds h and w such that h*w = hw and h/w = aspect_ratio + """ + h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio)) + # find h and w such that h*w = hw and h/w = aspect_ratio + if h * w != hw: + w_candidate = hw / h + # check if w is an integer + if not w_candidate.is_integer(): + h_candidate = hw / w + # check if h is an integer + if not h_candidate.is_integer(): + return iterative_closest_divisors(hw, aspect_ratio) + else: + h = int(h_candidate) + else: + w = int(w_candidate) + return h, w + +@contextmanager +def split_attention( + layer: nn.Module, + /, + aspect_ratio: float, # width/height + tile_size: int = 128, # 128 for VAE + swap_size: int = 1, # 1 for VAE + *, + disable: bool = False, + max_depth: Literal[0, 1, 2, 3] = 0, # ! 
Try 0 or 1 + scale_depth: bool = True, # scale the tile-size depending on the depth + is_sdxl: bool = False, # is the model SD-XL +): + # Hijacks AttnBlock from ldm and Attention from diffusers + + if disable: + logging.info(f"Attention for {layer.__class__.__qualname__} not splitted") + yield + return + + latent_tile_size = max(128, tile_size) // 8 + + def self_attn_forward(forward: Callable, depth: int, layer_name: str, module: nn.Module) -> Callable: + @wraps(forward) + def wrapper(*args, **kwargs): + x = args[0] + + # VAE + if x.ndim == 4: + b, c, h, w = x.shape + + nh = random_divisor(h, latent_tile_size, swap_size) + nw = random_divisor(w, latent_tile_size, swap_size) + + if nh * nw > 1: + x = rearrange(x, "b c (nh h) (nw w) -> (b nh nw) c h w", nh=nh, nw=nw) # split into nh * nw tiles + + out = forward(x, *args[1:], **kwargs) + + if nh * nw > 1: + out = rearrange(out, "(b nh nw) c h w -> b c (nh h) (nw w)", nh=nh, nw=nw) + + # U-Net + else: + hw: int = x.size(1) + h, w = find_hw_candidates(hw, aspect_ratio) + assert h * w == hw, f"Invalid aspect ratio {aspect_ratio} for input of shape {x.shape}, hw={hw}, h={h}, w={w}" + + factor = 2**depth if scale_depth else 1 + nh = random_divisor(h, latent_tile_size * factor, swap_size) + nw = random_divisor(w, latent_tile_size * factor, swap_size) + + module._split_sizes_hypertile.append((nh, nw)) # type: ignore + + if nh * nw > 1: + x = rearrange(x, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw) + + out = forward(x, *args[1:], **kwargs) + + if nh * nw > 1: + out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw) + out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw) + + return out + + return wrapper + + # Handle hijacking the forward method and recovering afterwards + try: + if is_sdxl: + layers = DEPTH_LAYERS_XL + else: + layers = DEPTH_LAYERS + for depth in range(max_depth + 1): + for layer_name, module in layer.named_modules(): + if any(layer_name.endswith(try_name) for try_name in layers[depth]): + # print input shape for debugging + logging.debug(f"HyperTile hijacking attention layer at depth {depth}: {layer_name}") + # hijack + module._original_forward_hypertile = module.forward + module.forward = self_attn_forward(module.forward, depth, layer_name, module) + module._split_sizes_hypertile = [] + yield + finally: + for layer_name, module in layer.named_modules(): + # remove hijack + if hasattr(module, "_original_forward_hypertile"): + if module._split_sizes_hypertile: + logging.debug(f"layer {layer_name} splitted with ({module._split_sizes_hypertile})") + # recover + module.forward = module._original_forward_hypertile + del module._original_forward_hypertile + del module._split_sizes_hypertile diff --git a/modules/processing.py b/modules/processing.py index e2309534..e19a09a3 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -24,6 +24,7 @@ from modules.shared import opts, cmd_opts, state import modules.shared as shared import modules.paths as paths import modules.face_restoration +from modules.hypertile import split_attention, set_hypertile_seed, largest_tile_size_available import modules.images as images import modules.styles import modules.sd_models as sd_models @@ -799,17 +800,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: infotexts = [] output_images = [] - unet_object = p.sd_model.model - vae_model = p.sd_model.first_stage_model - try: - from hyper_tile import split_attention, flush - except (ImportError, 
ModuleNotFoundError): # pip install git+https://github.com/tfernd/HyperTile@2ef64b2800d007d305755c33550537410310d7df - split_attention = lambda *args, **kwargs: lambda x: x # return a no-op context manager - flush = lambda: None - import random - saved_rng_state = random.getstate() - random.seed(p.seed) # hyper_tile uses random, so we need to seed it - with torch.no_grad(), p.sd_model.ema_scope(): with devices.autocast(): p.init(p.all_prompts, p.all_seeds, p.all_subseeds) @@ -871,29 +861,20 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: p.comment(comment) p.extra_generation_params.update(model_hijack.extra_generation_params) - + set_hypertile_seed(p.seed) + # add batch size + hypertile status to information to reproduce the run if p.n_iter > 1: shared.state.job = f"Batch {n+1} out of {p.n_iter}" with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast(): - # get largest tile size available, which is 2^x which is factor of gcd of p.width and p.height - gcd = math.gcd(p.width, p.height) - largest_tile_size_available = 1 - while gcd % (largest_tile_size_available * 2) == 0: - largest_tile_size_available *= 2 - aspect_ratio = p.width / p.height - with split_attention(vae_model, aspect_ratio=aspect_ratio, tile_size=min(largest_tile_size_available, 128), disable=not shared.opts.hypertile_split_vae_attn): - with split_attention(unet_object, aspect_ratio=aspect_ratio, tile_size=min(largest_tile_size_available, 256), swap_size=2, disable=not shared.opts.hypertile_split_unet_attn): - flush() - samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) + samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) if getattr(samples_ddim, 'already_decoded', False): x_samples_ddim = samples_ddim else: if opts.sd_vae_decode_method != 'Full': p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method - with split_attention(vae_model, aspect_ratio=aspect_ratio, tile_size=min(largest_tile_size_available, 128), disable=not shared.opts.hypertile_split_vae_attn): - flush() + with split_attention(p.sd_model.first_stage_model, aspect_ratio = p.width / p.height, tile_size=min(largest_tile_size_available(p.width, p.height), 128), disable=not shared.opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) x_samples_ddim = torch.stack(x_samples_ddim).float() @@ -1000,7 +981,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if opts.grid_save: images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(use_main_prompt=True), short_filename=not opts.grid_extended_filename, p=p, grid=True) - random.setstate(saved_rng_state) if not p.disable_extra_networks and p.extra_network_data: extra_networks.deactivate(p, p.extra_network_data) @@ -1161,24 +1141,25 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) - + aspect_ratio = self.width / self.height x = self.rng.next() - samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, 
image_conditioning=self.txt2img_image_conditioning(x)) + tile_size = largest_tile_size_available(self.width, self.height) + with split_attention(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 128), swap_size=1, disable=not shared.opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): + with split_attention(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 256), swap_size=2, disable=not shared.opts.hypertile_split_unet_attn, is_sdxl=shared.sd_model.is_sdxl): + devices.torch_gc() + samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) del x - if not self.enable_hr: return samples if self.latent_scale_mode is None: - decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)).to(dtype=torch.float32) + with split_attention(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 256), swap_size=1, disable=not shared.opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): + decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)).to(dtype=torch.float32) else: decoded_samples = None with sd_models.SkipWritingToConfig(): sd_models.reload_model_weights(info=self.hr_checkpoint_info) - - devices.torch_gc() - return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts) def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, subseed_strength, prompts): @@ -1186,7 +1167,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): return samples self.is_hr_pass = True - target_width = self.hr_upscale_to_x target_height = self.hr_upscale_to_y @@ -1264,18 +1244,19 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.scripts is not None: self.scripts.before_hr(self) - - samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) + tile_size = largest_tile_size_available(target_width, target_height) + with split_attention(self.sd_model.first_stage_model, aspect_ratio=target_width / target_height, tile_size=min(tile_size, 256), swap_size=1, disable=not opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): + with split_attention(self.sd_model.model, aspect_ratio=target_width / target_height, tile_size=min(tile_size, 256), swap_size=3, max_depth=1,scale_depth=True, disable=not opts.hypertile_split_unet_attn, is_sdxl=shared.sd_model.is_sdxl): + samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio()) self.sampler = None devices.torch_gc() - - decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True) + with split_attention(self.sd_model.first_stage_model, aspect_ratio=target_width / target_height, tile_size=min(tile_size, 256), swap_size=1, disable=not opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): + decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True) self.is_hr_pass = False - return decoded_samples def close(self): @@ -1550,8 +1531,12 @@ class 
StableDiffusionProcessingImg2Img(StableDiffusionProcessing): if self.initial_noise_multiplier != 1.0: self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier x *= self.initial_noise_multiplier - - samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) + aspect_ratio = self.width / self.height + tile_size = largest_tile_size_available(self.width, self.height) + with split_attention(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 128), swap_size=1, disable=not shared.opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): + with split_attention(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 256), swap_size=2, disable=not shared.opts.hypertile_split_unet_attn, is_sdxl=shared.sd_model.is_sdxl): + devices.torch_gc() + samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) if self.mask is not None: samples = samples * self.nmask + self.init_latent * self.mask -- cgit v1.2.3 From af45872fdb8a66ffd6a405d99120e0bacbb4a170 Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Wed, 15 Nov 2023 15:15:14 +0900 Subject: copy LDM VAE key from XL --- modules/hypertile.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/hypertile.py b/modules/hypertile.py index ab1c74c0..32d8604c 100644 --- a/modules/hypertile.py +++ b/modules/hypertile.py @@ -35,6 +35,7 @@ DEPTH_LAYERS = { "output_blocks.11.1.transformer_blocks.0.attn1", # SD 1.5 VAE "decoder.mid_block.attentions.0", + "decoder.mid.attn_1", ], 1: [ # SD 1.5 U-Net (diffusers) -- cgit v1.2.3 From d6d0b22e6657fc84039e82ee735a57101bfe7c17 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Wed, 15 Nov 2023 03:08:50 -0800 Subject: fix: ignore calc_scale() for COFT which has very small alpha --- extensions-builtin/Lora/network_oft.py | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 93402bb2..c45a8d23 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -99,12 +99,9 @@ class NetworkModuleOFT(network.NetworkModule): is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention] if not is_other_linear: - #if is_other_linear and orig_weight.shape[0] != orig_weight.shape[1]: - # orig_weight=orig_weight.permute(1, 0) - oft_blocks = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) - # without this line the results are significantly worse / less accurate + # ensure skew-symmetric matrix oft_blocks = oft_blocks - oft_blocks.transpose(1, 2) R = oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) @@ -118,9 +115,6 @@ class NetworkModuleOFT(network.NetworkModule): ) merged_weight = rearrange(merged_weight, 'k m ... 
-> (k m) ...') - #if is_other_linear and orig_weight.shape[0] != orig_weight.shape[1]: - # orig_weight=orig_weight.permute(1, 0) - updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight output_shape = orig_weight.shape else: @@ -132,10 +126,10 @@ class NetworkModuleOFT(network.NetworkModule): return self.finalize_updown(updown, orig_weight, output_shape) def calc_updown(self, orig_weight): - multiplier = self.multiplier() * self.calc_scale() - #if self.is_kohya: - # return self.calc_updown_kohya(orig_weight, multiplier) - #else: + # if alpha is a very small number as in coft, calc_scale will return a almost zero number so we ignore it + #multiplier = self.multiplier() * self.calc_scale() + multiplier = self.multiplier() + return self.calc_updown_kb(orig_weight, multiplier) # override to remove the multiplier/scale factor; it's already multiplied in get_weight -- cgit v1.2.3 From eb667e715ad3eea981f6263c143ab0422e5340c9 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Wed, 15 Nov 2023 18:28:48 -0800 Subject: feat: LyCORIS/kohya OFT network support --- extensions-builtin/Lora/network_oft.py | 108 ++++++++------------------------- 1 file changed, 26 insertions(+), 82 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index c45a8d23..05c37811 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -11,8 +11,8 @@ class ModuleTypeOFT(network.ModuleType): return None -# adapted from kohya-ss' implementation https://github.com/kohya-ss/sd-scripts/blob/main/networks/oft.py -# and KohakuBlueleaf's implementation https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/diag_oft.py +# Supports both kohya-ss' implementation of COFT https://github.com/kohya-ss/sd-scripts/blob/main/networks/oft.py +# and KohakuBlueleaf's implementation of OFT/COFT https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/diag_oft.py class NetworkModuleOFT(network.NetworkModule): def __init__(self, net: network.Network, weights: network.NetworkWeights): @@ -25,117 +25,61 @@ class NetworkModuleOFT(network.NetworkModule): if "oft_blocks" in weights.w.keys(): self.is_kohya = True self.oft_blocks = weights.w["oft_blocks"] # (num_blocks, block_size, block_size) - self.alpha = weights.w["alpha"] + self.alpha = weights.w["alpha"] # alpha is constraint self.dim = self.oft_blocks.shape[0] # lora dim - #self.oft_blocks = rearrange(self.oft_blocks, 'k m ... 
-> (k m) ...') + # LyCORIS elif "oft_diag" in weights.w.keys(): self.is_kohya = False - self.oft_blocks = weights.w["oft_diag"] # (num_blocks, block_size, block_size) - - # alpha is rank if alpha is 0 or None - if self.alpha is None: - pass - self.dim = self.oft_blocks.shape[1] # FIXME: almost certainly incorrect, assumes tensor is shape [*, m, n] - else: - raise ValueError("oft_blocks or oft_diag must be in weights dict") + self.oft_blocks = weights.w["oft_diag"] + # self.alpha is unused + self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size) is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear] is_conv = type(self.sd_module) in [torch.nn.Conv2d] - is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention] + is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention] # unsupported if is_linear: self.out_dim = self.sd_module.out_features - elif is_other_linear: - self.out_dim = self.sd_module.embed_dim elif is_conv: self.out_dim = self.sd_module.out_channels - else: - raise ValueError("sd_module must be Linear or Conv") + elif is_other_linear: + self.out_dim = self.sd_module.embed_dim if self.is_kohya: self.constraint = self.alpha * self.out_dim - self.num_blocks, self.block_size = factorization(self.out_dim, self.dim) + self.num_blocks = self.dim + self.block_size = self.out_dim // self.dim else: self.constraint = None self.block_size, self.num_blocks = factorization(self.out_dim, self.dim) - def merge_weight(self, R_weight, org_weight): - R_weight = R_weight.to(org_weight.device, dtype=org_weight.dtype) - if org_weight.dim() == 4: - weight = torch.einsum("oihw, op -> pihw", org_weight, R_weight) - else: - weight = torch.einsum("oi, op -> pi", org_weight, R_weight) - return weight - - def get_weight(self, oft_blocks, multiplier=None): - if self.constraint is not None: - constraint = self.constraint.to(oft_blocks.device, dtype=oft_blocks.dtype) - - block_Q = oft_blocks - oft_blocks.transpose(1, 2) - norm_Q = torch.norm(block_Q.flatten()) - if self.constraint is not None: - new_norm_Q = torch.clamp(norm_Q, max=constraint) - else: - new_norm_Q = norm_Q - block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) - m_I = torch.eye(self.num_blocks, device=oft_blocks.device).unsqueeze(0).repeat(self.block_size, 1, 1) - #m_I = torch.eye(self.block_size, device=oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1) - block_R = torch.matmul(m_I + block_Q, (m_I - block_Q).inverse()) + def calc_updown_kb(self, orig_weight, multiplier): + oft_blocks = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) + oft_blocks = oft_blocks - oft_blocks.transpose(1, 2) # ensure skew-symmetric orthogonal matrix - block_R_weighted = multiplier * block_R + (1 - multiplier) * m_I - R = torch.block_diag(*block_R_weighted) - return R + R = oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) + R = R * multiplier + torch.eye(self.block_size, device=orig_weight.device) - def calc_updown_kohya(self, orig_weight, multiplier): - R = self.get_weight(self.oft_blocks, multiplier) - merged_weight = self.merge_weight(R, orig_weight) + # This errors out for MultiheadAttention, might need to be handled up-stream + merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size) + merged_weight = torch.einsum( + 'k n m, k n ... -> k m ...', + R, + merged_weight + ) + merged_weight = rearrange(merged_weight, 'k m ... 
-> (k m) ...') updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight output_shape = orig_weight.shape - else: - # FIXME: skip MultiheadAttention for now - #up = self.lin_module.weight.to(orig_weight.device, dtype=orig_weight.dtype) - updown = torch.zeros([orig_weight.shape[1], orig_weight.shape[1]], device=orig_weight.device, dtype=orig_weight.dtype) - output_shape = (orig_weight.shape[1], orig_weight.shape[1]) - return self.finalize_updown(updown, orig_weight, output_shape) def calc_updown(self, orig_weight): - # if alpha is a very small number as in coft, calc_scale will return a almost zero number so we ignore it - #multiplier = self.multiplier() * self.calc_scale() + # if alpha is a very small number as in coft, calc_scale() will return an almost zero number so we ignore it multiplier = self.multiplier() - return self.calc_updown_kb(orig_weight, multiplier) # override to remove the multiplier/scale factor; it's already multiplied in get_weight def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None): - #return super().finalize_updown(updown, orig_weight, output_shape, ex_bias) - if self.bias is not None: updown = updown.reshape(self.bias.shape) updown += self.bias.to(orig_weight.device, dtype=orig_weight.dtype) -- cgit v1.2.3
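Note: the einsum in calc_updown_kb() above applies one small rotation per block of the weight. A runnable sanity check (requires torch and einops; the shapes and the 0.5 multiplier are made up for the sketch) showing that the per-block einsum equals a single block-diagonal rotation:

import torch
from einops import rearrange

num_blocks, block_size = 4, 8
out_dim = num_blocks * block_size  # stand-in for a layer's output dimension

Q = torch.randn(num_blocks, block_size, block_size)
Q = Q - Q.transpose(1, 2)  # skew-symmetric blocks, as in the patch
R = Q * 0.5 + torch.eye(block_size)  # multiplier fixed at 0.5 for the sketch

W = torch.randn(out_dim, 16)  # made-up stand-in for the original weight
W_blocks = rearrange(W, "(k n) i -> k n i", k=num_blocks, n=block_size)
W_rot = torch.einsum("k n m, k n i -> k m i", R, W_blocks)
W_rot = rearrange(W_rot, "k m i -> (k m) i")

# the same result, written as one block-diagonal rotation of the whole weight
R_full = torch.block_diag(*R)
assert torch.allclose(W_rot, R_full.T @ W, atol=1e-5)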
Returns context manager for U-Net + """ + enabled = not opts.hypertile_split_unet_attn + swap_size = opts.hypertile_swap_size_unet + max_depth = opts.hypertile_max_depth_unet + tile_size_max = opts.hypertile_max_tile_unet + return split_attention( + model, + aspect_ratio=aspect_ratio, + tile_size=min(tile_size, tile_size_max), + swap_size=swap_size, + disable=not enabled, + max_depth=max_depth, + is_sdxl=is_sdxl, + ) \ No newline at end of file diff --git a/modules/processing.py b/modules/processing.py index e19a09a3..c622ff33 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -24,7 +24,7 @@ from modules.shared import opts, cmd_opts, state import modules.shared as shared import modules.paths as paths import modules.face_restoration -from modules.hypertile import split_attention, set_hypertile_seed, largest_tile_size_available +from modules.hypertile import set_hypertile_seed, largest_tile_size_available, hypertile_context_unet, hypertile_context_vae import modules.images as images import modules.styles import modules.sd_models as sd_models @@ -874,7 +874,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: else: if opts.sd_vae_decode_method != 'Full': p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method - with split_attention(p.sd_model.first_stage_model, aspect_ratio = p.width / p.height, tile_size=min(largest_tile_size_available(p.width, p.height), 128), disable=not shared.opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): + with hypertile_context_unet(p.sd_model.first_stage_model, aspect_ratio=p.width / p.height, tile_size=largest_tile_size_available(p.width, p.height), is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) x_samples_ddim = torch.stack(x_samples_ddim).float() @@ -1144,8 +1144,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): aspect_ratio = self.width / self.height x = self.rng.next() tile_size = largest_tile_size_available(self.width, self.height) - with split_attention(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 128), swap_size=1, disable=not shared.opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): - with split_attention(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 256), swap_size=2, disable=not shared.opts.hypertile_split_unet_attn, is_sdxl=shared.sd_model.is_sdxl): + with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): + with hypertile_context_unet(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): devices.torch_gc() samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) del x @@ -1153,7 +1153,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): return samples if self.latent_scale_mode is None: - with split_attention(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 256), swap_size=1, disable=not shared.opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): + with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, 
check_for_nans=True)).to(dtype=torch.float32) else: decoded_samples = None @@ -1245,15 +1245,16 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.scripts is not None: self.scripts.before_hr(self) tile_size = largest_tile_size_available(target_width, target_height) - with split_attention(self.sd_model.first_stage_model, aspect_ratio=target_width / target_height, tile_size=min(tile_size, 256), swap_size=1, disable=not opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): - with split_attention(self.sd_model.model, aspect_ratio=target_width / target_height, tile_size=min(tile_size, 256), swap_size=3, max_depth=1,scale_depth=True, disable=not opts.hypertile_split_unet_attn, is_sdxl=shared.sd_model.is_sdxl): + aspect_ratio = self.width / self.height + with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): + with hypertile_context_unet(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio()) self.sampler = None devices.torch_gc() - with split_attention(self.sd_model.first_stage_model, aspect_ratio=target_width / target_height, tile_size=min(tile_size, 256), swap_size=1, disable=not opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): + with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True) self.is_hr_pass = False @@ -1533,8 +1534,8 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): x *= self.initial_noise_multiplier aspect_ratio = self.width / self.height tile_size = largest_tile_size_available(self.width, self.height) - with split_attention(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 128), swap_size=1, disable=not shared.opts.hypertile_split_vae_attn, is_sdxl=shared.sd_model.is_sdxl): - with split_attention(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=min(tile_size, 256), swap_size=2, disable=not shared.opts.hypertile_split_unet_attn, is_sdxl=shared.sd_model.is_sdxl): + with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): + with hypertile_context_unet(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): devices.torch_gc() samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) diff --git a/modules/shared_options.py b/modules/shared_options.py index d9650265..28a48906 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -202,6 +202,12 @@ options_templates.update(options_section(('optimizations', "Optimizations"), { "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"), "hypertile_split_unet_attn" : OptionInfo(False, "Split attention 
in Unet with HyperTile").link("Github", "https://github.com/tfernd/HyperTile").info("improves performance; changes behavior, but deterministic"), "hypertile_split_vae_attn": OptionInfo(False, "Split attention in VAE with HyperTile").link("Github", "https://github.com/tfernd/HyperTile").info("improves performance; changes behavior, but deterministic"), + "hypertile_max_depth_vae" : OptionInfo(3, "Max depth for VAE HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}).link("Github", "https://github.com/tfernd/HyperTile"), + "hypertile_max_depth_unet" : OptionInfo(3, "Max depth for Unet HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}).link("Github", "https://github.com/tfernd/HyperTile"), + "hypertile_max_tile_vae" : OptionInfo(128, "Max tile size for VAE HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).link("Github", "https://github.com/tfernd/HyperTile"), + "hypertile_max_tile_unet" : OptionInfo(256, "Max tile size for Unet HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).link("Github", "https://github.com/tfernd/HyperTile"), + "hypertile_swap_size_unet": OptionInfo(3, "Swap size for Unet HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 6, "step": 1}).link("Github", "https://github.com/tfernd/HyperTile"), + "hypertile_swap_size_vae": OptionInfo(3, "Swap size for VAE HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 6, "step": 1}).link("Github", "https://github.com/tfernd/HyperTile"), })) options_templates.update(options_section(('compatibility', "Compatibility"), { -- cgit v1.2.3 From 472c22cc8a46b825545d5c86bd2745269430d7b0 Mon Sep 17 00:00:00 2001 From: AngelBottomless Date: Thu, 16 Nov 2023 19:03:45 +0900 Subject: fix ruff - add newline --- modules/hypertile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/hypertile.py b/modules/hypertile.py index fee24a8c..86acecdc 100644 --- a/modules/hypertile.py +++ b/modules/hypertile.py @@ -367,4 +367,4 @@ def hypertile_context_unet(model:nn.Module, aspect_ratio:float, tile_size:int, o disable=not enabled, max_depth=max_depth, is_sdxl=is_sdxl, - ) \ No newline at end of file + ) -- cgit v1.2.3 From 236eb82c3a91960ba5db7b82efbe0f6a9fd7cf24 Mon Sep 17 00:00:00 2001 From: Lucas Daniel Velazquez M <19197331+Luxter77@users.noreply.github.com> Date: Thu, 16 Nov 2023 13:20:33 -0300 Subject: Adds tqdm handler to logging_config.py for progress bar integration --- modules/logging_config.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/modules/logging_config.py b/modules/logging_config.py index 7db23d4b..ce831b5c 100644 --- a/modules/logging_config.py +++ b/modules/logging_config.py @@ -1,6 +1,19 @@ import os import logging +from tqdm.auto import tqdm + +class TqdmLoggingHandler(logging.Handler): + def __init__(self, level=logging.INFO): + super().__init__(level) + + def emit(self, record): + try: + msg = self.format(record) + tqdm.write(msg) + self.flush() + except Exception: + self.handleError(record) def setup_logging(loglevel): if loglevel is None: @@ -12,5 +25,6 @@ def setup_logging(loglevel): level=log_level, format='%(asctime)s %(levelname)s [%(name)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S', + handlers=[TqdmLoggingHandler()] ) -- cgit v1.2.3 From cdb60a690dcd35e865eb0caef6c6d8ff64e1b0d5 Mon Sep 17 00:00:00 2001 From: Lucas Daniel Velazquez M <19197331+Luxter77@users.noreply.github.com> Date: Thu, 16 Nov 2023 16:43:59 -0300 Subject: Take into account tqdm not being installed before first boot for 
logging

---
 modules/logging_config.py | 37 +++++++++++++++++++++++-------------
 1 file changed, 24 insertions(+), 13 deletions(-)

diff --git a/modules/logging_config.py b/modules/logging_config.py
index ce831b5c..99ed2855 100644
--- a/modules/logging_config.py
+++ b/modules/logging_config.py
@@ -1,30 +1,41 @@
 import os
 import logging
 
-from tqdm.auto import tqdm
+try:
+    from tqdm.auto import tqdm
 
-class TqdmLoggingHandler(logging.Handler):
-    def __init__(self, level=logging.INFO):
-        super().__init__(level)
+    class TqdmLoggingHandler(logging.Handler):
+        def __init__(self, level=logging.INFO):
+            super().__init__(level)
 
-    def emit(self, record):
-        try:
-            msg = self.format(record)
-            tqdm.write(msg)
-            self.flush()
-        except Exception:
-            self.handleError(record)
+        def emit(self, record):
+            try:
+                msg = self.format(record)
+                tqdm.write(msg)
+                self.flush()
+            except Exception:
+                self.handleError(record)
+
+    TQDM_IMPORTED = True
+except ImportError:
+    # tqdm does not exist before first launch
+    # I will import once the UI finishes setting up the environment and reloads.
+    TQDM_IMPORTED = False
 
 def setup_logging(loglevel):
     if loglevel is None:
         loglevel = os.environ.get("SD_WEBUI_LOG_LEVEL")
 
+    loghandlers = []
+
+    if TQDM_IMPORTED:
+        loghandlers.append(TqdmLoggingHandler())
+
     if loglevel:
         log_level = getattr(logging, loglevel.upper(), None) or logging.INFO
         logging.basicConfig(
             level=log_level,
             format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
             datefmt='%Y-%m-%d %H:%M:%S',
-            handlers=[TqdmLoggingHandler()]
+            handlers=[]
         )
-
-- 
cgit v1.2.3


From 7021cdb1de12be3071ecb67aa8d2e34e7a0ec5ab Mon Sep 17 00:00:00 2001
From: Your Name
Date: Thu, 16 Nov 2023 17:53:57 -0300
Subject: actually adds handler to logging_config.py

---
 modules/logging_config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/logging_config.py b/modules/logging_config.py
index 99ed2855..79269875 100644
--- a/modules/logging_config.py
+++ b/modules/logging_config.py
@@ -37,5 +37,5 @@ def setup_logging(loglevel):
             level=log_level,
             format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
             datefmt='%Y-%m-%d %H:%M:%S',
-            handlers=[]
+            handlers=loghandlers
         )
-- 
cgit v1.2.3


From c40be2252ab1c8c289562db208c5ac6618bd8545 Mon Sep 17 00:00:00 2001
From: aria1th <35677394+aria1th@users.noreply.github.com>
Date: Fri, 17 Nov 2023 09:22:27 +0900
Subject: Fix critical issue - unet apply

---
 modules/processing.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index c622ff33..2fda7f33 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -874,7 +874,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             else:
                 if opts.sd_vae_decode_method != 'Full':
                     p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method
-                with hypertile_context_unet(p.sd_model.first_stage_model, aspect_ratio=p.width / p.height, tile_size=largest_tile_size_available(p.width, p.height), is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts):
+                with hypertile_context_unet(p.sd_model.model, aspect_ratio=p.width / p.height, tile_size=largest_tile_size_available(p.width, p.height), is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts):
                     x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)
 
             x_samples_ddim = torch.stack(x_samples_ddim).float()
@@ -1145,7 +1145,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         x = self.rng.next()
         tile_size = largest_tile_size_available(self.width,
self.height) with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): - with hypertile_context_unet(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): + with hypertile_context_unet(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): devices.torch_gc() samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) del x @@ -1247,7 +1247,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): tile_size = largest_tile_size_available(target_width, target_height) aspect_ratio = self.width / self.height with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): - with hypertile_context_unet(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): + with hypertile_context_unet(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio()) @@ -1535,7 +1535,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): aspect_ratio = self.width / self.height tile_size = largest_tile_size_available(self.width, self.height) with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): - with hypertile_context_unet(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): + with hypertile_context_unet(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): devices.torch_gc() samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) -- cgit v1.2.3 From c0725ba2d098a6a78610e7d96ee75f63a32d4e52 Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Fri, 17 Nov 2023 09:34:50 +0900 Subject: Fix inverted option issue I'm pretty sure I was sleepy while implementing this --- modules/hypertile.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/hypertile.py b/modules/hypertile.py index 86acecdc..3a1468c6 100644 --- a/modules/hypertile.py +++ b/modules/hypertile.py @@ -337,7 +337,7 @@ def hypertile_context_vae(model:nn.Module, aspect_ratio:float, tile_size:int, op """ Returns context manager for VAE """ - enabled = not opts.hypertile_split_vae_attn + enabled = opts.hypertile_split_vae_attn swap_size = opts.hypertile_swap_size_vae max_depth = opts.hypertile_max_depth_vae tile_size_max = opts.hypertile_max_tile_vae @@ -355,7 +355,7 @@ def hypertile_context_unet(model:nn.Module, aspect_ratio:float, tile_size:int, o """ Returns context manager for U-Net """ - enabled = not opts.hypertile_split_unet_attn + enabled = opts.hypertile_split_unet_attn swap_size = opts.hypertile_swap_size_unet max_depth = opts.hypertile_max_depth_unet tile_size_max = opts.hypertile_max_tile_unet -- cgit v1.2.3 From 
ffd0f8ddc309688636ac1ac10d82b72ab6b466bf Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Fri, 17 Nov 2023 09:54:33 +0900 Subject: set empty value for SD XL 3rd layer --- modules/hypertile.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/hypertile.py b/modules/hypertile.py index 3a1468c6..be898fce 100644 --- a/modules/hypertile.py +++ b/modules/hypertile.py @@ -170,6 +170,7 @@ DEPTH_LAYERS_XL = { "middle_block.1.transformer_blocks.8.attn1", "middle_block.1.transformer_blocks.9.attn1", ], + 3 : [] # TODO - separate layers for SD-XL } -- cgit v1.2.3 From 97431f29feb17ffc96ca95e9b3efec87be9d8b3a Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Fri, 17 Nov 2023 10:05:28 +0900 Subject: fix double gc and decoding with unet context --- modules/processing.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 2fda7f33..36c2be5e 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -874,7 +874,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: else: if opts.sd_vae_decode_method != 'Full': p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method - with hypertile_context_unet(p.sd_model.model, aspect_ratio=p.width / p.height, tile_size=largest_tile_size_available(p.width, p.height), is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): + with hypertile_context_vae(p.sd_model.first_stage_model, aspect_ratio=p.width / p.height, tile_size=largest_tile_size_available(p.width, p.height), opts=shared.opts): x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) x_samples_ddim = torch.stack(x_samples_ddim).float() @@ -1146,11 +1146,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): tile_size = largest_tile_size_available(self.width, self.height) with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): with hypertile_context_unet(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): - devices.torch_gc() samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) del x if not self.enable_hr: return samples + devices.torch_gc() if self.latent_scale_mode is None: with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): @@ -1536,7 +1536,6 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): tile_size = largest_tile_size_available(self.width, self.height) with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): with hypertile_context_unet(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): - devices.torch_gc() samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) if self.mask is not None: -- cgit v1.2.3 From 4f2a4a361511ca3b8cdd7373f6c7d723583e8fdb Mon Sep 17 00:00:00 2001 From: storyicon Date: Fri, 17 Nov 2023 09:48:18 +0000 Subject: feat: fix randn found element of type float at pos 2 Signed-off-by: storyicon --- modules/rng.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/modules/rng.py b/modules/rng.py index 9e8ba2ee..8934d39b 100644 --- a/modules/rng.py +++ b/modules/rng.py @@ -110,7 +110,7 @@ class ImageRNG: self.is_first = True def first(self): - noise_shape = self.shape if self.seed_resize_from_h <= 0 or self.seed_resize_from_w <= 0 else (self.shape[0], self.seed_resize_from_h // 8, self.seed_resize_from_w // 8) + noise_shape = self.shape if self.seed_resize_from_h <= 0 or self.seed_resize_from_w <= 0 else (self.shape[0], int(self.seed_resize_from_h) // 8, int(self.seed_resize_from_w // 8)) xs = [] -- cgit v1.2.3 From bde439ef67776be126d6a8c569a23d54dbc3e707 Mon Sep 17 00:00:00 2001 From: wfjsw Date: Sun, 19 Nov 2023 00:58:47 -0600 Subject: use metadata.ini for meta filename --- modules/extensions.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/modules/extensions.py b/modules/extensions.py index 5536db3e..f3988d02 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -43,13 +43,13 @@ class Extension: @functools.cached_property def metadata(self): - if os.path.isfile(os.path.join(self.path, "sd_webui_metadata.ini")): + if os.path.isfile(os.path.join(self.path, "metadata.ini")): try: config = configparser.ConfigParser() - config.read(os.path.join(self.path, "sd_webui_metadata.ini")) + config.read(os.path.join(self.path, "metadata.ini")) return config except Exception: - errors.report(f"Error reading sd_webui_metadata.ini for extension {self.canonical_name}.", + errors.report(f"Error reading metadata.ini for extension {self.canonical_name}.", exc_info=True) return None @@ -177,14 +177,14 @@ def list_extensions(): canonical_name = extension_dirname requires = None - if os.path.isfile(os.path.join(path, "sd_webui_metadata.ini")): + if os.path.isfile(os.path.join(path, "metadata.ini")): try: config = configparser.ConfigParser() - config.read(os.path.join(path, "sd_webui_metadata.ini")) + config.read(os.path.join(path, "metadata.ini")) canonical_name = config.get("Extension", "Name", fallback=canonical_name) requires = config.get("Extension", "Requires", fallback=None) except Exception: - errors.report(f"Error reading sd_webui_metadata.ini for extension {extension_dirname}. " + errors.report(f"Error reading metadata.ini for extension {extension_dirname}. 
" f"Will load regardless.", exc_info=True) canonical_name = canonical_name.lower().strip() -- cgit v1.2.3 From dea5e43c8359b663d5599efc99278c258747db61 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 18 Nov 2023 04:23:03 +0900 Subject: Option to show batch img2img results in UI shared.opts.img2img_batch_show_results_limit limit the number of images return to the UI for batch img2img default limit 32 0 no images are shown -1 unlimited, all images are shown --- modules/img2img.py | 24 ++++++++++++++++++++---- modules/shared_options.py | 1 + 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index 52cb577a..c583290a 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -44,6 +44,8 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal steps = p.steps override_settings = p.override_settings sd_model_checkpoint_override = get_closet_checkpoint_match(override_settings.get("sd_model_checkpoint", None)) + batch_results = None + discard_further_results = False for i, image in enumerate(images): state.job = f"{i+1} out of {len(images)}" if state.skipped: @@ -127,7 +129,21 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal if proc is None: p.override_settings.pop('save_images_replace_action', None) - process_images(p) + proc = process_images(p) + + if not discard_further_results and proc: + if batch_results: + batch_results.images.extend(proc.images) + batch_results.infotexts.extend(proc.infotexts) + else: + batch_results = proc + + if 0 <= shared.opts.img2img_batch_show_results_limit < len(batch_results.images): + discard_further_results = True + batch_results.images = batch_results.images[:int(shared.opts.img2img_batch_show_results_limit)] + batch_results.infotexts = batch_results.infotexts[:int(shared.opts.img2img_batch_show_results_limit)] + + return batch_results def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): @@ -212,10 +228,10 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s with closing(p): if is_batch: assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" + processed = process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir) - process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, 
png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir) - - processed = Processed(p, [], p.seed, "") + if processed is None: + processed = Processed(p, [], p.seed, "") else: processed = modules.scripts.scripts_img2img.run(p, *args) if processed is None: diff --git a/modules/shared_options.py b/modules/shared_options.py index d40db530..1ee8c7ad 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -189,6 +189,7 @@ options_templates.update(options_section(('img2img', "img2img"), { "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(), "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), + "img2img_batch_show_results_limit": OptionInfo(32, "Show the first N batch img2img results in UI", gr.Slider, {"minimum": -1, "maximum": 1000, "step": 1}).info('0: disable, -1: show all images. Too many images can cause lag'), })) options_templates.update(options_section(('optimizations', "Optimizations"), { -- cgit v1.2.3 From 6d337bf23dae990e7b6717da4d5f2e54f212685c Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 20 Nov 2023 01:38:31 +0900 Subject: save sysinfo as .json GitHub now allows uploading of .json files in issues --- modules/launch_utils.py | 2 +- modules/ui.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 8cdbafa5..264ec9ca 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -441,7 +441,7 @@ def dump_sysinfo(): import datetime text = sysinfo.get() - filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt" + filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.json" with open(filename, "w", encoding="utf8") as file: file.write(text) diff --git a/modules/ui.py b/modules/ui.py index ba0d8542..b82f3c5e 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1308,7 +1308,7 @@ def setup_ui_api(app): from fastapi.responses import PlainTextResponse text = sysinfo.get() - filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt" + filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.json" return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'}) -- cgit v1.2.3 From 9b471436b2226458a767077707ea102e331b5d78 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 20 Nov 2023 14:47:09 +0300 Subject: rework extensions metadata: use custom sorter that doesn't mess the order as much and ignores cyclic errors, use classes with named fields instead of dictionaries, eliminate some duplicated code --- modules/extensions.py | 132 +++++++++++++++++++++------------------ modules/scripts.py | 169 +++++++++++++++++++++++--------------------------- 2 files changed, 148 insertions(+), 153 deletions(-) diff --git a/modules/extensions.py b/modules/extensions.py index f3988d02..1899cd52 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -1,5 +1,6 @@ +from __future__ import annotations + import configparser -import functools import os import threading import re @@ -8,7 +9,6 @@ from modules import shared, errors, cache, scripts from 
modules.gitpython_hack import Repo from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401 -extensions = [] os.makedirs(extensions_dir, exist_ok=True) @@ -22,13 +22,56 @@ def active(): return [x for x in extensions if x.enabled] +class ExtensionMetadata: + filename = "metadata.ini" + config: configparser.ConfigParser + canonical_name: str + requires: list + + def __init__(self, path, canonical_name): + self.config = configparser.ConfigParser() + + filepath = os.path.join(path, self.filename) + if os.path.isfile(filepath): + try: + self.config.read(filepath) + except Exception: + errors.report(f"Error reading {self.filename} for extension {canonical_name}.", exc_info=True) + + self.canonical_name = self.config.get("Extension", "Name", fallback=canonical_name) + self.canonical_name = canonical_name.lower().strip() + + self.requires = self.get_script_requirements("Requires", "Extension") + + def get_script_requirements(self, field, section, extra_section=None): + """reads a list of requirements from the config; field is the name of the field in the ini file, + like Requires or Before, and section is the name of the [section] in the ini file; additionally, + reads more requirements from [extra_section] if specified.""" + + x = self.config.get(section, field, fallback='') + + if extra_section: + x = x + ', ' + self.config.get(extra_section, field, fallback='') + + return self.parse_list(x.lower()) + + def parse_list(self, text): + """converts a line from config ("ext1 ext2, ext3 ") into a python list (["ext1", "ext2", "ext3"])""" + + if not text: + return [] + + # both "," and " " are accepted as separator + return [x for x in re.split(r"[,\s]+", text.strip()) if x] + + class Extension: lock = threading.Lock() cached_fields = ['remote', 'commit_date', 'branch', 'commit_hash', 'version'] + metadata: ExtensionMetadata - def __init__(self, name, path, enabled=True, is_builtin=False, canonical_name=None): + def __init__(self, name, path, enabled=True, is_builtin=False, metadata=None): self.name = name - self.canonical_name = canonical_name or name.lower() self.path = path self.enabled = enabled self.status = '' @@ -40,18 +83,8 @@ class Extension: self.branch = None self.remote = None self.have_info_from_repo = False - - @functools.cached_property - def metadata(self): - if os.path.isfile(os.path.join(self.path, "metadata.ini")): - try: - config = configparser.ConfigParser() - config.read(os.path.join(self.path, "metadata.ini")) - return config - except Exception: - errors.report(f"Error reading metadata.ini for extension {self.canonical_name}.", - exc_info=True) - return None + self.metadata = metadata if metadata else ExtensionMetadata(self.path, name.lower()) + self.canonical_name = metadata.canonical_name def to_dict(self): return {x: getattr(self, x) for x in self.cached_fields} @@ -162,7 +195,7 @@ def list_extensions(): elif shared.opts.disable_all_extensions == "extra": print("*** \"Disable all extensions\" option was set, will only load built-in extensions ***") - extension_dependency_map = {} + loaded_extensions = {} # scan through extensions directory and load metadata for dirname in [extensions_builtin_dir, extensions_dir]: @@ -175,55 +208,30 @@ def list_extensions(): continue canonical_name = extension_dirname - requires = None + metadata = ExtensionMetadata(path, canonical_name) - if os.path.isfile(os.path.join(path, "metadata.ini")): - try: - config = configparser.ConfigParser() - config.read(os.path.join(path, "metadata.ini")) - 
canonical_name = config.get("Extension", "Name", fallback=canonical_name) - requires = config.get("Extension", "Requires", fallback=None) - except Exception: - errors.report(f"Error reading metadata.ini for extension {extension_dirname}. " - f"Will load regardless.", exc_info=True) + # check for duplicated canonical names + already_loaded_extension = loaded_extensions.get(metadata.canonical_name) + if already_loaded_extension is not None: + errors.report(f'Duplicate canonical name "{canonical_name}" found in extensions "{extension_dirname}" and "{already_loaded_extension.name}". Former will be discarded.', exc_info=False) + continue - canonical_name = canonical_name.lower().strip() + is_builtin = dirname == extensions_builtin_dir + extension = Extension(name=extension_dirname, path=path, enabled=extension_dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin, metadata=metadata) + extensions.append(extension) + loaded_extensions[canonical_name] = extension - # check for duplicated canonical names - if canonical_name in extension_dependency_map: - errors.report(f"Duplicate canonical name \"{canonical_name}\" found in extensions " - f"\"{extension_dirname}\" and \"{extension_dependency_map[canonical_name]['dirname']}\". " - f"The current loading extension will be discarded.", exc_info=False) + # check for requirements + for extension in extensions: + for req in extension.metadata.requires: + required_extension = loaded_extensions.get(req) + if required_extension is None: + errors.report(f'Extension "{extension.name}" requires "{req}" which is not installed.', exc_info=False) continue - # both "," and " " are accepted as separator - requires = list(filter(None, re.split(r"[,\s]+", requires.lower()))) if requires else [] + if not extension.enabled: + errors.report(f'Extension "{extension.name}" requires "{required_extension.name}" which is disabled.', exc_info=False) + continue - extension_dependency_map[canonical_name] = { - "dirname": extension_dirname, - "path": path, - "requires": requires, - } - # check for requirements - for (_, extension_data) in extension_dependency_map.items(): - dirname, path, requires = extension_data['dirname'], extension_data['path'], extension_data['requires'] - requirement_met = True - for req in requires: - if req not in extension_dependency_map: - errors.report(f"Extension \"{dirname}\" requires \"{req}\" which is not installed. " - f"The current loading extension will be discarded.", exc_info=False) - requirement_met = False - break - dep_dirname = extension_dependency_map[req]['dirname'] - if dep_dirname in shared.opts.disabled_extensions: - errors.report(f"Extension \"{dirname}\" requires \"{dep_dirname}\" which is disabled. 
" - f"The current loading extension will be discarded.", exc_info=False) - requirement_met = False - break - - is_builtin = dirname == extensions_builtin_dir - extension = Extension(name=dirname, path=path, - enabled=dirname not in shared.opts.disabled_extensions and requirement_met, - is_builtin=is_builtin) - extensions.append(extension) +extensions: list[Extension] = [] diff --git a/modules/scripts.py b/modules/scripts.py index b1f4504a..b0689a23 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -2,7 +2,6 @@ import os import re import sys import inspect -from graphlib import TopologicalSorter, CycleError from collections import namedtuple from dataclasses import dataclass @@ -312,27 +311,57 @@ scripts_data = [] postprocessing_scripts_data = [] ScriptClassData = namedtuple("ScriptClassData", ["script_class", "path", "basedir", "module"]) +def topological_sort(dependencies): + """Accepts a dictionary mapping name to its dependencies, returns a list of names ordered according to dependencies. + Ignores errors relating to missing dependeencies or circular dependencies + """ + + visited = {} + result = [] + + def inner(name): + visited[name] = True + + for dep in dependencies.get(name, []): + if dep in dependencies and dep not in visited: + inner(dep) + + result.append(name) + + for depname in dependencies: + if depname not in visited: + inner(depname) + + return result + + +@dataclass +class ScriptWithDependencies: + script_canonical_name: str + file: ScriptFile + requires: list + load_before: list + load_after: list + def list_scripts(scriptdirname, extension, *, include_extensions=True): - scripts_list = [] - script_dependency_map = {} + scripts = {} - # build script dependency map + loaded_extensions = {ext.canonical_name: ext for ext in extensions.active()} + loaded_extensions_scripts = {ext.canonical_name: [] for ext in extensions.active()} + # build script dependency map root_script_basedir = os.path.join(paths.script_path, scriptdirname) if os.path.exists(root_script_basedir): for filename in sorted(os.listdir(root_script_basedir)): if not os.path.isfile(os.path.join(root_script_basedir, filename)): continue - script_dependency_map[filename] = { - "extension": None, - "extension_dirname": None, - "script_file": ScriptFile(paths.script_path, filename, os.path.join(root_script_basedir, filename)), - "requires": [], - "load_before": [], - "load_after": [], - } + if os.path.splitext(filename)[1].lower() != extension: + continue + + script_file = ScriptFile(paths.script_path, filename, os.path.join(root_script_basedir, filename)) + scripts[filename] = ScriptWithDependencies(filename, script_file, [], [], []) if include_extensions: for ext in extensions.active(): @@ -341,96 +370,54 @@ def list_scripts(scriptdirname, extension, *, include_extensions=True): if not os.path.isfile(extension_script.path): continue - script_canonical_name = ext.canonical_name + "/" + extension_script.filename - if ext.is_builtin: - script_canonical_name = "builtin/" + script_canonical_name + script_canonical_name = ("builtin/" if ext.is_builtin else "") + ext.canonical_name + "/" + extension_script.filename relative_path = scriptdirname + "/" + extension_script.filename - requires = '' - load_before = '' - load_after = '' - - if ext.metadata is not None: - requires = ext.metadata.get(relative_path, "Requires", fallback='') - load_before = ext.metadata.get(relative_path, "Before", fallback='') - load_after = ext.metadata.get(relative_path, "After", fallback='') - - # propagate directory level metadata - 
requires = requires + ',' + ext.metadata.get(scriptdirname, "Requires", fallback='') - load_before = load_before + ',' + ext.metadata.get(scriptdirname, "Before", fallback='') - load_after = load_after + ',' + ext.metadata.get(scriptdirname, "After", fallback='') - - requires = list(filter(None, re.split(r"[,\s]+", requires.lower()))) if requires else [] - load_after = list(filter(None, re.split(r"[,\s]+", load_after.lower()))) if load_after else [] - load_before = list(filter(None, re.split(r"[,\s]+", load_before.lower()))) if load_before else [] - - script_dependency_map[script_canonical_name] = { - "extension": ext.canonical_name, - "extension_dirname": ext.name, - "script_file": extension_script, - "requires": requires, - "load_before": load_before, - "load_after": load_after, - } + script = ScriptWithDependencies( + script_canonical_name=script_canonical_name, + file=extension_script, + requires=ext.metadata.get_script_requirements("Requires", relative_path, scriptdirname), + load_before=ext.metadata.get_script_requirements("Before", relative_path, scriptdirname), + load_after=ext.metadata.get_script_requirements("After", relative_path, scriptdirname), + ) - # resolve dependencies + scripts[script_canonical_name] = script + loaded_extensions_scripts[ext.canonical_name].append(script) - loaded_extensions = set() - for ext in extensions.active(): - loaded_extensions.add(ext.canonical_name) - - for script_canonical_name, script_data in script_dependency_map.items(): + for script_canonical_name, script in scripts.items(): # load before requires inverse dependency # in this case, append the script name into the load_after list of the specified script - for load_before_script in script_data['load_before']: + for load_before in script.load_before: # if this requires an individual script to be loaded before - if load_before_script in script_dependency_map: - script_dependency_map[load_before_script]['load_after'].append(script_canonical_name) - elif load_before_script in loaded_extensions: - for _, script_data2 in script_dependency_map.items(): - if script_data2['extension'] == load_before_script: - script_data2['load_after'].append(script_canonical_name) - break - - # resolve extension name in load_after lists - for load_after_script in list(script_data['load_after']): - if load_after_script not in script_dependency_map and load_after_script in loaded_extensions: - script_data['load_after'].remove(load_after_script) - for script_canonical_name2, script_data2 in script_dependency_map.items(): - if script_data2['extension'] == load_after_script: - script_data['load_after'].append(script_canonical_name2) - break - - # build the DAG - sorter = TopologicalSorter() - for script_canonical_name, script_data in script_dependency_map.items(): - requirement_met = True - for required_script in script_data['requires']: - # if this requires an individual script to be loaded - if required_script not in script_dependency_map and required_script not in loaded_extensions: - errors.report(f"Script \"{script_canonical_name}\" " - f"requires \"{required_script}\" to " - f"be loaded, but it is not. 
Skipping.", - exc_info=False) - requirement_met = False - break - if not requirement_met: - continue + other_script = scripts.get(load_before) + if other_script: + other_script.load_after.append(script_canonical_name) - sorter.add(script_canonical_name, *script_data['load_after']) + # if this requires an extension + other_extension_scripts = loaded_extensions_scripts.get(load_before) + if other_extension_scripts: + for other_script in other_extension_scripts: + other_script.load_after.append(script_canonical_name) - # sort the scripts - try: - ordered_script = sorter.static_order() - except CycleError: - errors.report("Cycle detected in script dependencies. Scripts will load in ascending order.", exc_info=True) - ordered_script = script_dependency_map.keys() + # if After mentions an extension, remove it and instead add all of its scripts + for load_after in list(script.load_after): + if load_after not in scripts and load_after in loaded_extensions_scripts: + script.load_after.remove(load_after) + + for other_script in loaded_extensions_scripts.get(load_after, []): + script.load_after.append(other_script.script_canonical_name) + + dependencies = {} + + for script_canonical_name, script in scripts.items(): + for required_script in script.requires: + if required_script not in scripts and required_script not in loaded_extensions: + errors.report(f'Script "{script_canonical_name}" requires "{required_script}" to be loaded, but it is not.', exc_info=False) - for script_canonical_name in ordered_script: - script_data = script_dependency_map[script_canonical_name] - scripts_list.append(script_data['script_file']) + dependencies[script_canonical_name] = script.load_after - scripts_list = [x for x in scripts_list if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)] + ordered_scripts = topological_sort(dependencies) + scripts_list = [scripts[script_canonical_name].file for script_canonical_name in ordered_scripts] return scripts_list -- cgit v1.2.3 From 314ae1535ea172fcdb0f5b3b2eecc5d4ce9112b5 Mon Sep 17 00:00:00 2001 From: Tom Haelbich Date: Mon, 20 Nov 2023 16:19:54 +0100 Subject: added option for default behavior of dir buttons --- modules/shared_options.py | 1 + modules/ui_extra_networks.py | 9 +++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 00b273fa..1d2dca79 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -224,6 +224,7 @@ options_templates.update(options_section(('interrogate', "Interrogate"), { options_templates.update(options_section(('extra_networks', "Extra Networks"), { "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."), + "extra_networks_dir_button_function": OptionInfo(False, "Add a '/' to the beginning of directory buttons").info("Buttons will display the contents of the selected directory without acting as a search filter."), "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'), "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"), diff --git 
a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index bd673285..27a37295 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -138,8 +138,13 @@ class ExtraNetworksPage:
                 continue
 
             subdir = os.path.abspath(x)[len(parentdir):].replace("\\", "/")
-            if not subdir.startswith("/"):
-                subdir = "/" + subdir
+
+            if shared.opts.extra_networks_dir_button_function:
+                if not subdir.startswith("/"):
+                    subdir = "/" + subdir
+            else:
+                while subdir.startswith("/"):
+                    subdir = subdir[1:]
 
             is_empty = len(os.listdir(x)) == 0
             if not is_empty and not subdir.endswith("/"):
-- 
cgit v1.2.3


From 58c19545c83fa6925c9ce2216ee64964eb5129ce Mon Sep 17 00:00:00 2001
From: hidenorly
Date: Tue, 21 Nov 2023 01:13:53 +0900
Subject: Add FP32 fallback support on sd_vae_approx

This tries to execute interpolate with FP32 if it fails.

The background is that on some environments, such as Mx-chip macOS devices, we get an error as follows:

```
"torch/nn/functional.py", line 3931, in interpolate
    return torch._C._nn.upsample_nearest2d(input, output_size, scale_factors)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
RuntimeError: "upsample_nearest2d_channels_last" not implemented for 'Half'
```

In this case, ```--no-half``` doesn't help solve it. Therefore this commit adds the FP32 fallback execution to solve it.

Note that the submodule may require additional modifications. The following is an example modification in another submodule.

```repositories/stable-diffusion-stability-ai/ldm/modules/diffusionmodules/openaimodel.py
class Upsample(nn.Module):
..snip..
    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            x = F.interpolate(
                x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
            )
        else:
            try:
                x = F.interpolate(x, scale_factor=2, mode="nearest")
            except:
                x = F.interpolate(x.to(th.float32), scale_factor=2, mode="nearest").to(x.dtype)
        if self.use_conv:
            x = self.conv(x)
        return x
..snip..
```

You can see the FP32 fallback execution, the same as in sd_vae_approx.py.
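For illustration, the same try/fallback pattern can be factored into a small reusable helper so call sites stay uniform. This is only a sketch, not part of this patch; the helper name and placement are hypothetical:

```python
import torch
import torch.nn.functional as F


def interpolate_with_fp32_fallback(x: torch.Tensor, **kwargs) -> torch.Tensor:
    # Try the op in the tensor's own dtype first (usually float16 on GPU/MPS).
    try:
        return F.interpolate(x, **kwargs)
    except RuntimeError as e:
        # Some backends (e.g. MPS) lack half-precision kernels for upsampling;
        # retry in float32, then cast the result back to the original dtype.
        if "not implemented for" in str(e) and "Half" in str(e):
            return F.interpolate(x.to(torch.float32), **kwargs).to(x.dtype)
        raise


# usage, matching the call sites above:
# x = interpolate_with_fp32_fallback(x, scale_factor=2, mode="nearest")
```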
--- modules/sd_vae_approx.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/modules/sd_vae_approx.py b/modules/sd_vae_approx.py index 3965e223..8370493f 100644 --- a/modules/sd_vae_approx.py +++ b/modules/sd_vae_approx.py @@ -21,7 +21,13 @@ class VAEApprox(nn.Module): def forward(self, x): extra = 11 - x = nn.functional.interpolate(x, (x.shape[2] * 2, x.shape[3] * 2)) + try: + x = nn.functional.interpolate(x, (x.shape[2] * 2, x.shape[3] * 2)) + except RuntimeError as e: + if "not implemented for" in str(e) and "Half" in str(e): + x = nn.functional.interpolate(x.to(torch.float32), (x.shape[2] * 2, x.shape[3] * 2)).to(x.dtype) + else: + print(f"An unexpected RuntimeError occurred: {str(e)}") x = nn.functional.pad(x, (extra, extra, extra, extra)) for layer in [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5, self.conv6, self.conv7, self.conv8, ]: -- cgit v1.2.3 From 8aa51f682c17d85f4585b9471860224568d25e95 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 21 Nov 2023 08:32:00 +0300 Subject: fix [Bug]: (Dev Branch) Placing "Dimensions" first in "ui_reorder_list" prevents start #14047 --- modules/ui.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index b82f3c5e..08e0ad77 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -635,12 +635,6 @@ def create_ui(): scale_by.release(**on_change_args) button_update_resize_to.click(**on_change_args) - # the code below is meant to update the resolution label after the image in the image selection UI has changed. - # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. - # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. - for component in [init_img, sketch]: - component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) - tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) @@ -701,6 +695,12 @@ def create_ui(): if category not in {"accordions"}: scripts.scripts_img2img.setup_ui_for_section(category) + # the code below is meant to update the resolution label after the image in the image selection UI has changed. + # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. + # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. + for component in [init_img, sketch]: + component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) + def select_img2img_tab(tab): return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), -- cgit v1.2.3 From 8fe1e195228162a4510925de05015f361efa1087 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Wed, 22 Nov 2023 18:01:34 +0200 Subject: Update ruff to 0.1.6 --- .github/workflows/on_pull_request.yaml | 2 +- pyproject.toml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/on_pull_request.yaml b/.github/workflows/on_pull_request.yaml index 78e608ee..9e44c806 100644 --- a/.github/workflows/on_pull_request.yaml +++ b/.github/workflows/on_pull_request.yaml @@ -20,7 +20,7 @@ jobs: # not to have GHA download an (at the time of writing) 4 GB cache # of PyTorch and other dependencies. 
- name: Install Ruff - run: pip install ruff==0.0.272 + run: pip install ruff==0.1.6 - name: Run Ruff run: ruff . lint-js: diff --git a/pyproject.toml b/pyproject.toml index 80541a8f..d03036e7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,7 @@ exclude = [ ignore = [ "E501", # Line too long + "E721", # Do not compare types, use `isinstance` "E731", # Do not assign a `lambda` expression, use a `def` "I001", # Import block is un-sorted or un-formatted -- cgit v1.2.3 From 066afda2f6f650fe108d285a239d08d59d92590d Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Wed, 22 Nov 2023 18:02:39 +0200 Subject: Simplify restart_sampler (suggested by ruff) --- modules/sd_samplers_extra.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers_extra.py b/modules/sd_samplers_extra.py index 1b981ca8..72fd0aa5 100644 --- a/modules/sd_samplers_extra.py +++ b/modules/sd_samplers_extra.py @@ -60,7 +60,7 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1] while restart_times > 0: restart_times -= 1 - step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])]) + step_list.extend(zip(sigma_restart[:-1], sigma_restart[1:])) last_sigma = None for old_sigma, new_sigma in tqdm.tqdm(step_list, disable=disable): -- cgit v1.2.3 From ac2a981c4f30d77cdb674948fe0e2aa7264a93e1 Mon Sep 17 00:00:00 2001 From: wfjsw Date: Wed, 22 Nov 2023 22:40:24 -0600 Subject: use extension name for determining an extension is installed in the index --- modules/ui_extensions.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index c0a73b57..b6708881 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -335,6 +335,11 @@ def normalize_git_url(url): return url +def get_extension_dirname_from_url(url): + *parts, last_part = url.split('/') + return normalize_git_url(last_part) + + def install_extension_from_url(dirname, url, branch_name=None): check_access() @@ -346,10 +351,7 @@ def install_extension_from_url(dirname, url, branch_name=None): assert url, 'No URL specified' if dirname is None or dirname == "": - *parts, last_part = url.split('/') - last_part = normalize_git_url(last_part) - - dirname = last_part + dirname = get_extension_dirname_from_url(url) target_dir = os.path.join(extensions.extensions_dir, dirname) assert not os.path.exists(target_dir), f'Extension directory already exists: {target_dir}' @@ -449,7 +451,7 @@ def get_date(info: dict, key): def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text=""): extlist = available_extensions["extensions"] - installed_extension_urls = {normalize_git_url(extension.remote): extension.name for extension in extensions.extensions} + installed_extensions = {extension.name for extension in extensions.extensions} tags = available_extensions.get("tags", {}) tags_to_hide = set(hide_tags) @@ -482,7 +484,7 @@ def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text=" if url is None: continue - existing = installed_extension_urls.get(normalize_git_url(url), None) + existing = get_extension_dirname_from_url(url) in installed_extensions extension_tags = extension_tags + ["installed"] if existing else extension_tags if any(x for x in extension_tags if x in tags_to_hide): -- cgit v1.2.3 From 
86b99b1e98fcdd6e7e5f6017071944364e01e6ad Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Fri, 24 Nov 2023 11:28:54 -0600 Subject: Move exception_records related methods to errors.py --- modules/errors.py | 18 ++++++++++++++++-- modules/sysinfo.py | 17 +---------------- 2 files changed, 17 insertions(+), 18 deletions(-) diff --git a/modules/errors.py b/modules/errors.py index 192cd8ff..ac9f1ee5 100644 --- a/modules/errors.py +++ b/modules/errors.py @@ -6,6 +6,21 @@ import traceback exception_records = [] +def format_traceback(tb): + return [[f"{x.filename}, line {x.lineno}, {x.name}", x.line] for x in traceback.extract_tb(tb)] + + +def format_exception(e, tb): + return {"exception": str(e), "traceback": format_traceback(tb)} + + +def get_exceptions(): + try: + return list(reversed(exception_records)) + except Exception as e: + return str(e) + + def record_exception(): _, e, tb = sys.exc_info() if e is None: @@ -14,8 +29,7 @@ def record_exception(): if exception_records and exception_records[-1] == e: return - from modules import sysinfo - exception_records.append(sysinfo.format_exception(e, tb)) + exception_records.append(format_exception(e, tb)) if len(exception_records) > 5: exception_records.pop(0) diff --git a/modules/sysinfo.py b/modules/sysinfo.py index 7d906e1f..226b204d 100644 --- a/modules/sysinfo.py +++ b/modules/sysinfo.py @@ -85,7 +85,7 @@ def get_dict(): "Checksum": checksum_token, "Commandline": sys.argv, "Torch env info": get_torch_sysinfo(), - "Exceptions": get_exceptions(), + "Exceptions": errors.get_exceptions(), "CPU": { "model": platform.processor(), "count logical": psutil.cpu_count(logical=True), @@ -105,21 +105,6 @@ def get_dict(): return res -def format_traceback(tb): - return [[f"{x.filename}, line {x.lineno}, {x.name}", x.line] for x in traceback.extract_tb(tb)] - - -def format_exception(e, tb): - return {"exception": str(e), "traceback": format_traceback(tb)} - - -def get_exceptions(): - try: - return list(reversed(errors.exception_records)) - except Exception as e: - return str(e) - - def get_environment(): return {k: os.environ[k] for k in sorted(os.environ) if k in environment_whitelist} -- cgit v1.2.3 From 5cedc8f9b2b51f392e7c8f5e29286466e3bee8d6 Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Fri, 24 Nov 2023 11:30:30 -0600 Subject: remove traceback in sysinfo --- modules/sysinfo.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/sysinfo.py b/modules/sysinfo.py index 226b204d..1d058950 100644 --- a/modules/sysinfo.py +++ b/modules/sysinfo.py @@ -1,7 +1,6 @@ import json import os import sys -import traceback import platform import hashlib -- cgit v1.2.3 From 3a9bf4ac10d99feb81b0e637417a108d3fa5ac06 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 26 Nov 2023 08:29:12 +0300 Subject: move file --- extensions-builtin/hypertile/hypertile.py | 371 ++++++++++++++++++++++++++++++ modules/hypertile.py | 371 ------------------------------ 2 files changed, 371 insertions(+), 371 deletions(-) create mode 100644 extensions-builtin/hypertile/hypertile.py delete mode 100644 modules/hypertile.py diff --git a/extensions-builtin/hypertile/hypertile.py b/extensions-builtin/hypertile/hypertile.py new file mode 100644 index 00000000..be898fce --- /dev/null +++ b/extensions-builtin/hypertile/hypertile.py @@ -0,0 +1,371 @@ +""" +Hypertile module for splitting attention layers in SD-1.5 U-Net and SD-1.5 VAE +Warn : The patch works well only if the input image has a width and height that are multiples of 128 +Author : @tfernd 
Github : https://github.com/tfernd/HyperTile +""" + +from __future__ import annotations +from typing import Callable +from typing_extensions import Literal + +import logging +from functools import wraps, cache +from contextlib import contextmanager + +import math +import torch.nn as nn +import random + +from einops import rearrange + +# TODO add SD-XL layers +DEPTH_LAYERS = { + 0: [ + # SD 1.5 U-Net (diffusers) + "down_blocks.0.attentions.0.transformer_blocks.0.attn1", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "input_blocks.1.1.transformer_blocks.0.attn1", + "input_blocks.2.1.transformer_blocks.0.attn1", + "output_blocks.9.1.transformer_blocks.0.attn1", + "output_blocks.10.1.transformer_blocks.0.attn1", + "output_blocks.11.1.transformer_blocks.0.attn1", + # SD 1.5 VAE + "decoder.mid_block.attentions.0", + "decoder.mid.attn_1", + ], + 1: [ + # SD 1.5 U-Net (diffusers) + "down_blocks.1.attentions.0.transformer_blocks.0.attn1", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "input_blocks.4.1.transformer_blocks.0.attn1", + "input_blocks.5.1.transformer_blocks.0.attn1", + "output_blocks.6.1.transformer_blocks.0.attn1", + "output_blocks.7.1.transformer_blocks.0.attn1", + "output_blocks.8.1.transformer_blocks.0.attn1", + ], + 2: [ + # SD 1.5 U-Net (diffusers) + "down_blocks.2.attentions.0.transformer_blocks.0.attn1", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "input_blocks.7.1.transformer_blocks.0.attn1", + "input_blocks.8.1.transformer_blocks.0.attn1", + "output_blocks.3.1.transformer_blocks.0.attn1", + "output_blocks.4.1.transformer_blocks.0.attn1", + "output_blocks.5.1.transformer_blocks.0.attn1", + ], + 3: [ + # SD 1.5 U-Net (diffusers) + "mid_block.attentions.0.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "middle_block.1.transformer_blocks.0.attn1", + ], +} +# XL layers, thanks for GitHub@gel-crabs for the help +DEPTH_LAYERS_XL = { + 0: [ + # SD 1.5 U-Net (diffusers) + "down_blocks.0.attentions.0.transformer_blocks.0.attn1", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "input_blocks.4.1.transformer_blocks.0.attn1", + "input_blocks.5.1.transformer_blocks.0.attn1", + "output_blocks.3.1.transformer_blocks.0.attn1", + "output_blocks.4.1.transformer_blocks.0.attn1", + "output_blocks.5.1.transformer_blocks.0.attn1", + # SD 1.5 VAE + "decoder.mid_block.attentions.0", + "decoder.mid.attn_1", + ], + 1: [ + # SD 1.5 U-Net (diffusers) + #"down_blocks.1.attentions.0.transformer_blocks.0.attn1", + #"down_blocks.1.attentions.1.transformer_blocks.0.attn1", + #"up_blocks.2.attentions.0.transformer_blocks.0.attn1", + #"up_blocks.2.attentions.1.transformer_blocks.0.attn1", + #"up_blocks.2.attentions.2.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + 
"input_blocks.4.1.transformer_blocks.1.attn1", + "input_blocks.5.1.transformer_blocks.1.attn1", + "output_blocks.3.1.transformer_blocks.1.attn1", + "output_blocks.4.1.transformer_blocks.1.attn1", + "output_blocks.5.1.transformer_blocks.1.attn1", + "input_blocks.7.1.transformer_blocks.0.attn1", + "input_blocks.8.1.transformer_blocks.0.attn1", + "output_blocks.0.1.transformer_blocks.0.attn1", + "output_blocks.1.1.transformer_blocks.0.attn1", + "output_blocks.2.1.transformer_blocks.0.attn1", + "input_blocks.7.1.transformer_blocks.1.attn1", + "input_blocks.8.1.transformer_blocks.1.attn1", + "output_blocks.0.1.transformer_blocks.1.attn1", + "output_blocks.1.1.transformer_blocks.1.attn1", + "output_blocks.2.1.transformer_blocks.1.attn1", + "input_blocks.7.1.transformer_blocks.2.attn1", + "input_blocks.8.1.transformer_blocks.2.attn1", + "output_blocks.0.1.transformer_blocks.2.attn1", + "output_blocks.1.1.transformer_blocks.2.attn1", + "output_blocks.2.1.transformer_blocks.2.attn1", + "input_blocks.7.1.transformer_blocks.3.attn1", + "input_blocks.8.1.transformer_blocks.3.attn1", + "output_blocks.0.1.transformer_blocks.3.attn1", + "output_blocks.1.1.transformer_blocks.3.attn1", + "output_blocks.2.1.transformer_blocks.3.attn1", + "input_blocks.7.1.transformer_blocks.4.attn1", + "input_blocks.8.1.transformer_blocks.4.attn1", + "output_blocks.0.1.transformer_blocks.4.attn1", + "output_blocks.1.1.transformer_blocks.4.attn1", + "output_blocks.2.1.transformer_blocks.4.attn1", + "input_blocks.7.1.transformer_blocks.5.attn1", + "input_blocks.8.1.transformer_blocks.5.attn1", + "output_blocks.0.1.transformer_blocks.5.attn1", + "output_blocks.1.1.transformer_blocks.5.attn1", + "output_blocks.2.1.transformer_blocks.5.attn1", + "input_blocks.7.1.transformer_blocks.6.attn1", + "input_blocks.8.1.transformer_blocks.6.attn1", + "output_blocks.0.1.transformer_blocks.6.attn1", + "output_blocks.1.1.transformer_blocks.6.attn1", + "output_blocks.2.1.transformer_blocks.6.attn1", + "input_blocks.7.1.transformer_blocks.7.attn1", + "input_blocks.8.1.transformer_blocks.7.attn1", + "output_blocks.0.1.transformer_blocks.7.attn1", + "output_blocks.1.1.transformer_blocks.7.attn1", + "output_blocks.2.1.transformer_blocks.7.attn1", + "input_blocks.7.1.transformer_blocks.8.attn1", + "input_blocks.8.1.transformer_blocks.8.attn1", + "output_blocks.0.1.transformer_blocks.8.attn1", + "output_blocks.1.1.transformer_blocks.8.attn1", + "output_blocks.2.1.transformer_blocks.8.attn1", + "input_blocks.7.1.transformer_blocks.9.attn1", + "input_blocks.8.1.transformer_blocks.9.attn1", + "output_blocks.0.1.transformer_blocks.9.attn1", + "output_blocks.1.1.transformer_blocks.9.attn1", + "output_blocks.2.1.transformer_blocks.9.attn1", + ], + 2: [ + # SD 1.5 U-Net (diffusers) + "mid_block.attentions.0.transformer_blocks.0.attn1", + # SD 1.5 U-Net (ldm) + "middle_block.1.transformer_blocks.0.attn1", + "middle_block.1.transformer_blocks.1.attn1", + "middle_block.1.transformer_blocks.2.attn1", + "middle_block.1.transformer_blocks.3.attn1", + "middle_block.1.transformer_blocks.4.attn1", + "middle_block.1.transformer_blocks.5.attn1", + "middle_block.1.transformer_blocks.6.attn1", + "middle_block.1.transformer_blocks.7.attn1", + "middle_block.1.transformer_blocks.8.attn1", + "middle_block.1.transformer_blocks.9.attn1", + ], + 3 : [] # TODO - separate layers for SD-XL +} + + +RNG_INSTANCE = random.Random() + +def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int: + """ + Returns a random divisor of value that + x * min_value <= 
value + if max_options is 1, the behavior is deterministic + """ + min_value = min(min_value, value) + + # All big divisors of value (inclusive) + divisors = [i for i in range(min_value, value + 1) if value % i == 0] # divisors in small -> big order + + ns = [value // i for i in divisors[:max_options]] # has at least 1 element # big -> small order + + idx = RNG_INSTANCE.randint(0, len(ns) - 1) + + return ns[idx] + +def set_hypertile_seed(seed: int) -> None: + RNG_INSTANCE.seed(seed) + +def largest_tile_size_available(width:int, height:int) -> int: + """ + Calculates the largest tile size available for a given width and height + Tile size is always a power of 2 + """ + gcd = math.gcd(width, height) + largest_tile_size_available = 1 + while gcd % (largest_tile_size_available * 2) == 0: + largest_tile_size_available *= 2 + return largest_tile_size_available + +def iterative_closest_divisors(hw:int, aspect_ratio:float) -> tuple[int, int]: + """ + Finds h and w such that h*w = hw and h/w = aspect_ratio + We check all possible divisors of hw and return the closest to the aspect ratio + """ + divisors = [i for i in range(2, hw + 1) if hw % i == 0] # all divisors of hw + pairs = [(i, hw // i) for i in divisors] # all pairs of divisors of hw + ratios = [w/h for h, w in pairs] # all ratios of pairs of divisors of hw + closest_ratio = min(ratios, key=lambda x: abs(x - aspect_ratio)) # closest ratio to aspect_ratio + closest_pair = pairs[ratios.index(closest_ratio)] # closest pair of divisors to aspect_ratio + return closest_pair + +@cache +def find_hw_candidates(hw:int, aspect_ratio:float) -> tuple[int, int]: + """ + Finds h and w such that h*w = hw and h/w = aspect_ratio + """ + h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio)) + # find h and w such that h*w = hw and h/w = aspect_ratio + if h * w != hw: + w_candidate = hw / h + # check if w is an integer + if not w_candidate.is_integer(): + h_candidate = hw / w + # check if h is an integer + if not h_candidate.is_integer(): + return iterative_closest_divisors(hw, aspect_ratio) + else: + h = int(h_candidate) + else: + w = int(w_candidate) + return h, w + +@contextmanager +def split_attention( + layer: nn.Module, + /, + aspect_ratio: float, # width/height + tile_size: int = 128, # 128 for VAE + swap_size: int = 1, # 1 for VAE + *, + disable: bool = False, + max_depth: Literal[0, 1, 2, 3] = 0, # ! 
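Concretely, for the helpers above: `largest_tile_size_available` doubles a power of two for as long as it divides `gcd(width, height)`, and `random_divisor` is deterministic whenever `max_options` is 1 because the candidate list then holds exactly one element. A worked example (values chosen for illustration):

```python
import math
import random

RNG_INSTANCE = random.Random()


def random_divisor(value, min_value, max_options=1):
    min_value = min(min_value, value)
    divisors = [i for i in range(min_value, value + 1) if value % i == 0]
    ns = [value // i for i in divisors[:max_options]]  # has at least one element
    return ns[RNG_INSTANCE.randint(0, len(ns) - 1)]


def largest_tile_size_available(width, height):
    gcd = math.gcd(width, height)
    tile = 1
    while gcd % (tile * 2) == 0:
        tile *= 2
    return tile


assert largest_tile_size_available(768, 512) == 256  # gcd is 256 == 2**8
assert random_divisor(96, 16) == 6                   # only candidate: 96 // 16
```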
Try 0 or 1 + scale_depth: bool = True, # scale the tile-size depending on the depth + is_sdxl: bool = False, # is the model SD-XL +): + # Hijacks AttnBlock from ldm and Attention from diffusers + + if disable: + logging.info(f"Attention for {layer.__class__.__qualname__} not splitted") + yield + return + + latent_tile_size = max(128, tile_size) // 8 + + def self_attn_forward(forward: Callable, depth: int, layer_name: str, module: nn.Module) -> Callable: + @wraps(forward) + def wrapper(*args, **kwargs): + x = args[0] + + # VAE + if x.ndim == 4: + b, c, h, w = x.shape + + nh = random_divisor(h, latent_tile_size, swap_size) + nw = random_divisor(w, latent_tile_size, swap_size) + + if nh * nw > 1: + x = rearrange(x, "b c (nh h) (nw w) -> (b nh nw) c h w", nh=nh, nw=nw) # split into nh * nw tiles + + out = forward(x, *args[1:], **kwargs) + + if nh * nw > 1: + out = rearrange(out, "(b nh nw) c h w -> b c (nh h) (nw w)", nh=nh, nw=nw) + + # U-Net + else: + hw: int = x.size(1) + h, w = find_hw_candidates(hw, aspect_ratio) + assert h * w == hw, f"Invalid aspect ratio {aspect_ratio} for input of shape {x.shape}, hw={hw}, h={h}, w={w}" + + factor = 2**depth if scale_depth else 1 + nh = random_divisor(h, latent_tile_size * factor, swap_size) + nw = random_divisor(w, latent_tile_size * factor, swap_size) + + module._split_sizes_hypertile.append((nh, nw)) # type: ignore + + if nh * nw > 1: + x = rearrange(x, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw) + + out = forward(x, *args[1:], **kwargs) + + if nh * nw > 1: + out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw) + out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw) + + return out + + return wrapper + + # Handle hijacking the forward method and recovering afterwards + try: + if is_sdxl: + layers = DEPTH_LAYERS_XL + else: + layers = DEPTH_LAYERS + for depth in range(max_depth + 1): + for layer_name, module in layer.named_modules(): + if any(layer_name.endswith(try_name) for try_name in layers[depth]): + # print input shape for debugging + logging.debug(f"HyperTile hijacking attention layer at depth {depth}: {layer_name}") + # hijack + module._original_forward_hypertile = module.forward + module.forward = self_attn_forward(module.forward, depth, layer_name, module) + module._split_sizes_hypertile = [] + yield + finally: + for layer_name, module in layer.named_modules(): + # remove hijack + if hasattr(module, "_original_forward_hypertile"): + if module._split_sizes_hypertile: + logging.debug(f"layer {layer_name} splitted with ({module._split_sizes_hypertile})") + # recover + module.forward = module._original_forward_hypertile + del module._original_forward_hypertile + del module._split_sizes_hypertile + +def hypertile_context_vae(model:nn.Module, aspect_ratio:float, tile_size:int, opts): + """ + Returns context manager for VAE + """ + enabled = opts.hypertile_split_vae_attn + swap_size = opts.hypertile_swap_size_vae + max_depth = opts.hypertile_max_depth_vae + tile_size_max = opts.hypertile_max_tile_vae + return split_attention( + model, + aspect_ratio=aspect_ratio, + tile_size=min(tile_size, tile_size_max), + swap_size=swap_size, + disable=not enabled, + max_depth=max_depth, + is_sdxl=False, + ) + +def hypertile_context_unet(model:nn.Module, aspect_ratio:float, tile_size:int, opts, is_sdxl:bool): + """ + Returns context manager for U-Net + """ + enabled = opts.hypertile_split_unet_attn + swap_size = opts.hypertile_swap_size_unet + max_depth = opts.hypertile_max_depth_unet + 
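The tiling that `split_attention` performs is just an einops reshape: the spatial map is cut into `nh * nw` tiles that are folded into the batch dimension, attention runs per tile, and the inverse `rearrange` restores the full map, so the operation is exactly invertible. A minimal round-trip on a dummy tensor:

```python
import torch
from einops import rearrange

x = torch.randn(1, 4, 64, 96)  # b c h w, as in the VAE branch above
nh, nw = 2, 3

tiles = rearrange(x, "b c (nh h) (nw w) -> (b nh nw) c h w", nh=nh, nw=nw)
assert tiles.shape == (6, 4, 32, 32)  # six tiles, each attended to independently

back = rearrange(tiles, "(b nh nw) c h w -> b c (nh h) (nw w)", nh=nh, nw=nw)
assert torch.equal(x, back)  # exact inverse, no information lost
```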
tile_size_max = opts.hypertile_max_tile_unet + return split_attention( + model, + aspect_ratio=aspect_ratio, + tile_size=min(tile_size, tile_size_max), + swap_size=swap_size, + disable=not enabled, + max_depth=max_depth, + is_sdxl=is_sdxl, + ) diff --git a/modules/hypertile.py b/modules/hypertile.py deleted file mode 100644 index be898fce..00000000 --- a/modules/hypertile.py +++ /dev/null @@ -1,371 +0,0 @@ -""" -Hypertile module for splitting attention layers in SD-1.5 U-Net and SD-1.5 VAE -Warn : The patch works well only if the input image has a width and height that are multiples of 128 -Author : @tfernd Github : https://github.com/tfernd/HyperTile -""" - -from __future__ import annotations -from typing import Callable -from typing_extensions import Literal - -import logging -from functools import wraps, cache -from contextlib import contextmanager - -import math -import torch.nn as nn -import random - -from einops import rearrange - -# TODO add SD-XL layers -DEPTH_LAYERS = { - 0: [ - # SD 1.5 U-Net (diffusers) - "down_blocks.0.attentions.0.transformer_blocks.0.attn1", - "down_blocks.0.attentions.1.transformer_blocks.0.attn1", - "up_blocks.3.attentions.0.transformer_blocks.0.attn1", - "up_blocks.3.attentions.1.transformer_blocks.0.attn1", - "up_blocks.3.attentions.2.transformer_blocks.0.attn1", - # SD 1.5 U-Net (ldm) - "input_blocks.1.1.transformer_blocks.0.attn1", - "input_blocks.2.1.transformer_blocks.0.attn1", - "output_blocks.9.1.transformer_blocks.0.attn1", - "output_blocks.10.1.transformer_blocks.0.attn1", - "output_blocks.11.1.transformer_blocks.0.attn1", - # SD 1.5 VAE - "decoder.mid_block.attentions.0", - "decoder.mid.attn_1", - ], - 1: [ - # SD 1.5 U-Net (diffusers) - "down_blocks.1.attentions.0.transformer_blocks.0.attn1", - "down_blocks.1.attentions.1.transformer_blocks.0.attn1", - "up_blocks.2.attentions.0.transformer_blocks.0.attn1", - "up_blocks.2.attentions.1.transformer_blocks.0.attn1", - "up_blocks.2.attentions.2.transformer_blocks.0.attn1", - # SD 1.5 U-Net (ldm) - "input_blocks.4.1.transformer_blocks.0.attn1", - "input_blocks.5.1.transformer_blocks.0.attn1", - "output_blocks.6.1.transformer_blocks.0.attn1", - "output_blocks.7.1.transformer_blocks.0.attn1", - "output_blocks.8.1.transformer_blocks.0.attn1", - ], - 2: [ - # SD 1.5 U-Net (diffusers) - "down_blocks.2.attentions.0.transformer_blocks.0.attn1", - "down_blocks.2.attentions.1.transformer_blocks.0.attn1", - "up_blocks.1.attentions.0.transformer_blocks.0.attn1", - "up_blocks.1.attentions.1.transformer_blocks.0.attn1", - "up_blocks.1.attentions.2.transformer_blocks.0.attn1", - # SD 1.5 U-Net (ldm) - "input_blocks.7.1.transformer_blocks.0.attn1", - "input_blocks.8.1.transformer_blocks.0.attn1", - "output_blocks.3.1.transformer_blocks.0.attn1", - "output_blocks.4.1.transformer_blocks.0.attn1", - "output_blocks.5.1.transformer_blocks.0.attn1", - ], - 3: [ - # SD 1.5 U-Net (diffusers) - "mid_block.attentions.0.transformer_blocks.0.attn1", - # SD 1.5 U-Net (ldm) - "middle_block.1.transformer_blocks.0.attn1", - ], -} -# XL layers, thanks for GitHub@gel-crabs for the help -DEPTH_LAYERS_XL = { - 0: [ - # SD 1.5 U-Net (diffusers) - "down_blocks.0.attentions.0.transformer_blocks.0.attn1", - "down_blocks.0.attentions.1.transformer_blocks.0.attn1", - "up_blocks.3.attentions.0.transformer_blocks.0.attn1", - "up_blocks.3.attentions.1.transformer_blocks.0.attn1", - "up_blocks.3.attentions.2.transformer_blocks.0.attn1", - # SD 1.5 U-Net (ldm) - "input_blocks.4.1.transformer_blocks.0.attn1", - 
"input_blocks.5.1.transformer_blocks.0.attn1", - "output_blocks.3.1.transformer_blocks.0.attn1", - "output_blocks.4.1.transformer_blocks.0.attn1", - "output_blocks.5.1.transformer_blocks.0.attn1", - # SD 1.5 VAE - "decoder.mid_block.attentions.0", - "decoder.mid.attn_1", - ], - 1: [ - # SD 1.5 U-Net (diffusers) - #"down_blocks.1.attentions.0.transformer_blocks.0.attn1", - #"down_blocks.1.attentions.1.transformer_blocks.0.attn1", - #"up_blocks.2.attentions.0.transformer_blocks.0.attn1", - #"up_blocks.2.attentions.1.transformer_blocks.0.attn1", - #"up_blocks.2.attentions.2.transformer_blocks.0.attn1", - # SD 1.5 U-Net (ldm) - "input_blocks.4.1.transformer_blocks.1.attn1", - "input_blocks.5.1.transformer_blocks.1.attn1", - "output_blocks.3.1.transformer_blocks.1.attn1", - "output_blocks.4.1.transformer_blocks.1.attn1", - "output_blocks.5.1.transformer_blocks.1.attn1", - "input_blocks.7.1.transformer_blocks.0.attn1", - "input_blocks.8.1.transformer_blocks.0.attn1", - "output_blocks.0.1.transformer_blocks.0.attn1", - "output_blocks.1.1.transformer_blocks.0.attn1", - "output_blocks.2.1.transformer_blocks.0.attn1", - "input_blocks.7.1.transformer_blocks.1.attn1", - "input_blocks.8.1.transformer_blocks.1.attn1", - "output_blocks.0.1.transformer_blocks.1.attn1", - "output_blocks.1.1.transformer_blocks.1.attn1", - "output_blocks.2.1.transformer_blocks.1.attn1", - "input_blocks.7.1.transformer_blocks.2.attn1", - "input_blocks.8.1.transformer_blocks.2.attn1", - "output_blocks.0.1.transformer_blocks.2.attn1", - "output_blocks.1.1.transformer_blocks.2.attn1", - "output_blocks.2.1.transformer_blocks.2.attn1", - "input_blocks.7.1.transformer_blocks.3.attn1", - "input_blocks.8.1.transformer_blocks.3.attn1", - "output_blocks.0.1.transformer_blocks.3.attn1", - "output_blocks.1.1.transformer_blocks.3.attn1", - "output_blocks.2.1.transformer_blocks.3.attn1", - "input_blocks.7.1.transformer_blocks.4.attn1", - "input_blocks.8.1.transformer_blocks.4.attn1", - "output_blocks.0.1.transformer_blocks.4.attn1", - "output_blocks.1.1.transformer_blocks.4.attn1", - "output_blocks.2.1.transformer_blocks.4.attn1", - "input_blocks.7.1.transformer_blocks.5.attn1", - "input_blocks.8.1.transformer_blocks.5.attn1", - "output_blocks.0.1.transformer_blocks.5.attn1", - "output_blocks.1.1.transformer_blocks.5.attn1", - "output_blocks.2.1.transformer_blocks.5.attn1", - "input_blocks.7.1.transformer_blocks.6.attn1", - "input_blocks.8.1.transformer_blocks.6.attn1", - "output_blocks.0.1.transformer_blocks.6.attn1", - "output_blocks.1.1.transformer_blocks.6.attn1", - "output_blocks.2.1.transformer_blocks.6.attn1", - "input_blocks.7.1.transformer_blocks.7.attn1", - "input_blocks.8.1.transformer_blocks.7.attn1", - "output_blocks.0.1.transformer_blocks.7.attn1", - "output_blocks.1.1.transformer_blocks.7.attn1", - "output_blocks.2.1.transformer_blocks.7.attn1", - "input_blocks.7.1.transformer_blocks.8.attn1", - "input_blocks.8.1.transformer_blocks.8.attn1", - "output_blocks.0.1.transformer_blocks.8.attn1", - "output_blocks.1.1.transformer_blocks.8.attn1", - "output_blocks.2.1.transformer_blocks.8.attn1", - "input_blocks.7.1.transformer_blocks.9.attn1", - "input_blocks.8.1.transformer_blocks.9.attn1", - "output_blocks.0.1.transformer_blocks.9.attn1", - "output_blocks.1.1.transformer_blocks.9.attn1", - "output_blocks.2.1.transformer_blocks.9.attn1", - ], - 2: [ - # SD 1.5 U-Net (diffusers) - "mid_block.attentions.0.transformer_blocks.0.attn1", - # SD 1.5 U-Net (ldm) - "middle_block.1.transformer_blocks.0.attn1", - 
"middle_block.1.transformer_blocks.1.attn1", - "middle_block.1.transformer_blocks.2.attn1", - "middle_block.1.transformer_blocks.3.attn1", - "middle_block.1.transformer_blocks.4.attn1", - "middle_block.1.transformer_blocks.5.attn1", - "middle_block.1.transformer_blocks.6.attn1", - "middle_block.1.transformer_blocks.7.attn1", - "middle_block.1.transformer_blocks.8.attn1", - "middle_block.1.transformer_blocks.9.attn1", - ], - 3 : [] # TODO - separate layers for SD-XL -} - - -RNG_INSTANCE = random.Random() - -def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int: - """ - Returns a random divisor of value that - x * min_value <= value - if max_options is 1, the behavior is deterministic - """ - min_value = min(min_value, value) - - # All big divisors of value (inclusive) - divisors = [i for i in range(min_value, value + 1) if value % i == 0] # divisors in small -> big order - - ns = [value // i for i in divisors[:max_options]] # has at least 1 element # big -> small order - - idx = RNG_INSTANCE.randint(0, len(ns) - 1) - - return ns[idx] - -def set_hypertile_seed(seed: int) -> None: - RNG_INSTANCE.seed(seed) - -def largest_tile_size_available(width:int, height:int) -> int: - """ - Calculates the largest tile size available for a given width and height - Tile size is always a power of 2 - """ - gcd = math.gcd(width, height) - largest_tile_size_available = 1 - while gcd % (largest_tile_size_available * 2) == 0: - largest_tile_size_available *= 2 - return largest_tile_size_available - -def iterative_closest_divisors(hw:int, aspect_ratio:float) -> tuple[int, int]: - """ - Finds h and w such that h*w = hw and h/w = aspect_ratio - We check all possible divisors of hw and return the closest to the aspect ratio - """ - divisors = [i for i in range(2, hw + 1) if hw % i == 0] # all divisors of hw - pairs = [(i, hw // i) for i in divisors] # all pairs of divisors of hw - ratios = [w/h for h, w in pairs] # all ratios of pairs of divisors of hw - closest_ratio = min(ratios, key=lambda x: abs(x - aspect_ratio)) # closest ratio to aspect_ratio - closest_pair = pairs[ratios.index(closest_ratio)] # closest pair of divisors to aspect_ratio - return closest_pair - -@cache -def find_hw_candidates(hw:int, aspect_ratio:float) -> tuple[int, int]: - """ - Finds h and w such that h*w = hw and h/w = aspect_ratio - """ - h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio)) - # find h and w such that h*w = hw and h/w = aspect_ratio - if h * w != hw: - w_candidate = hw / h - # check if w is an integer - if not w_candidate.is_integer(): - h_candidate = hw / w - # check if h is an integer - if not h_candidate.is_integer(): - return iterative_closest_divisors(hw, aspect_ratio) - else: - h = int(h_candidate) - else: - w = int(w_candidate) - return h, w - -@contextmanager -def split_attention( - layer: nn.Module, - /, - aspect_ratio: float, # width/height - tile_size: int = 128, # 128 for VAE - swap_size: int = 1, # 1 for VAE - *, - disable: bool = False, - max_depth: Literal[0, 1, 2, 3] = 0, # ! 
Try 0 or 1 - scale_depth: bool = True, # scale the tile-size depending on the depth - is_sdxl: bool = False, # is the model SD-XL -): - # Hijacks AttnBlock from ldm and Attention from diffusers - - if disable: - logging.info(f"Attention for {layer.__class__.__qualname__} not splitted") - yield - return - - latent_tile_size = max(128, tile_size) // 8 - - def self_attn_forward(forward: Callable, depth: int, layer_name: str, module: nn.Module) -> Callable: - @wraps(forward) - def wrapper(*args, **kwargs): - x = args[0] - - # VAE - if x.ndim == 4: - b, c, h, w = x.shape - - nh = random_divisor(h, latent_tile_size, swap_size) - nw = random_divisor(w, latent_tile_size, swap_size) - - if nh * nw > 1: - x = rearrange(x, "b c (nh h) (nw w) -> (b nh nw) c h w", nh=nh, nw=nw) # split into nh * nw tiles - - out = forward(x, *args[1:], **kwargs) - - if nh * nw > 1: - out = rearrange(out, "(b nh nw) c h w -> b c (nh h) (nw w)", nh=nh, nw=nw) - - # U-Net - else: - hw: int = x.size(1) - h, w = find_hw_candidates(hw, aspect_ratio) - assert h * w == hw, f"Invalid aspect ratio {aspect_ratio} for input of shape {x.shape}, hw={hw}, h={h}, w={w}" - - factor = 2**depth if scale_depth else 1 - nh = random_divisor(h, latent_tile_size * factor, swap_size) - nw = random_divisor(w, latent_tile_size * factor, swap_size) - - module._split_sizes_hypertile.append((nh, nw)) # type: ignore - - if nh * nw > 1: - x = rearrange(x, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw) - - out = forward(x, *args[1:], **kwargs) - - if nh * nw > 1: - out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw) - out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw) - - return out - - return wrapper - - # Handle hijacking the forward method and recovering afterwards - try: - if is_sdxl: - layers = DEPTH_LAYERS_XL - else: - layers = DEPTH_LAYERS - for depth in range(max_depth + 1): - for layer_name, module in layer.named_modules(): - if any(layer_name.endswith(try_name) for try_name in layers[depth]): - # print input shape for debugging - logging.debug(f"HyperTile hijacking attention layer at depth {depth}: {layer_name}") - # hijack - module._original_forward_hypertile = module.forward - module.forward = self_attn_forward(module.forward, depth, layer_name, module) - module._split_sizes_hypertile = [] - yield - finally: - for layer_name, module in layer.named_modules(): - # remove hijack - if hasattr(module, "_original_forward_hypertile"): - if module._split_sizes_hypertile: - logging.debug(f"layer {layer_name} splitted with ({module._split_sizes_hypertile})") - # recover - module.forward = module._original_forward_hypertile - del module._original_forward_hypertile - del module._split_sizes_hypertile - -def hypertile_context_vae(model:nn.Module, aspect_ratio:float, tile_size:int, opts): - """ - Returns context manager for VAE - """ - enabled = opts.hypertile_split_vae_attn - swap_size = opts.hypertile_swap_size_vae - max_depth = opts.hypertile_max_depth_vae - tile_size_max = opts.hypertile_max_tile_vae - return split_attention( - model, - aspect_ratio=aspect_ratio, - tile_size=min(tile_size, tile_size_max), - swap_size=swap_size, - disable=not enabled, - max_depth=max_depth, - is_sdxl=False, - ) - -def hypertile_context_unet(model:nn.Module, aspect_ratio:float, tile_size:int, opts, is_sdxl:bool): - """ - Returns context manager for U-Net - """ - enabled = opts.hypertile_split_unet_attn - swap_size = opts.hypertile_swap_size_unet - max_depth = opts.hypertile_max_depth_unet - 
tile_size_max = opts.hypertile_max_tile_unet - return split_attention( - model, - aspect_ratio=aspect_ratio, - tile_size=min(tile_size, tile_size_max), - swap_size=swap_size, - disable=not enabled, - max_depth=max_depth, - is_sdxl=is_sdxl, - ) -- cgit v1.2.3 From d2e0c1ca132f4f0d98b77397a9f353d4ad8e7c4b Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 26 Nov 2023 10:51:45 +0300 Subject: rework hypertile into a built-in extension --- README.md | 1 + extensions-builtin/hypertile/hypertile.py | 221 +++++++++------------ .../hypertile/scripts/hypertile_script.py | 73 +++++++ modules/processing.py | 37 ++-- modules/shared_options.py | 8 - 5 files changed, 186 insertions(+), 154 deletions(-) create mode 100644 extensions-builtin/hypertile/scripts/hypertile_script.py diff --git a/README.md b/README.md index 25ba070e..3b3f93ad 100644 --- a/README.md +++ b/README.md @@ -174,5 +174,6 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al - TAESD - Ollin Boer Bohan - https://github.com/madebyollin/taesd - LyCORIS - KohakuBlueleaf - Restart sampling - lambertae - https://github.com/Newbeeer/diffusion_restart_sampling +- Hypertile - tfernd - https://github.com/tfernd/HyperTile - Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user. - (You) diff --git a/extensions-builtin/hypertile/hypertile.py b/extensions-builtin/hypertile/hypertile.py index be898fce..a40c1311 100644 --- a/extensions-builtin/hypertile/hypertile.py +++ b/extensions-builtin/hypertile/hypertile.py @@ -1,10 +1,13 @@ """ Hypertile module for splitting attention layers in SD-1.5 U-Net and SD-1.5 VAE -Warn : The patch works well only if the input image has a width and height that are multiples of 128 -Author : @tfernd Github : https://github.com/tfernd/HyperTile +Warn: The patch works well only if the input image has a width and height that are multiples of 128 +Original author: @tfernd Github: https://github.com/tfernd/HyperTile """ from __future__ import annotations + +import functools +from dataclasses import dataclass from typing import Callable from typing_extensions import Literal @@ -18,6 +21,19 @@ import random from einops import rearrange + +@dataclass +class HypertileParams: + depth = 0 + layer_name = "" + tile_size: int = 0 + swap_size: int = 0 + aspect_ratio: float = 1.0 + forward = None + enabled = False + + + # TODO add SD-XL layers DEPTH_LAYERS = { 0: [ @@ -176,6 +192,7 @@ DEPTH_LAYERS_XL = { RNG_INSTANCE = random.Random() + def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int: """ Returns a random divisor of value that @@ -193,10 +210,13 @@ def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int: return ns[idx] + def set_hypertile_seed(seed: int) -> None: RNG_INSTANCE.seed(seed) -def largest_tile_size_available(width:int, height:int) -> int: + +@functools.cache +def largest_tile_size_available(width: int, height: int) -> int: """ Calculates the largest tile size available for a given width and height Tile size is always a power of 2 @@ -207,6 +227,7 @@ def largest_tile_size_available(width:int, height:int) -> int: largest_tile_size_available *= 2 return largest_tile_size_available + def iterative_closest_divisors(hw:int, aspect_ratio:float) -> tuple[int, int]: """ Finds h and w such that h*w = hw and h/w = aspect_ratio @@ -219,6 +240,7 @@ def iterative_closest_divisors(hw:int, aspect_ratio:float) -> tuple[int, int]: closest_pair = pairs[ratios.index(closest_ratio)] # closest pair of 
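The structural change in this rework is visible in `HypertileParams` above: instead of a context manager that hijacks and restores `forward` around every sampling call, each attention module is wrapped once, and the wrapper consults a mutable params object on every invocation, so enabling or reconfiguring HyperTile becomes a plain field write. A stripped-down sketch of the pattern (the doubling is only a stand-in for the tiled attention path):

```python
from dataclasses import dataclass
from functools import wraps
from typing import Callable, Optional


@dataclass
class Params:
    enabled: bool = False
    forward: Optional[Callable] = None


def wrap(params: Params) -> Callable:
    @wraps(params.forward)
    def wrapper(*args, **kwargs):
        if not params.enabled:
            return params.forward(*args, **kwargs)  # cheap passthrough when disabled
        return params.forward(*args, **kwargs) * 2  # stand-in for the tiled path
    return wrapper


class Module:
    def forward(self, x):
        return x


m = Module()
params = Params(forward=m.forward)
m.forward = wrap(params)

assert m.forward(3) == 3   # disabled: passthrough
params.enabled = True
assert m.forward(3) == 6   # enabled later, without re-hooking
```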
divisors to aspect_ratio return closest_pair + @cache def find_hw_candidates(hw:int, aspect_ratio:float) -> tuple[int, int]: """ @@ -240,132 +262,87 @@ def find_hw_candidates(hw:int, aspect_ratio:float) -> tuple[int, int]: w = int(w_candidate) return h, w -@contextmanager -def split_attention( - layer: nn.Module, - /, - aspect_ratio: float, # width/height - tile_size: int = 128, # 128 for VAE - swap_size: int = 1, # 1 for VAE - *, - disable: bool = False, - max_depth: Literal[0, 1, 2, 3] = 0, # ! Try 0 or 1 - scale_depth: bool = True, # scale the tile-size depending on the depth - is_sdxl: bool = False, # is the model SD-XL -): - # Hijacks AttnBlock from ldm and Attention from diffusers - - if disable: - logging.info(f"Attention for {layer.__class__.__qualname__} not splitted") - yield - return - - latent_tile_size = max(128, tile_size) // 8 - - def self_attn_forward(forward: Callable, depth: int, layer_name: str, module: nn.Module) -> Callable: - @wraps(forward) - def wrapper(*args, **kwargs): - x = args[0] - - # VAE - if x.ndim == 4: - b, c, h, w = x.shape - - nh = random_divisor(h, latent_tile_size, swap_size) - nw = random_divisor(w, latent_tile_size, swap_size) - - if nh * nw > 1: - x = rearrange(x, "b c (nh h) (nw w) -> (b nh nw) c h w", nh=nh, nw=nw) # split into nh * nw tiles - - out = forward(x, *args[1:], **kwargs) - - if nh * nw > 1: - out = rearrange(out, "(b nh nw) c h w -> b c (nh h) (nw w)", nh=nh, nw=nw) - - # U-Net - else: - hw: int = x.size(1) - h, w = find_hw_candidates(hw, aspect_ratio) - assert h * w == hw, f"Invalid aspect ratio {aspect_ratio} for input of shape {x.shape}, hw={hw}, h={h}, w={w}" - factor = 2**depth if scale_depth else 1 - nh = random_divisor(h, latent_tile_size * factor, swap_size) - nw = random_divisor(w, latent_tile_size * factor, swap_size) +def self_attn_forward(params: HypertileParams, scale_depth=True) -> Callable: + + @wraps(params.forward) + def wrapper(*args, **kwargs): + if not params.enabled: + return params.forward(*args, **kwargs) - module._split_sizes_hypertile.append((nh, nw)) # type: ignore + latent_tile_size = max(128, params.tile_size) // 8 + x = args[0] - if nh * nw > 1: - x = rearrange(x, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw) + # VAE + if x.ndim == 4: + b, c, h, w = x.shape - out = forward(x, *args[1:], **kwargs) + nh = random_divisor(h, latent_tile_size, params.swap_size) + nw = random_divisor(w, latent_tile_size, params.swap_size) - if nh * nw > 1: - out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw) - out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw) + if nh * nw > 1: + x = rearrange(x, "b c (nh h) (nw w) -> (b nh nw) c h w", nh=nh, nw=nw) # split into nh * nw tiles - return out + out = params.forward(x, *args[1:], **kwargs) - return wrapper + if nh * nw > 1: + out = rearrange(out, "(b nh nw) c h w -> b c (nh h) (nw w)", nh=nh, nw=nw) - # Handle hijacking the forward method and recovering afterwards - try: - if is_sdxl: - layers = DEPTH_LAYERS_XL + # U-Net else: - layers = DEPTH_LAYERS - for depth in range(max_depth + 1): - for layer_name, module in layer.named_modules(): + hw: int = x.size(1) + h, w = find_hw_candidates(hw, params.aspect_ratio) + assert h * w == hw, f"Invalid aspect ratio {params.aspect_ratio} for input of shape {x.shape}, hw={hw}, h={h}, w={w}" + + factor = 2 ** params.depth if scale_depth else 1 + nh = random_divisor(h, latent_tile_size * factor, params.swap_size) + nw = random_divisor(w, latent_tile_size * factor, 
params.swap_size) + + if nh * nw > 1: + x = rearrange(x, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw) + + out = params.forward(x, *args[1:], **kwargs) + + if nh * nw > 1: + out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw) + out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw) + + return out + + return wrapper + + +def hypertile_hook_model(model: nn.Module, width, height, *, enable=False, tile_size_max=128, swap_size=1, max_depth=3, is_sdxl=False): + hypertile_layers = getattr(model, "__webui_hypertile_layers", None) + if hypertile_layers is None: + if not enable: + return + + hypertile_layers = {} + layers = DEPTH_LAYERS_XL if is_sdxl else DEPTH_LAYERS + + for depth in range(4): + for layer_name, module in model.named_modules(): if any(layer_name.endswith(try_name) for try_name in layers[depth]): - # print input shape for debugging - logging.debug(f"HyperTile hijacking attention layer at depth {depth}: {layer_name}") - # hijack - module._original_forward_hypertile = module.forward - module.forward = self_attn_forward(module.forward, depth, layer_name, module) - module._split_sizes_hypertile = [] - yield - finally: - for layer_name, module in layer.named_modules(): - # remove hijack - if hasattr(module, "_original_forward_hypertile"): - if module._split_sizes_hypertile: - logging.debug(f"layer {layer_name} splitted with ({module._split_sizes_hypertile})") - # recover - module.forward = module._original_forward_hypertile - del module._original_forward_hypertile - del module._split_sizes_hypertile - -def hypertile_context_vae(model:nn.Module, aspect_ratio:float, tile_size:int, opts): - """ - Returns context manager for VAE - """ - enabled = opts.hypertile_split_vae_attn - swap_size = opts.hypertile_swap_size_vae - max_depth = opts.hypertile_max_depth_vae - tile_size_max = opts.hypertile_max_tile_vae - return split_attention( - model, - aspect_ratio=aspect_ratio, - tile_size=min(tile_size, tile_size_max), - swap_size=swap_size, - disable=not enabled, - max_depth=max_depth, - is_sdxl=False, - ) - -def hypertile_context_unet(model:nn.Module, aspect_ratio:float, tile_size:int, opts, is_sdxl:bool): - """ - Returns context manager for U-Net - """ - enabled = opts.hypertile_split_unet_attn - swap_size = opts.hypertile_swap_size_unet - max_depth = opts.hypertile_max_depth_unet - tile_size_max = opts.hypertile_max_tile_unet - return split_attention( - model, - aspect_ratio=aspect_ratio, - tile_size=min(tile_size, tile_size_max), - swap_size=swap_size, - disable=not enabled, - max_depth=max_depth, - is_sdxl=is_sdxl, - ) + params = HypertileParams() + module.__webui_hypertile_params = params + params.forward = module.forward + params.depth = depth + params.layer_name = layer_name + module.forward = self_attn_forward(params) + + hypertile_layers[layer_name] = 1 + + model.__webui_hypertile_layers = hypertile_layers + + aspect_ratio = width / height + tile_size = min(largest_tile_size_available(width, height), tile_size_max) + + for layer_name, module in model.named_modules(): + if layer_name in hypertile_layers: + params = module.__webui_hypertile_params + + params.tile_size = tile_size + params.swap_size = swap_size + params.aspect_ratio = aspect_ratio + params.enabled = enable and params.depth <= max_depth diff --git a/extensions-builtin/hypertile/scripts/hypertile_script.py b/extensions-builtin/hypertile/scripts/hypertile_script.py new file mode 100644 index 00000000..3cc29cd1 --- /dev/null +++ 
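`hypertile_hook_model` above installs its wrappers lazily: the first call with `enable=True` walks `named_modules()`, wraps the matching attention layers, and records their names on the model; every later call only rewrites the per-layer parameters. A sketch of that install-once/reconfigure-many shape, using `isinstance` as a stand-in for the `endswith` name matching:

```python
import torch.nn as nn


def hook_model(model: nn.Module, *, enable: bool):
    hooked = getattr(model, "_hooked_layers", None)
    if hooked is None:
        if not enable:
            return  # never enabled yet: leave the model untouched
        hooked = set()
        for name, module in model.named_modules():
            if isinstance(module, nn.Linear):      # stand-in for the endswith() match
                module._hypertile_enabled = False  # a wrapper would read this per call
                hooked.add(name)
        model._hooked_layers = hooked

    for name, module in model.named_modules():
        if name in hooked:
            module._hypertile_enabled = enable


model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
hook_model(model, enable=False)
assert not hasattr(model, "_hooked_layers")  # a disabled model pays no cost at all
hook_model(model, enable=True)               # installs markers and enables them
```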
b/extensions-builtin/hypertile/scripts/hypertile_script.py @@ -0,0 +1,73 @@ +import hypertile +from modules import scripts, script_callbacks, shared + + +class ScriptHypertile(scripts.Script): + name = "Hypertile" + + def title(self): + return self.name + + def show(self, is_img2img): + return scripts.AlwaysVisible + + def process(self, p, *args): + hypertile.set_hypertile_seed(p.all_seeds[0]) + + configure_hypertile(p.width, p.height, enable_unet=shared.opts.hypertile_enable_unet) + + def before_hr(self, p, *args): + configure_hypertile(p.hr_upscale_to_x, p.hr_upscale_to_y, enable_unet=shared.opts.hypertile_enable_unet_secondpass or shared.opts.hypertile_enable_unet) + + +def configure_hypertile(width, height, enable_unet=True): + hypertile.hypertile_hook_model( + shared.sd_model.first_stage_model, + width, + height, + swap_size=shared.opts.hypertile_swap_size_vae, + max_depth=shared.opts.hypertile_max_depth_vae, + tile_size_max=shared.opts.hypertile_max_tile_vae, + enable=shared.opts.hypertile_enable_vae, + ) + + hypertile.hypertile_hook_model( + shared.sd_model.model, + width, + height, + swap_size=shared.opts.hypertile_swap_size_unet, + max_depth=shared.opts.hypertile_max_depth_unet, + tile_size_max=shared.opts.hypertile_max_tile_unet, + enable=enable_unet, + is_sdxl=shared.sd_model.is_sdxl + ) + + +def on_ui_settings(): + import gradio as gr + + options = { + "hypertile_explanation": shared.OptionHTML(""" + Hypertile optimizes the self-attention layer within U-Net and VAE models, + resulting in a reduction in computation time ranging from 1 to 4 times. The larger the generated image is, the greater the + benefit. + """), + + "hypertile_enable_unet": shared.OptionInfo(False, "Enable Hypertile U-Net").info("noticeable change in details of the generated picture; if enabled, overrides the setting below"), + "hypertile_enable_unet_secondpass": shared.OptionInfo(False, "Enable Hypertile U-Net for hires fix second pass"), + "hypertile_max_depth_unet": shared.OptionInfo(3, "Hypertile U-Net max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}), + "hypertile_max_tile_unet": shared.OptionInfo(256, "Hypertile U-net max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}), + "hypertile_swap_size_unet": shared.OptionInfo(3, "Hypertile U-net swap size", gr.Slider, {"minimum": 0, "maximum": 6, "step": 1}), + + "hypertile_enable_vae": shared.OptionInfo(False, "Enable Hypertile VAE").info("minimal change in the generated picture"), + "hypertile_max_depth_vae": shared.OptionInfo(3, "Hypertile VAE max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}), + "hypertile_max_tile_vae": shared.OptionInfo(128, "Hypertile VAE max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}), + "hypertile_swap_size_vae": shared.OptionInfo(3, "Hypertile VAE swap size ", gr.Slider, {"minimum": 0, "maximum": 6, "step": 1}), + } + + for name, opt in options.items(): + opt.section = ('hypertile', "Hypertile") + shared.opts.add_option(name, opt) + + +script_callbacks.on_ui_settings(on_ui_settings) diff --git a/modules/processing.py b/modules/processing.py index 36c2be5e..ac58ef86 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -24,7 +24,6 @@ from modules.shared import opts, cmd_opts, state import modules.shared as shared import modules.paths as paths import modules.face_restoration -from modules.hypertile import set_hypertile_seed, largest_tile_size_available, hypertile_context_unet, hypertile_context_vae import modules.images as images import modules.styles import 
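The script above is the standard always-on extension shape: `process` configures tiling for the first pass at `(p.width, p.height)`, and `before_hr` reconfigures it at the hires-fix target resolution, so tile sizes track the actual latent size of each pass. A minimal skeleton of that shape (hook names as used in the script; treat this as a sketch of the callback surface, not its full API):

```python
from modules import scripts


class MyAlwaysOnScript(scripts.Script):
    def title(self):
        return "My always-on script"

    def show(self, is_img2img):
        return scripts.AlwaysVisible  # run for every generation, no enable checkbox

    def process(self, p, *args):
        pass  # runs once per job before sampling; p.width/p.height are first-pass dims

    def before_hr(self, p, *args):
        pass  # runs before the hires-fix second pass; use p.hr_upscale_to_x/y here
```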
modules.sd_models as sd_models @@ -861,8 +860,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: p.comment(comment) p.extra_generation_params.update(model_hijack.extra_generation_params) - set_hypertile_seed(p.seed) - # add batch size + hypertile status to information to reproduce the run + if p.n_iter > 1: shared.state.job = f"Batch {n+1} out of {p.n_iter}" @@ -874,8 +872,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: else: if opts.sd_vae_decode_method != 'Full': p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method - with hypertile_context_vae(p.sd_model.first_stage_model, aspect_ratio=p.width / p.height, tile_size=largest_tile_size_available(p.width, p.height), opts=shared.opts): - x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) + x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) x_samples_ddim = torch.stack(x_samples_ddim).float() x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) @@ -1141,25 +1138,23 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) - aspect_ratio = self.width / self.height + x = self.rng.next() - tile_size = largest_tile_size_available(self.width, self.height) - with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): - with hypertile_context_unet(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): - samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) + samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) del x + if not self.enable_hr: return samples devices.torch_gc() if self.latent_scale_mode is None: - with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): - decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)).to(dtype=torch.float32) + decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)).to(dtype=torch.float32) else: decoded_samples = None with sd_models.SkipWritingToConfig(): sd_models.reload_model_weights(info=self.hr_checkpoint_info) + return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts) def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, subseed_strength, prompts): @@ -1244,18 +1239,15 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.scripts is not None: self.scripts.before_hr(self) - tile_size = largest_tile_size_available(target_width, target_height) - aspect_ratio = self.width / self.height - with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): - with hypertile_context_unet(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): - samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, 
steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) + + samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio()) self.sampler = None devices.torch_gc() - with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): - decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True) + + decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True) self.is_hr_pass = False return decoded_samples @@ -1532,11 +1524,8 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): if self.initial_noise_multiplier != 1.0: self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier x *= self.initial_noise_multiplier - aspect_ratio = self.width / self.height - tile_size = largest_tile_size_available(self.width, self.height) - with hypertile_context_vae(self.sd_model.first_stage_model, aspect_ratio=aspect_ratio, tile_size=tile_size, opts=shared.opts): - with hypertile_context_unet(self.sd_model.model, aspect_ratio=aspect_ratio, tile_size=tile_size, is_sdxl=shared.sd_model.is_sdxl, opts=shared.opts): - samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) + + samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) if self.mask is not None: samples = samples * self.nmask + self.init_latent * self.mask diff --git a/modules/shared_options.py b/modules/shared_options.py index 28a48906..d40db530 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -200,14 +200,6 @@ options_templates.update(options_section(('optimizations', "Optimizations"), { "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length", infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"), "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("do not recalculate conds from prompts if prompts have not changed since previous calculation"), "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"), - "hypertile_split_unet_attn" : OptionInfo(False, "Split attention in Unet with HyperTile").link("Github", "https://github.com/tfernd/HyperTile").info("improves performance; changes behavior, but deterministic"), - "hypertile_split_vae_attn": OptionInfo(False, "Split attention in VAE with HyperTile").link("Github", "https://github.com/tfernd/HyperTile").info("improves performance; changes behavior, but deterministic"), - "hypertile_max_depth_vae" : OptionInfo(3, "Max depth for VAE HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}).link("Github", "https://github.com/tfernd/HyperTile"), - "hypertile_max_depth_unet" : OptionInfo(3, "Max depth for Unet HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}).link("Github", "https://github.com/tfernd/HyperTile"), - "hypertile_max_tile_vae" : 
OptionInfo(128, "Max tile size for VAE HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).link("Github", "https://github.com/tfernd/HyperTile"), - "hypertile_max_tile_unet" : OptionInfo(256, "Max tile size for Unet HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).link("Github", "https://github.com/tfernd/HyperTile"), - "hypertile_swap_size_unet": OptionInfo(3, "Swap size for Unet HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 6, "step": 1}).link("Github", "https://github.com/tfernd/HyperTile"), - "hypertile_swap_size_vae": OptionInfo(3, "Swap size for VAE HyperTile hijack", gr.Slider, {"minimum": 0, "maximum": 6, "step": 1}).link("Github", "https://github.com/tfernd/HyperTile"), })) options_templates.update(options_section(('compatibility', "Compatibility"), { -- cgit v1.2.3 From c5a0c59a83c950c64bc44427d3478aaa78c296cf Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 26 Nov 2023 11:36:17 +0300 Subject: do not save HTML explanations from options page to config --- modules/options.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/options.py b/modules/options.py index ab40aff7..7703d80e 100644 --- a/modules/options.py +++ b/modules/options.py @@ -76,7 +76,7 @@ class Options: def __init__(self, data_labels: dict[str, OptionInfo], restricted_opts): self.data_labels = data_labels - self.data = {k: v.default for k, v in self.data_labels.items()} + self.data = {k: v.default for k, v in self.data_labels.items() if not v.do_not_save} self.restricted_opts = restricted_opts def __setattr__(self, key, value): @@ -210,7 +210,7 @@ class Options: def add_option(self, key, info): self.data_labels[key] = info - if key not in self.data: + if key not in self.data and not info.do_not_save: self.data[key] = info.default def reorder(self): -- cgit v1.2.3 From d1750e5eca6fd95db3516928cad18b32e557f56f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 26 Nov 2023 11:37:12 +0300 Subject: fix linter errors --- extensions-builtin/hypertile/hypertile.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/extensions-builtin/hypertile/hypertile.py b/extensions-builtin/hypertile/hypertile.py index a40c1311..feb02fd2 100644 --- a/extensions-builtin/hypertile/hypertile.py +++ b/extensions-builtin/hypertile/hypertile.py @@ -9,11 +9,8 @@ from __future__ import annotations import functools from dataclasses import dataclass from typing import Callable -from typing_extensions import Literal -import logging from functools import wraps, cache -from contextlib import contextmanager import math import torch.nn as nn -- cgit v1.2.3 From 2a40d3c603448d15e209814366f2d6ab25e52398 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 26 Nov 2023 14:58:47 +0300 Subject: compact prompt layout: preserve scroll when switching between lora tabs --- javascript/extraNetworks.js | 4 ++++ modules/ui_extra_networks.py | 5 ++++- style.css | 12 ++++++++++-- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index a1bf29a8..a787372c 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -130,6 +130,10 @@ function extraNetworksMovePromptToTab(tabname, id, showPrompt, showNegativePromp } else { promptContainer.insertBefore(prompt, promptContainer.firstChild); } + + if (elem) { + elem.classList.toggle('extra-page-prompts-active', showNegativePrompt || showPrompt); + } } diff --git 
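The `do_not_save` guard a few hunks above is what keeps `OptionHTML` explanation blocks such as `hypertile_explanation` out of the saved settings file: they remain visible as `data_labels` entries but never enter `data`, so `json.dump` never sees them. In miniature (`do_not_save` passed as a constructor flag here purely for brevity):

```python
class OptionInfo:
    def __init__(self, default, do_not_save=False):
        self.default = default
        self.do_not_save = do_not_save


data_labels = {
    "hypertile_enable_unet": OptionInfo(False),
    "hypertile_explanation": OptionInfo("<p>Hypertile optimizes ...</p>", do_not_save=True),
}

# Mirrors Options.__init__ after this change:
data = {k: v.default for k, v in data_labels.items() if not v.do_not_save}
assert "hypertile_explanation" not in data  # never written to config.json
```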
a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index f03e2033..f3b23cc9 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -370,6 +370,9 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): for page in ui.stored_extra_pages: with gr.Tab(page.title, elem_id=f"{tabname}_{page.id_page}", elem_classes=["extra-page"]) as tab: + with gr.Column(elem_id=f"{tabname}_{page.id_page}_prompts", elem_classes=["extra-page-prompts"]): + pass + elem_id = f"{tabname}_{page.id_page}_cards_html" page_elem = gr.HTML('Loading...', elem_id=elem_id) ui.pages.append(page_elem) @@ -400,7 +403,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): allow_prompt = "true" if page.allow_prompt else "false" allow_negative_prompt = "true" if page.allow_negative_prompt else "false" - jscode = 'extraNetworksTabSelected("' + tabname + '", "' + f"{tabname}_{page.id_page}" + '", ' + allow_prompt + ', ' + allow_negative_prompt + ');' + jscode = 'extraNetworksTabSelected("' + tabname + '", "' + f"{tabname}_{page.id_page}_prompts" + '", ' + allow_prompt + ', ' + allow_negative_prompt + ');' tab.select(fn=lambda: [gr.update(visible=True) for _ in tab_controls], _js='function(){ ' + jscode + ' }', inputs=[], outputs=tab_controls, show_progress=False) diff --git a/style.css b/style.css index 73162022..f8b42636 100644 --- a/style.css +++ b/style.css @@ -840,8 +840,16 @@ footer { /* extra networks UI */ -.extra-page .prompt{ - margin: 0 0 0.5em 0; +.extra-page > div.gap{ + gap: 0; +} + +.extra-page-prompts{ + margin-bottom: 0; +} + +.extra-page-prompts.extra-page-prompts-active{ + margin-bottom: 1em; } .extra-network-cards{ -- cgit v1.2.3 From a15dd151ffb4d11556028b34561058bc44930427 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 26 Nov 2023 21:55:50 +0900 Subject: json.dump(ensure_ascii=False) improve json readability --- modules/cache.py | 2 +- modules/options.py | 2 +- modules/ui_extensions.py | 2 +- modules/ui_extra_networks_user_metadata.py | 2 +- modules/ui_loadsave.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/modules/cache.py b/modules/cache.py index ff26a213..2d37e7b9 100644 --- a/modules/cache.py +++ b/modules/cache.py @@ -32,7 +32,7 @@ def dump_cache(): with cache_lock: cache_filename_tmp = cache_filename + "-" with open(cache_filename_tmp, "w", encoding="utf8") as file: - json.dump(cache_data, file, indent=4) + json.dump(cache_data, file, indent=4, ensure_ascii=False) os.replace(cache_filename_tmp, cache_filename) diff --git a/modules/options.py b/modules/options.py index 7703d80e..40cb4799 100644 --- a/modules/options.py +++ b/modules/options.py @@ -158,7 +158,7 @@ class Options: assert not cmd_opts.freeze_settings, "saving settings is disabled" with open(filename, "w", encoding="utf8") as file: - json.dump(self.data, file, indent=4) + json.dump(self.data, file, indent=4, ensure_ascii=False) def same_type(self, x, y): if x is None or y is None: diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index c0a73b57..96dc9db2 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -65,7 +65,7 @@ def save_config_state(name): filename = os.path.join(config_states_dir, f"{timestamp}_{name}.json") print(f"Saving backup of webui/extension state to {filename}.") with open(filename, "w", encoding="utf-8") as f: - json.dump(current_config_state, f, indent=4) + json.dump(current_config_state, f, indent=4, ensure_ascii=False) config_states.list_config_states() new_value = 
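The motivation for `ensure_ascii=False` is readability of the written files: by default `json.dump` escapes every non-ASCII character, so a config containing, say, a Japanese model name becomes a wall of `\uXXXX` sequences; since every one of these files is already opened with `encoding="utf8"`, writing the characters verbatim is safe:

```python
import json

record = {"sd_model_checkpoint": "アニメ風モデル.safetensors"}
print(json.dumps(record))                      # {"sd_model_checkpoint": "\u30a2\u30cb\u30e1..."}
print(json.dumps(record, ensure_ascii=False))  # {"sd_model_checkpoint": "アニメ風モデル.safetensors"}
```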
next(iter(config_states.all_config_states.keys()), "Current") new_choices = ["Current"] + list(config_states.all_config_states.keys()) diff --git a/modules/ui_extra_networks_user_metadata.py b/modules/ui_extra_networks_user_metadata.py index bfec140c..36a807fc 100644 --- a/modules/ui_extra_networks_user_metadata.py +++ b/modules/ui_extra_networks_user_metadata.py @@ -134,7 +134,7 @@ class UserMetadataEditor: basename, ext = os.path.splitext(filename) with open(basename + '.json', "w", encoding="utf8") as file: - json.dump(metadata, file, indent=4) + json.dump(metadata, file, indent=4, ensure_ascii=False) def save_user_metadata(self, name, desc, notes): user_metadata = self.get_user_metadata(name) diff --git a/modules/ui_loadsave.py b/modules/ui_loadsave.py index eb20ff25..7826786c 100644 --- a/modules/ui_loadsave.py +++ b/modules/ui_loadsave.py @@ -141,7 +141,7 @@ class UiLoadsave: def write_to_file(self, current_ui_settings): with open(self.filename, "w", encoding="utf8") as file: - json.dump(current_ui_settings, file, indent=4) + json.dump(current_ui_settings, file, indent=4, ensure_ascii=False) def dump_defaults(self): """saves default values to a file unless tjhe file is present and there was an error loading default values at start""" -- cgit v1.2.3 From f0f100e67b78f686dc73cf3c8cad422e45cc9b8a Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 26 Nov 2023 17:56:16 +0300 Subject: add categories to settings --- javascript/settings.js | 25 ++++++++++++++++ modules/options.py | 75 ++++++++++++++++++++++++++++++++++++++++++----- modules/shared_options.py | 49 ++++++++++++++++++------------- style.css | 9 ++++++ 4 files changed, 130 insertions(+), 28 deletions(-) diff --git a/javascript/settings.js b/javascript/settings.js index 4e79ec00..e6009290 100644 --- a/javascript/settings.js +++ b/javascript/settings.js @@ -44,3 +44,28 @@ onUiLoaded(function() { buttonShowAllPages.addEventListener("click", settingsShowAllTabs); }); + + +onOptionsChanged(function() { + if (gradioApp().querySelector('#settings .settings-category')) return; + + var sectionMap = {}; + gradioApp().querySelectorAll('#settings > div > button').forEach(function(x) { + sectionMap[x.textContent.trim()] = x; + }); + + opts._categories.forEach(function(x) { + var section = x[0]; + var category = x[1]; + + var span = document.createElement('SPAN'); + span.textContent = category; + span.className = 'settings-category'; + + var sectionElem = sectionMap[section]; + if (!sectionElem) return; + + sectionElem.parentElement.insertBefore(span, sectionElem); + }); +}); + diff --git a/modules/options.py b/modules/options.py index 40cb4799..4fead690 100644 --- a/modules/options.py +++ b/modules/options.py @@ -1,5 +1,6 @@ import json import sys +from dataclasses import dataclass import gradio as gr @@ -8,13 +9,14 @@ from modules.shared_cmd_options import cmd_opts class OptionInfo: - def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after='', infotext=None, restrict_api=False): + def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after='', infotext=None, restrict_api=False, category_id=None): self.default = default self.label = label self.component = component self.component_args = component_args self.onchange = onchange self.section = section + self.category_id = category_id self.refresh = refresh self.do_not_save = 
False @@ -63,7 +65,11 @@ class OptionHTML(OptionInfo): def options_section(section_identifier, options_dict): for v in options_dict.values(): - v.section = section_identifier + if len(section_identifier) == 2: + v.section = section_identifier + elif len(section_identifier) == 3: + v.section = section_identifier[0:2] + v.category_id = section_identifier[2] return options_dict @@ -206,6 +212,17 @@ class Options: d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()} d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None} d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None} + + item_categories = {} + for item in self.data_labels.values(): + category = categories.mapping.get(item.category_id) + category = "Uncategorized" if category is None else category.label + if category not in item_categories: + item_categories[category] = item.section[1] + + # _categories is a list of pairs: [section, category]. Each section (a setting page) will get a special heading above it with the category as text. + d["_categories"] = [[v, k] for k, v in item_categories.items()] + [["Defaults", "Other"]] + return json.dumps(d) def add_option(self, key, info): @@ -214,15 +231,40 @@ class Options: self.data[key] = info.default def reorder(self): - """reorder settings so that all items related to section always go together""" + """Reorder settings so that: + - all items related to section always go together + - all sections belonging to a category go together + - sections inside a category are ordered alphabetically + - categories are ordered by creation order + + Category is a superset of sections: for category "postprocessing" there could be multiple sections: "face restoration", "upscaling". + + This function also changes items' category_id so that all items belonging to a section have the same category_id. 
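+
+        An illustrative case (hypothetical ids): with categories registered in
+        the order ["sd", "ui"], items in sections tagged "sd" sort before items
+        in sections tagged "ui", and items whose category_id was never
+        registered fall back to len(category_ids) in sort_key, sorting last.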
+ """ + + category_ids = {} + section_categories = {} - section_ids = {} settings_items = self.data_labels.items() for _, item in settings_items: - if item.section not in section_ids: - section_ids[item.section] = len(section_ids) + if item.section not in section_categories: + section_categories[item.section] = item.category_id + + for _, item in settings_items: + item.category_id = section_categories.get(item.section) + + for category_id in categories.mapping: + if category_id not in category_ids: + category_ids[category_id] = len(category_ids) - self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section])) + def sort_key(x): + item: OptionInfo = x[1] + category_order = category_ids.get(item.category_id, len(category_ids)) + section_order = item.section[1] + + return category_order, section_order + + self.data_labels = dict(sorted(settings_items, key=sort_key)) def cast_value(self, key, value): """casts an arbitrary to the same type as this setting's value with key @@ -245,3 +287,22 @@ class Options: value = expected_type(value) return value + + +@dataclass +class OptionsCategory: + id: str + label: str + +class OptionsCategories: + def __init__(self): + self.mapping = {} + + def register_category(self, category_id, label): + if category_id in self.mapping: + return category_id + + self.mapping[category_id] = OptionsCategory(category_id, label) + + +categories = OptionsCategories() diff --git a/modules/shared_options.py b/modules/shared_options.py index 9bcd7914..04e68a71 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -3,7 +3,7 @@ import gradio as gr from modules import localization, ui_components, shared_items, shared, interrogate, shared_gradio_themes from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 from modules.shared_cmd_options import cmd_opts -from modules.options import options_section, OptionInfo, OptionHTML +from modules.options import options_section, OptionInfo, OptionHTML, categories options_templates = {} hide_dirs = shared.hide_dirs @@ -21,7 +21,14 @@ restricted_opts = { "outdir_init_images" } -options_templates.update(options_section(('saving-images', "Saving images/grids"), { +categories.register_category("saving", "Saving images") +categories.register_category("sd", "Stable Diffusion") +categories.register_category("ui", "User Interface") +categories.register_category("system", "System") +categories.register_category("postprocessing", "Postprocessing") +categories.register_category("training", "Training") + +options_templates.update(options_section(('saving-images', "Saving images/grids", "saving"), { "samples_save": OptionInfo(True, "Always save all generated images"), "samples_format": OptionInfo('png', 'File format for images'), "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), @@ -67,7 +74,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "notification_volume": OptionInfo(100, "Notification sound volume", gr.Slider, {"minimum": 0, "maximum": 100, "step": 1}).info("in %"), })) -options_templates.update(options_section(('saving-paths', "Paths for saving"), { +options_templates.update(options_section(('saving-paths', "Paths for saving", "saving"), { "outdir_samples": 
OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs), "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs), "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs), @@ -79,7 +86,7 @@ options_templates.update(options_section(('saving-paths', "Paths for saving"), { "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs), })) -options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { +options_templates.update(options_section(('saving-to-dirs', "Saving to a directory", "saving"), { "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"), "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"), "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"), @@ -87,21 +94,21 @@ options_templates.update(options_section(('saving-to-dirs', "Saving to a directo "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}), })) -options_templates.update(options_section(('upscaling', "Upscaling"), { +options_templates.update(options_section(('upscaling', "Upscaling", "postprocessing"), { "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"), "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}), "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in shared.sd_upscalers]}), })) -options_templates.update(options_section(('face-restoration', "Face restoration"), { +options_templates.update(options_section(('face-restoration', "Face restoration", "postprocessing"), { "face_restoration": OptionInfo(False, "Restore faces", infotext='Face restoration').info("will use a third-party model on generation result to reconstruct faces"), "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in shared.face_restorers]}), "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"), "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), })) -options_templates.update(options_section(('system', "System"), { +options_templates.update(options_section(('system', "System", "system"), { "auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}), "enable_console_prompts": OptionInfo(shared.cmd_opts.enable_console_prompts, "Print prompts to console when generating with txt2img and img2img."), "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(), @@ -116,13 +123,13 @@ options_templates.update(options_section(('system', "System"), { 
"dump_stacks_on_signal": OptionInfo(False, "Print stack traces before exiting the program with ctrl+c."), })) -options_templates.update(options_section(('API', "API"), { +options_templates.update(options_section(('API', "API", "system"), { "api_enable_requests": OptionInfo(True, "Allow http:// and https:// URLs for input images in API", restrict_api=True), "api_forbid_local_requests": OptionInfo(True, "Forbid URLs to local resources", restrict_api=True), "api_useragent": OptionInfo("", "User agent for requests", restrict_api=True), })) -options_templates.update(options_section(('training', "Training"), { +options_templates.update(options_section(('training', "Training", "training"), { "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."), "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."), "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."), @@ -137,7 +144,7 @@ options_templates.update(options_section(('training', "Training"), { "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."), })) -options_templates.update(options_section(('sd', "Stable Diffusion"), { +options_templates.update(options_section(('sd', "Stable Diffusion", "sd"), { "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": shared_items.list_checkpoint_tiles(shared.opts.sd_checkpoint_dropdown_use_short)}, refresh=shared_items.refresh_checkpoints, infotext='Model hash'), "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}), "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"), @@ -154,14 +161,14 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "hires_fix_refiner_pass": OptionInfo("second pass", "Hires fix: which pass to enable refiner for", gr.Radio, {"choices": ["first pass", "second pass", "both passes"]}, infotext="Hires refiner"), })) -options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { +options_templates.update(options_section(('sdxl', "Stable Diffusion XL", "sd"), { "sdxl_crop_top": OptionInfo(0, "crop top coordinate"), "sdxl_crop_left": OptionInfo(0, "crop left coordinate"), "sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"), "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"), })) -options_templates.update(options_section(('vae', "VAE"), { +options_templates.update(options_section(('vae', "VAE", "sd"), { "sd_vae_explanation": OptionHTML(""" VAE is a neural network that transforms a standard RGB image into latent space representation and back. 
Latent space representation is what stable diffusion is working on during sampling @@ -176,7 +183,7 @@ For img2img, VAE is used to process user's input image before the sampling, and "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}, infotext='VAE Decoder').info("method to decode latent to image"), })) -options_templates.update(options_section(('img2img', "img2img"), { +options_templates.update(options_section(('img2img', "img2img", "sd"), { "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Conditional mask weight'), "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.0, "maximum": 1.5, "step": 0.001}, infotext='Noise multiplier'), "img2img_extra_noise": OptionInfo(0.0, "Extra noise multiplier for img2img and hires fix", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Extra noise').info("0 = disabled (default); should be lower than denoising strength"), @@ -192,7 +199,7 @@ options_templates.update(options_section(('img2img', "img2img"), { "img2img_batch_show_results_limit": OptionInfo(32, "Show the first N batch img2img results in UI", gr.Slider, {"minimum": -1, "maximum": 1000, "step": 1}).info('0: disable, -1: show all images. Too many images can cause lag'), })) -options_templates.update(options_section(('optimizations', "Optimizations"), { +options_templates.update(options_section(('optimizations', "Optimizations", "sd"), { "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}), "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), @@ -203,7 +210,7 @@ options_templates.update(options_section(('optimizations', "Optimizations"), { "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"), })) -options_templates.update(options_section(('compatibility', "Compatibility"), { +options_templates.update(options_section(('compatibility', "Compatibility", "sd"), { "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. 
Can be useful to reproduce old seeds."), "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."), "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."), @@ -228,7 +235,7 @@ options_templates.update(options_section(('interrogate', "Interrogate"), { "deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"), })) -options_templates.update(options_section(('extra_networks', "Extra Networks"), { +options_templates.update(options_section(('extra_networks', "Extra Networks", "sd"), { "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."), "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'), "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), @@ -245,7 +252,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), { "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *shared.hypernetworks]}, refresh=shared_items.reload_hypernetworks), })) -options_templates.update(options_section(('ui', "User interface"), { +options_templates.update(options_section(('ui', "User interface", "ui"), { "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(), "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + shared_gradio_themes.gradio_hf_hub_themes}).info("you can also manually enter any of themes from the gallery.").needs_reload_ui(), "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"), @@ -280,7 +287,7 @@ options_templates.update(options_section(('ui', "User interface"), { })) -options_templates.update(options_section(('infotext', "Infotext"), { +options_templates.update(options_section(('infotext', "Infotext", "ui"), { "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), "add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"), @@ -295,7 +302,7 @@ options_templates.update(options_section(('infotext', "Infotext"), { })) -options_templates.update(options_section(('ui', "Live previews"), { +options_templates.update(options_section(('ui', "Live previews", "ui"), { "show_progressbar": OptionInfo(True, "Show progressbar"), "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), "live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}), @@ -308,7 +315,7 @@ options_templates.update(options_section(('ui', "Live previews"), { "live_preview_fast_interrupt": OptionInfo(False, "Return image with chosen live preview method on interrupt").info("makes interrupts faster"), })) 
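# Aside (not part of the patch): a minimal sketch of how the three-element
# section identifier introduced above is meant to be used. The category id,
# section name, and option name below are hypothetical examples, and
# `options_templates` is assumed to be the module-level dict defined earlier
# in shared_options.py.
from modules.options import OptionInfo, options_section, categories

categories.register_category("my_category", "My Category")

options_templates.update(options_section(("my-section", "My section", "my_category"), {
    "my_option": OptionInfo(True, "Enable my feature"),
}))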
-options_templates.update(options_section(('sampler-params', "Sampler parameters"), { +options_templates.update(options_section(('sampler-params', "Sampler parameters", "sd"), { "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in shared_items.list_samplers()]}).needs_reload_ui(), "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta DDIM').info("noise multiplier; higher = more unpredictable results"), "eta_ancestral": OptionInfo(1.0, "Eta for k-diffusion samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta').info("noise multiplier; currently only applies to ancestral samplers (i.e. Euler a) and SDE samplers"), @@ -330,7 +337,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final", infotext='UniPC lower order final'), })) -options_templates.update(options_section(('postprocessing', "Postprocessing"), { +options_templates.update(options_section(('postprocessing', "Postprocessing", "postprocessing"), { 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), diff --git a/style.css b/style.css index f8b42636..6e3ca841 100644 --- a/style.css +++ b/style.css @@ -462,6 +462,15 @@ div.toprow-compact-tools{ padding: 4px; } +#settings > div.tab-nav .settings-category{ + display: block; + margin: 1em 0 0.25em 0; + font-weight: bold; + text-decoration: underline; + cursor: default; + user-select: none; +} + #settings_result{ height: 1.4em; margin: 0 1.2em; -- cgit v1.2.3 From 1f6844eb7e3a91639b2977d1e0cfbb9bf98baea7 Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Sun, 26 Nov 2023 10:04:39 -0600 Subject: also consider extension url --- modules/ui_extensions.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index b6708881..252e6ff2 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -452,6 +452,7 @@ def get_date(info: dict, key): def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text=""): extlist = available_extensions["extensions"] installed_extensions = {extension.name for extension in extensions.extensions} + installed_extension_urls = {normalize_git_url(extension.remote) for extension in extensions.extensions if extension.remote is not None} tags = available_extensions.get("tags", {}) tags_to_hide = set(hide_tags) @@ -484,7 +485,7 @@ def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text=" if url is None: continue - existing = get_extension_dirname_from_url(url) in installed_extensions + existing = get_extension_dirname_from_url(url) in installed_extensions or normalize_git_url(url) in installed_extension_urls extension_tags = extension_tags + ["installed"] if existing else extension_tags if any(x for x in extension_tags if x in tags_to_hide): -- cgit v1.2.3 From 
b30cc87b786d32f2385cfecf40a2469ee3a96ab5 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 27 Nov 2023 13:15:17 +0900 Subject: add Block component creation callback --- modules/gradio_extensons.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/modules/gradio_extensons.py b/modules/gradio_extensons.py index e6b6835a..7d88dc98 100644 --- a/modules/gradio_extensons.py +++ b/modules/gradio_extensons.py @@ -47,10 +47,20 @@ def Block_get_config(self): def BlockContext_init(self, *args, **kwargs): + if scripts.scripts_current is not None: + scripts.scripts_current.before_component(self, **kwargs) + + scripts.script_callbacks.before_component_callback(self, **kwargs) + res = original_BlockContext_init(self, *args, **kwargs) add_classes_to_gradio_component(self) + scripts.script_callbacks.after_component_callback(self, **kwargs) + + if scripts.scripts_current is not None: + scripts.scripts_current.after_component(self, **kwargs) + return res -- cgit v1.2.3 From 8a6e4bda21dddef3ab2e70a05d71b587b6c8b04b Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 27 Nov 2023 14:00:17 +0900 Subject: catch uncaught exception with ui creation scripts prevent total webui crash --- modules/scripts.py | 54 +++++++++++++++++++++++++++++------------------------- 1 file changed, 29 insertions(+), 25 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index b0689a23..961d032c 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -570,40 +570,44 @@ class ScriptRunner: if controls is None: return - script.name = wrap_call(script.title, script.filename, "title", default=script.filename).lower() - api_args = [] + try: + script.name = wrap_call(script.title, script.filename, "title", default=script.filename).lower() + api_args = [] - for control in controls: - control.custom_script_source = os.path.basename(script.filename) + for control in controls: + control.custom_script_source = os.path.basename(script.filename) - arg_info = api_models.ScriptArg(label=control.label or "") + arg_info = api_models.ScriptArg(label=control.label or "") - for field in ("value", "minimum", "maximum", "step"): - v = getattr(control, field, None) - if v is not None: - setattr(arg_info, field, v) + for field in ("value", "minimum", "maximum", "step"): + v = getattr(control, field, None) + if v is not None: + setattr(arg_info, field, v) - choices = getattr(control, 'choices', None) # as of gradio 3.41, some items in choices are strings, and some are tuples where the first elem is the string - if choices is not None: - arg_info.choices = [x[0] if isinstance(x, tuple) else x for x in choices] + choices = getattr(control, 'choices', None) # as of gradio 3.41, some items in choices are strings, and some are tuples where the first elem is the string + if choices is not None: + arg_info.choices = [x[0] if isinstance(x, tuple) else x for x in choices] - api_args.append(arg_info) + api_args.append(arg_info) - script.api_info = api_models.ScriptInfo( - name=script.name, - is_img2img=script.is_img2img, - is_alwayson=script.alwayson, - args=api_args, - ) + script.api_info = api_models.ScriptInfo( + name=script.name, + is_img2img=script.is_img2img, + is_alwayson=script.alwayson, + args=api_args, + ) - if script.infotext_fields is not None: - self.infotext_fields += script.infotext_fields + if script.infotext_fields is not None: + self.infotext_fields += script.infotext_fields - if script.paste_field_names is not None: - self.paste_field_names += 
script.paste_field_names + if script.paste_field_names is not None: + self.paste_field_names += script.paste_field_names - self.inputs += controls - script.args_to = len(self.inputs) + self.inputs += controls + script.args_to = len(self.inputs) + + except Exception: + errors.report(f"Error creating UI for {script.name}: ", exc_info=True) def setup_ui_for_section(self, section, scriptlist=None): if scriptlist is None: -- cgit v1.2.3 From 9621ca4d64bbe59880d869b923e1572f1475a52b Mon Sep 17 00:00:00 2001 From: Charlie Joynt Date: Mon, 27 Nov 2023 11:39:50 +0000 Subject: Allow use of mutiple styles csv files --- modules/styles.py | 203 +++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 171 insertions(+), 32 deletions(-) diff --git a/modules/styles.py b/modules/styles.py index 0740fe1b..974d3289 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -1,4 +1,5 @@ import csv +import fnmatch import os import os.path import re @@ -10,6 +11,23 @@ class PromptStyle(typing.NamedTuple): name: str prompt: str negative_prompt: str + path: str = None + + +def clean_text(text: str) -> str: + """ + Iterating through a list of regular expressions and replacement strings, we + clean up the prompt and style text to make it easier to match against each + other. + """ + re_list = [ + ("multiple commas", re.compile("(,+\s+)+,?"), ", "), + ("multiple spaces", re.compile("\s{2,}"), " "), + ] + for _, regex, replace in re_list: + text = regex.sub(replace, text) + + return text.strip(", ") def merge_prompts(style_prompt: str, prompt: str) -> str: @@ -26,41 +44,64 @@ def apply_styles_to_prompt(prompt, styles): for style in styles: prompt = merge_prompts(style, prompt) - return prompt + return clean_text(prompt) -re_spaces = re.compile(" +") +def unwrap_style_text_from_prompt(style_text, prompt): + """ + Checks the prompt to see if the style text is wrapped around it. If so, + returns True plus the prompt text without the style text. Otherwise, returns + False with the original prompt. - -def extract_style_text_from_prompt(style_text, prompt): - stripped_prompt = re.sub(re_spaces, " ", prompt.strip()) - stripped_style_text = re.sub(re_spaces, " ", style_text.strip()) + Note that the "cleaned" version of the style text is only used for matching + purposes here. It isn't returned; the original style text is not modified. + """ + stripped_prompt = clean_text(prompt) + stripped_style_text = clean_text(style_text) if "{prompt}" in stripped_style_text: - left, right = stripped_style_text.split("{prompt}", 2) + # Work out whether the prompt is wrapped in the style text. If so, we + # return True and the "inner" prompt text that isn't part of the style. + try: + left, right = stripped_style_text.split("{prompt}", 2) + except ValueError as e: + # If the style text has multple "{prompt}"s, we can't split it into + # two parts. This is an error, but we can't do anything about it. + print("Unable to compare style text to prompt:`n{style_text}") + print(f"Error: {e}") + return False, prompt if stripped_prompt.startswith(left) and stripped_prompt.endswith(right): - prompt = stripped_prompt[len(left):len(stripped_prompt)-len(right)] + prompt = stripped_prompt[len(left) : len(stripped_prompt) - len(right)] return True, prompt else: + # Work out whether the given prompt ends with the style text. If so, we + # return True and the prompt text up to where the style text starts. 
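+        # For example (illustrative values): with style text "detailed, 8k"
+        # and prompt "a cat, detailed, 8k", this branch strips the trailing
+        # style text and the leftover ", ", returning (True, "a cat").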
if stripped_prompt.endswith(stripped_style_text): - prompt = stripped_prompt[:len(stripped_prompt)-len(stripped_style_text)] - - if prompt.endswith(', '): + prompt = stripped_prompt[: len(stripped_prompt) - len(stripped_style_text)] + if prompt.endswith(", "): prompt = prompt[:-2] - return True, prompt return False, prompt -def extract_style_from_prompts(style: PromptStyle, prompt, negative_prompt): +def extract_original_prompts(style: PromptStyle, prompt, negative_prompt): + """ + Takes a style and compares it to the prompt and negative prompt. If the style + matches, returns True plus the prompt and negative prompt with the style text + removed. Otherwise, returns False with the original prompt and negative prompt. + """ if not style.prompt and not style.negative_prompt: return False, prompt, negative_prompt - match_positive, extracted_positive = extract_style_text_from_prompt(style.prompt, prompt) + match_positive, extracted_positive = unwrap_style_text_from_prompt( + style.prompt, prompt + ) if not match_positive: return False, prompt, negative_prompt - match_negative, extracted_negative = extract_style_text_from_prompt(style.negative_prompt, negative_prompt) + match_negative, extracted_negative = unwrap_style_text_from_prompt( + style.negative_prompt, negative_prompt + ) if not match_negative: return False, prompt, negative_prompt @@ -69,25 +110,88 @@ def extract_style_from_prompts(style: PromptStyle, prompt, negative_prompt): class StyleDatabase: def __init__(self, path: str): - self.no_style = PromptStyle("None", "", "") + self.no_style = PromptStyle("None", "", "", None) self.styles = {} self.path = path + folder, file = os.path.split(self.path) + self.default_file = file.split("*")[0] + ".csv" + if self.default_file == ".csv": + self.default_file = "styles.csv" + self.default_path = os.path.join(folder, self.default_file) + + self.prompt_fields = [field for field in PromptStyle._fields if field != "path"] + self.reload() def reload(self): + """ + Clears the style database and reloads the styles from the CSV file(s) + matching the path used to initialize the database. 
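+
+        For example (hypothetical filenames): a path of "styles*.csv" loads
+        styles.csv plus any sibling such as styles_sdxl.csv that matches the
+        glob, inserting a visible divider entry above each file's styles.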
+ """ self.styles.clear() - if not os.path.exists(self.path): + path, filename = os.path.split(self.path) + + if "*" in filename: + fileglob = filename.split("*")[0] + "*.csv" + filelist = [] + for file in os.listdir(path): + if fnmatch.fnmatch(file, fileglob): + filelist.append(file) + # Add a visible divider to the style list + half_len = round(len(file) / 2) + divider = f"{'-' * (20 - half_len)} {file.upper()}" + divider = f"{divider} {'-' * (40 - len(divider))}" + self.styles[divider] = PromptStyle( + f"{divider}", None, None, "do_not_save" + ) + # Add styles from this CSV file + self.load_from_csv(os.path.join(path, file)) + if len(filelist) == 0: + print(f"No styles found in {path} matching {fileglob}") + return + elif not os.path.exists(self.path): + print(f"Style database not found: {self.path}") return + else: + self.load_from_csv(self.path) - with open(self.path, "r", encoding="utf-8-sig", newline='') as file: + def load_from_csv(self, path: str): + with open(path, "r", encoding="utf-8-sig", newline="") as file: reader = csv.DictReader(file, skipinitialspace=True) for row in reader: + # Ignore empty rows or rows starting with a comment + if not row or row["name"].startswith("#"): + continue # Support loading old CSV format with "name, text"-columns prompt = row["prompt"] if "prompt" in row else row["text"] negative_prompt = row.get("negative_prompt", "") - self.styles[row["name"]] = PromptStyle(row["name"], prompt, negative_prompt) + # Add style to database + self.styles[row["name"]] = PromptStyle( + row["name"], prompt, negative_prompt, path + ) + + def get_style_paths(self) -> list(): + """ + Returns a list of all distinct paths, including the default path, of + files that styles are loaded from.""" + # Update any styles without a path to the default path + for style in list(self.styles.values()): + if not style.path: + self.styles[style.name] = style._replace(path=self.default_path) + + # Create a list of all distinct paths, including the default path + style_paths = set() + style_paths.add(self.default_path) + for _, style in self.styles.items(): + if style.path: + style_paths.add(style.path) + + # Remove any paths for styles that are just list dividers + style_paths.remove("do_not_save") + + return list(style_paths) def get_style_prompts(self, styles): return [self.styles.get(x, self.no_style).prompt for x in styles] @@ -96,20 +200,53 @@ class StyleDatabase: return [self.styles.get(x, self.no_style).negative_prompt for x in styles] def apply_styles_to_prompt(self, prompt, styles): - return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).prompt for x in styles]) + return apply_styles_to_prompt( + prompt, [self.styles.get(x, self.no_style).prompt for x in styles] + ) def apply_negative_styles_to_prompt(self, prompt, styles): - return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).negative_prompt for x in styles]) - - def save_styles(self, path: str) -> None: - # Always keep a backup file around - if os.path.exists(path): - shutil.copy(path, f"{path}.bak") - - with open(path, "w", encoding="utf-8-sig", newline='') as file: - writer = csv.DictWriter(file, fieldnames=PromptStyle._fields) - writer.writeheader() - writer.writerows(style._asdict() for k, style in self.styles.items()) + return apply_styles_to_prompt( + prompt, [self.styles.get(x, self.no_style).negative_prompt for x in styles] + ) + + def save_styles(self, path: str = None) -> None: + # The path argument is deprecated, but kept for backwards compatibility + _ = path + + # Update 
any styles without a path to the default path + for style in list(self.styles.values()): + if not style.path: + self.styles[style.name] = style._replace(path=self.default_path) + + # Create a list of all distinct paths, including the default path + style_paths = set() + style_paths.add(self.default_path) + for _, style in self.styles.items(): + if style.path: + style_paths.add(style.path) + + # Remove any paths for styles that are just list dividers + style_paths.remove("do_not_save") + + csv_names = [os.path.split(path)[1].lower() for path in style_paths] + + for style_path in style_paths: + # Always keep a backup file around + if os.path.exists(style_path): + shutil.copy(style_path, f"{style_path}.bak") + + # Write the styles to the CSV file + with open(style_path, "w", encoding="utf-8-sig", newline="") as file: + writer = csv.DictWriter(file, fieldnames=self.prompt_fields) + writer.writeheader() + for style in (s for s in self.styles.values() if s.path == style_path): + # Skip style list dividers, e.g. "STYLES.CSV" + if style.name.lower().strip("# ") in csv_names: + continue + # Write style fields, ignoring the path field + writer.writerow( + {k: v for k, v in style._asdict().items() if k != "path"} + ) def extract_styles_from_prompt(self, prompt, negative_prompt): extracted = [] @@ -120,7 +257,9 @@ class StyleDatabase: found_style = None for style in applicable_styles: - is_match, new_prompt, new_neg_prompt = extract_style_from_prompts(style, prompt, negative_prompt) + is_match, new_prompt, new_neg_prompt = extract_original_prompts( + style, prompt, negative_prompt + ) if is_match: found_style = style prompt = new_prompt -- cgit v1.2.3 From 1c64bb71402c2cd62ac98f936203437f0c4fcd02 Mon Sep 17 00:00:00 2001 From: MisterSeajay Date: Mon, 27 Nov 2023 11:57:27 +0000 Subject: bugfix for warning message (#6) --- modules/styles.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/styles.py b/modules/styles.py index 974d3289..e73920c7 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -66,7 +66,7 @@ def unwrap_style_text_from_prompt(style_text, prompt): except ValueError as e: # If the style text has multple "{prompt}"s, we can't split it into # two parts. This is an error, but we can't do anything about it. - print("Unable to compare style text to prompt:`n{style_text}") + print(f"Unable to compare style text to prompt:`n{style_text}") print(f"Error: {e}") return False, prompt if stripped_prompt.startswith(left) and stripped_prompt.endswith(right): -- cgit v1.2.3 From a75314b41f938d1e598916ecdd0f14126ae1876b Mon Sep 17 00:00:00 2001 From: MisterSeajay Date: Mon, 27 Nov 2023 12:03:42 +0000 Subject: bugfix for warning message (#6) * bugfix for warning message * bugfix error message --- modules/styles.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/styles.py b/modules/styles.py index e73920c7..4d218cd7 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -66,7 +66,7 @@ def unwrap_style_text_from_prompt(style_text, prompt): except ValueError as e: # If the style text has multple "{prompt}"s, we can't split it into # two parts. This is an error, but we can't do anything about it. 
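# (Note: the change below adds the missing f-string prefix so {style_text} is
# actually interpolated; the `n that remains is PowerShell's newline escape,
# not Python's, and is corrected to \n in the follow-up commit.)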
- print(f"Unable to compare style text to prompt:`n{style_text}") + print(f"Unable to compare style text to prompt:\n{style_text}") print(f"Error: {e}") return False, prompt if stripped_prompt.startswith(left) and stripped_prompt.endswith(right): -- cgit v1.2.3 From 26a0c29587da428d27fd3e6a95491776ef66bbdd Mon Sep 17 00:00:00 2001 From: Charlie Joynt Date: Mon, 27 Nov 2023 11:39:50 +0000 Subject: Allow use of mutiple styles csv files * https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/14122 Fix edge case where style text has multiple {prompt} placeholders * https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/14005 --- modules/styles.py | 203 +++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 171 insertions(+), 32 deletions(-) diff --git a/modules/styles.py b/modules/styles.py index 0740fe1b..4d218cd7 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -1,4 +1,5 @@ import csv +import fnmatch import os import os.path import re @@ -10,6 +11,23 @@ class PromptStyle(typing.NamedTuple): name: str prompt: str negative_prompt: str + path: str = None + + +def clean_text(text: str) -> str: + """ + Iterating through a list of regular expressions and replacement strings, we + clean up the prompt and style text to make it easier to match against each + other. + """ + re_list = [ + ("multiple commas", re.compile("(,+\s+)+,?"), ", "), + ("multiple spaces", re.compile("\s{2,}"), " "), + ] + for _, regex, replace in re_list: + text = regex.sub(replace, text) + + return text.strip(", ") def merge_prompts(style_prompt: str, prompt: str) -> str: @@ -26,41 +44,64 @@ def apply_styles_to_prompt(prompt, styles): for style in styles: prompt = merge_prompts(style, prompt) - return prompt + return clean_text(prompt) -re_spaces = re.compile(" +") +def unwrap_style_text_from_prompt(style_text, prompt): + """ + Checks the prompt to see if the style text is wrapped around it. If so, + returns True plus the prompt text without the style text. Otherwise, returns + False with the original prompt. - -def extract_style_text_from_prompt(style_text, prompt): - stripped_prompt = re.sub(re_spaces, " ", prompt.strip()) - stripped_style_text = re.sub(re_spaces, " ", style_text.strip()) + Note that the "cleaned" version of the style text is only used for matching + purposes here. It isn't returned; the original style text is not modified. + """ + stripped_prompt = clean_text(prompt) + stripped_style_text = clean_text(style_text) if "{prompt}" in stripped_style_text: - left, right = stripped_style_text.split("{prompt}", 2) + # Work out whether the prompt is wrapped in the style text. If so, we + # return True and the "inner" prompt text that isn't part of the style. + try: + left, right = stripped_style_text.split("{prompt}", 2) + except ValueError as e: + # If the style text has multple "{prompt}"s, we can't split it into + # two parts. This is an error, but we can't do anything about it. + print(f"Unable to compare style text to prompt:\n{style_text}") + print(f"Error: {e}") + return False, prompt if stripped_prompt.startswith(left) and stripped_prompt.endswith(right): - prompt = stripped_prompt[len(left):len(stripped_prompt)-len(right)] + prompt = stripped_prompt[len(left) : len(stripped_prompt) - len(right)] return True, prompt else: + # Work out whether the given prompt ends with the style text. If so, we + # return True and the prompt text up to where the style text starts. 
if stripped_prompt.endswith(stripped_style_text): - prompt = stripped_prompt[:len(stripped_prompt)-len(stripped_style_text)] - - if prompt.endswith(', '): + prompt = stripped_prompt[: len(stripped_prompt) - len(stripped_style_text)] + if prompt.endswith(", "): prompt = prompt[:-2] - return True, prompt return False, prompt -def extract_style_from_prompts(style: PromptStyle, prompt, negative_prompt): +def extract_original_prompts(style: PromptStyle, prompt, negative_prompt): + """ + Takes a style and compares it to the prompt and negative prompt. If the style + matches, returns True plus the prompt and negative prompt with the style text + removed. Otherwise, returns False with the original prompt and negative prompt. + """ if not style.prompt and not style.negative_prompt: return False, prompt, negative_prompt - match_positive, extracted_positive = extract_style_text_from_prompt(style.prompt, prompt) + match_positive, extracted_positive = unwrap_style_text_from_prompt( + style.prompt, prompt + ) if not match_positive: return False, prompt, negative_prompt - match_negative, extracted_negative = extract_style_text_from_prompt(style.negative_prompt, negative_prompt) + match_negative, extracted_negative = unwrap_style_text_from_prompt( + style.negative_prompt, negative_prompt + ) if not match_negative: return False, prompt, negative_prompt @@ -69,25 +110,88 @@ def extract_style_from_prompts(style: PromptStyle, prompt, negative_prompt): class StyleDatabase: def __init__(self, path: str): - self.no_style = PromptStyle("None", "", "") + self.no_style = PromptStyle("None", "", "", None) self.styles = {} self.path = path + folder, file = os.path.split(self.path) + self.default_file = file.split("*")[0] + ".csv" + if self.default_file == ".csv": + self.default_file = "styles.csv" + self.default_path = os.path.join(folder, self.default_file) + + self.prompt_fields = [field for field in PromptStyle._fields if field != "path"] + self.reload() def reload(self): + """ + Clears the style database and reloads the styles from the CSV file(s) + matching the path used to initialize the database. 
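+
+        Divider entries created for each extra file carry the sentinel path
+        "do_not_save"; get_style_paths() and save_styles() filter that value
+        out, so dividers are never written back to disk.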
+ """ self.styles.clear() - if not os.path.exists(self.path): + path, filename = os.path.split(self.path) + + if "*" in filename: + fileglob = filename.split("*")[0] + "*.csv" + filelist = [] + for file in os.listdir(path): + if fnmatch.fnmatch(file, fileglob): + filelist.append(file) + # Add a visible divider to the style list + half_len = round(len(file) / 2) + divider = f"{'-' * (20 - half_len)} {file.upper()}" + divider = f"{divider} {'-' * (40 - len(divider))}" + self.styles[divider] = PromptStyle( + f"{divider}", None, None, "do_not_save" + ) + # Add styles from this CSV file + self.load_from_csv(os.path.join(path, file)) + if len(filelist) == 0: + print(f"No styles found in {path} matching {fileglob}") + return + elif not os.path.exists(self.path): + print(f"Style database not found: {self.path}") return + else: + self.load_from_csv(self.path) - with open(self.path, "r", encoding="utf-8-sig", newline='') as file: + def load_from_csv(self, path: str): + with open(path, "r", encoding="utf-8-sig", newline="") as file: reader = csv.DictReader(file, skipinitialspace=True) for row in reader: + # Ignore empty rows or rows starting with a comment + if not row or row["name"].startswith("#"): + continue # Support loading old CSV format with "name, text"-columns prompt = row["prompt"] if "prompt" in row else row["text"] negative_prompt = row.get("negative_prompt", "") - self.styles[row["name"]] = PromptStyle(row["name"], prompt, negative_prompt) + # Add style to database + self.styles[row["name"]] = PromptStyle( + row["name"], prompt, negative_prompt, path + ) + + def get_style_paths(self) -> list(): + """ + Returns a list of all distinct paths, including the default path, of + files that styles are loaded from.""" + # Update any styles without a path to the default path + for style in list(self.styles.values()): + if not style.path: + self.styles[style.name] = style._replace(path=self.default_path) + + # Create a list of all distinct paths, including the default path + style_paths = set() + style_paths.add(self.default_path) + for _, style in self.styles.items(): + if style.path: + style_paths.add(style.path) + + # Remove any paths for styles that are just list dividers + style_paths.remove("do_not_save") + + return list(style_paths) def get_style_prompts(self, styles): return [self.styles.get(x, self.no_style).prompt for x in styles] @@ -96,20 +200,53 @@ class StyleDatabase: return [self.styles.get(x, self.no_style).negative_prompt for x in styles] def apply_styles_to_prompt(self, prompt, styles): - return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).prompt for x in styles]) + return apply_styles_to_prompt( + prompt, [self.styles.get(x, self.no_style).prompt for x in styles] + ) def apply_negative_styles_to_prompt(self, prompt, styles): - return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).negative_prompt for x in styles]) - - def save_styles(self, path: str) -> None: - # Always keep a backup file around - if os.path.exists(path): - shutil.copy(path, f"{path}.bak") - - with open(path, "w", encoding="utf-8-sig", newline='') as file: - writer = csv.DictWriter(file, fieldnames=PromptStyle._fields) - writer.writeheader() - writer.writerows(style._asdict() for k, style in self.styles.items()) + return apply_styles_to_prompt( + prompt, [self.styles.get(x, self.no_style).negative_prompt for x in styles] + ) + + def save_styles(self, path: str = None) -> None: + # The path argument is deprecated, but kept for backwards compatibility + _ = path + + # Update 
any styles without a path to the default path + for style in list(self.styles.values()): + if not style.path: + self.styles[style.name] = style._replace(path=self.default_path) + + # Create a list of all distinct paths, including the default path + style_paths = set() + style_paths.add(self.default_path) + for _, style in self.styles.items(): + if style.path: + style_paths.add(style.path) + + # Remove any paths for styles that are just list dividers + style_paths.remove("do_not_save") + + csv_names = [os.path.split(path)[1].lower() for path in style_paths] + + for style_path in style_paths: + # Always keep a backup file around + if os.path.exists(style_path): + shutil.copy(style_path, f"{style_path}.bak") + + # Write the styles to the CSV file + with open(style_path, "w", encoding="utf-8-sig", newline="") as file: + writer = csv.DictWriter(file, fieldnames=self.prompt_fields) + writer.writeheader() + for style in (s for s in self.styles.values() if s.path == style_path): + # Skip style list dividers, e.g. "STYLES.CSV" + if style.name.lower().strip("# ") in csv_names: + continue + # Write style fields, ignoring the path field + writer.writerow( + {k: v for k, v in style._asdict().items() if k != "path"} + ) def extract_styles_from_prompt(self, prompt, negative_prompt): extracted = [] @@ -120,7 +257,9 @@ class StyleDatabase: found_style = None for style in applicable_styles: - is_match, new_prompt, new_neg_prompt = extract_style_from_prompts(style, prompt, negative_prompt) + is_match, new_prompt, new_neg_prompt = extract_original_prompts( + style, prompt, negative_prompt + ) if is_match: found_style = style prompt = new_prompt -- cgit v1.2.3 From 23c36f59b4a423362d74f1ca2cc69871ae101e0e Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Mon, 27 Nov 2023 21:10:26 +0900 Subject: Support XYZ scripts / split hires path from unet --- .../hypertile/scripts/hypertile_script.py | 11 +++-- .../hypertile/scripts/hypertile_xyz.py | 52 ++++++++++++++++++++++ 2 files changed, 60 insertions(+), 3 deletions(-) create mode 100644 extensions-builtin/hypertile/scripts/hypertile_xyz.py diff --git a/extensions-builtin/hypertile/scripts/hypertile_script.py b/extensions-builtin/hypertile/scripts/hypertile_script.py index 3cc29cd1..b2413cc5 100644 --- a/extensions-builtin/hypertile/scripts/hypertile_script.py +++ b/extensions-builtin/hypertile/scripts/hypertile_script.py @@ -1,5 +1,6 @@ import hypertile from modules import scripts, script_callbacks, shared +from scripts.hypertile_xyz import add_axis_options class ScriptHypertile(scripts.Script): @@ -17,7 +18,10 @@ class ScriptHypertile(scripts.Script): configure_hypertile(p.width, p.height, enable_unet=shared.opts.hypertile_enable_unet) def before_hr(self, p, *args): - configure_hypertile(p.hr_upscale_to_x, p.hr_upscale_to_y, enable_unet=shared.opts.hypertile_enable_unet_secondpass or shared.opts.hypertile_enable_unet) + # exclusive hypertile seed for the second pass + if not shared.opts.hypertile_enable_unet: + hypertile.set_hypertile_seed(p.all_seeds[0]) + configure_hypertile(p.hr_upscale_to_x, p.hr_upscale_to_y, enable_unet=shared.opts.hypertile_enable_unet_secondpass) def configure_hypertile(width, height, enable_unet=True): @@ -57,12 +61,12 @@ def on_ui_settings(): "hypertile_enable_unet_secondpass": shared.OptionInfo(False, "Enable Hypertile U-Net for hires fix second pass"), "hypertile_max_depth_unet": shared.OptionInfo(3, "Hypertile U-Net max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}), 
"hypertile_max_tile_unet": shared.OptionInfo(256, "Hypertile U-net max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}), - "hypertile_swap_size_unet": shared.OptionInfo(3, "Hypertile U-net swap size", gr.Slider, {"minimum": 0, "maximum": 6, "step": 1}), + "hypertile_swap_size_unet": shared.OptionInfo(3, "Hypertile U-net swap size", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}), "hypertile_enable_vae": shared.OptionInfo(False, "Enable Hypertile VAE").info("minimal change in the generated picture"), "hypertile_max_depth_vae": shared.OptionInfo(3, "Hypertile VAE max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}), "hypertile_max_tile_vae": shared.OptionInfo(128, "Hypertile VAE max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}), - "hypertile_swap_size_vae": shared.OptionInfo(3, "Hypertile VAE swap size ", gr.Slider, {"minimum": 0, "maximum": 6, "step": 1}), + "hypertile_swap_size_vae": shared.OptionInfo(3, "Hypertile VAE swap size ", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}), } for name, opt in options.items(): @@ -71,3 +75,4 @@ def on_ui_settings(): script_callbacks.on_ui_settings(on_ui_settings) +script_callbacks.on_before_ui(add_axis_options) \ No newline at end of file diff --git a/extensions-builtin/hypertile/scripts/hypertile_xyz.py b/extensions-builtin/hypertile/scripts/hypertile_xyz.py new file mode 100644 index 00000000..eaf7c8d7 --- /dev/null +++ b/extensions-builtin/hypertile/scripts/hypertile_xyz.py @@ -0,0 +1,52 @@ +from modules import scripts +xyz_grid = [x for x in scripts.scripts_data if x.script_class.__module__ == "xyz_grid.py"][0].module +from modules.shared import opts + +def int_applier(value_name:str, min_range:int = -1, max_range:int = -1): + """ + Returns a function that applies the given value to the given value_name in opts.data. + """ + # convert to int + def validate(value_name:str, value:str): + try: + value = int(value) + except: + raise ValueError(f"Value {value} for {value_name} is not an integer") + # validate value + if not min_range == -1: + assert value >= min_range, f"Value {value} for {value_name} must be greater than or equal to {min_range}" + if not max_range == -1: + assert value <= max_range, f"Value {value} for {value_name} must be less than or equal to {max_range}" + def apply_int(p, x, xs): + validate(value_name, x) + opts.data[value_name] = int(x) + return apply_int + +def bool_applier(value_name:str): + """ + Returns a function that applies the given value to the given value_name in opts.data. 
+ """ + def validate(value_name:str, value:str): + assert value.lower() in ["true", "false"], f"Value {value} for {value_name} must be either true or false" + def apply_bool(p, x, xs): + validate(value_name, x) + value_boolean = x.lower() == "true" + opts.data[value_name] = value_boolean + return apply_bool + +def add_axis_options(): + extra_axis_options = [ + xyz_grid.AxisOption("[Hypertile] Unet First pass Enabled", str, bool_applier("hypertile_enable_unet"), choices=xyz_grid.boolean_choice(reverse=True)), + xyz_grid.AxisOption("[Hypertile] Unet Second pass Enabled", str, bool_applier("hypertile_enable_unet_secondpass"), choices=xyz_grid.boolean_choice(reverse=True)), + xyz_grid.AxisOption("[Hypertile] Unet Max Depth", int, int_applier("hypertile_max_depth_unet", 0, 3), choices=lambda: [str(x) for x in range(4)]), + xyz_grid.AxisOption("[Hypertile] Unet Max Tile Size", int, int_applier("hypertile_max_tile_unet", 0, 512)), + xyz_grid.AxisOption("[Hypertile] Unet Swap Size", int, int_applier("hypertile_swap_size_unet", 0, 64)), + xyz_grid.AxisOption("[Hypertile] VAE Enabled", str, bool_applier("hypertile_enable_vae"), choices=xyz_grid.boolean_choice(reverse=True)), + xyz_grid.AxisOption("[Hypertile] VAE Max Depth", int, int_applier("hypertile_max_depth_vae", 0, 3), choices=lambda: [str(x) for x in range(4)]), + xyz_grid.AxisOption("[Hypertile] VAE Max Tile Size", int, int_applier("hypertile_max_tile_vae", 0, 512)), + xyz_grid.AxisOption("[Hypertile] VAE Swap Size", int, int_applier("hypertile_swap_size_vae", 0, 64)), + ] + # check if the axis options have already been added + if any(set(opt.label for opt in extra_axis_options).intersection(set(opt.label for opt in xyz_grid.axis_options))): + return + xyz_grid.axis_options.extend(extra_axis_options) \ No newline at end of file -- cgit v1.2.3 From 601a7b4ce5b28efd29b1668c7b8b74fb6b62f6f3 Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Mon, 27 Nov 2023 22:10:31 +0900 Subject: cache divisors / fix ruff --- extensions-builtin/hypertile/hypertile.py | 24 ++++++++++++++-------- .../hypertile/scripts/hypertile_script.py | 2 +- .../hypertile/scripts/hypertile_xyz.py | 18 ++++++++-------- 3 files changed, 26 insertions(+), 18 deletions(-) diff --git a/extensions-builtin/hypertile/hypertile.py b/extensions-builtin/hypertile/hypertile.py index feb02fd2..0f40e2d3 100644 --- a/extensions-builtin/hypertile/hypertile.py +++ b/extensions-builtin/hypertile/hypertile.py @@ -6,7 +6,6 @@ Original author: @tfernd Github: https://github.com/tfernd/HyperTile from __future__ import annotations -import functools from dataclasses import dataclass from typing import Callable @@ -189,20 +188,27 @@ DEPTH_LAYERS_XL = { RNG_INSTANCE = random.Random() - -def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int: +@cache +def get_divisors(value: int, min_value: int, /, max_options: int = 1) -> list[int]: """ - Returns a random divisor of value that + Returns divisors of value that x * min_value <= value - if max_options is 1, the behavior is deterministic + in big -> small order, amount of divisors is limited by max_options """ + max_options = max(1, max_options) # at least 1 option should be returned min_value = min(min_value, value) - - # All big divisors of value (inclusive) divisors = [i for i in range(min_value, value + 1) if value % i == 0] # divisors in small -> big order - ns = [value // i for i in divisors[:max_options]] # has at least 1 element # big -> small order + return ns + +def 
random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int: + """ + Returns a random divisor of value that + x * min_value <= value + if max_options is 1, the behavior is deterministic + """ + ns = get_divisors(value, min_value, max_options=max_options) # get cached divisors idx = RNG_INSTANCE.randint(0, len(ns) - 1) return ns[idx] @@ -212,7 +218,7 @@ def set_hypertile_seed(seed: int) -> None: RNG_INSTANCE.seed(seed) -@functools.cache +@cache def largest_tile_size_available(width: int, height: int) -> int: """ Calculates the largest tile size available for a given width and height diff --git a/extensions-builtin/hypertile/scripts/hypertile_script.py b/extensions-builtin/hypertile/scripts/hypertile_script.py index b2413cc5..d3ab6091 100644 --- a/extensions-builtin/hypertile/scripts/hypertile_script.py +++ b/extensions-builtin/hypertile/scripts/hypertile_script.py @@ -75,4 +75,4 @@ def on_ui_settings(): script_callbacks.on_ui_settings(on_ui_settings) -script_callbacks.on_before_ui(add_axis_options) \ No newline at end of file +script_callbacks.on_before_ui(add_axis_options) diff --git a/extensions-builtin/hypertile/scripts/hypertile_xyz.py b/extensions-builtin/hypertile/scripts/hypertile_xyz.py index eaf7c8d7..3007a083 100644 --- a/extensions-builtin/hypertile/scripts/hypertile_xyz.py +++ b/extensions-builtin/hypertile/scripts/hypertile_xyz.py @@ -1,17 +1,17 @@ from modules import scripts -xyz_grid = [x for x in scripts.scripts_data if x.script_class.__module__ == "xyz_grid.py"][0].module from modules.shared import opts +xyz_grid = [x for x in scripts.scripts_data if x.script_class.__module__ == "xyz_grid.py"][0].module + def int_applier(value_name:str, min_range:int = -1, max_range:int = -1): """ Returns a function that applies the given value to the given value_name in opts.data. 
""" # convert to int def validate(value_name:str, value:str): - try: - value = int(value) - except: - raise ValueError(f"Value {value} for {value_name} is not an integer") + if not value.isnumeric(): + raise ValueError(f"Value {value} for {value_name} must be an integer") + value = int(value) # validate value if not min_range == -1: assert value >= min_range, f"Value {value} for {value_name} must be greater than or equal to {min_range}" @@ -46,7 +46,9 @@ def add_axis_options(): xyz_grid.AxisOption("[Hypertile] VAE Max Tile Size", int, int_applier("hypertile_max_tile_vae", 0, 512)), xyz_grid.AxisOption("[Hypertile] VAE Swap Size", int, int_applier("hypertile_swap_size_vae", 0, 64)), ] - # check if the axis options have already been added - if any(set(opt.label for opt in extra_axis_options).intersection(set(opt.label for opt in xyz_grid.axis_options))): + set_a = set([opt.label for opt in xyz_grid.axis_options]) + set_b = set([opt.label for opt in extra_axis_options]) + if set_a.intersection(set_b): return - xyz_grid.axis_options.extend(extra_axis_options) \ No newline at end of file + + xyz_grid.axis_options.extend(extra_axis_options) -- cgit v1.2.3 From f207eb7a0d8b4443dbe665df99c31f8ff91660fd Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Mon, 27 Nov 2023 22:11:28 +0900 Subject: fix ruff in hypertile_xyz.py --- extensions-builtin/hypertile/scripts/hypertile_xyz.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/extensions-builtin/hypertile/scripts/hypertile_xyz.py b/extensions-builtin/hypertile/scripts/hypertile_xyz.py index 3007a083..4055a9ea 100644 --- a/extensions-builtin/hypertile/scripts/hypertile_xyz.py +++ b/extensions-builtin/hypertile/scripts/hypertile_xyz.py @@ -46,8 +46,8 @@ def add_axis_options(): xyz_grid.AxisOption("[Hypertile] VAE Max Tile Size", int, int_applier("hypertile_max_tile_vae", 0, 512)), xyz_grid.AxisOption("[Hypertile] VAE Swap Size", int, int_applier("hypertile_swap_size_vae", 0, 64)), ] - set_a = set([opt.label for opt in xyz_grid.axis_options]) - set_b = set([opt.label for opt in extra_axis_options]) + set_a = set(opt.label for opt in xyz_grid.axis_options) + set_b = set(opt.label for opt in extra_axis_options) if set_a.intersection(set_b): return -- cgit v1.2.3 From 524d6a4dbae68bf557d9c5fe686707d96841e0b5 Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Mon, 27 Nov 2023 22:13:18 +0900 Subject: fix ruff - set comprehension --- extensions-builtin/hypertile/scripts/hypertile_xyz.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/extensions-builtin/hypertile/scripts/hypertile_xyz.py b/extensions-builtin/hypertile/scripts/hypertile_xyz.py index 4055a9ea..928e9965 100644 --- a/extensions-builtin/hypertile/scripts/hypertile_xyz.py +++ b/extensions-builtin/hypertile/scripts/hypertile_xyz.py @@ -46,8 +46,8 @@ def add_axis_options(): xyz_grid.AxisOption("[Hypertile] VAE Max Tile Size", int, int_applier("hypertile_max_tile_vae", 0, 512)), xyz_grid.AxisOption("[Hypertile] VAE Swap Size", int, int_applier("hypertile_swap_size_vae", 0, 64)), ] - set_a = set(opt.label for opt in xyz_grid.axis_options) - set_b = set(opt.label for opt in extra_axis_options) + set_a = {opt.label for opt in xyz_grid.axis_options} + set_b = {opt.label for opt in extra_axis_options} if set_a.intersection(set_b): return -- cgit v1.2.3 From ec78354efa179b64e92d6b98d781f6572b4eb084 Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: 
Mon, 27 Nov 2023 22:25:28 +0900 Subject: hypertile_xyz: we don't need isnumeric check for AxisOption --- extensions-builtin/hypertile/scripts/hypertile_xyz.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/extensions-builtin/hypertile/scripts/hypertile_xyz.py b/extensions-builtin/hypertile/scripts/hypertile_xyz.py index 928e9965..9e96ae3c 100644 --- a/extensions-builtin/hypertile/scripts/hypertile_xyz.py +++ b/extensions-builtin/hypertile/scripts/hypertile_xyz.py @@ -7,10 +7,7 @@ def int_applier(value_name:str, min_range:int = -1, max_range:int = -1): """ Returns a function that applies the given value to the given value_name in opts.data. """ - # convert to int def validate(value_name:str, value:str): - if not value.isnumeric(): - raise ValueError(f"Value {value} for {value_name} must be an integer") value = int(value) # validate value if not min_range == -1: -- cgit v1.2.3 From 3cd6e1d0a0877e6f1ac931c8253e6eee09da3805 Mon Sep 17 00:00:00 2001 From: obsol <33932119+read-0nly@users.noreply.github.com> Date: Mon, 27 Nov 2023 19:21:43 -0500 Subject: Update devices.py Fixes an issue where "--use-cpu all" properly makes SD run on the CPU but leaves ControlNet (and other extensions, I presume) pointed at the GPU, causing a crash in ControlNet due to a device mismatch between SD and CN. https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/14097 --- modules/devices.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/devices.py b/modules/devices.py index c01f0602..65efcf1e 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -38,7 +38,7 @@ def get_optimal_device(): def get_device_for(task): - if task in shared.cmd_opts.use_cpu: + if task in shared.cmd_opts.use_cpu or "all" in shared.cmd_opts.use_cpu: return cpu return get_optimal_device() -- cgit v1.2.3 From 03ee297aa22296ea12b965fc1cb11aa46375d372 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 27 Nov 2023 17:26:16 +0900 Subject: fix Auto focal point crop for opencv >= 4.8.x autocrop.download_and_cache_models: in opencv >= 4.8 the face detection model was updated; download the model based on the opencv version; return the model path or raise an exception --- modules/textual_inversion/autocrop.py | 29 ++++++++++++++++------------- modules/textual_inversion/preprocess.py | 4 ++-- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index 1675e39a..051be118 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -3,6 +3,8 @@ import requests import os import numpy as np from PIL import ImageDraw +from modules import paths_internal +from pkg_resources import parse_version GREEN = "#0F0" BLUE = "#00F" @@ -294,22 +296,23 @@ def is_square(w, h): return w == h -def download_and_cache_models(dirname): - download_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true' - model_file_name = 'face_detection_yunet.onnx' +model_dir_opencv = os.path.join(paths_internal.models_path, 'opencv') +if parse_version(cv2.__version__) >= parse_version('4.8'): + model_file_path = os.path.join(model_dir_opencv, 'face_detection_yunet_2023mar.onnx') + model_url = 'https://github.com/opencv/opencv_zoo/blob/b6e370b10f641879a87890d44e42173077154a05/models/face_detection_yunet/face_detection_yunet_2023mar.onnx?raw=true' +else: + model_file_path = os.path.join(model_dir_opencv, 
'face_detection_yunet.onnx') + model_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true' - os.makedirs(dirname, exist_ok=True) - cache_file = os.path.join(dirname, model_file_name) - if not os.path.exists(cache_file): - print(f"downloading face detection model from '{download_url}' to '{cache_file}'") - response = requests.get(download_url) - with open(cache_file, "wb") as f: +def download_and_cache_models(): + if not os.path.exists(model_file_path): + os.makedirs(model_dir_opencv, exist_ok=True) + print(f"downloading face detection model from '{model_url}' to '{model_file_path}'") + response = requests.get(model_url) + with open(model_file_path, "wb") as f: f.write(response.content) - - if os.path.exists(cache_file): - return cache_file - return None + return model_file_path class PointOfInterest: diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index dbd856bd..789fa083 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -3,7 +3,7 @@ from PIL import Image, ImageOps import math import tqdm -from modules import paths, shared, images, deepbooru +from modules import shared, images, deepbooru from modules.textual_inversion import autocrop @@ -196,7 +196,7 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre dnn_model_path = None try: - dnn_model_path = autocrop.download_and_cache_models(os.path.join(paths.models_path, "opencv")) + dnn_model_path = autocrop.download_and_cache_models() except Exception as e: print("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", e) -- cgit v1.2.3 From d608926f817b279d16b39a7875beec80d010a988 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 28 Nov 2023 12:12:27 +0900 Subject: reformat file with uniform indentation --- modules/textual_inversion/autocrop.py | 210 +++++++++++++++++----------------- 1 file changed, 106 insertions(+), 104 deletions(-) diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index 051be118..e223a2e0 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -27,7 +27,6 @@ def crop_image(im, settings): elif is_portrait(settings.crop_width, settings.crop_height): scale_by = settings.crop_height / im.height - im = im.resize((int(im.width * scale_by), int(im.height * scale_by))) im_debug = im.copy() @@ -71,6 +70,7 @@ def crop_image(im, settings): return results + def focal_point(im, settings): corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else [] entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else [] @@ -80,118 +80,120 @@ def focal_point(im, settings): weight_pref_total = 0 if corner_points: - weight_pref_total += settings.corner_points_weight + weight_pref_total += settings.corner_points_weight if entropy_points: - weight_pref_total += settings.entropy_points_weight + weight_pref_total += settings.entropy_points_weight if face_points: - weight_pref_total += settings.face_points_weight + weight_pref_total += settings.face_points_weight corner_centroid = None if corner_points: - corner_centroid = centroid(corner_points) - corner_centroid.weight = settings.corner_points_weight / weight_pref_total - pois.append(corner_centroid) + corner_centroid = 
centroid(corner_points) + corner_centroid.weight = settings.corner_points_weight / weight_pref_total + pois.append(corner_centroid) entropy_centroid = None if entropy_points: - entropy_centroid = centroid(entropy_points) - entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total - pois.append(entropy_centroid) + entropy_centroid = centroid(entropy_points) + entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total + pois.append(entropy_centroid) face_centroid = None if face_points: - face_centroid = centroid(face_points) - face_centroid.weight = settings.face_points_weight / weight_pref_total - pois.append(face_centroid) + face_centroid = centroid(face_points) + face_centroid.weight = settings.face_points_weight / weight_pref_total + pois.append(face_centroid) average_point = poi_average(pois, settings) if settings.annotate_image: - d = ImageDraw.Draw(im) - max_size = min(im.width, im.height) * 0.07 - if corner_centroid is not None: - color = BLUE - box = corner_centroid.bounding(max_size * corner_centroid.weight) - d.text((box[0], box[1]-15), f"Edge: {corner_centroid.weight:.02f}", fill=color) - d.ellipse(box, outline=color) - if len(corner_points) > 1: - for f in corner_points: - d.rectangle(f.bounding(4), outline=color) - if entropy_centroid is not None: - color = "#ff0" - box = entropy_centroid.bounding(max_size * entropy_centroid.weight) - d.text((box[0], box[1]-15), f"Entropy: {entropy_centroid.weight:.02f}", fill=color) - d.ellipse(box, outline=color) - if len(entropy_points) > 1: - for f in entropy_points: - d.rectangle(f.bounding(4), outline=color) - if face_centroid is not None: - color = RED - box = face_centroid.bounding(max_size * face_centroid.weight) - d.text((box[0], box[1]-15), f"Face: {face_centroid.weight:.02f}", fill=color) - d.ellipse(box, outline=color) - if len(face_points) > 1: - for f in face_points: - d.rectangle(f.bounding(4), outline=color) - - d.ellipse(average_point.bounding(max_size), outline=GREEN) + d = ImageDraw.Draw(im) + max_size = min(im.width, im.height) * 0.07 + if corner_centroid is not None: + color = BLUE + box = corner_centroid.bounding(max_size * corner_centroid.weight) + d.text((box[0], box[1] - 15), f"Edge: {corner_centroid.weight:.02f}", fill=color) + d.ellipse(box, outline=color) + if len(corner_points) > 1: + for f in corner_points: + d.rectangle(f.bounding(4), outline=color) + if entropy_centroid is not None: + color = "#ff0" + box = entropy_centroid.bounding(max_size * entropy_centroid.weight) + d.text((box[0], box[1] - 15), f"Entropy: {entropy_centroid.weight:.02f}", fill=color) + d.ellipse(box, outline=color) + if len(entropy_points) > 1: + for f in entropy_points: + d.rectangle(f.bounding(4), outline=color) + if face_centroid is not None: + color = RED + box = face_centroid.bounding(max_size * face_centroid.weight) + d.text((box[0], box[1] - 15), f"Face: {face_centroid.weight:.02f}", fill=color) + d.ellipse(box, outline=color) + if len(face_points) > 1: + for f in face_points: + d.rectangle(f.bounding(4), outline=color) + + d.ellipse(average_point.bounding(max_size), outline=GREEN) return average_point def image_face_points(im, settings): if settings.dnn_model_path is not None: - detector = cv2.FaceDetectorYN.create( - settings.dnn_model_path, - "", - (im.width, im.height), - 0.9, # score threshold - 0.3, # nms threshold - 5000 # keep top k before nms - ) - faces = detector.detect(np.array(im)) - results = [] - if faces[1] is not None: - for face in faces[1]: - x = face[0] - y = face[1] - w = 
face[2] - h = face[3] - results.append( - PointOfInterest( - int(x + (w * 0.5)), # face focus left/right is center - int(y + (h * 0.33)), # face focus up/down is close to the top of the head - size = w, - weight = 1/len(faces[1]) - ) - ) - return results + detector = cv2.FaceDetectorYN.create( + settings.dnn_model_path, + "", + (im.width, im.height), + 0.9, # score threshold + 0.3, # nms threshold + 5000 # keep top k before nms + ) + faces = detector.detect(np.array(im)) + results = [] + if faces[1] is not None: + for face in faces[1]: + x = face[0] + y = face[1] + w = face[2] + h = face[3] + results.append( + PointOfInterest( + int(x + (w * 0.5)), # face focus left/right is center + int(y + (h * 0.33)), # face focus up/down is close to the top of the head + size=w, + weight=1 / len(faces[1]) + ) + ) + return results else: - np_im = np.array(im) - gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY) - - tries = [ - [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ], - [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ], - [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ], - [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ], - [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ], - [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ], - [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ], - [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ] - ] - for t in tries: - classifier = cv2.CascadeClassifier(t[0]) - minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side - try: - faces = classifier.detectMultiScale(gray, scaleFactor=1.1, - minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE) - except Exception: - continue - - if faces: - rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces] - return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects] + np_im = np.array(im) + gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY) + + tries = [ + [f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01], + [f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05], + [f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05], + [f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05], + [f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05], + [f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05], + [f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05], + [f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05] + ] + for t in tries: + classifier = cv2.CascadeClassifier(t[0]) + minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side + try: + faces = classifier.detectMultiScale(gray, scaleFactor=1.1, + minNeighbors=7, minSize=(minsize, minsize), + flags=cv2.CASCADE_SCALE_IMAGE) + except Exception: + continue + + if faces: + rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces] + return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0] - r[2]), + weight=1 / len(rects)) for r in rects] return [] @@ -200,7 +202,7 @@ def image_corner_points(im, settings): # naive attempt at preventing focal points from collecting at watermarks near the bottom gd = ImageDraw.Draw(grayscale) - gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999") + gd.rectangle([0, im.height * .9, im.width, im.height], fill="#999") np_im = np.array(grayscale) @@ -208,7 
+210,7 @@ def image_corner_points(im, settings): np_im, maxCorners=100, qualityLevel=0.04, - minDistance=min(grayscale.width, grayscale.height)*0.06, + minDistance=min(grayscale.width, grayscale.height) * 0.06, useHarrisDetector=False, ) @@ -217,8 +219,8 @@ def image_corner_points(im, settings): focal_points = [] for point in points: - x, y = point.ravel() - focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points))) + x, y = point.ravel() + focal_points.append(PointOfInterest(x, y, size=4, weight=1 / len(points))) return focal_points @@ -227,13 +229,13 @@ def image_entropy_points(im, settings): landscape = im.height < im.width portrait = im.height > im.width if landscape: - move_idx = [0, 2] - move_max = im.size[0] + move_idx = [0, 2] + move_max = im.size[0] elif portrait: - move_idx = [1, 3] - move_max = im.size[1] + move_idx = [1, 3] + move_max = im.size[1] else: - return [] + return [] e_max = 0 crop_current = [0, 0, settings.crop_width, settings.crop_height] @@ -243,14 +245,14 @@ def image_entropy_points(im, settings): e = image_entropy(crop) if (e > e_max): - e_max = e - crop_best = list(crop_current) + e_max = e + crop_best = list(crop_current) crop_current[move_idx[0]] += 4 crop_current[move_idx[1]] += 4 - x_mid = int(crop_best[0] + settings.crop_width/2) - y_mid = int(crop_best[1] + settings.crop_height/2) + x_mid = int(crop_best[0] + settings.crop_width / 2) + y_mid = int(crop_best[1] + settings.crop_height / 2) return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)] -- cgit v1.2.3 From 39eae9f009c8302eed77b0942e1e634f6125d53e Mon Sep 17 00:00:00 2001 From: hidenorly Date: Wed, 29 Nov 2023 04:07:48 +0900 Subject: Revert "Add FP32 fallback support on sd_vae_approx" This reverts commit 58c19545c83fa6925c9ce2216ee64964eb5129ce. Since the modification is expected to move to mac_specific.py (https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046#issuecomment-1826731532) --- modules/sd_vae_approx.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/modules/sd_vae_approx.py b/modules/sd_vae_approx.py index 8370493f..3965e223 100644 --- a/modules/sd_vae_approx.py +++ b/modules/sd_vae_approx.py @@ -21,13 +21,7 @@ class VAEApprox(nn.Module): def forward(self, x): extra = 11 - try: - x = nn.functional.interpolate(x, (x.shape[2] * 2, x.shape[3] * 2)) - except RuntimeError as e: - if "not implemented for" in str(e) and "Half" in str(e): - x = nn.functional.interpolate(x.to(torch.float32), (x.shape[2] * 2, x.shape[3] * 2)).to(x.dtype) - else: - print(f"An unexpected RuntimeError occurred: {str(e)}") + x = nn.functional.interpolate(x, (x.shape[2] * 2, x.shape[3] * 2)) x = nn.functional.pad(x, (extra, extra, extra, extra)) for layer in [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5, self.conv6, self.conv7, self.conv8, ]: -- cgit v1.2.3 From a0096c58977c01ddc6a2b83a8a7b64da6fd4a51e Mon Sep 17 00:00:00 2001 From: hidenorly Date: Wed, 29 Nov 2023 04:45:04 +0900 Subject: Add FP32 fallback support on torch.nn.functional.interpolate This tries to execute interpolate with FP32 if it fails. The background is that on some environments, such as M-series-chip macOS devices, we get an error as follows: ``` "torch/nn/functional.py", line 3931, in interpolate return torch._C._nn.upsample_nearest2d(input, output_size, scale_factors) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ RuntimeError: "upsample_nearest2d_channels_last" not implemented for 'Half' ``` In this case, ```--no-half``` does not help to solve it. 
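(Editor's illustration, not part of the patch: a minimal sketch of the upcast-and-retry pattern this commit applies; the wrapper name and the fixed `size` argument are chosen here for illustration only.)

```python
import torch

def interpolate_with_fallback(x: torch.Tensor, size) -> torch.Tensor:
    # Try the native op first; on MPS backends, half-precision input may raise.
    try:
        return torch.nn.functional.interpolate(x, size)
    except RuntimeError as e:
        if "not implemented for" in str(e) and "Half" in str(e):
            # Upcast to FP32 for the op, then cast back to the original dtype.
            return torch.nn.functional.interpolate(x.to(torch.float32), size).to(x.dtype)
        raise
```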
Therefore, this commit adds an FP32 fallback to solve it. Note that ```upsample_nearest2d``` is called from ```torch.nn.functional.interpolate```, and the fallback for torch.nn.functional.interpolate is needed by ```modules/sd_vae_approx.py```'s ```VAEApprox.forward``` and ```repositories/stable-diffusion-stability-ai/ldm/modules/diffusionmodules/openaimodel.py```'s ```Upsample.forward```. --- modules/mac_specific.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/modules/mac_specific.py b/modules/mac_specific.py index 89256c5b..3538e659 100644 --- a/modules/mac_specific.py +++ b/modules/mac_specific.py @@ -1,6 +1,8 @@ import logging import torch +from typing import Optional, List +from torch import Tensor import platform from modules.sd_hijack_utils import CondFunc from packaging import version @@ -51,6 +53,17 @@ def cumsum_fix(input, cumsum_func, *args, **kwargs): return cumsum_func(input, *args, **kwargs) +# MPS workaround for https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046 +def interpolate_with_fp32_fallback(orig_func, *args, **kwargs) -> Tensor: + try: + return orig_func(*args, **kwargs) + except RuntimeError as e: + if "not implemented for" in str(e) and "Half" in str(e): + input_tensor = args[0] + return orig_func(input_tensor.to(torch.float32), *args[1:], **kwargs).to(input_tensor.dtype) + else: + print(f"An unexpected RuntimeError occurred: {str(e)}") + if has_mps: if platform.mac_ver()[0].startswith("13.2."): # MPS workaround for https://github.com/pytorch/pytorch/issues/95188, thanks to danieldk (https://github.com/explosion/curated-transformers/pull/124) @@ -77,6 +90,9 @@ if has_mps: # MPS workaround for https://github.com/pytorch/pytorch/issues/96113 CondFunc('torch.nn.functional.layer_norm', lambda orig_func, x, normalized_shape, weight, bias, eps, **kwargs: orig_func(x.float(), normalized_shape, weight.float() if weight is not None else None, bias.float() if bias is not None else bias, eps).to(x.dtype), lambda _, input, *args, **kwargs: len(args) == 4 and input.device.type == 'mps') + # MPS workaround for https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046 + CondFunc('torch.nn.functional.interpolate', interpolate_with_fp32_fallback, None) + # MPS workaround for https://github.com/pytorch/pytorch/issues/92311 if platform.processor() == 'i386': for funcName in ['torch.argmax', 'torch.Tensor.argmax']: -- cgit v1.2.3 From 81c00728b8ec0b6c0e70ea10c7687aad065a95cb Mon Sep 17 00:00:00 2001 From: hidenorly Date: Wed, 29 Nov 2023 04:59:35 +0900 Subject: Fix the Ruff error about unused import --- modules/mac_specific.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/mac_specific.py b/modules/mac_specific.py index 3538e659..d96d86d7 100644 --- a/modules/mac_specific.py +++ b/modules/mac_specific.py @@ -1,7 +1,6 @@ import logging import torch -from typing import Optional, List from torch import Tensor import platform from modules.sd_hijack_utils import CondFunc -- cgit v1.2.3
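(Editor's note on the `CondFunc` helper used by the mac_specific patch above: it conditionally monkey-patches a callable at a dotted path. A simplified re-implementation of the idea for module-level functions — a sketch, not the actual `modules.sd_hijack_utils` code — might look like this:)

```python
import functools
import importlib

def cond_patch(dotted_path: str, sub_func, cond_func=None):
    # Replace the callable at `dotted_path` so that sub_func(orig, *args, **kwargs)
    # runs whenever cond_func(orig, *args, **kwargs) is truthy (always, if cond_func
    # is None); otherwise the original callable runs unchanged.
    module_name, attr = dotted_path.rsplit(".", 1)
    module = importlib.import_module(module_name)
    orig = getattr(module, attr)

    @functools.wraps(orig)
    def wrapper(*args, **kwargs):
        if cond_func is None or cond_func(orig, *args, **kwargs):
            return sub_func(orig, *args, **kwargs)
        return orig(*args, **kwargs)

    setattr(module, attr, wrapper)
```

With that shape, `CondFunc('torch.nn.functional.interpolate', interpolate_with_fp32_fallback, None)` in the diff above reads as: always route `interpolate` through the FP32-fallback wrapper.

From 8b40f475a31109cc6ecbdc0d14a0cee9e0303291 Mon Sep 17 00:00:00 2001 From: Nuullll Date: Fri, 10 Nov 2023 11:06:26 +0800 Subject: Initial IPEX support --- modules/devices.py | 11 +++++++++-- modules/xpu_specific.py | 42 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 2 deletions(-) create mode 100644 modules/xpu_specific.py diff --git a/modules/devices.py b/modules/devices.py index 1d4eb563..be599736 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -3,7 +3,7 @@ import contextlib from functools 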
import lru_cache import torch -from modules import errors, shared +from modules import errors, shared, xpu_specific if sys.platform == "darwin": from modules import mac_specific @@ -30,6 +30,9 @@ def get_optimal_device_name(): if has_mps(): return "mps" + if xpu_specific.has_ipex: + return xpu_specific.get_xpu_device_string() + return "cpu" @@ -100,11 +103,15 @@ def autocast(disable=False): if dtype == torch.float32 or shared.cmd_opts.precision == "full": return contextlib.nullcontext() + if xpu_specific.has_xpu: + return torch.autocast("xpu") + return torch.autocast("cuda") def without_autocast(disable=False): - return torch.autocast("cuda", enabled=False) if torch.is_autocast_enabled() and not disable else contextlib.nullcontext() + device_type = "xpu" if xpu_specific.has_xpu else "cuda" + return torch.autocast(device_type, enabled=False) if torch.is_autocast_enabled() and not disable else contextlib.nullcontext() class NansException(Exception): diff --git a/modules/xpu_specific.py b/modules/xpu_specific.py new file mode 100644 index 00000000..6417dd2d --- /dev/null +++ b/modules/xpu_specific.py @@ -0,0 +1,42 @@ +import contextlib +from modules import shared +from modules.sd_hijack_utils import CondFunc + +has_ipex = False +try: + import torch + import intel_extension_for_pytorch as ipex + has_ipex = True +except Exception: + pass + +def check_for_xpu(): + if not has_ipex: + return False + + return hasattr(torch, 'xpu') and torch.xpu.is_available() + +has_xpu = check_for_xpu() + +def get_xpu_device_string(): + if shared.cmd_opts.device_id is not None: + return f"xpu:{shared.cmd_opts.device_id}" + return "xpu" + +def return_null_context(*args, **kwargs): # pylint: disable=unused-argument + return contextlib.nullcontext() + +if has_xpu: + CondFunc('torch.Generator', + lambda orig_func, device=None: torch.xpu.Generator(device), + lambda orig_func, device=None: device is not None and device != torch.device("cpu") and device != "cpu") + + CondFunc('torch.nn.functional.layer_norm', + lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs: + orig_func(input.to(weight.data.dtype), normalized_shape, weight, *args, **kwargs), + lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs: + weight is not None and input.dtype != weight.data.dtype) + + CondFunc('torch.nn.modules.GroupNorm.forward', + lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)), + lambda orig_func, self, input: input.dtype != self.weight.data.dtype) -- cgit v1.2.3 From c2ed4132037a32cda856e8ba6e2cda32b44b9784 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 1 Dec 2023 02:59:41 +0900 Subject: add max-height/width to global-popup-inner to prevent the pop-up from being so big that exiting it becomes impossible --- style.css | 2 ++ 1 file changed, 2 insertions(+) diff --git a/style.css b/style.css index 6e3ca841..ee39a57b 100644 --- a/style.css +++ b/style.css @@ -646,6 +646,8 @@ table.popup-table .link{ margin: auto; padding: 2em; z-index: 1001; + max-height: 90%; + max-width: 90%; } /* fullpage image viewer */ -- cgit v1.2.3
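(Editor's aside on the "Initial IPEX support" change above — a self-contained sketch of the device-selection order it implements. The function names mirror the patch, but the snippet is illustrative only, not the shipped code:)

```python
import torch

def has_xpu() -> bool:
    # True when intel_extension_for_pytorch is loaded and an XPU device is usable.
    return hasattr(torch, "xpu") and torch.xpu.is_available()

def get_optimal_device_name(device_id=None) -> str:
    # Preference order as in the patch: CUDA, then MPS, then XPU, then CPU.
    if torch.cuda.is_available():
        return "cuda" if device_id is None else f"cuda:{device_id}"
    if getattr(torch.backends, "mps", None) is not None and torch.backends.mps.is_available():
        return "mps"
    if has_xpu():
        return "xpu" if device_id is None else f"xpu:{device_id}"
    return "cpu"
```

The autocast hunks follow the same logic: the device type handed to `torch.autocast` switches to "xpu" instead of the hard-coded "cuda" when an XPU is active.

From 01c8f1803a77c63b2ebfd3cbbd41659fb914f274 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Thu, 30 Nov 2023 22:36:12 -0700 Subject: Close popups with escape key --- javascript/extraNetworks.js | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index a787372c..98a7abb7 100644 --- a/javascript/extraNetworks.js +++ 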
b/javascript/extraNetworks.js @@ -392,3 +392,9 @@ function extraNetworksRefreshSingleCard(page, tabname, name) { } }); } + +window.addEventListener("keydown", function(event) { + if (event.key == "Escape") { + closePopup(); + } +}); -- cgit v1.2.3 From 293f44e6c1de7bbf744a4236db81ac4559bdb82a Mon Sep 17 00:00:00 2001 From: MrCheeze Date: Fri, 1 Dec 2023 22:56:08 -0500 Subject: Fix bug where is_using_v_parameterization_for_sd2 fails because the sd_hijack is only partially undone --- modules/sd_hijack.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 0157e19f..3d340fc9 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -38,9 +38,6 @@ ldm.models.diffusion.ddpm.print = shared.ldm_print optimizers = [] current_optimizer: sd_hijack_optimizations.SdOptimization = None -ldm_original_forward = patches.patch(__file__, ldm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sd_unet.UNetModel_forward) -sgm_original_forward = patches.patch(__file__, sgm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sd_unet.UNetModel_forward) - def list_optimizers(): new_optimizers = script_callbacks.list_optimizers_callback() @@ -258,6 +255,9 @@ class StableDiffusionModelHijack: import modules.models.diffusion.ddpm_edit + ldm_original_forward = patches.patch(__file__, ldm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sd_unet.UNetModel_forward) + sgm_original_forward = patches.patch(__file__, sgm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sd_unet.UNetModel_forward) + if isinstance(m, ldm.models.diffusion.ddpm.LatentDiffusion): sd_unet.original_forward = ldm_original_forward elif isinstance(m, modules.models.diffusion.ddpm_edit.LatentDiffusion): @@ -303,6 +303,9 @@ class StableDiffusionModelHijack: self.layers = None self.clip = None + patches.undo(__file__, ldm.modules.diffusionmodules.openaimodel.UNetModel, "forward") + patches.undo(__file__, sgm.modules.diffusionmodules.openaimodel.UNetModel, "forward") + sd_unet.original_forward = None -- cgit v1.2.3 From 6080045b2a0964e63bdcd33dd26015f8a51411f6 Mon Sep 17 00:00:00 2001 From: MrCheeze Date: Fri, 1 Dec 2023 22:58:05 -0500 Subject: Add support for SD 2.1 Turbo, by converting the state dict from SGM to LDM on load --- modules/sd_models.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 841402e8..9355f1e1 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -230,15 +230,19 @@ def select_checkpoint(): return checkpoint_info -checkpoint_dict_replacements = { +checkpoint_dict_replacements_sd1 = { 'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.', 'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.', 'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.', } +checkpoint_dict_replacements_sd2_turbo = { # Converts SD 2.1 Turbo from SGM to LDM format. 
+ 'conditioner.embedders.0.': 'cond_stage_model.', +} + -def transform_checkpoint_dict_key(k): - for text, replacement in checkpoint_dict_replacements.items(): +def transform_checkpoint_dict_key(k, replacements): + for text, replacement in replacements.items(): if k.startswith(text): k = replacement + k[len(text):] @@ -249,9 +253,14 @@ def get_state_dict_from_checkpoint(pl_sd): pl_sd = pl_sd.pop("state_dict", pl_sd) pl_sd.pop("state_dict", None) + is_sd2_turbo = 'conditioner.embedders.0.model.ln_final.weight' in pl_sd and pl_sd['conditioner.embedders.0.model.ln_final.weight'].size()[0] == 1024 + sd = {} for k, v in pl_sd.items(): - new_key = transform_checkpoint_dict_key(k) + if is_sd2_turbo: + new_key = transform_checkpoint_dict_key(k, checkpoint_dict_replacements_sd2_turbo) + else: + new_key = transform_checkpoint_dict_key(k, checkpoint_dict_replacements_sd1) if new_key is not None: sd[new_key] = v -- cgit v1.2.3 From b58d061e41cba6fb91910d310d53e175d0511650 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Dec 2023 08:33:28 +0300 Subject: infotext updates: add option to disregard certain infotext fields, add option to not include VAE in infotext, add explanation to infotext settings page, move some options to infotext settings page --- modules/generation_parameters_copypaste.py | 13 +++++++++---- modules/processing.py | 4 ++-- modules/shared_items.py | 16 ++++++++++++++++ modules/shared_options.py | 20 ++++++++++++++------ 4 files changed, 41 insertions(+), 12 deletions(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 0a606515..4efe53e0 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -1,3 +1,4 @@ +from __future__ import annotations import base64 import io import json @@ -15,9 +16,6 @@ re_imagesize = re.compile(r"^(\d+)x(\d+)$") re_hypernet_hash = re.compile("\(([0-9a-f]+)\)$") type_of_gr_update = type(gr.update()) -paste_fields = {} -registered_param_bindings = [] - class ParamBinding: def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=None): @@ -30,6 +28,10 @@ class ParamBinding: self.paste_field_names = paste_field_names or [] +paste_fields: dict[str, dict] = {} +registered_param_bindings: list[ParamBinding] = [] + + def reset(): paste_fields.clear() registered_param_bindings.clear() @@ -113,7 +115,6 @@ def register_paste_params_button(binding: ParamBinding): def connect_paste_params_buttons(): - binding: ParamBinding for binding in registered_param_bindings: destination_image_component = paste_fields[binding.tabname]["init_img"] fields = paste_fields[binding.tabname]["fields"] @@ -313,6 +314,9 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model if "VAE Decoder" not in res: res["VAE Decoder"] = "Full" + skip = set(shared.opts.infotext_skip_pasting) + res = {k: v for k, v in res.items() if k not in skip} + return res @@ -443,3 +447,4 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component, outputs=[], show_progress=False, ) + diff --git a/modules/processing.py b/modules/processing.py index ac58ef86..5ab6ddde 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -679,8 +679,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Size": f"{p.width}x{p.height}", "Model hash": p.sd_model_hash if opts.add_model_hash_to_info 
else None, "Model": p.sd_model_name if opts.add_model_name_to_info else None, - "VAE hash": p.sd_vae_hash if opts.add_model_hash_to_info else None, - "VAE": p.sd_vae_name if opts.add_model_name_to_info else None, + "VAE hash": p.sd_vae_hash if opts.add_vae_hash_to_info else None, + "VAE": p.sd_vae_name if opts.add_vae_name_to_info else None, "Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])), "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength), "Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"), diff --git a/modules/shared_items.py b/modules/shared_items.py index 5024b426..991971ad 100644 --- a/modules/shared_items.py +++ b/modules/shared_items.py @@ -66,6 +66,22 @@ def reload_hypernetworks(): shared.hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir) +def get_infotext_names(): + from modules import generation_parameters_copypaste, shared + res = {} + + for info in shared.opts.data_labels.values(): + if info.infotext: + res[info.infotext] = 1 + + for tab_data in generation_parameters_copypaste.paste_fields.values(): + for _, name in tab_data.get("fields") or []: + if isinstance(name, str): + res[name] = 1 + + return list(res) + + ui_reorder_categories_builtin_items = [ "prompt", "image", diff --git a/modules/shared_options.py b/modules/shared_options.py index 04e68a71..df45fc0a 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -46,8 +46,6 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "grid_text_inactive_color": OptionInfo("#999999", "Inactive text color for image grids", ui_components.FormColorPicker, {}), "grid_background_color": OptionInfo("#ffffff", "Background color for image grids", ui_components.FormColorPicker, {}), - "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"), - "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."), "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."), "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."), "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), @@ -288,11 +286,21 @@ options_templates.update(options_section(('ui', "User interface", "ui"), { options_templates.update(options_section(('infotext', "Infotext", "ui"), { - "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), - "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), - "add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"), - "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"), + "infotext_explanation": OptionHTML(""" +Infotext is what this software calls the text that contains generation parameters and can be used to generate the same picture again. +It is displayed in UI below the image. To use infotext, paste it into the prompt and click the ↙️ paste button. 
+"""), + "enable_pnginfo": OptionInfo(True, "Write infotext to metadata of the generated image"), + "save_txt": OptionInfo(False, "Create a text file with infotext next to every generated image"), + + "add_model_name_to_info": OptionInfo(True, "Add model name to infotext"), + "add_model_hash_to_info": OptionInfo(True, "Add model hash to infotext"), + "add_vae_name_to_info": OptionInfo(True, "Add VAE name to infotext"), + "add_vae_hash_to_info": OptionInfo(True, "Add VAE hash to infotext"), + "add_user_name_to_info": OptionInfo(False, "Add user name to infotext when authenticated"), + "add_version_to_infotext": OptionInfo(True, "Add program version to infotext"), "disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"), + "infotext_skip_pasting": OptionInfo([], "Disregard fields from pasted infotext", ui_components.DropdownMulti, lambda: {"choices": shared_items.get_infotext_names()}), "infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI)").html("""
  • Ignore: keep prompt and styles dropdown as it is.
  • Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).
  • -- cgit v1.2.3 From 7499148ad4dbd3444215c843d02453f68c459707 Mon Sep 17 00:00:00 2001 From: Nuullll Date: Sat, 2 Dec 2023 14:00:46 +0800 Subject: Disable ipex autocast due to its bad perf --- modules/cmd_args.py | 1 + modules/devices.py | 20 +++++++++++++------- modules/xpu_specific.py | 28 ++++++++++++++++++---------- webui-ipex-user.bat | 19 +++++++++++++++++++ 4 files changed, 51 insertions(+), 17 deletions(-) create mode 100644 webui-ipex-user.bat diff --git a/modules/cmd_args.py b/modules/cmd_args.py index a9fb9bfa..da93eb26 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -70,6 +70,7 @@ parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="pre parser.add_argument("--disable-opt-split-attention", action='store_true', help="prefer no cross-attention layer optimization for automatic choice of optimization") parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI") parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower) +parser.add_argument("--use-ipex", action="store_true", help="use Intel XPU as torch device") parser.add_argument("--disable-model-loading-ram-optimization", action='store_true', help="disable an optimization that reduces RAM use when loading a model") parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests") parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None) diff --git a/modules/devices.py b/modules/devices.py index be599736..37ecca78 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -3,11 +3,18 @@ import contextlib from functools import lru_cache import torch -from modules import errors, shared, xpu_specific +from modules import errors, shared if sys.platform == "darwin": from modules import mac_specific +if shared.cmd_opts.use_ipex: + from modules import xpu_specific + + +def has_xpu() -> bool: + return shared.cmd_opts.use_ipex and xpu_specific.has_xpu + def has_mps() -> bool: if sys.platform != "darwin": @@ -30,7 +37,7 @@ def get_optimal_device_name(): if has_mps(): return "mps" - if xpu_specific.has_ipex: + if has_xpu(): return xpu_specific.get_xpu_device_string() return "cpu" @@ -57,6 +64,9 @@ def torch_gc(): if has_mps(): mac_specific.torch_mps_gc() + if has_xpu(): + xpu_specific.torch_xpu_gc() + def enable_tf32(): if torch.cuda.is_available(): @@ -103,15 +113,11 @@ def autocast(disable=False): if dtype == torch.float32 or shared.cmd_opts.precision == "full": return contextlib.nullcontext() - if xpu_specific.has_xpu: - return torch.autocast("xpu") - return torch.autocast("cuda") def without_autocast(disable=False): - device_type = "xpu" if xpu_specific.has_xpu else "cuda" - return torch.autocast(device_type, enabled=False) if torch.is_autocast_enabled() and not disable else contextlib.nullcontext() + return torch.autocast("cuda", enabled=False) if torch.is_autocast_enabled() and not disable else contextlib.nullcontext() class NansException(Exception): diff --git a/modules/xpu_specific.py b/modules/xpu_specific.py index 6417dd2d..2df68665 100644 --- a/modules/xpu_specific.py +++ b/modules/xpu_specific.py @@ -1,4 +1,3 @@ -import contextlib from modules import shared from modules.sd_hijack_utils import CondFunc @@ -10,33 +9,42 @@ 
try: except Exception: pass -def check_for_xpu(): - if not has_ipex: - return False - return hasattr(torch, 'xpu') and torch.xpu.is_available() +def check_for_xpu(): + return has_ipex and hasattr(torch, 'xpu') and torch.xpu.is_available() -has_xpu = check_for_xpu() def get_xpu_device_string(): if shared.cmd_opts.device_id is not None: return f"xpu:{shared.cmd_opts.device_id}" return "xpu" -def return_null_context(*args, **kwargs): # pylint: disable=unused-argument - return contextlib.nullcontext() + +def torch_xpu_gc(): + with torch.xpu.device(get_xpu_device_string()): + torch.xpu.empty_cache() + + +has_xpu = check_for_xpu() if has_xpu: + # W/A for https://github.com/intel/intel-extension-for-pytorch/issues/452: torch.Generator API doesn't support XPU device CondFunc('torch.Generator', lambda orig_func, device=None: torch.xpu.Generator(device), - lambda orig_func, device=None: device is not None and device != torch.device("cpu") and device != "cpu") + lambda orig_func, device=None: device is not None and device.type == "xpu") + # W/A for some OPs that could not handle different input dtypes CondFunc('torch.nn.functional.layer_norm', lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs: orig_func(input.to(weight.data.dtype), normalized_shape, weight, *args, **kwargs), lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs: weight is not None and input.dtype != weight.data.dtype) - CondFunc('torch.nn.modules.GroupNorm.forward', lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)), lambda orig_func, self, input: input.dtype != self.weight.data.dtype) + CondFunc('torch.nn.modules.linear.Linear.forward', + lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)), + lambda orig_func, self, input: input.dtype != self.weight.data.dtype) + CondFunc('torch.nn.modules.conv.Conv2d.forward', + lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)), + lambda orig_func, self, input: input.dtype != self.weight.data.dtype) diff --git a/webui-ipex-user.bat b/webui-ipex-user.bat new file mode 100644 index 00000000..ab25a040 --- /dev/null +++ b/webui-ipex-user.bat @@ -0,0 +1,19 @@ +@echo off + +set PYTHON= +@REM The "Nuullll/intel-extension-for-pytorch" wheels were built from IPEX source for Intel Arc GPU: https://github.com/intel/intel-extension-for-pytorch/tree/xpu-main +@REM This is NOT an Intel official release so please use it at your own risk!! +@REM See https://github.com/Nuullll/intel-extension-for-pytorch/releases/tag/v2.0.110%2Bxpu-master%2Bdll-bundle for details. +@REM +@REM Strengths (over official IPEX 2.0.110 windows release): +@REM - AOT build (for Arc GPU only) to eliminate JIT compilation overhead: https://github.com/intel/intel-extension-for-pytorch/issues/399 +@REM - Bundles minimal oneAPI 2023.2 dependencies into the python wheels, so users don't need to install oneAPI for the whole system. 
+@REM - Provides a compatible torchvision wheel: https://github.com/intel/intel-extension-for-pytorch/issues/465 +@REM Limitation: +@REM - Only works for python 3.10 +set "TORCH_COMMAND=pip install https://github.com/Nuullll/intel-extension-for-pytorch/releases/download/v2.0.110%%2Bxpu-master%%2Bdll-bundle/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64.whl https://github.com/Nuullll/intel-extension-for-pytorch/releases/download/v2.0.110%%2Bxpu-master%%2Bdll-bundle/torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64.whl https://github.com/Nuullll/intel-extension-for-pytorch/releases/download/v2.0.110%%2Bxpu-master%%2Bdll-bundle/intel_extension_for_pytorch-2.0.110+gitc6ea20b-cp310-cp310-win_amd64.whl" +set GIT= +set VENV_DIR= +set "COMMANDLINE_ARGS=--use-ipex --skip-torch-cuda-test --skip-version-check --opt-sdp-attention" + +call webui.bat -- cgit v1.2.3 From e294e46d46a814457fc77af13c17128bd6075d45 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Dec 2023 09:26:38 +0300 Subject: split UI settings page into many --- .../scripts/extra_options_section.py | 13 +++-- modules/shared_options.py | 57 ++++++++++++---------- 2 files changed, 40 insertions(+), 30 deletions(-) diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py index 983f87ff..a903df62 100644 --- a/extensions-builtin/extra-options-section/scripts/extra_options_section.py +++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py @@ -64,11 +64,14 @@ class ExtraOptionsSection(scripts.Script): p.override_settings[name] = value -shared.options_templates.update(shared.options_section(('ui', "User interface"), { - "extra_options_txt2img": shared.OptionInfo([], "Options in main UI - txt2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img interfaces").needs_reload_ui(), - "extra_options_img2img": shared.OptionInfo([], "Options in main UI - img2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in img2img interfaces").needs_reload_ui(), - "extra_options_cols": shared.OptionInfo(1, "Options in main UI - number of columns", gr.Number, {"precision": 0}).needs_reload_ui(), - "extra_options_accordion": shared.OptionInfo(False, "Options in main UI - place into an accordion").needs_reload_ui() +shared.options_templates.update(shared.options_section(('settings_in_ui', "Settings in UI", "ui"), { + "settings_in_ui": shared.OptionHTML(""" +This page allows you to add some settings to the main interface of txt2img and img2img tabs. 
+"""), + "extra_options_txt2img": shared.OptionInfo([], "Settings for txt2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img interfaces").needs_reload_ui(), + "extra_options_img2img": shared.OptionInfo([], "Settings for img2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in img2img interfaces").needs_reload_ui(), + "extra_options_cols": shared.OptionInfo(1, "Number of columns for added settings", gr.Number, {"precision": 0}).needs_reload_ui(), + "extra_options_accordion": shared.OptionInfo(False, "Place added settings into an accordion").needs_reload_ui() })) diff --git a/modules/shared_options.py b/modules/shared_options.py index df45fc0a..1390152d 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -250,38 +250,45 @@ options_templates.update(options_section(('extra_networks', "Extra Networks", "s "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *shared.hypernetworks]}, refresh=shared_items.reload_hypernetworks), })) -options_templates.update(options_section(('ui', "User interface", "ui"), { - "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(), - "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + shared_gradio_themes.gradio_hf_hub_themes}).info("you can also manually enter any of themes from the gallery.").needs_reload_ui(), - "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"), - "gallery_height": OptionInfo("", "Gallery height", gr.Textbox).info("an be any valid CSS value").needs_reload_ui(), - "return_grid": OptionInfo(True, "Show grid in results for web"), - "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), - "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), - "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), - "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), - "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), - "js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"), - "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"), - "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), +options_templates.update(options_section(('ui_prompt_editing', "Prompt editing", "ui"), { + "keyedit_precision_attention": OptionInfo(0.1, "Precision for (attention:1.1) when editing the prompt with Ctrl+up/down", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), + "keyedit_precision_extra": OptionInfo(0.05, "Precision for when editing the prompt with Ctrl+up/down", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), + "keyedit_delimiters": OptionInfo(r".,\/!?%^*;:{}=`~() ", "Word delimiters when editing the prompt with Ctrl+up/down"), + "disable_token_counters": OptionInfo(False, "Disable prompt token 
counters").needs_reload_ui(), +})) + +options_templates.update(options_section(('ui_gallery', "Gallery", "ui"), { + "return_grid": OptionInfo(True, "Show grid in gallery"), + "do_not_show_images": OptionInfo(False, "Do not show any images in gallery"), + "js_modal_lightbox": OptionInfo(True, "Full page image viewer: enable"), + "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Full page image viewer: show images zoomed in by default"), + "js_modal_lightbox_gamepad": OptionInfo(False, "Full page image viewer: navigate with gamepad"), + "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Full page image viewer: gamepad repeat period").info("in milliseconds"), + "gallery_height": OptionInfo("", "Gallery height", gr.Textbox).info("can be any valid CSS value, for example 768px or 20em").needs_reload_ui(), +})) + +options_templates.update(options_section(('ui_alternatives', "UI alternatives", "ui"), { + "compact_prompt_box": OptionInfo(False, "Compact prompt layout").info("puts prompt and negative prompt inside the Generate tab, leaving more vertical space for the image on the right").needs_reload_ui(), "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_reload_ui(), "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(), - "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_delimiters": OptionInfo(r".,\/!?%^*;:{}=`~() ", "Ctrl+up/down word delimiters"), - "keyedit_delimiters_whitespace": OptionInfo(["Tab", "Carriage Return", "Line Feed"], "Ctrl+up/down whitespace delimiters", gr.CheckboxGroup, lambda: {"choices": ["Tab", "Carriage Return", "Line Feed"]}), - "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), - "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), - "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(), - "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(), - "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(), "sd_checkpoint_dropdown_use_short": OptionInfo(False, "Checkpoint dropdown: use filenames without paths").info("models in subdirectories like photo/sd15.ckpt will be listed as just sd15.ckpt"), "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(), "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(), - "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), "txt2img_settings_accordion": OptionInfo(False, "Settings in txt2img hidden under Accordion").needs_reload_ui(), "img2img_settings_accordion": 
OptionInfo(False, "Settings in img2img hidden under Accordion").needs_reload_ui(), - "compact_prompt_box": OptionInfo(False, "Compact prompt layout").info("puts prompt and negative prompt inside the Generate tab, leaving more vertical space for the image on the right").needs_reload_ui(), +})) + +options_templates.update(options_section(('ui', "User interface", "ui"), { + "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(), + "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), + "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(), + "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(), + "ui_reorder_list": OptionInfo([], "UI item order for txt2img/img2img tabs", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(), + "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + shared_gradio_themes.gradio_hf_hub_themes}).info("you can also manually enter any of themes from the gallery.").needs_reload_ui(), + "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"), + "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), + "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), + "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), })) -- cgit v1.2.3 From ef6b8123dc57e4e4bd5e08d9f3e3dbdfdf6b4c4a Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Dec 2023 09:57:39 +0300 Subject: put code that can cause an exception into its own function for #14120 --- modules/scripts.py | 62 +++++++++++++++++++++++++++++------------------------- 1 file changed, 33 insertions(+), 29 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index 961d032c..7f9454eb 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -560,54 +560,58 @@ class ScriptRunner: on_after.clear() def create_script_ui(self, script): - import modules.api.models as api_models script.args_from = len(self.inputs) script.args_to = len(self.inputs) + try: + self.create_script_ui_inner(script) + except Exception: + errors.report(f"Error creating UI for {script.name}: ", exc_info=True) + + def create_script_ui_inner(self, script): + import modules.api.models as api_models + controls = wrap_call(script.ui, script.filename, "ui", script.is_img2img) if controls is None: return - try: - script.name = wrap_call(script.title, script.filename, "title", default=script.filename).lower() - api_args = [] + script.name = wrap_call(script.title, script.filename, "title", default=script.filename).lower() - for control in controls: - control.custom_script_source = os.path.basename(script.filename) + api_args = [] - arg_info = api_models.ScriptArg(label=control.label or "") + for control 
in controls: + control.custom_script_source = os.path.basename(script.filename) - for field in ("value", "minimum", "maximum", "step"): - v = getattr(control, field, None) - if v is not None: - setattr(arg_info, field, v) + arg_info = api_models.ScriptArg(label=control.label or "") - choices = getattr(control, 'choices', None) # as of gradio 3.41, some items in choices are strings, and some are tuples where the first elem is the string - if choices is not None: - arg_info.choices = [x[0] if isinstance(x, tuple) else x for x in choices] + for field in ("value", "minimum", "maximum", "step"): + v = getattr(control, field, None) + if v is not None: + setattr(arg_info, field, v) - api_args.append(arg_info) + choices = getattr(control, 'choices', None) # as of gradio 3.41, some items in choices are strings, and some are tuples where the first elem is the string + if choices is not None: + arg_info.choices = [x[0] if isinstance(x, tuple) else x for x in choices] - script.api_info = api_models.ScriptInfo( - name=script.name, - is_img2img=script.is_img2img, - is_alwayson=script.alwayson, - args=api_args, - ) + api_args.append(arg_info) - if script.infotext_fields is not None: - self.infotext_fields += script.infotext_fields + script.api_info = api_models.ScriptInfo( + name=script.name, + is_img2img=script.is_img2img, + is_alwayson=script.alwayson, + args=api_args, + ) - if script.paste_field_names is not None: - self.paste_field_names += script.paste_field_names + if script.infotext_fields is not None: + self.infotext_fields += script.infotext_fields - self.inputs += controls - script.args_to = len(self.inputs) + if script.paste_field_names is not None: + self.paste_field_names += script.paste_field_names - except Exception: - errors.report(f"Error creating UI for {script.name}: ", exc_info=True) + self.inputs += controls + script.args_to = len(self.inputs) def setup_ui_for_section(self, section, scriptlist=None): if scriptlist is None: -- cgit v1.2.3 From 87cd07b3af74c447b02570bf3963ba83ade2e203 Mon Sep 17 00:00:00 2001 From: Nuullll Date: Sat, 2 Dec 2023 15:54:25 +0800 Subject: Fix fp64 --- modules/sd_samplers_timesteps_impl.py | 4 ++-- modules/xpu_specific.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/sd_samplers_timesteps_impl.py b/modules/sd_samplers_timesteps_impl.py index a72daafd..930a64af 100644 --- a/modules/sd_samplers_timesteps_impl.py +++ b/modules/sd_samplers_timesteps_impl.py @@ -11,7 +11,7 @@ from modules.models.diffusion.uni_pc import uni_pc def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta=0.0): alphas_cumprod = model.inner_model.inner_model.alphas_cumprod alphas = alphas_cumprod[timesteps] - alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' else torch.float32) + alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' and x.device.type != 'xpu' else torch.float32) sqrt_one_minus_alphas = torch.sqrt(1 - alphas) sigmas = eta * np.sqrt((1 - alphas_prev.cpu().numpy()) / (1 - alphas.cpu()) * (1 - alphas.cpu() / alphas_prev.cpu().numpy())) @@ -43,7 +43,7 @@ def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta= def plms(model, x, timesteps, extra_args=None, callback=None, disable=None): alphas_cumprod = model.inner_model.inner_model.alphas_cumprod alphas = alphas_cumprod[timesteps] - alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], 
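# A minimal sketch of the dtype guard these hunks introduce: torch.float64 is not
# supported on the mps (Apple) and xpu (Intel) backends, so those devices fall back to
# float32. The helper name best_float_dtype is hypothetical, for illustration only:
import torch

def best_float_dtype(device: torch.device) -> torch.dtype:
    # fp64 kernels are unavailable on mps/xpu; every other backend keeps full precision
    return torch.float32 if device.type in ('mps', 'xpu') else torch.float64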
pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' else torch.float32) + alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' and x.device.type != 'xpu' else torch.float32) sqrt_one_minus_alphas = torch.sqrt(1 - alphas) extra_args = {} if extra_args is None else extra_args diff --git a/modules/xpu_specific.py b/modules/xpu_specific.py index 2df68665..d933c790 100644 --- a/modules/xpu_specific.py +++ b/modules/xpu_specific.py @@ -4,7 +4,7 @@ from modules.sd_hijack_utils import CondFunc has_ipex = False try: import torch - import intel_extension_for_pytorch as ipex + import intel_extension_for_pytorch as ipex # noqa: F401 has_ipex = True except Exception: pass -- cgit v1.2.3 From 4a666381bf98333ba4512db0f0033df5f6a08771 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Dec 2023 12:11:21 +0300 Subject: extras tab batch: actually use original filename preprocessing upscale: do not do an extra upscale step if it's not needed --- modules/postprocessing.py | 4 +++- modules/upscaler.py | 6 +++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/modules/postprocessing.py b/modules/postprocessing.py index fd0c0cc9..0a134ee4 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -60,8 +60,10 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, if opts.use_original_name_batch and name is not None: basename = os.path.splitext(os.path.basename(name))[0] + forced_filename = basename else: basename = '' + forced_filename = None infotext = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in pp.info.items() if v is not None]) @@ -70,7 +72,7 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, pp.image.info["postprocessing"] = infotext if save_output: - images.save_image(pp.image, path=outpath, basename=basename, seed=None, prompt=None, extension=opts.samples_format, info=infotext, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=None) + images.save_image(pp.image, path=outpath, basename=basename, extension=opts.samples_format, info=infotext, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=forced_filename) if extras_mode != 2 or show_extras_results: outputs.append(pp.image) diff --git a/modules/upscaler.py b/modules/upscaler.py index e682bbaa..b256e085 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -57,6 +57,9 @@ class Upscaler: dest_h = int((img.height * scale) // 8 * 8) for _ in range(3): + if img.width >= dest_w and img.height >= dest_h: + break + shape = (img.width, img.height) img = self.do_upscale(img, selected_model) @@ -64,9 +67,6 @@ class Upscaler: if shape == (img.width, img.height): break - if img.width >= dest_w and img.height >= dest_h: - break - if img.width != dest_w or img.height != dest_h: img = img.resize((int(dest_w), int(dest_h)), resample=LANCZOS) -- cgit v1.2.3 From 96871e4f744471177d97e01c49f8587d7f67c125 Mon Sep 17 00:00:00 2001 From: Nuullll Date: Sat, 2 Dec 2023 17:11:11 +0800 Subject: Remove webui-ipex-user.bat --- modules/launch_utils.py | 22 ++++++++++++++++++++++ webui-ipex-user.bat | 19 ------------------- 2 files changed, 22 insertions(+), 19 deletions(-) delete mode 100644 webui-ipex-user.bat diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 
264ec9ca..586cdc7e 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -310,6 +310,26 @@ def requirements_met(requirements_file): def prepare_environment(): torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu118") torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}") + if args.use_ipex: + if platform.system() == "Windows": + # The "Nuullll/intel-extension-for-pytorch" wheels were built from IPEX source for Intel Arc GPU: https://github.com/intel/intel-extension-for-pytorch/tree/xpu-main + # This is NOT an Intel official release so please use it at your own risk!! + # See https://github.com/Nuullll/intel-extension-for-pytorch/releases/tag/v2.0.110%2Bxpu-master%2Bdll-bundle for details. + # + # Strengths (over official IPEX 2.0.110 windows release): + # - AOT build (for Arc GPU only) to eliminate JIT compilation overhead: https://github.com/intel/intel-extension-for-pytorch/issues/399 + # - Bundles minimal oneAPI 2023.2 dependencies into the python wheels, so users don't need to install oneAPI for the whole system. + # - Provides a compatible torchvision wheel: https://github.com/intel/intel-extension-for-pytorch/issues/465 + # Limitation: + # - Only works for python 3.10 + url_prefix = "https://github.com/Nuullll/intel-extension-for-pytorch/releases/download/v2.0.110%2Bxpu-master%2Bdll-bundle" + torch_command = os.environ.get('TORCH_COMMAND', f"pip install {url_prefix}/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64.whl {url_prefix}/torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64.whl {url_prefix}/intel_extension_for_pytorch-2.0.110+gitc6ea20b-cp310-cp310-win_amd64.whl") + else: + # Using official IPEX release for linux since it's already an AOT build. + # However, users still have to install oneAPI toolkit and activate oneAPI environment manually. + # See https://intel.github.io/intel-extension-for-pytorch/index.html#installation for details. + torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://pytorch-extension.intel.com/release-whl/stable/xpu/us/") + torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.0a0 intel-extension-for-pytorch==2.0.110+gitba7f6c1 --extra-index-url {torch_index_url}") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20') @@ -352,6 +372,8 @@ def prepare_environment(): run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True) startup_timer.record("install torch") + if args.use_ipex: + args.skip_torch_cuda_test = True if not args.skip_torch_cuda_test and not check_run_python("import torch; assert torch.cuda.is_available()"): raise RuntimeError( 'Torch is not able to use GPU; ' diff --git a/webui-ipex-user.bat b/webui-ipex-user.bat deleted file mode 100644 index ab25a040..00000000 --- a/webui-ipex-user.bat +++ /dev/null @@ -1,19 +0,0 @@ -@echo off - -set PYTHON= -@REM The "Nuullll/intel-extension-for-pytorch" wheels were built from IPEX source for Intel Arc GPU: https://github.com/intel/intel-extension-for-pytorch/tree/xpu-main -@REM This is NOT an Intel official release so please use it at your own risk!! -@REM See https://github.com/Nuullll/intel-extension-for-pytorch/releases/tag/v2.0.110%2Bxpu-master%2Bdll-bundle for details. 
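# The prepare_environment() branch above boils down to: on Windows, install the
# community-built AOT wheels (python 3.10 only); on Linux, use Intel's official XPU
# package index. A condensed, hypothetical sketch of that selection logic, assuming the
# same environment-variable conventions (the Windows branch also installs the matching
# torchvision and intel_extension_for_pytorch wheels from the same url_prefix):
import os
import platform

def ipex_torch_command() -> str:
    if platform.system() == "Windows":
        # unofficial wheels bundling the minimal oneAPI runtime; use at your own risk
        url_prefix = "https://github.com/Nuullll/intel-extension-for-pytorch/releases/download/v2.0.110%2Bxpu-master%2Bdll-bundle"
        default = f"pip install {url_prefix}/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64.whl"
    else:
        # official Linux release; oneAPI must be installed and activated separately
        index = "https://pytorch-extension.intel.com/release-whl/stable/xpu/us/"
        default = f"pip install torch==2.0.0a0 intel-extension-for-pytorch==2.0.110+gitba7f6c1 --extra-index-url {index}"
    return os.environ.get('TORCH_COMMAND', default)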
-@REM -@REM Strengths (over official IPEX 2.0.110 windows release): -@REM - AOT build (for Arc GPU only) to eliminate JIT compilation overhead: https://github.com/intel/intel-extension-for-pytorch/issues/399 -@REM - Bundles minimal oneAPI 2023.2 dependencies into the python wheels, so users don't need to install oneAPI for the whole system. -@REM - Provides a compatible torchvision wheel: https://github.com/intel/intel-extension-for-pytorch/issues/465 -@REM Limitation: -@REM - Only works for python 3.10 -set "TORCH_COMMAND=pip install https://github.com/Nuullll/intel-extension-for-pytorch/releases/download/v2.0.110%%2Bxpu-master%%2Bdll-bundle/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64.whl https://github.com/Nuullll/intel-extension-for-pytorch/releases/download/v2.0.110%%2Bxpu-master%%2Bdll-bundle/torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64.whl https://github.com/Nuullll/intel-extension-for-pytorch/releases/download/v2.0.110%%2Bxpu-master%%2Bdll-bundle/intel_extension_for_pytorch-2.0.110+gitc6ea20b-cp310-cp310-win_amd64.whl" -set GIT= -set VENV_DIR= -set "COMMANDLINE_ARGS=--use-ipex --skip-torch-cuda-test --skip-version-check --opt-sdp-attention" - -call webui.bat -- cgit v1.2.3 From 11d23e8ca55c097ecfa255a05b63f194e25f08be Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Dec 2023 18:01:11 +0300 Subject: remove Train/Preprocessing tab and put all its functionality into extras batch images mode --- javascript/ui.js | 17 ++ modules/api/api.py | 15 -- modules/api/models.py | 3 - modules/postprocessing.py | 92 +++++++--- modules/scripts_postprocessing.py | 86 ++++++++- modules/shared_options.py | 1 + modules/textual_inversion/preprocess.py | 232 ------------------------ modules/textual_inversion/ui.py | 7 - modules/ui.py | 107 ----------- modules/ui_postprocessing.py | 16 +- modules/ui_toprow.py | 6 +- scripts/postprocessing_caption.py | 30 +++ scripts/postprocessing_codeformer.py | 16 +- scripts/postprocessing_create_flipped_copies.py | 32 ++++ scripts/postprocessing_focal_crop.py | 54 ++++++ scripts/postprocessing_gfpgan.py | 13 +- scripts/postprocessing_split_oversized.py | 71 ++++++++ scripts/postprocessing_upscale.py | 12 ++ scripts/processing_autosized_crop.py | 64 +++++++ 19 files changed, 460 insertions(+), 414 deletions(-) delete mode 100644 modules/textual_inversion/preprocess.py create mode 100644 scripts/postprocessing_caption.py create mode 100644 scripts/postprocessing_create_flipped_copies.py create mode 100644 scripts/postprocessing_focal_crop.py create mode 100644 scripts/postprocessing_split_oversized.py create mode 100644 scripts/processing_autosized_crop.py diff --git a/javascript/ui.js b/javascript/ui.js index 2e262602..410fc44e 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -170,6 +170,23 @@ function submit_img2img() { return res; } +function submit_extras() { + showSubmitButtons('extras', false); + + var id = randomId(); + + requestProgress(id, gradioApp().getElementById('extras_gallery_container'), gradioApp().getElementById('extras_gallery'), function() { + showSubmitButtons('extras', true); + }); + + var res = create_submit_args(arguments); + + res[0] = id; + + console.log(res); + return res; +} + function restoreProgressTxt2img() { showRestoreProgressButton("txt2img", false); var id = localGet("txt2img_task_id"); diff --git a/modules/api/api.py b/modules/api/api.py index 09083874..b3d74e51 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -22,7 +22,6 @@ from modules.api import models from modules.shared 
import opts from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.textual_inversion.textual_inversion import create_embedding, train_embedding -from modules.textual_inversion.preprocess import preprocess from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork from PIL import PngImagePlugin, Image from modules.sd_models_config import find_checkpoint_config_near_filename @@ -235,7 +234,6 @@ class Api: self.add_api_route("/sdapi/v1/refresh-vae", self.refresh_vae, methods=["POST"]) self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse) self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse) - self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=models.PreprocessResponse) self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=models.TrainResponse) self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=models.TrainResponse) self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=models.MemoryResponse) @@ -675,19 +673,6 @@ class Api: finally: shared.state.end() - def preprocess(self, args: dict): - try: - shared.state.begin(job="preprocess") - preprocess(**args) # quick operation unless blip/booru interrogation is enabled - shared.state.end() - return models.PreprocessResponse(info='preprocess complete') - except KeyError as e: - return models.PreprocessResponse(info=f"preprocess error: invalid token: {e}") - except Exception as e: - return models.PreprocessResponse(info=f"preprocess error: {e}") - finally: - shared.state.end() - def train_embedding(self, args: dict): try: shared.state.begin(job="train_embedding") diff --git a/modules/api/models.py b/modules/api/models.py index a0d80af8..33894b3e 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -202,9 +202,6 @@ class TrainResponse(BaseModel): class CreateResponse(BaseModel): info: str = Field(title="Create info", description="Response string from create embedding or hypernetwork task.") -class PreprocessResponse(BaseModel): - info: str = Field(title="Preprocess info", description="Response string from preprocessing task.") - fields = {} for key, metadata in opts.data_labels.items(): value = opts.data.get(key) diff --git a/modules/postprocessing.py b/modules/postprocessing.py index 0a134ee4..3c85a74c 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -6,7 +6,7 @@ from modules import shared, images, devices, scripts, scripts_postprocessing, ui from modules.shared import opts -def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True): +def run_postprocessing(id_task, extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True): devices.torch_gc() shared.state.begin(job="extras") @@ -29,11 +29,7 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, image_list = shared.listfiles(input_dir) for filename in image_list: - try: - image = Image.open(filename) - except Exception: - continue - yield image, filename + yield filename, filename else: assert image, 'image not selected' yield image, None @@ -45,37 +41,85 @@ def 
run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, infotext = '' - for image_data, name in get_images(extras_mode, image, image_folder, input_dir): + data_to_process = list(get_images(extras_mode, image, image_folder, input_dir)) + shared.state.job_count = len(data_to_process) + + for image_placeholder, name in data_to_process: image_data: Image.Image + shared.state.nextjob() shared.state.textinfo = name + shared.state.skipped = False + + if shared.state.interrupted: + break + + if isinstance(image_placeholder, str): + try: + image_data = Image.open(image_placeholder) + except Exception: + continue + else: + image_data = image_placeholder + + shared.state.assign_current_image(image_data) parameters, existing_pnginfo = images.read_info_from_image(image_data) if parameters: existing_pnginfo["parameters"] = parameters - pp = scripts_postprocessing.PostprocessedImage(image_data.convert("RGB")) + initial_pp = scripts_postprocessing.PostprocessedImage(image_data.convert("RGB")) - scripts.scripts_postproc.run(pp, args) + scripts.scripts_postproc.run(initial_pp, args) - if opts.use_original_name_batch and name is not None: - basename = os.path.splitext(os.path.basename(name))[0] - forced_filename = basename - else: - basename = '' - forced_filename = None + if shared.state.skipped: + continue + + used_suffixes = {} + for pp in [initial_pp, *initial_pp.extra_images]: + suffix = pp.get_suffix(used_suffixes) + + if opts.use_original_name_batch and name is not None: + basename = os.path.splitext(os.path.basename(name))[0] + forced_filename = basename + suffix + else: + basename = '' + forced_filename = None + + infotext = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in pp.info.items() if v is not None]) + + if opts.enable_pnginfo: + pp.image.info = existing_pnginfo + pp.image.info["postprocessing"] = infotext + + if save_output: + fullfn, _ = images.save_image(pp.image, path=outpath, basename=basename, extension=opts.samples_format, info=infotext, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=forced_filename, suffix=suffix) - infotext = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in pp.info.items() if v is not None]) + if pp.caption: + caption_filename = os.path.splitext(fullfn)[0] + ".txt" + if os.path.isfile(caption_filename): + with open(caption_filename, encoding="utf8") as file: + existing_caption = file.read().strip() + else: + existing_caption = "" - if opts.enable_pnginfo: - pp.image.info = existing_pnginfo - pp.image.info["postprocessing"] = infotext + action = shared.opts.postprocessing_existing_caption_action + if action == 'Prepend' and existing_caption: + caption = f"{existing_caption} {pp.caption}" + elif action == 'Append' and existing_caption: + caption = f"{pp.caption} {existing_caption}" + elif action == 'Keep' and existing_caption: + caption = existing_caption + else: + caption = pp.caption - if save_output: - images.save_image(pp.image, path=outpath, basename=basename, extension=opts.samples_format, info=infotext, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=forced_filename) + caption = caption.strip() + if caption: + with open(caption_filename, "w", encoding="utf8") as file: + file.write(caption) - if extras_mode != 2 or show_extras_results: - outputs.append(pp.image) + if extras_mode != 2 or 
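# The caption handling above merges a generated caption with any caption file already on
# disk, according to the postprocessing_existing_caption_action option ("Ignore", "Keep",
# "Prepend" or "Append"). A condensed sketch of that decision table; merge_captions is a
# hypothetical free function with the same semantics:
def merge_captions(generated: str, existing: str, action: str) -> str:
    if action == 'Prepend' and existing:
        return f"{existing} {generated}".strip()
    if action == 'Append' and existing:
        return f"{generated} {existing}".strip()
    if action == 'Keep' and existing:
        return existing.strip()
    return generated.strip()  # 'Ignore' (the default): use only the generated caption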
show_extras_results: + outputs.append(pp.image) image_data.close() @@ -99,9 +143,11 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_ "upscaler_2_visibility": extras_upscaler_2_visibility, }, "GFPGAN": { + "enable": True, "gfpgan_visibility": gfpgan_visibility, }, "CodeFormer": { + "enable": True, "codeformer_visibility": codeformer_visibility, "codeformer_weight": codeformer_weight, }, diff --git a/modules/scripts_postprocessing.py b/modules/scripts_postprocessing.py index bac1335d..901cad08 100644 --- a/modules/scripts_postprocessing.py +++ b/modules/scripts_postprocessing.py @@ -1,13 +1,56 @@ +import dataclasses import os import gradio as gr from modules import errors, shared +@dataclasses.dataclass +class PostprocessedImageSharedInfo: + target_width: int = None + target_height: int = None + + class PostprocessedImage: def __init__(self, image): self.image = image self.info = {} + self.shared = PostprocessedImageSharedInfo() + self.extra_images = [] + self.nametags = [] + self.disable_processing = False + self.caption = None + + def get_suffix(self, used_suffixes=None): + used_suffixes = {} if used_suffixes is None else used_suffixes + suffix = "-".join(self.nametags) + if suffix: + suffix = "-" + suffix + + if suffix not in used_suffixes: + used_suffixes[suffix] = 1 + return suffix + + for i in range(1, 100): + proposed_suffix = suffix + "-" + str(i) + + if proposed_suffix not in used_suffixes: + used_suffixes[proposed_suffix] = 1 + return proposed_suffix + + return suffix + + def create_copy(self, new_image, *, nametags=None, disable_processing=False): + pp = PostprocessedImage(new_image) + pp.shared = self.shared + pp.nametags = self.nametags.copy() + pp.info = self.info.copy() + pp.disable_processing = disable_processing + + if nametags is not None: + pp.nametags += nametags + + return pp class ScriptPostprocessing: @@ -42,10 +85,17 @@ class ScriptPostprocessing: pass - def image_changed(self): - pass + def process_firstpass(self, pp: PostprocessedImage, **args): + """ + Called for all scripts before calling process(). Scripts can examine the image here and set fields + of the pp object to communicate things to other scripts. 
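# To illustrate the extra_images / nametags machinery defined above: a minimal,
# hypothetical postprocessing script that emits a tagged copy of each image. The runner
# picks the copy up from extra_images, and get_suffix() names the saved file with a
# "-grayscale" suffix (or "-grayscale-2" and so on when suffixes collide).
from PIL import ImageOps

from modules import scripts_postprocessing


class ScriptPostprocessingGrayscaleCopy(scripts_postprocessing.ScriptPostprocessing):
    name = "Grayscale copy (example)"
    order = 4500

    def ui(self):
        return {}  # no controls, so process() receives no extra arguments

    def process(self, pp: scripts_postprocessing.PostprocessedImage):
        gray = ImageOps.grayscale(pp.image).convert("RGB")
        # disable_processing=True keeps later scripts from re-processing the copy
        pp.extra_images.append(pp.create_copy(gray, nametags=["grayscale"], disable_processing=True))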
+ args contains a dictionary with all values returned by components from ui() + """ + pass + def image_changed(self): + pass def wrap_call(func, filename, funcname, *args, default=None, **kwargs): @@ -118,16 +168,42 @@ class ScriptPostprocessingRunner: return inputs def run(self, pp: PostprocessedImage, args): - for script in self.scripts_in_preferred_order(): - shared.state.job = script.name + scripts = [] + for script in self.scripts_in_preferred_order(): script_args = args[script.args_from:script.args_to] process_args = {} for (name, _component), value in zip(script.controls.items(), script_args): process_args[name] = value - script.process(pp, **process_args) + scripts.append((script, process_args)) + + for script, process_args in scripts: + script.process_firstpass(pp, **process_args) + + all_images = [pp] + + for script, process_args in scripts: + if shared.state.skipped: + break + + shared.state.job = script.name + + for single_image in all_images.copy(): + + if not single_image.disable_processing: + script.process(single_image, **process_args) + + for extra_image in single_image.extra_images: + if not isinstance(extra_image, PostprocessedImage): + extra_image = single_image.create_copy(extra_image) + + all_images.append(extra_image) + + single_image.extra_images.clear() + + pp.extra_images = all_images[1:] def create_args_for_run(self, scripts_args): if not self.ui_created: diff --git a/modules/shared_options.py b/modules/shared_options.py index d8a27180..859dee40 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -357,6 +357,7 @@ options_templates.update(options_section(('postprocessing', "Postprocessing", "p 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), + 'postprocessing_existing_caption_action': OptionInfo("Ignore", "Action for existing captions", gr.Radio, {"choices": ["Ignore", "Keep", "Prepend", "Append"]}).info("when generating captions using postprocessing; Ignore = use generated; Keep = use original; Prepend/Append = combine both"), })) options_templates.update(options_section((None, "Hidden options"), { diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py deleted file mode 100644 index 789fa083..00000000 --- a/modules/textual_inversion/preprocess.py +++ /dev/null @@ -1,232 +0,0 @@ -import os -from PIL import Image, ImageOps -import math -import tqdm - -from modules import shared, images, deepbooru -from modules.textual_inversion import autocrop - - -def preprocess(id_task, process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.15, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, 
process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None): - try: - if process_caption: - shared.interrogator.load() - - if process_caption_deepbooru: - deepbooru.model.start() - - preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug, process_multicrop, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold) - - finally: - - if process_caption: - shared.interrogator.send_blip_to_ram() - - if process_caption_deepbooru: - deepbooru.model.stop() - - -def listfiles(dirname): - return os.listdir(dirname) - - -class PreprocessParams: - src = None - dstdir = None - subindex = 0 - flip = False - process_caption = False - process_caption_deepbooru = False - preprocess_txt_action = None - - -def save_pic_with_caption(image, index, params: PreprocessParams, existing_caption=None): - caption = "" - - if params.process_caption: - caption += shared.interrogator.generate_caption(image) - - if params.process_caption_deepbooru: - if caption: - caption += ", " - caption += deepbooru.model.tag_multi(image) - - filename_part = params.src - filename_part = os.path.splitext(filename_part)[0] - filename_part = os.path.basename(filename_part) - - basename = f"{index:05}-{params.subindex}-{filename_part}" - image.save(os.path.join(params.dstdir, f"{basename}.png")) - - if params.preprocess_txt_action == 'prepend' and existing_caption: - caption = f"{existing_caption} {caption}" - elif params.preprocess_txt_action == 'append' and existing_caption: - caption = f"{caption} {existing_caption}" - elif params.preprocess_txt_action == 'copy' and existing_caption: - caption = existing_caption - - caption = caption.strip() - - if caption: - with open(os.path.join(params.dstdir, f"{basename}.txt"), "w", encoding="utf8") as file: - file.write(caption) - - params.subindex += 1 - - -def save_pic(image, index, params, existing_caption=None): - save_pic_with_caption(image, index, params, existing_caption=existing_caption) - - if params.flip: - save_pic_with_caption(ImageOps.mirror(image), index, params, existing_caption=existing_caption) - - -def split_pic(image, inverse_xy, width, height, overlap_ratio): - if inverse_xy: - from_w, from_h = image.height, image.width - to_w, to_h = height, width - else: - from_w, from_h = image.width, image.height - to_w, to_h = width, height - h = from_h * to_w // from_w - if inverse_xy: - image = image.resize((h, to_w)) - else: - image = image.resize((to_w, h)) - - split_count = math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio))) - y_step = (h - to_h) / (split_count - 1) - for i in range(split_count): - y = int(y_step * i) - if inverse_xy: - splitted = image.crop((y, 0, y + to_h, to_w)) - else: - splitted = image.crop((0, y, to_w, y + to_h)) - yield splitted - -# not using torchvision.transforms.CenterCrop because it doesn't allow float regions -def center_crop(image: Image, w: int, h: int): - iw, ih = image.size - if ih / h < iw / w: - sw = w * ih / h - box = (iw - sw) / 2, 0, iw - (iw - sw) / 2, ih - else: - sh = h * iw / w - box = 0, (ih - sh) / 2, iw, ih - (ih - sh) / 2 - return image.resize((w, h), 
Image.Resampling.LANCZOS, box) - - -def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, threshold): - iw, ih = image.size - err = lambda w, h: 1-(lambda x: x if x < 1 else 1/x)(iw/ih/(w/h)) - wh = max(((w, h) for w in range(mindim, maxdim+1, 64) for h in range(mindim, maxdim+1, 64) - if minarea <= w * h <= maxarea and err(w, h) <= threshold), - key= lambda wh: (wh[0]*wh[1], -err(*wh))[::1 if objective=='Maximize area' else -1], - default=None - ) - return wh and center_crop(image, *wh) - - -def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None): - width = process_width - height = process_height - src = os.path.abspath(process_src) - dst = os.path.abspath(process_dst) - split_threshold = max(0.0, min(1.0, split_threshold)) - overlap_ratio = max(0.0, min(0.9, overlap_ratio)) - - assert src != dst, 'same directory specified as source and destination' - - os.makedirs(dst, exist_ok=True) - - files = listfiles(src) - - shared.state.job = "preprocess" - shared.state.textinfo = "Preprocessing..." - shared.state.job_count = len(files) - - params = PreprocessParams() - params.dstdir = dst - params.flip = process_flip - params.process_caption = process_caption - params.process_caption_deepbooru = process_caption_deepbooru - params.preprocess_txt_action = preprocess_txt_action - - pbar = tqdm.tqdm(files) - for index, imagefile in enumerate(pbar): - params.subindex = 0 - filename = os.path.join(src, imagefile) - try: - img = Image.open(filename) - img = ImageOps.exif_transpose(img) - img = img.convert("RGB") - except Exception: - continue - - description = f"Preprocessing [Image {index}/{len(files)}]" - pbar.set_description(description) - shared.state.textinfo = description - - params.src = filename - - existing_caption = None - existing_caption_filename = f"{os.path.splitext(filename)[0]}.txt" - if os.path.exists(existing_caption_filename): - with open(existing_caption_filename, 'r', encoding="utf8") as file: - existing_caption = file.read() - - if shared.state.interrupted: - break - - if img.height > img.width: - ratio = (img.width * height) / (img.height * width) - inverse_xy = False - else: - ratio = (img.height * width) / (img.width * height) - inverse_xy = True - - process_default_resize = True - - if process_split and ratio < 1.0 and ratio <= split_threshold: - for splitted in split_pic(img, inverse_xy, width, height, overlap_ratio): - save_pic(splitted, index, params, existing_caption=existing_caption) - process_default_resize = False - - if process_focal_crop and img.height != img.width: - - dnn_model_path = None - try: - dnn_model_path = autocrop.download_and_cache_models() - except Exception as e: - print("Unable to load face detection model for auto crop selection. 
Falling back to lower quality haar method.", e) - - autocrop_settings = autocrop.Settings( - crop_width = width, - crop_height = height, - face_points_weight = process_focal_crop_face_weight, - entropy_points_weight = process_focal_crop_entropy_weight, - corner_points_weight = process_focal_crop_edges_weight, - annotate_image = process_focal_crop_debug, - dnn_model_path = dnn_model_path, - ) - for focal in autocrop.crop_image(img, autocrop_settings): - save_pic(focal, index, params, existing_caption=existing_caption) - process_default_resize = False - - if process_multicrop: - cropped = multicrop_pic(img, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold) - if cropped is not None: - save_pic(cropped, index, params, existing_caption=existing_caption) - else: - print(f"skipped {img.width}x{img.height} image {filename} (can't find suitable size within error threshold)") - process_default_resize = False - - if process_keep_original_size: - save_pic(img, index, params, existing_caption=existing_caption) - process_default_resize = False - - if process_default_resize: - img = images.resize_image(1, img, width, height) - save_pic(img, index, params, existing_caption=existing_caption) - - shared.state.nextjob() diff --git a/modules/textual_inversion/ui.py b/modules/textual_inversion/ui.py index 35c4feef..f149ad1f 100644 --- a/modules/textual_inversion/ui.py +++ b/modules/textual_inversion/ui.py @@ -3,7 +3,6 @@ import html import gradio as gr import modules.textual_inversion.textual_inversion -import modules.textual_inversion.preprocess from modules import sd_hijack, shared @@ -15,12 +14,6 @@ def create_embedding(name, initialization_text, nvpt, overwrite_old): return gr.Dropdown.update(choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())), f"Created: {filename}", "" -def preprocess(*args): - modules.textual_inversion.preprocess.preprocess(*args) - - return f"Preprocessing {'interrupted' if shared.state.interrupted else 'finished'}.", "" - - def train_embedding(*args): assert not shared.cmd_opts.lowvram, 'Training models with lowvram not possible' diff --git a/modules/ui.py b/modules/ui.py index 08e0ad77..d80486dd 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -912,71 +912,6 @@ def create_ui(): with gr.Column(): create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork") - with gr.Tab(label="Preprocess images", id="preprocess_images"): - process_src = gr.Textbox(label='Source directory', elem_id="train_process_src") - process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst") - process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width") - process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height") - preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action") - - with gr.Row(): - process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size") - process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip") - process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split") - process_focal_crop = gr.Checkbox(label='Auto focal point crop', 
elem_id="train_process_focal_crop") - process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop") - process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption") - process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru") - - with gr.Row(visible=False) as process_split_extra_row: - process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold") - process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio") - - with gr.Row(visible=False) as process_focal_crop_row: - process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight") - process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") - process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") - process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") - - with gr.Column(visible=False) as process_multicrop_col: - gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') - with gr.Row(): - process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim") - process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim") - with gr.Row(): - process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea") - process_multicrop_maxarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea") - with gr.Row(): - process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective") - process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - with gr.Row(): - interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing") - run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess") - - process_split.change( - fn=lambda show: gr_show(show), - inputs=[process_split], - outputs=[process_split_extra_row], - ) - - process_focal_crop.change( - fn=lambda show: gr_show(show), - inputs=[process_focal_crop], - outputs=[process_focal_crop_row], - ) - - process_multicrop.change( - fn=lambda show: gr_show(show), - inputs=[process_multicrop], - outputs=[process_multicrop_col], - ) - def get_textual_inversion_template_names(): return sorted(textual_inversion.textual_inversion_templates) @@ -1077,42 +1012,6 @@ def create_ui(): ] ) - run_preprocess.click( - fn=wrap_gradio_gpu_call(textual_inversion_ui.preprocess, 
extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - process_src, - process_dst, - process_width, - process_height, - preprocess_txt_action, - process_keep_original_size, - process_flip, - process_split, - process_caption, - process_caption_deepbooru, - process_split_threshold, - process_overlap_ratio, - process_focal_crop, - process_focal_crop_face_weight, - process_focal_crop_entropy_weight, - process_focal_crop_edges_weight, - process_focal_crop_debug, - process_multicrop, - process_multicrop_mindim, - process_multicrop_maxdim, - process_multicrop_minarea, - process_multicrop_maxarea, - process_multicrop_objective, - process_multicrop_threshold, - ], - outputs=[ - ti_output, - ti_outcome, - ], - ) - train_embedding.click( fn=wrap_gradio_gpu_call(textual_inversion_ui.train_embedding, extra_outputs=[gr.update()]), _js="start_training_textual_inversion", @@ -1186,12 +1085,6 @@ def create_ui(): outputs=[], ) - interrupt_preprocessing.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file) settings = ui_settings.UiSettings() diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index 802e1ce7..fbad0800 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -1,9 +1,10 @@ import gradio as gr -from modules import scripts, shared, ui_common, postprocessing, call_queue +from modules import scripts, shared, ui_common, postprocessing, call_queue, ui_toprow import modules.generation_parameters_copypaste as parameters_copypaste def create_ui(): + dummy_component = gr.Label(visible=False) tab_index = gr.State(value=0) with gr.Row(equal_height=False, variant='compact'): @@ -20,11 +21,13 @@ def create_ui(): extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.", elem_id="extras_batch_output_dir") show_extras_results = gr.Checkbox(label='Show result images', value=True, elem_id="extras_show_extras_results") - submit = gr.Button('Generate', elem_id="extras_generate", variant='primary') - script_inputs = scripts.scripts_postproc.setup_ui() with gr.Column(): + toprow = ui_toprow.Toprow(is_compact=True, is_img2img=False, id_part="extras") + toprow.create_inline_toprow_image() + submit = toprow.submit + result_images, html_info_x, html_info, html_log = ui_common.create_output_panel("extras", shared.opts.outdir_extras_samples) tab_single.select(fn=lambda: 0, inputs=[], outputs=[tab_index]) @@ -33,7 +36,9 @@ def create_ui(): submit.click( fn=call_queue.wrap_gradio_gpu_call(postprocessing.run_postprocessing, extra_outputs=[None, '']), + _js="submit_extras", inputs=[ + dummy_component, tab_index, extras_image, image_batch, @@ -45,8 +50,9 @@ def create_ui(): outputs=[ result_images, html_info_x, - html_info, - ] + html_log, + ], + show_progress=False, ) parameters_copypaste.add_paste_fields("extras", extras_image, None) diff --git a/modules/ui_toprow.py b/modules/ui_toprow.py index 985b5a2d..88838f97 100644 --- a/modules/ui_toprow.py +++ b/modules/ui_toprow.py @@ -34,8 +34,10 @@ class Toprow: submit_box = None - def __init__(self, is_img2img, is_compact=False): - id_part = "img2img" if is_img2img else "txt2img" + def __init__(self, is_img2img, is_compact=False, id_part=None): + if id_part is None: + id_part = "img2img" if is_img2img else "txt2img" + self.id_part = id_part self.is_img2img = is_img2img self.is_compact = is_compact diff --git 
a/scripts/postprocessing_caption.py b/scripts/postprocessing_caption.py new file mode 100644 index 00000000..243e3ad9 --- /dev/null +++ b/scripts/postprocessing_caption.py @@ -0,0 +1,30 @@ +from modules import scripts_postprocessing, ui_components, deepbooru, shared +import gradio as gr + + +class ScriptPostprocessingCaption(scripts_postprocessing.ScriptPostprocessing): + name = "Caption" + order = 4000 + + def ui(self): + with ui_components.InputAccordion(False, label="Caption") as enable: + option = gr.CheckboxGroup(value=["Deepbooru"], choices=["Deepbooru", "BLIP"], show_label=False) + + return { + "enable": enable, + "option": option, + } + + def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, option): + if not enable: + return + + captions = [pp.caption] + + if "Deepbooru" in option: + captions.append(deepbooru.model.tag(pp.image)) + + if "BLIP" in option: + captions.append(shared.interrogator.generate_caption(pp.image)) + + pp.caption = ", ".join([x for x in captions if x]) diff --git a/scripts/postprocessing_codeformer.py b/scripts/postprocessing_codeformer.py index a7d80d40..e1e156dd 100644 --- a/scripts/postprocessing_codeformer.py +++ b/scripts/postprocessing_codeformer.py @@ -1,28 +1,28 @@ from PIL import Image import numpy as np -from modules import scripts_postprocessing, codeformer_model +from modules import scripts_postprocessing, codeformer_model, ui_components import gradio as gr -from modules.ui_components import FormRow - class ScriptPostprocessingCodeFormer(scripts_postprocessing.ScriptPostprocessing): name = "CodeFormer" order = 3000 def ui(self): - with FormRow(): - codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, elem_id="extras_codeformer_visibility") - codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, elem_id="extras_codeformer_weight") + with ui_components.InputAccordion(False, label="CodeFormer") as enable: + with gr.Row(): + codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Visibility", value=1.0, elem_id="extras_codeformer_visibility") + codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Weight (0 = maximum effect, 1 = minimum effect)", value=0, elem_id="extras_codeformer_weight") return { + "enable": enable, "codeformer_visibility": codeformer_visibility, "codeformer_weight": codeformer_weight, } - def process(self, pp: scripts_postprocessing.PostprocessedImage, codeformer_visibility, codeformer_weight): - if codeformer_visibility == 0: + def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, codeformer_visibility, codeformer_weight): + if codeformer_visibility == 0 or not enable: return restored_img = codeformer_model.codeformer.restore(np.array(pp.image, dtype=np.uint8), w=codeformer_weight) diff --git a/scripts/postprocessing_create_flipped_copies.py b/scripts/postprocessing_create_flipped_copies.py new file mode 100644 index 00000000..3425571d --- /dev/null +++ b/scripts/postprocessing_create_flipped_copies.py @@ -0,0 +1,32 @@ +from PIL import ImageOps, Image + +from modules import scripts_postprocessing, ui_components +import gradio as gr + + +class ScriptPostprocessingCreateFlippedCopies(scripts_postprocessing.ScriptPostprocessing): + name = "Create flipped copies" + order = 4000 + + def ui(self): + with ui_components.InputAccordion(False, label="Create flipped copies") as enable: + with gr.Row(): + option =
gr.CheckboxGroup(value=["Horizontal"], choices=["Horizontal", "Vertical", "Both"], show_label=False) + + return { + "enable": enable, + "option": option, + } + + def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, option): + if not enable: + return + + if "Horizontal" in option: + pp.extra_images.append(ImageOps.mirror(pp.image)) + + if "Vertical" in option: + pp.extra_images.append(pp.image.transpose(Image.Transpose.FLIP_TOP_BOTTOM)) + + if "Both" in option: + pp.extra_images.append(pp.image.transpose(Image.Transpose.FLIP_TOP_BOTTOM).transpose(Image.Transpose.FLIP_LEFT_RIGHT)) diff --git a/scripts/postprocessing_focal_crop.py b/scripts/postprocessing_focal_crop.py new file mode 100644 index 00000000..d3baf298 --- /dev/null +++ b/scripts/postprocessing_focal_crop.py @@ -0,0 +1,54 @@ + +from modules import scripts_postprocessing, ui_components, errors +import gradio as gr + +from modules.textual_inversion import autocrop + + +class ScriptPostprocessingFocalCrop(scripts_postprocessing.ScriptPostprocessing): + name = "Auto focal point crop" + order = 4000 + + def ui(self): + with ui_components.InputAccordion(False, label="Auto focal point crop") as enable: + face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_face_weight") + entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_entropy_weight") + edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_edges_weight") + debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") + + return { + "enable": enable, + "face_weight": face_weight, + "entropy_weight": entropy_weight, + "edges_weight": edges_weight, + "debug": debug, + } + + def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, face_weight, entropy_weight, edges_weight, debug): + if not enable: + return + + if not pp.shared.target_width or not pp.shared.target_height: + return + + dnn_model_path = None + try: + dnn_model_path = autocrop.download_and_cache_models() + except Exception: + errors.report("Unable to load face detection model for auto crop selection. 
Falling back to lower quality haar method.", exc_info=True) + + autocrop_settings = autocrop.Settings( + crop_width=pp.shared.target_width, + crop_height=pp.shared.target_height, + face_points_weight=face_weight, + entropy_points_weight=entropy_weight, + corner_points_weight=edges_weight, + annotate_image=debug, + dnn_model_path=dnn_model_path, + ) + + result, *others = autocrop.crop_image(pp.image, autocrop_settings) + + pp.image = result + pp.extra_images = [pp.create_copy(x, nametags=["focal-crop-debug"], disable_processing=True) for x in others] + diff --git a/scripts/postprocessing_gfpgan.py b/scripts/postprocessing_gfpgan.py index d854f3f7..6e756605 100644 --- a/scripts/postprocessing_gfpgan.py +++ b/scripts/postprocessing_gfpgan.py @@ -1,26 +1,25 @@ from PIL import Image import numpy as np -from modules import scripts_postprocessing, gfpgan_model +from modules import scripts_postprocessing, gfpgan_model, ui_components import gradio as gr -from modules.ui_components import FormRow - class ScriptPostprocessingGfpGan(scripts_postprocessing.ScriptPostprocessing): name = "GFPGAN" order = 2000 def ui(self): - with FormRow(): - gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, elem_id="extras_gfpgan_visibility") + with ui_components.InputAccordion(False, label="GFPGAN") as enable: + gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Visibility", value=1.0, elem_id="extras_gfpgan_visibility") return { + "enable": enable, "gfpgan_visibility": gfpgan_visibility, } - def process(self, pp: scripts_postprocessing.PostprocessedImage, gfpgan_visibility): - if gfpgan_visibility == 0: + def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, gfpgan_visibility): + if gfpgan_visibility == 0 or not enable: return restored_img = gfpgan_model.gfpgan_fix_faces(np.array(pp.image, dtype=np.uint8)) diff --git a/scripts/postprocessing_split_oversized.py b/scripts/postprocessing_split_oversized.py new file mode 100644 index 00000000..c4a03160 --- /dev/null +++ b/scripts/postprocessing_split_oversized.py @@ -0,0 +1,71 @@ +import math + +from modules import scripts_postprocessing, ui_components +import gradio as gr + + +def split_pic(image, inverse_xy, width, height, overlap_ratio): + if inverse_xy: + from_w, from_h = image.height, image.width + to_w, to_h = height, width + else: + from_w, from_h = image.width, image.height + to_w, to_h = width, height + h = from_h * to_w // from_w + if inverse_xy: + image = image.resize((h, to_w)) + else: + image = image.resize((to_w, h)) + + split_count = math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio))) + y_step = (h - to_h) / (split_count - 1) + for i in range(split_count): + y = int(y_step * i) + if inverse_xy: + splitted = image.crop((y, 0, y + to_h, to_w)) + else: + splitted = image.crop((0, y, to_w, y + to_h)) + yield splitted + + +class ScriptPostprocessingSplitOversized(scripts_postprocessing.ScriptPostprocessing): + name = "Split oversized images" + order = 4000 + + def ui(self): + with ui_components.InputAccordion(False, label="Split oversized images") as enable: + with gr.Row(): + split_threshold = gr.Slider(label='Threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_split_threshold") + overlap_ratio = gr.Slider(label='Overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="postprocess_overlap_ratio") + + return { + "enable": enable, + "split_threshold": split_threshold, + "overlap_ratio": overlap_ratio, + } 
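# A quick worked example of the split_pic() arithmetic above, assuming a 512x1536
# portrait source, a 512x512 target and overlap_ratio 0.2: after the width-normalizing
# resize h = 1536, so split_count = ceil((1536 - 512*0.2) / (512*0.8)) = ceil(3.5) = 4
# crops, placed y_step = (1536 - 512) / 3, about 341 px apart, each overlapping the next.
import math

h, to_h, overlap_ratio = 1536, 512, 0.2
split_count = math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio)))
y_step = (h - to_h) / (split_count - 1)
assert split_count == 4 and round(y_step) == 341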
+ + def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, split_threshold, overlap_ratio): + if not enable: + return + + width = pp.shared.target_width + height = pp.shared.target_height + + if not width or not height: + return + + if pp.image.height > pp.image.width: + ratio = (pp.image.width * height) / (pp.image.height * width) + inverse_xy = False + else: + ratio = (pp.image.height * width) / (pp.image.width * height) + inverse_xy = True + + if ratio >= 1.0 and ratio > split_threshold: + return + + result, *others = split_pic(pp.image, inverse_xy, width, height, overlap_ratio) + + pp.image = result + pp.extra_images = [pp.create_copy(x) for x in others] + diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index eb42a29e..ed709688 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -81,6 +81,14 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): return image + def process_firstpass(self, pp: scripts_postprocessing.PostprocessedImage, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0): + if upscale_mode == 1: + pp.shared.target_width = upscale_to_width + pp.shared.target_height = upscale_to_height + else: + pp.shared.target_width = int(pp.image.width * upscale_by) + pp.shared.target_height = int(pp.image.height * upscale_by) + def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0): if upscaler_1_name == "None": upscaler_1_name = None @@ -126,6 +134,10 @@ class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale): "upscaler_name": upscaler_name, } + def process_firstpass(self, pp: scripts_postprocessing.PostprocessedImage, upscale_by=2.0, upscaler_name=None): + pp.shared.target_width = int(pp.image.width * upscale_by) + pp.shared.target_height = int(pp.image.height * upscale_by) + def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_by=2.0, upscaler_name=None): if upscaler_name is None or upscaler_name == "None": return diff --git a/scripts/processing_autosized_crop.py b/scripts/processing_autosized_crop.py new file mode 100644 index 00000000..c0980226 --- /dev/null +++ b/scripts/processing_autosized_crop.py @@ -0,0 +1,64 @@ +from PIL import Image + +from modules import scripts_postprocessing, ui_components +import gradio as gr + + +def center_crop(image: Image, w: int, h: int): + iw, ih = image.size + if ih / h < iw / w: + sw = w * ih / h + box = (iw - sw) / 2, 0, iw - (iw - sw) / 2, ih + else: + sh = h * iw / w + box = 0, (ih - sh) / 2, iw, ih - (ih - sh) / 2 + return image.resize((w, h), Image.Resampling.LANCZOS, box) + + +def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, threshold): + iw, ih = image.size + err = lambda w, h: 1 - (lambda x: x if x < 1 else 1 / x)(iw / ih / (w / h)) + wh = max(((w, h) for w in range(mindim, maxdim + 1, 64) for h in range(mindim, maxdim + 1, 64) + if minarea <= w * h <= maxarea and err(w, h) <= threshold), + key=lambda wh: (wh[0] * wh[1], -err(*wh))[::1 if objective == 'Maximize area' else -1], + default=None + ) + return wh and center_crop(image, *wh) + + +class ScriptPostprocessingAutosizedCrop(scripts_postprocessing.ScriptPostprocessing): + name = "Auto-sized crop" + order = 4000 + + 
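# The multicrop_pic() helper above runs a grid search over 64-pixel-aligned (w, h)
# pairs, keeps only candidates inside the area bounds whose aspect-ratio error is below
# the threshold, then picks either the largest area or the smallest error. A small
# worked check of its error metric, for illustration: a 1024x768 source (4:3) scored
# against a hypothetical 640x448 candidate.
iw, ih, w, h = 1024, 768, 640, 448
ratio = iw / ih / (w / h)            # 1.3333 / 1.4286, about 0.9333
err = 1 - (ratio if ratio < 1 else 1 / ratio)
assert abs(err - 0.0667) < 1e-3      # about 6.7% aspect-ratio distortion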
def ui(self): + with ui_components.InputAccordion(False, label="Auto-sized crop") as enable: + gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') + with gr.Row(): + mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="postprocess_multicrop_mindim") + maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="postprocess_multicrop_maxdim") + with gr.Row(): + minarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area lower bound", value=64 * 64, elem_id="postprocess_multicrop_minarea") + maxarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area upper bound", value=640 * 640, elem_id="postprocess_multicrop_maxarea") + with gr.Row(): + objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="postprocess_multicrop_objective") + threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="postprocess_multicrop_threshold") + + return { + "enable": enable, + "mindim": mindim, + "maxdim": maxdim, + "minarea": minarea, + "maxarea": maxarea, + "objective": objective, + "threshold": threshold, + } + + def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, mindim, maxdim, minarea, maxarea, objective, threshold): + if not enable: + return + + cropped = multicrop_pic(pp.image, mindim, maxdim, minarea, maxarea, objective, threshold) + if cropped is not None: + pp.image = cropped + else: + print(f"skipped {pp.image.width}x{pp.image.height} image (can't find suitable size within error threshold)") -- cgit v1.2.3 From a5f61aa8c5933d8e5a0e0aa841138eeaccd86d62 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Dec 2023 18:03:34 +0300 Subject: potential fix for #14172 --- modules/sd_hijack.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 3d340fc9..14fe62c7 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -38,6 +38,10 @@ ldm.models.diffusion.ddpm.print = shared.ldm_print optimizers = [] current_optimizer: sd_hijack_optimizations.SdOptimization = None +ldm_original_forward = patches.patch(__file__, ldm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sd_unet.UNetModel_forward) +sgm_original_forward = patches.patch(__file__, sgm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sd_unet.UNetModel_forward) + + def list_optimizers(): new_optimizers = script_callbacks.list_optimizers_callback() @@ -255,9 +259,6 @@ class StableDiffusionModelHijack: import modules.models.diffusion.ddpm_edit - ldm_original_forward = patches.patch(__file__, ldm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sd_unet.UNetModel_forward) - sgm_original_forward = patches.patch(__file__, sgm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sd_unet.UNetModel_forward) - if isinstance(m, ldm.models.diffusion.ddpm.LatentDiffusion): sd_unet.original_forward = ldm_original_forward elif isinstance(m, modules.models.diffusion.ddpm_edit.LatentDiffusion): @@ -303,11 +304,6 @@ class StableDiffusionModelHijack: self.layers = None self.clip = None - patches.undo(__file__, ldm.modules.diffusionmodules.openaimodel.UNetModel, "forward") - patches.undo(__file__, sgm.modules.diffusionmodules.openaimodel.UNetModel, "forward") - - sd_unet.original_forward = None - def apply_circular(self, enable): if 
self.circular_enabled == enable: -- cgit v1.2.3 From ac02216e540cd581f9169c6c791e55721e3117b0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Dec 2023 19:35:47 +0300 Subject: alternate implementation for unet forward replacement that does not depend on hijack being applied --- modules/sd_hijack.py | 7 +++++-- modules/sd_unet.py | 14 ++++++++------ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 14fe62c7..e139d996 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -38,8 +38,11 @@ ldm.models.diffusion.ddpm.print = shared.ldm_print optimizers = [] current_optimizer: sd_hijack_optimizations.SdOptimization = None -ldm_original_forward = patches.patch(__file__, ldm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sd_unet.UNetModel_forward) -sgm_original_forward = patches.patch(__file__, sgm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sd_unet.UNetModel_forward) +ldm_patched_forward = sd_unet.create_unet_forward(ldm.modules.diffusionmodules.openaimodel.UNetModel.forward) +ldm_original_forward = patches.patch(__file__, ldm.modules.diffusionmodules.openaimodel.UNetModel, "forward", ldm_patched_forward) + +sgm_patched_forward = sd_unet.create_unet_forward(sgm.modules.diffusionmodules.openaimodel.UNetModel.forward) +sgm_original_forward = patches.patch(__file__, sgm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sgm_patched_forward) def list_optimizers(): diff --git a/modules/sd_unet.py b/modules/sd_unet.py index 6a7bc9e2..a771849c 100644 --- a/modules/sd_unet.py +++ b/modules/sd_unet.py @@ -5,8 +5,7 @@ from modules import script_callbacks, shared, devices unet_options = [] current_unet_option = None current_unet = None -original_forward = None - +original_forward = None # not used, only left temporarily for compatibility def list_unets(): new_unets = script_callbacks.list_unets_callback() @@ -84,9 +83,12 @@ class SdUnet(torch.nn.Module): pass -def UNetModel_forward(self, x, timesteps=None, context=None, *args, **kwargs): - if current_unet is not None: - return current_unet.forward(x, timesteps, context, *args, **kwargs) +def create_unet_forward(original_forward): + def UNetModel_forward(self, x, timesteps=None, context=None, *args, **kwargs): + if current_unet is not None: + return current_unet.forward(x, timesteps, context, *args, **kwargs) + + return original_forward(self, x, timesteps, context, *args, **kwargs) - return original_forward(self, x, timesteps, context, *args, **kwargs) + return UNetModel_forward -- cgit v1.2.3 From 83e8c322762c545fd589c060811379582926060f Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sat, 2 Dec 2023 13:30:53 -0500 Subject: Fix `save_samples` being checked early when saving masked composite --- modules/processing.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 5ab6ddde..4f265801 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -938,14 +938,14 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if opts.enable_pnginfo: image.info["parameters"] = text output_images.append(image) - if save_samples and hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]): + if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, 
opts.return_mask, opts.return_mask_composite]): image_mask = p.mask_for_overlay.convert('RGB') image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA') - if opts.save_mask: + if save_samples and opts.save_mask: images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask") - if opts.save_mask_composite: + if save_samples and opts.save_mask_composite: images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask-composite") if opts.return_mask: -- cgit v1.2.3 From 9528d66c9479d02c83b8db6107f6b0cb741612dc Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sat, 2 Dec 2023 14:56:26 -0500 Subject: Re-add setting lost as part of e294e46 --- modules/shared_options.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/shared_options.py b/modules/shared_options.py index 859dee40..e5de0d01 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -255,6 +255,7 @@ options_templates.update(options_section(('ui_prompt_editing', "Prompt editing", "keyedit_precision_attention": OptionInfo(0.1, "Precision for (attention:1.1) when editing the prompt with Ctrl+up/down", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_precision_extra": OptionInfo(0.05, "Precision for when editing the prompt with Ctrl+up/down", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_delimiters": OptionInfo(r".,\/!?%^*;:{}=`~() ", "Word delimiters when editing the prompt with Ctrl+up/down"), + "keyedit_delimiters_whitespace": OptionInfo(["Tab", "Carriage Return", "Line Feed"], "Ctrl+up/down whitespace delimiters", gr.CheckboxGroup, lambda: {"choices": ["Tab", "Carriage Return", "Line Feed"]}), "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), })) -- cgit v1.2.3 From d3fdc4af61b7560eede52290e1ede48185680089 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 3 Dec 2023 18:22:00 +0900 Subject: rework mask and mask_composite logic --- modules/processing.py | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 4f265801..6f01c95f 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -938,21 +938,20 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if opts.enable_pnginfo: image.info["parameters"] = text output_images.append(image) - if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]): - image_mask = p.mask_for_overlay.convert('RGB') - image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA') - - if save_samples and opts.save_mask: - images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask") - - if save_samples and opts.save_mask_composite: - images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, 
suffix="-mask-composite") - - if opts.return_mask: - output_images.append(image_mask) - - if opts.return_mask_composite: - output_images.append(image_mask_composite) + if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay: + if opts.return_mask or opts.save_mask: + image_mask = p.mask_for_overlay.convert('RGB') + if save_samples and opts.save_mask: + images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask") + if opts.return_mask: + output_images.append(image_mask) + + if opts.return_mask_composite or opts.save_mask_composite: + image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA') + if save_samples and opts.save_mask_composite: + images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask-composite") + if opts.return_mask_composite: + output_images.append(image_mask_composite) del x_samples_ddim -- cgit v1.2.3 From d92ce145bba714c5b257b9853aa22681233651b8 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Sun, 3 Dec 2023 16:50:20 +0200 Subject: Add import_hook hack to work around basicsr incompatibility Fixes #13985 --- modules/import_hook.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/modules/import_hook.py b/modules/import_hook.py index 28c67dfa..eba9a372 100644 --- a/modules/import_hook.py +++ b/modules/import_hook.py @@ -3,3 +3,14 @@ import sys # this will break any attempt to import xformers which will prevent stability diffusion repo from trying to use it if "--xformers" not in "".join(sys.argv): sys.modules["xformers"] = None + +# Hack to fix a changed import in torchvision 0.17+, which otherwise breaks +# basicsr; see https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/13985 +try: + import torchvision.transforms.functional_tensor # noqa: F401 +except ImportError: + try: + import torchvision.transforms.functional as functional + sys.modules["torchvision.transforms.functional_tensor"] = functional + except ImportError: + pass # shrug... -- cgit v1.2.3 From 639ccf254bd4d072f33333abb1ada3d08aaab470 Mon Sep 17 00:00:00 2001 From: illtellyoulater <3078931+illtellyoulater@users.noreply.github.com> Date: Mon, 4 Dec 2023 02:35:35 +0000 Subject: Update launch_utils.py to fix wrong dep. checks and reinstalls Fixes failing dependency checks for extensions having a different package name and import name (for example ffmpeg-python / ffmpeg), which currently is causing the unneeded reinstall of packages at runtime. In fact with current code, the same string is used when installing a package and when checking for its presence, as you can see in the following example: > launch_utils.run_pip("install ffmpeg-python", "required package") [ Installing required package: "ffmpeg-python" ... ] [ Installed ] > launch_utils.is_installed("ffmpeg-python") False ... 
which would actually return true with: > launch_utils.is_installed("ffmpeg") True --- modules/launch_utils.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 6e54d063..6664c5e0 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -6,6 +6,7 @@ import os import shutil import sys import importlib.util +import importlib.metadata import platform import json from functools import lru_cache @@ -119,11 +120,16 @@ def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_ def is_installed(package): try: - spec = importlib.util.find_spec(package) - except ModuleNotFoundError: - return False + dist = importlib.metadata.distribution(package) + except importlib.metadata.PackageNotFoundError: + try: + spec = importlib.util.find_spec(package) + except ModuleNotFoundError: + return False - return spec is not None + return spec is not None + + return dist is not None def repo_dir(name): -- cgit v1.2.3 From 06725af40b94a146c56e693a47cbec6d0af55396 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sun, 3 Dec 2023 21:26:12 -0700 Subject: Lint --- modules/launch_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 6664c5e0..e71edd01 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -120,12 +120,12 @@ def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_ def is_installed(package): try: - dist = importlib.metadata.distribution(package) + dist = importlib.metadata.distribution(package) except importlib.metadata.PackageNotFoundError: - try: + try: spec = importlib.util.find_spec(package) except ModuleNotFoundError: - return False + return False return spec is not None -- cgit v1.2.3 From 9e1f3feb12a7cfe4fd426dd3df5431c805746ecc Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 4 Dec 2023 09:15:19 +0300 Subject: make webui not crash when running with --disable-all-extensions option --- modules/models/diffusion/ddpm_edit.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py index b892d5fc..6db340da 100644 --- a/modules/models/diffusion/ddpm_edit.py +++ b/modules/models/diffusion/ddpm_edit.py @@ -24,10 +24,15 @@ from pytorch_lightning.utilities.distributed import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution -from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL +from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler +try: + from ldm.models.autoencoder import VQModelInterface +except Exception: + class VQModelInterface: + pass __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', -- cgit v1.2.3 From 48fae7ccdc2fe2d2ba8e8cfcb17b56028734e570 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 4 Dec 2023 09:35:52 +0300 Subject: update changelog --- CHANGELOG.md | 162 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 162 insertions(+) diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 2c72359f..67429bbf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,165 @@ +## 1.7.0 + +### Features: +* settings tab rework: add search field, add categories, split UI settings page into many +* add altdiffusion-m18 support ([#13364](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13364)) +* support inference with LyCORIS GLora networks ([#13610](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13610)) +* add lora-embedding bundle system ([#13568](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13568)) +* option to move prompt from top row into generation parameters +* add support for SSD-1B ([#13865](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13865)) +* support inference with OFT networks ([#13692](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13692)) +* script metadata and DAG sorting mechanism ([#13944](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13944)) +* support HyperTile optimization ([#13948](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13948)) +* add support for SD 2.1 Turbo ([#14170](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14170)) +* remove Train->Preprocessing tab and put all its functionality into Extras tab +* initial IPEX support for Intel Arc GPU ([#14171](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14171)) + +### Minor: +* allow reading model hash from images in img2img batch mode ([#12767](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12767)) +* add option to align with sgm repo's sampling implementation ([#12818](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12818)) +* extra field for lora metadata viewer: `ss_output_name` ([#12838](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12838)) +* add action in settings page to calculate all SD checkpoint hashes ([#12909](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12909)) +* add button to copy prompt to style editor ([#12975](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12975)) +* add --skip-load-model-at-start option ([#13253](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13253)) +* write infotext to gif images +* read infotext from gif images ([#13068](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13068)) +* allow configuring the initial state of InputAccordion in ui-config.json ([#13189](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13189)) +* allow editing whitespace delimiters for ctrl+up/ctrl+down prompt editing ([#13444](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13444)) +* prevent accidentally closing popup dialogs ([#13480](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13480)) +* added option to play notification sound or not ([#13631](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13631)) +* show the preview image in the full screen image viewer if available ([#13459](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13459)) +* support for webui.settings.bat ([#13638](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13638)) +* add an option to not print stack traces on ctrl+c +* start/restart generation by Ctrl (Alt) + Enter ([#13644](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13644)) +* update prompts_from_file script to allow concatenating entries with the general prompt 
([#13733](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13733)) +* added a visible checkbox to input accordion +* added an option to hide all txt2img/img2img parameters in an accordion ([#13826](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13826)) +* added 'Path' sorting option for Extra network cards ([#13968](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13968)) +* enable prompt hotkeys in style editor ([#13931](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13931)) +* option to show batch img2img results in UI ([#14009](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14009)) +* infotext updates: add option to disregard certain infotext fields, add option to not include VAE in infotext, add explanation to infotext settings page, move some options to infotext settings page +* add FP32 fallback support on sd_vae_approx ([#14046](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046)) +* support XYZ scripts / split hires path from unet ([#14126](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14126)) +* allow use of multiple styles csv files ([#14125](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14125)) + +### Extensions and API: +* update gradio to 3.41.2 +* support installed extensions list api ([#12774](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12774)) +* update pnginfo API to return dict with parsed values +* add noisy latent to `ExtraNoiseParams` for callback ([#12856](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12856)) +* show extension datetime in UTC ([#12864](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12864), [#12865](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12865), [#13281](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13281)) +* add an option to choose how to combine hires fix and refiner +* include program version in info response. 
([#13135](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13135)) +* sd_unet support for SDXL +* patch DDPM.register_betas so that users can put given_betas in model yaml ([#13276](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13276)) +* xyz_grid: add prepare ([#13266](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13266)) +* allow multiple localization files with same language in extensions ([#13077](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13077)) +* add onEdit function for js and rework token-counter.js to use it +* fix the key error exception when processing override_settings keys ([#13567](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13567)) +* ability for extensions to return custom data via api in response.images ([#13463](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13463)) +* call state.jobnext() before postproces*() ([#13762](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13762)) +* add option to set notification sound volume ([#13884](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13884)) +* update Ruff to 0.1.6 ([#14059](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14059)) +* add Block component creation callback ([#14119](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14119)) +* catch uncaught exception with ui creation scripts ([#14120](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14120)) +* use extension name for determining an extension is installed in the index ([#14063](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14063)) +* update is_installed() from launch_utils.py to fix reinstalling already installed packages ([#14192](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14192)) + +### Bug Fixes: +* fix pix2pix producing bad results +* fix defaults settings page breaking when any of main UI tabs are hidden +* fix error that causes some extra networks to be disabled if both and are present in the prompt +* fix for Reload UI function: if you reload UI on one tab, other opened tabs will no longer stop working +* prevent duplicate resize handler ([#12795](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12795)) +* small typo: vae resolve bug ([#12797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12797)) +* hide broken image crop tool ([#12792](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12792)) +* don't show hidden samplers in dropdown for XYZ script ([#12780](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12780)) +* fix style editing dialog breaking if it's opened in both img2img and txt2img tabs +* hide --gradio-auth and --api-auth values from /internal/sysinfo report +* add missing infotext for RNG in options ([#12819](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12819)) +* fix notification not playing when built-in webui tab is inactive ([#12834](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12834)) +* honor `--skip-install` for extension installers ([#12832](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12832)) +* don't print blank stdout in extension installers ([#12833](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12833), [#12855](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12855)) +* get progressbar to display correctly in extensions tab +* keep order in list of checkpoints when loading model that doesn't have a checksum +* fix inpainting 
models in txt2img creating black pictures +* fix generation params regex ([#12876](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12876)) +* fix batch img2img output dir with script ([#12926](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12926)) +* fix #13080 - Hypernetwork/TI preview generation ([#13084](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13084)) +* fix bug with sigma min/max overrides. ([#12995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12995)) +* more accurate check for enabling cuDNN benchmark on 16XX cards ([#12924](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12924)) +* don't use multicond parser for negative prompt counter ([#13118](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13118)) +* fix data-sort-name containing spaces ([#13412](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13412)) +* update card on correct tab when editing metadata ([#13411](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13411)) +* fix viewing/editing metadata when filename contains an apostrophe ([#13395](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13395)) +* fix: --sd_model in "Prompts from file or textbox" script is not working ([#13302](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13302)) +* better Support for Portable Git ([#13231](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13231)) +* fix issues when webui_dir is not work_dir ([#13210](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13210)) +* fix: lora-bias-backup don't reset cache ([#13178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13178)) +* account for customizable extra network separators when removing extra network text from the prompt ([#12877](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12877)) +* re fix batch img2img output dir with script ([#13170](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13170)) +* fix `--ckpt-dir` path separator and option use `short name` for checkpoint dropdown ([#13139](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13139)) +* consolidated allowed preview formats, Fix extra network `.gif` not working as preview ([#13121](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13121)) +* fix venv_dir=- environment variable not working as expected on linux ([#13469](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13469)) +* repair unload sd checkpoint button +* edit-attention fixes ([#13533](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13533)) +* fix bug when using --gfpgan-models-path ([#13718](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13718)) +* properly apply sort order for extra network cards when selected from dropdown +* fixes generation restart not working for some users when 'Ctrl+Enter' is pressed ([#13962](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13962)) +* thread safe extra network list_items ([#13014](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13014)) +* fix not able to exit metadata popup when pop up is too big ([#14156](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14156)) +* fix auto focal point crop for opencv >= 4.8 ([#14121](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14121)) +* make 'use-cpu all' actually apply to 'all' ([#14131](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14131)) +* extras tab batch: 
actually use original filename +* make webui not crash when running with --disable-all-extensions option + +### Other: +* non-local condition ([#12814](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12814)) +* fix minor typos ([#12827](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12827)) +* remove xformers Python version check ([#12842](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12842)) +* style: file-metadata word-break ([#12837](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12837)) +* revert SGM noise multiplier change for img2img because it breaks hires fix +* do not change quicksettings dropdown option when value returned is `None` ([#12854](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12854)) +* [RC 1.6.0 - zoom is partly hidden] Update style.css ([#12839](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12839)) +* chore: change extension time format ([#12851](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12851)) +* WEBUI.SH - Use torch 2.1.0 release candidate for Navi 3 ([#12929](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12929)) +* add Fallback at images.read_info_from_image if exif data was invalid ([#13028](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13028)) +* update cmd arg description ([#12986](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12986)) +* fix: update shared.opts.data when add_option ([#12957](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12957), [#13213](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13213)) +* restore missing tooltips ([#12976](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12976)) +* use default dropdown padding on mobile ([#12880](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12880)) +* put enable console prompts option into settings from commandline args ([#13119](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13119)) +* fix some deprecated types ([#12846](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12846)) +* bump to torchsde==0.2.6 ([#13418](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13418)) +* update dragdrop.js ([#13372](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13372)) +* use orderdict as lru cache:opt/bug ([#13313](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13313)) +* XYZ if not include sub grids do not save sub grid ([#13282](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13282)) +* initialize state.time_start before state.job_count ([#13229](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13229)) +* fix fieldname regex ([#13458](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13458)) +* change denoising_strength default to None. 
([#13466](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13466)) +* fix regression ([#13475](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13475)) +* fix IndexError ([#13630](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13630)) +* fix: checkpoints_loaded:{checkpoint:state_dict}, model.load_state_dict issue in dict value empty ([#13535](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13535)) +* update bug_report.yml ([#12991](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12991)) +* requirements_versions httpx==0.24.1 ([#13839](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13839)) +* fix parenthesis auto selection ([#13829](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13829)) +* fix #13796 ([#13797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13797)) +* corrected a typo in `modules/cmd_args.py` ([#13855](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13855)) +* feat: fix randn found element of type float at pos 2 ([#14004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14004)) +* adds tqdm handler to logging_config.py for progress bar integration ([#13996](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13996)) +* hotfix: call shared.state.end() after postprocessing done ([#13977](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13977)) +* fix dependency address patch 1 ([#13929](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13929)) +* save sysinfo as .json ([#14035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14035)) +* move exception_records related methods to errors.py ([#14084](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14084)) +* compatibility ([#13936](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13936)) +* json.dump(ensure_ascii=False) ([#14108](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14108)) +* dir buttons start with / so only the correct dir will be shown and no… ([#13957](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13957)) +* alternate implementation for unet forward replacement that does not depend on hijack being applied +* re-add `keyedit_delimiters_whitespace` setting lost as part of commit e294e46 ([#14178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14178)) +* fix `save_samples` being checked early when saving masked composite ([#14177](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14177)) +* slight optimization for mask and mask_composite ([#14181](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14181)) +* add import_hook hack to work around basicsr/torchvision incompatibility ([#14186](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14186)) + ## 1.6.1 ### Bug Fixes: -- cgit v1.2.3 From 24dae9bc4cc03a30236957d9c35d37aed79f6f5d Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 4 Dec 2023 12:36:41 +0300 Subject: repair old handler for postprocessing API --- modules/postprocessing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/postprocessing.py b/modules/postprocessing.py index 3c85a74c..d166f859 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -153,4 +153,4 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_ }, }) - return run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, 
save_output=save_output) + return run_postprocessing("", extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output=save_output) -- cgit v1.2.3 From 81105ee0135f1c475920bf44d3a04fc181aed29e Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 4 Dec 2023 13:11:00 +0300 Subject: repair old handler for postprocessing API in a way that doesn't break interface --- modules/postprocessing.py | 8 ++++++-- modules/ui_postprocessing.py | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/modules/postprocessing.py b/modules/postprocessing.py index d166f859..0c59fad4 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -6,7 +6,7 @@ from modules import shared, images, devices, scripts, scripts_postprocessing, ui from modules.shared import opts -def run_postprocessing(id_task, extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True): +def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True): devices.torch_gc() shared.state.begin(job="extras") @@ -128,6 +128,10 @@ def run_postprocessing(id_task, extras_mode, image, image_folder, input_dir, out return outputs, ui_common.plaintext_to_html(infotext), '' +def run_postprocessing_webui(id_task, *args, **kwargs): + return run_postprocessing(*args, **kwargs) + + def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_first: bool, save_output: bool = True): """old handler for API""" @@ -153,4 +157,4 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_ }, }) - return run_postprocessing("", extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output=save_output) + return run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output=save_output) diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index fbad0800..13d888e4 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -35,7 +35,7 @@ def create_ui(): tab_batch_dir.select(fn=lambda: 2, inputs=[], outputs=[tab_index]) submit.click( - fn=call_queue.wrap_gradio_gpu_call(postprocessing.run_postprocessing, extra_outputs=[None, '']), + fn=call_queue.wrap_gradio_gpu_call(postprocessing.run_postprocessing_webui, extra_outputs=[None, '']), _js="submit_extras", inputs=[ dummy_component, -- cgit v1.2.3 From 368d66c9ccca0270cc64d6a64d22bfa562f28361 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 4 Dec 2023 15:56:03 +0300 Subject: add hypertile infotext --- .../hypertile/scripts/hypertile_script.py | 53 +++++++++++++++++----- 1 file changed, 42 insertions(+), 11 deletions(-) diff --git a/extensions-builtin/hypertile/scripts/hypertile_script.py b/extensions-builtin/hypertile/scripts/hypertile_script.py index d3ab6091..395d584b 100644 --- a/extensions-builtin/hypertile/scripts/hypertile_script.py +++ b/extensions-builtin/hypertile/scripts/hypertile_script.py @@ -17,11 +17,42 @@ class ScriptHypertile(scripts.Script): configure_hypertile(p.width, p.height, enable_unet=shared.opts.hypertile_enable_unet) + self.add_infotext(p) + def before_hr(self, p, 
*args): + + enable = shared.opts.hypertile_enable_unet_secondpass or shared.opts.hypertile_enable_unet + # exclusive hypertile seed for the second pass - if not shared.opts.hypertile_enable_unet: + if enable: hypertile.set_hypertile_seed(p.all_seeds[0]) - configure_hypertile(p.hr_upscale_to_x, p.hr_upscale_to_y, enable_unet=shared.opts.hypertile_enable_unet_secondpass) + + configure_hypertile(p.hr_upscale_to_x, p.hr_upscale_to_y, enable_unet=enable) + + if enable and not shared.opts.hypertile_enable_unet: + p.extra_generation_params["Hypertile U-Net second pass"] = True + + self.add_infotext(p, add_unet_params=True) + + def add_infotext(self, p, add_unet_params=False): + def option(name): + value = getattr(shared.opts, name) + default_value = shared.opts.get_default(name) + return None if value == default_value else value + + if shared.opts.hypertile_enable_unet: + p.extra_generation_params["Hypertile U-Net"] = True + + if shared.opts.hypertile_enable_unet or add_unet_params: + p.extra_generation_params["Hypertile U-Net max depth"] = option('hypertile_max_depth_unet') + p.extra_generation_params["Hypertile U-Net max tile size"] = option('hypertile_max_tile_unet') + p.extra_generation_params["Hypertile U-Net swap size"] = option('hypertile_swap_size_unet') + + if shared.opts.hypertile_enable_vae: + p.extra_generation_params["Hypertile VAE"] = True + p.extra_generation_params["Hypertile VAE max depth"] = option('hypertile_max_depth_vae') + p.extra_generation_params["Hypertile VAE max tile size"] = option('hypertile_max_tile_vae') + p.extra_generation_params["Hypertile VAE swap size"] = option('hypertile_swap_size_vae') def configure_hypertile(width, height, enable_unet=True): @@ -57,16 +88,16 @@ def on_ui_settings(): benefit. """), - "hypertile_enable_unet": shared.OptionInfo(False, "Enable Hypertile U-Net").info("noticeable change in details of the generated picture; if enabled, overrides the setting below"), - "hypertile_enable_unet_secondpass": shared.OptionInfo(False, "Enable Hypertile U-Net for hires fix second pass"), - "hypertile_max_depth_unet": shared.OptionInfo(3, "Hypertile U-Net max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}), - "hypertile_max_tile_unet": shared.OptionInfo(256, "Hypertile U-net max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}), - "hypertile_swap_size_unet": shared.OptionInfo(3, "Hypertile U-net swap size", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}), + "hypertile_enable_unet": shared.OptionInfo(False, "Enable Hypertile U-Net", infotext="Hypertile U-Net").info("enables hypertile for all modes, including hires fix second pass; noticeable change in details of the generated picture"), + "hypertile_enable_unet_secondpass": shared.OptionInfo(False, "Enable Hypertile U-Net for hires fix second pass", infotext="Hypertile U-Net second pass").info("enables hypertile just for hires fix second pass - regardless of whether the above setting is enabled"), + "hypertile_max_depth_unet": shared.OptionInfo(3, "Hypertile U-Net max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile U-Net max depth").info("larger = more neural network layers affected; minor effect on performance"), + "hypertile_max_tile_unet": shared.OptionInfo(256, "Hypertile U-Net max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile U-Net max tile size").info("larger = worse performance"), + "hypertile_swap_size_unet": shared.OptionInfo(3, "Hypertile U-Net swap size", gr.Slider, {"minimum": 0, "maximum": 
64, "step": 1}, infotext="Hypertile U-Net swap size"), - "hypertile_enable_vae": shared.OptionInfo(False, "Enable Hypertile VAE").info("minimal change in the generated picture"), - "hypertile_max_depth_vae": shared.OptionInfo(3, "Hypertile VAE max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}), - "hypertile_max_tile_vae": shared.OptionInfo(128, "Hypertile VAE max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}), - "hypertile_swap_size_vae": shared.OptionInfo(3, "Hypertile VAE swap size ", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}), + "hypertile_enable_vae": shared.OptionInfo(False, "Enable Hypertile VAE", infotext="Hypertile VAE").info("minimal change in the generated picture"), + "hypertile_max_depth_vae": shared.OptionInfo(3, "Hypertile VAE max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile VAE max depth"), + "hypertile_max_tile_vae": shared.OptionInfo(128, "Hypertile VAE max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile VAE max tile size"), + "hypertile_swap_size_vae": shared.OptionInfo(3, "Hypertile VAE swap size ", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, infotext="Hypertile VAE swap size"), } for name, opt in options.items(): -- cgit v1.2.3 From 120a84bd2f01ec4489bd12bd68f319798ef30782 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 5 Dec 2023 07:15:39 +0300 Subject: Merge pull request #14203 from AUTOMATIC1111/remove-clean_text() remove clean_text() --- modules/styles.py | 23 +++-------------------- 1 file changed, 3 insertions(+), 20 deletions(-) diff --git a/modules/styles.py b/modules/styles.py index 4d218cd7..7fb6c2e1 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -2,7 +2,6 @@ import csv import fnmatch import os import os.path -import re import typing import shutil @@ -14,22 +13,6 @@ class PromptStyle(typing.NamedTuple): path: str = None -def clean_text(text: str) -> str: - """ - Iterating through a list of regular expressions and replacement strings, we - clean up the prompt and style text to make it easier to match against each - other. - """ - re_list = [ - ("multiple commas", re.compile("(,+\s+)+,?"), ", "), - ("multiple spaces", re.compile("\s{2,}"), " "), - ] - for _, regex, replace in re_list: - text = regex.sub(replace, text) - - return text.strip(", ") - - def merge_prompts(style_prompt: str, prompt: str) -> str: if "{prompt}" in style_prompt: res = style_prompt.replace("{prompt}", prompt) @@ -44,7 +27,7 @@ def apply_styles_to_prompt(prompt, styles): for style in styles: prompt = merge_prompts(style, prompt) - return clean_text(prompt) + return prompt def unwrap_style_text_from_prompt(style_text, prompt): @@ -56,8 +39,8 @@ def unwrap_style_text_from_prompt(style_text, prompt): Note that the "cleaned" version of the style text is only used for matching purposes here. It isn't returned; the original style text is not modified. """ - stripped_prompt = clean_text(prompt) - stripped_style_text = clean_text(style_text) + stripped_prompt = prompt + stripped_style_text = style_text if "{prompt}" in stripped_style_text: # Work out whether the prompt is wrapped in the style text. If so, we # return True and the "inner" prompt text that isn't part of the style. 
-- cgit v1.2.3 From 6ef0ff39f2a35a02e5380e522e1dff3eafd7ccfc Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 14 Dec 2023 09:39:57 +0300 Subject: Merge pull request #14300 from AUTOMATIC1111/oft_fixes Fix wrong implementation in network_oft --- extensions-builtin/Lora/network_oft.py | 37 ++++++++++------------------------ 1 file changed, 11 insertions(+), 26 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 05c37811..fa647020 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -21,6 +21,8 @@ class NetworkModuleOFT(network.NetworkModule): self.lin_module = None self.org_module: list[torch.Module] = [self.sd_module] + self.scale = 1.0 + # kohya-ss if "oft_blocks" in weights.w.keys(): self.is_kohya = True @@ -53,12 +55,18 @@ class NetworkModuleOFT(network.NetworkModule): self.constraint = None self.block_size, self.num_blocks = factorization(self.out_dim, self.dim) - def calc_updown_kb(self, orig_weight, multiplier): + def calc_updown(self, orig_weight): oft_blocks = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) - oft_blocks = oft_blocks - oft_blocks.transpose(1, 2) # ensure skew-symmetric orthogonal matrix + eye = torch.eye(self.block_size, device=self.oft_blocks.device) + + if self.is_kohya: + block_Q = oft_blocks - oft_blocks.transpose(1, 2) # ensure skew-symmetric orthogonal matrix + norm_Q = torch.norm(block_Q.flatten()) + new_norm_Q = torch.clamp(norm_Q, max=self.constraint) + block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) + oft_blocks = torch.matmul(eye + block_Q, (eye - block_Q).float().inverse()) R = oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype) - R = R * multiplier + torch.eye(self.block_size, device=orig_weight.device) # This errors out for MultiheadAttention, might need to be handled up-stream merged_weight = rearrange(orig_weight, '(k n) ... 
-> k n ...', k=self.num_blocks, n=self.block_size) @@ -72,26 +80,3 @@ class NetworkModuleOFT(network.NetworkModule): updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight output_shape = orig_weight.shape return self.finalize_updown(updown, orig_weight, output_shape) - - def calc_updown(self, orig_weight): - # if alpha is a very small number as in coft, calc_scale() will return a almost zero number so we ignore it - multiplier = self.multiplier() - return self.calc_updown_kb(orig_weight, multiplier) - - # override to remove the multiplier/scale factor; it's already multiplied in get_weight - def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None): - if self.bias is not None: - updown = updown.reshape(self.bias.shape) - updown += self.bias.to(orig_weight.device, dtype=orig_weight.dtype) - updown = updown.reshape(output_shape) - - if len(output_shape) == 4: - updown = updown.reshape(output_shape) - - if orig_weight.size().numel() == updown.size().numel(): - updown = updown.reshape(orig_weight.shape) - - if ex_bias is not None: - ex_bias = ex_bias * self.multiplier() - - return updown, ex_bias -- cgit v1.2.3 From c7cd9b441d9061f33b7b88be519fb4c6e5b8bc1e Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 14 Dec 2023 09:41:18 +0300 Subject: Merge pull request #14296 from akx/paste-resolution Allow pasting in WIDTHxHEIGHT strings into the width/height fields --- javascript/ui.js | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/javascript/ui.js b/javascript/ui.js index 410fc44e..18c9f891 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -215,9 +215,33 @@ function restoreProgressImg2img() { } +/** + * Configure the width and height elements on `tabname` to accept + * pasting of resolutions in the form of "width x height". + */ +function setupResolutionPasting(tabname) { + var width = gradioApp().querySelector(`#${tabname}_width input[type=number]`); + var height = gradioApp().querySelector(`#${tabname}_height input[type=number]`); + for (const el of [width, height]) { + el.addEventListener('paste', function(event) { + var pasteData = event.clipboardData.getData('text/plain'); + var parsed = pasteData.match(/^\s*(\d+)\D+(\d+)\s*$/); + if (parsed) { + width.value = parsed[1]; + height.value = parsed[2]; + updateInput(width); + updateInput(height); + event.preventDefault(); + } + }); + } +} + onUiLoaded(function() { showRestoreProgressButton('txt2img', localGet("txt2img_task_id")); showRestoreProgressButton('img2img', localGet("img2img_task_id")); + setupResolutionPasting('txt2img'); + setupResolutionPasting('img2img'); }); -- cgit v1.2.3 From b55f09c4e13c082590bc64cd792a0b7bd46c1c0d Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 14 Dec 2023 09:46:05 +0300 Subject: Merge pull request #14270 from kaalibro/extra-options-elem-id Assign id for "extra_options". Replace numeric field with slider. 
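(Editor's note, illustrative and not part of the original message: the diff below builds a per-tab element id as elem_id_tabname = "extra_options_" + ("img2img" if is_img2img else "txt2img"), so the options container becomes addressable as #extra_options_txt2img or #extra_options_img2img from user CSS and JavaScript.) 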
--- .../extra-options-section/scripts/extra_options_section.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py index a903df62..ac2c3de4 100644 --- a/extensions-builtin/extra-options-section/scripts/extra_options_section.py +++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py @@ -23,11 +23,12 @@ class ExtraOptionsSection(scripts.Script): self.setting_names = [] self.infotext_fields = [] extra_options = shared.opts.extra_options_img2img if is_img2img else shared.opts.extra_options_txt2img + elem_id_tabname = "extra_options_" + ("img2img" if is_img2img else "txt2img") mapping = {k: v for v, k in generation_parameters_copypaste.infotext_to_setting_name_mapping} with gr.Blocks() as interface: - with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and extra_options else gr.Group(): + with gr.Accordion("Options", open=False, elem_id=elem_id_tabname) if shared.opts.extra_options_accordion and extra_options else gr.Group(elem_id=elem_id_tabname): row_count = math.ceil(len(extra_options) / shared.opts.extra_options_cols) @@ -70,7 +71,7 @@ This page allows you to add some settings to the main interface of txt2img and i """), "extra_options_txt2img": shared.OptionInfo([], "Settings for txt2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img interfaces").needs_reload_ui(), "extra_options_img2img": shared.OptionInfo([], "Settings for img2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in img2img interfaces").needs_reload_ui(), - "extra_options_cols": shared.OptionInfo(1, "Number of columns for added settings", gr.Number, {"precision": 0}).needs_reload_ui(), + "extra_options_cols": shared.OptionInfo(1, "Number of columns for added settings", gr.Slider, {"step": 1, "minimum": 1, "maximum": 20}).info("displayed amount will depend on the actual browser window width").needs_reload_ui(), "extra_options_accordion": shared.OptionInfo(False, "Place added settings into an accordion").needs_reload_ui() })) -- cgit v1.2.3 From 888b928f0da7da4a2dfa4519e95ac17a3e5562f7 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 14 Dec 2023 09:48:14 +0300 Subject: Merge pull request #14276 from AUTOMATIC1111/fix-styles Fix styles --- modules/styles.py | 31 +++++++------------------------ 1 file changed, 7 insertions(+), 24 deletions(-) diff --git a/modules/styles.py b/modules/styles.py index 7fb6c2e1..81d9800d 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -98,10 +98,8 @@ class StyleDatabase: self.path = path folder, file = os.path.split(self.path) - self.default_file = file.split("*")[0] + ".csv" - if self.default_file == ".csv": - self.default_file = "styles.csv" - self.default_path = os.path.join(folder, self.default_file) + filename, _, ext = file.partition('*') + self.default_path = os.path.join(folder, filename + ext) self.prompt_fields = [field for field in PromptStyle._fields if field != "path"] @@ -155,10 +153,8 @@ class StyleDatabase: row["name"], prompt, negative_prompt, path ) - def get_style_paths(self) -> list(): - """ - Returns a list of all distinct paths, including the default path, of - 
files that styles are loaded from.""" + def get_style_paths(self) -> set: + """Returns a set of all distinct paths of files that styles are loaded from.""" # Update any styles without a path to the default path for style in list(self.styles.values()): if not style.path: @@ -172,9 +168,9 @@ class StyleDatabase: style_paths.add(style.path) # Remove any paths for styles that are just list dividers - style_paths.remove("do_not_save") + style_paths.discard("do_not_save") - return list(style_paths) + return style_paths def get_style_prompts(self, styles): return [self.styles.get(x, self.no_style).prompt for x in styles] @@ -196,20 +192,7 @@ class StyleDatabase: # The path argument is deprecated, but kept for backwards compatibility _ = path - # Update any styles without a path to the default path - for style in list(self.styles.values()): - if not style.path: - self.styles[style.name] = style._replace(path=self.default_path) - - # Create a list of all distinct paths, including the default path - style_paths = set() - style_paths.add(self.default_path) - for _, style in self.styles.items(): - if style.path: - style_paths.add(style.path) - - # Remove any paths for styles that are just list dividers - style_paths.remove("do_not_save") + style_paths = self.get_style_paths() csv_names = [os.path.split(path)[1].lower() for path in style_paths] -- cgit v1.2.3 From 5cb1ce470df8332872af3dfa1067b761062d4608 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 14 Dec 2023 09:48:36 +0300 Subject: Merge pull request #14266 from kaalibro/dev Re-add setting lost as part of e294e46 --- modules/shared_options.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/shared_options.py b/modules/shared_options.py index e5de0d01..acb6e2d4 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -256,6 +256,7 @@ options_templates.update(options_section(('ui_prompt_editing', "Prompt editing", "keyedit_precision_extra": OptionInfo(0.05, "Precision for when editing the prompt with Ctrl+up/down", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_delimiters": OptionInfo(r".,\/!?%^*;:{}=`~() ", "Word delimiters when editing the prompt with Ctrl+up/down"), "keyedit_delimiters_whitespace": OptionInfo(["Tab", "Carriage Return", "Line Feed"], "Ctrl+up/down whitespace delimiters", gr.CheckboxGroup, lambda: {"choices": ["Tab", "Carriage Return", "Line Feed"]}), + "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), })) -- cgit v1.2.3 From b7e0d4a7e171ee1cef73684b8423fe4a20ca7e34 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 14 Dec 2023 09:52:23 +0300 Subject: Merge pull request #14229 from Nuullll/ipex-embedding [IPEX] Fix embedding and ControlNet --- modules/xpu_specific.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/modules/xpu_specific.py b/modules/xpu_specific.py index d933c790..d8da94a0 100644 --- a/modules/xpu_specific.py +++ b/modules/xpu_specific.py @@ -48,3 +48,12 @@ if has_xpu: CondFunc('torch.nn.modules.conv.Conv2d.forward', lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)), lambda orig_func, self, input: input.dtype != self.weight.data.dtype) + CondFunc('torch.bmm', + lambda orig_func, input, mat2, out=None: orig_func(input.to(mat2.dtype), mat2, out=out), + lambda orig_func, input, mat2, out=None: input.dtype != mat2.dtype) + CondFunc('torch.cat', + lambda 
From f8871dedcfe3a67689ef333aea2fdf05a9aaffa2 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Thu, 14 Dec 2023 09:59:48 +0300
Subject: Merge pull request #14230 from AUTOMATIC1111/add-option-Live-preview-in-full-page-image-viewer

add option: Live preview in full page image viewer
---
 javascript/imageviewer.js | 2 +-
 modules/shared_options.py | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js
index e4dae91b..625c5d14 100644
--- a/javascript/imageviewer.js
+++ b/javascript/imageviewer.js
@@ -34,7 +34,7 @@ function updateOnBackgroundChange() {
     if (modalImage && modalImage.offsetParent) {
         let currentButton = selected_gallery_button();
         let preview = gradioApp().querySelectorAll('.livePreview > img');
-        if (preview.length > 0) {
+        if (opts.js_live_preview_in_modal_lightbox && preview.length > 0) {
             // show preview image if available
             modalImage.src = preview[preview.length - 1].src;
         } else if (currentButton?.children?.length > 0 && modalImage.src != currentButton.children[0].src) {
diff --git a/modules/shared_options.py b/modules/shared_options.py
index acb6e2d4..41097d8e 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -331,6 +331,7 @@ options_templates.update(options_section(('ui', "Live previews", "ui"), {
     "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}),
     "live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"),
     "live_preview_fast_interrupt": OptionInfo(False, "Return image with chosen live preview method on interrupt").info("makes interrupts faster"),
+    "js_live_preview_in_modal_lightbox": OptionInfo(True, "Show Live preview in full page image viewer"),
 }))
--
cgit v1.2.3

From eb52c803b849cdd1fc137db4568eca5bb8373f58 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Thu, 14 Dec 2023 10:03:14 +0300
Subject: Merge pull request #14216 from wfjsw/state-dict-ref-comparison

change state dict comparison to ref compare
---
 modules/sd_disable_initialization.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/sd_disable_initialization.py b/modules/sd_disable_initialization.py
index 8863107a..273a7edd 100644
--- a/modules/sd_disable_initialization.py
+++ b/modules/sd_disable_initialization.py
@@ -215,7 +215,7 @@ class LoadStateDictOnMeta(ReplaceHelper):
             would be on the meta device.
            """

-            if state_dict == sd:
+            if state_dict is sd:
                 state_dict = {k: v.to(device="meta", dtype=v.dtype) for k, v in state_dict.items()}

             original(module, state_dict, strict=strict)
--
cgit v1.2.3
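The single-token change above is not cosmetic. `state_dict == sd` asks Python to compare the two dicts by value, which compares each pair of tensors with tensor `==`; unless the values happen to be the very same tensor objects (the case in which the old code usually worked, because value comparison short-circuits on identity), that yields a multi-element boolean tensor whose truth value is ambiguous, and the dict comparison raises. `state_dict is sd` asks only whether both names refer to the same dict object, which is the condition the code actually cares about, at O(1) cost. A small sketch of the failure mode, assumed from PyTorch semantics rather than taken from the webui test suite:

    import torch

    sd = {"weight": torch.zeros(2, 2)}
    same = sd                             # another name for the same dict
    copy = {"weight": torch.zeros(2, 2)}  # equal values, distinct tensor objects

    print(same is sd)  # True  -- identity check, never touches tensor data
    print(copy is sd)  # False

    try:
        sd == copy  # dict equality falls back to tensor ==, then bool(...)
    except RuntimeError as err:
        print(err)  # "Boolean value of Tensor with more than one element is ambiguous"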
""" - if state_dict == sd: + if state_dict is sd: state_dict = {k: v.to(device="meta", dtype=v.dtype) for k, v in state_dict.items()} original(module, state_dict, strict=strict) -- cgit v1.2.3 From 2be85f8fe03533bf3b1ad562ea58ee8227ba3b99 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 14 Dec 2023 10:08:03 +0300 Subject: Merge pull request #14237 from ReneKroon/dev #13354 : solve lora loading issue --- extensions-builtin/Lora/networks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 7f814706..629bf853 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -159,7 +159,8 @@ def load_network(name, network_on_disk): bundle_embeddings = {} for key_network, weight in sd.items(): - key_network_without_network_parts, network_part = key_network.split(".", 1) + key_network_without_network_parts, _, network_part = key_network.partition(".") + if key_network_without_network_parts == "bundle_emb": emb_name, vec_name = network_part.split(".", 1) emb_dict = bundle_embeddings.get(emb_name, {}) -- cgit v1.2.3 From 0dfffe53ec11b2ee097d55efc479f8e707015db9 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 16 Dec 2023 09:25:08 +0300 Subject: Merge pull request #14307 from AUTOMATIC1111/default-Falst-js_live_preview_in_modal_lightbox default False js_live_preview_in_modal_lightbox --- modules/shared_options.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 41097d8e..d2e86ff1 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -331,7 +331,7 @@ options_templates.update(options_section(('ui', "Live previews", "ui"), { "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}), "live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"), "live_preview_fast_interrupt": OptionInfo(False, "Return image with chosen live preview method on interrupt").info("makes interrupts faster"), - "js_live_preview_in_modal_lightbox": OptionInfo(True, "Show Live preview in full page image viewer"), + "js_live_preview_in_modal_lightbox": OptionInfo(False, "Show Live preview in full page image viewer"), })) options_templates.update(options_section(('sampler-params', "Sampler parameters", "sd"), { -- cgit v1.2.3