From dc25a31d1a3816a7fb0cd5ef186559b3c085db43 Mon Sep 17 00:00:00 2001 From: Ju1-js <40339350+Ju1-js@users.noreply.github.com> Date: Fri, 27 Jan 2023 22:43:10 -0800 Subject: Gradio Auth Read from External File Usage: `--gradio-auth-path {PATH}` It adds the credentials to the already existing `--gradio-auth` credentials. It can also handle line breaks. The file should look like: `{u1}:{p1},{u2}:{p2}` or ``` {u1}:{p1}, {u2}:{p2} ``` Will gradio handle duplicate credentials if it happens? --- modules/shared.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 474fcc42..36e9762f 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -81,6 +81,7 @@ parser.add_argument("--freeze-settings", action='store_true', help="disable edit parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(data_path, 'config.json')) parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option") parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None) +parser.add_argument("--gradio-auth-path", type=str, help='set gradio authentication file path ex. "/path/to/auth/file" same auth format as --gradio-auth', default=None) parser.add_argument("--gradio-img2img-tool", type=str, help='does not do anything') parser.add_argument("--gradio-inpaint-tool", type=str, help="does not do anything") parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last") -- cgit v1.2.3 From 6d11cda4188633ba19f4d8948139e510d5678059 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 5 Feb 2023 23:12:42 +0900 Subject: configurable image downscale allowing the user to configure the image downscale parameters in setting --- modules/images.py | 4 ++-- modules/shared.py | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/images.py b/modules/images.py index c2ca8849..cf4aea22 100644 --- a/modules/images.py +++ b/modules/images.py @@ -575,9 +575,9 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i image.already_saved_as = fullfn - target_side_length = 4000 + target_side_length = int(opts.target_side_length) oversize = image.width > target_side_length or image.height > target_side_length - if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > 4 * 1024 * 1024): + if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > opts.img_downscale_threshold * 1024 * 1024): ratio = image.width / image.height if oversize and ratio > 1: diff --git a/modules/shared.py b/modules/shared.py index 79fbf724..a93a0299 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -325,7 +325,9 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."), "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), - "export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"), + "export_for_4chan": 
OptionInfo(True, "If PNG image is larger than Downscale threshold or any dimension is larger than Target length, downscale the image to dimensions and save a copy as JPG"), + "img_downscale_threshold": OptionInfo(4, "Downscale threshold (MB)"), + "target_side_length": OptionInfo(4000, "Target length"), "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"), "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), -- cgit v1.2.3 From fe33be6cac140ff83b481029106968f39209cd90 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 5 Feb 2023 23:33:05 +0900 Subject: use Default if ValueError --- modules/images.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/images.py b/modules/images.py index cf4aea22..34d08b73 100644 --- a/modules/images.py +++ b/modules/images.py @@ -575,9 +575,17 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i image.already_saved_as = fullfn - target_side_length = int(opts.target_side_length) + try: + target_side_length = int(opts.target_side_length) + except ValueError: + target_side_length = 4000 + try: + img_downscale_threshold = float(opts.img_downscale_threshold) + except ValueError: + img_downscale_threshold = 4 + oversize = image.width > target_side_length or image.height > target_side_length - if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > opts.img_downscale_threshold * 1024 * 1024): + if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > img_downscale_threshold * 1024 * 1024): ratio = image.width / image.height if oversize and ratio > 1: -- cgit v1.2.3 From 584f782391b42c182385a56ae46d1674649713e6 Mon Sep 17 00:00:00 2001 From: CurtisDS <20732674+CurtisDS@users.noreply.github.com> Date: Sun, 5 Feb 2023 16:42:45 -0500 Subject: Update ui_extra_networks.py update the string used to build the ID handle to replace spaces with underscore --- modules/ui_extra_networks.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 90abec0a..fb7f2d6c 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -94,11 +94,13 @@ class ExtraNetworksPage: dirs = "".join([f"
<li>{x}</li>" for x in self.allowed_directories_for_previews()]) items_html = shared.html("extra-networks-no-cards.html").format(dirs=dirs) + self_name_id = self.name.replace(" ", "_") + res = f"""
-<div id='{tabname}_{self.name}_subdirs' class='extra-network-subdirs extra-network-subdirs-cards'>
+<div id='{tabname}_{self_name_id}_subdirs' class='extra-network-subdirs extra-network-subdirs-cards'>
{subdirs_html}
</div>
-<div id='{tabname}_{self.name}_cards' class='extra-network-cards'>
+<div id='{tabname}_{self_name_id}_cards' class='extra-network-cards'>
{items_html}
</div>
    """ -- cgit v1.2.3 From 9a22c63f47e895cfa17704ae18dd62fd3a831e9f Mon Sep 17 00:00:00 2001 From: EllangoK Date: Mon, 6 Feb 2023 00:52:31 -0500 Subject: call modules.sd_vae.refresh_vae_list() --- modules/shared_items.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/shared_items.py b/modules/shared_items.py index 8b5ec96d..b72b2bae 100644 --- a/modules/shared_items.py +++ b/modules/shared_items.py @@ -20,4 +20,4 @@ def sd_vae_items(): def refresh_vae_list(): import modules.sd_vae - return modules.sd_vae.refresh_vae_list + return modules.sd_vae.refresh_vae_list() -- cgit v1.2.3 From 5d483bf307c766aee97caec857d414946fad47db Mon Sep 17 00:00:00 2001 From: Gerschel Date: Mon, 6 Feb 2023 08:18:04 -0800 Subject: aspect ratio for dim's; sliders adjust by ratio MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Default choices added to settings in user interface section Choices are editable by user User selects from dropdown. When you move one slider, the other adjusts according to the ratio chosen. Vice versa for the other slider. Number fields for changes work as well. For disabling ratio, an unlock pad "🔓" is available as a default. This string can be changed to anything to serve as a disable, as long as there is no colon ":". Ratios are entered in this format, floats or ints with a colon "1:1". The string is split at the colon, parses left and right as floats to perform the math. --- modules/shared.py | 14 ++++++++++++++ modules/ui.py | 6 ++++++ 2 files changed, 20 insertions(+) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 79fbf724..ead7be36 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -139,6 +139,19 @@ ui_reorder_categories = [ "scripts", ] +aspect_ratio_defaults = [ + "🔓" + "1:1", + "1:2", + "2:1", + "2:3", + "3:2", + "4:3", + "5:4", + "9:16", + "16:9", +] + cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ @@ -456,6 +469,7 @@ options_templates.update(options_section(('ui', "User interface"), { "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "quicksettings": OptionInfo("sd_model_checkpoint", "Quicksettings list"), "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), + "aspect_ratios": OptionInfo(", ".join(aspect_ratio_defaults), "txt2img/img2img aspect ratios"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), })) diff --git a/modules/ui.py b/modules/ui.py index f5df1ffe..6853485c 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -424,6 +424,10 @@ def ordered_ui_categories(): yield category +def aspect_ratio_list(): + return [ratio.strip() for ratio in shared.opts.aspect_ratios.split(",")] + + def get_value_for_setting(key): value = getattr(opts, key) @@ -480,6 +484,7 @@ def create_ui(): height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") res_switch_btn = ToolButton(value=switch_values_symbol, 
elem_id="txt2img_res_switch_btn") + aspect_ratio_dropdown = gr.Dropdown(value="🔓", choices=aspect_ratio_list(), interactive=True, type="value", elem_id="txt2img_ratio", show_label=False, label="Aspect Ratio") if opts.dimensions_and_batch_together: with gr.Column(elem_id="txt2img_column_batch"): batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") @@ -758,6 +763,7 @@ def create_ui(): height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") + aspect_ratio_dropdown = gr.Dropdown(value="🔓", choices=aspect_ratio_list(), interactive=True, type="value", elem_id="img2img_ratio", show_label=False, label="Aspect Ratio") if opts.dimensions_and_batch_together: with gr.Column(elem_id="img2img_column_batch"): batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") -- cgit v1.2.3 From 4738486d8f528a98a525970ac06a109431fd7344 Mon Sep 17 00:00:00 2001 From: brkirch Date: Mon, 6 Feb 2023 18:10:55 -0500 Subject: Support for hypernetworks with --upcast-sampling --- modules/hypernetworks/hypernetwork.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 825a93b2..a15bae18 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -380,8 +380,8 @@ def apply_single_hypernetwork(hypernetwork, context_k, context_v, layer=None): layer.hyper_k = hypernetwork_layers[0] layer.hyper_v = hypernetwork_layers[1] - context_k = hypernetwork_layers[0](context_k) - context_v = hypernetwork_layers[1](context_v) + context_k = devices.cond_cast_unet(hypernetwork_layers[0](devices.cond_cast_float(context_k))) + context_v = devices.cond_cast_unet(hypernetwork_layers[1](devices.cond_cast_float(context_v))) return context_k, context_v -- cgit v1.2.3 From 2016733814433ca2b69d10764bfa0ab4c7088782 Mon Sep 17 00:00:00 2001 From: brkirch Date: Tue, 7 Feb 2023 00:05:54 -0500 Subject: Apply hijacks in ddpm_edit for upcast sampling To avoid import errors, ddpm_edit hijacks are done after an instruct pix2pix model is loaded. 
--- modules/sd_hijack.py | 3 +++ modules/sd_hijack_unet.py | 11 +++++++++++ 2 files changed, 14 insertions(+) (limited to 'modules') diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 8fdc5990..fca418cd 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -104,6 +104,9 @@ class StableDiffusionModelHijack: m.cond_stage_model.model.token_embedding = EmbeddingsWithFixes(m.cond_stage_model.model.token_embedding, self) m.cond_stage_model = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) + if m.cond_stage_key == "edit": + sd_hijack_unet.hijack_ddpm_edit() + self.optimization_method = apply_optimizations() self.clip = m.cond_stage_model diff --git a/modules/sd_hijack_unet.py b/modules/sd_hijack_unet.py index 45cf2b18..843ab66c 100644 --- a/modules/sd_hijack_unet.py +++ b/modules/sd_hijack_unet.py @@ -44,6 +44,7 @@ def apply_model(orig_func, self, x_noisy, t, cond, **kwargs): with devices.autocast(): return orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs).float() + class GELUHijack(torch.nn.GELU, torch.nn.Module): def __init__(self, *args, **kwargs): torch.nn.GELU.__init__(self, *args, **kwargs) @@ -53,6 +54,16 @@ class GELUHijack(torch.nn.GELU, torch.nn.Module): else: return torch.nn.GELU.forward(self, x) + +ddpm_edit_hijack = None +def hijack_ddpm_edit(): + global ddpm_edit_hijack + if not ddpm_edit_hijack: + CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond) + CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond) + ddpm_edit_hijack = CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.apply_model', apply_model, unet_needs_upcast) + + unet_needs_upcast = lambda *args, **kwargs: devices.unet_needs_upcast CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model, unet_needs_upcast) CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast) -- cgit v1.2.3 From 4c562a9832343f21ce86410aea6b26904cb13b2c Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Wed, 8 Feb 2023 07:03:36 -0500 Subject: convert rgba to rgb some image format (e.g. 
jpg) do not support rgba --- modules/img2img.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'modules') diff --git a/modules/img2img.py b/modules/img2img.py index bcc158dc..c973b770 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -73,6 +73,8 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): if not save_normally: os.makedirs(output_dir, exist_ok=True) + if processed_image.mode == 'RGBA': + processed_image = processed_image.convert("RGB") processed_image.save(os.path.join(output_dir, filename)) -- cgit v1.2.3 From 3ee9ca5cb06bcb36a633aea0dbbc6fd2478921df Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Wed, 8 Feb 2023 07:08:09 -0500 Subject: add missing import used later in line 418 --- modules/esrgan_model_arch.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py index bc9ceb2a..1b52b0f5 100644 --- a/modules/esrgan_model_arch.py +++ b/modules/esrgan_model_arch.py @@ -1,5 +1,6 @@ # this file is adapted from https://github.com/victorca25/iNNfer +from collections import OrderedDict import math import functools import torch -- cgit v1.2.3 From 3ca41dbded119509bb66d7952d403091a5543257 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Wed, 8 Feb 2023 07:10:13 -0500 Subject: add missing import used later in line 70 --- modules/sd_hijack_inpainting.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 478cd499..55a2ce4d 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -11,6 +11,7 @@ import ldm.models.diffusion.plms from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.models.diffusion.plms import PLMSSampler from ldm.models.diffusion.ddim import DDIMSampler, noise_like +from ldm.models.diffusion.sampling_util import norm_thresholding @torch.no_grad() -- cgit v1.2.3 From 374fe636b80169c78b4b5f92013681d75fa2fad6 Mon Sep 17 00:00:00 2001 From: Gerschel Date: Wed, 8 Feb 2023 18:57:32 -0800 Subject: Squashed commit of the following: commit b030b67ad005bfe29bcda692238a00042dcae816 Author: Gerschel Date: Wed Feb 8 16:38:56 2023 -0800 styling adjustements commit 80a2acb0230dd77489b0eb466f2efe827a053f6d Author: Gerschel Date: Wed Feb 8 10:49:47 2023 -0800 badge indicator toggles visibility by selection commit 898922e025a6422ac947fb45c1fa4f1109882f0a Merge: 745382a0 31bbfa72 Author: Gerschel <9631031+Gerschel@users.noreply.github.com> Date: Wed Feb 8 08:35:26 2023 -0800 Merge pull request #1 from w-e-w/Rounding-Method Rounding Method commit 31bbfa729a15ef35fa1f905345d3ba2b17b26ab9 Author: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed Feb 8 19:41:45 2023 +0900 use switch commit 85dbe511c33521d3ac62224bf0e0f3a48194ce63 Author: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed Feb 8 16:47:52 2023 +0900 Rounding Method commit 745382a0f4b8d16241545a3460d5206915959255 Author: Gerschel Date: Tue Feb 7 21:19:20 2023 -0800 default set to round commit 728579c618af30ec98a5af0991bd3f28bdaca399 Author: Gerschel Date: Tue Feb 7 21:17:03 2023 -0800 cleaned some commented code out; added indicator commit 5b288c24a1edd8a5c2f35214b9634316d05b8dae Author: Gerschel Date: Tue Feb 7 18:19:00 2023 -0800 needs cleaning; attempt at rounding commit d9f18ae92b929576b0b8c5f1ef8b3b38e441e381 Author: Gerschel Date: Tue Feb 7 15:46:25 2023 -0800 add rounding option in setting for aspect ratio commit af22106802c9e42205649e4c71c23fcf5b8c62f6 Author: 
Gerschel Date: Tue Feb 7 13:18:45 2023 -0800 added some ratios, sorted ratios by commonality commit 11e2fba73cffe8cdbf4cd0860641b94428ca0e74 Author: Gerschel Date: Tue Feb 7 10:46:53 2023 -0800 snaps to mulitples of 8 and along ratio commit fa00387e07460b10ee82671a1bfa8687e00ee60b Author: Gerschel Date: Mon Feb 6 14:54:59 2023 -0800 updated slidercomponentcontroller commit 8059bc111c3e2d1edb3314e05ab21b65120fa1dd Author: Gerschel Date: Mon Feb 6 14:29:11 2023 -0800 added step size adjustment on number field commit 641157b9f27a874a24ee7b0a854a092e9eac3eec Author: Gerschel Date: Mon Feb 6 14:12:03 2023 -0800 added return step size to default when ratio is disabled commit 5fb75ad28f2476f36100ec93922a8199adbd2a68 Author: Gerschel Date: Mon Feb 6 14:09:34 2023 -0800 added step size adjustment commit e33532883bc4709cd41c3775cbb646d1d5ab0584 Author: Gerschel Date: Mon Feb 6 11:56:15 2023 -0800 adjusted dropdown size, padding, text-align commit 81937329cee77f466c5a5b23c268d0c810128f84 Author: Gerschel Date: Mon Feb 6 11:39:57 2023 -0800 added positioning and styling commit 86eb4583782d92880a9a113a54ffbac9d92f3753 Author: Gerschel Date: Mon Feb 6 08:54:45 2023 -0800 fix typo in defaults; added preventDefault in event --- modules/shared.py | 17 ++++++++++++----- modules/ui.py | 10 ++++++---- 2 files changed, 18 insertions(+), 9 deletions(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index ead7be36..fcd6eadf 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -140,16 +140,21 @@ ui_reorder_categories = [ ] aspect_ratio_defaults = [ - "🔓" + "🔓", "1:1", - "1:2", - "2:1", - "2:3", "3:2", "4:3", "5:4", - "9:16", "16:9", + "9:16", + "1.85:1", + "2.35:1", + "2.39:1", + "2.40:1", + "21:9", + "1.375:1", + "1.66:1", + "1.75:1" ] cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access @@ -469,6 +474,8 @@ options_templates.update(options_section(('ui', "User interface"), { "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "quicksettings": OptionInfo("sd_model_checkpoint", "Quicksettings list"), "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), + "aspect_ratios_rounding": OptionInfo(True, "Round aspect ratios for more flexibility?", gr.Checkbox), + "aspect_ratios_rounding_method": OptionInfo("Ceiling", "Aspect ratios rounding method", gr.Radio,{"choices": ["Round", "Ceiling", "Floor"]}), "aspect_ratios": OptionInfo(", ".join(aspect_ratio_defaults), "txt2img/img2img aspect ratios"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), diff --git a/modules/ui.py b/modules/ui.py index 6853485c..873c857a 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -483,8 +483,9 @@ def create_ui(): width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn") - aspect_ratio_dropdown = gr.Dropdown(value="🔓", choices=aspect_ratio_list(), interactive=True, type="value", 
elem_id="txt2img_ratio", show_label=False, label="Aspect Ratio") + with gr.Column(elem_id="txt2img_size_toolbox", scale=0): + aspect_ratio_dropdown = gr.Dropdown(value="🔓", choices=aspect_ratio_list(), interactive=True, type="value", elem_id="txt2img_ratio", show_label=False, label="Aspect Ratio") + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn") if opts.dimensions_and_batch_together: with gr.Column(elem_id="txt2img_column_batch"): batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") @@ -762,8 +763,9 @@ def create_ui(): width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") - aspect_ratio_dropdown = gr.Dropdown(value="🔓", choices=aspect_ratio_list(), interactive=True, type="value", elem_id="img2img_ratio", show_label=False, label="Aspect Ratio") + with gr.Column(elem_id="img2img_size_toolbox", scale=0): + aspect_ratio_dropdown = gr.Dropdown(value="🔓", choices=aspect_ratio_list(), interactive=True, type="value", elem_id="img2img_ratio", show_label=False, label="Aspect Ratio") + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") if opts.dimensions_and_batch_together: with gr.Column(elem_id="img2img_column_batch"): batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") -- cgit v1.2.3 From 463ab841803b45ea421ad7f9769b836f3000ef8c Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Thu, 9 Feb 2023 02:13:49 -0700 Subject: Convert 16-bit greyscale to 8-bit when saving as JPEG --- modules/images.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'modules') diff --git a/modules/images.py b/modules/images.py index c2ca8849..c0c68913 100644 --- a/modules/images.py +++ b/modules/images.py @@ -553,6 +553,8 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i elif extension.lower() in (".jpg", ".jpeg", ".webp"): if image_to_save.mode == 'RGBA': image_to_save = image_to_save.convert("RGB") + elif image_to_save.mode == 'I;16': + image_to_save = image_to_save.point(lambda p: p * 0.0038910505836576).convert("L") image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality) -- cgit v1.2.3 From 73a97cac11456adcda05872364c605ebcd3982ad Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Thu, 9 Feb 2023 17:04:55 -0700 Subject: Use RGB for webp Doesn't support greyscale (L) --- modules/images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/images.py b/modules/images.py index c0c68913..b335502b 100644 --- a/modules/images.py +++ b/modules/images.py @@ -554,7 +554,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i if image_to_save.mode == 'RGBA': image_to_save = image_to_save.convert("RGB") elif image_to_save.mode == 'I;16': - image_to_save = image_to_save.point(lambda p: p * 0.0038910505836576).convert("L") + image_to_save = image_to_save.point(lambda p: p * 0.0038910505836576).convert("RGB" if extension.lower() == ".webp" else "L") image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality) -- cgit v1.2.3 From 33947a3c6631179bba02285f09b5853b9ecf5782 Mon Sep 17 00:00:00 2001 From: minux302 Date: Fri, 10 Feb 2023 17:58:35 +0900 Subject: 
fix arg for hypernetwork train api --- modules/api/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index eb7b1da5..5a9ac5f1 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -498,7 +498,7 @@ class Api: if not apply_optimizations: sd_hijack.undo_optimizations() try: - hypernetwork, filename = train_hypernetwork(*args) + hypernetwork, filename = train_hypernetwork(**args) except Exception as e: error = e finally: -- cgit v1.2.3 From bf9b1d64a3101b592713f584d5ef0533b6df1e0f Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Fri, 10 Feb 2023 15:27:08 -0700 Subject: Fix face restorers setting --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 79fbf724..8bc6923a 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -364,7 +364,7 @@ options_templates.update(options_section(('upscaling', "Upscaling"), { })) options_templates.update(options_section(('face-restoration', "Face restoration"), { - "face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), + "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), "code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), })) -- cgit v1.2.3 From 9e27af76d14dc6d8a5062ab9c0db128a917ada17 Mon Sep 17 00:00:00 2001 From: RcINS Date: Sat, 11 Feb 2023 10:12:16 +0800 Subject: Fix DPM++ SDE not deterministic across different batch sizes (#5210) --- modules/sd_samplers_kdiffusion.py | 37 +++++++++++++++++++++++++++++-------- modules/shared.py | 1 + 2 files changed, 30 insertions(+), 8 deletions(-) (limited to 'modules') diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index f076fc55..d143d41e 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -269,6 +269,15 @@ class KDiffusionSampler: return sigmas + def create_noise_sampler(self, x, sigmas, seeds): + """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" + if shared.opts.no_dpmpp_sde_batch_determinism: + return None + + from k_diffusion.sampling import BrownianTreeNoiseSampler + sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() + return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seeds) + def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) @@ -278,18 +287,24 @@ class KDiffusionSampler: xi = x + noise * sigma_sched[0] extra_params_kwargs = self.initialize(p) - if 'sigma_min' in inspect.signature(self.func).parameters: + parameters = inspect.signature(self.func).parameters + + if 'sigma_min' in parameters: ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last extra_params_kwargs['sigma_min'] = sigma_sched[-2] - if 'sigma_max' in inspect.signature(self.func).parameters: + if 'sigma_max' in parameters: extra_params_kwargs['sigma_max'] = sigma_sched[0] - if 'n' in inspect.signature(self.func).parameters: + if 'n' in 
parameters: extra_params_kwargs['n'] = len(sigma_sched) - 1 - if 'sigma_sched' in inspect.signature(self.func).parameters: + if 'sigma_sched' in parameters: extra_params_kwargs['sigma_sched'] = sigma_sched - if 'sigmas' in inspect.signature(self.func).parameters: + if 'sigmas' in parameters: extra_params_kwargs['sigmas'] = sigma_sched + if self.funcname == 'sample_dpmpp_sde': + noise_sampler = self.create_noise_sampler(x, sigmas, p.all_seeds) + extra_params_kwargs['noise_sampler'] = noise_sampler + self.model_wrap_cfg.init_latent = x self.last_latent = x extra_args={ @@ -303,7 +318,7 @@ class KDiffusionSampler: return samples - def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning = None): + def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): steps = steps or p.steps sigmas = self.get_sigmas(p, steps) @@ -311,14 +326,20 @@ class KDiffusionSampler: x = x * sigmas[0] extra_params_kwargs = self.initialize(p) - if 'sigma_min' in inspect.signature(self.func).parameters: + parameters = inspect.signature(self.func).parameters + + if 'sigma_min' in parameters: extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item() extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item() - if 'n' in inspect.signature(self.func).parameters: + if 'n' in parameters: extra_params_kwargs['n'] = steps else: extra_params_kwargs['sigmas'] = sigmas + if self.funcname == 'sample_dpmpp_sde': + noise_sampler = self.create_noise_sampler(x, sigmas, p.all_seeds) + extra_params_kwargs['noise_sampler'] = noise_sampler + self.last_latent = x samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ 'cond': conditioning, diff --git a/modules/shared.py b/modules/shared.py index 79fbf724..22344431 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -414,6 +414,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { options_templates.update(options_section(('compatibility', "Compatibility"), { "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. 
Can be useful to reproduce old seeds."), "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."), + "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."), "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."), })) -- cgit v1.2.3 From b78c5e87baaf8c88d039bf60082c3b5ae35ec4ff Mon Sep 17 00:00:00 2001 From: opparco Date: Sat, 11 Feb 2023 11:18:38 +0900 Subject: Add cfg_denoised_callback --- modules/script_callbacks.py | 29 +++++++++++++++++++++++++++++ modules/sd_samplers_kdiffusion.py | 4 ++++ 2 files changed, 33 insertions(+) (limited to 'modules') diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 4bb45ec7..edd0e2a7 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -46,6 +46,18 @@ class CFGDenoiserParams: """Total number of sampling steps planned""" +class CFGDenoisedParams: + def __init__(self, x, sampling_step, total_sampling_steps): + self.x = x + """Latent image representation in the process of being denoised""" + + self.sampling_step = sampling_step + """Current Sampling step number""" + + self.total_sampling_steps = total_sampling_steps + """Total number of sampling steps planned""" + + class UiTrainTabParams: def __init__(self, txt2img_preview_params): self.txt2img_preview_params = txt2img_preview_params @@ -68,6 +80,7 @@ callback_map = dict( callbacks_before_image_saved=[], callbacks_image_saved=[], callbacks_cfg_denoiser=[], + callbacks_cfg_denoised=[], callbacks_before_component=[], callbacks_after_component=[], callbacks_image_grid=[], @@ -150,6 +163,14 @@ def cfg_denoiser_callback(params: CFGDenoiserParams): report_exception(c, 'cfg_denoiser_callback') +def cfg_denoised_callback(params: CFGDenoisedParams): + for c in callback_map['callbacks_cfg_denoised']: + try: + c.callback(params) + except Exception: + report_exception(c, 'cfg_denoised_callback') + + def before_component_callback(component, **kwargs): for c in callback_map['callbacks_before_component']: try: @@ -283,6 +304,14 @@ def on_cfg_denoiser(callback): add_callback(callback_map['callbacks_cfg_denoiser'], callback) +def on_cfg_denoised(callback): + """register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs. + The callback is called with one argument: + - params: CFGDenoisedParams - parameters to be passed to the inner model and sampling state details. + """ + add_callback(callback_map['callbacks_cfg_denoised'], callback) + + def on_before_component(callback): """register a function to be called before a component is created. 
The callback is called with arguments: diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index f076fc55..28847397 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -8,6 +8,7 @@ from modules import prompt_parser, devices, sd_samplers_common from modules.shared import opts, state import modules.shared as shared from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback +from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback samplers_k_diffusion = [ ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {}), @@ -136,6 +137,9 @@ class CFGDenoiser(torch.nn.Module): x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond={"c_crossattn": [uncond], "c_concat": [image_cond_in[-uncond.shape[0]:]]}) + denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps) + cfg_denoised_callback(denoised_params) + devices.test_for_nans(x_out, "unet") if opts.live_preview_content == "Prompt": -- cgit v1.2.3 From 0a4917ac4021eb6cf0da27c060c13bdd5b5d2a9f Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Mon, 13 Feb 2023 03:33:28 -0800 Subject: Apply extra networks per-batch instead of per-session (fixes wildcards) --- modules/processing.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index e1b53ac0..e4b989d4 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -543,8 +543,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings: model_hijack.embedding_db.load_textual_inversion_embeddings() - _, extra_network_data = extra_networks.parse_prompts(p.all_prompts[0:1]) - if p.scripts is not None: p.scripts.process(p) @@ -582,9 +580,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN": sd_vae_approx.model() - if not p.disable_extra_networks: - extra_networks.activate(p, extra_network_data) - with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file: processed = Processed(p, [], p.seed, "") file.write(processed.infotext(p, 0)) @@ -609,7 +604,11 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if len(prompts) == 0: break - prompts, _ = extra_networks.parse_prompts(prompts) + prompts, extra_network_data = extra_networks.parse_prompts(prompts) + + if not p.disable_extra_networks: + with devices.autocast(): + extra_networks.activate(p, extra_network_data) if p.scripts is not None: p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds) -- cgit v1.2.3 From 7893533674e37de258d647f22b06430e0f924bd7 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Mon, 13 Feb 2023 11:04:34 -0500 Subject: add version to extensions table --- modules/extensions.py | 6 ++++++ modules/ui_extensions.py | 8 +++----- 2 files changed, 9 insertions(+), 5 deletions(-) (limited to 'modules') diff --git a/modules/extensions.py b/modules/extensions.py index 5e12b1aa..1975fca1 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -2,6 +2,7 @@ import os import sys import traceback +import time import git from modules import paths, shared @@ -25,6 +26,7 @@ class Extension: self.status = '' self.can_update = False 
self.is_builtin = is_builtin + self.version = '' repo = None try: @@ -40,6 +42,10 @@ class Extension: try: self.remote = next(repo.remote().urls, None) self.status = 'unknown' + head = repo.head.commit + ts = time.asctime(time.gmtime(repo.head.commit.committed_date)) + self.version = f'{head.hexsha[:7]} ({ts})' + except Exception: self.remote = None diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 37d30e1f..bd4308ef 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -80,6 +80,7 @@ def extension_table(): Extension URL + Version Update @@ -87,11 +88,7 @@ def extension_table(): """ for ext in extensions.extensions: - remote = "" - if ext.is_builtin: - remote = "built-in" - elif ext.remote: - remote = f"""{html.escape("built-in" if ext.is_builtin else ext.remote or '')}""" + remote = f"""{html.escape("built-in" if ext.is_builtin else ext.remote or '')}""" if ext.can_update: ext_status = f"""""" @@ -102,6 +99,7 @@ def extension_table(): {remote} + {ext.version} {ext_status} """ -- cgit v1.2.3 From 1615f786eeb5e407f7414835fbb73e7b6f8337de Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Tue, 14 Feb 2023 20:54:02 -0700 Subject: Download model if none are found --- modules/sd_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/sd_models.py b/modules/sd_models.py index d847d358..07072e5c 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -105,7 +105,7 @@ def checkpoint_tiles(): def list_models(): checkpoints_list.clear() checkpoint_alisases.clear() - model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], ext_blacklist=[".vae.safetensors"]) + model_list = modelloader.load_models(model_path=model_path, model_url="https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors", command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.safetensors"]) cmd_ckpt = shared.cmd_opts.ckpt if os.path.exists(cmd_ckpt): -- cgit v1.2.3 From f55a7e04d812e8cb07d622efb321abbad54d2d4a Mon Sep 17 00:00:00 2001 From: RcINS Date: Wed, 15 Feb 2023 16:57:18 +0800 Subject: Fix error when batch count > 1 --- modules/sd_samplers_kdiffusion.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index d143d41e..86d657e2 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -269,14 +269,15 @@ class KDiffusionSampler: return sigmas - def create_noise_sampler(self, x, sigmas, seeds): + def create_noise_sampler(self, x, sigmas, p): """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" if shared.opts.no_dpmpp_sde_batch_determinism: return None from k_diffusion.sampling import BrownianTreeNoiseSampler sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() - return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seeds) + current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] + return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds) def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) @@ -302,7 +303,7 @@ class 
KDiffusionSampler: extra_params_kwargs['sigmas'] = sigma_sched if self.funcname == 'sample_dpmpp_sde': - noise_sampler = self.create_noise_sampler(x, sigmas, p.all_seeds) + noise_sampler = self.create_noise_sampler(x, sigmas, p) extra_params_kwargs['noise_sampler'] = noise_sampler self.model_wrap_cfg.init_latent = x @@ -337,7 +338,7 @@ class KDiffusionSampler: extra_params_kwargs['sigmas'] = sigmas if self.funcname == 'sample_dpmpp_sde': - noise_sampler = self.create_noise_sampler(x, sigmas, p.all_seeds) + noise_sampler = self.create_noise_sampler(x, sigmas, p) extra_params_kwargs['noise_sampler'] = noise_sampler self.last_latent = x -- cgit v1.2.3 From c4bfd20f317243d7ceac6e2fbf30b18bbebd3e6d Mon Sep 17 00:00:00 2001 From: Shondoit Date: Thu, 12 Jan 2023 15:03:46 +0100 Subject: Hijack to add weighted_forward to model: return loss * weight map --- modules/sd_hijack.py | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) (limited to 'modules') diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 8fdc5990..57ed5635 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -1,5 +1,6 @@ import torch from torch.nn.functional import silu +from types import MethodType import modules.textual_inversion.textual_inversion from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint @@ -76,6 +77,54 @@ def fix_checkpoint(): pass +def weighted_loss(sd_model, pred, target, mean=True): + #Calculate the weight normally, but ignore the mean + loss = sd_model._old_get_loss(pred, target, mean=False) + + #Check if we have weights available + weight = getattr(sd_model, '_custom_loss_weight', None) + if weight is not None: + loss *= weight + + #Return the loss, as mean if specified + return loss.mean() if mean else loss + +def weighted_forward(sd_model, x, c, w, *args, **kwargs): + try: + #Temporarily append weights to a place accessible during loss calc + sd_model._custom_loss_weight = w + + #Replace 'get_loss' with a weight-aware one. 
Otherwise we need to reimplement 'forward' completely + #Keep 'get_loss', but don't overwrite the previous old_get_loss if it's already set + if not hasattr(sd_model, '_old_get_loss'): + sd_model._old_get_loss = sd_model.get_loss + sd_model.get_loss = MethodType(weighted_loss, sd_model) + + #Run the standard forward function, but with the patched 'get_loss' + return sd_model.forward(x, c, *args, **kwargs) + finally: + try: + #Delete temporary weights if appended + del sd_model._custom_loss_weight + except AttributeError as e: + pass + + #If we have an old loss function, reset the loss function to the original one + if hasattr(sd_model, '_old_get_loss'): + sd_model.get_loss = sd_model._old_get_loss + del sd_model._old_get_loss + +def apply_weighted_forward(sd_model): + #Add new function 'weighted_forward' that can be called to calc weighted loss + sd_model.weighted_forward = MethodType(weighted_forward, sd_model) + +def undo_weighted_forward(sd_model): + try: + del sd_model.weighted_forward + except AttributeError as e: + pass + + class StableDiffusionModelHijack: fixes = None comments = [] @@ -104,6 +153,8 @@ class StableDiffusionModelHijack: m.cond_stage_model.model.token_embedding = EmbeddingsWithFixes(m.cond_stage_model.model.token_embedding, self) m.cond_stage_model = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) + apply_weighted_forward(m) + self.optimization_method = apply_optimizations() self.clip = m.cond_stage_model @@ -132,6 +183,7 @@ class StableDiffusionModelHijack: m.cond_stage_model = m.cond_stage_model.wrapped undo_optimizations() + undo_weighted_forward(m) self.apply_circular(False) self.layers = None -- cgit v1.2.3 From 21642000b33a3069e3408ea1a50239006176badb Mon Sep 17 00:00:00 2001 From: Shondoit Date: Thu, 12 Jan 2023 15:29:19 +0100 Subject: Add PNG alpha channel as weight maps to data entries --- modules/textual_inversion/dataset.py | 51 +++++++++++++++++++++++++++--------- 1 file changed, 38 insertions(+), 13 deletions(-) (limited to 'modules') diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index d31963d4..f4ce4552 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -19,9 +19,10 @@ re_numbers_at_start = re.compile(r"^[-\d]+\s*") class DatasetEntry: - def __init__(self, filename=None, filename_text=None, latent_dist=None, latent_sample=None, cond=None, cond_text=None, pixel_values=None): + def __init__(self, filename=None, filename_text=None, latent_dist=None, latent_sample=None, cond=None, cond_text=None, pixel_values=None, weight=None): self.filename = filename self.filename_text = filename_text + self.weight = weight self.latent_dist = latent_dist self.latent_sample = latent_sample self.cond = cond @@ -56,10 +57,16 @@ class PersonalizedBase(Dataset): print("Preparing dataset...") for path in tqdm.tqdm(self.image_paths): + alpha_channel = None if shared.state.interrupted: raise Exception("interrupted") try: - image = Image.open(path).convert('RGB') + image = Image.open(path) + #Currently does not work for single color transparency + #We would need to read image.info['transparency'] for that + if 'A' in image.getbands(): + alpha_channel = image.getchannel('A') + image = image.convert('RGB') if not varsize: image = image.resize((width, height), PIL.Image.BICUBIC) except Exception: @@ -87,17 +94,33 @@ class PersonalizedBase(Dataset): with devices.autocast(): latent_dist = model.encode_first_stage(torchdata.unsqueeze(dim=0)) - if 
latent_sampling_method == "once" or (latent_sampling_method == "deterministic" and not isinstance(latent_dist, DiagonalGaussianDistribution)): - latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu) - latent_sampling_method = "once" - entry = DatasetEntry(filename=path, filename_text=filename_text, latent_sample=latent_sample) - elif latent_sampling_method == "deterministic": - # Works only for DiagonalGaussianDistribution - latent_dist.std = 0 - latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu) - entry = DatasetEntry(filename=path, filename_text=filename_text, latent_sample=latent_sample) - elif latent_sampling_method == "random": - entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist) + #Perform latent sampling, even for random sampling. + #We need the sample dimensions for the weights + if latent_sampling_method == "deterministic": + if isinstance(latent_dist, DiagonalGaussianDistribution): + # Works only for DiagonalGaussianDistribution + latent_dist.std = 0 + else: + latent_sampling_method = "once" + latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu) + + if alpha_channel is not None: + channels, *latent_size = latent_sample.shape + weight_img = alpha_channel.resize(latent_size) + npweight = np.array(weight_img).astype(np.float32) + #Repeat for every channel in the latent sample + weight = torch.tensor([npweight] * channels).reshape([channels] + latent_size) + #Normalize the weight to a minimum of 0 and a mean of 1, that way the loss will be comparable to default. + weight -= weight.min() + weight /= weight.mean() + else: + #If an image does not have a alpha channel, add a ones weight map anyway so we can stack it later + weight = torch.ones([channels] + latent_size) + + if latent_sampling_method == "random": + entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist, weight=weight) + else: + entry = DatasetEntry(filename=path, filename_text=filename_text, latent_sample=latent_sample, weight=weight) if not (self.tag_drop_out != 0 or self.shuffle_tags): entry.cond_text = self.create_text(filename_text) @@ -110,6 +133,7 @@ class PersonalizedBase(Dataset): del torchdata del latent_dist del latent_sample + del weight self.length = len(self.dataset) self.groups = list(groups.values()) @@ -195,6 +219,7 @@ class BatchLoader: self.cond_text = [entry.cond_text for entry in data] self.cond = [entry.cond for entry in data] self.latent_sample = torch.stack([entry.latent_sample for entry in data]).squeeze(1) + self.weight = torch.stack([entry.weight for entry in data]).squeeze(1) #self.emb_index = [entry.emb_index for entry in data] #print(self.latent_sample.device) -- cgit v1.2.3 From bc50936745e1a349afdc28cf1540109ba20bc71a Mon Sep 17 00:00:00 2001 From: Shondoit Date: Thu, 12 Jan 2023 15:34:11 +0100 Subject: Call weighted_forward during training --- modules/hypernetworks/hypernetwork.py | 3 ++- modules/textual_inversion/textual_inversion.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 825a93b2..9c79b7d0 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -640,13 +640,14 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi with devices.autocast(): x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) + w = 
batch.weight.to(devices.device, non_blocking=pin_memory) if tag_drop_out != 0 or shuffle_tags: shared.sd_model.cond_stage_model.to(devices.device) c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, non_blocking=pin_memory) shared.sd_model.cond_stage_model.to(devices.cpu) else: c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory) - loss = shared.sd_model(x, c)[0] / gradient_step + loss = shared.sd_model.weighted_forward(x, c, w)[0] / gradient_step del x del c diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index a1a406c2..8853c868 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -480,6 +480,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st with devices.autocast(): x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) + w = batch.weight.to(devices.device, non_blocking=pin_memory) c = shared.sd_model.cond_stage_model(batch.cond_text) if is_training_inpainting_model: @@ -490,7 +491,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st else: cond = c - loss = shared.sd_model(x, cond)[0] / gradient_step + loss = shared.sd_model.weighted_forward(x, cond, w)[0] / gradient_step del x _loss_step += loss.item() -- cgit v1.2.3 From edb10092de516dda5271130ed53628387780a859 Mon Sep 17 00:00:00 2001 From: Shondoit Date: Thu, 12 Jan 2023 16:29:00 +0100 Subject: Add ability to choose using weighted loss or not --- modules/hypernetworks/hypernetwork.py | 13 +++++++++---- modules/textual_inversion/dataset.py | 15 ++++++++++----- modules/textual_inversion/textual_inversion.py | 13 +++++++++---- modules/ui.py | 4 ++++ 4 files changed, 32 insertions(+), 13 deletions(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 9c79b7d0..f4fb69e0 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -496,7 +496,7 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, shared.reload_hypernetworks() -def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): +def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): # images allows training previews to have infotext. Importing it at the top causes a circular import problem. 
from modules import images @@ -554,7 +554,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi pin_memory = shared.opts.pin_memory - ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize) + ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight) if shared.opts.save_training_settings_to_txt: saved_params = dict( @@ -640,14 +640,19 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi with devices.autocast(): x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) - w = batch.weight.to(devices.device, non_blocking=pin_memory) + if use_weight: + w = batch.weight.to(devices.device, non_blocking=pin_memory) if tag_drop_out != 0 or shuffle_tags: shared.sd_model.cond_stage_model.to(devices.device) c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, non_blocking=pin_memory) shared.sd_model.cond_stage_model.to(devices.cpu) else: c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory) - loss = shared.sd_model.weighted_forward(x, c, w)[0] / gradient_step + if use_weight: + loss = shared.sd_model.weighted_forward(x, c, w)[0] / gradient_step + del w + else: + loss = shared.sd_model.forward(x, c)[0] / gradient_step del x del c diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index f4ce4552..1568b2b8 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -31,7 +31,7 @@ class DatasetEntry: class PersonalizedBase(Dataset): - def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once', varsize=False): + def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once', varsize=False, use_weight=False): re_word = re.compile(shared.opts.dataset_filename_word_regex) if len(shared.opts.dataset_filename_word_regex) > 0 else None self.placeholder_token = placeholder_token @@ -64,7 +64,7 @@ class PersonalizedBase(Dataset): image = Image.open(path) #Currently does not work for single color transparency #We would need to read image.info['transparency'] for that - if 'A' in image.getbands(): + if use_weight and 'A' in image.getbands(): alpha_channel = image.getchannel('A') image = 
image.convert('RGB') if not varsize: @@ -104,7 +104,7 @@ class PersonalizedBase(Dataset): latent_sampling_method = "once" latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu) - if alpha_channel is not None: + if use_weight and alpha_channel is not None: channels, *latent_size = latent_sample.shape weight_img = alpha_channel.resize(latent_size) npweight = np.array(weight_img).astype(np.float32) @@ -113,9 +113,11 @@ class PersonalizedBase(Dataset): #Normalize the weight to a minimum of 0 and a mean of 1, that way the loss will be comparable to default. weight -= weight.min() weight /= weight.mean() - else: + elif use_weight: #If an image does not have a alpha channel, add a ones weight map anyway so we can stack it later weight = torch.ones([channels] + latent_size) + else: + weight = None if latent_sampling_method == "random": entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist, weight=weight) @@ -219,7 +221,10 @@ class BatchLoader: self.cond_text = [entry.cond_text for entry in data] self.cond = [entry.cond for entry in data] self.latent_sample = torch.stack([entry.latent_sample for entry in data]).squeeze(1) - self.weight = torch.stack([entry.weight for entry in data]).squeeze(1) + if all(entry.weight is not None for entry in data): + self.weight = torch.stack([entry.weight for entry in data]).squeeze(1) + else: + self.weight = None #self.emb_index = [entry.emb_index for entry in data] #print(self.latent_sample.device) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 8853c868..c63c7d1d 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -351,7 +351,7 @@ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, dat assert log_directory, "Log directory is empty" -def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): +def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): save_embedding_every = save_embedding_every or 0 create_image_every = create_image_every or 0 template_file = textual_inversion_templates.get(template_filename, None) @@ -410,7 +410,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st pin_memory = shared.opts.pin_memory - ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, 
template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize) + ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight) if shared.opts.save_training_settings_to_txt: save_settings_to_file(log_directory, {**dict(model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds), num_vectors_per_token=len(embedding.vec)), **locals()}) @@ -480,7 +480,8 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st with devices.autocast(): x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) - w = batch.weight.to(devices.device, non_blocking=pin_memory) + if use_weight: + w = batch.weight.to(devices.device, non_blocking=pin_memory) c = shared.sd_model.cond_stage_model(batch.cond_text) if is_training_inpainting_model: @@ -491,7 +492,11 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st else: cond = c - loss = shared.sd_model.weighted_forward(x, cond, w)[0] / gradient_step + if use_weight: + loss = shared.sd_model.weighted_forward(x, cond, w)[0] / gradient_step + del w + else: + loss = shared.sd_model.forward(x, cond)[0] / gradient_step del x _loss_step += loss.item() diff --git a/modules/ui.py b/modules/ui.py index f5df1ffe..efb87c23 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1191,6 +1191,8 @@ def create_ui(): create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every") save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every") + use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight") + save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding") preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) 
from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img") @@ -1304,6 +1306,7 @@ def create_ui(): shuffle_tags, tag_drop_out, latent_sampling_method, + use_weight, create_image_every, save_embedding_every, template_file, @@ -1337,6 +1340,7 @@ def create_ui(): shuffle_tags, tag_drop_out, latent_sampling_method, + use_weight, create_image_every, save_embedding_every, template_file, -- cgit v1.2.3 From c4ea16a03f8f9c9a9add4049ce5be1a8060464f5 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Wed, 15 Feb 2023 19:47:30 -0700 Subject: Add ".vae.ckpt" to ext_blacklist --- modules/sd_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/sd_models.py b/modules/sd_models.py index 07072e5c..127e9663 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -105,7 +105,7 @@ def checkpoint_tiles(): def list_models(): checkpoints_list.clear() checkpoint_alisases.clear() - model_list = modelloader.load_models(model_path=model_path, model_url="https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors", command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.safetensors"]) + model_list = modelloader.load_models(model_path=model_path, model_url="https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors", command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"]) cmd_ckpt = shared.cmd_opts.ckpt if os.path.exists(cmd_ckpt): -- cgit v1.2.3 From b20737815a55cd90cfab2a1a3d60d682a67b127a Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Thu, 16 Feb 2023 21:44:46 -0800 Subject: Fix params.txt saving for infotexts modified by process_batch --- modules/processing.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index e1b53ac0..73894822 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -585,10 +585,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if not p.disable_extra_networks: extra_networks.activate(p, extra_network_data) - with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file: - processed = Processed(p, [], p.seed, "") - file.write(processed.infotext(p, 0)) - if state.job_count == -1: state.job_count = p.n_iter @@ -614,6 +610,15 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if p.scripts is not None: p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds) + # params.txt should be saved after scripts.process_batch, since the + # infotext could be modified by that callback + # Example: a wildcard processed by process_batch sets an extra model + # strength, which is saved as "Model Strength: 1.0" in the infotext + if n == 0: + with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file: + processed = Processed(p, [], p.seed, "") + file.write(processed.infotext(p, 0)) + uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, cached_uc) c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c) -- cgit v1.2.3 From 9c7e6d5bbaa55205d0678369588c019108fb30a7 Mon Sep 
17 00:00:00 2001 From: Vladimir Mandic Date: Sat, 18 Feb 2023 11:31:02 -0500 Subject: store and print real torch version --- modules/ui.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/ui.py b/modules/ui.py index f5df1ffe..d9df3781 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1779,10 +1779,15 @@ def versions_html(): else: xformers_version = "N/A" + try: + torch_version = torch.__long_version__ + except: + torch_version = torch.__version__ + return f""" python: {python_version}  •  -torch: {torch.__version__} +torch: {torch_version}  •  xformers: {xformers_version}  •  -- cgit v1.2.3 From b5f69ad6afcdb8ba718da636b4a3f8aad5bd7cbf Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 08:38:38 +0300 Subject: simply long version display for torch in UI --- modules/ui.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'modules') diff --git a/modules/ui.py b/modules/ui.py index d9df3781..54efb6a4 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1779,15 +1779,10 @@ def versions_html(): else: xformers_version = "N/A" - try: - torch_version = torch.__long_version__ - except: - torch_version = torch.__version__ - return f""" python: {python_version}  •  -torch: {torch_version} +torch: {getattr(torch, '__long_version__',torch.__version__)}  •  xformers: {xformers_version}  •  -- cgit v1.2.3 From a742facd95189eb078087bce9cafbfad0723cff4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 09:30:49 +0300 Subject: make PNG info tab work properly with parameter overrides --- modules/generation_parameters_copypaste.py | 7 ++++--- modules/ui.py | 10 +++++----- 2 files changed, 9 insertions(+), 8 deletions(-) (limited to 'modules') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index fc9e17aa..89dc23bf 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -74,8 +74,8 @@ def image_from_url_text(filedata): return image -def add_paste_fields(tabname, init_img, fields): - paste_fields[tabname] = {"init_img": init_img, "fields": fields} +def add_paste_fields(tabname, init_img, fields, override_settings_component=None): + paste_fields[tabname] = {"init_img": init_img, "fields": fields, "override_settings_component": override_settings_component} # backwards compatibility for existing extensions import modules.ui @@ -110,6 +110,7 @@ def connect_paste_params_buttons(): for binding in registered_param_bindings: destination_image_component = paste_fields[binding.tabname]["init_img"] fields = paste_fields[binding.tabname]["fields"] + override_settings_component = binding.override_settings_component or paste_fields[binding.tabname]["override_settings_component"] destination_width_component = next(iter([field for field, name in fields if name == "Size-1"] if fields else []), None) destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None) @@ -130,7 +131,7 @@ def connect_paste_params_buttons(): ) if binding.source_text_component is not None and fields is not None: - connect_paste(binding.paste_button, fields, binding.source_text_component, binding.override_settings_component, binding.tabname) + connect_paste(binding.paste_button, fields, binding.source_text_component, override_settings_component, binding.tabname) if binding.source_tabname is not None and fields is not None: paste_field_names = ['Prompt', 
'Negative prompt', 'Steps', 'Face restoration'] + (["Seed"] if shared.opts.send_seed else []) diff --git a/modules/ui.py b/modules/ui.py index 54efb6a4..2fdbda42 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -631,9 +631,9 @@ def create_ui(): (hr_resize_y, "Hires resize-2"), *modules.scripts.scripts_txt2img.infotext_fields ] - parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields) + parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, override_settings_component=override_settings, + paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, )) txt2img_preview_params = [ @@ -963,10 +963,10 @@ def create_ui(): (mask_blur, "Mask blur"), *modules.scripts.scripts_img2img.infotext_fields ] - parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields) - parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields) + parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) + parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, override_settings_component=override_settings, + paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, )) modules.scripts.scripts_current = None -- cgit v1.2.3 From 15f4b217b10448449ae211df24c86a7cb0e187f4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 09:50:14 +0300 Subject: fix the a merge conflict resolve i did that entirely breaks image generation --- modules/processing.py | 3 --- 1 file changed, 3 deletions(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index 269a1a9f..2009d3bf 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -580,9 +580,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN": sd_vae_approx.model() - if not p.disable_extra_networks: - extra_networks.activate(p, extra_network_data) - if state.job_count == -1: state.job_count = p.n_iter -- cgit v1.2.3 From 164699163718a73a273b86f67a16d3807bccda0e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 09:54:04 +0300 Subject: display 8 (rather than 7) characters of the extension commit hash in the installed extensions table --- modules/extensions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/extensions.py b/modules/extensions.py index 1975fca1..3eef9eaf 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -44,7 +44,7 @@ class Extension: self.status = 'unknown' head = repo.head.commit ts = time.asctime(time.gmtime(repo.head.commit.committed_date)) - self.version = f'{head.hexsha[:7]} ({ts})' + self.version = f'{head.hexsha[:8]} ({ts})' except Exception: self.remote = None -- cgit v1.2.3 From fb2354cb2ae47f9e9b70f0e04f34925bbb31b1ac Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 
19 Feb 2023 10:12:45 +0300 Subject: reword settings for 4chan export, remove unneded try/excepts, add try/except for actually saving JPG --- modules/images.py | 24 +++++++++--------------- modules/shared.py | 6 +++--- 2 files changed, 12 insertions(+), 18 deletions(-) (limited to 'modules') diff --git a/modules/images.py b/modules/images.py index 34d08b73..dcf5d90c 100644 --- a/modules/images.py +++ b/modules/images.py @@ -18,7 +18,7 @@ import string import json import hashlib -from modules import sd_samplers, shared, script_callbacks +from modules import sd_samplers, shared, script_callbacks, errors from modules.shared import opts, cmd_opts LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) @@ -575,25 +575,19 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i image.already_saved_as = fullfn - try: - target_side_length = int(opts.target_side_length) - except ValueError: - target_side_length = 4000 - try: - img_downscale_threshold = float(opts.img_downscale_threshold) - except ValueError: - img_downscale_threshold = 4 - - oversize = image.width > target_side_length or image.height > target_side_length - if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > img_downscale_threshold * 1024 * 1024): + oversize = image.width > opts.target_side_length or image.height > opts.target_side_length + if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > opts.img_downscale_threshold * 1024 * 1024): ratio = image.width / image.height if oversize and ratio > 1: - image = image.resize((target_side_length, image.height * target_side_length // image.width), LANCZOS) + image = image.resize((opts.target_side_length, image.height * opts.target_side_length // image.width), LANCZOS) elif oversize: - image = image.resize((image.width * target_side_length // image.height, target_side_length), LANCZOS) + image = image.resize((image.width * opts.target_side_length // image.height, opts.target_side_length), LANCZOS) - _atomically_save_image(image, fullfn_without_extension, ".jpg") + try: + _atomically_save_image(image, fullfn_without_extension, ".jpg") + except Exception as e: + errors.display(e, "saving image as downscaled JPG") if opts.save_txt and info is not None: txt_fullfn = f"{fullfn_without_extension}.txt" diff --git a/modules/shared.py b/modules/shared.py index d68c366c..4f496533 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -325,9 +325,9 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."), "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), - "export_for_4chan": OptionInfo(True, "If PNG image is larger than Downscale threshold or any dimension is larger than Target length, downscale the image to dimensions and save a copy as JPG"), - "img_downscale_threshold": OptionInfo(4, "Downscale threshold (MB)"), - "target_side_length": OptionInfo(4000, "Target length"), + "export_for_4chan": OptionInfo(True, "If the saved image file size is above the limit, or its either width or height are above the limit, save a downscaled copy as JPG"), + "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number), + "target_side_length": OptionInfo(4000, 
"Width/height limit for the above option, in pixels", gr.Number), "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"), "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), -- cgit v1.2.3 From fd4ac5187a1ae42be3f131770ea21e2158f75dcd Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 19 Feb 2023 10:55:39 +0300 Subject: Revert "Aspect ratio sliders" --- modules/shared.py | 21 --------------------- modules/ui.py | 12 ++---------- 2 files changed, 2 insertions(+), 31 deletions(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 2983ee44..e324a48a 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -139,24 +139,6 @@ ui_reorder_categories = [ "scripts", ] -aspect_ratio_defaults = [ - "🔓", - "1:1", - "3:2", - "4:3", - "5:4", - "16:9", - "9:16", - "1.85:1", - "2.35:1", - "2.39:1", - "2.40:1", - "21:9", - "1.375:1", - "1.66:1", - "1.75:1" -] - cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ @@ -477,9 +459,6 @@ options_templates.update(options_section(('ui', "User interface"), { "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "quicksettings": OptionInfo("sd_model_checkpoint", "Quicksettings list"), "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), - "aspect_ratios_rounding": OptionInfo(True, "Round aspect ratios for more flexibility?", gr.Checkbox), - "aspect_ratios_rounding_method": OptionInfo("Ceiling", "Aspect ratios rounding method", gr.Radio,{"choices": ["Round", "Ceiling", "Floor"]}), - "aspect_ratios": OptionInfo(", ".join(aspect_ratio_defaults), "txt2img/img2img aspect ratios"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), })) diff --git a/modules/ui.py b/modules/ui.py index 2fc1fee5..2fdbda42 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -424,10 +424,6 @@ def ordered_ui_categories(): yield category -def aspect_ratio_list(): - return [ratio.strip() for ratio in shared.opts.aspect_ratios.split(",")] - - def get_value_for_setting(key): value = getattr(opts, key) @@ -483,9 +479,7 @@ def create_ui(): width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - with gr.Column(elem_id="txt2img_size_toolbox", scale=0): - aspect_ratio_dropdown = gr.Dropdown(value="🔓", choices=aspect_ratio_list(), interactive=True, type="value", elem_id="txt2img_ratio", show_label=False, label="Aspect Ratio") - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn") + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn") if opts.dimensions_and_batch_together: with gr.Column(elem_id="txt2img_column_batch"): batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, 
elem_id="txt2img_batch_count") @@ -763,9 +757,7 @@ def create_ui(): width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - with gr.Column(elem_id="img2img_size_toolbox", scale=0): - aspect_ratio_dropdown = gr.Dropdown(value="🔓", choices=aspect_ratio_list(), interactive=True, type="value", elem_id="img2img_ratio", show_label=False, label="Aspect Ratio") - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") if opts.dimensions_and_batch_together: with gr.Column(elem_id="img2img_column_batch"): batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") -- cgit v1.2.3 From fe46a08f52c36ca61446f657296802ab3ceb2529 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 12:09:25 +0300 Subject: add slash to non-empty dirs in extra networks interface --- modules/ui_extra_networks.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'modules') diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 90abec0a..30ceab4e 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -76,6 +76,10 @@ class ExtraNetworksPage: while subdir.startswith("/"): subdir = subdir[1:] + is_empty = len(os.listdir(x)) == 0 + if not is_empty and not subdir.endswith("/"): + subdir = subdir + "/" + subdirs[subdir] = 1 if subdirs: -- cgit v1.2.3 From b908bed8838fae89e5e83e57ca91808cf2d68077 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 12:23:40 +0300 Subject: remove unneeded return from #7583 --- modules/shared_items.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/shared_items.py b/modules/shared_items.py index b72b2bae..e792a134 100644 --- a/modules/shared_items.py +++ b/modules/shared_items.py @@ -20,4 +20,4 @@ def sd_vae_items(): def refresh_vae_list(): import modules.sd_vae - return modules.sd_vae.refresh_vae_list() + modules.sd_vae.refresh_vae_list() -- cgit v1.2.3 From 11183b4d905d14c6a0164a4d13675b89b1bf4ceb Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 19 Feb 2023 12:44:56 +0300 Subject: fix for #6700 --- modules/textual_inversion/dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index 1568b2b8..af9fbcf2 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -115,7 +115,7 @@ class PersonalizedBase(Dataset): weight /= weight.mean() elif use_weight: #If an image does not have a alpha channel, add a ones weight map anyway so we can stack it later - weight = torch.ones([channels] + latent_size) + weight = torch.ones(latent_sample.shape) else: weight = None -- cgit v1.2.3 From c77f01ff31072715afe80413ecaf6b3d00797d34 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 19 Feb 2023 20:37:40 +0900 Subject: fix auto sd download issue --- modules/sd_models.py | 8 +++++++- modules/shared.py | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/sd_models.py b/modules/sd_models.py index 127e9663..ac4903f4 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ 
-105,9 +105,15 @@ def checkpoint_tiles(): def list_models(): checkpoints_list.clear() checkpoint_alisases.clear() - model_list = modelloader.load_models(model_path=model_path, model_url="https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors", command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"]) cmd_ckpt = shared.cmd_opts.ckpt + if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file: + model_url = None + else: + model_url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors" + + model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"]) + if os.path.exists(cmd_ckpt): checkpoint_info = CheckpointInfo(cmd_ckpt) checkpoint_info.register() diff --git a/modules/shared.py b/modules/shared.py index 2c2edfbd..805f9cc1 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -108,6 +108,7 @@ parser.add_argument("--server-name", type=str, help="Sets hostname of server", d parser.add_argument("--gradio-queue", action='store_true', help="Uses gradio queue; experimental option; breaks restart UI button") parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers") parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False) +parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False) script_loading.preload_extensions(extensions.extensions_dir, parser) -- cgit v1.2.3 From 014e7323f6f54a38183f8de52dc83f2248eee251 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 19 Feb 2023 20:49:07 +0900 Subject: when exists --- modules/sd_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/sd_models.py b/modules/sd_models.py index ac4903f4..93959f55 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -107,7 +107,7 @@ def list_models(): checkpoint_alisases.clear() cmd_ckpt = shared.cmd_opts.ckpt - if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file: + if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file or os.path.exists(cmd_ckpt): model_url = None else: model_url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors" -- cgit v1.2.3
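The last two commits above (b20737815 and 014e7323f) together change how `list_models()` decides whether to fetch the default SD1.5 checkpoint. Below is a minimal standalone sketch of the decision logic they converge on; the helper name `resolve_model_url` and the parameter-style framing are illustrative assumptions of mine, while the flag names, the `sd_model_file` comparison, the existence check, and the download URL are taken from the patches themselves.

```python
import os

# Default URL, copied from the patch above.
SD15_URL = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors"


def resolve_model_url(cmd_opts, sd_model_file):
    """Sketch of the download decision list_models() ends up with.

    Returns the URL to hand to modelloader.load_models(), or None when the
    automatic SD1.5 download should be skipped.
    """
    cmd_ckpt = cmd_opts.ckpt
    if (
        cmd_opts.no_download_sd_model   # user opted out via --no-download-sd-model
        or cmd_ckpt != sd_model_file    # --ckpt points at a non-default checkpoint
        or os.path.exists(cmd_ckpt)     # the requested checkpoint is already on disk
    ):
        return None
    return SD15_URL
```

Under these assumptions, a fresh install with no `--ckpt` override yields the URL and triggers the one-time download, while an existing or explicitly chosen checkpoint yields `None`, leaving `modelloader.load_models()` to enumerate local files only.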