From 42082e8a3239c1c32cd9e2a03a20b610af857b51 Mon Sep 17 00:00:00 2001 From: devdn Date: Tue, 28 Mar 2023 18:18:28 -0400 Subject: performance increase --- modules/processing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..9f00ce3c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -105,7 +105,7 @@ class StableDiffusionProcessing: """ The first set of parameters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing """ - def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): + def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): if sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) @@ -140,6 +140,7 @@ class StableDiffusionProcessing: self.denoising_strength: float = denoising_strength self.sampler_noise_scheduler_override = None self.ddim_discretize = ddim_discretize or opts.ddim_discretize + self.s_min_uncond = s_min_uncond or opts.s_min_uncond self.s_churn = s_churn or opts.s_churn self.s_tmin = s_tmin or opts.s_tmin self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option @@ -162,6 +163,7 @@ class StableDiffusionProcessing: self.all_seeds = None self.all_subseeds = None self.iteration = 0 + @property def sd_model(self): -- cgit v1.2.3 From a609bd56b4206460d1df3c3022025fc78b66718f Mon Sep 17 00:00:00 2001 From: papuSpartan Date: Sat, 1 Apr 2023 22:18:35 -0500 Subject: Transition to using settings through UI 
instead of cmd line args. Added feature to only apply to hr-fix. Install package using requirements_versions.txt --- launch.py | 3 --- modules/processing.py | 35 +++++++++++++++++++++++++++++++++++ modules/sd_models.py | 7 ------- modules/shared.py | 44 ++++++++++++++++++++++++++++++++++++++++++++ requirements_versions.txt | 1 + 5 files changed, 80 insertions(+), 10 deletions(-) (limited to 'modules/processing.py') diff --git a/launch.py b/launch.py index 846c4c20..68e08114 100644 --- a/launch.py +++ b/launch.py @@ -280,9 +280,6 @@ def prepare_environment(): elif platform.system() == "Linux": run_pip(f"install {xformers_package}", "xformers") - if not is_installed("tomesd") and args.token_merging: - run_pip(f"install tomesd") - if not is_installed("pyngrok") and args.ngrok: run_pip("install pyngrok", "ngrok") diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..e115aadd 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -29,6 +29,7 @@ from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion from einops import repeat, rearrange from blendmodes.blend import blendLayers, BlendType +import tomesd # some of those options should not be changed at all because they would break the model, so I removed them from options. opt_C = 4 @@ -500,9 +501,28 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 'sd_vae': sd_vae.reload_vae_weights() + if opts.token_merging: + + if p.hr_second_pass_steps < 1 and not opts.token_merging_hr_only: + tomesd.apply_patch( + p.sd_model, + ratio=opts.token_merging_ratio, + max_downsample=opts.token_merging_maximum_down_sampling, + sx=opts.token_merging_stride_x, + sy=opts.token_merging_stride_y, + use_rand=opts.token_merging_random, + merge_attn=opts.token_merging_merge_attention, + merge_crossattn=opts.token_merging_merge_cross_attention, + merge_mlp=opts.token_merging_merge_mlp + ) + res = process_images_inner(p) finally: + # undo model optimizations made by tomesd + if opts.token_merging: + tomesd.remove_patch(p.sd_model) + # restore opts to original state if p.override_settings_restore_afterwards: for k, v in stored_opts.items(): @@ -938,6 +958,21 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): x = None devices.torch_gc() + # apply token merging optimizations from tomesd for high-res pass + # check if hr_only so we don't redundantly apply patch + if opts.token_merging and opts.token_merging_hr_only: + tomesd.apply_patch( + self.sd_model, + ratio=opts.token_merging_ratio, + max_downsample=opts.token_merging_maximum_down_sampling, + sx=opts.token_merging_stride_x, + sy=opts.token_merging_stride_y, + use_rand=opts.token_merging_random, + merge_attn=opts.token_merging_merge_attention, + merge_crossattn=opts.token_merging_merge_cross_attention, + merge_mlp=opts.token_merging_merge_mlp + ) + samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) return samples diff --git a/modules/sd_models.py b/modules/sd_models.py index 2c05ec17..87c49b83 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -431,13 +431,6 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None, time_taken_ with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd): sd_model = instantiate_from_config(sd_config.model) - if shared.cmd_opts.token_merging: - import tomesd - ratio = shared.cmd_opts.token_merging_ratio - - 
tomesd.apply_patch(sd_model, ratio=ratio) - print(f"Model accelerated using {(ratio * 100)}% token merging via tomesd.") - timer.record("token merging") except Exception as e: pass diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecb..d7379e24 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -427,6 +427,50 @@ options_templates.update(options_section((None, "Hidden options"), { "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), })) +options_templates.update(options_section(('token_merging', 'Token Merging'), { + "token_merging": OptionInfo( + False, "Enable redundant token merging via tomesd. (currently incompatible with controlnet extension)", + gr.Checkbox + ), + "token_merging_ratio": OptionInfo( + 0.5, "Merging Ratio", + gr.Slider, {"minimum": 0, "maximum": 0.9, "step": 0.1} + ), + "token_merging_hr_only": OptionInfo( + True, "Apply only to high-res fix pass. Disabling can yield a ~20-35% speedup on contemporary resolutions.", + gr.Checkbox + ), + # More advanced/niche settings: + "token_merging_random": OptionInfo( + True, "Use random perturbations - Disabling might help with certain samplers", + gr.Checkbox + ), + "token_merging_merge_attention": OptionInfo( + True, "Merge attention", + gr.Checkbox + ), + "token_merging_merge_cross_attention": OptionInfo( + False, "Merge cross attention", + gr.Checkbox + ), + "token_merging_merge_mlp": OptionInfo( + False, "Merge mlp", + gr.Checkbox + ), + "token_merging_maximum_down_sampling": OptionInfo( + 1, "Maximum down sampling", + gr.Dropdown, lambda: {"choices": ["1", "2", "4", "8"]} + ), + "token_merging_stride_x": OptionInfo( + 2, "Stride - X", + gr.Slider, {"minimum": 2, "maximum": 8, "step": 2} + ), + "token_merging_stride_y": OptionInfo( + 2, "Stride - Y", + gr.Slider, {"minimum": 2, "maximum": 8, "step": 2} + ) +})) + options_templates.update() diff --git a/requirements_versions.txt b/requirements_versions.txt index df65431a..045230ab 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -28,3 +28,4 @@ torchsde==0.2.5 safetensors==0.3.0 httpcore<=0.15 fastapi==0.94.0 +tomesd>=0.1 \ No newline at end of file -- cgit v1.2.3 From c707b7df95a61b66a05be94e805e1be9a432e294 Mon Sep 17 00:00:00 2001 From: papuSpartan Date: Sat, 1 Apr 2023 23:47:10 -0500 Subject: remove excess condition --- modules/processing.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index e115aadd..55735572 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -501,26 +501,26 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 'sd_vae': sd_vae.reload_vae_weights() - if opts.token_merging: - - if p.hr_second_pass_steps < 1 and not opts.token_merging_hr_only: - tomesd.apply_patch( - p.sd_model, - ratio=opts.token_merging_ratio, - max_downsample=opts.token_merging_maximum_down_sampling, - sx=opts.token_merging_stride_x, - sy=opts.token_merging_stride_y, - use_rand=opts.token_merging_random, - merge_attn=opts.token_merging_merge_attention, - merge_crossattn=opts.token_merging_merge_cross_attention, - merge_mlp=opts.token_merging_merge_mlp - ) + if opts.token_merging and not opts.token_merging_hr_only: + print("applying token merging to all passes") + tomesd.apply_patch( + p.sd_model, + ratio=opts.token_merging_ratio, + max_downsample=opts.token_merging_maximum_down_sampling, + sx=opts.token_merging_stride_x, + sy=opts.token_merging_stride_y, 
+ use_rand=opts.token_merging_random, + merge_attn=opts.token_merging_merge_attention, + merge_crossattn=opts.token_merging_merge_cross_attention, + merge_mlp=opts.token_merging_merge_mlp + ) res = process_images_inner(p) finally: # undo model optimizations made by tomesd if opts.token_merging: + print('removing token merging model optimizations') tomesd.remove_patch(p.sd_model) # restore opts to original state @@ -961,6 +961,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): # apply token merging optimizations from tomesd for high-res pass # check if hr_only so we don't redundantly apply patch if opts.token_merging and opts.token_merging_hr_only: + print("applying token merging for high-res pass") tomesd.apply_patch( self.sd_model, ratio=opts.token_merging_ratio, -- cgit v1.2.3 From 5c8e53d5e98da0eabf384318955c57842d612c07 Mon Sep 17 00:00:00 2001 From: papuSpartan Date: Tue, 4 Apr 2023 02:26:44 -0500 Subject: Allow different merge ratios to be used for each pass. Make toggle cmd flag work again. Remove ratio flag. Remove warning about controlnet being incompatible --- modules/cmd_args.py | 3 +-- modules/processing.py | 44 +++++++++++++++----------------------------- modules/sd_models.py | 29 ++++++++++++++++++++++++++++- modules/shared.py | 6 +++++- 4 files changed, 49 insertions(+), 33 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 4314f97b..8e5a7fab 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -103,5 +103,4 @@ parser.add_argument("--no-hashing", action='store_true', help="disable sha256 ha parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False) # token merging / tomesd -parser.add_argument("--token-merging", action='store_true', help="Provides generation speedup by merging redundant tokens. (compatible with --xformers)", default=False) -parser.add_argument("--token-merging-ratio", type=float, help="Adjusts ratio of merged to untouched tokens. Range: (0.0-1.0], Defaults to 0.5", default=0.5) +parser.add_argument("--token-merging", action='store_true', help="Provides speed and memory improvements by merging redundant tokens. 
This has a more pronounced effect on higher resolutions.", default=False) diff --git a/modules/processing.py b/modules/processing.py index 55735572..670a7a28 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -501,26 +501,16 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 'sd_vae': sd_vae.reload_vae_weights() - if opts.token_merging and not opts.token_merging_hr_only: - print("applying token merging to all passes") - tomesd.apply_patch( - p.sd_model, - ratio=opts.token_merging_ratio, - max_downsample=opts.token_merging_maximum_down_sampling, - sx=opts.token_merging_stride_x, - sy=opts.token_merging_stride_y, - use_rand=opts.token_merging_random, - merge_attn=opts.token_merging_merge_attention, - merge_crossattn=opts.token_merging_merge_cross_attention, - merge_mlp=opts.token_merging_merge_mlp - ) + if (opts.token_merging or cmd_opts.token_merging) and not opts.token_merging_hr_only: + print("\nApplying token merging\n") + sd_models.apply_token_merging(sd_model=p.sd_model, hr=False) res = process_images_inner(p) finally: # undo model optimizations made by tomesd - if opts.token_merging: - print('removing token merging model optimizations') + if opts.token_merging or cmd_opts.token_merging: + print('\nRemoving token merging model optimizations\n') tomesd.remove_patch(p.sd_model) # restore opts to original state @@ -959,20 +949,16 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): devices.torch_gc() # apply token merging optimizations from tomesd for high-res pass - # check if hr_only so we don't redundantly apply patch - if opts.token_merging and opts.token_merging_hr_only: - print("applying token merging for high-res pass") - tomesd.apply_patch( - self.sd_model, - ratio=opts.token_merging_ratio, - max_downsample=opts.token_merging_maximum_down_sampling, - sx=opts.token_merging_stride_x, - sy=opts.token_merging_stride_y, - use_rand=opts.token_merging_random, - merge_attn=opts.token_merging_merge_attention, - merge_crossattn=opts.token_merging_merge_cross_attention, - merge_mlp=opts.token_merging_merge_mlp - ) + # check if hr_only so we are not redundantly patching + if (cmd_opts.token_merging or opts.token_merging) and (opts.token_merging_hr_only or opts.token_merging_ratio_hr != opts.token_merging_ratio): + # case where user wants to use separate merge ratios + if not opts.token_merging_hr_only: + # clean patch done by first pass. (clobbering the first patch might be fine? 
this might be excessive) + print('Temporarily reverting token merging optimizations in preparation for next pass') + tomesd.remove_patch(self.sd_model) + + print("\nApplying token merging for high-res pass\n") + sd_models.apply_token_merging(sd_model=self.sd_model, hr=True) samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) diff --git a/modules/sd_models.py b/modules/sd_models.py index 87c49b83..696a2333 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -16,6 +16,7 @@ from modules import paths, shared, modelloader, devices, script_callbacks, sd_va from modules.paths import models_path from modules.sd_hijack_inpainting import do_inpainting_hijack from modules.timer import Timer +import tomesd model_dir = "Stable-diffusion" model_path = os.path.abspath(os.path.join(paths.models_path, model_dir)) @@ -545,4 +546,30 @@ def unload_model_weights(sd_model=None, info=None): print(f"Unloaded weights {timer.summary()}.") - return sd_model \ No newline at end of file + return sd_model + + +def apply_token_merging(sd_model, hr: bool): + """ + Applies speed and memory optimizations from tomesd. + + Args: + hr (bool): True if called in the context of a high-res pass + """ + + ratio = shared.opts.token_merging_ratio + if hr: + ratio = shared.opts.token_merging_ratio_hr + print("effective hr pass merge ratio is "+str(ratio)) + + tomesd.apply_patch( + sd_model, + ratio=ratio, + max_downsample=shared.opts.token_merging_maximum_down_sampling, + sx=shared.opts.token_merging_stride_x, + sy=shared.opts.token_merging_stride_y, + use_rand=shared.opts.token_merging_random, + merge_attn=shared.opts.token_merging_merge_attention, + merge_crossattn=shared.opts.token_merging_merge_cross_attention, + merge_mlp=shared.opts.token_merging_merge_mlp + ) diff --git a/modules/shared.py b/modules/shared.py index d7379e24..c7572e98 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -429,7 +429,7 @@ options_templates.update(options_section((None, "Hidden options"), { options_templates.update(options_section(('token_merging', 'Token Merging'), { "token_merging": OptionInfo( - False, "Enable redundant token merging via tomesd. (currently incompatible with controlnet extension)", + 0.5, "Enable redundant token merging via tomesd. This can provide significant speed and memory improvements.", gr.Checkbox ), "token_merging_ratio": OptionInfo( @@ -440,6 +440,10 @@ options_templates.update(options_section(('token_merging', 'Token Merging'), { True, "Apply only to high-res fix pass. 
Disabling can yield a ~20-35% speedup on contemporary resolutions.", gr.Checkbox ), + "token_merging_ratio_hr": OptionInfo( + 0.5, "Merging Ratio (high-res pass) - If 'Apply only to high-res' is enabled, this will always be the ratio used.", + gr.Slider, {"minimum": 0, "maximum": 0.9, "step": 0.1} + ), # More advanced/niche settings: "token_merging_random": OptionInfo( True, "Use random perturbations - Disabling might help with certain samplers", -- cgit v1.2.3 From b3593d0997bfdcca7f8aa01663e81720db50e494 Mon Sep 17 00:00:00 2001 From: For Sure Date: Thu, 6 Apr 2023 19:42:26 +0300 Subject: Add support for saving init images in img2img --- modules/processing.py | 8 ++++++++ modules/shared.py | 3 +++ 2 files changed, 11 insertions(+) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..5556afc5 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -3,6 +3,7 @@ import math import os import sys import warnings +import hashlib import torch import numpy as np @@ -476,6 +477,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, + "Init image hash": getattr(p, 'init_img_hash', None) } generation_params.update(p.extra_generation_params) @@ -1007,6 +1009,12 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.color_corrections = [] imgs = [] for img in self.init_images: + + # Save init image + if opts.save_init_img: + self.init_img_hash = hashlib.md5(img.tobytes()).hexdigest() + images.save_image(img, path=opts.outdir_init_images, basename=None, forced_filename=self.init_img_hash, save_to_dirs=False) + image = images.flatten(img, opts.img2img_background_color) if crop_region is None and self.resize_mode != 3: diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecb..69c2b21e 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -39,6 +39,7 @@ restricted_opts = { "outdir_grids", "outdir_txt2img_grids", "outdir_save", + "outdir_init_images" } ui_reorder_categories = [ @@ -253,6 +254,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), "do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"), + "save_init_img": OptionInfo(True, "Save init images when using img2img"), "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), @@ -268,6 +270,7 @@ options_templates.update(options_section(('saving-paths', "Paths for saving"), { "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs), "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs), "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs), + "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", 
component_args=hide_dirs), })) options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { -- cgit v1.2.3 From d609f6030ec464b371a899ced366c62bbd9a4a91 Mon Sep 17 00:00:00 2001 From: gk Date: Fri, 7 Apr 2023 21:04:46 +0900 Subject: Add [batch_number] and [generation_number] filename patterns --- modules/images.py | 6 ++++++ modules/processing.py | 4 +++- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/images.py b/modules/images.py index b3535070..5c0cf1d8 100644 --- a/modules/images.py +++ b/modules/images.py @@ -352,6 +352,8 @@ class FilenameGenerator: 'prompt_no_styles': lambda self: self.prompt_no_style(), 'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False), 'prompt_words': lambda self: self.prompt_words(), + 'batch_number': lambda self: self.p.batch_index + 1, + 'generation_number': lambda self: self.p.iteration * self.p.batch_size + self.p.batch_index + 1, } default_time_format = '%Y%m%d%H%M%S' @@ -403,6 +405,10 @@ class FilenameGenerator: for m in re_pattern.finditer(x): text, pattern = m.groups() + + if pattern is not None and (pattern.lower() == 'batch_number' and self.p.batch_size == 1 or pattern.lower() == 'generation_number' and self.p.n_iter == 1 and self.p.batch_size == 1): + continue + res += text if pattern is None: diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..0e6a60ba 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -670,6 +670,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n) for i, x_sample in enumerate(x_samples_ddim): + p.batch_index = i + x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2) x_sample = x_sample.astype(np.uint8) @@ -718,7 +720,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if opts.return_mask: output_images.append(image_mask) - + if opts.return_mask_composite: output_images.append(image_mask_composite) -- cgit v1.2.3 From 1c1106260300ca3956d9619875e28278b148adab Mon Sep 17 00:00:00 2001 From: papuSpartan Date: Mon, 10 Apr 2023 03:37:15 -0500 Subject: add token merging options to infotext when necessary. 
Bump tomesd version --- modules/generation_parameters_copypaste.py | 37 ++++++++++++++++++++++++++++++ modules/processing.py | 22 ++++++++++++++---- modules/shared.py | 2 +- requirements_versions.txt | 2 +- 4 files changed, 57 insertions(+), 6 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 6df76858..a7ede534 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -282,6 +282,32 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model res["Hires resize-1"] = 0 res["Hires resize-2"] = 0 + # Infer additional override settings for token merging + print("inferring settings for tomesd") + token_merging_ratio = res.get("Token merging ratio", None) + token_merging_ratio_hr = res.get("Token merging ratio hr", None) + + if token_merging_ratio is not None or token_merging_ratio_hr is not None: + res["Token merging"] = 'True' + + if token_merging_ratio is None: + res["Token merging hr only"] = 'True' + else: + res["Token merging hr only"] = 'False' + + if res.get("Token merging random", None) is None: + res["Token merging random"] = 'False' + if res.get("Token merging merge attention", None) is None: + res["Token merging merge attention"] = 'True' + if res.get("Token merging merge cross attention", None) is None: + res["Token merging merge cross attention"] = 'False' + if res.get("Token merging merge mlp", None) is None: + res["Token merging merge mlp"] = 'False' + if res.get("Token merging stride x", None) is None: + res["Token merging stride x"] = '2' + if res.get("Token merging stride y", None) is None: + res["Token merging stride y"] = '2' + restore_old_hires_fix_params(res) return res @@ -304,6 +330,17 @@ infotext_to_setting_name_mapping = [ ('UniPC skip type', 'uni_pc_skip_type'), ('UniPC order', 'uni_pc_order'), ('UniPC lower order final', 'uni_pc_lower_order_final'), + ('Token merging', 'token_merging'), + ('Token merging ratio', 'token_merging_ratio'), + ('Token merging hr only', 'token_merging_hr_only'), + ('Token merging ratio hr', 'token_merging_ratio_hr'), + ('Token merging random', 'token_merging_random'), + ('Token merging merge attention', 'token_merging_merge_attention'), + ('Token merging merge cross attention', 'token_merging_merge_cross_attention'), + ('Token merging merge mlp', 'token_merging_merge_mlp'), + ('Token merging maximum downsampling', 'token_merging_maximum_downsampling'), + ('Token merging stride x', 'token_merging_stride_x'), + ('Token merging stride y', 'token_merging_stride_y') ] diff --git a/modules/processing.py b/modules/processing.py index 670a7a28..95058d0b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -31,6 +31,12 @@ from einops import repeat, rearrange from blendmodes.blend import blendLayers, BlendType import tomesd +# add a logger for the processing module +logger = logging.getLogger(__name__) +# manually set output level here since there is no option to do so yet through launch options +# logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s') + + # some of those options should not be changed at all because they would break the model, so I removed them from options. 
opt_C = 4 opt_f = 8 @@ -477,6 +483,14 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, + "Token merging ratio": None if not (opts.token_merging or cmd_opts.token_merging) or opts.token_merging_hr_only else opts.token_merging_ratio, + "Token merging ratio hr": None if not (opts.token_merging or cmd_opts.token_merging) else opts.token_merging_ratio_hr, + "Token merging random": None if opts.token_merging_random is False else opts.token_merging_random, + "Token merging merge attention": None if opts.token_merging_merge_attention is True else opts.token_merging_merge_attention, + "Token merging merge cross attention": None if opts.token_merging_merge_cross_attention is False else opts.token_merging_merge_cross_attention, + "Token merging merge mlp": None if opts.token_merging_merge_mlp is False else opts.token_merging_merge_mlp, + "Token merging stride x": None if opts.token_merging_stride_x == 2 else opts.token_merging_stride_x, + "Token merging stride y": None if opts.token_merging_stride_y == 2 else opts.token_merging_stride_y } generation_params.update(p.extra_generation_params) @@ -502,16 +516,16 @@ def process_images(p: StableDiffusionProcessing) -> Processed: sd_vae.reload_vae_weights() if (opts.token_merging or cmd_opts.token_merging) and not opts.token_merging_hr_only: - print("\nApplying token merging\n") sd_models.apply_token_merging(sd_model=p.sd_model, hr=False) + logger.debug('Token merging applied') res = process_images_inner(p) finally: # undo model optimizations made by tomesd if opts.token_merging or cmd_opts.token_merging: - print('\nRemoving token merging model optimizations\n') tomesd.remove_patch(p.sd_model) + logger.debug('Token merging model optimizations removed') # restore opts to original state if p.override_settings_restore_afterwards: @@ -954,11 +968,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): # case where user wants to use separate merge ratios if not opts.token_merging_hr_only: # clean patch done by first pass. (clobbering the first patch might be fine? this might be excessive) - print('Temporarily reverting token merging optimizations in preparation for next pass') tomesd.remove_patch(self.sd_model) + logger.debug('Temporarily removed token merging optimizations in preparation for next pass') - print("\nApplying token merging for high-res pass\n") sd_models.apply_token_merging(sd_model=self.sd_model, hr=True) + logger.debug('Applied token merging for high-res pass') samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) diff --git a/modules/shared.py b/modules/shared.py index 568acdc4..d9db7317 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -446,7 +446,7 @@ options_templates.update(options_section(('token_merging', 'Token Merging'), { ), # More advanced/niche settings: "token_merging_random": OptionInfo( - True, "Use random perturbations - Disabling might help with certain samplers", + False, "Use random perturbations - Can improve outputs for certain samplers. 
For others, it may cause visual artifacting.", gr.Checkbox ), "token_merging_merge_attention": OptionInfo( True, "Merge attention", diff --git a/requirements_versions.txt b/requirements_versions.txt index 03522715..f972f975 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -28,4 +28,4 @@ torchsde==0.2.5 safetensors==0.3.0 httpcore<=0.15 fastapi==0.94.0 -tomesd>=0.1.1 \ No newline at end of file +tomesd>=0.1.2 \ No newline at end of file -- cgit v1.2.3 From 3af152d488db0c521f6058676e1a65c7157ccc14 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 14 Apr 2023 17:17:14 -0400 Subject: Fix image mask composite for weird resolutions --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..f49992d9 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -708,7 +708,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay: image_mask = p.mask_for_overlay.convert('RGB') - image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), p.mask_for_overlay.convert('L')).convert('RGBA') + image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA') if opts.save_mask: images.save_image(image_mask, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask") -- cgit v1.2.3 From fbab3fc6d122fb4e6648dd82291a70fc348c0b4a Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 14 Apr 2023 17:24:55 -0400 Subject: Only handle image mask if any option enabled --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index f49992d9..5c6edc60 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -706,7 +706,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: image.info["parameters"] = text output_images.append(image) - if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay: + if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]): image_mask = p.mask_for_overlay.convert('RGB') image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA') -- cgit v1.2.3 From 56f8a6b081e3dbfc93956e5c44e507e74a2ba040 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 16 Apr 2023 20:34:52 -0400 Subject: Fix sampler schedules with step multiplier --- modules/processing.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..f6514637 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -639,8 +639,12 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: processed = Processed(p, [], p.seed, "") file.write(processed.infotext(p, 0)) - uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, 
negative_prompts, p.steps, cached_uc) - c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c) + try: + step_multiplier = 2 if sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1 + except: + step_multiplier = 1 + uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps * step_multiplier, cached_uc) + c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps * step_multiplier, cached_c) if len(model_hijack.comments) > 0: for comment in model_hijack.comments: -- cgit v1.2.3 From 4d0c8163036b42dd66aad922c22d3b3d19217bd9 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 16 Apr 2023 20:39:45 -0400 Subject: Modify step multiplier flow --- modules/processing.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index f6514637..15ad629f 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -639,10 +639,12 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: processed = Processed(p, [], p.seed, "") file.write(processed.infotext(p, 0)) - try: - step_multiplier = 2 if sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1 - except: - step_multiplier = 1 + step_multiplier = 1 + if shared.opts.fix_second_order_samplers_schedule: + try: + step_multiplier = 2 if sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1 + except: + pass uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps * step_multiplier, cached_uc) c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps * step_multiplier, cached_c) -- cgit v1.2.3 From 9de7298898951d6038eeadee78f7be3b09d2cfc3 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 16 Apr 2023 21:06:37 -0400 Subject: Update processing.py --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 15ad629f..2f391ee4 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -640,7 +640,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: file.write(processed.infotext(p, 0)) step_multiplier = 1 - if shared.opts.fix_second_order_samplers_schedule: + if not shared.opts.dont_fix_second_order_samplers_schedule: try: step_multiplier = 2 if sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1 except: -- cgit v1.2.3 From 5fe0dd79beaa5ef737ff85254ee9870f60ae9464 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 11:29:37 +0300 Subject: rename CPU RNG to RNG source in settings, add infotext and parameters copypaste support to RNG source --- modules/devices.py | 4 ++-- modules/generation_parameters_copypaste.py | 5 +++++ modules/processing.py | 3 ++- modules/sd_samplers_common.py | 3 ++- 
modules/sd_samplers_kdiffusion.py | 2 +- modules/shared.py | 2 +- 6 files changed, 13 insertions(+), 6 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/devices.py b/modules/devices.py index 3bc86a6a..c705a3cb 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -95,7 +95,7 @@ def randn(seed, shape): from modules.shared import opts torch.manual_seed(seed) - if opts.use_cpu_randn or device.type == 'mps': + if opts.randn_source == "CPU" or device.type == 'mps': return torch.randn(shape, device=cpu).to(device) return torch.randn(shape, device=device) @@ -103,7 +103,7 @@ def randn(seed, shape): def randn_without_seed(shape): from modules.shared import opts - if opts.use_cpu_randn or device.type == 'mps': + if opts.randn_source == "CPU" or device.type == 'mps': return torch.randn(shape, device=cpu).to(device) return torch.randn(shape, device=device) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 6df76858..e7269363 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -284,6 +284,10 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model restore_old_hires_fix_params(res) + # Missing RNG means the default was set, which is GPU RNG + if "RNG" not in res: + res["RNG"] = "GPU" + return res @@ -304,6 +308,7 @@ infotext_to_setting_name_mapping = [ ('UniPC skip type', 'uni_pc_skip_type'), ('UniPC order', 'uni_pc_order'), ('UniPC lower order final', 'uni_pc_lower_order_final'), + ('RNG', 'randn_source'), ] diff --git a/modules/processing.py b/modules/processing.py index 5556afc5..7bac154d 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -477,7 +477,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, - "Init image hash": getattr(p, 'init_img_hash', None) + "Init image hash": getattr(p, 'init_img_hash', None), + "RNG": (opts.randn_source if opts.randn_source != "GPU" else None) } generation_params.update(p.extra_generation_params) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index e6a372d5..bc074238 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -61,7 +61,8 @@ def store_latent(decoded): class InterruptedException(BaseException): pass -if opts.use_cpu_randn: + +if opts.randn_source == "CPU": import torchsde._brownian.brownian_interval def torchsde_randn(size, dtype, device, seed): diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 13f4567a..a547d1b5 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -190,7 +190,7 @@ class TorchHijack: if noise.shape == x.shape: return noise - if opts.use_cpu_randn or x.device.type == 'mps': + if opts.randn_source == "CPU" or x.device.type == 'mps': return torch.randn_like(x, device=devices.cpu).to(x.device) else: return torch.randn_like(x) diff --git a/modules/shared.py b/modules/shared.py index b5b401fe..73704889 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -334,7 +334,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last 
comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }), "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), - "use_cpu_randn": OptionInfo(False, "Use CPU for random number generation to make manual seeds generate the same image across platforms. This may change existing seeds."), + "randn_source": OptionInfo("GPU", "Random number generator source. Changes seeds drastically. Use CPU to produce the same picture across different video card vendors.", gr.Radio, {"choices": ["GPU", "CPU"]}), })) options_templates.update(options_section(('compatibility', "Compatibility"), { -- cgit v1.2.3 From 1d11e896984c883f6a0debb3abaef945595cbc70 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 15:57:09 +0300 Subject: rework Negative Guidance minimum sigma to work with AND, add infotext and copypaste parameters support --- javascript/hints.js | 3 ++- modules/generation_parameters_copypaste.py | 1 + modules/processing.py | 3 ++- modules/sd_samplers_kdiffusion.py | 43 +++++++++++++++++------------ 4 files changed, 30 insertions(+), 20 deletions(-) (limited to 'modules/processing.py') diff --git a/javascript/hints.js b/javascript/hints.js index c6bae360..44d418da 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -111,7 +111,8 @@ titles = { "Resize height to": "Resizes image to this height. If 0, height is inferred from either of two nearby sliders.", "Multiplier for extra networks": "When adding extra network such as Hypernetwork or Lora to prompt, use this multiplier for it.", "Discard weights with matching name": "Regular expression; if the weights' name matches it, the weights are not written to the resulting checkpoint. Use ^model_ema to discard EMA weights.", - "Extra networks tab order": "Comma-separated list of tab names; tabs listed here will appear in the extra networks UI first and in order listed." + "Extra networks tab order": "Comma-separated list of tab names; tabs listed here will appear in the extra networks UI first and in order listed.", + "Negative Guidance minimum sigma": "Skip negative prompt for steps where image is already mostly denoised; the higher this value, the more skips there will be; provides increased performance in exchange for minor quality reduction." 
} diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index e7269363..99f1a0d3 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -309,6 +309,7 @@ infotext_to_setting_name_mapping = [ ('UniPC order', 'uni_pc_order'), ('UniPC lower order final', 'uni_pc_lower_order_final'), ('RNG', 'randn_source'), + ('NGMS', 's_min_uncond'), ] diff --git a/modules/processing.py b/modules/processing.py index 04a06290..c50784f4 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -480,7 +480,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, "Init image hash": getattr(p, 'init_img_hash', None), - "RNG": (opts.randn_source if opts.randn_source != "GPU" else None) + "RNG": opts.randn_source if opts.randn_source != "GPU" else None, + "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, } generation_params.update(p.extra_generation_params) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index d42d5fcf..f8aaac59 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -115,20 +115,21 @@ class CFGDenoiser(torch.nn.Module): sigma_in = denoiser_params.sigma tensor = denoiser_params.text_cond uncond = denoiser_params.text_uncond + skip_uncond = False - if self.step % 2 and s_min_uncond > 0 and not is_edit_model: - # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it - sigma_threshold = s_min_uncond - if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_threshold*sigma_threshold) ): - uncond = torch.zeros([0,0,uncond.shape[2]]) - x_in=x_in[:x_in.shape[0]//2] - sigma_in=sigma_in[:sigma_in.shape[0]//2] + # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it + if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: + skip_uncond = True + x_in = x_in[:-batch_size] + sigma_in = sigma_in[:-batch_size] - if tensor.shape[1] == uncond.shape[1]: - if not is_edit_model: - cond_in = torch.cat([tensor, uncond]) - else: + if tensor.shape[1] == uncond.shape[1] or skip_uncond: + if is_edit_model: cond_in = torch.cat([tensor, uncond, uncond]) + elif skip_uncond: + cond_in = tensor + else: + cond_in = torch.cat([tensor, uncond]) if shared.batch_cond_uncond: x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict([cond_in], image_cond_in)) @@ -152,9 +153,15 @@ class CFGDenoiser(torch.nn.Module): x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b])) - if uncond.shape[0]: + if not skip_uncond: x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:])) + if skip_uncond: + #x_out = torch.cat([x_out, x_out[0:batch_size]]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be + denoised_image_indexes = [x[0][0] for x in conds_list] + fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes]) + x_out = torch.cat([x_out, fake_uncond]) + denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps) cfg_denoised_callback(denoised_params) @@ -165,13 +172,12 @@ class 
CFGDenoiser(torch.nn.Module): elif opts.live_preview_content == "Negative prompt": sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) - if not is_edit_model: - if uncond.shape[0]: - denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) - else: - denoised = x_out - else: + if is_edit_model: denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) + elif skip_uncond: + denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0) + else: + denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) if self.mask is not None: denoised = self.init_latent * self.mask + self.nmask * denoised @@ -221,6 +227,7 @@ class KDiffusionSampler: self.eta = None self.config = None self.last_latent = None + self.s_min_uncond = None self.conditioning_key = sd_model.model.conditioning_key -- cgit v1.2.3 From 7428fb5176ccfd203bddcfa30d75c8df5a772cb4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 16:28:51 +0300 Subject: add is_hr_pass field for processing --- modules/processing.py | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index c50784f4..8d7b2462 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -164,6 +164,7 @@ class StableDiffusionProcessing: self.all_seeds = None self.all_subseeds = None self.iteration = 0 + self.is_hr_pass = False @property @@ -883,6 +884,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if not self.enable_hr: return samples + self.is_hr_pass = True + target_width = self.hr_upscale_to_x target_height = self.hr_upscale_to_y @@ -952,6 +955,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) + self.is_hr_pass = False + return samples -- cgit v1.2.3 From 0d1ef296b9f7c4e78060ce167bb784246fa09de1 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 1 May 2023 05:22:53 +0900 Subject: checkpoint override enhancement --- modules/processing.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index a48fff99..e8808beb 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -498,6 +498,11 @@ def process_images(p: StableDiffusionProcessing) -> Processed: stored_opts = {k: opts.data[k] for k in p.override_settings.keys()} try: + # if no checkpoint override or the override checkpoint can't be found, remove override entry and load opts checkpoint + if sd_models.checkpoint_alisases.get(p.override_settings.get('sd_model_checkpoint')) is None: + p.override_settings.pop('sd_model_checkpoint', None) + sd_models.reload_model_weights() + for k, v in p.override_settings.items(): setattr(opts, k, v) @@ -514,8 +519,6 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if p.override_settings_restore_afterwards: for k, v in stored_opts.items(): setattr(opts, k, v) - if k == 'sd_model_checkpoint': - sd_models.reload_model_weights() if k == 'sd_vae': sd_vae.reload_vae_weights() -- cgit v1.2.3 From e960781511eb175943be09b314ac2be46b6fc684 Mon Sep 17 00:00:00 2001 From: papuSpartan <30642826+papuSpartan@users.noreply.github.com> Date: Wed, 3 May 2023 13:12:43 -0500 Subject: fix maximum downsampling option --- 
modules/generation_parameters_copypaste.py | 4 +++- modules/processing.py | 1 + modules/shared.py | 5 +---- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 34c1b860..83382e93 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -306,6 +306,8 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model res["Token merging stride x"] = '2' if res.get("Token merging stride y", None) is None: res["Token merging stride y"] = '2' + if res.get("Token merging maximum down sampling", None) is None: + res["Token merging maximum down sampling"] = '1' restore_old_hires_fix_params(res) @@ -341,7 +343,7 @@ infotext_to_setting_name_mapping = [ ('Token merging merge attention', 'token_merging_merge_attention'), ('Token merging merge cross attention', 'token_merging_merge_cross_attention'), ('Token merging merge mlp', 'token_merging_merge_mlp'), - ('Token merging maximum downsampling', 'token_merging_maximum_downsampling'), + ('Token merging maximum down sampling', 'token_merging_maximum_down_sampling'), ('Token merging stride x', 'token_merging_stride_x'), ('Token merging stride y', 'token_merging_stride_y'), ('RNG', 'randn_source'), diff --git a/modules/processing.py b/modules/processing.py index d5d1da5a..6807a301 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -495,6 +495,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Token merging merge mlp": None if opts.token_merging_merge_mlp is False else opts.token_merging_merge_mlp, "Token merging stride x": None if opts.token_merging_stride_x == 2 else opts.token_merging_stride_x, "Token merging stride y": None if opts.token_merging_stride_y == 2 else opts.token_merging_stride_y, + "Token merging maximum down sampling": None if opts.token_merging_maximum_down_sampling == 1 else opts.token_merging_maximum_down_sampling, "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, diff --git a/modules/shared.py b/modules/shared.py index 7b81ffc9..a7a72dd5 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -488,10 +488,7 @@ options_templates.update(options_section(('token_merging', 'Token Merging'), { False, "Merge mlp", gr.Checkbox ), - "token_merging_maximum_down_sampling": OptionInfo( - 1, "Maximum down sampling", - gr.Dropdown, lambda: {"choices": ["1", "2", "4", "8"]} - ), + "token_merging_maximum_down_sampling": OptionInfo(1, "Maximum down sampling", gr.Radio, lambda: {"choices": ['1', '2', '4', '8']}), "token_merging_stride_x": OptionInfo( 2, "Stride - X", gr.Slider, {"minimum": 2, "maximum": 8, "step": 2} -- cgit v1.2.3 From ab4ab4e595e89d1a9a39db70539d5944fdbe47fa Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 8 May 2023 15:23:49 +0300 Subject: add version to infotext, footer and console output when starting --- launch.py | 17 +++++++++++++++++ modules/processing.py | 11 +++++++++++ modules/shared.py | 1 + modules/ui.py | 6 +++--- 4 files changed, 32 insertions(+), 3 deletions(-) (limited to 'modules/processing.py') diff --git a/launch.py b/launch.py index 1dc12dae..2a33adc8 100644 --- a/launch.py +++ b/launch.py @@ -19,6 +19,7 @@ python = sys.executable git = os.environ.get('GIT', "git") index_url = os.environ.get('INDEX_URL', "") 
stored_commit_hash = None +stored_git_tag = None dir_repos = "repositories" if 'GRADIO_ANALYTICS_ENABLED' not in os.environ: @@ -70,6 +71,20 @@ def commit_hash(): return stored_commit_hash +def git_tag(): + global stored_git_tag + + if stored_git_tag is not None: + return stored_git_tag + + try: + stored_git_tag = run(f"{git} describe --tags").strip() + except Exception: + stored_git_tag = "" + + return stored_git_tag + + def run(command, desc=None, errdesc=None, custom_env=None, live=False): if desc is not None: print(desc) @@ -246,8 +261,10 @@ def prepare_environment(): check_python_version() commit = commit_hash() + tag = git_tag() print(f"Python {sys.version}") + print(f"Version: {tag}") print(f"Commit hash: {commit}") if args.reinstall_torch or not is_installed("torch") or not is_installed("torchvision"): diff --git a/modules/processing.py b/modules/processing.py index e8808beb..e786791a 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -458,6 +458,16 @@ def fix_seed(p): p.subseed = get_fixed_seed(p.subseed) +def program_version(): + import launch + + res = launch.git_tag() + if res == "": + res = None + + return res + + def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0): index = position_in_batch + iteration * p.batch_size @@ -483,6 +493,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, + "Version": program_version() if opts.add_version_to_infotext else None, } generation_params.update(p.extra_generation_params) diff --git a/modules/shared.py b/modules/shared.py index dd374713..f40faa79 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -394,6 +394,7 @@ options_templates.update(options_section(('ui', "User interface"), { "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), + "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"), "disable_weights_auto_swap": OptionInfo(True, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."), "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), diff --git a/modules/ui.py b/modules/ui.py index 16c46515..b2916e9c 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1923,7 +1923,7 @@ def versions_html(): python_version = ".".join([str(x) for x in sys.version_info[0:3]]) commit = launch.commit_hash() - short_commit = commit[0:8] + tag = launch.git_tag() if shared.xformers_available: import xformers @@ -1932,6 +1932,8 @@ def versions_html(): xformers_version = "N/A" return f""" +version: {tag} + •  python: {python_version}  •  torch: {getattr(torch, '__long_version__',torch.__version__)} @@ -1940,7 +1942,5 @@ xformers: {xformers_version}  •  gradio: {gr.__version__}  •  -commit: {short_commit} - •  checkpoint: N/A """ -- cgit v1.2.3 From 3ba6c3c83c0983a025c7bddc08bb7f49481b3cbb Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Tue, 9 May 2023 22:17:58 +0300 Subject: Fix up string formatting/concatenation to 
f-strings where feasible --- modules/api/api.py | 22 ++++++------ modules/call_queue.py | 5 +-- modules/esrgan_model.py | 11 +++--- modules/esrgan_model_arch.py | 16 ++++----- modules/extra_networks_hypernet.py | 3 +- modules/generation_parameters_copypaste.py | 4 +-- modules/hashes.py | 4 +-- modules/images.py | 8 ++--- modules/interrogate.py | 4 +-- modules/models/diffusion/ddpm_edit.py | 4 +-- modules/models/diffusion/uni_pc/uni_pc.py | 4 +-- modules/ngrok.py | 4 +-- modules/paths.py | 2 +- modules/processing.py | 13 ++++++-- modules/progress.py | 3 +- modules/realesrgan_model.py | 8 ++--- modules/scripts.py | 5 +-- modules/sd_hijack_clip_old.py | 3 +- modules/sd_hijack_unet.py | 2 +- modules/sd_models.py | 4 +-- modules/sd_models_config.py | 2 +- modules/sd_samplers_kdiffusion.py | 2 +- modules/sd_vae.py | 2 +- modules/styles.py | 2 +- modules/textual_inversion/autocrop.py | 6 ++-- modules/textual_inversion/dataset.py | 2 +- modules/textual_inversion/preprocess.py | 6 ++-- modules/textual_inversion/textual_inversion.py | 12 +++---- modules/ui.py | 46 +++++++++++++------------- modules/ui_extensions.py | 3 +- modules/ui_extra_networks.py | 4 ++- scripts/custom_code.py | 2 +- scripts/loopback.py | 2 +- scripts/xyz_grid.py | 2 +- 34 files changed, 121 insertions(+), 101 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/api/api.py b/modules/api/api.py index cdbdce32..9bb95dfd 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -570,20 +570,20 @@ class Api: filename = create_embedding(**args) # create empty embedding sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() # reload embeddings so new one can be immediately used shared.state.end() - return CreateResponse(info = "create embedding filename: {filename}".format(filename = filename)) + return CreateResponse(info=f"create embedding filename: {filename}") except AssertionError as e: shared.state.end() - return TrainResponse(info = "create embedding error: {error}".format(error = e)) + return TrainResponse(info=f"create embedding error: {e}") def create_hypernetwork(self, args: dict): try: shared.state.begin() filename = create_hypernetwork(**args) # create empty embedding shared.state.end() - return CreateResponse(info = "create hypernetwork filename: {filename}".format(filename = filename)) + return CreateResponse(info=f"create hypernetwork filename: {filename}") except AssertionError as e: shared.state.end() - return TrainResponse(info = "create hypernetwork error: {error}".format(error = e)) + return TrainResponse(info=f"create hypernetwork error: {e}") def preprocess(self, args: dict): try: @@ -593,13 +593,13 @@ class Api: return PreprocessResponse(info = 'preprocess complete') except KeyError as e: shared.state.end() - return PreprocessResponse(info = "preprocess error: invalid token: {error}".format(error = e)) + return PreprocessResponse(info=f"preprocess error: invalid token: {e}") except AssertionError as e: shared.state.end() - return PreprocessResponse(info = "preprocess error: {error}".format(error = e)) + return PreprocessResponse(info=f"preprocess error: {e}") except FileNotFoundError as e: shared.state.end() - return PreprocessResponse(info = 'preprocess error: {error}'.format(error = e)) + return PreprocessResponse(info=f'preprocess error: {e}') def train_embedding(self, args: dict): try: @@ -617,10 +617,10 @@ class Api: if not apply_optimizations: sd_hijack.apply_optimizations() shared.state.end() - return TrainResponse(info = "train embedding complete: filename: 
{filename} error: {error}".format(filename = filename, error = error)) + return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") except AssertionError as msg: shared.state.end() - return TrainResponse(info = "train embedding error: {msg}".format(msg = msg)) + return TrainResponse(info=f"train embedding error: {msg}") def train_hypernetwork(self, args: dict): try: @@ -641,10 +641,10 @@ class Api: if not apply_optimizations: sd_hijack.apply_optimizations() shared.state.end() - return TrainResponse(info="train embedding complete: filename: {filename} error: {error}".format(filename=filename, error=error)) + return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") except AssertionError as msg: shared.state.end() - return TrainResponse(info="train embedding error: {error}".format(error=error)) + return TrainResponse(info=f"train embedding error: {error}") def get_memory(self): try: diff --git a/modules/call_queue.py b/modules/call_queue.py index 1829f3a6..447bb764 100644 --- a/modules/call_queue.py +++ b/modules/call_queue.py @@ -60,7 +60,7 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False): max_debug_str_len = 131072 # (1024*1024)/8 print("Error completing request", file=sys.stderr) - argStr = f"Arguments: {str(args)} {str(kwargs)}" + argStr = f"Arguments: {args} {kwargs}" print(argStr[:max_debug_str_len], file=sys.stderr) if len(argStr) > max_debug_str_len: print(f"(Argument list truncated at {max_debug_str_len}/{len(argStr)} characters)", file=sys.stderr) @@ -73,7 +73,8 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False): if extra_outputs_array is None: extra_outputs_array = [None, ''] - res = extra_outputs_array + [f"
<div class='error'>{html.escape(type(e).__name__+': '+str(e))}</div>
"] + error_message = f'{type(e).__name__}: {e}' + res = extra_outputs_array + [f"
<div class='error'>{html.escape(error_message)}</div>
"] shared.state.skipped = False shared.state.interrupted = False diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index 9a9c38f1..f4369257 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -156,13 +156,16 @@ class UpscalerESRGAN(Upscaler): def load_model(self, path: str): if "http" in path: - filename = load_file_from_url(url=self.model_url, model_dir=self.model_path, - file_name="%s.pth" % self.model_name, - progress=True) + filename = load_file_from_url( + url=self.model_url, + model_dir=self.model_path, + file_name=f"{self.model_name}.pth", + progress=True, + ) else: filename = path if not os.path.exists(filename) or filename is None: - print("Unable to load %s from %s" % (self.model_path, filename)) + print(f"Unable to load {self.model_path} from {filename}") return None state_dict = torch.load(filename, map_location='cpu' if devices.device_esrgan.type == 'mps' else None) diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py index 1b52b0f5..6071fea7 100644 --- a/modules/esrgan_model_arch.py +++ b/modules/esrgan_model_arch.py @@ -38,7 +38,7 @@ class RRDBNet(nn.Module): elif upsample_mode == 'pixelshuffle': upsample_block = pixelshuffle_block else: - raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode)) + raise NotImplementedError(f'upsample mode [{upsample_mode}] is not found') if upscale == 3: upsampler = upsample_block(nf, nf, 3, act_type=act_type, convtype=convtype) else: @@ -261,10 +261,10 @@ class Upsample(nn.Module): def extra_repr(self): if self.scale_factor is not None: - info = 'scale_factor=' + str(self.scale_factor) + info = f'scale_factor={self.scale_factor}' else: - info = 'size=' + str(self.size) - info += ', mode=' + self.mode + info = f'size={self.size}' + info += f', mode={self.mode}' return info @@ -350,7 +350,7 @@ def act(act_type, inplace=True, neg_slope=0.2, n_prelu=1, beta=1.0): elif act_type == 'sigmoid': # [0, 1] range output layer = nn.Sigmoid() else: - raise NotImplementedError('activation layer [{:s}] is not found'.format(act_type)) + raise NotImplementedError(f'activation layer [{act_type}] is not found') return layer @@ -372,7 +372,7 @@ def norm(norm_type, nc): elif norm_type == 'none': def norm_layer(x): return Identity() else: - raise NotImplementedError('normalization layer [{:s}] is not found'.format(norm_type)) + raise NotImplementedError(f'normalization layer [{norm_type}] is not found') return layer @@ -388,7 +388,7 @@ def pad(pad_type, padding): elif pad_type == 'zero': layer = nn.ZeroPad2d(padding) else: - raise NotImplementedError('padding layer [{:s}] is not implemented'.format(pad_type)) + raise NotImplementedError(f'padding layer [{pad_type}] is not implemented') return layer @@ -432,7 +432,7 @@ def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias= pad_type='zero', norm_type=None, act_type='relu', mode='CNA', convtype='Conv2D', spectral_norm=False): """ Conv layer with padding, normalization, activation """ - assert mode in ['CNA', 'NAC', 'CNAC'], 'Wrong conv mode [{:s}]'.format(mode) + assert mode in ['CNA', 'NAC', 'CNAC'], f'Wrong conv mode [{mode}]' padding = get_valid_padding(kernel_size, dilation) p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None padding = padding if pad_type == 'zero' else 0 diff --git a/modules/extra_networks_hypernet.py b/modules/extra_networks_hypernet.py index 33d100dd..04f27c9f 100644 --- a/modules/extra_networks_hypernet.py +++ b/modules/extra_networks_hypernet.py @@ -10,7 +10,8 @@ 
class ExtraNetworkHypernet(extra_networks.ExtraNetwork): additional = shared.opts.sd_hypernetwork if additional != "None" and additional in shared.hypernetworks and len([x for x in params_list if x.items[0] == additional]) == 0: - p.all_prompts = [x + f"" for x in p.all_prompts] + hypernet_prompt_text = f"" + p.all_prompts = [f"{prompt}{hypernet_prompt_text}" for prompt in p.all_prompts] params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier])) names = [] diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 78248ed2..fe8b18b2 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -269,8 +269,8 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model v = v[1:-1] if v[0] == '"' and v[-1] == '"' else v m = re_imagesize.match(v) if m is not None: - res[k+"-1"] = m.group(1) - res[k+"-2"] = m.group(2) + res[f"{k}-1"] = m.group(1) + res[f"{k}-2"] = m.group(2) else: res[k] = v diff --git a/modules/hashes.py b/modules/hashes.py index 83272a07..032120f4 100644 --- a/modules/hashes.py +++ b/modules/hashes.py @@ -13,7 +13,7 @@ cache_data = None def dump_cache(): - with filelock.FileLock(cache_filename+".lock"): + with filelock.FileLock(f"{cache_filename}.lock"): with open(cache_filename, "w", encoding="utf8") as file: json.dump(cache_data, file, indent=4) @@ -22,7 +22,7 @@ def cache(subsection): global cache_data if cache_data is None: - with filelock.FileLock(cache_filename+".lock"): + with filelock.FileLock(f"{cache_filename}.lock"): if not os.path.isfile(cache_filename): cache_data = {} else: diff --git a/modules/images.py b/modules/images.py index 6ceb7c7c..a41965ab 100644 --- a/modules/images.py +++ b/modules/images.py @@ -467,7 +467,7 @@ def get_next_sequence_number(path, basename): """ result = -1 if basename != '': - basename = basename + "-" + basename = f"{basename}-" prefix_length = len(basename) for p in os.listdir(path): @@ -536,7 +536,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i add_number = opts.save_images_add_number or file_decoration == '' if file_decoration != "" and add_number: - file_decoration = "-" + file_decoration + file_decoration = f"-{file_decoration}" file_decoration = namegen.apply(file_decoration) + suffix @@ -566,7 +566,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i def _atomically_save_image(image_to_save, filename_without_extension, extension): # save image with .tmp extension to avoid race condition when another process detects new image in the directory - temp_file_path = filename_without_extension + ".tmp" + temp_file_path = f"{filename_without_extension}.tmp" image_format = Image.registered_extensions()[extension] if extension.lower() == '.png': @@ -626,7 +626,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i if opts.save_txt and info is not None: txt_fullfn = f"{fullfn_without_extension}.txt" with open(txt_fullfn, "w", encoding="utf8") as file: - file.write(info + "\n") + file.write(f"{info}\n") else: txt_fullfn = None diff --git a/modules/interrogate.py b/modules/interrogate.py index e1665708..9f7d657f 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -28,7 +28,7 @@ def category_types(): def download_default_clip_interrogate_categories(content_dir): print("Downloading CLIP categories...") - tmpdir = content_dir + "_tmp" + tmpdir = 
f"{content_dir}_tmp" category_types = ["artists", "flavors", "mediums", "movements"] try: @@ -214,7 +214,7 @@ class InterrogateModels: if shared.opts.interrogate_return_ranks: res += f", ({match}:{score/100:.3f})" else: - res += ", " + match + res += f", {match}" except Exception: print("Error interrogating", file=sys.stderr) diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py index f3d49c44..f880bc3c 100644 --- a/modules/models/diffusion/ddpm_edit.py +++ b/modules/models/diffusion/ddpm_edit.py @@ -223,7 +223,7 @@ class DDPM(pl.LightningModule): for k in keys: for ik in ignore_keys: if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) + print(f"Deleting key {k} from state_dict.") del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) @@ -386,7 +386,7 @@ class DDPM(pl.LightningModule): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) - loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} + loss_dict_ema = {f"{key}_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py index eb5f4e76..11b330bc 100644 --- a/modules/models/diffusion/uni_pc/uni_pc.py +++ b/modules/models/diffusion/uni_pc/uni_pc.py @@ -94,7 +94,7 @@ class NoiseScheduleVP: """ if schedule not in ['discrete', 'linear', 'cosine']: - raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(schedule)) + raise ValueError(f"Unsupported noise schedule {schedule}. The schedule needs to be 'discrete' or 'linear' or 'cosine'") self.schedule = schedule if schedule == 'discrete': @@ -469,7 +469,7 @@ class UniPC: t = torch.linspace(t_T**(1. / t_order), t_0**(1. 
/ t_order), N + 1).pow(t_order).to(device) return t else: - raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type)) + raise ValueError(f"Unsupported skip_type {skip_type}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'") def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): """ diff --git a/modules/ngrok.py b/modules/ngrok.py index 1ad7989b..7a7b4b26 100644 --- a/modules/ngrok.py +++ b/modules/ngrok.py @@ -7,8 +7,8 @@ def connect(token, port, region): else: if ':' in token: # token = authtoken:username:password - account = token.split(':')[1] + ':' + token.split(':')[-1] - token = token.split(':')[0] + token, username, password = token.split(':', 2) + account = f"{username}:{password}" config = conf.PyngrokConfig( auth_token=token, region=region diff --git a/modules/paths.py b/modules/paths.py index 0e1e00e7..acf1894b 100644 --- a/modules/paths.py +++ b/modules/paths.py @@ -16,7 +16,7 @@ for possible_sd_path in possible_sd_paths: sd_path = os.path.abspath(possible_sd_path) break -assert sd_path is not None, "Couldn't find Stable Diffusion in any of: " + str(possible_sd_paths) +assert sd_path is not None, f"Couldn't find Stable Diffusion in any of: {possible_sd_paths}" path_dirs = [ (sd_path, 'ldm', 'Stable Diffusion', []), diff --git a/modules/processing.py b/modules/processing.py index e786791a..1a76e552 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -500,7 +500,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None]) - negative_prompt_text = "\nNegative prompt: " + p.all_negative_prompts[index] if p.all_negative_prompts[index] else "" + negative_prompt_text = f"\nNegative prompt: {p.all_negative_prompts[index]}" if p.all_negative_prompts[index] else "" return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip() @@ -780,7 +780,16 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: devices.torch_gc() - res = Processed(p, output_images, p.all_seeds[0], infotext(), comments="".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], index_of_first_image=index_of_first_image, infotexts=infotexts) + res = Processed( + p, + images_list=output_images, + seed=p.all_seeds[0], + info=infotext(), + comments="".join(f"\n\n{comment}" for comment in comments), + subseed=p.all_subseeds[0], + index_of_first_image=index_of_first_image, + infotexts=infotexts, + ) if p.scripts is not None: p.scripts.postprocess(p, res) diff --git a/modules/progress.py b/modules/progress.py index 5655346b..948e6f00 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -96,7 +96,8 @@ def progressapi(req: ProgressRequest): if image is not None: buffered = io.BytesIO() image.save(buffered, format="png") - live_preview = 'data:image/png;base64,' + base64.b64encode(buffered.getvalue()).decode("ascii") + base64_image = base64.b64encode(buffered.getvalue()).decode('ascii') + live_preview = f"data:image/png;base64,{base64_image}" id_live_preview = shared.state.id_live_preview else: live_preview = None diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py index d6079433..efd7fca5 100644 --- a/modules/realesrgan_model.py +++ b/modules/realesrgan_model.py @@ -28,9 +28,9 @@ class UpscalerRealESRGAN(Upscaler): for scaler in scalers: 
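The progress.py hunk above assembles the live preview as a `data:` URL. A self-contained sketch of that encoding step (PIL image in, browser-embeddable string out), matching the calls used in the hunk:

import base64
import io

from PIL import Image

def to_data_url(image: Image.Image) -> str:
    buffered = io.BytesIO()
    image.save(buffered, format="png")  # serialize to PNG entirely in memory
    base64_image = base64.b64encode(buffered.getvalue()).decode("ascii")
    return f"data:image/png;base64,{base64_image}"

# usage: to_data_url(Image.new("RGB", (8, 8))) -> 'data:image/png;base64,iVBOR...'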
if scaler.local_data_path.startswith("http"): filename = modelloader.friendly_name(scaler.local_data_path) - local = next(iter([local_model for local_model in local_model_paths if local_model.endswith(filename + '.pth')]), None) - if local: - scaler.local_data_path = local + local_model_candidates = [local_model for local_model in local_model_paths if local_model.endswith(f"{filename}.pth")] + if local_model_candidates: + scaler.local_data_path = local_model_candidates[0] if scaler.name in opts.realesrgan_enabled_models: self.scalers.append(scaler) @@ -47,7 +47,7 @@ class UpscalerRealESRGAN(Upscaler): info = self.load_model(path) if not os.path.exists(info.local_data_path): - print("Unable to load RealESRGAN model: %s" % info.name) + print(f"Unable to load RealESRGAN model: {info.name}") return img upsampler = RealESRGANer( diff --git a/modules/scripts.py b/modules/scripts.py index 4d0bbd66..d945b89f 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -163,7 +163,8 @@ class Script: """helper function to generate id for a HTML element, constructs final id out of script name, tab and user-supplied item_id""" need_tabname = self.show(True) == self.show(False) - tabname = ('img2img' if self.is_img2img else 'txt2txt') + "_" if need_tabname else "" + tabkind = 'img2img' if self.is_img2img else 'txt2txt' + tabname = f"{tabkind}_" if need_tabname else "" title = re.sub(r'[^a-z_0-9]', '', re.sub(r'\s', '_', self.title().lower())) return f'script_{tabname}{title}_{item_id}' @@ -526,7 +527,7 @@ def add_classes_to_gradio_component(comp): this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others """ - comp.elem_classes = ["gradio-" + comp.get_block_name(), *(comp.elem_classes or [])] + comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])] if getattr(comp, 'multiselect', False): comp.elem_classes.append('multiselect') diff --git a/modules/sd_hijack_clip_old.py b/modules/sd_hijack_clip_old.py index 6d9fbbe6..a3476e95 100644 --- a/modules/sd_hijack_clip_old.py +++ b/modules/sd_hijack_clip_old.py @@ -75,7 +75,8 @@ def forward_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, text self.hijack.comments += hijack_comments if len(used_custom_terms) > 0: - self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms])) + embedding_names = ", ".join(f"{word} [{checksum}]" for word, checksum in used_custom_terms) + self.hijack.comments.append(f"Used embeddings: {embedding_names}") self.hijack.fixes = hijack_fixes return self.process_tokens(remade_batch_tokens, batch_multipliers) diff --git a/modules/sd_hijack_unet.py b/modules/sd_hijack_unet.py index 15858263..ca1daf45 100644 --- a/modules/sd_hijack_unet.py +++ b/modules/sd_hijack_unet.py @@ -18,7 +18,7 @@ class TorchHijackForUnet: if hasattr(torch, item): return getattr(torch, item) - raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item)) + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") def cat(self, tensors, *args, **kwargs): if len(tensors) == 2: diff --git a/modules/sd_models.py b/modules/sd_models.py index 59adc7cc..36f643e1 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -47,7 +47,7 @@ class CheckpointInfo: self.model_name = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0] self.hash = model_hash(filename) - self.sha256 = hashes.sha256_from_cache(self.filename, "checkpoint/" + name) + self.sha256 
= hashes.sha256_from_cache(self.filename, f"checkpoint/{name}") self.shorthash = self.sha256[0:10] if self.sha256 else None self.title = name if self.shorthash is None else f'{name} [{self.shorthash}]' @@ -69,7 +69,7 @@ class CheckpointInfo: checkpoint_alisases[id] = self def calculate_shorthash(self): - self.sha256 = hashes.sha256(self.filename, "checkpoint/" + self.name) + self.sha256 = hashes.sha256(self.filename, f"checkpoint/{self.name}") if self.sha256 is None: return diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py index 9398f528..7a79925a 100644 --- a/modules/sd_models_config.py +++ b/modules/sd_models_config.py @@ -111,7 +111,7 @@ def find_checkpoint_config_near_filename(info): if info is None: return None - config = os.path.splitext(info.filename)[0] + ".yaml" + config = f"{os.path.splitext(info.filename)[0]}.yaml" if os.path.exists(config): return config diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index eb98e599..0fc9f456 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -198,7 +198,7 @@ class TorchHijack: if hasattr(torch, item): return getattr(torch, item) - raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item)) + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") def randn_like(self, x): if self.sampler_noises: diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 9b00f76e..521e485a 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -89,7 +89,7 @@ def refresh_vae_list(): def find_vae_near_checkpoint(checkpoint_file): checkpoint_path = os.path.splitext(checkpoint_file)[0] - for vae_location in [checkpoint_path + ".vae.pt", checkpoint_path + ".vae.ckpt", checkpoint_path + ".vae.safetensors"]: + for vae_location in [f"{checkpoint_path}.vae.pt", f"{checkpoint_path}.vae.ckpt", f"{checkpoint_path}.vae.safetensors"]: if os.path.isfile(vae_location): return vae_location diff --git a/modules/styles.py b/modules/styles.py index 9ed85991..11642075 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -74,7 +74,7 @@ class StyleDatabase: def save_styles(self, path: str) -> None: # Always keep a backup file around if os.path.exists(path): - shutil.copy(path, path + ".bak") + shutil.copy(path, f"{path}.bak") fd = os.open(path, os.O_RDWR|os.O_CREAT) with os.fdopen(fd, "w", encoding="utf-8-sig", newline='') as file: diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index 68e1103c..ba1bdcd4 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -111,7 +111,7 @@ def focal_point(im, settings): if corner_centroid is not None: color = BLUE box = corner_centroid.bounding(max_size * corner_centroid.weight) - d.text((box[0], box[1]-15), "Edge: %.02f" % corner_centroid.weight, fill=color) + d.text((box[0], box[1]-15), f"Edge: {corner_centroid.weight:.02f}", fill=color) d.ellipse(box, outline=color) if len(corner_points) > 1: for f in corner_points: @@ -119,7 +119,7 @@ def focal_point(im, settings): if entropy_centroid is not None: color = "#ff0" box = entropy_centroid.bounding(max_size * entropy_centroid.weight) - d.text((box[0], box[1]-15), "Entropy: %.02f" % entropy_centroid.weight, fill=color) + d.text((box[0], box[1]-15), f"Entropy: {entropy_centroid.weight:.02f}", fill=color) d.ellipse(box, outline=color) if len(entropy_points) > 1: for f in entropy_points: @@ -127,7 +127,7 @@ def focal_point(im, settings): if face_centroid is not 
None: color = RED box = face_centroid.bounding(max_size * face_centroid.weight) - d.text((box[0], box[1]-15), "Face: %.02f" % face_centroid.weight, fill=color) + d.text((box[0], box[1]-15), f"Face: {face_centroid.weight:.02f}", fill=color) d.ellipse(box, outline=color) if len(face_points) > 1: for f in face_points: diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index af9fbcf2..41610e03 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -72,7 +72,7 @@ class PersonalizedBase(Dataset): except Exception: continue - text_filename = os.path.splitext(path)[0] + ".txt" + text_filename = f"{os.path.splitext(path)[0]}.txt" filename = os.path.basename(path) if os.path.exists(text_filename): diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 4a29151d..da0bcb26 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -63,9 +63,9 @@ def save_pic_with_caption(image, index, params: PreprocessParams, existing_capti image.save(os.path.join(params.dstdir, f"{basename}.png")) if params.preprocess_txt_action == 'prepend' and existing_caption: - caption = existing_caption + ' ' + caption + caption = f"{existing_caption} {caption}" elif params.preprocess_txt_action == 'append' and existing_caption: - caption = caption + ' ' + existing_caption + caption = f"{caption} {existing_caption}" elif params.preprocess_txt_action == 'copy' and existing_caption: caption = existing_caption @@ -174,7 +174,7 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre params.src = filename existing_caption = None - existing_caption_filename = os.path.splitext(filename)[0] + '.txt' + existing_caption_filename = f"{os.path.splitext(filename)[0]}.txt" if os.path.exists(existing_caption_filename): with open(existing_caption_filename, 'r', encoding="utf8") as file: existing_caption = file.read() diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 379df243..4368eb63 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -69,7 +69,7 @@ class Embedding: 'hash': self.checksum(), 'optimizer_state_dict': self.optimizer_state_dict, } - torch.save(optimizer_saved_dict, filename + '.optim') + torch.save(optimizer_saved_dict, f"{filename}.optim") def checksum(self): if self.cached_checksum is not None: @@ -437,8 +437,8 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate, weight_decay=0.0) if shared.opts.save_optimizer_state: optimizer_state_dict = None - if os.path.exists(filename + '.optim'): - optimizer_saved_dict = torch.load(filename + '.optim', map_location='cpu') + if os.path.exists(f"{filename}.optim"): + optimizer_saved_dict = torch.load(f"{filename}.optim", map_location='cpu') if embedding.checksum() == optimizer_saved_dict.get('hash', None): optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None) @@ -599,7 +599,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st data = torch.load(last_saved_file) info.add_text("sd-ti-embedding", embedding_to_b64(data)) - title = "<{}>".format(data.get('name', '???')) + title = f"<{data.get('name', '???')}>" try: vectorSize = list(data['string_to_param'].values())[0].shape[0] @@ -608,8 +608,8 @@ def 
train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st checkpoint = sd_models.select_checkpoint() footer_left = checkpoint.model_name - footer_mid = '[{}]'.format(checkpoint.shorthash) - footer_right = '{}v {}s'.format(vectorSize, steps_done) + footer_mid = f'[{checkpoint.shorthash}]' + footer_right = f'{vectorSize}v {steps_done}s' captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right) captioned_image = insert_image_data_embed(captioned_image, data) diff --git a/modules/ui.py b/modules/ui.py index 34b2aaff..d02f6e82 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -101,7 +101,7 @@ def visit(x, func, path=""): for c in x.children: visit(c, func, path) elif x.label is not None: - func(path + "/" + str(x.label), x) + func(f"{path}/{x.label}", x) def add_style(name: str, prompt: str, negative_prompt: str): @@ -166,7 +166,7 @@ def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_di img = Image.open(image) filename = os.path.basename(image) left, _ = os.path.splitext(filename) - print(interrogation_function(img), file=open(os.path.join(ii_output_dir, left + ".txt"), 'a')) + print(interrogation_function(img), file=open(os.path.join(ii_output_dir, f"{left}.txt"), 'a')) return [gr.update(), None] @@ -182,29 +182,29 @@ def interrogate_deepbooru(image): def create_seed_inputs(target_interface): - with FormRow(elem_id=target_interface + '_seed_row', variant="compact"): - seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=target_interface + '_seed') + with FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): + seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") seed.style(container=False) - random_seed = ToolButton(random_symbol, elem_id=target_interface + '_random_seed', label='Random seed') - reuse_seed = ToolButton(reuse_symbol, elem_id=target_interface + '_reuse_seed', label='Reuse seed') + random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') + reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') - seed_checkbox = gr.Checkbox(label='Extra', elem_id=target_interface + '_subseed_show', value=False) + seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False) # Components to show/hide based on the 'Extra' checkbox seed_extras = [] - with FormRow(visible=False, elem_id=target_interface + '_subseed_row') as seed_extra_row_1: + with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: seed_extras.append(seed_extra_row_1) - subseed = gr.Number(label='Variation seed', value=-1, elem_id=target_interface + '_subseed') + subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed") subseed.style(container=False) - random_subseed = ToolButton(random_symbol, elem_id=target_interface + '_random_subseed') - reuse_subseed = ToolButton(reuse_symbol, elem_id=target_interface + '_reuse_subseed') - subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=target_interface + '_subseed_strength') + random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") + reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") + subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, 
maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") with FormRow(visible=False) as seed_extra_row_2: seed_extras.append(seed_extra_row_2) - seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=target_interface + '_seed_resize_from_w') - seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=target_interface + '_seed_resize_from_h') + seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w") + seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h") random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed]) random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed]) @@ -765,7 +765,7 @@ def create_ui(): ) button.click( fn=lambda: None, - _js="switch_to_"+name.replace(" ", "_"), + _js=f"switch_to_{name.replace(' ', '_')}", inputs=[], outputs=[], ) @@ -1462,18 +1462,18 @@ def create_ui(): elif t == bool: comp = gr.Checkbox else: - raise Exception(f'bad options item type: {str(t)} for key {key}') + raise Exception(f'bad options item type: {t} for key {key}') - elem_id = "setting_"+key + elem_id = f"setting_{key}" if info.refresh is not None: if is_quicksettings: res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {})) - create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key) + create_refresh_button(res, info.refresh, info.component_args, f"refresh_{key}") else: with FormRow(): res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {})) - create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key) + create_refresh_button(res, info.refresh, info.component_args, f"refresh_{key}") else: res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {})) @@ -1545,7 +1545,7 @@ def create_ui(): current_tab.__exit__() gr.Group() - current_tab = gr.TabItem(elem_id="settings_{}".format(elem_id), label=text) + current_tab = gr.TabItem(elem_id=f"settings_{elem_id}", label=text) current_tab.__enter__() current_row = gr.Column(variant='compact') current_row.__enter__() @@ -1664,7 +1664,7 @@ def create_ui(): for interface, label, ifid in interfaces: if label in shared.opts.hidden_tabs: continue - with gr.TabItem(label, id=ifid, elem_id='tab_' + ifid): + with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): interface.render() if os.path.exists(os.path.join(script_path, "notification.mp3")): @@ -1771,10 +1771,10 @@ def create_ui(): def loadsave(path, x): def apply_field(obj, field, condition=None, init_field=None): - key = path + "/" + field + key = f"{path}/{field}" if getattr(obj, 'custom_script_source', None) is not None: - key = 'customscript/' + obj.custom_script_source + '/' + key + key = f"customscript/{obj.custom_script_source}/{key}" if getattr(obj, 'do_not_save_to_config', False): return diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 99ac8756..d9faf85a 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -61,7 +61,8 @@ def save_config_state(name): if not name: name = "Config" current_config_state["name"] = name - filename = os.path.join(config_states_dir, datetime.now().strftime("%Y_%m_%d-%H_%M_%S") + "_" + name + ".json") + timestamp = datetime.now().strftime('%Y_%m_%d-%H_%M_%S') + filename = 
os.path.join(config_states_dir, f"{timestamp}_{name}.json") print(f"Saving backup of webui/extension state to {filename}.") with open(filename, "w", encoding="utf-8") as f: json.dump(current_config_state, f) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 86c05a55..8c3dea56 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -69,7 +69,9 @@ class ExtraNetworksPage: pass def link_preview(self, filename): - return "./sd_extra_networks/thumb?filename=" + urllib.parse.quote(filename.replace('\\', '/')) + "&mtime=" + str(os.path.getmtime(filename)) + quoted_filename = urllib.parse.quote(filename.replace('\\', '/')) + mtime = os.path.getmtime(filename) + return f"./sd_extra_networks/thumb?filename={quoted_filename}&mtime={mtime}" def search_terms_from_path(self, filename, possible_directories=None): abspath = os.path.abspath(filename) diff --git a/scripts/custom_code.py b/scripts/custom_code.py index 4071d86d..f36a3675 100644 --- a/scripts/custom_code.py +++ b/scripts/custom_code.py @@ -77,7 +77,7 @@ return process_images(p) module.display = display indent = " " * indent_level - indented = code.replace('\n', '\n' + indent) + indented = code.replace('\n', f"\n{indent}") body = f"""def __webuitemp__(): {indent}{indented} __webuitemp__()""" diff --git a/scripts/loopback.py b/scripts/loopback.py index d3065fe6..ad6609be 100644 --- a/scripts/loopback.py +++ b/scripts/loopback.py @@ -84,7 +84,7 @@ class Script(scripts.Script): p.color_corrections = initial_color_corrections if append_interrogation != "None": - p.prompt = original_prompt + ", " if original_prompt != "" else "" + p.prompt = f"{original_prompt}, " if original_prompt else "" if append_interrogation == "CLIP": p.prompt += shared.interrogator.interrogate(p.init_images[0]) elif append_interrogation == "DeepBooru": diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 01d97791..a725d74a 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -439,7 +439,7 @@ class Script(scripts.Script): z_type.change(fn=select_axis, inputs=[z_type,z_values_dropdown], outputs=[fill_z_button,z_values,z_values_dropdown]) def get_dropdown_update_from_params(axis,params): - val_key = axis + " Values" + val_key = f"{axis} Values" vals = params.get(val_key,"") valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x] return gr.update(value = valslist) -- cgit v1.2.3 From 96d6ca4199e7c5eee8d451618de5161cea317c40 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 08:25:25 +0300 Subject: manual fixes for ruff --- extensions-builtin/LDSR/ldsr_model_arch.py | 2 +- extensions-builtin/LDSR/scripts/ldsr_model.py | 3 +- extensions-builtin/LDSR/sd_hijack_autoencoder.py | 10 +- extensions-builtin/LDSR/sd_hijack_ddpm_v1.py | 26 ++--- extensions-builtin/ScuNET/scunet_model_arch.py | 9 +- extensions-builtin/SwinIR/scripts/swinir_model.py | 2 +- modules/api/api.py | 129 +++++++++++----------- modules/api/models.py | 5 +- modules/codeformer/codeformer_arch.py | 2 +- modules/esrgan_model_arch.py | 2 + modules/extra_networks_hypernet.py | 2 +- modules/images.py | 4 +- modules/img2img.py | 1 - modules/interrogate.py | 1 - modules/modelloader.py | 6 +- modules/models/diffusion/ddpm_edit.py | 26 ++--- modules/models/diffusion/uni_pc/sampler.py | 3 +- modules/processing.py | 2 +- modules/prompt_parser.py | 11 +- modules/textual_inversion/autocrop.py | 2 +- modules/ui.py | 8 +- modules/upscaler.py | 2 +- 22 files changed, 129 insertions(+), 129 deletions(-) 
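Two ruff rules account for most of the edits that follow: bare `except:` clauses (E722) are narrowed to `except Exception:` so that `KeyboardInterrupt` and `SystemExit` still propagate, and wildcard imports (F403) are replaced with a namespaced module. A short sketch of both fixes; the import example mirrors the api.py change below:

import torch

# E722 fix: a bare "except:" would also swallow KeyboardInterrupt/SystemExit;
# catching Exception keeps Ctrl+C working while still ignoring ordinary errors.
try:
    torch.cuda.empty_cache()
except Exception:
    pass

# F403 fix, before:  from modules.api.models import *   (names appear from nowhere)
#           after:   from modules.api import models     (models.TextToImageResponse, ...)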
(limited to 'modules/processing.py') diff --git a/extensions-builtin/LDSR/ldsr_model_arch.py b/extensions-builtin/LDSR/ldsr_model_arch.py index 2339de7f..a5fb8907 100644 --- a/extensions-builtin/LDSR/ldsr_model_arch.py +++ b/extensions-builtin/LDSR/ldsr_model_arch.py @@ -243,7 +243,7 @@ def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True) log["sample_noquant"] = x_sample_noquant log["sample_diff"] = torch.abs(x_sample_noquant - x_sample) - except: + except Exception: pass log["sample"] = x_sample diff --git a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py index da19cff1..e8dc083c 100644 --- a/extensions-builtin/LDSR/scripts/ldsr_model.py +++ b/extensions-builtin/LDSR/scripts/ldsr_model.py @@ -7,7 +7,8 @@ from basicsr.utils.download_util import load_file_from_url from modules.upscaler import Upscaler, UpscalerData from ldsr_model_arch import LDSR from modules import shared, script_callbacks -import sd_hijack_autoencoder, sd_hijack_ddpm_v1 +import sd_hijack_autoencoder +import sd_hijack_ddpm_v1 class UpscalerLDSR(Upscaler): diff --git a/extensions-builtin/LDSR/sd_hijack_autoencoder.py b/extensions-builtin/LDSR/sd_hijack_autoencoder.py index db2231dd..6303fed5 100644 --- a/extensions-builtin/LDSR/sd_hijack_autoencoder.py +++ b/extensions-builtin/LDSR/sd_hijack_autoencoder.py @@ -1,16 +1,21 @@ # The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo # The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo # As the LDSR upscaler relies on VQModel & VQModelInterface, the hijack aims to put them back into the ldm.models.autoencoder - +import numpy as np import torch import pytorch_lightning as pl import torch.nn.functional as F from contextlib import contextmanager + +from torch.optim.lr_scheduler import LambdaLR + +from ldm.modules.ema import LitEma from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer from ldm.modules.diffusionmodules.model import Encoder, Decoder from ldm.util import instantiate_from_config import ldm.models.autoencoder +from packaging import version class VQModel(pl.LightningModule): def __init__(self, @@ -249,7 +254,8 @@ class VQModel(pl.LightningModule): if plot_ema: with self.ema_scope(): xrec_ema, _ = self(x) - if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema) + if x.shape[1] > 3: + xrec_ema = self.to_rgb(xrec_ema) log["reconstructions_ema"] = xrec_ema return log diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py index 5c0488e5..4d3f6c56 100644 --- a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py @@ -450,7 +450,7 @@ class LatentDiffusionV1(DDPMV1): self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 - except: + except Exception: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor @@ -877,16 +877,6 @@ class LatentDiffusionV1(DDPMV1): c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) - def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset - def rescale_bbox(bbox): - x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) - y0 = clamp((bbox[1] - crop_coordinates[1]) / 
crop_coordinates[3]) - w = min(bbox[2] / crop_coordinates[2], 1 - x0) - h = min(bbox[3] / crop_coordinates[3], 1 - y0) - return x0, y0, w, h - - return [rescale_bbox(b) for b in bboxes] - def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): @@ -1157,8 +1147,10 @@ class LatentDiffusionV1(DDPMV1): if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) - if callback: callback(i) - if img_callback: img_callback(img, i) + if callback: + callback(i) + if img_callback: + img_callback(img, i) return img, intermediates @torch.no_grad() @@ -1205,8 +1197,10 @@ class LatentDiffusionV1(DDPMV1): if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) - if callback: callback(i) - if img_callback: img_callback(img, i) + if callback: + callback(i) + if img_callback: + img_callback(img, i) if return_intermediates: return img, intermediates @@ -1322,7 +1316,7 @@ class LatentDiffusionV1(DDPMV1): if inpaint: # make a simple center square - b, h, w = z.shape[0], z.shape[2], z.shape[3] + h, w = z.shape[2], z.shape[3] mask = torch.ones(N, h, w).to(self.device) # zeros will be filled in mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. diff --git a/extensions-builtin/ScuNET/scunet_model_arch.py b/extensions-builtin/ScuNET/scunet_model_arch.py index 43ca8d36..8028918a 100644 --- a/extensions-builtin/ScuNET/scunet_model_arch.py +++ b/extensions-builtin/ScuNET/scunet_model_arch.py @@ -61,7 +61,9 @@ class WMSA(nn.Module): Returns: output: tensor shape [b h w c] """ - if self.type != 'W': x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2)) + if self.type != 'W': + x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2)) + x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size) h_windows = x.size(1) w_windows = x.size(2) @@ -85,8 +87,9 @@ class WMSA(nn.Module): output = self.linear(output) output = rearrange(output, 'b (w1 w2) (p1 p2) c -> b (w1 p1) (w2 p2) c', w1=h_windows, p1=self.window_size) - if self.type != 'W': output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2), - dims=(1, 2)) + if self.type != 'W': + output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2), dims=(1, 2)) + return output def relative_embedding(self): diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py index e8783bca..d77c3a92 100644 --- a/extensions-builtin/SwinIR/scripts/swinir_model.py +++ b/extensions-builtin/SwinIR/scripts/swinir_model.py @@ -45,7 +45,7 @@ class UpscalerSwinIR(Upscaler): img = upscale(img, model) try: torch.cuda.empty_cache() - except: + except Exception: pass return img diff --git a/modules/api/api.py b/modules/api/api.py index d47c39fc..f52d371b 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -15,7 +15,8 @@ from secrets import compare_digest import modules.shared as shared from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing -from modules.api.models import * +from modules.api import models +from modules.shared import opts from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.textual_inversion.textual_inversion import create_embedding, train_embedding from modules.textual_inversion.preprocess import preprocess @@ -25,20 +26,21 @@ from modules.sd_models import checkpoints_list, 
unload_model_weights, reload_mod from modules.sd_models_config import find_checkpoint_config_near_filename from modules.realesrgan_model import get_realesrgan_models from modules import devices -from typing import List +from typing import Dict, List, Any import piexif import piexif.helper + def upscaler_to_index(name: str): try: return [x.name.lower() for x in shared.sd_upscalers].index(name.lower()) - except: - raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in sd_upscalers])}") + except Exception: + raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in shared.sd_upscalers])}") def script_name_to_index(name, scripts): try: return [script.title().lower() for script in scripts].index(name.lower()) - except: + except Exception: raise HTTPException(status_code=422, detail=f"Script '{name}' not found") def validate_sampler_name(name): @@ -99,7 +101,7 @@ def api_middleware(app: FastAPI): import starlette # importing just so it can be placed on silent list from rich.console import Console console = Console() - except: + except Exception: import traceback rich_available = False @@ -166,36 +168,36 @@ class Api: self.app = app self.queue_lock = queue_lock api_middleware(self.app) - self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse) - self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse) - self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) - self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) - self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse) - self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse) + self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=models.TextToImageResponse) + self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=models.ImageToImageResponse) + self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=models.ExtrasSingleImageResponse) + self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=models.ExtrasBatchImagesResponse) + self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=models.PNGInfoResponse) + self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=models.ProgressResponse) self.add_api_route("/sdapi/v1/interrogate", self.interrogateapi, methods=["POST"]) self.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"]) self.add_api_route("/sdapi/v1/skip", self.skip, methods=["POST"]) - self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=OptionsModel) + self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=models.OptionsModel) self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"]) - self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=FlagsModel) - self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], 
response_model=List[SamplerItem]) - self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[UpscalerItem]) - self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[SDModelItem]) - self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[HypernetworkItem]) - self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[FaceRestorerItem]) - self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[RealesrganItem]) - self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[PromptStyleItem]) - self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=EmbeddingsResponse) + self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel) + self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[models.SamplerItem]) + self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[models.UpscalerItem]) + self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[models.SDModelItem]) + self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[models.HypernetworkItem]) + self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[models.FaceRestorerItem]) + self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[models.RealesrganItem]) + self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[models.PromptStyleItem]) + self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=models.EmbeddingsResponse) self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"]) - self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=CreateResponse) - self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=CreateResponse) - self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=PreprocessResponse) - self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=TrainResponse) - self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=TrainResponse) - self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=MemoryResponse) + self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse) + self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse) + self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=models.PreprocessResponse) + self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=models.TrainResponse) + self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=models.TrainResponse) + 
self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=models.MemoryResponse) self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"]) self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"]) - self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=ScriptsList) + self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList) self.default_script_arg_txt2img = [] self.default_script_arg_img2img = [] @@ -224,7 +226,7 @@ class Api: t2ilist = [str(title.lower()) for title in scripts.scripts_txt2img.titles] i2ilist = [str(title.lower()) for title in scripts.scripts_img2img.titles] - return ScriptsList(txt2img = t2ilist, img2img = i2ilist) + return models.ScriptsList(txt2img=t2ilist, img2img=i2ilist) def get_script(self, script_name, script_runner): if script_name is None or script_name == "": @@ -276,7 +278,7 @@ class Api: script_args[alwayson_script.args_from + idx] = request.alwayson_scripts[alwayson_script_name]["args"][idx] return script_args - def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): + def text2imgapi(self, txt2imgreq: models.StableDiffusionTxt2ImgProcessingAPI): script_runner = scripts.scripts_txt2img if not script_runner.scripts: script_runner.initialize_scripts(False) @@ -320,9 +322,9 @@ class Api: b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else [] - return TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js()) + return models.TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js()) - def img2imgapi(self, img2imgreq: StableDiffusionImg2ImgProcessingAPI): + def img2imgapi(self, img2imgreq: models.StableDiffusionImg2ImgProcessingAPI): init_images = img2imgreq.init_images if init_images is None: raise HTTPException(status_code=404, detail="Init image not found") @@ -381,9 +383,9 @@ class Api: img2imgreq.init_images = None img2imgreq.mask = None - return ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js()) + return models.ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js()) - def extras_single_image_api(self, req: ExtrasSingleImageRequest): + def extras_single_image_api(self, req: models.ExtrasSingleImageRequest): reqDict = setUpscalers(req) reqDict['image'] = decode_base64_to_image(reqDict['image']) @@ -391,9 +393,9 @@ class Api: with self.queue_lock: result = postprocessing.run_extras(extras_mode=0, image_folder="", input_dir="", output_dir="", save_output=False, **reqDict) - return ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1]) + return models.ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1]) - def extras_batch_images_api(self, req: ExtrasBatchImagesRequest): + def extras_batch_images_api(self, req: models.ExtrasBatchImagesRequest): reqDict = setUpscalers(req) image_list = reqDict.pop('imageList', []) @@ -402,15 +404,15 @@ class Api: with self.queue_lock: result = postprocessing.run_extras(extras_mode=1, image_folder=image_folder, image="", input_dir="", output_dir="", save_output=False, **reqDict) - return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) + return models.ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) - def 
pnginfoapi(self, req: PNGInfoRequest): + def pnginfoapi(self, req: models.PNGInfoRequest): if(not req.image.strip()): - return PNGInfoResponse(info="") + return models.PNGInfoResponse(info="") image = decode_base64_to_image(req.image.strip()) if image is None: - return PNGInfoResponse(info="") + return models.PNGInfoResponse(info="") geninfo, items = images.read_info_from_image(image) if geninfo is None: @@ -418,13 +420,13 @@ class Api: items = {**{'parameters': geninfo}, **items} - return PNGInfoResponse(info=geninfo, items=items) + return models.PNGInfoResponse(info=geninfo, items=items) - def progressapi(self, req: ProgressRequest = Depends()): + def progressapi(self, req: models.ProgressRequest = Depends()): # copy from check_progress_call of ui.py if shared.state.job_count == 0: - return ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo) + return models.ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo) # avoid dividing zero progress = 0.01 @@ -446,9 +448,9 @@ class Api: if shared.state.current_image and not req.skip_current_image: current_image = encode_pil_to_base64(shared.state.current_image) - return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo) + return models.ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo) - def interrogateapi(self, interrogatereq: InterrogateRequest): + def interrogateapi(self, interrogatereq: models.InterrogateRequest): image_b64 = interrogatereq.image if image_b64 is None: raise HTTPException(status_code=404, detail="Image not found") @@ -465,7 +467,7 @@ class Api: else: raise HTTPException(status_code=404, detail="Model not found") - return InterrogateResponse(caption=processed) + return models.InterrogateResponse(caption=processed) def interruptapi(self): shared.state.interrupt() @@ -570,36 +572,36 @@ class Api: filename = create_embedding(**args) # create empty embedding sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() # reload embeddings so new one can be immediately used shared.state.end() - return CreateResponse(info=f"create embedding filename: {filename}") + return models.CreateResponse(info=f"create embedding filename: {filename}") except AssertionError as e: shared.state.end() - return TrainResponse(info=f"create embedding error: {e}") + return models.TrainResponse(info=f"create embedding error: {e}") def create_hypernetwork(self, args: dict): try: shared.state.begin() filename = create_hypernetwork(**args) # create empty embedding shared.state.end() - return CreateResponse(info=f"create hypernetwork filename: {filename}") + return models.CreateResponse(info=f"create hypernetwork filename: {filename}") except AssertionError as e: shared.state.end() - return TrainResponse(info=f"create hypernetwork error: {e}") + return models.TrainResponse(info=f"create hypernetwork error: {e}") def preprocess(self, args: dict): try: shared.state.begin() preprocess(**args) # quick operation unless blip/booru interrogation is enabled shared.state.end() - return PreprocessResponse(info = 'preprocess complete') + return models.PreprocessResponse(info = 'preprocess complete') except KeyError as e: shared.state.end() - return PreprocessResponse(info=f"preprocess error: invalid token: {e}") + return 
models.PreprocessResponse(info=f"preprocess error: invalid token: {e}") except AssertionError as e: shared.state.end() - return PreprocessResponse(info=f"preprocess error: {e}") + return models.PreprocessResponse(info=f"preprocess error: {e}") except FileNotFoundError as e: shared.state.end() - return PreprocessResponse(info=f'preprocess error: {e}') + return models.PreprocessResponse(info=f'preprocess error: {e}') def train_embedding(self, args: dict): try: @@ -617,10 +619,10 @@ class Api: if not apply_optimizations: sd_hijack.apply_optimizations() shared.state.end() - return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") + return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") except AssertionError as msg: shared.state.end() - return TrainResponse(info=f"train embedding error: {msg}") + return models.TrainResponse(info=f"train embedding error: {msg}") def train_hypernetwork(self, args: dict): try: @@ -641,14 +643,15 @@ class Api: if not apply_optimizations: sd_hijack.apply_optimizations() shared.state.end() - return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") + return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") except AssertionError: shared.state.end() - return TrainResponse(info=f"train embedding error: {error}") + return models.TrainResponse(info=f"train embedding error: {error}") def get_memory(self): try: - import os, psutil + import os + import psutil process = psutil.Process(os.getpid()) res = process.memory_info() # only rss is cross-platform guaranteed so we dont rely on other values ram_total = 100 * res.rss / process.memory_percent() # and total memory is calculated as actual value is not cross-platform safe @@ -675,10 +678,10 @@ class Api: 'events': warnings, } else: - cuda = { 'error': 'unavailable' } + cuda = {'error': 'unavailable'} except Exception as err: - cuda = { 'error': f'{err}' } - return MemoryResponse(ram = ram, cuda = cuda) + cuda = {'error': f'{err}'} + return models.MemoryResponse(ram=ram, cuda=cuda) def launch(self, server_name, port): self.app.include_router(self.router) diff --git a/modules/api/models.py b/modules/api/models.py index 4a70f440..4d291076 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -223,8 +223,9 @@ for key in _options: if(_options[key].dest != 'help'): flag = _options[key] _type = str - if _options[key].default is not None: _type = type(_options[key].default) - flags.update({flag.dest: (_type,Field(default=flag.default, description=flag.help))}) + if _options[key].default is not None: + _type = type(_options[key].default) + flags.update({flag.dest: (_type, Field(default=flag.default, description=flag.help))}) FlagsModel = create_model("Flags", **flags) diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py index 11dcc3ee..f1a7cf09 100644 --- a/modules/codeformer/codeformer_arch.py +++ b/modules/codeformer/codeformer_arch.py @@ -7,7 +7,7 @@ from torch import nn, Tensor import torch.nn.functional as F from typing import Optional, List -from modules.codeformer.vqgan_arch import * +from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock from basicsr.utils import get_root_logger from basicsr.utils.registry import ARCH_REGISTRY diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py index 6071fea7..7f8bc7c0 100644 --- a/modules/esrgan_model_arch.py +++ b/modules/esrgan_model_arch.py @@ -438,9 
+438,11 @@ def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias= padding = padding if pad_type == 'zero' else 0 if convtype=='PartialConv2D': + from torchvision.ops import PartialConv2d # this is definitely not going to work, but PartialConv2d doesn't work anyway and this shuts up static analyzer c = PartialConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups) elif convtype=='DeformConv2D': + from torchvision.ops import DeformConv2d # not tested c = DeformConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups) elif convtype=='Conv3D': diff --git a/modules/extra_networks_hypernet.py b/modules/extra_networks_hypernet.py index 04f27c9f..aa2a14ef 100644 --- a/modules/extra_networks_hypernet.py +++ b/modules/extra_networks_hypernet.py @@ -1,4 +1,4 @@ -from modules import extra_networks, shared, extra_networks +from modules import extra_networks, shared from modules.hypernetworks import hypernetwork diff --git a/modules/images.py b/modules/images.py index 3d5d76cc..5eb6d855 100644 --- a/modules/images.py +++ b/modules/images.py @@ -472,9 +472,9 @@ def get_next_sequence_number(path, basename): prefix_length = len(basename) for p in os.listdir(path): if p.startswith(basename): - l = os.path.splitext(p[prefix_length:])[0].split('-') # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element) + parts = os.path.splitext(p[prefix_length:])[0].split('-') # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element) try: - result = max(int(l[0]), result) + result = max(int(parts[0]), result) except ValueError: pass diff --git a/modules/img2img.py b/modules/img2img.py index cdae301a..32b1ecd6 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -13,7 +13,6 @@ from modules.shared import opts, state import modules.shared as shared import modules.processing as processing from modules.ui import plaintext_to_html -import modules.images as images import modules.scripts diff --git a/modules/interrogate.py b/modules/interrogate.py index 9f7d657f..22df9216 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -11,7 +11,6 @@ import torch.hub from torchvision import transforms from torchvision.transforms.functional import InterpolationMode -import modules.shared as shared from modules import devices, paths, shared, lowvram, modelloader, errors blip_image_eval_size = 384 diff --git a/modules/modelloader.py b/modules/modelloader.py index cb85ac4f..cf685000 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -108,12 +108,12 @@ def move_files(src_path: str, dest_path: str, ext_filter: str = None): print(f"Moving {file} from {src_path} to {dest_path}.") try: shutil.move(fullpath, dest_path) - except: + except Exception: pass if len(os.listdir(src_path)) == 0: print(f"Removing empty folder: {src_path}") shutil.rmtree(src_path, True) - except: + except Exception: pass @@ -141,7 +141,7 @@ def load_upscalers(): full_model = f"modules.{model_name}_model" try: importlib.import_module(full_model) - except: + except Exception: pass datas = [] diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py index f880bc3c..611c2b69 100644 --- a/modules/models/diffusion/ddpm_edit.py +++ b/modules/models/diffusion/ddpm_edit.py @@ -479,7 +479,7 @@ class LatentDiffusion(DDPM): 
self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 - except: + except Exception: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor @@ -891,16 +891,6 @@ class LatentDiffusion(DDPM): c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) - def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset - def rescale_bbox(bbox): - x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) - y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) - w = min(bbox[2] / crop_coordinates[2], 1 - x0) - h = min(bbox[3] / crop_coordinates[3], 1 - y0) - return x0, y0, w, h - - return [rescale_bbox(b) for b in bboxes] - def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): @@ -1171,8 +1161,10 @@ class LatentDiffusion(DDPM): if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) - if callback: callback(i) - if img_callback: img_callback(img, i) + if callback: + callback(i) + if img_callback: + img_callback(img, i) return img, intermediates @torch.no_grad() @@ -1219,8 +1211,10 @@ class LatentDiffusion(DDPM): if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) - if callback: callback(i) - if img_callback: img_callback(img, i) + if callback: + callback(i) + if img_callback: + img_callback(img, i) if return_intermediates: return img, intermediates @@ -1337,7 +1331,7 @@ class LatentDiffusion(DDPM): if inpaint: # make a simple center square - b, h, w = z.shape[0], z.shape[2], z.shape[3] + h, w = z.shape[2], z.shape[3] mask = torch.ones(N, h, w).to(self.device) # zeros will be filled in mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. 
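
[Annotation] A note on the recurring "except:" -> "except Exception:" changes in this series (modelloader.py and ddpm_edit.py above, processing.py, autocrop.py and upscaler.py below): a bare except catches every BaseException, including KeyboardInterrupt and SystemExit, so an "ignore errors and continue" loop can silently eat a Ctrl+C. Narrowing to Exception keeps the intended swallow-and-continue behaviour while letting interpreter-level signals propagate. A minimal sketch, illustration only — flaky_cleanup is a hypothetical stand-in for the shutil.move/rmtree calls patched above, not code from any module:

    # except_demo.py - why "except Exception:" is the safer catch-all.
    import time

    def flaky_cleanup(path):
        time.sleep(0.1)  # window in which a Ctrl+C might arrive
        raise OSError(f"cannot remove {path}")

    for p in ["a", "b", "c"]:
        try:
            flaky_cleanup(p)
        except Exception:
            # OSError and friends are swallowed, as intended, but a
            # KeyboardInterrupt (a BaseException, not an Exception)
            # still propagates; a bare "except:" would swallow it too.
            pass
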
diff --git a/modules/models/diffusion/uni_pc/sampler.py b/modules/models/diffusion/uni_pc/sampler.py index a241c8a7..0a9defa1 100644 --- a/modules/models/diffusion/uni_pc/sampler.py +++ b/modules/models/diffusion/uni_pc/sampler.py @@ -54,7 +54,8 @@ class UniPCSampler(object): if conditioning is not None: if isinstance(conditioning, dict): ctmp = conditioning[list(conditioning.keys())[0]] - while isinstance(ctmp, list): ctmp = ctmp[0] + while isinstance(ctmp, list): + ctmp = ctmp[0] cbs = ctmp.shape[0] if cbs != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") diff --git a/modules/processing.py b/modules/processing.py index 1a76e552..6f5233c1 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -664,7 +664,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if not shared.opts.dont_fix_second_order_samplers_schedule: try: step_multiplier = 2 if sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1 - except: + except Exception: pass uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps * step_multiplier, cached_uc) c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps * step_multiplier, cached_c) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index e084e948..3a720721 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -54,18 +54,21 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): """ def collect_steps(steps, tree): - l = [steps] + res = [steps] + class CollectSteps(lark.Visitor): def scheduled(self, tree): tree.children[-1] = float(tree.children[-1]) if tree.children[-1] < 1: tree.children[-1] *= steps tree.children[-1] = min(steps, int(tree.children[-1])) - l.append(tree.children[-1]) + res.append(tree.children[-1]) + def alternate(self, tree): - l.extend(range(1, steps+1)) + res.extend(range(1, steps+1)) + CollectSteps().visit(tree) - return sorted(set(l)) + return sorted(set(res)) def at_step(step, tree): class AtStep(lark.Transformer): diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index ba1bdcd4..d7d8d2e3 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -185,7 +185,7 @@ def image_face_points(im, settings): try: faces = classifier.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE) - except: + except Exception: continue if len(faces) > 0: diff --git a/modules/ui.py b/modules/ui.py index 2171f3aa..6beda76f 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1,15 +1,9 @@ -import html import json -import math import mimetypes import os -import platform -import random import sys -import tempfile -import time import traceback -from functools import partial, reduce +from functools import reduce import warnings import gradio as gr diff --git a/modules/upscaler.py b/modules/upscaler.py index e2eaa730..0ad4fe99 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -45,7 +45,7 @@ class Upscaler: try: import cv2 self.can_tile = True - except: + except Exception: pass @abstractmethod -- cgit v1.2.3 From f741a98baccae100fcfb40c017b5c35c5cba1b0c Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 08:43:42 +0300 Subject: imports cleanup for ruff --- extensions-builtin/Lora/lora.py | 1 - 
extensions-builtin/ScuNET/scripts/scunet_model.py | 1 - extensions-builtin/SwinIR/scripts/swinir_model.py | 3 +-- modules/codeformer/codeformer_arch.py | 4 +--- modules/codeformer/vqgan_arch.py | 2 -- modules/codeformer_model.py | 4 +--- modules/config_states.py | 2 +- modules/esrgan_model.py | 2 +- modules/esrgan_model_arch.py | 1 - modules/extensions.py | 1 - modules/generation_parameters_copypaste.py | 4 ---- modules/hypernetworks/hypernetwork.py | 3 +-- modules/hypernetworks/ui.py | 2 -- modules/images.py | 2 +- modules/img2img.py | 5 +---- modules/mac_specific.py | 1 - modules/modelloader.py | 1 - modules/models/diffusion/uni_pc/uni_pc.py | 1 - modules/processing.py | 5 ++--- modules/sd_hijack.py | 2 +- modules/sd_hijack_inpainting.py | 6 ------ modules/sd_hijack_ip2p.py | 5 +---- modules/sd_hijack_xlmr.py | 2 -- modules/sd_models.py | 2 +- modules/sd_models_config.py | 1 - modules/sd_samplers_kdiffusion.py | 1 - modules/sd_vae.py | 3 --- modules/shared.py | 3 --- modules/styles.py | 9 --------- modules/textual_inversion/autocrop.py | 4 +--- modules/textual_inversion/image_embedding.py | 2 +- modules/textual_inversion/preprocess.py | 4 ---- modules/textual_inversion/textual_inversion.py | 1 - modules/txt2img.py | 9 +++------ modules/ui.py | 5 ++--- modules/ui_extra_networks.py | 1 - modules/ui_postprocessing.py | 2 +- modules/upscaler.py | 2 -- modules/xlmr.py | 2 +- pyproject.toml | 11 +++++++---- scripts/custom_code.py | 2 +- scripts/outpainting_mk_2.py | 4 ++-- scripts/poor_mans_outpainting.py | 4 ++-- scripts/prompt_matrix.py | 7 ++----- scripts/prompts_from_file.py | 5 +---- scripts/sd_upscale.py | 4 ++-- scripts/xyz_grid.py | 6 ++---- webui.py | 2 +- 48 files changed, 42 insertions(+), 114 deletions(-) (limited to 'modules/processing.py') diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py index ba1293df..0ab43229 100644 --- a/extensions-builtin/Lora/lora.py +++ b/extensions-builtin/Lora/lora.py @@ -1,4 +1,3 @@ -import glob import os import re import torch diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py index c7fd5739..aa2fdb3a 100644 --- a/extensions-builtin/ScuNET/scripts/scunet_model.py +++ b/extensions-builtin/ScuNET/scripts/scunet_model.py @@ -13,7 +13,6 @@ import modules.upscaler from modules import devices, modelloader from scunet_model_arch import SCUNet as net from modules.shared import opts -from modules import images class UpscalerScuNET(modules.upscaler.Upscaler): diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py index d77c3a92..55dd94ab 100644 --- a/extensions-builtin/SwinIR/scripts/swinir_model.py +++ b/extensions-builtin/SwinIR/scripts/swinir_model.py @@ -1,4 +1,3 @@ -import contextlib import os import numpy as np @@ -8,7 +7,7 @@ from basicsr.utils.download_util import load_file_from_url from tqdm import tqdm from modules import modelloader, devices, script_callbacks, shared -from modules.shared import cmd_opts, opts, state +from modules.shared import opts, state from swinir_model_arch import SwinIR as net from swinir_model_arch_v2 import Swin2SR as net2 from modules.upscaler import Upscaler, UpscalerData diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py index f1a7cf09..00c407de 100644 --- a/modules/codeformer/codeformer_arch.py +++ b/modules/codeformer/codeformer_arch.py @@ -1,14 +1,12 @@ # this file is copied from CodeFormer repository. 
Please see comment in modules/codeformer_model.py import math -import numpy as np import torch from torch import nn, Tensor import torch.nn.functional as F -from typing import Optional, List +from typing import Optional from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock -from basicsr.utils import get_root_logger from basicsr.utils.registry import ARCH_REGISTRY def calc_mean_std(feat, eps=1e-5): diff --git a/modules/codeformer/vqgan_arch.py b/modules/codeformer/vqgan_arch.py index e7293683..820e6b12 100644 --- a/modules/codeformer/vqgan_arch.py +++ b/modules/codeformer/vqgan_arch.py @@ -5,11 +5,9 @@ VQGAN code, adapted from the original created by the Unleashing Transformers aut https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py ''' -import numpy as np import torch import torch.nn as nn import torch.nn.functional as F -import copy from basicsr.utils import get_root_logger from basicsr.utils.registry import ARCH_REGISTRY diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py index 8d84bbc9..8e56cb89 100644 --- a/modules/codeformer_model.py +++ b/modules/codeformer_model.py @@ -33,11 +33,9 @@ def setup_model(dirname): try: from torchvision.transforms.functional import normalize from modules.codeformer.codeformer_arch import CodeFormer - from basicsr.utils.download_util import load_file_from_url - from basicsr.utils import imwrite, img2tensor, tensor2img + from basicsr.utils import img2tensor, tensor2img from facelib.utils.face_restoration_helper import FaceRestoreHelper from facelib.detection.retinaface import retinaface - from modules.shared import cmd_opts net_class = CodeFormer diff --git a/modules/config_states.py b/modules/config_states.py index 2ea00929..8f1ff428 100644 --- a/modules/config_states.py +++ b/modules/config_states.py @@ -14,7 +14,7 @@ from collections import OrderedDict import git from modules import shared, extensions -from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path, config_states_dir +from modules.paths_internal import script_path, config_states_dir all_config_states = OrderedDict() diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index f4369257..85aa6934 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -6,7 +6,7 @@ from PIL import Image from basicsr.utils.download_util import load_file_from_url import modules.esrgan_model_arch as arch -from modules import shared, modelloader, images, devices +from modules import modelloader, images, devices from modules.upscaler import Upscaler, UpscalerData from modules.shared import opts diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py index 7f8bc7c0..4de9dd8d 100644 --- a/modules/esrgan_model_arch.py +++ b/modules/esrgan_model_arch.py @@ -2,7 +2,6 @@ from collections import OrderedDict import math -import functools import torch import torch.nn as nn import torch.nn.functional as F diff --git a/modules/extensions.py b/modules/extensions.py index 34d9d654..829f8cd9 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -3,7 +3,6 @@ import sys import traceback import time -from datetime import datetime import git from modules import shared diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index fe8b18b2..f1c59c46 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -1,15 +1,11 @@ import base64 -import html import io -import math import os import re -from pathlib import 
Path import gradio as gr from modules.paths import data_path from modules import shared, ui_tempdir, script_callbacks -import tempfile from PIL import Image re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)' diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 1fc49537..9fe749b7 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -1,4 +1,3 @@ -import csv import datetime import glob import html @@ -18,7 +17,7 @@ from modules.textual_inversion.learn_schedule import LearnRateScheduler from torch import einsum from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_ -from collections import defaultdict, deque +from collections import deque from statistics import stdev, mean diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py index 76599f5a..be168736 100644 --- a/modules/hypernetworks/ui.py +++ b/modules/hypernetworks/ui.py @@ -1,6 +1,4 @@ import html -import os -import re import gradio as gr import modules.hypernetworks.hypernetwork diff --git a/modules/images.py b/modules/images.py index 5eb6d855..7392cb8b 100644 --- a/modules/images.py +++ b/modules/images.py @@ -19,7 +19,7 @@ import json import hashlib from modules import sd_samplers, shared, script_callbacks, errors -from modules.shared import opts, cmd_opts +from modules.shared import opts LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) diff --git a/modules/img2img.py b/modules/img2img.py index 32b1ecd6..d704bf90 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -1,12 +1,9 @@ -import math import os -import sys -import traceback import numpy as np from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError -from modules import devices, sd_samplers +from modules import sd_samplers from modules.generation_parameters_copypaste import create_override_settings_dict from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images from modules.shared import opts, state diff --git a/modules/mac_specific.py b/modules/mac_specific.py index 40ce2101..5c2f92a1 100644 --- a/modules/mac_specific.py +++ b/modules/mac_specific.py @@ -1,6 +1,5 @@ import torch import platform -from modules import paths from modules.sd_hijack_utils import CondFunc from packaging import version diff --git a/modules/modelloader.py b/modules/modelloader.py index cf685000..92ada694 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -1,4 +1,3 @@ -import glob import os import shutil import importlib diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py index 11b330bc..a4c4ef4e 100644 --- a/modules/models/diffusion/uni_pc/uni_pc.py +++ b/modules/models/diffusion/uni_pc/uni_pc.py @@ -1,5 +1,4 @@ import torch -import torch.nn.functional as F import math from tqdm.auto import trange diff --git a/modules/processing.py b/modules/processing.py index 6f5233c1..c3932d6b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -2,7 +2,6 @@ import json import math import os import sys -import warnings import hashlib import torch @@ -11,10 +10,10 @@ from PIL import Image, ImageFilter, ImageOps import random import cv2 from skimage import exposure -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List import modules.sd_hijack -from modules import devices, prompt_parser, masking, sd_samplers, lowvram, 
generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts +from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts from modules.sd_hijack import model_hijack from modules.shared import opts, cmd_opts, state import modules.shared as shared diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index d8135211..81573b78 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -3,7 +3,7 @@ from torch.nn.functional import silu from types import MethodType import modules.textual_inversion.textual_inversion -from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint +from modules import devices, sd_hijack_optimizations, shared from modules.hypernetworks import hypernetwork from modules.shared import cmd_opts from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 55a2ce4d..344d75c8 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -1,15 +1,9 @@ -import os import torch -from einops import repeat -from omegaconf import ListConfig - import ldm.models.diffusion.ddpm import ldm.models.diffusion.ddim import ldm.models.diffusion.plms -from ldm.models.diffusion.ddpm import LatentDiffusion -from ldm.models.diffusion.plms import PLMSSampler from ldm.models.diffusion.ddim import DDIMSampler, noise_like from ldm.models.diffusion.sampling_util import norm_thresholding diff --git a/modules/sd_hijack_ip2p.py b/modules/sd_hijack_ip2p.py index 41ed54a2..6fe6b6ff 100644 --- a/modules/sd_hijack_ip2p.py +++ b/modules/sd_hijack_ip2p.py @@ -1,8 +1,5 @@ -import collections import os.path -import sys -import gc -import time + def should_hijack_ip2p(checkpoint_info): from modules import sd_models_config diff --git a/modules/sd_hijack_xlmr.py b/modules/sd_hijack_xlmr.py index 4ac51c38..28528329 100644 --- a/modules/sd_hijack_xlmr.py +++ b/modules/sd_hijack_xlmr.py @@ -1,8 +1,6 @@ -import open_clip.tokenizer import torch from modules import sd_hijack_clip, devices -from modules.shared import opts class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords): diff --git a/modules/sd_models.py b/modules/sd_models.py index 11c1a344..1c09c709 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -565,7 +565,7 @@ def reload_model_weights(sd_model=None, info=None): def unload_model_weights(sd_model=None, info=None): - from modules import lowvram, devices, sd_hijack + from modules import devices, sd_hijack timer = Timer() if model_data.sd_model: diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py index 7a79925a..9bfe1237 100644 --- a/modules/sd_models_config.py +++ b/modules/sd_models_config.py @@ -1,4 +1,3 @@ -import re import os import torch diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 0fc9f456..3b8e9622 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -1,7 +1,6 @@ from collections import deque import torch import inspect -import einops import k_diffusion.sampling from modules import prompt_parser, devices, sd_samplers_common diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 521e485a..b7176125 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -1,8 +1,5 @@ -import torch -import safetensors.torch import os import collections -from collections import namedtuple from 
modules import paths, shared, devices, script_callbacks, sd_models import glob from copy import deepcopy diff --git a/modules/shared.py b/modules/shared.py index 4631965b..44cd2c0c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -1,12 +1,9 @@ -import argparse import datetime import json import os import sys import time -import requests -from PIL import Image import gradio as gr import tqdm diff --git a/modules/styles.py b/modules/styles.py index 11642075..c22769cf 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -1,18 +1,9 @@ -# We need this so Python doesn't complain about the unknown StableDiffusionProcessing-typehint at runtime -from __future__ import annotations - import csv import os import os.path import typing -import collections.abc as abc -import tempfile import shutil -if typing.TYPE_CHECKING: - # Only import this when code is being type-checked, it doesn't have any effect at runtime - from .processing import StableDiffusionProcessing - class PromptStyle(typing.NamedTuple): name: str diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index d7d8d2e3..7770d22f 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -1,10 +1,8 @@ import cv2 import requests import os -from collections import defaultdict -from math import log, sqrt import numpy as np -from PIL import Image, ImageDraw +from PIL import ImageDraw GREEN = "#0F0" BLUE = "#00F" diff --git a/modules/textual_inversion/image_embedding.py b/modules/textual_inversion/image_embedding.py index 5593f88c..ee0e850a 100644 --- a/modules/textual_inversion/image_embedding.py +++ b/modules/textual_inversion/image_embedding.py @@ -2,7 +2,7 @@ import base64 import json import numpy as np import zlib -from PIL import Image, PngImagePlugin, ImageDraw, ImageFont +from PIL import Image, ImageDraw, ImageFont from fonts.ttf import Roboto import torch from modules.shared import opts diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index da0bcb26..d0cad09e 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -1,13 +1,9 @@ import os from PIL import Image, ImageOps import math -import platform -import sys import tqdm -import time from modules import paths, shared, images, deepbooru -from modules.shared import opts, cmd_opts from modules.textual_inversion import autocrop diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index f753b75f..9ed9ba45 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -1,7 +1,6 @@ import os import sys import traceback -import inspect from collections import namedtuple import torch diff --git a/modules/txt2img.py b/modules/txt2img.py index 16841d0f..f022381c 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -1,18 +1,15 @@ import modules.scripts -from modules import sd_samplers +from modules import sd_samplers, processing from modules.generation_parameters_copypaste import create_override_settings_dict -from modules.processing import StableDiffusionProcessing, Processed, StableDiffusionProcessingTxt2Img, \ - StableDiffusionProcessingImg2Img, process_images from modules.shared import opts, cmd_opts import modules.shared as shared -import modules.processing as processing from modules.ui import plaintext_to_html def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, 
sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, override_settings_texts, *args): override_settings = create_override_settings_dict(override_settings_texts) - p = StableDiffusionProcessingTxt2Img( + p = processing.StableDiffusionProcessingTxt2Img( sd_model=shared.sd_model, outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples, outpath_grids=opts.outdir_grids or opts.outdir_txt2img_grids, @@ -53,7 +50,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step processed = modules.scripts.scripts_txt2img.run(p, *args) if processed is None: - processed = process_images(p) + processed = processing.process_images(p) p.close() diff --git a/modules/ui.py b/modules/ui.py index 6beda76f..f7e57593 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -14,10 +14,10 @@ from PIL import Image, PngImagePlugin from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing, progress -from modules.ui_components import FormRow, FormColumn, FormGroup, ToolButton, FormHTML +from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path, data_path -from modules.shared import opts, cmd_opts, restricted_opts +from modules.shared import opts, cmd_opts import modules.codeformer_model import modules.generation_parameters_copypaste as parameters_copypaste @@ -28,7 +28,6 @@ import modules.shared as shared import modules.styles import modules.textual_inversion.ui from modules import prompt_parser -from modules.images import save_image from modules.sd_hijack import model_hijack from modules.sd_samplers import samplers, samplers_for_img2img from modules.textual_inversion import textual_inversion diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 49e06289..800e467a 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -1,4 +1,3 @@ -import glob import os.path import urllib.parse from pathlib import Path diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index f25639e5..c7dc1154 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -1,5 +1,5 @@ import gradio as gr -from modules import scripts_postprocessing, scripts, shared, gfpgan_model, codeformer_model, ui_common, postprocessing, call_queue +from modules import scripts, shared, ui_common, postprocessing, call_queue import modules.generation_parameters_copypaste as parameters_copypaste diff --git a/modules/upscaler.py b/modules/upscaler.py index 0ad4fe99..777593b0 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -2,8 +2,6 @@ import os from abc import abstractmethod import PIL -import numpy as np -import torch from PIL import Image import modules.shared diff --git a/modules/xlmr.py b/modules/xlmr.py index beab3fdf..e056c3f6 100644 --- a/modules/xlmr.py +++ b/modules/xlmr.py @@ -1,4 +1,4 @@ -from transformers import BertPreTrainedModel,BertModel,BertConfig +from transformers import BertPreTrainedModel, BertConfig import torch.nn as 
nn import torch from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaConfig diff --git a/pyproject.toml b/pyproject.toml index 1e164abc..9caa9ba2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,13 @@ [tool.ruff] +exclude = ["extensions"] + ignore = [ "E501", - "E731", - "E402", # Module level import not at top of file - "F401" # Module imported but unused + + "F401", # Module imported but unused ] -exclude = ["extensions"] + +[tool.ruff.per-file-ignores] +"webui.py" = ["E402"] # Module level import not at top of file \ No newline at end of file diff --git a/scripts/custom_code.py b/scripts/custom_code.py index f36a3675..cc6f0d49 100644 --- a/scripts/custom_code.py +++ b/scripts/custom_code.py @@ -4,7 +4,7 @@ import ast import copy from modules.processing import Processed -from modules.shared import opts, cmd_opts, state +from modules.shared import cmd_opts def convertExpr2Expression(expr): diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index b10fed6c..665dbe89 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -7,9 +7,9 @@ import modules.scripts as scripts import gradio as gr from PIL import Image, ImageDraw -from modules import images, processing, devices +from modules import images from modules.processing import Processed, process_images -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state # this function is taken from https://github.com/parlance-zz/g-diffuser-bot diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py index ddcbd2d3..c0bbecc1 100644 --- a/scripts/poor_mans_outpainting.py +++ b/scripts/poor_mans_outpainting.py @@ -4,9 +4,9 @@ import modules.scripts as scripts import gradio as gr from PIL import Image, ImageDraw -from modules import images, processing, devices +from modules import images, devices from modules.processing import Processed, process_images -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state class Script(scripts.Script): diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index e9b11517..fb06beab 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -1,14 +1,11 @@ import math -from collections import namedtuple -from copy import copy -import random import modules.scripts as scripts import gradio as gr from modules import images -from modules.processing import process_images, Processed -from modules.shared import opts, cmd_opts, state +from modules.processing import process_images +from modules.shared import opts, state import modules.sd_samplers diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 76dc5778..149bc85f 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -1,6 +1,4 @@ import copy -import math -import os import random import sys import traceback @@ -11,8 +9,7 @@ import gradio as gr from modules import sd_samplers from modules.processing import Processed, process_images -from PIL import Image -from modules.shared import opts, cmd_opts, state +from modules.shared import state def process_string_tag(tag): diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index 332d76d9..d873a09c 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -4,9 +4,9 @@ import modules.scripts as scripts import gradio as gr from PIL import Image -from modules import processing, shared, sd_samplers, images, devices +from modules import processing, shared, images, devices from 
modules.processing import Processed -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state class Script(scripts.Script): diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 2ff42ef8..332e0ecd 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -10,15 +10,13 @@ import numpy as np import modules.scripts as scripts import gradio as gr -from modules import images, paths, sd_samplers, processing, sd_models, sd_vae +from modules import images, sd_samplers, processing, sd_models, sd_vae from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state import modules.shared as shared import modules.sd_samplers import modules.sd_models import modules.sd_vae -import glob -import os import re from modules.ui_components import ToolButton diff --git a/webui.py b/webui.py index ec3d2aba..48277075 100644 --- a/webui.py +++ b/webui.py @@ -43,7 +43,7 @@ if ".dev" in torch.__version__ or "+git" in torch.__version__: torch.__long_version__ = torch.__version__ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) -from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states +from modules import shared, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states import modules.codeformer_model as codeformer import modules.face_restoration import modules.gfpgan_model as gfpgan -- cgit v1.2.3 From 49a55b410b66b7dd9be9335d8a2e3a71e4f8b15c Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 11 May 2023 18:28:15 +0300 Subject: Autofix Ruff W (not W605) (mostly whitespace) --- extensions-builtin/LDSR/ldsr_model_arch.py | 4 +- extensions-builtin/LDSR/sd_hijack_ddpm_v1.py | 6 +-- extensions-builtin/ScuNET/scunet_model_arch.py | 2 +- extensions-builtin/SwinIR/scripts/swinir_model.py | 2 +- extensions-builtin/SwinIR/swinir_model_arch.py | 2 +- extensions-builtin/SwinIR/swinir_model_arch_v2.py | 52 +++++++++++------------ launch.py | 2 +- modules/api/api.py | 4 +- modules/api/models.py | 2 +- modules/cmd_args.py | 2 +- modules/codeformer/codeformer_arch.py | 14 +++--- modules/codeformer/vqgan_arch.py | 38 ++++++++--------- modules/esrgan_model_arch.py | 4 +- modules/extras.py | 2 +- modules/hypernetworks/hypernetwork.py | 12 +++--- modules/images.py | 2 +- modules/mac_specific.py | 4 +- modules/masking.py | 2 +- modules/ngrok.py | 4 +- modules/processing.py | 2 +- modules/script_callbacks.py | 14 +++--- modules/sd_hijack.py | 12 +++--- modules/sd_hijack_optimizations.py | 32 +++++++------- modules/sd_models.py | 4 +- modules/sd_samplers_kdiffusion.py | 18 ++++---- modules/sub_quadratic_attention.py | 2 +- modules/textual_inversion/dataset.py | 4 +- modules/textual_inversion/preprocess.py | 2 +- modules/textual_inversion/textual_inversion.py | 16 +++---- modules/ui.py | 18 ++++---- modules/ui_extensions.py | 6 +-- modules/xlmr.py | 6 +-- pyproject.toml | 5 ++- scripts/img2imgalt.py | 14 +++--- scripts/loopback.py | 8 ++-- scripts/poor_mans_outpainting.py | 2 +- scripts/prompt_matrix.py | 2 +- scripts/prompts_from_file.py | 4 +- scripts/sd_upscale.py | 2 +- 39 files changed, 167 insertions(+), 166 deletions(-) (limited to 'modules/processing.py') diff --git a/extensions-builtin/LDSR/ldsr_model_arch.py b/extensions-builtin/LDSR/ldsr_model_arch.py index 2173de79..7f450086 100644 --- a/extensions-builtin/LDSR/ldsr_model_arch.py +++ 
b/extensions-builtin/LDSR/ldsr_model_arch.py @@ -130,11 +130,11 @@ class LDSR: im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS) else: print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)") - + # pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge')) - + logs = self.run(model["model"], im_padded, diffusion_steps, eta) sample = logs["sample"] diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py index 57c02d12..631a08ef 100644 --- a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py @@ -460,7 +460,7 @@ class LatentDiffusionV1(DDPMV1): self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False - self.bbox_tokenizer = None + self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: @@ -792,7 +792,7 @@ class LatentDiffusionV1(DDPMV1): z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): + if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] @@ -890,7 +890,7 @@ class LatentDiffusionV1(DDPMV1): if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm - assert not return_ids + assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) diff --git a/extensions-builtin/ScuNET/scunet_model_arch.py b/extensions-builtin/ScuNET/scunet_model_arch.py index 8028918a..b51a8806 100644 --- a/extensions-builtin/ScuNET/scunet_model_arch.py +++ b/extensions-builtin/ScuNET/scunet_model_arch.py @@ -265,4 +265,4 @@ class SCUNet(nn.Module): nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) \ No newline at end of file + nn.init.constant_(m.weight, 1.0) diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py index 55dd94ab..0ba50487 100644 --- a/extensions-builtin/SwinIR/scripts/swinir_model.py +++ b/extensions-builtin/SwinIR/scripts/swinir_model.py @@ -150,7 +150,7 @@ def inference(img, model, tile, tile_overlap, window_size, scale): for w_idx in w_idx_list: if state.interrupted or state.skipped: break - + in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile] out_patch = model(in_patch) out_patch_mask = torch.ones_like(out_patch) diff --git a/extensions-builtin/SwinIR/swinir_model_arch.py b/extensions-builtin/SwinIR/swinir_model_arch.py index 73e37cfa..93b93274 100644 --- a/extensions-builtin/SwinIR/swinir_model_arch.py +++ b/extensions-builtin/SwinIR/swinir_model_arch.py @@ -805,7 +805,7 @@ class SwinIR(nn.Module): def forward(self, x): H, W = x.shape[2:] x = self.check_image_size(x) - + self.mean = self.mean.type_as(x) x = (x - self.mean) * self.img_range diff --git a/extensions-builtin/SwinIR/swinir_model_arch_v2.py b/extensions-builtin/SwinIR/swinir_model_arch_v2.py index 3ca9be78..dad22cca 100644 --- a/extensions-builtin/SwinIR/swinir_model_arch_v2.py +++ b/extensions-builtin/SwinIR/swinir_model_arch_v2.py @@ -241,7 +241,7 @@ class SwinTransformerBlock(nn.Module): attn_mask = None self.register_buffer("attn_mask", attn_mask) - + def calculate_mask(self, x_size): # calculate attention mask for SW-MSA H, W = x_size @@ -263,7 +263,7 @@ class SwinTransformerBlock(nn.Module): attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) - return attn_mask + return attn_mask def forward(self, x, x_size): H, W = x_size @@ -288,7 +288,7 @@ class SwinTransformerBlock(nn.Module): attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C else: attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device)) - + # merge windows attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C @@ -369,7 +369,7 @@ class PatchMerging(nn.Module): H, W = self.input_resolution flops = (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim flops += H * W * self.dim // 2 - return flops + return flops class BasicLayer(nn.Module): """ A basic Swin Transformer layer for one stage. @@ -447,7 +447,7 @@ class BasicLayer(nn.Module): nn.init.constant_(blk.norm1.weight, 0) nn.init.constant_(blk.norm2.bias, 0) nn.init.constant_(blk.norm2.weight, 0) - + class PatchEmbed(nn.Module): r""" Image to Patch Embedding Args: @@ -492,7 +492,7 @@ class PatchEmbed(nn.Module): flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1]) if self.norm is not None: flops += Ho * Wo * self.embed_dim - return flops + return flops class RSTB(nn.Module): """Residual Swin Transformer Block (RSTB). 
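
[Annotation] The whitespace-only hunks throughout this commit correspond to ruff's W291 (trailing whitespace after code) and W293 (whitespace on an otherwise-blank line); per the commit subject, W605 (invalid escape sequence) was deliberately excluded. Assuming the [tool.ruff] configuration added earlier in this series, an invocation along the lines of "ruff check --select W --ignore W605 --fix ." would reproduce these edits mechanically — the exact command used is an assumption, not recorded in the patch.
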
@@ -531,7 +531,7 @@ class RSTB(nn.Module): num_heads=num_heads, window_size=window_size, mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, + qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, drop_path=drop_path, norm_layer=norm_layer, @@ -622,7 +622,7 @@ class Upsample(nn.Sequential): else: raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.') super(Upsample, self).__init__(*m) - + class Upsample_hf(nn.Sequential): """Upsample module. @@ -642,7 +642,7 @@ class Upsample_hf(nn.Sequential): m.append(nn.PixelShuffle(3)) else: raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.') - super(Upsample_hf, self).__init__(*m) + super(Upsample_hf, self).__init__(*m) class UpsampleOneStep(nn.Sequential): @@ -667,8 +667,8 @@ class UpsampleOneStep(nn.Sequential): H, W = self.input_resolution flops = H * W * self.num_feat * 3 * 9 return flops - - + + class Swin2SR(nn.Module): r""" Swin2SR @@ -699,7 +699,7 @@ class Swin2SR(nn.Module): def __init__(self, img_size=64, patch_size=1, in_chans=3, embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6), - window_size=7, mlp_ratio=4., qkv_bias=True, + window_size=7, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, ape=False, patch_norm=True, use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv', @@ -764,7 +764,7 @@ class Swin2SR(nn.Module): num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, - qkv_bias=qkv_bias, + qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results norm_layer=norm_layer, @@ -776,7 +776,7 @@ class Swin2SR(nn.Module): ) self.layers.append(layer) - + if self.upsampler == 'pixelshuffle_hf': self.layers_hf = nn.ModuleList() for i_layer in range(self.num_layers): @@ -787,7 +787,7 @@ class Swin2SR(nn.Module): num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, - qkv_bias=qkv_bias, + qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results norm_layer=norm_layer, @@ -799,7 +799,7 @@ class Swin2SR(nn.Module): ) self.layers_hf.append(layer) - + self.norm = norm_layer(self.num_features) # build the last conv layer in deep feature extraction @@ -829,10 +829,10 @@ class Swin2SR(nn.Module): self.conv_aux = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) self.conv_after_aux = nn.Sequential( nn.Conv2d(3, num_feat, 3, 1, 1), - nn.LeakyReLU(inplace=True)) + nn.LeakyReLU(inplace=True)) self.upsample = Upsample(upscale, num_feat) self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) - + elif self.upsampler == 'pixelshuffle_hf': self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)) @@ -846,7 +846,7 @@ class Swin2SR(nn.Module): nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)) self.conv_last_hf = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) - + elif self.upsampler == 'pixelshuffledirect': # for lightweight SR (to save parameters) self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch, @@ -905,7 +905,7 @@ class Swin2SR(nn.Module): x = self.patch_unembed(x, x_size) return x - + def forward_features_hf(self, x): x_size = (x.shape[2], x.shape[3]) x = self.patch_embed(x) @@ -919,7 +919,7 @@ class Swin2SR(nn.Module): x = self.norm(x) # B L C x = self.patch_unembed(x, x_size) - return x + return x def 
forward(self, x): H, W = x.shape[2:] @@ -951,7 +951,7 @@ class Swin2SR(nn.Module): x = self.conv_after_body(self.forward_features(x)) + x x_before = self.conv_before_upsample(x) x_out = self.conv_last(self.upsample(x_before)) - + x_hf = self.conv_first_hf(x_before) x_hf = self.conv_after_body_hf(self.forward_features_hf(x_hf)) + x_hf x_hf = self.conv_before_upsample_hf(x_hf) @@ -977,15 +977,15 @@ class Swin2SR(nn.Module): x_first = self.conv_first(x) res = self.conv_after_body(self.forward_features(x_first)) + x_first x = x + self.conv_last(res) - + x = x / self.img_range + self.mean if self.upsampler == "pixelshuffle_aux": return x[:, :, :H*self.upscale, :W*self.upscale], aux - + elif self.upsampler == "pixelshuffle_hf": x_out = x_out / self.img_range + self.mean return x_out[:, :, :H*self.upscale, :W*self.upscale], x[:, :, :H*self.upscale, :W*self.upscale], x_hf[:, :, :H*self.upscale, :W*self.upscale] - + else: return x[:, :, :H*self.upscale, :W*self.upscale] @@ -1014,4 +1014,4 @@ if __name__ == '__main__': x = torch.randn((1, 3, height, width)) x = model(x) - print(x.shape) \ No newline at end of file + print(x.shape) diff --git a/launch.py b/launch.py index 670af87c..62b33f14 100644 --- a/launch.py +++ b/launch.py @@ -327,7 +327,7 @@ def prepare_environment(): if args.update_all_extensions: git_pull_recursive(extensions_dir) - + if "--exit" in sys.argv: print("Exiting because of --exit argument") exit(0) diff --git a/modules/api/api.py b/modules/api/api.py index 594fa655..165985c3 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -227,7 +227,7 @@ class Api: script_idx = script_name_to_index(script_name, script_runner.selectable_scripts) script = script_runner.selectable_scripts[script_idx] return script, script_idx - + def get_scripts_list(self): t2ilist = [str(title.lower()) for title in scripts.scripts_txt2img.titles] i2ilist = [str(title.lower()) for title in scripts.scripts_img2img.titles] @@ -237,7 +237,7 @@ class Api: def get_script(self, script_name, script_runner): if script_name is None or script_name == "": return None, None - + script_idx = script_name_to_index(script_name, script_runner.scripts) return script_runner.scripts[script_idx] diff --git a/modules/api/models.py b/modules/api/models.py index 4d291076..006ccdb7 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -289,4 +289,4 @@ class MemoryResponse(BaseModel): class ScriptsList(BaseModel): txt2img: list = Field(default=None,title="Txt2img", description="Titles of scripts (txt2img)") - img2img: list = Field(default=None,title="Img2img", description="Titles of scripts (img2img)") \ No newline at end of file + img2img: list = Field(default=None,title="Img2img", description="Titles of scripts (img2img)") diff --git a/modules/cmd_args.py b/modules/cmd_args.py index e01ca655..f4a4ab36 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -102,4 +102,4 @@ parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gra parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers") parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False) parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False) -parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy') \ No newline at end of file 
+parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy') diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py index 45c70f84..12db6814 100644 --- a/modules/codeformer/codeformer_arch.py +++ b/modules/codeformer/codeformer_arch.py @@ -119,7 +119,7 @@ class TransformerSALayer(nn.Module): tgt_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None, query_pos: Optional[Tensor] = None): - + # self attention tgt2 = self.norm1(tgt) q = k = self.with_pos_embed(tgt2, query_pos) @@ -159,7 +159,7 @@ class Fuse_sft_block(nn.Module): @ARCH_REGISTRY.register() class CodeFormer(VQAutoEncoder): - def __init__(self, dim_embd=512, n_head=8, n_layers=9, + def __init__(self, dim_embd=512, n_head=8, n_layers=9, codebook_size=1024, latent_size=256, connect_list=('32', '64', '128', '256'), fix_modules=('quantize', 'generator')): @@ -179,14 +179,14 @@ class CodeFormer(VQAutoEncoder): self.feat_emb = nn.Linear(256, self.dim_embd) # transformer - self.ft_layers = nn.Sequential(*[TransformerSALayer(embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0) + self.ft_layers = nn.Sequential(*[TransformerSALayer(embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0) for _ in range(self.n_layers)]) # logits_predict head self.idx_pred_layer = nn.Sequential( nn.LayerNorm(dim_embd), nn.Linear(dim_embd, codebook_size, bias=False)) - + self.channels = { '16': 512, '32': 256, @@ -221,7 +221,7 @@ class CodeFormer(VQAutoEncoder): enc_feat_dict = {} out_list = [self.fuse_encoder_block[f_size] for f_size in self.connect_list] for i, block in enumerate(self.encoder.blocks): - x = block(x) + x = block(x) if i in out_list: enc_feat_dict[str(x.shape[-1])] = x.clone() @@ -266,11 +266,11 @@ class CodeFormer(VQAutoEncoder): fuse_list = [self.fuse_generator_block[f_size] for f_size in self.connect_list] for i, block in enumerate(self.generator.blocks): - x = block(x) + x = block(x) if i in fuse_list: # fuse after i-th block f_size = str(x.shape[-1]) if w>0: x = self.fuse_convs_dict[f_size](enc_feat_dict[f_size].detach(), x, w) out = x # logits doesn't need softmax before cross_entropy loss - return out, logits, lq_feat \ No newline at end of file + return out, logits, lq_feat diff --git a/modules/codeformer/vqgan_arch.py b/modules/codeformer/vqgan_arch.py index b24a0394..09ee6660 100644 --- a/modules/codeformer/vqgan_arch.py +++ b/modules/codeformer/vqgan_arch.py @@ -13,7 +13,7 @@ from basicsr.utils.registry import ARCH_REGISTRY def normalize(in_channels): return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) - + @torch.jit.script def swish(x): @@ -210,15 +210,15 @@ class AttnBlock(nn.Module): # compute attention b, c, h, w = q.shape q = q.reshape(b, c, h*w) - q = q.permute(0, 2, 1) + q = q.permute(0, 2, 1) k = k.reshape(b, c, h*w) - w_ = torch.bmm(q, k) + w_ = torch.bmm(q, k) w_ = w_ * (int(c)**(-0.5)) w_ = F.softmax(w_, dim=2) # attend to values v = v.reshape(b, c, h*w) - w_ = w_.permute(0, 2, 1) + w_ = w_.permute(0, 2, 1) h_ = torch.bmm(v, w_) h_ = h_.reshape(b, c, h, w) @@ -270,18 +270,18 @@ class Encoder(nn.Module): def forward(self, x): for block in self.blocks: x = block(x) - + return x class Generator(nn.Module): def __init__(self, nf, emb_dim, ch_mult, res_blocks, img_size, attn_resolutions): super().__init__() - self.nf = nf - self.ch_mult = ch_mult + self.nf = nf + self.ch_mult = ch_mult self.num_resolutions = len(self.ch_mult) self.num_res_blocks = 
res_blocks - self.resolution = img_size + self.resolution = img_size self.attn_resolutions = attn_resolutions self.in_channels = emb_dim self.out_channels = 3 @@ -315,24 +315,24 @@ class Generator(nn.Module): blocks.append(nn.Conv2d(block_in_ch, self.out_channels, kernel_size=3, stride=1, padding=1)) self.blocks = nn.ModuleList(blocks) - + def forward(self, x): for block in self.blocks: x = block(x) - + return x - + @ARCH_REGISTRY.register() class VQAutoEncoder(nn.Module): def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=None, codebook_size=1024, emb_dim=256, beta=0.25, gumbel_straight_through=False, gumbel_kl_weight=1e-8, model_path=None): super().__init__() logger = get_root_logger() - self.in_channels = 3 - self.nf = nf - self.n_blocks = res_blocks + self.in_channels = 3 + self.nf = nf + self.n_blocks = res_blocks self.codebook_size = codebook_size self.embed_dim = emb_dim self.ch_mult = ch_mult @@ -363,11 +363,11 @@ class VQAutoEncoder(nn.Module): self.kl_weight ) self.generator = Generator( - self.nf, + self.nf, self.embed_dim, - self.ch_mult, - self.n_blocks, - self.resolution, + self.ch_mult, + self.n_blocks, + self.resolution, self.attn_resolutions ) @@ -432,4 +432,4 @@ class VQGANDiscriminator(nn.Module): raise ValueError('Wrong params!') def forward(self, x): - return self.main(x) \ No newline at end of file + return self.main(x) diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py index 4de9dd8d..2b9888ba 100644 --- a/modules/esrgan_model_arch.py +++ b/modules/esrgan_model_arch.py @@ -105,7 +105,7 @@ class ResidualDenseBlock_5C(nn.Module): Modified options that can be used: - "Partial Convolution based Padding" arXiv:1811.11718 - "Spectral normalization" arXiv:1802.05957 - - "ICASSP 2020 - ESRGAN+ : Further Improving ESRGAN" N. C. + - "ICASSP 2020 - ESRGAN+ : Further Improving ESRGAN" N. C. {Rakotonirina} and A. 
{Rasoanaivo} """ @@ -170,7 +170,7 @@ class GaussianNoise(nn.Module): scale = self.sigma * x.detach() if self.is_relative_detach else self.sigma * x sampled_noise = self.noise.repeat(*x.size()).normal_() * scale x = x + sampled_noise - return x + return x def conv1x1(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) diff --git a/modules/extras.py b/modules/extras.py index eb4f0b42..bdf9b3b7 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -199,7 +199,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ result_is_inpainting_model = True else: theta_0[key] = theta_func2(a, b, multiplier) - + theta_0[key] = to_half(theta_0[key], save_as_half) shared.state.sampling_step += 1 diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 38ef074f..570b5603 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -540,7 +540,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi return hypernetwork, filename scheduler = LearnRateScheduler(learn_rate, steps, initial_step) - + clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else None if clip_grad: clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False) @@ -593,7 +593,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi print(e) scaler = torch.cuda.amp.GradScaler() - + batch_size = ds.batch_size gradient_step = ds.gradient_step # n steps = batch_size * gradient_step * n image processed @@ -636,7 +636,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi if clip_grad: clip_grad_sched.step(hypernetwork.step) - + with devices.autocast(): x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) if use_weight: @@ -657,14 +657,14 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi _loss_step += loss.item() scaler.scale(loss).backward() - + # go back until we reach gradient accumulation steps if (j + 1) % gradient_step != 0: continue loss_logging.append(_loss_step) if clip_grad: clip_grad(weights, clip_grad_sched.learn_rate) - + scaler.step(optimizer) scaler.update() hypernetwork.step += 1 @@ -674,7 +674,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi _loss_step = 0 steps_done = hypernetwork.step + 1 - + epoch_num = hypernetwork.step // steps_per_epoch epoch_step = hypernetwork.step % steps_per_epoch diff --git a/modules/images.py b/modules/images.py index 3b8b62d9..b2de3662 100644 --- a/modules/images.py +++ b/modules/images.py @@ -367,7 +367,7 @@ class FilenameGenerator: self.seed = seed self.prompt = prompt self.image = image - + def hasprompt(self, *args): lower = self.prompt.lower() if self.p is None or self.prompt is None: diff --git a/modules/mac_specific.py b/modules/mac_specific.py index 5c2f92a1..d74c6b95 100644 --- a/modules/mac_specific.py +++ b/modules/mac_specific.py @@ -42,7 +42,7 @@ if has_mps: # MPS workaround for https://github.com/pytorch/pytorch/issues/79383 CondFunc('torch.Tensor.to', lambda orig_func, self, *args, **kwargs: orig_func(self.contiguous(), *args, **kwargs), lambda _, self, *args, **kwargs: self.device.type != 'mps' and (args and isinstance(args[0], torch.device) and args[0].type == 'mps' or isinstance(kwargs.get('device'), torch.device) and 
kwargs['device'].type == 'mps')) - # MPS workaround for https://github.com/pytorch/pytorch/issues/80800 + # MPS workaround for https://github.com/pytorch/pytorch/issues/80800 CondFunc('torch.nn.functional.layer_norm', lambda orig_func, *args, **kwargs: orig_func(*([args[0].contiguous()] + list(args[1:])), **kwargs), lambda _, *args, **kwargs: args and isinstance(args[0], torch.Tensor) and args[0].device.type == 'mps') # MPS workaround for https://github.com/pytorch/pytorch/issues/90532 @@ -60,4 +60,4 @@ if has_mps: # MPS workaround for https://github.com/pytorch/pytorch/issues/92311 if platform.processor() == 'i386': for funcName in ['torch.argmax', 'torch.Tensor.argmax']: - CondFunc(funcName, lambda _, input, *args, **kwargs: torch.max(input.float() if input.dtype == torch.int64 else input, *args, **kwargs)[1], lambda _, input, *args, **kwargs: input.device.type == 'mps') \ No newline at end of file + CondFunc(funcName, lambda _, input, *args, **kwargs: torch.max(input.float() if input.dtype == torch.int64 else input, *args, **kwargs)[1], lambda _, input, *args, **kwargs: input.device.type == 'mps') diff --git a/modules/masking.py b/modules/masking.py index a5c4d2da..be9f84c7 100644 --- a/modules/masking.py +++ b/modules/masking.py @@ -4,7 +4,7 @@ from PIL import Image, ImageFilter, ImageOps def get_crop_region(mask, pad=0): """finds a rectangular region that contains all masked ares in an image. Returns (x1, y1, x2, y2) coordinates of the rectangle. For example, if a user has painted the top-right part of a 512x512 image", the result may be (256, 0, 512, 256)""" - + h, w = mask.shape crop_left = 0 diff --git a/modules/ngrok.py b/modules/ngrok.py index 7a7b4b26..67a74e85 100644 --- a/modules/ngrok.py +++ b/modules/ngrok.py @@ -13,7 +13,7 @@ def connect(token, port, region): config = conf.PyngrokConfig( auth_token=token, region=region ) - + # Guard for existing tunnels existing = ngrok.get_tunnels(pyngrok_config=config) if existing: @@ -24,7 +24,7 @@ def connect(token, port, region): print(f'ngrok has already been connected to localhost:{port}! 
URL: {public_url}\n' 'You can use this link after the launch is complete.') return - + try: if account is None: public_url = ngrok.connect(port, pyngrok_config=config, bind_tls=True).public_url diff --git a/modules/processing.py b/modules/processing.py index c3932d6b..f902b9df 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -164,7 +164,7 @@ class StableDiffusionProcessing: self.all_subseeds = None self.iteration = 0 self.is_hr_pass = False - + @property def sd_model(self): diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 17109732..7d9dd736 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -32,22 +32,22 @@ class CFGDenoiserParams: def __init__(self, x, image_cond, sigma, sampling_step, total_sampling_steps, text_cond, text_uncond): self.x = x """Latent image representation in the process of being denoised""" - + self.image_cond = image_cond """Conditioning image""" - + self.sigma = sigma """Current sigma noise step value""" - + self.sampling_step = sampling_step """Current Sampling step number""" - + self.total_sampling_steps = total_sampling_steps """Total number of sampling steps planned""" - + self.text_cond = text_cond """ Encoder hidden states of text conditioning from prompt""" - + self.text_uncond = text_uncond """ Encoder hidden states of text conditioning from negative prompt""" @@ -240,7 +240,7 @@ def add_callback(callbacks, fun): callbacks.append(ScriptCallback(filename, fun)) - + def remove_current_script_callbacks(): stack = [x for x in inspect.stack() if x.filename != __file__] filename = stack[0].filename if len(stack) > 0 else 'unknown file' diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index e374aeb8..7e50f1ab 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -34,7 +34,7 @@ def apply_optimizations(): ldm.modules.diffusionmodules.model.nonlinearity = silu ldm.modules.diffusionmodules.openaimodel.th = sd_hijack_unet.th - + optimization_method = None can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention) # not everyone has torch 2.x to use sdp @@ -92,12 +92,12 @@ def fix_checkpoint(): def weighted_loss(sd_model, pred, target, mean=True): #Calculate the weight normally, but ignore the mean loss = sd_model._old_get_loss(pred, target, mean=False) - + #Check if we have weights available weight = getattr(sd_model, '_custom_loss_weight', None) if weight is not None: loss *= weight - + #Return the loss, as mean if specified return loss.mean() if mean else loss @@ -105,7 +105,7 @@ def weighted_forward(sd_model, x, c, w, *args, **kwargs): try: #Temporarily append weights to a place accessible during loss calc sd_model._custom_loss_weight = w - + #Replace 'get_loss' with a weight-aware one. 
Otherwise we need to reimplement 'forward' completely #Keep 'get_loss', but don't overwrite the previous old_get_loss if it's already set if not hasattr(sd_model, '_old_get_loss'): @@ -120,7 +120,7 @@ def weighted_forward(sd_model, x, c, w, *args, **kwargs): del sd_model._custom_loss_weight except AttributeError: pass - + #If we have an old loss function, reset the loss function to the original one if hasattr(sd_model, '_old_get_loss'): sd_model.get_loss = sd_model._old_get_loss @@ -184,7 +184,7 @@ class StableDiffusionModelHijack: def undo_hijack(self, m): if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation: - m.cond_stage_model = m.cond_stage_model.wrapped + m.cond_stage_model = m.cond_stage_model.wrapped elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords: m.cond_stage_model = m.cond_stage_model.wrapped diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index a174bbe1..f00fe55c 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -62,10 +62,10 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None): end = i + 2 s1 = einsum('b i d, b j d -> b i j', q[i:end], k[i:end]) s1 *= self.scale - + s2 = s1.softmax(dim=-1) del s1 - + r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end]) del s2 del q, k, v @@ -95,43 +95,43 @@ def split_cross_attention_forward(self, x, context=None, mask=None): with devices.without_autocast(disable=not shared.opts.upcast_attn): k_in = k_in * self.scale - + del context, x - + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in)) del q_in, k_in, v_in - + r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) - + mem_free_total = get_available_vram() - + gb = 1024 ** 3 tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() modifier = 3 if q.element_size() == 2 else 2.5 mem_required = tensor_size * modifier steps = 1 - + if mem_required > mem_free_total: steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2))) # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB " # f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}") - + if steps > 64: max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64 raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). 
' f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free') - + slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] for i in range(0, q.shape[1], slice_size): end = i + slice_size s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) - + s2 = s1.softmax(dim=-1, dtype=q.dtype) del s1 - + r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v) del s2 - + del q, k, v r1 = r1.to(dtype) @@ -228,7 +228,7 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None): with devices.without_autocast(disable=not shared.opts.upcast_attn): k = k * self.scale - + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v)) r = einsum_op(q, k, v) r = r.to(dtype) @@ -369,7 +369,7 @@ def scaled_dot_product_attention_forward(self, x, context=None, mask=None): q = q_in.view(batch_size, -1, h, head_dim).transpose(1, 2) k = k_in.view(batch_size, -1, h, head_dim).transpose(1, 2) v = v_in.view(batch_size, -1, h, head_dim).transpose(1, 2) - + del q_in, k_in, v_in dtype = q.dtype @@ -451,7 +451,7 @@ def cross_attention_attnblock_forward(self, x): h3 += x return h3 - + def xformers_attnblock_forward(self, x): try: h_ = x diff --git a/modules/sd_models.py b/modules/sd_models.py index d1e946a5..3316d021 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -165,7 +165,7 @@ def model_hash(filename): def select_checkpoint(): model_checkpoint = shared.opts.sd_model_checkpoint - + checkpoint_info = checkpoint_alisases.get(model_checkpoint, None) if checkpoint_info is not None: return checkpoint_info @@ -372,7 +372,7 @@ def enable_midas_autodownload(): if not os.path.exists(path): if not os.path.exists(midas_path): mkdir(midas_path) - + print(f"Downloading midas model weights for {model_type} to {path}") request.urlretrieve(midas_urls[model_type], path) print(f"{model_type} downloaded") diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 2f733cf5..e9e41818 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -93,10 +93,10 @@ class CFGDenoiser(torch.nn.Module): if shared.sd_model.model.conditioning_key == "crossattn-adm": image_uncond = torch.zeros_like(image_cond) - make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": c_crossattn, "c_adm": c_adm} + make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": c_crossattn, "c_adm": c_adm} else: image_uncond = image_cond - make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": c_crossattn, "c_concat": [c_concat]} + make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": c_crossattn, "c_concat": [c_concat]} if not is_edit_model: x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) @@ -316,7 +316,7 @@ class KDiffusionSampler: sigma_sched = sigmas[steps - t_enc - 1:] xi = x + noise * sigma_sched[0] - + extra_params_kwargs = self.initialize(p) parameters = inspect.signature(self.func).parameters @@ -339,9 +339,9 @@ class KDiffusionSampler: self.model_wrap_cfg.init_latent = x self.last_latent = x extra_args={ - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale, 's_min_uncond': self.s_min_uncond } @@ -374,9 +374,9 @@ class KDiffusionSampler: self.last_latent = x samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ - 'cond': 
conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale, 's_min_uncond': self.s_min_uncond }, disable=False, callback=self.callback_state, **extra_params_kwargs)) diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py index cc38debd..497568eb 100644 --- a/modules/sub_quadratic_attention.py +++ b/modules/sub_quadratic_attention.py @@ -179,7 +179,7 @@ def efficient_dot_product_attention( chunk_idx, min(query_chunk_size, q_tokens) ) - + summarize_chunk: SummarizeChunk = partial(_summarize_chunk, scale=scale) summarize_chunk: SummarizeChunk = partial(checkpoint, summarize_chunk) if use_checkpoint else summarize_chunk compute_query_chunk_attn: ComputeQueryChunkAttn = partial( diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index 41610e03..b9621fc9 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -118,7 +118,7 @@ class PersonalizedBase(Dataset): weight = torch.ones(latent_sample.shape) else: weight = None - + if latent_sampling_method == "random": entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist, weight=weight) else: @@ -243,4 +243,4 @@ class BatchLoaderRandom(BatchLoader): return self def collate_wrapper_random(batch): - return BatchLoaderRandom(batch) \ No newline at end of file + return BatchLoaderRandom(batch) diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index d0cad09e..a009d8e8 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -125,7 +125,7 @@ def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, thr default=None ) return wh and center_crop(image, *wh) - + def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None): width = process_width diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 9e1b2b9a..d489ed1e 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -323,16 +323,16 @@ def tensorboard_add(tensorboard_writer, loss, global_step, step, learn_rate, epo tensorboard_add_scaler(tensorboard_writer, f"Learn rate/train/epoch-{epoch_num}", learn_rate, step) def tensorboard_add_scaler(tensorboard_writer, tag, value, step): - tensorboard_writer.add_scalar(tag=tag, + tensorboard_writer.add_scalar(tag=tag, scalar_value=value, global_step=step) def tensorboard_add_image(tensorboard_writer, tag, pil_image, step): # Convert a pil image to a torch tensor img_tensor = torch.as_tensor(np.array(pil_image, copy=True)) - img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0], + img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0], len(pil_image.getbands())) 
img_tensor = img_tensor.permute((2, 0, 1)) - + tensorboard_writer.add_image(tag, img_tensor, global_step=step) def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, data_root, template_file, template_filename, steps, save_model_every, create_image_every, log_directory, name="embedding"): @@ -402,7 +402,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st if initial_step >= steps: shared.state.textinfo = "Model has already been trained beyond specified max steps" return embedding, filename - + scheduler = LearnRateScheduler(learn_rate, steps, initial_step) clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else \ torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \ @@ -412,7 +412,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st # dataset loading may take a while, so input validations and early returns should be done before this shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." old_parallel_processing_allowed = shared.parallel_processing_allowed - + if shared.opts.training_enable_tensorboard: tensorboard_writer = tensorboard_setup(log_directory) @@ -439,7 +439,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st optimizer_saved_dict = torch.load(f"{filename}.optim", map_location='cpu') if embedding.checksum() == optimizer_saved_dict.get('hash', None): optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None) - + if optimizer_state_dict is not None: optimizer.load_state_dict(optimizer_state_dict) print("Loaded existing optimizer from checkpoint") @@ -485,7 +485,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st if clip_grad: clip_grad_sched.step(embedding.step) - + with devices.autocast(): x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) if use_weight: @@ -513,7 +513,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st # go back until we reach gradient accumulation steps if (j + 1) % gradient_step != 0: continue - + if clip_grad: clip_grad(embedding.vec, clip_grad_sched.learn_rate) diff --git a/modules/ui.py b/modules/ui.py index 1efb656a..ff82fff6 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1171,7 +1171,7 @@ def create_ui(): process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") - + with gr.Column(visible=False) as process_multicrop_col: gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') with gr.Row(): @@ -1183,7 +1183,7 @@ def create_ui(): with gr.Row(): process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective") process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold") - + with gr.Row(): with gr.Column(scale=3): gr.HTML(value="") @@ -1226,7 +1226,7 @@ def create_ui(): with FormRow(): embedding_learn_rate = gr.Textbox(label='Embedding Learning 
rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate") - + with FormRow(): clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False) @@ -1565,7 +1565,7 @@ def create_ui(): gr.HTML(shared.html("licenses.html"), elem_id="licenses") gr.Button(value="Show all pages", elem_id="settings_show_all_pages") - + def unload_sd_weights(): modules.sd_models.unload_model_weights() @@ -1841,15 +1841,15 @@ def versions_html(): return f""" version: {tag} - •  + • python: {python_version} - •  + • torch: {getattr(torch, '__long_version__',torch.__version__)} - •  + • xformers: {xformers_version} - •  + • gradio: {gr.__version__} - •  + • checkpoint: N/A """ diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index ed70abe5..af497733 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -467,7 +467,7 @@ def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text=" {html.escape(description)}
Added: {html.escape(added)}
{install_code} - + """ for tag in [x for x in extension_tags if x not in tags]: @@ -535,9 +535,9 @@ def create_ui(): hide_tags = gr.CheckboxGroup(value=["ads", "localization", "installed"], label="Hide extensions with tags", choices=["script", "ads", "localization", "installed"]) sort_column = gr.Radio(value="newest first", label="Order", choices=["newest first", "oldest first", "a-z", "z-a", "internal order", ], type="index") - with gr.Row(): + with gr.Row(): search_extensions_text = gr.Text(label="Search").style(container=False) - + install_result = gr.HTML() available_extensions_table = gr.HTML() diff --git a/modules/xlmr.py b/modules/xlmr.py index e056c3f6..a407a3ca 100644 --- a/modules/xlmr.py +++ b/modules/xlmr.py @@ -28,7 +28,7 @@ class BertSeriesModelWithTransformation(BertPreTrainedModel): config_class = BertSeriesConfig def __init__(self, config=None, **kargs): - # modify initialization for autoloading + # modify initialization for autoloading if config is None: config = XLMRobertaConfig() config.attention_probs_dropout_prob= 0.1 @@ -74,7 +74,7 @@ class BertSeriesModelWithTransformation(BertPreTrainedModel): text["attention_mask"] = torch.tensor( text['attention_mask']).to(device) features = self(**text) - return features['projection_state'] + return features['projection_state'] def forward( self, @@ -134,4 +134,4 @@ class BertSeriesModelWithTransformation(BertPreTrainedModel): class RobertaSeriesModelWithTransformation(BertSeriesModelWithTransformation): base_model_prefix = 'roberta' - config_class= RobertaSeriesConfig \ No newline at end of file + config_class= RobertaSeriesConfig diff --git a/pyproject.toml b/pyproject.toml index c88907be..d4a1bbf4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,6 +6,7 @@ extend-select = [ "B", "C", "I", + "W", ] exclude = [ @@ -20,7 +21,7 @@ ignore = [ "I001", # Import block is un-sorted or un-formatted "C901", # Function is too complex "C408", # Rewrite as a literal - + "W605", # invalid escape sequence, messes with some docstrings ] [tool.ruff.per-file-ignores] @@ -28,4 +29,4 @@ ignore = [ [tool.ruff.flake8-bugbear] # Allow default arguments like, e.g., `data: List[str] = fastapi.Query(None)`. 
-extend-immutable-calls = ["fastapi.Depends", "fastapi.security.HTTPBasic"] \ No newline at end of file +extend-immutable-calls = ["fastapi.Depends", "fastapi.security.HTTPBasic"] diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py index bb00fb3f..1e833fa8 100644 --- a/scripts/img2imgalt.py +++ b/scripts/img2imgalt.py @@ -149,9 +149,9 @@ class Script(scripts.Script): sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False, elem_id=self.elem_id("sigma_adjustment")) return [ - info, + info, override_sampler, - override_prompt, original_prompt, original_negative_prompt, + override_prompt, original_prompt, original_negative_prompt, override_steps, st, override_strength, cfg, randomness, sigma_adjustment, @@ -191,17 +191,17 @@ class Script(scripts.Script): self.cache = Cached(rec_noise, cfg, st, lat, original_prompt, original_negative_prompt, sigma_adjustment) rand_noise = processing.create_random_tensors(p.init_latent.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w, p=p) - + combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5) - + sampler = sd_samplers.create_sampler(p.sampler_name, p.sd_model) sigmas = sampler.model_wrap.get_sigmas(p.steps) - + noise_dt = combined_noise - (p.init_latent / sigmas[0]) - + p.seed = p.seed + 1 - + return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning, image_conditioning=p.image_conditioning) p.sample = sample_extra diff --git a/scripts/loopback.py b/scripts/loopback.py index ad6609be..2d5feaf9 100644 --- a/scripts/loopback.py +++ b/scripts/loopback.py @@ -14,7 +14,7 @@ class Script(scripts.Script): def show(self, is_img2img): return is_img2img - def ui(self, is_img2img): + def ui(self, is_img2img): loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=self.elem_id("loops")) final_denoising_strength = gr.Slider(minimum=0, maximum=1, step=0.01, label='Final denoising strength', value=0.5, elem_id=self.elem_id("final_denoising_strength")) denoising_curve = gr.Dropdown(label="Denoising strength curve", choices=["Aggressive", "Linear", "Lazy"], value="Linear") @@ -104,7 +104,7 @@ class Script(scripts.Script): p.seed = processed.seed + 1 p.denoising_strength = calculate_denoising_strength(i + 1) - + if state.skipped: break @@ -121,7 +121,7 @@ class Script(scripts.Script): all_images.append(last_image) p.inpainting_fill = original_inpainting_fill - + if state.interrupted: break @@ -132,7 +132,7 @@ class Script(scripts.Script): if opts.return_grid: grids.append(grid) - + all_images = grids + all_images processed = Processed(p, all_images, initial_seed, initial_info) diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py index c0bbecc1..ea0632b6 100644 --- a/scripts/poor_mans_outpainting.py +++ b/scripts/poor_mans_outpainting.py @@ -19,7 +19,7 @@ class Script(scripts.Script): def ui(self, is_img2img): if not is_img2img: return None - + pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=self.elem_id("pixels")) mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id=self.elem_id("mask_blur")) inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", elem_id=self.elem_id("inpainting_fill")) 
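The loopback diff above adds a "Denoising strength curve" dropdown (Aggressive, Linear, Lazy) and recomputes p.denoising_strength from calculate_denoising_strength(i + 1) on every loop. The body of that function is outside this patch, so the sketch below is an assumption: the curve shapes and the explicit parameters are illustrative, and only the option names, the per-iteration call, and the "Final denoising strength" slider come from the diff.

# Hypothetical per-iteration denoising schedule for an img2img loopback:
# interpolate from the starting strength to the final strength over
# `loops` iterations. The "Aggressive"/"Lazy" shapes are assumptions.
def calculate_denoising_strength(i, loops, initial, final, curve="Linear"):
    if loops <= 1:
        return initial
    t = (i - 1) / (loops - 1)      # 0.0 on the first loop, 1.0 on the last
    if curve == "Aggressive":
        t = t ** 0.5               # move toward `final` early
    elif curve == "Lazy":
        t = t ** 2                 # stay near `initial` longer
    return initial + (final - initial) * t

# e.g. 4 loops from 0.75 down to 0.5 with the default linear curve:
# [0.75, 0.667, 0.583, 0.5] (approximately)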
diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index fb06beab..88324fe6 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -96,7 +96,7 @@ class Script(scripts.Script): p.prompt_for_display = positive_prompt processed = process_images(p) - grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2)) + grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2)) grid = images.draw_prompt_matrix(grid, processed.images[0].width, processed.images[0].height, prompt_matrix_parts, margin_size) processed.images.insert(0, grid) processed.index_of_first_image = 1 diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 9607077a..2378816f 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -109,7 +109,7 @@ class Script(scripts.Script): def title(self): return "Prompts from file or textbox" - def ui(self, is_img2img): + def ui(self, is_img2img): checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=self.elem_id("checkbox_iterate")) checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=self.elem_id("checkbox_iterate_batch")) @@ -166,7 +166,7 @@ class Script(scripts.Script): proc = process_images(copy_p) images += proc.images - + if checkbox_iterate: p.seed = p.seed + (p.batch_size * p.n_iter) all_prompts += proc.all_prompts diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index 0b1d3096..e614c23b 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -16,7 +16,7 @@ class Script(scripts.Script): def show(self, is_img2img): return is_img2img - def ui(self, is_img2img): + def ui(self, is_img2img): info = gr.HTML("
Will upscale the image by the selected scale factor; use width and height sliders to set tile size
") overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, elem_id=self.elem_id("overlap")) scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0, elem_id=self.elem_id("scale_factor")) -- cgit v1.2.3 From 55e52c878ab669d5b11b001a4152ee1a3b8d4880 Mon Sep 17 00:00:00 2001 From: papuSpartan <30642826+papuSpartan@users.noreply.github.com> Date: Sat, 13 May 2023 09:24:56 -0500 Subject: remove command line option --- modules/cmd_args.py | 1 - modules/processing.py | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 46043e33..f4a4ab36 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -102,5 +102,4 @@ parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gra parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers") parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False) parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False) -parser.add_argument("--token-merging", action='store_true', help="Provides speed and memory improvements by merging redundant tokens. This has a more pronounced effect on higher resolutions.", default=False) parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy') diff --git a/modules/processing.py b/modules/processing.py index 8ba3a96b..6828e898 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -496,8 +496,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, - "Token merging ratio": None if not (opts.token_merging or cmd_opts.token_merging) or opts.token_merging_hr_only else opts.token_merging_ratio, - "Token merging ratio hr": None if not (opts.token_merging or cmd_opts.token_merging) else opts.token_merging_ratio_hr, + "Token merging ratio": None if not opts.token_merging or opts.token_merging_hr_only else opts.token_merging_ratio, + "Token merging ratio hr": None if not opts.token_merging else opts.token_merging_ratio_hr, "Token merging random": None if opts.token_merging_random is False else opts.token_merging_random, "Token merging merge attention": None if opts.token_merging_merge_attention is True else opts.token_merging_merge_attention, "Token merging merge cross attention": None if opts.token_merging_merge_cross_attention is False else opts.token_merging_merge_cross_attention, @@ -538,7 +538,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 'sd_vae': sd_vae.reload_vae_weights() - if (opts.token_merging or cmd_opts.token_merging) and not opts.token_merging_hr_only: + if opts.token_merging and not opts.token_merging_hr_only: sd_models.apply_token_merging(sd_model=p.sd_model, hr=False) logger.debug('Token merging applied') @@ -546,7 +546,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: finally: # undo model optimizations made by tomesd - if opts.token_merging or cmd_opts.token_merging: + if 
opts.token_merging: tomesd.remove_patch(p.sd_model) logger.debug('Token merging model optimizations removed') @@ -1004,7 +1004,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): # apply token merging optimizations from tomesd for high-res pass # check if hr_only so we are not redundantly patching - if (cmd_opts.token_merging or opts.token_merging) and (opts.token_merging_hr_only or opts.token_merging_ratio_hr != opts.token_merging_ratio): + if opts.token_merging and (opts.token_merging_hr_only or opts.token_merging_ratio_hr != opts.token_merging_ratio): # case where user wants to use separate merge ratios if not opts.token_merging_hr_only: # clean patch done by first pass. (clobbering the first patch might be fine? this might be excessive) -- cgit v1.2.3 From ac83627a31daac06f4d48b0e7db223ef807fe8e5 Mon Sep 17 00:00:00 2001 From: papuSpartan <30642826+papuSpartan@users.noreply.github.com> Date: Sat, 13 May 2023 10:23:42 -0500 Subject: heavily simplify --- modules/generation_parameters_copypaste.py | 36 ------------------------- modules/processing.py | 35 +++++++++++-------------- modules/sd_models.py | 11 +++----- modules/shared.py | 42 +++--------------------------- 4 files changed, 23 insertions(+), 101 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index fb56254f..a0a98bbc 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -282,33 +282,6 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model res["Hires resize-1"] = 0 res["Hires resize-2"] = 0 - # Infer additional override settings for token merging - token_merging_ratio = res.get("Token merging ratio", None) - token_merging_ratio_hr = res.get("Token merging ratio hr", None) - - if token_merging_ratio is not None or token_merging_ratio_hr is not None: - res["Token merging"] = 'True' - - if token_merging_ratio is None: - res["Token merging hr only"] = 'True' - else: - res["Token merging hr only"] = 'False' - - if res.get("Token merging random", None) is None: - res["Token merging random"] = 'False' - if res.get("Token merging merge attention", None) is None: - res["Token merging merge attention"] = 'True' - if res.get("Token merging merge cross attention", None) is None: - res["Token merging merge cross attention"] = 'False' - if res.get("Token merging merge mlp", None) is None: - res["Token merging merge mlp"] = 'False' - if res.get("Token merging stride x", None) is None: - res["Token merging stride x"] = '2' - if res.get("Token merging stride y", None) is None: - res["Token merging stride y"] = '2' - if res.get("Token merging maximum down sampling", None) is None: - res["Token merging maximum down sampling"] = '1' - restore_old_hires_fix_params(res) # Missing RNG means the default was set, which is GPU RNG @@ -335,17 +308,8 @@ infotext_to_setting_name_mapping = [ ('UniPC skip type', 'uni_pc_skip_type'), ('UniPC order', 'uni_pc_order'), ('UniPC lower order final', 'uni_pc_lower_order_final'), - ('Token merging', 'token_merging'), ('Token merging ratio', 'token_merging_ratio'), - ('Token merging hr only', 'token_merging_hr_only'), ('Token merging ratio hr', 'token_merging_ratio_hr'), - ('Token merging random', 'token_merging_random'), - ('Token merging merge attention', 'token_merging_merge_attention'), - ('Token merging merge cross attention', 'token_merging_merge_cross_attention'), - ('Token merging merge mlp', 
'token_merging_merge_mlp'), - ('Token merging maximum down sampling', 'token_merging_maximum_down_sampling'), - ('Token merging stride x', 'token_merging_stride_x'), - ('Token merging stride y', 'token_merging_stride_y'), ('RNG', 'randn_source'), ('NGMS', 's_min_uncond') ] diff --git a/modules/processing.py b/modules/processing.py index 6828e898..32ff61e9 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -34,7 +34,7 @@ import tomesd # add a logger for the processing module logger = logging.getLogger(__name__) # manually set output level here since there is no option to do so yet through launch options -# logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s') +logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s') # some of those options should not be changed at all because they would break the model, so I removed them from options. @@ -496,15 +496,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, - "Token merging ratio": None if not opts.token_merging or opts.token_merging_hr_only else opts.token_merging_ratio, - "Token merging ratio hr": None if not opts.token_merging else opts.token_merging_ratio_hr, - "Token merging random": None if opts.token_merging_random is False else opts.token_merging_random, - "Token merging merge attention": None if opts.token_merging_merge_attention is True else opts.token_merging_merge_attention, - "Token merging merge cross attention": None if opts.token_merging_merge_cross_attention is False else opts.token_merging_merge_cross_attention, - "Token merging merge mlp": None if opts.token_merging_merge_mlp is False else opts.token_merging_merge_mlp, - "Token merging stride x": None if opts.token_merging_stride_x == 2 else opts.token_merging_stride_x, - "Token merging stride y": None if opts.token_merging_stride_y == 2 else opts.token_merging_stride_y, - "Token merging maximum down sampling": None if opts.token_merging_maximum_down_sampling == 1 else opts.token_merging_maximum_down_sampling, + "Token merging ratio": None if opts.token_merging_ratio == 0 else opts.token_merging_ratio, + "Token merging ratio hr": None if not p.enable_hr or opts.token_merging_ratio_hr == 0 else opts.token_merging_ratio_hr, "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, @@ -538,15 +531,15 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 'sd_vae': sd_vae.reload_vae_weights() - if opts.token_merging and not opts.token_merging_hr_only: + if opts.token_merging_ratio > 0: sd_models.apply_token_merging(sd_model=p.sd_model, hr=False) - logger.debug('Token merging applied') + logger.debug(f"Token merging applied to first pass. 
Ratio: '{opts.token_merging_ratio}'") res = process_images_inner(p) finally: # undo model optimizations made by tomesd - if opts.token_merging: + if opts.token_merging_ratio > 0: tomesd.remove_patch(p.sd_model) logger.debug('Token merging model optimizations removed') @@ -1003,19 +996,21 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): devices.torch_gc() # apply token merging optimizations from tomesd for high-res pass - # check if hr_only so we are not redundantly patching - if opts.token_merging and (opts.token_merging_hr_only or opts.token_merging_ratio_hr != opts.token_merging_ratio): - # case where user wants to use separate merge ratios - if not opts.token_merging_hr_only: - # clean patch done by first pass. (clobbering the first patch might be fine? this might be excessive) + if opts.token_merging_ratio_hr > 0: + # in case the user has used separate merge ratios + if opts.token_merging_ratio > 0: tomesd.remove_patch(self.sd_model) - logger.debug('Temporarily removed token merging optimizations in preparation for next pass') + logger.debug('Adjusting token merging ratio for high-res pass') sd_models.apply_token_merging(sd_model=self.sd_model, hr=True) - logger.debug('Applied token merging for high-res pass') + logger.debug(f"Applied token merging for high-res pass. Ratio: '{opts.token_merging_ratio_hr}'") samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) + if opts.token_merging_ratio_hr > 0 or opts.token_merging_ratio > 0: + tomesd.remove_patch(self.sd_model) + logger.debug('Removed token merging optimizations from model') + self.is_hr_pass = False return samples diff --git a/modules/sd_models.py b/modules/sd_models.py index 4787193c..4c9a0a1f 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -596,11 +596,8 @@ def apply_token_merging(sd_model, hr: bool): tomesd.apply_patch( sd_model, ratio=ratio, - max_downsample=shared.opts.token_merging_maximum_down_sampling, - sx=shared.opts.token_merging_stride_x, - sy=shared.opts.token_merging_stride_y, - use_rand=shared.opts.token_merging_random, - merge_attn=shared.opts.token_merging_merge_attention, - merge_crossattn=shared.opts.token_merging_merge_cross_attention, - merge_mlp=shared.opts.token_merging_merge_mlp + use_rand=False, # can cause issues with some samplers + merge_attn=True, + merge_crossattn=False, + merge_mlp=False ) diff --git a/modules/shared.py b/modules/shared.py index 4b346585..0d96c14c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -459,47 +459,13 @@ options_templates.update(options_section((None, "Hidden options"), { })) options_templates.update(options_section(('token_merging', 'Token Merging'), { - "token_merging": OptionInfo( - False, "Enable redundant token merging via tomesd. This can provide significant speed and memory improvements.", - gr.Checkbox - ), - "token_merging_ratio": OptionInfo( - 0.5, "Merging Ratio", - gr.Slider, {"minimum": 0, "maximum": 0.9, "step": 0.1} - ), - "token_merging_hr_only": OptionInfo( - True, "Apply only to high-res fix pass. 
Disabling can yield a ~20-35% speedup on contemporary resolutions.", - gr.Checkbox - ), "token_merging_ratio_hr": OptionInfo( - 0.5, "Merging Ratio (high-res pass) - If 'Apply only to high-res' is enabled, this will always be the ratio used.", + 0, "Merging Ratio (high-res pass)", gr.Slider, {"minimum": 0, "maximum": 0.9, "step": 0.1} ), - # More advanced/niche settings: - "token_merging_random": OptionInfo( - False, "Use random perturbations - Can improve outputs for certain samplers. For others, it may cause visual artifacting.", - gr.Checkbox - ), - "token_merging_merge_attention": OptionInfo( - True, "Merge attention", - gr.Checkbox - ), - "token_merging_merge_cross_attention": OptionInfo( - False, "Merge cross attention", - gr.Checkbox - ), - "token_merging_merge_mlp": OptionInfo( - False, "Merge mlp", - gr.Checkbox - ), - "token_merging_maximum_down_sampling": OptionInfo(1, "Maximum down sampling", gr.Radio, lambda: {"choices": [1, 2, 4, 8]}), - "token_merging_stride_x": OptionInfo( - 2, "Stride - X", - gr.Slider, {"minimum": 2, "maximum": 8, "step": 2} - ), - "token_merging_stride_y": OptionInfo( - 2, "Stride - Y", - gr.Slider, {"minimum": 2, "maximum": 8, "step": 2} + "token_merging_ratio": OptionInfo( + 0, "Merging Ratio", + gr.Slider, {"minimum": 0, "maximum": 0.9, "step": 0.1} ) })) -- cgit v1.2.3 From c2fdb44880e07f43aee2f7edc1dc36a9516501e8 Mon Sep 17 00:00:00 2001 From: papuSpartan <30642826+papuSpartan@users.noreply.github.com> Date: Sat, 13 May 2023 11:11:02 -0500 Subject: fix for img2img --- modules/processing.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 32ff61e9..94fe2625 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -34,7 +34,7 @@ import tomesd # add a logger for the processing module logger = logging.getLogger(__name__) # manually set output level here since there is no option to do so yet through launch options -logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s') +# logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s') # some of those options should not be changed at all because they would break the model, so I removed them from options. 
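The second hunk of this "fix for img2img" commit, just below, swaps a direct p.enable_hr read for getattr(p, 'enable_hr', False). Only the txt2img processing class defines enable_hr, so building infotext for an img2img job hit an AttributeError. A minimal illustration of the failure mode, using stand-in classes (the class names here are hypothetical, not the repository's real processing types):

# Stand-ins for the real StableDiffusionProcessing subclasses; only the
# txt2img one defines enable_hr, mirroring why the direct read broke img2img.
class Txt2Img:
    enable_hr = True

class Img2Img:
    pass  # no enable_hr attribute at all

for p in (Txt2Img(), Img2Img()):
    enable_hr = getattr(p, 'enable_hr', False)  # never raises, unlike p.enable_hr
    print(type(p).__name__, enable_hr)          # Txt2Img True / Img2Img False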
@@ -478,6 +478,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter index = position_in_batch + iteration * p.batch_size clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers) + enable_hr = getattr(p, 'enable_hr', False) generation_params = { "Steps": p.steps, @@ -497,7 +498,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, "Token merging ratio": None if opts.token_merging_ratio == 0 else opts.token_merging_ratio, - "Token merging ratio hr": None if not p.enable_hr or opts.token_merging_ratio_hr == 0 else opts.token_merging_ratio_hr, + "Token merging ratio hr": None if not enable_hr or opts.token_merging_ratio_hr == 0 else opts.token_merging_ratio_hr, "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, -- cgit v1.2.3 From cdac5ace1456ba779d5a0171ff8757f31955bfee Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 16 May 2023 11:54:02 +0300 Subject: suppress ENSD infotext for samplers that don't use it --- modules/processing.py | 11 +++++++---- modules/sd_samplers.py | 8 +++++++- modules/sd_samplers_common.py | 21 ++++++++++++++++++++- modules/sd_samplers_compvis.py | 8 ++++++-- modules/sd_samplers_kdiffusion.py | 16 ++++++++-------- 5 files changed, 48 insertions(+), 16 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 94fe2625..15806f78 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -13,7 +13,7 @@ from skimage import exposure from typing import Any, Dict, List import modules.sd_hijack -from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts +from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common from modules.sd_hijack import model_hijack from modules.shared import opts, cmd_opts, state import modules.shared as shared @@ -480,6 +480,10 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers) enable_hr = getattr(p, 'enable_hr', False) + uses_ensd = opts.eta_noise_seed_delta != 0 + if uses_ensd: + uses_ensd = sd_samplers_common.is_sampler_using_eta_noise_seed_delta(p) + generation_params = { "Steps": p.steps, "Sampler": p.sampler_name, @@ -496,17 +500,16 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Denoising strength": getattr(p, 'denoising_strength', None), "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, - "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, + "ENSD": opts.eta_noise_seed_delta if uses_ensd else None, "Token merging ratio": None if opts.token_merging_ratio == 0 else opts.token_merging_ratio, "Token merging ratio hr": None if not enable_hr or opts.token_merging_ratio_hr == 0 else opts.token_merging_ratio_hr, "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": 
None if p.s_min_uncond == 0 else p.s_min_uncond, + **p.extra_generation_params, "Version": program_version() if opts.add_version_to_infotext else None, } - generation_params.update(p.extra_generation_params) - generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None]) negative_prompt_text = f"\nNegative prompt: {p.all_negative_prompts[index]}" if p.all_negative_prompts[index] else "" diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 4f1bf21d..f22aad8f 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -14,12 +14,18 @@ samplers_for_img2img = [] samplers_map = {} -def create_sampler(name, model): +def find_sampler_config(name): if name is not None: config = all_samplers_map.get(name, None) else: config = all_samplers[0] + return config + + +def create_sampler(name, model): + config = find_sampler_config(name) + assert config is not None, f'bad sampler name: {name}' sampler = config.constructor(model) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index bc074238..92880caf 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -2,7 +2,7 @@ from collections import namedtuple import numpy as np import torch from PIL import Image -from modules import devices, processing, images, sd_vae_approx +from modules import devices, processing, images, sd_vae_approx, sd_samplers from modules.shared import opts, state import modules.shared as shared @@ -58,6 +58,25 @@ def store_latent(decoded): shared.state.assign_current_image(sample_to_image(decoded)) +def is_sampler_using_eta_noise_seed_delta(p): + """returns whether sampler from config will use eta noise seed delta for image creation""" + + sampler_config = sd_samplers.find_sampler_config(p.sampler_name) + + eta = p.eta + + if eta is None and p.sampler is not None: + eta = p.sampler.eta + + if eta is None and sampler_config is not None: + eta = 0 if sampler_config.options.get("default_eta_is_0", False) else 1.0 + + if eta == 0: + return False + + return sampler_config.options.get("uses_ensd", False) + + class InterruptedException(BaseException): pass diff --git a/modules/sd_samplers_compvis.py b/modules/sd_samplers_compvis.py index b1ee3be7..bdae8b40 100644 --- a/modules/sd_samplers_compvis.py +++ b/modules/sd_samplers_compvis.py @@ -11,7 +11,7 @@ import modules.models.diffusion.uni_pc samplers_data_compvis = [ - sd_samplers_common.SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {}), + sd_samplers_common.SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {"default_eta_is_0": True, "uses_ensd": True}), sd_samplers_common.SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), [], {}), sd_samplers_common.SamplerData('UniPC', lambda model: VanillaStableDiffusionSampler(modules.models.diffusion.uni_pc.UniPCSampler, model), [], {}), ] @@ -134,7 +134,11 @@ class VanillaStableDiffusionSampler: self.update_step(x) def initialize(self, p): - self.eta = p.eta if p.eta is not None else shared.opts.eta_ddim + if self.is_ddim: + self.eta = p.eta if p.eta is not None else shared.opts.eta_ddim + else: + self.eta = 0.0 + if self.eta != 0.0: p.extra_generation_params["Eta DDIM"] = self.eta diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 61f23ad7..5455561a 
100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -11,21 +11,21 @@ from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback samplers_k_diffusion = [ - ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {}), + ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), ('Euler', 'sample_euler', ['k_euler'], {}), ('LMS', 'sample_lms', ['k_lms'], {}), ('Heun', 'sample_heun', ['k_heun'], {}), ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}), - ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True}), - ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}), + ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), + ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True}), ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {}), - ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}), - ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}), + ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), + ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), - ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}), - ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}), - ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}), + ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True}), + ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True}), + ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True}), ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras'}), ] -- cgit v1.2.3 From a61cbef02c7652f96d333b28e01f5230e225224e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 16 May 2023 12:36:15 +0300 Subject: add second_order field to sampler config --- modules/processing.py | 8 ++------ modules/sd_samplers_kdiffusion.py | 14 +++++++------- 2 files changed, 9 insertions(+), 13 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 15806f78..678c4468 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -681,12 +681,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: processed = Processed(p, [], p.seed, "") file.write(processed.infotext(p, 0)) - step_multiplier = 1 - if not shared.opts.dont_fix_second_order_samplers_schedule: - try: - step_multiplier = 2 if sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1 - except Exception: - pass + sampler_config = sd_samplers.find_sampler_config(p.sampler_name) + step_multiplier = 2 if sampler_config and 
From 315f109427a5dbbf48b0da560640523800fe3c1d Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Wed, 17 May 2023 10:26:32 +0300
Subject: Copy s_min_uncond to Processed

Should fix #10416

---
 modules/processing.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'modules/processing.py')

diff --git a/modules/processing.py b/modules/processing.py
index 678c4468..cd63b9a6 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -316,6 +316,7 @@ class Processed:
         self.s_tmin = p.s_tmin
         self.s_tmax = p.s_tmax
         self.s_noise = p.s_noise
+        self.s_min_uncond = p.s_min_uncond
         self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override
         self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0]
         self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0]
-- cgit v1.2.3
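Processed is the snapshot of a finished job that later code (infotext regeneration, the API) reads instead of the live StableDiffusionProcessing object, so every parameter it exposes has to be copied across; s_min_uncond was missing from that snapshot. A two-line sketch of the invariant this restores, using the Processed construction visible in the hunks above:

    processed = Processed(p, [], p.seed, "")
    assert processed.s_min_uncond == p.s_min_uncond  # illustrative only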
From 9fd6c1e3430f5947add23e2e94ac816c2546481c Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 17 May 2023 20:22:38 +0300
Subject: move some settings to the new Optimization page

add slider for token merging for img2img
rework StableDiffusionProcessing to have the token_merging_ratio field
fix a bug with applying png optimizations for live previews when they
shouldn't be applied

---
 modules/processing.py | 52 +++++++++++++++++++++++++--------------------------
 modules/progress.py   |  6 +++++-
 modules/sd_models.py  | 36 +++++++++++++++++++-----------------
 modules/shared.py     |  8 ++++++--
 4 files changed, 56 insertions(+), 46 deletions(-)

(limited to 'modules/processing.py')

diff --git a/modules/processing.py b/modules/processing.py
index cd63b9a6..2b8dd361 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -29,12 +29,6 @@ from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion
 from einops import repeat, rearrange
 from blendmodes.blend import blendLayers, BlendType

-import tomesd
-
-# add a logger for the processing module
-logger = logging.getLogger(__name__)
-# manually set output level here since there is no option to do so yet through launch options
-# logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s')

 # some of those options should not be changed at all because they would break the model, so I removed them from options.

@@ -156,6 +150,8 @@ class StableDiffusionProcessing:
         self.override_settings_restore_afterwards = override_settings_restore_afterwards
         self.is_using_inpainting_conditioning = False
         self.disable_extra_networks = False
+        self.token_merging_ratio = 0
+        self.token_merging_ratio_hr = 0

         if not seed_enable_extras:
             self.subseed = -1
@@ -171,6 +167,7 @@ class StableDiffusionProcessing:
         self.all_subseeds = None
         self.iteration = 0
         self.is_hr_pass = False
+        self.sampler = None


     @property
@@ -280,6 +277,12 @@ class StableDiffusionProcessing:
     def close(self):
         self.sampler = None

+    def get_token_merging_ratio(self, for_hr=False):
+        if for_hr:
+            return self.token_merging_ratio_hr or opts.token_merging_ratio_hr or self.token_merging_ratio or opts.token_merging_ratio
+
+        return self.token_merging_ratio or opts.token_merging_ratio
+

 class Processed:
     def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""):
@@ -309,6 +312,8 @@ class Processed:
         self.styles = p.styles
         self.job_timestamp = state.job_timestamp
         self.clip_skip = opts.CLIP_stop_at_last_layers
+        self.token_merging_ratio = p.token_merging_ratio
+        self.token_merging_ratio_hr = p.token_merging_ratio_hr

         self.eta = p.eta
         self.ddim_discretize = p.ddim_discretize
@@ -367,6 +372,9 @@ class Processed:
     def infotext(self, p: StableDiffusionProcessing, index):
         return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size)

+    def get_token_merging_ratio(self, for_hr=False):
+        return self.token_merging_ratio_hr if for_hr else self.token_merging_ratio
+

 # from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
 def slerp(val, low, high):
@@ -480,6 +488,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
     clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
     enable_hr = getattr(p, 'enable_hr', False)
+    token_merging_ratio = p.get_token_merging_ratio()
+    token_merging_ratio_hr = p.get_token_merging_ratio(for_hr=True)

     uses_ensd = opts.eta_noise_seed_delta != 0
     if uses_ensd:
@@ -502,8 +512,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
         "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None,
         "Clip skip": None if clip_skip <= 1 else clip_skip,
         "ENSD": opts.eta_noise_seed_delta if uses_ensd else None,
-        "Token merging ratio": None if opts.token_merging_ratio == 0 else opts.token_merging_ratio,
-        "Token merging ratio hr": None if not enable_hr or opts.token_merging_ratio_hr == 0 else opts.token_merging_ratio_hr,
+        "Token merging ratio": None if token_merging_ratio == 0 else token_merging_ratio,
+        "Token merging ratio hr": None if not enable_hr or token_merging_ratio_hr == 0 else token_merging_ratio_hr,
         "Init image hash": getattr(p, 'init_img_hash', None),
         "RNG": opts.randn_source if opts.randn_source != "GPU" else None,
         "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond,
@@ -536,17 +546,12 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
                 if k == 'sd_vae':
                     sd_vae.reload_vae_weights()

-        if opts.token_merging_ratio > 0:
-            sd_models.apply_token_merging(sd_model=p.sd_model, hr=False)
-            logger.debug(f"Token merging applied to first pass. Ratio: '{opts.token_merging_ratio}'")
+        sd_models.apply_token_merging(p.sd_model, p.get_token_merging_ratio())

         res = process_images_inner(p)

     finally:
-        # undo model optimizations made by tomesd
-        if opts.token_merging_ratio > 0:
-            tomesd.remove_patch(p.sd_model)
-            logger.debug('Token merging model optimizations removed')
+        sd_models.apply_token_merging(p.sd_model, 0)

         # restore opts to original state
         if p.override_settings_restore_afterwards:
             for k, v in stored_opts.items():
@@ -996,21 +1001,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
                 x = None
             devices.torch_gc()

-            # apply token merging optimizations from tomesd for high-res pass
-            if opts.token_merging_ratio_hr > 0:
-                # in case the user has used separate merge ratios
-                if opts.token_merging_ratio > 0:
-                    tomesd.remove_patch(self.sd_model)
-                    logger.debug('Adjusting token merging ratio for high-res pass')
-
-                sd_models.apply_token_merging(sd_model=self.sd_model, hr=True)
-                logger.debug(f"Applied token merging for high-res pass. Ratio: '{opts.token_merging_ratio_hr}'")
+        sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio(for_hr=True))

         samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning)

-        if opts.token_merging_ratio_hr > 0 or opts.token_merging_ratio > 0:
-            tomesd.remove_patch(self.sd_model)
-            logger.debug('Removed token merging optimizations from model')
+        sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio())

         self.is_hr_pass = False

@@ -1173,3 +1168,6 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         devices.torch_gc()

         return samples
+
+    def get_token_merging_ratio(self, for_hr=False):
+        return self.token_merging_ratio or ("token_merging_ratio" in self.override_settings and opts.token_merging_ratio) or opts.token_merging_ratio_img2img or opts.token_merging_ratio
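The new get_token_merging_ratio() methods rely on Python's falsy `or` chaining: a ratio of 0 means "unset" and defers to the next candidate, with per-job fields taking precedence over global opts and hr-specific values over base ones. A worked example under assumed values (field and option names are the ones introduced in this patch):

    p.token_merging_ratio = 0        # unset per-job, falls through
    p.token_merging_ratio_hr = 0.4   # set per-job for the hires pass
    # suppose opts.token_merging_ratio = 0.3 and opts.token_merging_ratio_hr = 0

    p.get_token_merging_ratio()             # -> 0.3, from the global base setting
    p.get_token_merging_ratio(for_hr=True)  # -> 0.4, the per-job hr value wins

A side effect of `or` chaining is that 0 can never override a non-zero global; the UI treats 0 as disabled everywhere, so that matches the intended semantics.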
diff --git a/modules/progress.py b/modules/progress.py
index 269863c9..f405f07f 100644
--- a/modules/progress.py
+++ b/modules/progress.py
@@ -98,7 +98,11 @@ def progressapi(req: ProgressRequest):

         if opts.live_previews_image_format == "png":
             # using optimize for large images takes an enormous amount of time
-            save_kwargs = {"optimize": max(*image.size) > 256}
+            if max(*image.size) <= 256:
+                save_kwargs = {"optimize": True}
+            else:
+                save_kwargs = {"optimize": False, "compress_level": 1}
+
         else:
             save_kwargs = {}
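The progress.py hunk fixes the live-preview slowdown at its source: in Pillow's PNG writer, optimize=True forces maximum zlib compression plus extra encoder passes, which is disproportionately expensive on large images, while compress_level=1 trades a slightly larger file for a much faster save. A small sketch of the two paths (illustrative, not benchmarked):

    import io
    from PIL import Image

    image = Image.new("RGB", (1024, 1024))

    small = io.BytesIO()
    image.save(small, format="png", optimize=True)  # smallest output, slowest encode

    fast = io.BytesIO()
    image.save(fast, format="png", optimize=False, compress_level=1)  # fast encode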
diff --git a/modules/sd_models.py b/modules/sd_models.py
index e612be10..4bd8783e 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -583,23 +583,27 @@ def unload_model_weights(sd_model=None, info=None):
     return sd_model


-def apply_token_merging(sd_model, hr: bool):
+def apply_token_merging(sd_model, token_merging_ratio):
     """
     Applies speed and memory optimizations from tomesd.
-
-    Args:
-        hr (bool): True if called in the context of a high-res pass
     """

-    ratio = shared.opts.token_merging_ratio
-    if hr:
-        ratio = shared.opts.token_merging_ratio_hr
-
-    tomesd.apply_patch(
-        sd_model,
-        ratio=ratio,
-        use_rand=False,  # can cause issues with some samplers
-        merge_attn=True,
-        merge_crossattn=False,
-        merge_mlp=False
-    )
+    current_token_merging_ratio = getattr(sd_model, 'applied_token_merged_ratio', 0)
+
+    if current_token_merging_ratio == token_merging_ratio:
+        return
+
+    if current_token_merging_ratio > 0:
+        tomesd.remove_patch(sd_model)
+
+    if token_merging_ratio > 0:
+        tomesd.apply_patch(
+            sd_model,
+            ratio=token_merging_ratio,
+            use_rand=False,  # can cause issues with some samplers
+            merge_attn=True,
+            merge_crossattn=False,
+            merge_mlp=False
+        )
+
+    sd_model.applied_token_merged_ratio = token_merging_ratio
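apply_token_merging() is now idempotent: it remembers the last applied ratio on the model object (applied_token_merged_ratio), returns early when nothing changed, and unpatches before repatching, so the call sites in processing.py can invoke it unconditionally on every pass. Usage sketch based on those call sites:

    sd_models.apply_token_merging(p.sd_model, 0.5)  # patches the model via tomesd
    sd_models.apply_token_merging(p.sd_model, 0.5)  # no-op, ratio unchanged
    sd_models.apply_token_merging(p.sd_model, 0.3)  # removes the old patch, applies 0.3
    sd_models.apply_token_merging(p.sd_model, 0)    # removes the patch entirely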
diff --git a/modules/shared.py b/modules/shared.py
index 47bc6d0e..76af8b9c 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -413,8 +413,13 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
     "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"),
     "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
     "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different video card vendors"),
+}))
+
+options_templates.update(options_section(('optimizations', "Optimizations"), {
+    "s_min_uncond": OptionInfo(0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 4.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
     "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"),
-    "token_merging_ratio_hr": OptionInfo(0.0, "Togen merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}),
+    "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
+    "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
 }))

 options_templates.update(options_section(('compatibility', "Compatibility"), {
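The hunk above closes the Stable Diffusion section early and opens the new Optimizations section, grouping the speed-related settings (s_min_uncond and the three token merging ratios) on one page. Elsewhere in the codebase they are read through shared.opts; a brief sketch (the fallback behaviour itself lives in processing.py, shown earlier in this patch):

    from modules import shared

    ratio = shared.opts.token_merging_ratio        # 0.0 means disabled
    ratio_hr = shared.opts.token_merging_ratio_hr  # 0.0 means fall back to the base ratio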
@@ -498,7 +503,6 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
     "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unpredictable results"),
     "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"),
     "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
-    's_min_uncond': OptionInfo(0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 4.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
     's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
     's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
     's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
-- cgit v1.2.3
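With this patch applied, token merging is controlled entirely through settings and per-job fields rather than command-line flags. An end-to-end sketch, assuming the usual txt2img entry points of this codebase (names as used in the patches above; constructor arguments may differ between versions):

    from modules import processing, shared

    p = processing.StableDiffusionProcessingTxt2Img(
        sd_model=shared.sd_model,
        prompt="a photo of a cat",
        steps=20,
    )
    p.token_merging_ratio = 0.5     # per-job override of opts.token_merging_ratio
    p.token_merging_ratio_hr = 0.3  # separate ratio for the hires pass

    res = processing.process_images(p)  # patches via tomesd, samples, restores ratio 0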