From 8179901f9cc3fe7d2d4b24edc8fe59275a1332f8 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Sun, 12 Mar 2023 09:12:54 -0400 Subject: disable pil checks --- scripts/xyz_grid.py | 1 + 1 file changed, 1 insertion(+) (limited to 'scripts/xyz_grid.py') diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index ce584981..f6513dae 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -515,6 +515,7 @@ class Script(scripts.Script): zs = process_axis(z_opt, z_values) # this could be moved to common code, but unlikely to be ever triggered anywhere else + Image.MAX_IMAGE_PIXELS = None # disable check in Pillow and rely on check below to allow large custom image sizes grid_mp = round(len(xs) * len(ys) * len(zs) * p.width * p.height / 1000000) assert grid_mp < opts.img_max_size_mp, f'Error: Resulting grid would be too large ({grid_mp} MPixels) (max configured size is {opts.img_max_size_mp} MPixels)' -- cgit v1.2.3 From e7ac09b25a885a66d3e789b1c175885ad3165e92 Mon Sep 17 00:00:00 2001 From: EllangoK Date: Wed, 22 Mar 2023 02:11:38 -0400 Subject: fixes xyz extra_generation_params not being saved --- scripts/xyz_grid.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'scripts/xyz_grid.py') diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index ce584981..3551aca0 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -247,7 +247,7 @@ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend state.job = f"{index(ix, iy, iz) + 1} out of {list_size}" - processed: Processed = cell(x, y, z) + processed: Processed = cell(x, y, z, ix, iy, iz) if processed_result is None: # Use our first processed result object as a template container to hold our full results @@ -558,8 +558,6 @@ class Script(scripts.Script): print(f"X/Y/Z plot will create {len(xs) * len(ys) * len(zs) * image_cell_count} images on {len(zs)} {len(xs)}x{len(ys)} grid{plural_s}{cell_console_text}. (Total steps to process: {total_steps})") shared.total_tqdm.updateTotal(total_steps) - grid_infotext = [None] - state.xyz_plot_x = AxisInfo(x_opt, xs) state.xyz_plot_y = AxisInfo(y_opt, ys) state.xyz_plot_z = AxisInfo(z_opt, zs) @@ -588,7 +586,9 @@ class Script(scripts.Script): else: second_axes_processed = 'y' - def cell(x, y, z): + grid_infotext = [None] * (1 + len(zs)) + + def cell(x, y, z, ix, iy, iz): if shared.state.interrupted: return Processed(p, [], p.seed, "") @@ -600,7 +600,9 @@ class Script(scripts.Script): res = process_images(pc) - if grid_infotext[0] is None: + # Sets subgrid infotexts + subgrid_index = 1 + iz + if grid_infotext[subgrid_index] is None and ix == 0 and iy == 0: pc.extra_generation_params = copy(pc.extra_generation_params) pc.extra_generation_params['Script'] = self.title() @@ -616,6 +618,12 @@ class Script(scripts.Script): if y_opt.label in ["Seed", "Var. 
seed"] and not no_fixed_seeds: pc.extra_generation_params["Fixed Y Values"] = ", ".join([str(y) for y in ys]) + grid_infotext[subgrid_index] = processing.create_infotext(pc, pc.all_prompts, pc.all_seeds, pc.all_subseeds) + + # Sets main grid infotext + if grid_infotext[0] is None and ix == 0 and iy == 0 and iz == 0: + pc.extra_generation_params = copy(pc.extra_generation_params) + if z_opt.label != 'Nothing': pc.extra_generation_params["Z Type"] = z_opt.label pc.extra_generation_params["Z Values"] = z_values @@ -650,6 +658,9 @@ class Script(scripts.Script): z_count = len(zs) + # Set the grid infotexts to the real ones with extra_generation_params (1 main grid + z_count sub-grids) + processed.infotexts[:1+z_count] = grid_infotext[:1+z_count] + if not include_lone_images: # Don't need sub-images anymore, drop from list: processed.images = processed.images[:z_count+1] -- cgit v1.2.3 From 7ea5d395c44be208f654b07ec7993aa2952f2510 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sun, 19 Feb 2023 03:45:43 -0800 Subject: Add upscaler to img2img --- modules/generation_parameters_copypaste.py | 4 ++-- modules/img2img.py | 3 ++- modules/processing.py | 23 +++++++++++++++++------ modules/ui.py | 10 +++++++--- scripts/xyz_grid.py | 1 + style.css | 2 +- 6 files changed, 30 insertions(+), 13 deletions(-) (limited to 'scripts/xyz_grid.py') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 459de080..0ad2ad4f 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -282,8 +282,8 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model res["Hires resize-1"] = 0 res["Hires resize-2"] = 0 - if "Img2Img Upscale" not in res: - res["Img2Img Upscale"] = 1 + if "Img2Img upscale" not in res: + res["Img2Img upscale"] = 1 restore_old_hires_fix_params(res) diff --git a/modules/img2img.py b/modules/img2img.py index d05fa750..959dd96e 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -78,7 +78,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): processed_image.save(os.path.join(output_dir, filename)) -def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, scale: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args): +def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: 
int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, scale: float, upscaler: str, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5 @@ -150,6 +150,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s inpainting_mask_invert=inpainting_mask_invert, override_settings=override_settings, scale=scale, + upscaler=upscaler, ) p.scripts = modules.scripts.scripts_txt2img diff --git a/modules/processing.py b/modules/processing.py index fc4b166c..afb8cfd1 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -929,7 +929,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): sampler = None - def __init__(self, init_images: Optional[list] = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: Optional[float] = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: Optional[float] = None, scale: float = 0, **kwargs): + def __init__(self, init_images: Optional[list] = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: Optional[float] = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: Optional[float] = None, scale: float = 0, upscaler: Optional[str] = None, **kwargs): super().__init__(**kwargs) self.init_images = init_images @@ -950,6 +950,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.nmask = None self.image_conditioning = None self.scale = scale + self.upscaler = upscaler def get_final_size(self): if self.scale > 1: @@ -966,7 +967,16 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): crop_region = None if self.scale > 1: - self.extra_generation_params["Img2Img Upscale"] = self.scale + self.extra_generation_params["Img2Img upscale"] = self.scale + + # Non-latent upscalers are run before sampling + # Latent upscalers are run during sampling + init_upscaler = None + if self.upscaler is not None: + self.extra_generation_params["Img2Img upscaler"] = self.upscaler + if self.upscaler not in shared.latent_upscale_modes: + assert len([x for x in shared.sd_upscalers if x.name == self.upscaler]) > 0, f"could not find upscaler named {self.upscaler}" + init_upscaler = self.upscaler self.width, self.height = self.get_final_size() @@ -992,7 +1002,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): image_mask = images.resize_image(2, mask, self.width, self.height) self.paste_to = (x1, y1, x2-x1, y2-y1) else: - image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height) + image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height, init_upscaler) np_mask = np.array(image_mask) np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8) self.mask_for_overlay = Image.fromarray(np_mask) @@ -1009,7 +1019,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): image = images.flatten(img, 
opts.img2img_background_color) if crop_region is None and self.resize_mode != 3: - image = images.resize_image(self.resize_mode, image, self.width, self.height) + image = images.resize_image(self.resize_mode, image, self.width, self.height, init_upscaler) if image_mask is not None: image_masked = Image.new('RGBa', (image.width, image.height)) @@ -1054,8 +1064,9 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image)) - if self.resize_mode == 3: - self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear") + latent_scale_mode = shared.latent_upscale_modes.get(self.upscaler, None) if self.upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest") + if latent_scale_mode is not None: + self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"]) if image_mask is not None: init_mask = latent_mask diff --git a/modules/ui.py b/modules/ui.py index bb548f92..24ab0af7 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -767,7 +767,7 @@ def create_ui(): ) with FormRow(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") + resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize") for category in ordered_ui_categories(): if category == "sampler": @@ -797,7 +797,9 @@ def create_ui(): with FormRow(): cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") + with FormRow(): + upscaler = gr.Dropdown(label="Upscaler", elem_id="img2img_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") elif category == "seed": seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') @@ -934,6 +936,7 @@ def create_ui(): height, width, scale, + upscaler, resize_mode, inpaint_full_res, inpaint_full_res_padding, @@ -1019,7 +1022,8 @@ def create_ui(): (seed, "Seed"), (width, "Size-1"), (height, "Size-2"), - (scale, "Img2Img Upscale"), + (scale, "Img2Img upscale"), + (upscaler, "Img2Img upscaler"), (batch_size, "Batch size"), (subseed, "Variation seed"), (subseed_strength, "Variation seed strength"), diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 3895a795..3f6c1997 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -220,6 +220,7 @@ axis_options = [ AxisOption("Clip skip", int, apply_clip_skip), AxisOption("Denoising", float, apply_field("denoising_strength")), 
AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]), + AxisOptionImg2Img("Upscaler", str, apply_field("upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]), AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")), AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: list(sd_vae.vae_dict)), AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)), diff --git a/style.css b/style.css index 7d58b3b2..e824256f 100644 --- a/style.css +++ b/style.css @@ -779,7 +779,7 @@ footer { #img2img_finalres{ min-height: 0 !important; padding: .625rem .75rem; - margin-left: -0.75em + margin-left: 0.25em } #img2img_finalres .resolution{ -- cgit v1.2.3 From 433b3ab7017556a19173a86d1215ed0a0b5b1396 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 28 Mar 2023 20:36:57 +0300 Subject: Revert "Merge pull request #7931 from space-nuko/img2img-enhance" This reverts commit 426875937048e21305ac24bea53df06523bdaa81, reversing changes made to 1b63afbedc7789c0eb9a4742b780ab304d7a9caf. --- javascript/ui.js | 22 +-------- modules/generation_parameters_copypaste.py | 3 -- modules/img2img.py | 4 +- modules/processing.py | 37 ++------------- modules/ui.py | 73 ++---------------------------- scripts/xyz_grid.py | 1 - style.css | 4 +- 7 files changed, 13 insertions(+), 131 deletions(-) (limited to 'scripts/xyz_grid.py') diff --git a/javascript/ui.js b/javascript/ui.js index a73eeaa2..4a440193 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -132,14 +132,7 @@ function create_tab_index_args(tabId, args){ function get_img2img_tab_index() { let res = args_to_array(arguments) - res.splice(-2) // gradio also sends outputs to the arguments, pop them off - res[0] = get_tab_index('mode_img2img') - return res -} - -function get_img2img_tab_index_for_res_preview() { - let res = args_to_array(arguments) - res.splice(-1) // gradio also sends outputs to the arguments, pop them off + res.splice(-2) res[0] = get_tab_index('mode_img2img') return res } @@ -368,16 +361,3 @@ function selectCheckpoint(name){ desiredCheckpointName = name; gradioApp().getElementById('change_checkpoint').click() } - - -function onCalcResolutionImg2Img(mode, scale, width, height, resize_mode, init_img, sketch, init_img_with_mask, inpaint_color_sketch, init_img_inpaint){ - i2iScale = gradioApp().getElementById('img2img_scale') - i2iWidth = gradioApp().getElementById('img2img_width') - i2iHeight = gradioApp().getElementById('img2img_height') - - setInactive(i2iScale, scale == 1) - setInactive(i2iWidth, scale > 1) - setInactive(i2iHeight, scale > 1) - - return []; -} diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 0ad2ad4f..6df76858 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -282,9 +282,6 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model res["Hires resize-1"] = 0 res["Hires resize-2"] = 0 - if "Img2Img upscale" not in res: - res["Img2Img upscale"] = 1 - restore_old_hires_fix_params(res) return res diff --git a/modules/img2img.py b/modules/img2img.py index 959dd96e..c973b770 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -78,7 +78,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): processed_image.save(os.path.join(output_dir, 
filename)) -def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, scale: float, upscaler: str, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args): +def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5 @@ -149,8 +149,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s inpaint_full_res_padding=inpaint_full_res_padding, inpainting_mask_invert=inpainting_mask_invert, override_settings=override_settings, - scale=scale, - upscaler=upscaler, ) p.scripts = modules.scripts.scripts_txt2img diff --git a/modules/processing.py b/modules/processing.py index 509b80b9..6d9c6a8d 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -946,7 +946,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): sampler = None - def __init__(self, init_images: Optional[list] = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: Optional[float] = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: Optional[float] = None, scale: float = 0, upscaler: Optional[str] = None, **kwargs): + def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs): super().__init__(**kwargs) self.init_images = init_images @@ -966,37 +966,11 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.mask = None self.nmask = None self.image_conditioning = None - self.scale = scale - self.upscaler = upscaler - - def get_final_size(self): - if self.scale > 1: - img = self.init_images[0] - width = int(img.width * 
self.scale) - height = int(img.height * self.scale) - return width, height - else: - return self.width, self.height - def init(self, all_prompts, all_seeds, all_subseeds): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) crop_region = None - if self.scale > 1: - self.extra_generation_params["Img2Img upscale"] = self.scale - - # Non-latent upscalers are run before sampling - # Latent upscalers are run during sampling - init_upscaler = None - if self.upscaler is not None: - self.extra_generation_params["Img2Img upscaler"] = self.upscaler - if self.upscaler not in shared.latent_upscale_modes: - assert len([x for x in shared.sd_upscalers if x.name == self.upscaler]) > 0, f"could not find upscaler named {self.upscaler}" - init_upscaler = self.upscaler - - self.width, self.height = self.get_final_size() - image_mask = self.image_mask if image_mask is not None: @@ -1019,7 +993,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): image_mask = images.resize_image(2, mask, self.width, self.height) self.paste_to = (x1, y1, x2-x1, y2-y1) else: - image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height, init_upscaler) + image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height) np_mask = np.array(image_mask) np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8) self.mask_for_overlay = Image.fromarray(np_mask) @@ -1036,7 +1010,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): image = images.flatten(img, opts.img2img_background_color) if crop_region is None and self.resize_mode != 3: - image = images.resize_image(self.resize_mode, image, self.width, self.height, init_upscaler) + image = images.resize_image(self.resize_mode, image, self.width, self.height) if image_mask is not None: image_masked = Image.new('RGBa', (image.width, image.height)) @@ -1081,9 +1055,8 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image)) - latent_scale_mode = shared.latent_upscale_modes.get(self.upscaler, None) if self.upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest") - if latent_scale_mode is not None: - self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"]) + if self.resize_mode == 3: + self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear") if image_mask is not None: init_mask = latent_mask diff --git a/modules/ui.py b/modules/ui.py index f22da16a..eb5fcd3f 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -15,7 +15,6 @@ import warnings import gradio as gr import gradio.routes import gradio.utils -from gradio.events import Releaseable import numpy as np from PIL import Image, PngImagePlugin from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call @@ -128,26 +127,6 @@ def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resiz return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" -def calc_resolution_img2img(mode, scale, resize_x, resize_y, resize_mode, *i2i_images): - init_img = None - if mode in {0, 1, 3, 4}: - init_img = i2i_images[mode] - elif mode == 2: - init_img = 
i2i_images[mode]["image"] - - if not init_img: - return "" - - if scale > 1: - width = int(init_img.width * scale) - height = int(init_img.height * scale) - else: - width = resize_x - height = resize_y - - return f"resize: from {init_img.width}x{init_img.height} to {width}x{height}" - - def apply_styles(prompt, prompt_neg, styles): prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) @@ -756,7 +735,7 @@ def create_ui(): ) with FormRow(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize") + resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") for category in ordered_ui_categories(): if category == "sampler": @@ -765,13 +744,8 @@ def create_ui(): elif category == "dimensions": with FormRow(): with gr.Column(elem_id="img2img_column_size", scale=4): - with FormRow(variant="compact"): - final_resolution = FormHTML(value="", elem_id="img2img_finalres", label="Upscaled resolution", interactive=False) - with FormRow(variant="compact"): - scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=1.0, elem_id="img2img_scale") - with FormRow(variant="compact"): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") @@ -786,9 +760,7 @@ def create_ui(): with FormRow(): cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") - with FormRow(): - upscaler = gr.Dropdown(label="Upscaler", elem_id="img2img_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") elif category == "seed": seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') @@ -841,39 +813,6 @@ def create_ui(): outputs=[inpaint_controls, mask_alpha], ) - img2img_resolution_preview_inputs = [dummy_component, # filled in by selected img2img tab index in _js - scale, width, height, resize_mode, - init_img, sketch, init_img_with_mask, inpaint_color_sketch, init_img_inpaint] - for input in img2img_resolution_preview_inputs[1:]: - if isinstance(input, Releaseable): - input.release( - fn=calc_resolution_img2img, - 
_js="get_img2img_tab_index_for_res_preview", - inputs=img2img_resolution_preview_inputs, - outputs=[final_resolution], - show_progress=False, - ).success( - None, - _js="onCalcResolutionImg2Img", - inputs=img2img_resolution_preview_inputs, - outputs=[], - show_progress=False, - ) - else: - input.change( - fn=calc_resolution_img2img, - _js="get_img2img_tab_index_for_res_preview", - inputs=img2img_resolution_preview_inputs, - outputs=[final_resolution], - show_progress=False, - ).success( - None, - _js="onCalcResolutionImg2Img", - inputs=img2img_resolution_preview_inputs, - outputs=[], - show_progress=False, - ) - img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) @@ -922,8 +861,6 @@ def create_ui(): subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, height, width, - scale, - upscaler, resize_mode, inpaint_full_res, inpaint_full_res_padding, @@ -1009,8 +946,6 @@ def create_ui(): (seed, "Seed"), (width, "Size-1"), (height, "Size-2"), - (scale, "Img2Img upscale"), - (upscaler, "Img2Img upscaler"), (batch_size, "Batch size"), (subseed, "Variation seed"), (subseed_strength, "Variation seed strength"), diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 3f6c1997..3895a795 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -220,7 +220,6 @@ axis_options = [ AxisOption("Clip skip", int, apply_clip_skip), AxisOption("Denoising", float, apply_field("denoising_strength")), AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]), - AxisOptionImg2Img("Upscaler", str, apply_field("upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]), AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")), AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: list(sd_vae.vae_dict)), AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)), diff --git a/style.css b/style.css index 379a89dc..de16a7f2 100644 --- a/style.css +++ b/style.css @@ -287,13 +287,13 @@ button.custom-button{ border-radius: 0 0.5rem 0.5rem 0; } -#txtimg_hr_finalres, #img2img_finalres { +#txtimg_hr_finalres{ min-height: 0 !important; padding: .625rem .75rem; margin-left: -0.75em } -#txtimg_hr_finalres .resolution, #img2img_finalres .resolution{ +#txtimg_hr_finalres .resolution{ font-weight: bold; } -- cgit v1.2.3