From 68999d0b15d612965e7bc7feb62d6b4d55e112fa Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sat, 25 Mar 2023 12:52:14 -0400 Subject: Add upscale slider to img2img --- modules/processing.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 2e5a363f..fc4b166c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -929,7 +929,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): sampler = None - def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs): + def __init__(self, init_images: Optional[list] = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: Optional[float] = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: Optional[float] = None, scale: float = 0, **kwargs): super().__init__(**kwargs) self.init_images = init_images @@ -949,11 +949,27 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.mask = None self.nmask = None self.image_conditioning = None + self.scale = scale + + def get_final_size(self): + if self.scale > 1: + img = self.init_images[0] + width = int(img.width * self.scale) + height = int(img.height * self.scale) + return width, height + else: + return self.width, self.height + def init(self, all_prompts, all_seeds, all_subseeds): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) crop_region = None + if self.scale > 1: + self.extra_generation_params["Img2Img Upscale"] = self.scale + + self.width, self.height = self.get_final_size() + image_mask = self.image_mask if image_mask is not None: -- cgit v1.2.3 From 7ea5d395c44be208f654b07ec7993aa2952f2510 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sun, 19 Feb 2023 03:45:43 -0800 Subject: Add upscaler to img2img --- modules/generation_parameters_copypaste.py | 4 ++-- modules/img2img.py | 3 ++- modules/processing.py | 23 +++++++++++++++++------ modules/ui.py | 10 +++++++--- scripts/xyz_grid.py | 1 + style.css | 2 +- 6 files changed, 30 insertions(+), 13 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 459de080..0ad2ad4f 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -282,8 +282,8 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model res["Hires resize-1"] = 0 res["Hires resize-2"] = 0 - if "Img2Img Upscale" not in res: - res["Img2Img Upscale"] = 1 + if "Img2Img upscale" not in res: + res["Img2Img upscale"] = 1 restore_old_hires_fix_params(res) diff --git a/modules/img2img.py b/modules/img2img.py index d05fa750..959dd96e 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -78,7 +78,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): processed_image.save(os.path.join(output_dir, filename)) -def 
img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, scale: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args): +def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, scale: float, upscaler: str, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5 @@ -150,6 +150,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s inpainting_mask_invert=inpainting_mask_invert, override_settings=override_settings, scale=scale, + upscaler=upscaler, ) p.scripts = modules.scripts.scripts_txt2img diff --git a/modules/processing.py b/modules/processing.py index fc4b166c..afb8cfd1 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -929,7 +929,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): sampler = None - def __init__(self, init_images: Optional[list] = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: Optional[float] = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: Optional[float] = None, scale: float = 0, **kwargs): + def __init__(self, init_images: Optional[list] = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: Optional[float] = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: Optional[float] = None, scale: float = 0, upscaler: Optional[str] = None, **kwargs): super().__init__(**kwargs) self.init_images = init_images @@ -950,6 +950,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.nmask = None self.image_conditioning = None self.scale = scale + self.upscaler = upscaler def get_final_size(self): if self.scale > 1: @@ -966,7 +967,16 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): 
crop_region = None if self.scale > 1: - self.extra_generation_params["Img2Img Upscale"] = self.scale + self.extra_generation_params["Img2Img upscale"] = self.scale + + # Non-latent upscalers are run before sampling + # Latent upscalers are run during sampling + init_upscaler = None + if self.upscaler is not None: + self.extra_generation_params["Img2Img upscaler"] = self.upscaler + if self.upscaler not in shared.latent_upscale_modes: + assert len([x for x in shared.sd_upscalers if x.name == self.upscaler]) > 0, f"could not find upscaler named {self.upscaler}" + init_upscaler = self.upscaler self.width, self.height = self.get_final_size() @@ -992,7 +1002,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): image_mask = images.resize_image(2, mask, self.width, self.height) self.paste_to = (x1, y1, x2-x1, y2-y1) else: - image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height) + image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height, init_upscaler) np_mask = np.array(image_mask) np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8) self.mask_for_overlay = Image.fromarray(np_mask) @@ -1009,7 +1019,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): image = images.flatten(img, opts.img2img_background_color) if crop_region is None and self.resize_mode != 3: - image = images.resize_image(self.resize_mode, image, self.width, self.height) + image = images.resize_image(self.resize_mode, image, self.width, self.height, init_upscaler) if image_mask is not None: image_masked = Image.new('RGBa', (image.width, image.height)) @@ -1054,8 +1064,9 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image)) - if self.resize_mode == 3: - self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear") + latent_scale_mode = shared.latent_upscale_modes.get(self.upscaler, None) if self.upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest") + if latent_scale_mode is not None: + self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"]) if image_mask is not None: init_mask = latent_mask diff --git a/modules/ui.py b/modules/ui.py index bb548f92..24ab0af7 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -767,7 +767,7 @@ def create_ui(): ) with FormRow(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") + resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize") for category in ordered_ui_categories(): if category == "sampler": @@ -797,7 +797,9 @@ def create_ui(): with FormRow(): cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising 
strength', value=0.75, elem_id="img2img_denoising_strength") + with FormRow(): + upscaler = gr.Dropdown(label="Upscaler", elem_id="img2img_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") elif category == "seed": seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') @@ -934,6 +936,7 @@ def create_ui(): height, width, scale, + upscaler, resize_mode, inpaint_full_res, inpaint_full_res_padding, @@ -1019,7 +1022,8 @@ def create_ui(): (seed, "Seed"), (width, "Size-1"), (height, "Size-2"), - (scale, "Img2Img Upscale"), + (scale, "Img2Img upscale"), + (upscaler, "Img2Img upscaler"), (batch_size, "Batch size"), (subseed, "Variation seed"), (subseed_strength, "Variation seed strength"), diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 3895a795..3f6c1997 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -220,6 +220,7 @@ axis_options = [ AxisOption("Clip skip", int, apply_clip_skip), AxisOption("Denoising", float, apply_field("denoising_strength")), AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]), + AxisOptionImg2Img("Upscaler", str, apply_field("upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]), AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")), AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: list(sd_vae.vae_dict)), AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)), diff --git a/style.css b/style.css index 7d58b3b2..e824256f 100644 --- a/style.css +++ b/style.css @@ -779,7 +779,7 @@ footer { #img2img_finalres{ min-height: 0 !important; padding: .625rem .75rem; - margin-left: -0.75em + margin-left: 0.25em } #img2img_finalres .resolution{ -- cgit v1.2.3 From 8a34671fe91e142bce9e5556cca2258b3be9dd6e Mon Sep 17 00:00:00 2001 From: MrCheeze Date: Fri, 24 Mar 2023 22:48:16 -0400 Subject: Add support for the Variations models (unclip-h and unclip-l) --- launch.py | 2 +- models/karlo/ViT-L-14_stats.th | Bin 0 -> 7079 bytes modules/lowvram.py | 10 ++++++---- modules/processing.py | 41 +++++++++++++++++++++++++++----------- modules/sd_models.py | 5 +++++ modules/sd_models_config.py | 7 +++++++ modules/sd_samplers_compvis.py | 31 +++++++++++++++++++++------- modules/sd_samplers_kdiffusion.py | 19 ++++++++++++------ 8 files changed, 85 insertions(+), 30 deletions(-) create mode 100644 models/karlo/ViT-L-14_stats.th (limited to 'modules/processing.py') diff --git a/launch.py b/launch.py index b943fed2..e70df7ba 100644 --- a/launch.py +++ b/launch.py @@ -252,7 +252,7 @@ def prepare_environment(): codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git') blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git') - stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "47b6b607fdd31875c9279cd2f4f16b92e4ea958e") + stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf") taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', 
"24268930bf1dce879235a7fddd0b2355b84d7ea6") k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "5b3af030dd83e0297272d861c19477735d0317ec") codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af") diff --git a/models/karlo/ViT-L-14_stats.th b/models/karlo/ViT-L-14_stats.th new file mode 100644 index 00000000..a6a06e94 Binary files /dev/null and b/models/karlo/ViT-L-14_stats.th differ diff --git a/modules/lowvram.py b/modules/lowvram.py index 042a0254..e254cc13 100644 --- a/modules/lowvram.py +++ b/modules/lowvram.py @@ -55,12 +55,12 @@ def setup_for_low_vram(sd_model, use_medvram): if hasattr(sd_model.cond_stage_model, 'model'): sd_model.cond_stage_model.transformer = sd_model.cond_stage_model.model - # remove four big modules, cond, first_stage, depth (if applicable), and unet from the model and then + # remove several big modules: cond, first_stage, depth/embedder (if applicable), and unet from the model and then # send the model to GPU. Then put modules back. the modules will be in CPU. - stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, getattr(sd_model, 'depth_model', None), sd_model.model - sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.model = None, None, None, None + stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, getattr(sd_model, 'depth_model', None), getattr(sd_model, 'embedder', None), sd_model.model + sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.embedder, sd_model.model = None, None, None, None, None sd_model.to(devices.device) - sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.model = stored + sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.embedder, sd_model.model = stored # register hooks for those the first three models sd_model.cond_stage_model.transformer.register_forward_pre_hook(send_me_to_gpu) @@ -69,6 +69,8 @@ def setup_for_low_vram(sd_model, use_medvram): sd_model.first_stage_model.decode = first_stage_model_decode_wrap if sd_model.depth_model: sd_model.depth_model.register_forward_pre_hook(send_me_to_gpu) + if sd_model.embedder: + sd_model.embedder.register_forward_pre_hook(send_me_to_gpu) parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model if hasattr(sd_model.cond_stage_model, 'model'): diff --git a/modules/processing.py b/modules/processing.py index 59717b4c..1451811c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -78,21 +78,27 @@ def apply_overlay(image, paste_loc, index, overlays): def txt2img_image_conditioning(sd_model, x, width, height): - if sd_model.model.conditioning_key not in {'hybrid', 'concat'}: - # Dummy zero conditioning if we're not using inpainting model. - # Still takes up a bit of memory, but no encoder call. - # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size. - return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device) + if sd_model.model.conditioning_key in {'hybrid', 'concat'}: # Inpainting models - # The "masked-image" in this case will just be all zeros since the entire image is masked. 
- image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device) - image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning)) + # The "masked-image" in this case will just be all zeros since the entire image is masked. + image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device) + image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning)) - # Add the fake full 1s mask to the first dimension. - image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0) - image_conditioning = image_conditioning.to(x.dtype) + # Add the fake full 1s mask to the first dimension. + image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0) + image_conditioning = image_conditioning.to(x.dtype) - return image_conditioning + return image_conditioning + + elif sd_model.model.conditioning_key == "crossattn-adm": # UnCLIP models + + return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device) + + else: + # Dummy zero conditioning if we're not using inpainting or unclip models. + # Still takes up a bit of memory, but no encoder call. + # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size. + return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device) class StableDiffusionProcessing: @@ -190,6 +196,14 @@ class StableDiffusionProcessing: return conditioning_image + def unclip_image_conditioning(self, source_image): + c_adm = self.sd_model.embedder(source_image) + if self.sd_model.noise_augmentor is not None: + noise_level = 0 # TODO: Allow other noise levels? + c_adm, noise_level_emb = self.sd_model.noise_augmentor(c_adm, noise_level=repeat(torch.tensor([noise_level]).to(c_adm.device), '1 -> b', b=c_adm.shape[0])) + c_adm = torch.cat((c_adm, noise_level_emb), 1) + return c_adm + def inpainting_image_conditioning(self, source_image, latent_image, image_mask=None): self.is_using_inpainting_conditioning = True @@ -241,6 +255,9 @@ class StableDiffusionProcessing: if self.sampler.conditioning_key in {'hybrid', 'concat'}: return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask) + if self.sampler.conditioning_key == "crossattn-adm": + return self.unclip_image_conditioning(source_image) + # Dummy zero conditioning if we're not using inpainting or depth model. 
return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1) diff --git a/modules/sd_models.py b/modules/sd_models.py index f0cb1240..c1a80d82 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -383,6 +383,11 @@ def repair_config(sd_config): elif shared.cmd_opts.upcast_sampling: sd_config.model.params.unet_config.params.use_fp16 = True + # For UnCLIP-L, override the hardcoded karlo directory + if hasattr(sd_config.model.params, "noise_aug_config") and hasattr(sd_config.model.params.noise_aug_config.params, "clip_stats_path"): + karlo_path = os.path.join(paths.models_path, 'karlo') + sd_config.model.params.noise_aug_config.params.clip_stats_path = sd_config.model.params.noise_aug_config.params.clip_stats_path.replace("checkpoints/karlo_models", karlo_path) + sd1_clip_weight = 'cond_stage_model.transformer.text_model.embeddings.token_embedding.weight' sd2_clip_weight = 'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight' diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py index 91c21700..9398f528 100644 --- a/modules/sd_models_config.py +++ b/modules/sd_models_config.py @@ -14,6 +14,8 @@ config_sd2 = os.path.join(sd_repo_configs_path, "v2-inference.yaml") config_sd2v = os.path.join(sd_repo_configs_path, "v2-inference-v.yaml") config_sd2_inpainting = os.path.join(sd_repo_configs_path, "v2-inpainting-inference.yaml") config_depth_model = os.path.join(sd_repo_configs_path, "v2-midas-inference.yaml") +config_unclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-l-inference.yaml") +config_unopenclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-h-inference.yaml") config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml") config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml") config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml") @@ -65,9 +67,14 @@ def is_using_v_parameterization_for_sd2(state_dict): def guess_model_config_from_state_dict(sd, filename): sd2_cond_proj_weight = sd.get('cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight', None) diffusion_model_input = sd.get('model.diffusion_model.input_blocks.0.0.weight', None) + sd2_variations_weight = sd.get('embedder.model.ln_final.weight', None) if sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias', None) is not None: return config_depth_model + elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 768: + return config_unclip + elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 1024: + return config_unopenclip if sd2_cond_proj_weight is not None and sd2_cond_proj_weight.shape[1] == 1024: if diffusion_model_input.shape[1] == 9: diff --git a/modules/sd_samplers_compvis.py b/modules/sd_samplers_compvis.py index 083da18c..bfcc5574 100644 --- a/modules/sd_samplers_compvis.py +++ b/modules/sd_samplers_compvis.py @@ -70,8 +70,13 @@ class VanillaStableDiffusionSampler: # Have to unwrap the inpainting conditioning here to perform pre-processing image_conditioning = None + uc_image_conditioning = None if isinstance(cond, dict): - image_conditioning = cond["c_concat"][0] + if self.conditioning_key == "crossattn-adm": + image_conditioning = cond["c_adm"] + uc_image_conditioning = unconditional_conditioning["c_adm"] + else: + image_conditioning = cond["c_concat"][0] cond = cond["c_crossattn"][0] unconditional_conditioning = unconditional_conditioning["c_crossattn"][0] @@ -98,8 +103,12 @@ class VanillaStableDiffusionSampler: # 
Wrap the image conditioning back up since the DDIM code can accept the dict directly. # Note that they need to be lists because it just concatenates them later. if image_conditioning is not None: - cond = {"c_concat": [image_conditioning], "c_crossattn": [cond]} - unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]} + if self.conditioning_key == "crossattn-adm": + cond = {"c_adm": image_conditioning, "c_crossattn": [cond]} + unconditional_conditioning = {"c_adm": uc_image_conditioning, "c_crossattn": [unconditional_conditioning]} + else: + cond = {"c_concat": [image_conditioning], "c_crossattn": [cond]} + unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]} return x, ts, cond, unconditional_conditioning @@ -176,8 +185,12 @@ class VanillaStableDiffusionSampler: # Wrap the conditioning models with additional image conditioning for inpainting model if image_conditioning is not None: - conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]} - unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]} + if self.conditioning_key == "crossattn-adm": + conditioning = {"c_adm": image_conditioning, "c_crossattn": [conditioning]} + unconditional_conditioning = {"c_adm": torch.zeros_like(image_conditioning), "c_crossattn": [unconditional_conditioning]} + else: + conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]} + unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]} samples = self.launch_sampling(t_enc + 1, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning)) @@ -195,8 +208,12 @@ class VanillaStableDiffusionSampler: # Wrap the conditioning models with additional image conditioning for inpainting model # dummy_for_plms is needed because PLMS code checks the first item in the dict to have the right shape if image_conditioning is not None: - conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_concat": [image_conditioning]} - unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_concat": [image_conditioning]} + if self.conditioning_key == "crossattn-adm": + conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_adm": image_conditioning} + unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_adm": torch.zeros_like(image_conditioning)} + else: + conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_concat": [image_conditioning]} + unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_concat": [image_conditioning]} samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0]) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 93f0e55a..e9f08518 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -92,14 +92,21 @@ class CFGDenoiser(torch.nn.Module): batch_size = len(conds_list) repeats = [len(conds_list[i]) for i in 
range(batch_size)] + if shared.sd_model.model.conditioning_key == "crossattn-adm": + image_uncond = torch.zeros_like(image_cond) + make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": c_crossattn, "c_adm": c_adm} + else: + image_uncond = image_cond + make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": c_crossattn, "c_concat": [c_concat]} + if not is_edit_model: x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond]) + image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond]) else: x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x]) sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond] + [torch.zeros_like(self.init_latent)]) + image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)]) denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond) cfg_denoiser_callback(denoiser_params) @@ -116,13 +123,13 @@ class CFGDenoiser(torch.nn.Module): cond_in = torch.cat([tensor, uncond, uncond]) if shared.batch_cond_uncond: - x_out = self.inner_model(x_in, sigma_in, cond={"c_crossattn": [cond_in], "c_concat": [image_cond_in]}) + x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict([cond_in], image_cond_in)) else: x_out = torch.zeros_like(x_in) for batch_offset in range(0, x_out.shape[0], batch_size): a = batch_offset b = a + batch_size - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [cond_in[a:b]], "c_concat": [image_cond_in[a:b]]}) + x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict([cond_in[a:b]], image_cond_in[a:b])) else: x_out = torch.zeros_like(x_in) batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size @@ -135,9 +142,9 @@ class CFGDenoiser(torch.nn.Module): else: c_crossattn = torch.cat([tensor[a:b]], uncond) - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": c_crossattn, "c_concat": [image_cond_in[a:b]]}) + x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b])) - x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond={"c_crossattn": [uncond], "c_concat": [image_cond_in[-uncond.shape[0]:]]}) + x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:])) denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps) cfg_denoised_callback(denoised_params) -- cgit v1.2.3 From 433b3ab7017556a19173a86d1215ed0a0b5b1396 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 28 Mar 2023 20:36:57 +0300 Subject: Revert "Merge pull request #7931 from space-nuko/img2img-enhance" This reverts commit 426875937048e21305ac24bea53df06523bdaa81, reversing changes made to 
1b63afbedc7789c0eb9a4742b780ab304d7a9caf. --- javascript/ui.js | 22 +-------- modules/generation_parameters_copypaste.py | 3 -- modules/img2img.py | 4 +- modules/processing.py | 37 ++------------- modules/ui.py | 73 ++---------------------------- scripts/xyz_grid.py | 1 - style.css | 4 +- 7 files changed, 13 insertions(+), 131 deletions(-) (limited to 'modules/processing.py') diff --git a/javascript/ui.js b/javascript/ui.js index a73eeaa2..4a440193 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -132,14 +132,7 @@ function create_tab_index_args(tabId, args){ function get_img2img_tab_index() { let res = args_to_array(arguments) - res.splice(-2) // gradio also sends outputs to the arguments, pop them off - res[0] = get_tab_index('mode_img2img') - return res -} - -function get_img2img_tab_index_for_res_preview() { - let res = args_to_array(arguments) - res.splice(-1) // gradio also sends outputs to the arguments, pop them off + res.splice(-2) res[0] = get_tab_index('mode_img2img') return res } @@ -368,16 +361,3 @@ function selectCheckpoint(name){ desiredCheckpointName = name; gradioApp().getElementById('change_checkpoint').click() } - - -function onCalcResolutionImg2Img(mode, scale, width, height, resize_mode, init_img, sketch, init_img_with_mask, inpaint_color_sketch, init_img_inpaint){ - i2iScale = gradioApp().getElementById('img2img_scale') - i2iWidth = gradioApp().getElementById('img2img_width') - i2iHeight = gradioApp().getElementById('img2img_height') - - setInactive(i2iScale, scale == 1) - setInactive(i2iWidth, scale > 1) - setInactive(i2iHeight, scale > 1) - - return []; -} diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 0ad2ad4f..6df76858 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -282,9 +282,6 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model res["Hires resize-1"] = 0 res["Hires resize-2"] = 0 - if "Img2Img upscale" not in res: - res["Img2Img upscale"] = 1 - restore_old_hires_fix_params(res) return res diff --git a/modules/img2img.py b/modules/img2img.py index 959dd96e..c973b770 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -78,7 +78,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): processed_image.save(os.path.join(output_dir, filename)) -def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, scale: float, upscaler: str, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args): +def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: 
bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5 @@ -149,8 +149,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s inpaint_full_res_padding=inpaint_full_res_padding, inpainting_mask_invert=inpainting_mask_invert, override_settings=override_settings, - scale=scale, - upscaler=upscaler, ) p.scripts = modules.scripts.scripts_txt2img diff --git a/modules/processing.py b/modules/processing.py index 509b80b9..6d9c6a8d 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -946,7 +946,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): sampler = None - def __init__(self, init_images: Optional[list] = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: Optional[float] = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: Optional[float] = None, scale: float = 0, upscaler: Optional[str] = None, **kwargs): + def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs): super().__init__(**kwargs) self.init_images = init_images @@ -966,37 +966,11 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.mask = None self.nmask = None self.image_conditioning = None - self.scale = scale - self.upscaler = upscaler - - def get_final_size(self): - if self.scale > 1: - img = self.init_images[0] - width = int(img.width * self.scale) - height = int(img.height * self.scale) - return width, height - else: - return self.width, self.height - def init(self, all_prompts, all_seeds, all_subseeds): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) crop_region = None - if self.scale > 1: - self.extra_generation_params["Img2Img upscale"] = self.scale - - # Non-latent upscalers are run before sampling - # Latent upscalers are run during sampling - init_upscaler = None - if self.upscaler is not None: - self.extra_generation_params["Img2Img upscaler"] = self.upscaler - if self.upscaler not in shared.latent_upscale_modes: - assert len([x for x in shared.sd_upscalers if x.name == self.upscaler]) > 0, f"could not find upscaler named {self.upscaler}" - init_upscaler = self.upscaler - - self.width, self.height = self.get_final_size() - image_mask = self.image_mask if image_mask is not None: @@ -1019,7 +993,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): image_mask = images.resize_image(2, mask, self.width, self.height) self.paste_to = (x1, y1, x2-x1, y2-y1) else: - image_mask = images.resize_image(self.resize_mode, image_mask, self.width, 
self.height, init_upscaler) + image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height) np_mask = np.array(image_mask) np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8) self.mask_for_overlay = Image.fromarray(np_mask) @@ -1036,7 +1010,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): image = images.flatten(img, opts.img2img_background_color) if crop_region is None and self.resize_mode != 3: - image = images.resize_image(self.resize_mode, image, self.width, self.height, init_upscaler) + image = images.resize_image(self.resize_mode, image, self.width, self.height) if image_mask is not None: image_masked = Image.new('RGBa', (image.width, image.height)) @@ -1081,9 +1055,8 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image)) - latent_scale_mode = shared.latent_upscale_modes.get(self.upscaler, None) if self.upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest") - if latent_scale_mode is not None: - self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"]) + if self.resize_mode == 3: + self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear") if image_mask is not None: init_mask = latent_mask diff --git a/modules/ui.py b/modules/ui.py index f22da16a..eb5fcd3f 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -15,7 +15,6 @@ import warnings import gradio as gr import gradio.routes import gradio.utils -from gradio.events import Releaseable import numpy as np from PIL import Image, PngImagePlugin from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call @@ -128,26 +127,6 @@ def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resiz return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" -def calc_resolution_img2img(mode, scale, resize_x, resize_y, resize_mode, *i2i_images): - init_img = None - if mode in {0, 1, 3, 4}: - init_img = i2i_images[mode] - elif mode == 2: - init_img = i2i_images[mode]["image"] - - if not init_img: - return "" - - if scale > 1: - width = int(init_img.width * scale) - height = int(init_img.height * scale) - else: - width = resize_x - height = resize_y - - return f"resize: from {init_img.width}x{init_img.height} to {width}x{height}" - - def apply_styles(prompt, prompt_neg, styles): prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) @@ -756,7 +735,7 @@ def create_ui(): ) with FormRow(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize") + resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") for category in ordered_ui_categories(): if category == "sampler": @@ -765,13 +744,8 @@ def create_ui(): elif category == "dimensions": with FormRow(): with gr.Column(elem_id="img2img_column_size", scale=4): - with FormRow(variant="compact"): - final_resolution = 
FormHTML(value="", elem_id="img2img_finalres", label="Upscaled resolution", interactive=False) - with FormRow(variant="compact"): - scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=1.0, elem_id="img2img_scale") - with FormRow(variant="compact"): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") @@ -786,9 +760,7 @@ def create_ui(): with FormRow(): cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") - with FormRow(): - upscaler = gr.Dropdown(label="Upscaler", elem_id="img2img_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") elif category == "seed": seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') @@ -841,39 +813,6 @@ def create_ui(): outputs=[inpaint_controls, mask_alpha], ) - img2img_resolution_preview_inputs = [dummy_component, # filled in by selected img2img tab index in _js - scale, width, height, resize_mode, - init_img, sketch, init_img_with_mask, inpaint_color_sketch, init_img_inpaint] - for input in img2img_resolution_preview_inputs[1:]: - if isinstance(input, Releaseable): - input.release( - fn=calc_resolution_img2img, - _js="get_img2img_tab_index_for_res_preview", - inputs=img2img_resolution_preview_inputs, - outputs=[final_resolution], - show_progress=False, - ).success( - None, - _js="onCalcResolutionImg2Img", - inputs=img2img_resolution_preview_inputs, - outputs=[], - show_progress=False, - ) - else: - input.change( - fn=calc_resolution_img2img, - _js="get_img2img_tab_index_for_res_preview", - inputs=img2img_resolution_preview_inputs, - outputs=[final_resolution], - show_progress=False, - ).success( - None, - _js="onCalcResolutionImg2Img", - inputs=img2img_resolution_preview_inputs, - outputs=[], - show_progress=False, - ) - img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) @@ -922,8 +861,6 @@ def create_ui(): subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, height, width, - scale, - upscaler, resize_mode, inpaint_full_res, inpaint_full_res_padding, @@ -1009,8 +946,6 @@ def create_ui(): (seed, "Seed"), (width, "Size-1"), (height, "Size-2"), - (scale, "Img2Img upscale"), - (upscaler, 
"Img2Img upscaler"), (batch_size, "Batch size"), (subseed, "Variation seed"), (subseed_strength, "Variation seed strength"), diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 3f6c1997..3895a795 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -220,7 +220,6 @@ axis_options = [ AxisOption("Clip skip", int, apply_clip_skip), AxisOption("Denoising", float, apply_field("denoising_strength")), AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]), - AxisOptionImg2Img("Upscaler", str, apply_field("upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]), AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")), AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: list(sd_vae.vae_dict)), AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)), diff --git a/style.css b/style.css index 379a89dc..de16a7f2 100644 --- a/style.css +++ b/style.css @@ -287,13 +287,13 @@ button.custom-button{ border-radius: 0 0.5rem 0.5rem 0; } -#txtimg_hr_finalres, #img2img_finalres { +#txtimg_hr_finalres{ min-height: 0 !important; padding: .625rem .75rem; margin-left: -0.75em } -#txtimg_hr_finalres .resolution, #img2img_finalres .resolution{ +#txtimg_hr_finalres .resolution{ font-weight: bold; } -- cgit v1.2.3 From b3593d0997bfdcca7f8aa01663e81720db50e494 Mon Sep 17 00:00:00 2001 From: For Sure Date: Thu, 6 Apr 2023 19:42:26 +0300 Subject: Add support for saving init images in img2img --- modules/processing.py | 8 ++++++++ modules/shared.py | 3 +++ 2 files changed, 11 insertions(+) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..5556afc5 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -3,6 +3,7 @@ import math import os import sys import warnings +import hashlib import torch import numpy as np @@ -476,6 +477,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, + "Init image hash": getattr(p, 'init_img_hash', None) } generation_params.update(p.extra_generation_params) @@ -1007,6 +1009,12 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.color_corrections = [] imgs = [] for img in self.init_images: + + # Save init image + if opts.save_init_img: + self.init_img_hash = hashlib.md5(img.tobytes()).hexdigest() + images.save_image(img, path=opts.outdir_init_images, basename=None, forced_filename=self.init_img_hash, save_to_dirs=False) + image = images.flatten(img, opts.img2img_background_color) if crop_region is None and self.resize_mode != 3: diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecb..69c2b21e 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -39,6 +39,7 @@ restricted_opts = { "outdir_grids", "outdir_txt2img_grids", "outdir_save", + "outdir_init_images" } ui_reorder_categories = [ @@ -253,6 +254,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), "save_selected_only": OptionInfo(True, "When using 'Save' 
button, only save a single selected image"), "do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"), + "save_init_img": OptionInfo(True, "Save init images when using img2img"), "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), @@ -268,6 +270,7 @@ options_templates.update(options_section(('saving-paths', "Paths for saving"), { "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs), "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs), "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs), + "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs), })) options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { -- cgit v1.2.3 From 5fe0dd79beaa5ef737ff85254ee9870f60ae9464 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 11:29:37 +0300 Subject: rename CPU RNG to RNG source in settings, add infotext and parameters copypaste support to RNG source --- modules/devices.py | 4 ++-- modules/generation_parameters_copypaste.py | 5 +++++ modules/processing.py | 3 ++- modules/sd_samplers_common.py | 3 ++- modules/sd_samplers_kdiffusion.py | 2 +- modules/shared.py | 2 +- 6 files changed, 13 insertions(+), 6 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/devices.py b/modules/devices.py index 3bc86a6a..c705a3cb 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -95,7 +95,7 @@ def randn(seed, shape): from modules.shared import opts torch.manual_seed(seed) - if opts.use_cpu_randn or device.type == 'mps': + if opts.randn_source == "CPU" or device.type == 'mps': return torch.randn(shape, device=cpu).to(device) return torch.randn(shape, device=device) @@ -103,7 +103,7 @@ def randn(seed, shape): def randn_without_seed(shape): from modules.shared import opts - if opts.use_cpu_randn or device.type == 'mps': + if opts.randn_source == "CPU" or device.type == 'mps': return torch.randn(shape, device=cpu).to(device) return torch.randn(shape, device=device) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 6df76858..e7269363 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -284,6 +284,10 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model restore_old_hires_fix_params(res) + # Missing RNG means the default was set, which is GPU RNG + if "RNG" not in res: + res["RNG"] = "GPU" + return res @@ -304,6 +308,7 @@ infotext_to_setting_name_mapping = [ ('UniPC skip type', 'uni_pc_skip_type'), ('UniPC order', 'uni_pc_order'), ('UniPC lower order final', 'uni_pc_lower_order_final'), + ('RNG', 'randn_source'), ] diff --git a/modules/processing.py b/modules/processing.py index 5556afc5..7bac154d 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -477,7 +477,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else 
clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, - "Init image hash": getattr(p, 'init_img_hash', None) + "Init image hash": getattr(p, 'init_img_hash', None), + "RNG": (opts.randn_source if opts.randn_source != "GPU" else None) } generation_params.update(p.extra_generation_params) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index e6a372d5..bc074238 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -61,7 +61,8 @@ def store_latent(decoded): class InterruptedException(BaseException): pass -if opts.use_cpu_randn: + +if opts.randn_source == "CPU": import torchsde._brownian.brownian_interval def torchsde_randn(size, dtype, device, seed): diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 13f4567a..a547d1b5 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -190,7 +190,7 @@ class TorchHijack: if noise.shape == x.shape: return noise - if opts.use_cpu_randn or x.device.type == 'mps': + if opts.randn_source == "CPU" or x.device.type == 'mps': return torch.randn_like(x, device=devices.cpu).to(x.device) else: return torch.randn_like(x) diff --git a/modules/shared.py b/modules/shared.py index b5b401fe..73704889 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -334,7 +334,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }), "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), - "use_cpu_randn": OptionInfo(False, "Use CPU for random number generation to make manual seeds generate the same image across platforms. This may change existing seeds."), + "randn_source": OptionInfo("GPU", "Random number generator source. Changes seeds drastically. Use CPU to produce the same picture across different vidocard vendors.", gr.Radio, {"choices": ["GPU", "CPU"]}), })) options_templates.update(options_section(('compatibility', "Compatibility"), { -- cgit v1.2.3
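
A condensed, self-contained sketch of the behavior the final commit gives modules/devices.py (the real function reads `opts.randn_source` and a module-level `device`; both are lifted into parameters here purely for illustration):

    import torch

    def randn(seed, shape, device, randn_source="GPU"):
        # Seeding and drawing the noise on the CPU produces the same tensor on
        # any machine, so a fixed seed reproduces the same image regardless of
        # GPU vendor. Drawing directly on the GPU is faster, but its RNG stream
        # is vendor-specific; MPS always takes the CPU path, as in the commit.
        torch.manual_seed(seed)
        if randn_source == "CPU" or device.type == "mps":
            return torch.randn(shape, device=torch.device("cpu")).to(device)
        return torch.randn(shape, device=device)

This is also why the settings text warns that switching the option "changes seeds drastically": the sampler itself is untouched, but the same seed now maps to a different starting noise tensor.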