author | space-nuko <24979496+space-nuko@users.noreply.github.com> | 2023-02-19 11:45:43 +0000 |
---|---|---|
committer | space-nuko <24979496+space-nuko@users.noreply.github.com> | 2023-03-25 16:52:43 +0000 |
commit | 7ea5d395c44be208f654b07ec7993aa2952f2510 (patch) | |
tree | 63cf7ad593d465ced460d3c824fecfd3aec231fb /modules/processing.py | |
parent | 68999d0b15d612965e7bc7feb62d6b4d55e112fa (diff) | |
Add upscaler to img2img
Diffstat (limited to 'modules/processing.py')
-rw-r--r-- | modules/processing.py | 23 |
1 file changed, 17 insertions, 6 deletions
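The diff below distinguishes two upscaler families: names found in `shared.latent_upscale_modes` are applied to `init_latent` during sampling, while anything else must match an entry in `shared.sd_upscalers` and is applied to the source image before sampling. A rough, self-contained sketch of that dispatch (the table contents and upscaler names here are illustrative assumptions, not webui code):

    from typing import Optional

    # Illustrative stand-ins for shared.latent_upscale_modes / shared.sd_upscalers
    latent_upscale_modes = {"Latent": {"mode": "bilinear", "antialias": False}}
    sd_upscaler_names = ["Lanczos", "ESRGAN_4x"]

    def pick_init_upscaler(upscaler: Optional[str]) -> Optional[str]:
        """Return an upscaler to run before sampling, or None for latent modes."""
        if upscaler is None or upscaler in latent_upscale_modes:
            return None  # latent upscalers run on init_latent during sampling
        assert any(name == upscaler for name in sd_upscaler_names), f"could not find upscaler named {upscaler}"
        return upscaler

    print(pick_init_upscaler("Lanczos"))  # 'Lanczos' -> resize the image first
    print(pick_init_upscaler("Latent"))  # None -> interpolate init_latent later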
diff --git a/modules/processing.py b/modules/processing.py
index fc4b166c..afb8cfd1 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -929,7 +929,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
 class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
     sampler = None
 
-    def __init__(self, init_images: Optional[list] = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: Optional[float] = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: Optional[float] = None, scale: float = 0, **kwargs):
+    def __init__(self, init_images: Optional[list] = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: Optional[float] = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: Optional[float] = None, scale: float = 0, upscaler: Optional[str] = None, **kwargs):
         super().__init__(**kwargs)
 
         self.init_images = init_images
@@ -950,6 +950,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         self.nmask = None
         self.image_conditioning = None
         self.scale = scale
+        self.upscaler = upscaler
 
     def get_final_size(self):
         if self.scale > 1:
@@ -966,7 +967,16 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         crop_region = None
 
         if self.scale > 1:
-            self.extra_generation_params["Img2Img Upscale"] = self.scale
+            self.extra_generation_params["Img2Img upscale"] = self.scale
+
+        # Non-latent upscalers are run before sampling
+        # Latent upscalers are run during sampling
+        init_upscaler = None
+        if self.upscaler is not None:
+            self.extra_generation_params["Img2Img upscaler"] = self.upscaler
+            if self.upscaler not in shared.latent_upscale_modes:
+                assert len([x for x in shared.sd_upscalers if x.name == self.upscaler]) > 0, f"could not find upscaler named {self.upscaler}"
+                init_upscaler = self.upscaler
 
         self.width, self.height = self.get_final_size()
@@ -992,7 +1002,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
                 image_mask = images.resize_image(2, mask, self.width, self.height)
                 self.paste_to = (x1, y1, x2-x1, y2-y1)
             else:
-                image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height)
+                image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height, init_upscaler)
                 np_mask = np.array(image_mask)
                 np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
                 self.mask_for_overlay = Image.fromarray(np_mask)
@@ -1009,7 +1019,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
             image = images.flatten(img, opts.img2img_background_color)
 
             if crop_region is None and self.resize_mode != 3:
-                image = images.resize_image(self.resize_mode, image, self.width, self.height)
+                image = images.resize_image(self.resize_mode, image, self.width, self.height, init_upscaler)
 
             if image_mask is not None:
                 image_masked = Image.new('RGBa', (image.width, image.height))
@@ -1054,8 +1064,9 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
 
-        if self.resize_mode == 3:
-            self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
+        latent_scale_mode = shared.latent_upscale_modes.get(self.upscaler, None) if self.upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
+        if latent_scale_mode is not None:
+            self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"])
 
         if image_mask is not None:
             init_mask = latent_mask
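The last hunk replaces the hard-coded `resize_mode == 3` bilinear path with a mode looked up from `shared.latent_upscale_modes`. A self-contained sketch of that interpolation step (the mode table, `opt_f = 8`, and tensor sizes are assumptions based on the diff, not verified webui values):

    import torch
    import torch.nn.functional as F

    # Assumed shape of a latent_upscale_modes entry: kwargs for F.interpolate
    latent_scale_mode = {"mode": "bilinear", "antialias": False}

    opt_f = 8                                # assumed latent-to-pixel factor
    init_latent = torch.randn(1, 4, 64, 64)  # stand-in for an encoded 512x512 image
    height, width = 1024, 1024               # final size after scale = 2

    # Mirrors the new branch: interpolate the latent instead of the image
    init_latent = F.interpolate(
        init_latent,
        size=(height // opt_f, width // opt_f),
        mode=latent_scale_mode["mode"],
        antialias=latent_scale_mode["antialias"],
    )
    print(init_latent.shape)  # torch.Size([1, 4, 128, 128])

With a non-latent upscaler selected, this branch is skipped entirely: `shared.latent_upscale_modes.get(self.upscaler, None)` returns None, and the image was already resized by `init_upscaler` before encoding.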