author | CodeHatchling <steve@codehatch.com> | 2023-12-07 05:25:53 +0000 |
---|---|---|
committer | CodeHatchling <steve@codehatch.com> | 2023-12-07 21:28:02 +0000 |
commit | 2abc417834d752e43a283f8603bfddfb1c80b30f (patch) | |
tree | b6cdd6aa6014b856533e5d9ab8afc4e492e8f1cf /modules/processing.py | |
parent | ac4578912395627731f2cd8529f87a95df1f7644 (diff) | |
Re-implemented soft inpainting via a script. Also fixed some mistakes with the previous hooks, removed unnecessary formatting changes, and removed code that I had forgotten to remove.
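For context, soft inpainting now lives in an extension script that consumes the mask-blend hook instead of being hard-coded in processing.py. Below is a minimal sketch of such a script, assuming the per-script callback shares the name of the runner call `on_mask_blend` visible in this diff; every attribute of `MaskBlendArgs` other than `blended_latent` (which processing.py reads back) is an assumption made for illustration.

```python
# Minimal sketch only: shows how a script-based soft-inpainting blend could
# hook into on_mask_blend. Attribute names on `mba` other than blended_latent
# are assumptions; adjust them to the actual MaskBlendArgs definition.
import modules.scripts as scripts


class SoftInpaintSketch(scripts.Script):
    def title(self):
        return "Soft inpainting (sketch)"

    def show(self, is_img2img):
        return scripts.AlwaysVisible

    def on_mask_blend(self, p, mba):
        # Replace the default hard latent blend with a softened mask.
        soft_nmask = mba.nmask.clamp(0.1, 0.9)  # assumed attribute name
        mba.blended_latent = (mba.current_latent * soft_nmask          # assumed attribute name
                              + mba.init_latent * (1.0 - soft_nmask))  # assumed attribute name
```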
Diffstat (limited to 'modules/processing.py')
-rw-r--r-- | modules/processing.py | 23 |
1 file changed, 10 insertions, 13 deletions
diff --git a/modules/processing.py b/modules/processing.py
index 5a1a90af..f8d85bdf 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -879,14 +879,13 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if p.scripts is not None:
ps = scripts.PostSampleArgs(samples_ddim)
p.scripts.post_sample(p, ps)
- samples_ddim = pp.samples
+ samples_ddim = ps.samples
if getattr(samples_ddim, 'already_decoded', False):
x_samples_ddim = samples_ddim
else:
if opts.sd_vae_decode_method != 'Full':
p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method
-
x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)
x_samples_ddim = torch.stack(x_samples_ddim).float()
@@ -944,7 +943,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if p.scripts is not None:
ppmo = scripts.PostProcessMaskOverlayArgs(i, mask_for_overlay, overlay_image)
p.scripts.postprocess_maskoverlay(p, ppmo)
- mask_for_overlay, overlay_image = pp.mask_for_overlay, pp.overlay_image
+ mask_for_overlay, overlay_image = ppmo.mask_for_overlay, ppmo.overlay_image
if p.color_corrections is not None and i < len(p.color_corrections):
if save_samples and opts.save_images_before_color_correction:
@@ -959,7 +958,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
original_denoised_image = image.copy()
if p.paste_to is not None:
- original_denoised_image = uncrop(original_denoised_image, (p.overlay_image.width, p.overlay_image.height), p.paste_to)
+ original_denoised_image = uncrop(original_denoised_image, (overlay_image.width, overlay_image.height), p.paste_to)
image = apply_overlay(image, p.paste_to, overlay_image)
@@ -1512,9 +1511,6 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
if self.overlay_images is not None:
self.overlay_images = self.overlay_images * self.batch_size
- if self.masks_for_overlay is not None:
- self.masks_for_overlay = self.masks_for_overlay * self.batch_size
-
if self.color_corrections is not None and len(self.color_corrections) == 1:
self.color_corrections = self.color_corrections * self.batch_size
@@ -1565,14 +1561,15 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)
- blended_samples = samples * self.nmask + self.init_latent * self.mask
+ if self.mask is not None:
+ blended_samples = samples * self.nmask + self.init_latent * self.mask
- if self.scripts is not None:
- mba = scripts.MaskBlendArgs(self, samples, self.nmask, self.init_latent, self.mask, blended_samples, sigma=None, is_final_blend=True)
- self.scripts.on_mask_blend(self, mba)
- blended_samples = mba.blended_latent
+ if self.scripts is not None:
+ mba = scripts.MaskBlendArgs(samples, self.nmask, self.init_latent, self.mask, blended_samples)
+ self.scripts.on_mask_blend(self, mba)
+ blended_samples = mba.blended_latent
- samples = blended_samples
+ samples = blended_samples
del x
devices.torch_gc()
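The hook-name mistakes fixed above (`pp.samples` → `ps.samples`, `pp.mask_for_overlay` → `ppmo.mask_for_overlay`) matter because scripts mutate those argument objects in place and processing.py reads the fields back afterwards. A hedged sketch of a script using the post-sample hook, with only the `.samples` field taken from this diff:

```python
# Sketch under assumptions: the per-script callback is assumed to be named
# post_sample, mirroring p.scripts.post_sample(p, ps) in processing.py, and
# ps.samples is the latent batch that processing reads back after the hook.
import modules.scripts as scripts


class PostSampleSketch(scripts.Script):
    def title(self):
        return "Post-sample passthrough (sketch)"

    def show(self, is_img2img):
        return scripts.AlwaysVisible

    def post_sample(self, p, ps):
        # Whatever is written to ps.samples here becomes samples_ddim in
        # process_images_inner (the ps-vs-pp fix in this commit).
        ps.samples = ps.samples  # no-op placeholder; a real script would edit the latents
```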