author | CodeHatchling <steve@codehatch.com> | 2023-12-05 02:42:59 +0000 |
---|---|---|
committer | CodeHatchling <steve@codehatch.com> | 2023-12-05 02:42:59 +0000 |
commit | 6fc12428e3c5f903584ca7986e0c441f80fa2807 (patch) | |
tree | 3b578dbd72a19556ea3278a9c97db056f6749522 /modules/processing.py | |
parent | b32a334e3da7b06d82441beaa08a673b4f55bca1 (diff) | |
Fixed issue where batched inpainting (batch size > 1) wouldn't work because of mismatched tensor sizes. The 'already_decoded' case should also be handled correctly (tested indirectly).
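For context on the "mismatched tensor sizes" part of the message, this kind of failure typically comes from a per-image mask being combined with a whole batch of latents. The sketch below is illustrative only; the tensor names and shapes are assumptions, not the webui's actual variables.

```python
import torch

# Illustrative shapes only (not the webui's actual variables): a 4-image batch of
# 4-channel 64x64 latents, plus a mask prepared for a single image.
latents_batch = torch.randn(4, 4, 64, 64)
original_latents = torch.randn(4, 4, 64, 64)
nmask = torch.rand(1, 1, 64, 64)

# A mask built for the wrong batch size fails outright, e.g.
#   torch.rand(2, 1, 64, 64) * latents_batch  ->  RuntimeError (size mismatch at dim 0)
# Expanding (or relying on broadcasting) across the batch dimension avoids it:
mask = nmask.expand(latents_batch.shape[0], -1, -1, -1)
blended = mask * latents_batch + (1 - mask) * original_latents
assert blended.shape == latents_batch.shape
```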
Diffstat (limited to 'modules/processing.py')
-rw-r--r-- | modules/processing.py | 23 |
1 file changed, 15 insertions, 8 deletions
```diff
diff --git a/modules/processing.py b/modules/processing.py
index 7fc282cf..71bb056a 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -883,20 +883,27 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
         if getattr(samples_ddim, 'already_decoded', False):
             x_samples_ddim = samples_ddim
             # todo: generate adaptive masks based on pixel differences.
-            # if p.masks_for_overlay is used, it will already be populated with masks
+            if getattr(p, "image_mask", None) is not None and getattr(p, "soft_inpainting", None) is not None:
+                si.apply_masks(soft_inpainting=p.soft_inpainting,
+                               nmask=p.nmask,
+                               overlay_images=p.overlay_images,
+                               masks_for_overlay=p.masks_for_overlay,
+                               width=p.width,
+                               height=p.height,
+                               paste_to=p.paste_to)
         else:
             if opts.sd_vae_decode_method != 'Full':
                 p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method
 
             # Generate the mask(s) based on similarity between the original and denoised latent vectors
             if getattr(p, "image_mask", None) is not None and getattr(p, "soft_inpainting", None) is not None:
-                si.generate_adaptive_masks(latent_orig=p.init_latent,
-                                           latent_processed=samples_ddim,
-                                           overlay_images=p.overlay_images,
-                                           masks_for_overlay=p.masks_for_overlay,
-                                           width=p.width,
-                                           height=p.height,
-                                           paste_to=p.paste_to)
+                si.apply_adaptive_masks(latent_orig=p.init_latent,
+                                        latent_processed=samples_ddim,
+                                        overlay_images=p.overlay_images,
+                                        masks_for_overlay=p.masks_for_overlay,
+                                        width=p.width,
+                                        height=p.height,
+                                        paste_to=p.paste_to)
 
             x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)
```
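The renamed `si.apply_adaptive_masks` call and the comment above it describe masks derived from how much each denoised latent differs from the original. A minimal sketch of that idea, assuming plain PyTorch tensors and a hypothetical helper name (this is not the actual `modules/soft_inpainting` implementation):

```python
import torch

def sketch_adaptive_masks(latent_orig: torch.Tensor, latent_processed: torch.Tensor) -> torch.Tensor:
    # Per-pixel difference magnitude, averaged over latent channels -> [batch, 1, H, W].
    diff = (latent_processed - latent_orig).abs().mean(dim=1, keepdim=True)
    # Normalize each image's difference map into [0, 1] so it can act as a blend mask.
    flat = diff.flatten(1)
    scale = flat.max(dim=1, keepdim=True).values.clamp(min=1e-8)
    return (flat / scale).view_as(diff)

# One mask per image in the batch, which is what the batched code path needs.
masks = sketch_adaptive_masks(torch.randn(4, 4, 64, 64), torch.randn(4, 4, 64, 64))
print(masks.shape)  # torch.Size([4, 1, 64, 64])
```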