author | kurisu_u <73207840+lanyeeee@users.noreply.github.com> | 2023-12-30 13:47:59 +0000
committer | GitHub <noreply@github.com> | 2023-12-30 13:47:59 +0000
commit | d05f9e8124160d5ef6fcea6523585f613f375fac (patch)
tree | c2898250b3f96ccf791c9b4ebf7faf7f12f1a128 /modules/processing.py
parent | c069c2c5628728c9506dd034ef98e6335fd5bb34 (diff)
parent | adcd65ba3493fc91c9d7c843d7e14275ad6fd881 (diff)
Merge branch 'dev' into api_thread_safe
Diffstat (limited to 'modules/processing.py')
-rw-r--r-- | modules/processing.py | 25
1 file changed, 23 insertions, 2 deletions
diff --git a/modules/processing.py b/modules/processing.py
index 9351e3fb..7789f9a4 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -113,6 +113,21 @@ def txt2img_image_conditioning(sd_model, x, width, height):
         return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device)
 
     else:
+        sd = sd_model.model.state_dict()
+        diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
+        if diffusion_model_input is not None:
+            if diffusion_model_input.shape[1] == 9:
+                # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
+                image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
+                image_conditioning = images_tensor_to_samples(image_conditioning, approximation_indexes.get(opts.sd_vae_encode_method))
+
+                # Add the fake full 1s mask to the first dimension.
+                image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
+                image_conditioning = image_conditioning.to(x.dtype)
+
+                return image_conditioning
+
         # Dummy zero conditioning if we're not using inpainting or unclip models.
         # Still takes up a bit of memory, but no encoder call.
         # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size.
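A note on the added branch above (an illustration, not part of the commit): `torch.nn.functional.pad` reads its pad tuple right-to-left in per-dimension pairs, so `(0, 0, 0, 0, 1, 0)` leaves width and height alone and prepends one channel filled with `value=1.0`. That turns the 4-channel VAE encoding of the all-0.5 "masked image" into the 5-channel conditioning (4 latent channels + 1 all-ones mask) that inpainting UNets expect. A minimal sketch:

```python
import torch
import torch.nn.functional as F

# Stand-in for the VAE-encoded all-0.5 "masked image": a batch of 4-channel latents.
latent = torch.zeros(2, 4, 8, 8)

# Pad tuple is read right-to-left in (before, after) pairs per dimension:
# (0, 0) for width, (0, 0) for height, (1, 0) = one channel before, none after.
conditioning = F.pad(latent, (0, 0, 0, 0, 1, 0), value=1.0)

print(conditioning.shape)           # torch.Size([2, 5, 8, 8])
print(conditioning[:, 0].unique())  # tensor([1.]) -- the prepended all-ones mask
```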
@@ -371,6 +386,12 @@ class StableDiffusionProcessing:
         if self.sampler.conditioning_key == "crossattn-adm":
             return self.unclip_image_conditioning(source_image)
 
+        sd = self.sampler.model_wrap.inner_model.model.state_dict()
+        diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
+        if diffusion_model_input is not None:
+            if diffusion_model_input.shape[1] == 9:
+                return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
+
         # Dummy zero conditioning if we're not using inpainting or depth model.
         return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
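Both hunks use the same probe: instead of trusting a config flag, they read the first UNet convolution's weight from the checkpoint's state_dict. Its shape is `(out_channels, in_channels, kH, kW)`, and 9 input channels (4 noisy latent + 4 masked-image latent + 1 mask) mark an inpainting model, versus 4 for a standard one. A hedged sketch of the idea, with a hypothetical helper name that does not exist in the repository:

```python
import torch

def looks_like_inpainting_unet(state_dict):
    # Hypothetical helper mirroring the check above: the first conv weight
    # has shape (out_channels, in_channels, kH, kW); inpainting UNets take
    # 9 input channels (4 latent + 4 masked-image latent + 1 mask).
    w = state_dict.get('diffusion_model.input_blocks.0.0.weight')
    return w is not None and w.shape[1] == 9

# Toy state_dicts standing in for real checkpoints.
txt2img_sd = {'diffusion_model.input_blocks.0.0.weight': torch.empty(320, 4, 3, 3)}
inpaint_sd = {'diffusion_model.input_blocks.0.0.weight': torch.empty(320, 9, 3, 3)}

print(looks_like_inpainting_unet(txt2img_sd))  # False
print(looks_like_inpainting_unet(inpaint_sd))  # True
```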
@@ -1135,7 +1156,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
     def init(self, all_prompts, all_seeds, all_subseeds):
         if self.enable_hr:
-            if self.hr_checkpoint_name:
+            if self.hr_checkpoint_name and self.hr_checkpoint_name != 'Use same checkpoint':
                 self.hr_checkpoint_info = sd_models.get_closet_checkpoint_match(self.hr_checkpoint_name)
 
                 if self.hr_checkpoint_info is None:
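For context on the one-line fix above: the hires-fix checkpoint dropdown submits the literal string 'Use same checkpoint' as its default entry, so the old truthiness check passed that string to `sd_models.get_closet_checkpoint_match` and the lookup failed. A minimal sketch of the corrected guard, using a hypothetical helper name:

```python
def resolve_hr_checkpoint_name(name):
    # Hypothetical helper (not in the codebase) mirroring the fixed condition:
    # the UI's default dropdown entry 'Use same checkpoint' must behave like
    # an empty selection rather than being looked up as a checkpoint name.
    if name and name != 'Use same checkpoint':
        return name
    return None  # keep using the currently loaded checkpoint

assert resolve_hr_checkpoint_name(None) is None
assert resolve_hr_checkpoint_name('Use same checkpoint') is None
assert resolve_hr_checkpoint_name('sd_xl_base_1.0.safetensors') == 'sd_xl_base_1.0.safetensors'
```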
@@ -1482,7 +1503,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
             # Save init image
             if opts.save_init_img:
                 self.init_img_hash = hashlib.md5(img.tobytes()).hexdigest()
-                images.save_image(img, path=opts.outdir_init_images, basename=None, forced_filename=self.init_img_hash, save_to_dirs=False)
+                images.save_image(img, path=opts.outdir_init_images, basename=None, forced_filename=self.init_img_hash, save_to_dirs=False, existing_info=img.info)
 
             image = images.flatten(img, opts.img2img_background_color)
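The new `existing_info=img.info` argument forwards the init image's metadata so it survives the re-save. As background (a plain Pillow illustration with made-up filenames, not the webui's `images.save_image`): PIL exposes a PNG's text chunks through `Image.info`, and writing a new file without passing them along silently drops them.

```python
from PIL import Image, PngImagePlugin

# Write a PNG carrying a "parameters" text chunk, the way generation
# metadata is typically embedded.
meta = PngImagePlugin.PngInfo()
meta.add_text("parameters", "a prompt, Steps: 20, Sampler: Euler a")
Image.new("RGB", (64, 64)).save("init.png", pnginfo=meta)

# Re-saving without forwarding the metadata drops the chunk...
img = Image.open("init.png")
img.save("copy_without_info.png")
print(Image.open("copy_without_info.png").info.get("parameters"))  # None

# ...while passing it back via pnginfo preserves it.
keep = PngImagePlugin.PngInfo()
for k, v in img.info.items():
    keep.add_text(k, str(v))
img.save("copy_with_info.png", pnginfo=keep)
print(Image.open("copy_with_info.png").info.get("parameters"))  # the original text
```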