diff options
author | random_thoughtss <random_thoughtss@proton.me> | 2022-10-20 16:45:03 +0000 |
---|---|---|
committer | random_thoughtss <random_thoughtss@proton.me> | 2022-10-20 16:45:03 +0000 |
commit | 92a17a7a4a13fceb3c3e25a2e854b2a7dd6eb5df (patch) | |
tree | 32ad89726a53fcd23151b461348da59e9b7717be /modules/processing.py | |
parent | aa7ff2a1972f3865883e10ba28c5414cdebe8e3b (diff) | |
download | stable-diffusion-webui-gfx803-92a17a7a4a13fceb3c3e25a2e854b2a7dd6eb5df.tar.gz stable-diffusion-webui-gfx803-92a17a7a4a13fceb3c3e25a2e854b2a7dd6eb5df.tar.bz2 stable-diffusion-webui-gfx803-92a17a7a4a13fceb3c3e25a2e854b2a7dd6eb5df.zip |
Made dummy latents smaller. Minor code cleanups
Diffstat (limited to 'modules/processing.py')
-rw-r--r-- | modules/processing.py | 7 |
1 file changed, 4 insertions, 3 deletions
diff --git a/modules/processing.py b/modules/processing.py index 3caac25e..539cde38 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -557,7 +557,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): else:
# Dummy zero conditioning if we're not using inpainting model.
# Still takes up a bit of memory, but no encoder call.
- image_conditioning = torch.zeros(x.shape[0], 5, x.shape[-2], x.shape[-1], dtype=x.dtype, device=x.device)
+ # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size.
+ image_conditioning = torch.zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)
return image_conditioning
@@ -759,8 +760,8 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.image_conditioning = self.image_conditioning.to(shared.device).type(self.sd_model.dtype)
else:
self.image_conditioning = torch.zeros(
- self.init_latent.shape[0], 5, self.init_latent.shape[-2], self.init_latent.shape[-1],
- dtype=self.init_latent.dtype,
+ self.init_latent.shape[0], 5, 1, 1,
+ dtype=self.init_latent.dtype,
device=self.init_latent.device
)
|