about summary refs log tree commit diff stats
path: root/modules/processing.py
diff options
context:
space:
mode:
authorDepFA <35278260+dfaker@users.noreply.github.com>2022-10-10 14:13:48 +0000
committerGitHub <noreply@github.com>2022-10-10 14:13:48 +0000
commitce2d7f7eaccbd1843835ca2d048d78ba5cb1ea13 (patch)
tree948c77a1ed9ed85278bc97ca02857b0c9efbd4b0 /modules/processing.py
parent4117afff11c7b0a2162c73ea02be8cfa30d02640 (diff)
parentce37fdd30e9fc0fe0bc5805a068ce8b11b42b5a3 (diff)
downloadstable-diffusion-webui-gfx803-ce2d7f7eaccbd1843835ca2d048d78ba5cb1ea13.tar.gz
stable-diffusion-webui-gfx803-ce2d7f7eaccbd1843835ca2d048d78ba5cb1ea13.tar.bz2
stable-diffusion-webui-gfx803-ce2d7f7eaccbd1843835ca2d048d78ba5cb1ea13.zip
Merge branch 'master' into embed-embeddings-in-images
Diffstat (limited to 'modules/processing.py')
-rw-r--r-- modules/processing.py | 14
1 files changed, 10 insertions, 4 deletions
diff --git a/modules/processing.py b/modules/processing.py
index 94d2dd62..50ba4fc5 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -259,6 +259,13 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see
return x
+def decode_first_stage(model, x):
+ with devices.autocast(disable=x.dtype == devices.dtype_vae):
+ x = model.decode_first_stage(x)
+
+ return x
+
+
def get_fixed_seed(seed):
if seed is None or seed == '' or seed == -1:
return int(random.randrange(4294967294))
@@ -398,9 +405,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
# use the image collected previously in sampler loop
samples_ddim = shared.state.current_latent
- samples_ddim = samples_ddim.to(devices.dtype)
-
- x_samples_ddim = p.sd_model.decode_first_stage(samples_ddim)
+ samples_ddim = samples_ddim.to(devices.dtype_vae)
+ x_samples_ddim = decode_first_stage(p.sd_model, samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
del samples_ddim
@@ -533,7 +539,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
if self.scale_latent:
samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
else:
- decoded_samples = self.sd_model.decode_first_stage(samples)
+ decoded_samples = decode_first_stage(self.sd_model, samples)
if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None":
decoded_samples = torch.nn.functional.interpolate(decoded_samples, size=(self.height, self.width), mode="bilinear")