path: root/modules/processing.py
author    AUTOMATIC <16777216c@gmail.com>  2022-10-15 10:23:12 +0000
committer AUTOMATIC <16777216c@gmail.com>  2022-10-15 10:23:12 +0000
commit    f7ca63937ac83d32483285c3af09afaa356d6276 (patch)
tree      56b58d962a4f797ddf2c6185c9b9120f1d7d3901 /modules/processing.py
parent    5967d07d1aa4e2fef031a57b1612b1ab04a3cd78 (diff)
bring back scale latent option in settings
Diffstat (limited to 'modules/processing.py')
-rw-r--r--  modules/processing.py | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 7e2a416d..b9a1660e 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -557,11 +557,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
 
         samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2]
 
-        decoded_samples = decode_first_stage(self.sd_model, samples)
+        if opts.use_scale_latent_for_hires_fix:
+            samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
 
-        if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None":
-            decoded_samples = torch.nn.functional.interpolate(decoded_samples, size=(self.height, self.width), mode="bilinear")
         else:
+            decoded_samples = decode_first_stage(self.sd_model, samples)
             lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
 
             batch_images = []
@@ -578,7 +578,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
             decoded_samples = decoded_samples.to(shared.device)
             decoded_samples = 2. * decoded_samples - 1.
 
-        samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
+            samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
 
         shared.state.nextjob()
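
Note: the change above restores a "scale latent" option for the txt2img highres fix, switching between two upscaling strategies based on the new use_scale_latent_for_hires_fix setting: either interpolate directly in latent space, or decode to image space, upscale there, and re-encode. Below is a minimal standalone sketch of the two paths, not the repository's code. Assumptions are labeled in comments: LATENT_SCALE_FACTOR stands in for opt_f (8 for Stable Diffusion's VAE), the helper names are hypothetical, and sd_model is assumed to expose decode_first_stage / encode_first_stage / get_first_stage_encoding in the ldm style; the real code additionally runs the configured image upscaler per sample, which is omitted here.

import torch
import torch.nn.functional as F

# Assumption: stands in for opt_f in the diff above (8 for SD's VAE,
# i.e. a 512x512 image corresponds to a 64x64 latent).
LATENT_SCALE_FACTOR = 8


def upscale_in_latent_space(samples: torch.Tensor, target_h: int, target_w: int) -> torch.Tensor:
    # Path taken when opts.use_scale_latent_for_hires_fix is enabled:
    # bilinear interpolation directly on the latent tensor, no VAE round trip.
    return F.interpolate(
        samples,
        size=(target_h // LATENT_SCALE_FACTOR, target_w // LATENT_SCALE_FACTOR),
        mode="bilinear",
    )


def upscale_via_image_space(samples: torch.Tensor, sd_model, target_h: int, target_w: int) -> torch.Tensor:
    # Simplified version of the else-branch: decode latents to images,
    # upscale in pixel space, then re-encode into latents.
    # Assumes an ldm-style first-stage API on sd_model; the per-image
    # upscaler loop from the real code is omitted.
    decoded = sd_model.decode_first_stage(samples)
    decoded = torch.clamp((decoded + 1.0) / 2.0, min=0.0, max=1.0)
    decoded = F.interpolate(decoded, size=(target_h, target_w), mode="bilinear")
    decoded = 2.0 * decoded - 1.0
    return sd_model.get_first_stage_encoding(sd_model.encode_first_stage(decoded))


if __name__ == "__main__":
    # 4-channel latent for a 512x512 image; upscaling the latent for a
    # 1024x1024 target yields a 128x128 latent.
    latents = torch.randn(1, 4, 64, 64)
    print(upscale_in_latent_space(latents, target_h=1024, target_w=1024).shape)
    # -> torch.Size([1, 4, 128, 128])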