author     AUTOMATIC <16777216c@gmail.com>    2022-10-15 10:23:12 +0000
committer  AUTOMATIC <16777216c@gmail.com>    2022-10-15 10:23:12 +0000
commit     f7ca63937ac83d32483285c3af09afaa356d6276 (patch)
tree       56b58d962a4f797ddf2c6185c9b9120f1d7d3901 /modules/processing.py
parent     5967d07d1aa4e2fef031a57b1612b1ab04a3cd78 (diff)
bring back scale latent option in settings
Diffstat (limited to 'modules/processing.py')
-rw-r--r--    modules/processing.py    8
1 file changed, 4 insertions, 4 deletions
diff --git a/modules/processing.py b/modules/processing.py
index 7e2a416d..b9a1660e 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -557,11 +557,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2]
 
-        decoded_samples = decode_first_stage(self.sd_model, samples)
+        if opts.use_scale_latent_for_hires_fix:
+            samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
 
-        if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None":
-            decoded_samples = torch.nn.functional.interpolate(decoded_samples, size=(self.height, self.width), mode="bilinear")
         else:
+            decoded_samples = decode_first_stage(self.sd_model, samples)
             lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
 
             batch_images = []
@@ -578,7 +578,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
             decoded_samples = decoded_samples.to(shared.device)
             decoded_samples = 2. * decoded_samples - 1.
 
-        samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
+            samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
 
         shared.state.nextjob()
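
For context, the two code paths restored by this commit can be read as the minimal standalone sketch below. It is not the webui's actual API: the vae and upscale_image helpers are hypothetical stand-ins for decode_first_stage / encode_first_stage + get_first_stage_encoding and for the configured upscaler, and opt_f is assumed to be the latent downscale factor (8 for Stable Diffusion). When use_scale_latent_for_hires_fix is enabled, the latents are resized directly with bilinear interpolation; otherwise they are decoded to image space, upscaled there, and re-encoded, which is also why the second hunk indents the get_first_stage_encoding call into the else branch.

import torch
import torch.nn.functional as F

opt_f = 8  # assumed latent downscale factor (SD VAEs compress images by 8x per side)

def hires_fix_resize(samples, vae, upscale_image, width, height, use_scale_latent):
    """Sketch of the two hires-fix paths shown in the diff above.

    samples:       latent tensor of shape [batch, channels, h // opt_f, w // opt_f]
    vae:           hypothetical object with decode()/encode() methods
    upscale_image: hypothetical callable that resizes images in [0, 1]
    """
    if use_scale_latent:
        # Cheap path: interpolate in latent space, no VAE round-trip.
        return F.interpolate(samples, size=(height // opt_f, width // opt_f), mode="bilinear")

    # Image-space path: decode, upscale the decoded image, then re-encode.
    decoded = vae.decode(samples)                                   # latents -> images in [-1, 1]
    images = torch.clamp((decoded + 1.0) / 2.0, min=0.0, max=1.0)   # rescale to [0, 1] for the upscaler
    upscaled = upscale_image(images, width, height)                 # e.g. an ESRGAN model or plain bilinear resize
    upscaled = 2.0 * upscaled - 1.0                                 # back to [-1, 1] for the VAE
    return vae.encode(upscaled)                                     # images -> latents again

The latent path skips the decode/encode round-trip and is therefore faster, while the image-space path lets a dedicated upscaler do the resizing and generally preserves more detail; exposing use_scale_latent_for_hires_fix in settings lets the user choose that trade-off.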