| field | value | date |
|---|---|---|
| author | 不会画画的中医不是好程序员 <yfszzx@gmail.com> | 2022-10-16 02:04:05 +0000 |
| committer | GitHub <noreply@github.com> | 2022-10-16 02:04:05 +0000 |
| commit | d41ac174e24e1e7cdcf7b42f2a03cbc6394eb5e5 (patch) | |
| tree | da39175c109598f17e89fafe57aac9b9597ff616 /modules/processing.py | |
| parent | 6e4f5566b58e36aede83427df6c69eba8517af28 (diff) | |
| parent | be1596ce30b1ead6998da0c62003003dcce5eb2c (diff) | |
Merge branch 'AUTOMATIC1111:master' into master
Diffstat (limited to 'modules/processing.py')
-rw-r--r-- | modules/processing.py | 12 |
1 file changed, 6 insertions, 6 deletions
diff --git a/modules/processing.py b/modules/processing.py
index a75b9f84..941ae089 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -140,7 +140,7 @@ class Processed:
         self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override
         self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0]
         self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0]
-        self.seed = int(self.seed if type(self.seed) != list else self.seed[0])
+        self.seed = int(self.seed if type(self.seed) != list else self.seed[0]) if self.seed is not None else -1
         self.subseed = int(self.subseed if type(self.subseed) != list else self.subseed[0]) if self.subseed is not None else -1
 
         self.all_prompts = all_prompts or [self.prompt]
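In the hunk above, the new trailing conditional guards against `self.seed` being `None`, mirroring the guard that already existed for `self.subseed`. A minimal standalone sketch of the same expression (the `normalize_seed` helper is illustrative, not part of the repository):

```python
def normalize_seed(seed):
    # Same shape as the patched expression: take the first element if a list
    # of seeds was passed, cast to int, and fall back to -1 when seed is None.
    return int(seed if type(seed) != list else seed[0]) if seed is not None else -1


assert normalize_seed(None) == -1          # old code raised TypeError via int(None)
assert normalize_seed([1234, 99]) == 1234  # batched seeds: keep the first one
assert normalize_seed("42") == 42          # string seeds still cast cleanly
```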
@@ -528,7 +528,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
             firstphase_height_truncated = int(scale * self.height)
 
         else:
-            self.extra_generation_params["First pass size"] = f"{self.firstphase_width}x{self.firstphase_height}"
 
             width_ratio = self.width / self.firstphase_width
             height_ratio = self.height / self.firstphase_height
@@ -540,6 +539,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
                 firstphase_width_truncated = self.firstphase_height * self.width / self.height
                 firstphase_height_truncated = self.firstphase_height
 
+        self.extra_generation_params["First pass size"] = f"{self.firstphase_width}x{self.firstphase_height}"
         self.truncate_x = int(self.firstphase_width - firstphase_width_truncated) // opt_f
         self.truncate_y = int(self.firstphase_height - firstphase_height_truncated) // opt_f
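The two hunks above move the "First pass size" entry out of the `else` branch so it is recorded whether the first-pass resolution was supplied explicitly or derived automatically. A rough sketch of that control flow, assuming (as the scale-based computation in the context suggests) that the automatic branch fires when the requested first-pass size is zero; the function name and simplifications are mine, not the repository's:

```python
import math


def plan_first_pass(width, height, firstphase_width, firstphase_height, extra_generation_params):
    # Simplified: the real method also rounds to multiples of 64 and computes
    # truncation offsets for cropping the first-pass latent.
    if firstphase_width == 0 or firstphase_height == 0:
        # Derive a first-pass size targeting roughly 512x512 worth of pixels.
        scale = math.sqrt(512 * 512 / (width * height))
        firstphase_width = int(scale * width)
        firstphase_height = int(scale * height)
    # After this change the entry is written on both paths, so the generation
    # parameters always record which first-pass size was actually used.
    extra_generation_params["First pass size"] = f"{firstphase_width}x{firstphase_height}"
    return firstphase_width, firstphase_height
```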
@@ -557,11 +557,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
 
         samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2]
 
-        decoded_samples = decode_first_stage(self.sd_model, samples)
+        if opts.use_scale_latent_for_hires_fix:
+            samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
 
-        if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None":
-            decoded_samples = torch.nn.functional.interpolate(decoded_samples, size=(self.height, self.width), mode="bilinear")
         else:
+            decoded_samples = decode_first_stage(self.sd_model, samples)
             lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
 
             batch_images = []
@@ -578,7 +578,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
             decoded_samples = decoded_samples.to(shared.device)
             decoded_samples = 2. * decoded_samples - 1.
 
-        samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
+            samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
 
         shared.state.nextjob()
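Taken together, the last two hunks restructure the highres-fix upscaling step: with `use_scale_latent_for_hires_fix` enabled, the low-resolution latent is interpolated directly to the target latent size (`opt_f` is the latent downscale factor, 8 for Stable Diffusion); otherwise the latent is decoded, upscaled in pixel space, and re-encoded through the first stage, which is why the final `get_first_stage_encoding` call moved inside the `else` branch. A condensed sketch of that branch with stand-in `decode`, `encode`, and `upscale` callables (not the repository's actual helpers):

```python
import torch
import torch.nn.functional as F


def prepare_hires_latent(samples, height, width, use_scale_latent,
                         decode, encode, upscale, opt_f=8):
    # samples: low-res latent batch of shape (N, C, height0 // opt_f, width0 // opt_f).
    if use_scale_latent:
        # Cheap path: resize directly in latent space.
        return F.interpolate(samples, size=(height // opt_f, width // opt_f), mode="bilinear")
    # Quality path: decode to pixels, upscale there, then re-encode to a latent.
    decoded = decode(samples)                   # (N, 3, h, w), values in [-1, 1]
    images = torch.clamp((decoded + 1.0) / 2.0, min=0.0, max=1.0)
    upscaled = upscale(images, height, width)   # pixel-space upscaler of choice
    return encode(2.0 * upscaled - 1.0)         # back to [-1, 1], then to latent space
```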