diff options
author | AUTOMATIC <16777216c@gmail.com> | 2023-05-17 06:24:01 +0000 |
---|---|---|
committer | AUTOMATIC <16777216c@gmail.com> | 2023-05-17 06:24:01 +0000 |
commit | 56a2672831751480f94a018f861f0143a8234ae8 (patch) | |
tree | b7b4a37178c6a4945a748b9c94b81c259e4315b8 /modules/sd_samplers_common.py | |
parent | b217ebc49000b41baab3094dbc8caaf33eaf5579 (diff) | |
download | stable-diffusion-webui-gfx803-56a2672831751480f94a018f861f0143a8234ae8.tar.gz stable-diffusion-webui-gfx803-56a2672831751480f94a018f861f0143a8234ae8.tar.bz2 stable-diffusion-webui-gfx803-56a2672831751480f94a018f861f0143a8234ae8.zip |
return live preview defaults to how they were
only download TAESD model when it's needed
return calculations in single_sample_to_image to just if/elif/elif blocks
keep taesd model in its own directory
Diffstat (limited to 'modules/sd_samplers_common.py')
-rw-r--r-- | modules/sd_samplers_common.py | 29 |
1 file changed, 15 insertions, 14 deletions
diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index b1e8a780..20a9af20 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -22,28 +22,29 @@ def setup_img2img_steps(p, steps=None):
     return steps, t_enc
-approximation_indexes = {"Full": 0, "Tiny AE": 1, "Approx NN": 2, "Approx cheap": 3}
+approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2, "TAESD": 3}
def single_sample_to_image(sample, approximation=None):
- if approximation is None or approximation not in approximation_indexes.keys():
- approximation = approximation_indexes.get(opts.show_progress_type, 1)
- if approximation == 1:
- x_sample = sd_vae_taesd.decode()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
- x_sample = sd_vae_taesd.TAESD.unscale_latents(x_sample)
- x_sample = torch.clamp((x_sample * 0.25) + 0.5, 0, 1)
+ if approximation is None:
+ approximation = approximation_indexes.get(opts.show_progress_type, 0)
+
+ if approximation == 2:
+ x_sample = sd_vae_approx.cheap_approximation(sample)
+ elif approximation == 1:
+ x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
+ elif approximation == 3:
+ x_sample = sd_vae_taesd.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
+ x_sample = sd_vae_taesd.TAESD.unscale_latents(x_sample) # returns value in [-2, 2]
+ x_sample = x_sample * 0.5
else:
- if approximation == 3:
- x_sample = sd_vae_approx.cheap_approximation(sample)
- elif approximation == 2:
- x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
- else:
- x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
- x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
+ x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
+ x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
+
return Image.fromarray(x_sample)
|