diff options
author | AUTOMATIC1111 <16777216c@gmail.com> | 2023-05-17 06:26:50 +0000 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-05-17 06:26:50 +0000 |
commit | 9ac85b8b73e180154453609f10b044a475289e24 (patch) | |
tree | 3af76d8c6ba3173ffd925336d902da058df4e02d /modules/sd_samplers_common.py | |
parent | 4b07f2f584596604c4499efb0b0295e96985080f (diff) | |
parent | 85232a5b26666854deae59cf950f744740dd5c37 (diff) | |
download | stable-diffusion-webui-gfx803-9ac85b8b73e180154453609f10b044a475289e24.tar.gz stable-diffusion-webui-gfx803-9ac85b8b73e180154453609f10b044a475289e24.tar.bz2 stable-diffusion-webui-gfx803-9ac85b8b73e180154453609f10b044a475289e24.zip |
Merge pull request #10365 from Sakura-Luna/taesd-a
Add Tiny AE live preview
Diffstat (limited to 'modules/sd_samplers_common.py')
-rw-r--r-- | modules/sd_samplers_common.py | 10 |
1 file changed, 8 insertions, 2 deletions
diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index 92880caf..ceda6a35 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -2,7 +2,7 @@ from collections import namedtuple
import numpy as np
import torch
from PIL import Image
-from modules import devices, processing, images, sd_vae_approx, sd_samplers
+from modules import devices, processing, images, sd_vae_approx, sd_samplers, sd_vae_taesd
from modules.shared import opts, state
import modules.shared as shared
@@ -22,10 +22,11 @@ def setup_img2img_steps(p, steps=None):
    return steps, t_enc
-approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2}
+approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2, "TAESD": 3}
def single_sample_to_image(sample, approximation=None):
+
if approximation is None:
approximation = approximation_indexes.get(opts.show_progress_type, 0)
@@ -33,12 +34,17 @@ def single_sample_to_image(sample, approximation=None):
        x_sample = sd_vae_approx.cheap_approximation(sample)
elif approximation == 1:
x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
+ elif approximation == 3:
+ x_sample = sd_vae_taesd.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
+ x_sample = sd_vae_taesd.TAESD.unscale_latents(x_sample) # returns value in [-2, 2]
+ x_sample = x_sample * 0.5
else:
x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
+
return Image.fromarray(x_sample)
|