From 11dd79e346bd780bc5c3119df962e7a9c20f2493 Mon Sep 17 00:00:00 2001
From: AbstractQbit <38468635+AbstractQbit@users.noreply.github.com>
Date: Sat, 24 Dec 2022 14:00:17 +0300
Subject: Add an option for faster low quality previews

---
 modules/sd_samplers.py | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index d26e48dc..fbb56af4 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -106,20 +106,29 @@ def setup_img2img_steps(p, steps=None):
     return steps, t_enc
 
 
-def single_sample_to_image(sample):
-    x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
+def single_sample_to_image(sample, approximation=False):
+    if approximation:
+        # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/2
+        coefs = torch.tensor(
+            [[ 0.298, 0.207, 0.208],
+             [ 0.187, 0.286, 0.173],
+             [-0.158, 0.189, 0.264],
+             [-0.184, -0.271, -0.473]]).to(sample.device)
+        x_sample = torch.einsum("lxy,lr -> rxy", sample, coefs)
+    else:
+        x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
     x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
     x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
     x_sample = x_sample.astype(np.uint8)
     return Image.fromarray(x_sample)
 
 
-def sample_to_image(samples, index=0):
-    return single_sample_to_image(samples[index])
+def sample_to_image(samples, index=0, approximation=False):
+    return single_sample_to_image(samples[index], approximation)
 
 
-def samples_to_image_grid(samples):
-    return images.image_grid([single_sample_to_image(sample) for sample in samples])
+def samples_to_image_grid(samples, approximation=False):
+    return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples])
 
 
 def store_latent(decoded):
@@ -127,7 +136,7 @@ def store_latent(decoded):
 
     if opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
         if not shared.parallel_processing_allowed:
-            shared.state.current_image = sample_to_image(decoded)
+            shared.state.current_image = sample_to_image(decoded, approximation=opts.show_progress_approximate)
 
 
 class InterruptedException(BaseException):
--
cgit v1.2.3
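
Note on the technique: instead of running the full VAE decoder (processing.decode_first_stage), the approximate preview projects the 4-channel latent to RGB with a fixed 4x3 matrix, which is why it is fast but low quality. The following is a minimal standalone sketch of that projection under the assumption that torch, numpy, and Pillow are available; the function name approx_latent_to_image and the random placeholder latent are illustrative and not part of this patch.

import numpy as np
import torch
from PIL import Image

def approx_latent_to_image(sample: torch.Tensor) -> Image.Image:
    # Linear map from the 4 latent channels to RGB, coefficients per
    # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/2
    coefs = torch.tensor(
        [[ 0.298,  0.207,  0.208],
         [ 0.187,  0.286,  0.173],
         [-0.158,  0.189,  0.264],
         [-0.184, -0.271, -0.473]]).to(sample.device)
    x = torch.einsum("lxy,lr -> rxy", sample, coefs)     # (3, H, W), roughly in [-1, 1]
    x = torch.clamp((x + 1.0) / 2.0, min=0.0, max=1.0)   # map to [0, 1]
    x = 255. * np.moveaxis(x.cpu().numpy(), 0, 2)        # (H, W, 3) in [0, 255]
    return Image.fromarray(x.astype(np.uint8))

if __name__ == "__main__":
    latent = torch.randn(4, 64, 64)  # placeholder; a real latent comes from the sampler
    # The result is at latent resolution (1/8 of the image), so upscale it for display.
    approx_latent_to_image(latent).resize((512, 512)).save("preview.png")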