From 8b0703b8fcdab153958b11f0dd5e5b6b58565fed Mon Sep 17 00:00:00 2001
From: "Alex \"mcmonkey\" Goodwin"
Date: Fri, 16 Dec 2022 08:18:29 -0800
Subject: Add a workaround patch for DPM2 a issue

The DPM2 a and DPM2 a Karras samplers are both affected by an issue described
in https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/3483, which
can be resolved by a workaround suggested by the k-diffusion author at
https://github.com/crowsonkb/k-diffusion/issues/43#issuecomment-1305195666
---
 modules/sd_samplers.py | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 4c123d3b..b8e0ce53 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -494,6 +494,9 @@ class KDiffusionSampler:
 
         x = x * sigmas[0]
 
+        if self.funcname == "sample_dpm_2_ancestral": # workaround dpm2 a issue
+            sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
+
         extra_params_kwargs = self.initialize(p)
         if 'sigma_min' in inspect.signature(self.func).parameters:
             extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
--
cgit v1.2.3


From 180fdf7809ea18de2d3b04618846d5a4e33c002e Mon Sep 17 00:00:00 2001
From: "Alex \"mcmonkey\" Goodwin"
Date: Fri, 16 Dec 2022 08:42:00 -0800
Subject: apply to DPM2 (non-ancestral) as well

---
 modules/sd_samplers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index b8e0ce53..ae3d8bfa 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -494,7 +494,7 @@ class KDiffusionSampler:
 
         x = x * sigmas[0]
 
-        if self.funcname == "sample_dpm_2_ancestral": # workaround dpm2 a issue
+        if self.funcname in ['sample_dpm_2_ancestral', 'sample_dpm_2']:
             sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
 
         extra_params_kwargs = self.initialize(p)
--
cgit v1.2.3


From 7ba9bc2fdbfae8115294962510492faafeb48573 Mon Sep 17 00:00:00 2001
From: "Alex \"mcmonkey\" Goodwin"
Date: Sun, 18 Dec 2022 19:16:42 -0800
Subject: fix dpm2 in img2img as well

---
 modules/sd_samplers.py | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index ae3d8bfa..1a1b8919 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -454,6 +454,9 @@ class KDiffusionSampler:
         else:
             sigmas = self.model_wrap.get_sigmas(steps)
 
+        if self.funcname in ['sample_dpm_2_ancestral', 'sample_dpm_2']:
+            sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
+
         sigma_sched = sigmas[steps - t_enc - 1:]
         xi = x + noise * sigma_sched[0]
 
--
cgit v1.2.3
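The three patches above all make the same one-line edit to the noise schedule. As a rough standalone illustration of its effect (toy values only, not code from the patches), discarding the next-to-last sigma removes the problematic penultimate step for DPM2/DPM2 a while keeping the final sigma of zero:

import torch

# Toy stand-in for model_wrap.get_sigmas(steps); real k-diffusion schedules
# are longer but likewise end in 0.0.
sigmas = torch.tensor([14.6, 6.1, 2.6, 0.9, 0.3, 0.0])

# The workaround from the patches: drop the next-to-last sigma, keep the final 0.0.
sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])

print(sigmas)  # tensor([14.6000, 6.1000, 2.6000, 0.9000, 0.0000])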
From 399b229783a7b5fddab0a258740b4d59d668ee12 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 24 Dec 2022 09:03:45 +0300
Subject: eliminate duplicated code add an option to samplers for skipping next to last sigma

---
 modules/sd_samplers.py | 31 ++++++++++++++-----------------
 1 file changed, 14 insertions(+), 17 deletions(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 1a1b8919..d26e48dc 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -23,16 +23,16 @@ samplers_k_diffusion = [
     ('Euler', 'sample_euler', ['k_euler'], {}),
     ('LMS', 'sample_lms', ['k_lms'], {}),
     ('Heun', 'sample_heun', ['k_heun'], {}),
-    ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {}),
-    ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {}),
+    ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}),
+    ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True}),
     ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
     ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
     ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {}),
     ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}),
     ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}),
     ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
-    ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras'}),
-    ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras'}),
+    ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}),
+    ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}),
     ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
     ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
     ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras'}),
@@ -444,9 +444,7 @@ class KDiffusionSampler:
 
         return extra_params_kwargs
 
-    def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
-        steps, t_enc = setup_img2img_steps(p, steps)
-
+    def get_sigmas(self, p, steps):
         if p.sampler_noise_scheduler_override:
             sigmas = p.sampler_noise_scheduler_override(steps)
         elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
@@ -454,9 +452,16 @@ class KDiffusionSampler:
         else:
             sigmas = self.model_wrap.get_sigmas(steps)
 
-        if self.funcname in ['sample_dpm_2_ancestral', 'sample_dpm_2']:
+        if self.config is not None and self.config.options.get('discard_next_to_last_sigma', False):
             sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
 
+        return sigmas
+
+    def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+        steps, t_enc = setup_img2img_steps(p, steps)
+
+        sigmas = self.get_sigmas(p, steps)
+
         sigma_sched = sigmas[steps - t_enc - 1:]
         xi = x + noise * sigma_sched[0]
 
@@ -488,18 +493,10 @@ class KDiffusionSampler:
     def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning = None):
         steps = steps or p.steps
 
-        if p.sampler_noise_scheduler_override:
-            sigmas = p.sampler_noise_scheduler_override(steps)
-        elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
-            sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device)
-        else:
-            sigmas = self.model_wrap.get_sigmas(steps)
+        sigmas = self.get_sigmas(p, steps)
 
         x = x * sigmas[0]
 
-        if self.funcname in ['sample_dpm_2_ancestral', 'sample_dpm_2']:
-            sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
-
         extra_params_kwargs = self.initialize(p)
         if 'sigma_min' in inspect.signature(self.func).parameters:
             extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
--
cgit v1.2.3
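After this refactor the workaround is no longer keyed off the sampler's function name: any entry in samplers_k_diffusion whose options dict sets 'discard_next_to_last_sigma' gets the shortened schedule from the shared get_sigmas() helper. A minimal sketch of that flow, using simplified stand-ins (the helper name below is invented) rather than the real KDiffusionSampler and its config object:

import torch

# Simplified copy of one samplers_k_diffusion entry: (label, funcname, aliases, options).
dpm2_karras = ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'],
               {'scheduler': 'karras', 'discard_next_to_last_sigma': True})

def apply_schedule_options(sigmas, options):
    # Mirrors the discard logic inside KDiffusionSampler.get_sigmas().
    if options.get('discard_next_to_last_sigma', False):
        sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
    return sigmas

toy_sigmas = torch.linspace(14.6, 0.0, steps=7)            # toy schedule ending in 0.0
print(apply_schedule_options(toy_sigmas, dpm2_karras[3]))  # one value shorter, still ends at 0.0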
From 11dd79e346bd780bc5c3119df962e7a9c20f2493 Mon Sep 17 00:00:00 2001
From: AbstractQbit <38468635+AbstractQbit@users.noreply.github.com>
Date: Sat, 24 Dec 2022 14:00:17 +0300
Subject: Add an option for faster low quality previews

---
 modules/sd_samplers.py | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index d26e48dc..fbb56af4 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -106,20 +106,29 @@ def setup_img2img_steps(p, steps=None):
     return steps, t_enc
 
 
-def single_sample_to_image(sample):
-    x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
+def single_sample_to_image(sample, approximation=False):
+    if approximation:
+        # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/2
+        coefs = torch.tensor(
+            [[ 0.298, 0.207, 0.208],
+             [ 0.187, 0.286, 0.173],
+             [-0.158, 0.189, 0.264],
+             [-0.184, -0.271, -0.473]]).to(sample.device)
+        x_sample = torch.einsum("lxy,lr -> rxy", sample, coefs)
+    else:
+        x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
     x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
     x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
     x_sample = x_sample.astype(np.uint8)
     return Image.fromarray(x_sample)
 
 
-def sample_to_image(samples, index=0):
-    return single_sample_to_image(samples[index])
+def sample_to_image(samples, index=0, approximation=False):
+    return single_sample_to_image(samples[index], approximation)
 
 
-def samples_to_image_grid(samples):
-    return images.image_grid([single_sample_to_image(sample) for sample in samples])
+def samples_to_image_grid(samples, approximation=False):
+    return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples])
 
 
 def store_latent(decoded):
@@ -127,7 +136,7 @@ def store_latent(decoded):
 
     if opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
         if not shared.parallel_processing_allowed:
-            shared.state.current_image = sample_to_image(decoded)
+            shared.state.current_image = sample_to_image(decoded, approximation=opts.show_progress_approximate)
 
 
 class InterruptedException(BaseException):
--
cgit v1.2.3


From 0b8acce6a9a1418fa88a506450cd1b92e2d48986 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 24 Dec 2022 18:38:16 +0300
Subject: separate part of denoiser code into a function to make it easier for extensions to override it

---
 modules/sd_samplers.py | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index d26e48dc..8efe74df 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -288,6 +288,16 @@ class CFGDenoiser(torch.nn.Module):
         self.init_latent = None
         self.step = 0
 
+    def combine_denoised(self, x_out, conds_list, uncond, cond_scale):
+        denoised_uncond = x_out[-uncond.shape[0]:]
+        denoised = torch.clone(denoised_uncond)
+
+        for i, conds in enumerate(conds_list):
+            for cond_index, weight in conds:
+                denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
+
+        return denoised
+
     def forward(self, x, sigma, uncond, cond, cond_scale, image_cond):
         if state.interrupted or state.skipped:
             raise InterruptedException
@@ -329,12 +339,7 @@ class CFGDenoiser(torch.nn.Module):
 
             x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond={"c_crossattn": [uncond], "c_concat": [image_cond_in[-uncond.shape[0]:]]})
 
-        denoised_uncond = x_out[-uncond.shape[0]:]
-        denoised = torch.clone(denoised_uncond)
-
-        for i, conds in enumerate(conds_list):
-            for cond_index, weight in conds:
-                denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
+        denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
 
         if self.mask is not None:
             denoised = self.init_latent * self.mask + self.nmask * denoised
--
cgit v1.2.3
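The point of the patch above is that an extension can now change the classifier-free guidance mixing by replacing combine_denoised() alone instead of reimplementing all of forward(). A hypothetical sketch of such an override, meant to run inside the webui process (the cap on cond_scale and the monkey-patching style are illustrative assumptions, not part of this patch set):

from modules import sd_samplers

_original_combine_denoised = sd_samplers.CFGDenoiser.combine_denoised

def combine_denoised_capped(self, x_out, conds_list, uncond, cond_scale):
    # Same CFG mixing as the stock method, but with the effective guidance
    # scale limited to 10.0 -- purely an example of what an override can do.
    return _original_combine_denoised(self, x_out, conds_list, uncond, min(cond_scale, 10.0))

sd_samplers.CFGDenoiser.combine_denoised = combine_denoised_capped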
From 56e557c6ff8a6480887c9c585bf908045ee8e791 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 24 Dec 2022 22:39:00 +0300
Subject: added cheap NN approximation for VAE

---
 modules/sd_samplers.py | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 27ef4ff8..177b5338 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -9,7 +9,7 @@ import k_diffusion.sampling
 import torchsde._brownian.brownian_interval
 import ldm.models.diffusion.ddim
 import ldm.models.diffusion.plms
-from modules import prompt_parser, devices, processing, images
+from modules import prompt_parser, devices, processing, images, sd_vae_approx
 from modules.shared import opts, cmd_opts, state
 import modules.shared as shared
 
@@ -106,28 +106,31 @@ def setup_img2img_steps(p, steps=None):
     return steps, t_enc
 
 
-def single_sample_to_image(sample, approximation=False):
-    if approximation:
-        # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/2
-        coefs = torch.tensor(
-            [[ 0.298, 0.207, 0.208],
-             [ 0.187, 0.286, 0.173],
-             [-0.158, 0.189, 0.264],
-             [-0.184, -0.271, -0.473]]).to(sample.device)
-        x_sample = torch.einsum("lxy,lr -> rxy", sample, coefs)
+approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2}
+
+
+def single_sample_to_image(sample, approximation=None):
+    if approximation is None:
+        approximation = approximation_indexes.get(opts.show_progress_type, 0)
+
+    if approximation == 2:
+        x_sample = sd_vae_approx.cheap_approximation(sample)
+    elif approximation == 1:
+        x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
     else:
         x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
+
     x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
     x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
     x_sample = x_sample.astype(np.uint8)
     return Image.fromarray(x_sample)
 
 
-def sample_to_image(samples, index=0, approximation=False):
+def sample_to_image(samples, index=0, approximation=None):
     return single_sample_to_image(samples[index], approximation)
 
 
-def samples_to_image_grid(samples, approximation=False):
+def samples_to_image_grid(samples, approximation=None):
     return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples])
 
 
@@ -136,7 +139,7 @@ def store_latent(decoded):
 
     if opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
         if not shared.parallel_processing_allowed:
-            shared.state.current_image = sample_to_image(decoded, approximation=opts.show_progress_approximate)
+            shared.state.current_image = sample_to_image(decoded)
 
 
 class InterruptedException(BaseException):
--
cgit v1.2.3
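For reference, the "Approx cheap" path (approximation index 2), now housed in sd_vae_approx.cheap_approximation(), is the fixed 4-to-3 channel linear map introduced two patches earlier rather than a real VAE decode. A self-contained sketch of that preview decode, reassembled from the patched code above (the helper name is made up; the input is a single latent of shape (4, H, W)):

import numpy as np
import torch
from PIL import Image

def cheap_latent_preview(sample):
    # Fixed latent-to-RGB coefficients taken from the patch above.
    coefs = torch.tensor(
        [[ 0.298, 0.207, 0.208],
         [ 0.187, 0.286, 0.173],
         [-0.158, 0.189, 0.264],
         [-0.184, -0.271, -0.473]]).to(sample.device)
    x_sample = torch.einsum("lxy,lr -> rxy", sample, coefs)          # (4, H, W) -> (3, H, W)
    x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
    x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)      # channels-last for PIL
    return Image.fromarray(x_sample.astype(np.uint8))

# e.g. a 64x64 latent (a 512x512 image at the usual 8x VAE downscale):
# cheap_latent_preview(torch.randn(4, 64, 64))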