From 2a72d76d6f3d34b1ffccec7736b19e7d52033dad Mon Sep 17 00:00:00 2001 From: dhwz Date: Tue, 8 Aug 2023 19:08:37 +0200 Subject: fix typo --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 31745006..dc6e8ff1 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -568,7 +568,7 @@ def decode_latent_batch(model, batch, target_device=None, check_for_nans=False): errors.print_error_explanation( "A tensor with all NaNs was produced in VAE.\n" "Web UI will now convert VAE into 32-bit float and retry.\n" - "To disable this behavior, disable the 'Automaticlly revert VAE to 32-bit floats' setting.\n" + "To disable this behavior, disable the 'Automatically revert VAE to 32-bit floats' setting.\n" "To always start with 32-bit VAE, use --no-half-vae commandline flag." ) -- cgit v1.2.3 From d81d3fa8cde83ce1421889ed481a69c950c0c6f6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 9 Aug 2023 07:45:06 +0300 Subject: fix styles missing from the prompt in infotext when making a grid of a batch of multiple images --- modules/processing.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index d7266307..aa72b132 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -172,6 +172,8 @@ class StableDiffusionProcessing: self.iteration = 0 self.is_hr_pass = False self.sampler = None + self.main_prompt = None + self.main_negative_prompt = None self.prompts = None self.negative_prompts = None @@ -319,6 +321,9 @@ class StableDiffusionProcessing: self.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_prompts] self.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_negative_prompts] + self.main_prompt = self.all_prompts[0] + self.main_negative_prompt = self.all_negative_prompts[0] + def cached_params(self, required_prompts, steps, extra_network_data): """Returns parameters that invalidate the cond cache if changed""" @@ -653,8 +658,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None]) - prompt_text = p.prompt if use_main_prompt else all_prompts[index] - negative_prompt_text = f"\nNegative prompt: {all_negative_prompts[index]}" if all_negative_prompts[index] else "" + prompt_text = p.main_prompt if use_main_prompt else all_prompts[index] + negative_prompt_text = f"\nNegative prompt: {p.main_negative_prompt if use_main_prompt else all_negative_prompts[index]}" if all_negative_prompts[index] else "" return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip() -- cgit v1.2.3 From 0d5dc9a6e7f6362e423a06bf0e75dd5854025394 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 9 Aug 2023 08:43:31 +0300 Subject: rework RNG to use generators instead of generating noise beforehand --- modules/devices.py | 81 +------------------- modules/processing.py | 89 +++------------------- modules/rng.py | 171 ++++++++++++++++++++++++++++++++++++++++++ modules/sd_samplers_common.py | 24 +++--- modules/shared.py | 2 +- 5 files changed, 196 insertions(+), 171 deletions(-) create mode 100644 modules/rng.py (limited to 
'modules/processing.py') diff --git a/modules/devices.py b/modules/devices.py index 00a00b18..ce59dc53 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -3,7 +3,7 @@ import contextlib from functools import lru_cache import torch -from modules import errors, rng_philox +from modules import errors if sys.platform == "darwin": from modules import mac_specific @@ -96,84 +96,6 @@ def cond_cast_float(input): nv_rng = None -def randn(seed, shape): - """Generate a tensor with random numbers from a normal distribution using seed. - - Uses the seed parameter to set the global torch seed; to generate more with that seed, use randn_like/randn_without_seed.""" - - from modules.shared import opts - - manual_seed(seed) - - if opts.randn_source == "NV": - return torch.asarray(nv_rng.randn(shape), device=device) - - if opts.randn_source == "CPU" or device.type == 'mps': - return torch.randn(shape, device=cpu).to(device) - - return torch.randn(shape, device=device) - - -def randn_local(seed, shape): - """Generate a tensor with random numbers from a normal distribution using seed. - - Does not change the global random number generator. You can only generate the seed's first tensor using this function.""" - - from modules.shared import opts - - if opts.randn_source == "NV": - rng = rng_philox.Generator(seed) - return torch.asarray(rng.randn(shape), device=device) - - local_device = cpu if opts.randn_source == "CPU" or device.type == 'mps' else device - local_generator = torch.Generator(local_device).manual_seed(int(seed)) - return torch.randn(shape, device=local_device, generator=local_generator).to(device) - - -def randn_like(x): - """Generate a tensor with random numbers from a normal distribution using the previously initialized genrator. - - Use either randn() or manual_seed() to initialize the generator.""" - - from modules.shared import opts - - if opts.randn_source == "NV": - return torch.asarray(nv_rng.randn(x.shape), device=x.device, dtype=x.dtype) - - if opts.randn_source == "CPU" or x.device.type == 'mps': - return torch.randn_like(x, device=cpu).to(x.device) - - return torch.randn_like(x) - - -def randn_without_seed(shape): - """Generate a tensor with random numbers from a normal distribution using the previously initialized genrator. 
- - Use either randn() or manual_seed() to initialize the generator.""" - - from modules.shared import opts - - if opts.randn_source == "NV": - return torch.asarray(nv_rng.randn(shape), device=device) - - if opts.randn_source == "CPU" or device.type == 'mps': - return torch.randn(shape, device=cpu).to(device) - - return torch.randn(shape, device=device) - - -def manual_seed(seed): - """Set up a global random number generator using the specified seed.""" - from modules.shared import opts - - if opts.randn_source == "NV": - global nv_rng - nv_rng = rng_philox.Generator(seed) - return - - torch.manual_seed(seed) - - def autocast(disable=False): from modules import shared @@ -236,3 +158,4 @@ def first_time_calculation(): x = torch.zeros((1, 1, 3, 3)).to(device, dtype) conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype) conv2d(x) + diff --git a/modules/processing.py b/modules/processing.py index aa72b132..2df5e8c7 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -14,7 +14,7 @@ from skimage import exposure from typing import Any, Dict, List import modules.sd_hijack -from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors +from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors, rng from modules.sd_hijack import model_hijack from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes from modules.shared import opts, cmd_opts, state @@ -186,6 +186,7 @@ class StableDiffusionProcessing: self.cached_c = StableDiffusionProcessing.cached_c self.uc = None self.c = None + self.rng: rng.ImageRNG = None self.user = None @@ -475,82 +476,9 @@ class Processed: return self.token_merging_ratio_hr if for_hr else self.token_merging_ratio -# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3 -def slerp(val, low, high): - low_norm = low/torch.norm(low, dim=1, keepdim=True) - high_norm = high/torch.norm(high, dim=1, keepdim=True) - dot = (low_norm*high_norm).sum(1) - - if dot.mean() > 0.9995: - return low * val + high * (1 - val) - - omega = torch.acos(dot) - so = torch.sin(omega) - res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high - return res - - def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None): - eta_noise_seed_delta = opts.eta_noise_seed_delta or 0 - xs = [] - - # if we have multiple seeds, this means we are working with batch size>1; this then - # enables the generation of additional tensors with noise that the sampler will use during its processing. - # Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to - # produce the same images as with two batches [100], [101]. 
- if p is not None and p.sampler is not None and (len(seeds) > 1 and opts.enable_batch_seeds or eta_noise_seed_delta > 0): - sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))] - else: - sampler_noises = None - - for i, seed in enumerate(seeds): - noise_shape = shape if seed_resize_from_h <= 0 or seed_resize_from_w <= 0 else (shape[0], seed_resize_from_h//8, seed_resize_from_w//8) - - subnoise = None - if subseeds is not None and subseed_strength != 0: - subseed = 0 if i >= len(subseeds) else subseeds[i] - - subnoise = devices.randn(subseed, noise_shape) - - # randn results depend on device; gpu and cpu get different results for same seed; - # the way I see it, it's better to do this on CPU, so that everyone gets same result; - # but the original script had it like this, so I do not dare change it for now because - # it will break everyone's seeds. - noise = devices.randn(seed, noise_shape) - - if subnoise is not None: - noise = slerp(subseed_strength, noise, subnoise) - - if noise_shape != shape: - x = devices.randn(seed, shape) - dx = (shape[2] - noise_shape[2]) // 2 - dy = (shape[1] - noise_shape[1]) // 2 - w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx - h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy - tx = 0 if dx < 0 else dx - ty = 0 if dy < 0 else dy - dx = max(-dx, 0) - dy = max(-dy, 0) - - x[:, ty:ty+h, tx:tx+w] = noise[:, dy:dy+h, dx:dx+w] - noise = x - - if sampler_noises is not None: - cnt = p.sampler.number_of_needed_noises(p) - - if eta_noise_seed_delta > 0: - devices.manual_seed(seed + eta_noise_seed_delta) - - for j in range(cnt): - sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape))) - - xs.append(noise) - - if sampler_noises is not None: - p.sampler.sampler_noises = [torch.stack(n).to(shared.device) for n in sampler_noises] - - x = torch.stack(xs).to(shared.device) - return x + g = rng.ImageRNG(shape, seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=seed_resize_from_h, seed_resize_from_w=seed_resize_from_w) + return g.next() class DecodedSamples(list): @@ -769,6 +697,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: p.seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size] p.subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size] + p.rng = rng.ImageRNG((opt_C, p.height // opt_f, p.width // opt_f), p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w) + if p.scripts is not None: p.scripts.before_process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds) @@ -1072,7 +1002,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) - x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) + x = self.rng.next() samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) del x @@ -1160,7 +1090,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, 
self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2] - noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self) + self.rng = rng.ImageRNG(samples.shape[1:], self.seeds, subseeds=self.subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w) + noise = self.rng.next() # GC now before running the next img2img to prevent running out of memory devices.torch_gc() @@ -1418,7 +1349,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, image_mask) def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): - x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) + x = self.rng.next() if self.initial_noise_multiplier != 1.0: self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier diff --git a/modules/rng.py b/modules/rng.py new file mode 100644 index 00000000..2d7baea5 --- /dev/null +++ b/modules/rng.py @@ -0,0 +1,171 @@ +import torch + +from modules import devices, rng_philox, shared + + +def randn(seed, shape, generator=None): + """Generate a tensor with random numbers from a normal distribution using seed. + + Uses the seed parameter to set the global torch seed; to generate more with that seed, use randn_like/randn_without_seed.""" + + manual_seed(seed) + + if shared.opts.randn_source == "NV": + return torch.asarray((generator or nv_rng).randn(shape), device=devices.device) + + if shared.opts.randn_source == "CPU" or devices.device.type == 'mps': + return torch.randn(shape, device=devices.cpu, generator=generator).to(devices.device) + + return torch.randn(shape, device=devices.device, generator=generator) + + +def randn_local(seed, shape): + """Generate a tensor with random numbers from a normal distribution using seed. + + Does not change the global random number generator. You can only generate the seed's first tensor using this function.""" + + if shared.opts.randn_source == "NV": + rng = rng_philox.Generator(seed) + return torch.asarray(rng.randn(shape), device=devices.device) + + local_device = devices.cpu if shared.opts.randn_source == "CPU" or devices.device.type == 'mps' else devices.device + local_generator = torch.Generator(local_device).manual_seed(int(seed)) + return torch.randn(shape, device=local_device, generator=local_generator).to(devices.device) + + +def randn_like(x): + """Generate a tensor with random numbers from a normal distribution using the previously initialized generator. + + Use either randn() or manual_seed() to initialize the generator.""" + + if shared.opts.randn_source == "NV": + return torch.asarray(nv_rng.randn(x.shape), device=x.device, dtype=x.dtype) + + if shared.opts.randn_source == "CPU" or x.device.type == 'mps': + return torch.randn_like(x, device=devices.cpu).to(x.device) + + return torch.randn_like(x) + + +def randn_without_seed(shape, generator=None): + """Generate a tensor with random numbers from a normal distribution using the previously initialized generator. 
+ + Use either randn() or manual_seed() to initialize the generator.""" + + if shared.opts.randn_source == "NV": + return torch.asarray((generator or nv_rng).randn(shape), device=devices.device) + + if shared.opts.randn_source == "CPU" or devices.device.type == 'mps': + return torch.randn(shape, device=devices.cpu, generator=generator).to(devices.device) + + return torch.randn(shape, device=devices.device, generator=generator) + + +def manual_seed(seed): + """Set up a global random number generator using the specified seed.""" + from modules.shared import opts + + if opts.randn_source == "NV": + global nv_rng + nv_rng = rng_philox.Generator(seed) + return + + torch.manual_seed(seed) + + +def create_generator(seed): + if shared.opts.randn_source == "NV": + return rng_philox.Generator(seed) + + device = devices.cpu if shared.opts.randn_source == "CPU" or devices.device.type == 'mps' else devices.device + generator = torch.Generator(device).manual_seed(int(seed)) + return generator + + +# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3 +def slerp(val, low, high): + low_norm = low/torch.norm(low, dim=1, keepdim=True) + high_norm = high/torch.norm(high, dim=1, keepdim=True) + dot = (low_norm*high_norm).sum(1) + + if dot.mean() > 0.9995: + return low * val + high * (1 - val) + + omega = torch.acos(dot) + so = torch.sin(omega) + res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high + return res + + +class ImageRNG: + def __init__(self, shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0): + self.shape = shape + self.seeds = seeds + self.subseeds = subseeds + self.subseed_strength = subseed_strength + self.seed_resize_from_h = seed_resize_from_h + self.seed_resize_from_w = seed_resize_from_w + + self.generators = [create_generator(seed) for seed in seeds] + + self.is_first = True + + def first(self): + noise_shape = self.shape if self.seed_resize_from_h <= 0 or self.seed_resize_from_w <= 0 else (self.shape[0], self.seed_resize_from_h // 8, self.seed_resize_from_w // 8) + + xs = [] + + for i, (seed, generator) in enumerate(zip(self.seeds, self.generators)): + subnoise = None + if self.subseeds is not None and self.subseed_strength != 0: + subseed = 0 if i >= len(self.subseeds) else self.subseeds[i] + subnoise = randn(subseed, noise_shape) + + if noise_shape != self.shape: + noise = randn(seed, noise_shape) + else: + noise = randn(seed, self.shape, generator=generator) + + if subnoise is not None: + noise = slerp(self.subseed_strength, noise, subnoise) + + if noise_shape != self.shape: + x = randn(seed, self.shape, generator=generator) + dx = (self.shape[2] - noise_shape[2]) // 2 + dy = (self.shape[1] - noise_shape[1]) // 2 + w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx + h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy + tx = 0 if dx < 0 else dx + ty = 0 if dy < 0 else dy + dx = max(-dx, 0) + dy = max(-dy, 0) + + x[:, ty:ty + h, tx:tx + w] = noise[:, dy:dy + h, dx:dx + w] + noise = x + + xs.append(noise) + + eta_noise_seed_delta = shared.opts.eta_noise_seed_delta or 0 + if eta_noise_seed_delta: + self.generators = [create_generator(seed + eta_noise_seed_delta) for seed in self.seeds] + + return torch.stack(xs).to(shared.device) + + def next(self): + if self.is_first: + self.is_first = False + return self.first() + + xs = [] + for generator in self.generators: + x = randn_without_seed(self.shape, generator=generator) + xs.append(x) 
+ + return torch.stack(xs).to(shared.device) + + +devices.randn = randn +devices.randn_local = randn_local +devices.randn_like = randn_like +devices.randn_without_seed = randn_without_seed +devices.manual_seed = manual_seed diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index adda963b..97bc0804 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -1,5 +1,5 @@ import inspect -from collections import namedtuple, deque +from collections import namedtuple import numpy as np import torch from PIL import Image @@ -132,10 +132,15 @@ replace_torchsde_browinan() class TorchHijack: - def __init__(self, sampler_noises): - # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based - # implementation. - self.sampler_noises = deque(sampler_noises) + """This is here to replace torch.randn_like of k-diffusion. + + k-diffusion has a random_sampler argument for most samplers, but not for all, so + this is needed to properly replace every use of torch.randn_like. + + We need to replace it so that images generated in batches are the same as images generated individually.""" + + def __init__(self, p): + self.rng = p.rng def __getattr__(self, item): if item == 'randn_like': @@ -147,12 +152,7 @@ class TorchHijack: raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") def randn_like(self, x): - if self.sampler_noises: - noise = self.sampler_noises.popleft() - if noise.shape == x.shape: - return noise - - return devices.randn_like(x) + return self.rng.next() class Sampler: @@ -215,7 +215,7 @@ class Sampler: self.eta = p.eta if p.eta is not None else getattr(opts, self.eta_option_field, 0.0) self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) - k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) + k_diffusion.sampling.torch = TorchHijack(p) extra_params_kwargs = {} for param_name in self.extra_params: diff --git a/modules/shared.py b/modules/shared.py index e34847ce..e9b980a4 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -16,7 +16,7 @@ import modules.interrogate import modules.memmon import modules.styles import modules.devices as devices -from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args +from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args, rng # noqa: F401 from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 from ldm.models.diffusion.ddpm import LatentDiffusion from typing import Optional -- cgit v1.2.3 From 259805947e4e567abcf6a4be20fe520d456c251f Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Wed, 9 Aug 2023 14:24:16 -0400 Subject: Add slerp import for extension backwards compat --- modules/processing.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 2df5e8c7..6961b7b1 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -15,6 +15,7 @@ from typing import Any, Dict, List import modules.sd_hijack from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors, rng +from modules.rng import slerp # noqa: F401 from modules.sd_hijack 
import model_hijack from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes from modules.shared import opts, cmd_opts, state -- cgit v1.2.3 From 9199b6b7ebe96cdf09571ba874a103e8ed8c90ef Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 11:20:46 +0300 Subject: add a custom UI element that combines accordion and checkbox rework hires fix UI to use accordion prevent bogus progress output in console when calculating hires fix dimensions --- javascript/inputAccordion.js | 37 +++++++++++++++++++++ modules/processing.py | 77 +++++++++++++++++++++++--------------------- modules/ui.py | 23 ++++--------- modules/ui_components.py | 31 ++++++++++++++++++ style.css | 15 +++++---- 5 files changed, 124 insertions(+), 59 deletions(-) create mode 100644 javascript/inputAccordion.js (limited to 'modules/processing.py') diff --git a/javascript/inputAccordion.js b/javascript/inputAccordion.js new file mode 100644 index 00000000..a5eef229 --- /dev/null +++ b/javascript/inputAccordion.js @@ -0,0 +1,37 @@ +var observerAccordionOpen = new MutationObserver(function(mutations) { + mutations.forEach(function(mutationRecord) { + var elem = mutationRecord.target; + var open = elem.classList.contains('open'); + + var accordion = elem.parentNode; + accordion.classList.toggle('input-accordion-open', open); + + var checkbox = gradioApp().querySelector('#' + accordion.id + "-checkbox input"); + checkbox.checked = open; + updateInput(checkbox); + + extra = gradioApp().querySelector('#' + accordion.id + "-extra"); + if(extra){ + extra.style.display = open ? "" : "none"; + } + }); +}); + +function inputAccordionChecked(id, checked){ + var label = gradioApp().querySelector('#' + id + " .label-wrap"); + if(label.classList.contains('open') != checked){ + label.click(); + } +} + +onUiLoaded(function() { + for (var accordion of gradioApp().querySelectorAll('.input-accordion')) { + var labelWrap = accordion.querySelector('.label-wrap'); + observerAccordionOpen.observe(labelWrap, {attributes: true, attributeFilter: ['class']}); + + var extra = gradioApp().querySelector('#' + accordion.id + "-extra"); + if(extra){ + labelWrap.insertBefore(extra, labelWrap.lastElementChild) + } + } +}); diff --git a/modules/processing.py b/modules/processing.py index 6961b7b1..7819644c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -924,6 +924,45 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.hr_c = None self.hr_uc = None + def calculate_target_resolution(self): + if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height): + self.hr_resize_x = self.width + self.hr_resize_y = self.height + self.hr_upscale_to_x = self.width + self.hr_upscale_to_y = self.height + + self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height) + self.applied_old_hires_behavior_to = (self.width, self.height) + + if self.hr_resize_x == 0 and self.hr_resize_y == 0: + self.extra_generation_params["Hires upscale"] = self.hr_scale + self.hr_upscale_to_x = int(self.width * self.hr_scale) + self.hr_upscale_to_y = int(self.height * self.hr_scale) + else: + self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}" + + if self.hr_resize_y == 0: + self.hr_upscale_to_x = self.hr_resize_x + self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width + elif self.hr_resize_x == 0: + self.hr_upscale_to_x = self.hr_resize_y * self.width // 
self.height + self.hr_upscale_to_y = self.hr_resize_y + else: + target_w = self.hr_resize_x + target_h = self.hr_resize_y + src_ratio = self.width / self.height + dst_ratio = self.hr_resize_x / self.hr_resize_y + + if src_ratio < dst_ratio: + self.hr_upscale_to_x = self.hr_resize_x + self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width + else: + self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height + self.hr_upscale_to_y = self.hr_resize_y + + self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f + self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f + def init(self, all_prompts, all_seeds, all_subseeds): if self.enable_hr: if self.hr_checkpoint_name: @@ -948,43 +987,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers): raise Exception(f"could not find upscaler named {self.hr_upscaler}") - if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height): - self.hr_resize_x = self.width - self.hr_resize_y = self.height - self.hr_upscale_to_x = self.width - self.hr_upscale_to_y = self.height - - self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height) - self.applied_old_hires_behavior_to = (self.width, self.height) - - if self.hr_resize_x == 0 and self.hr_resize_y == 0: - self.extra_generation_params["Hires upscale"] = self.hr_scale - self.hr_upscale_to_x = int(self.width * self.hr_scale) - self.hr_upscale_to_y = int(self.height * self.hr_scale) - else: - self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}" - - if self.hr_resize_y == 0: - self.hr_upscale_to_x = self.hr_resize_x - self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width - elif self.hr_resize_x == 0: - self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height - self.hr_upscale_to_y = self.hr_resize_y - else: - target_w = self.hr_resize_x - target_h = self.hr_resize_y - src_ratio = self.width / self.height - dst_ratio = self.hr_resize_x / self.hr_resize_y - - if src_ratio < dst_ratio: - self.hr_upscale_to_x = self.hr_resize_x - self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width - else: - self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height - self.hr_upscale_to_y = self.hr_resize_y - - self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f - self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f + self.calculate_target_resolution() if not state.processing_has_refined_job_count: if state.job_count == -1: diff --git a/modules/ui.py b/modules/ui.py index 4e1daa8d..cbad3afe 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -14,7 +14,7 @@ from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_grad from modules import gradio_extensons # noqa: F401 from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, devices, ui_extra_networks -from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML +from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion from modules.paths import script_path from modules.ui_common import create_refresh_button from modules.ui_gradio_extensions import reload_javascript @@ -94,11 +94,9 @@ def calc_resolution_hires(enable, 
width, height, hr_scale, hr_resize_x, hr_resiz return "" p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y) + p.calculate_target_resolution() - with devices.autocast(): - p.init([""], [0], [0]) - - return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" + return f"from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" def resize_from_to_html(width, height, scale_by): @@ -436,11 +434,12 @@ def create_ui(): with FormRow(elem_classes="checkboxes-row", variant="compact"): restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") - enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr") - hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) elif category == "hires_fix": - with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: + with InputAccordion(False, label="Hires. fix") as enable_hr: + with enable_hr.extra(): + hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False, min_width=0) + with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") @@ -568,13 +567,6 @@ def create_ui(): show_progress=False, ) - enable_hr.change( - fn=lambda x: gr_show(x), - inputs=[enable_hr], - outputs=[hr_options], - show_progress = False, - ) - txt2img_paste_fields = [ (toprow.prompt, "Prompt"), (toprow.negative_prompt, "Negative prompt"), @@ -594,7 +586,6 @@ def create_ui(): (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (enable_hr, lambda d: "Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d)), - (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d))), (hr_scale, "Hires upscale"), (hr_upscaler, "Hires upscaler"), (hr_second_pass_steps, "Hires steps"), diff --git a/modules/ui_components.py b/modules/ui_components.py index 8f8a7088..598ce738 100644 --- a/modules/ui_components.py +++ b/modules/ui_components.py @@ -72,3 +72,34 @@ class DropdownEditable(FormComponent, gr.Dropdown): def get_block_name(self): return "dropdown" + +class InputAccordion(gr.Checkbox): + global_index = 0 + + def __init__(self, value, **kwargs): + self.accordion_id = kwargs.get('elem_id') + if self.accordion_id is None: + self.accordion_id = f"input-accordion-{self.global_index}" + self.global_index += 1 + + kwargs['elem_id'] = self.accordion_id + "-checkbox" + kwargs['visible'] = False + super().__init__(value, **kwargs) + + self.change(fn=None, _js='function(checked){ inputAccordionChecked("' + self.accordion_id + '", checked); }', inputs=[self]) + + self.accordion = gr.Accordion(kwargs.get('label', 'Accordion'), 
open=value, elem_id=self.accordion_id, elem_classes=['input-accordion']) + + def extra(self): + return gr.Column(elem_id=self.accordion_id + '-extra', elem_classes='input-accordion-extra', min_width=0) + + def __enter__(self): + self.accordion.__enter__() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.accordion.__exit__(exc_type, exc_val, exc_tb) + + def get_block_name(self): + return "checkbox" + diff --git a/style.css b/style.css index dfd5ca36..5163e53c 100644 --- a/style.css +++ b/style.css @@ -329,12 +329,6 @@ div.gradio-accordion { border-radius: 0 0.5rem 0.5rem 0; } -#txtimg_hr_finalres{ - min-height: 0 !important; - padding: .625rem .75rem; - margin-left: -0.75em -} - #img2img_scale_resolution_preview.block{ display: flex; align-items: end; @@ -1016,3 +1010,12 @@ div.block.gradio-box.popup-dialog, .popup-dialog { div.block.gradio-box.popup-dialog > div:last-child, .popup-dialog > div:last-child{ margin-top: 1em; } + +div.block.input-accordion{ + margin-bottom: 0.4em; +} + +.input-accordion-extra{ + flex: 0 0 auto !important; + margin: 0 0.5em 0 auto; +} -- cgit v1.2.3 From 33446acf47a8c3e0c0964782189562df3c4bcf4f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 12:41:41 +0300 Subject: face restoration and tiling moved to settings - use "Options in main UI" setting if you want them back --- modules/generation_parameters_copypaste.py | 2 ++ modules/img2img.py | 4 +--- modules/processing.py | 11 +++++++++-- modules/shared_options.py | 2 ++ modules/txt2img.py | 4 +--- modules/ui.py | 12 ++---------- 6 files changed, 17 insertions(+), 18 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index d932c67d..bdff3266 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -343,6 +343,8 @@ infotext_to_setting_name_mapping = [ ('Pad conds', 'pad_cond_uncond'), ('VAE Encoder', 'sd_vae_encode_method'), ('VAE Decoder', 'sd_vae_decode_method'), + ('Tiling', 'tiling'), + ('Face restoration', 'face_restoration'), ] diff --git a/modules/img2img.py b/modules/img2img.py index e06ac1d6..c7bbbac8 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -116,7 +116,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal process_images(p) -def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): +def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, 
inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5 @@ -179,8 +179,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s cfg_scale=cfg_scale, width=width, height=height, - restore_faces=restore_faces, - tiling=tiling, init_images=[image], mask=mask, mask_blur=mask_blur, diff --git a/modules/processing.py b/modules/processing.py index 7819644c..68a8f1c6 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -111,7 +111,7 @@ class StableDiffusionProcessing: cached_uc = [None, None] cached_c = [None, None] - def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = None, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): + def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = None, tiling: bool = None, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = None, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): if sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do 
anything; use sampler_name", file=sys.stderr) @@ -564,7 +564,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "CFG scale": p.cfg_scale, "Image CFG scale": getattr(p, 'image_cfg_scale', None), "Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index], - "Face restoration": (opts.face_restoration_model if p.restore_faces else None), + "Face restoration": opts.face_restoration_model if p.restore_faces else None, "Size": f"{p.width}x{p.height}", "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), "Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra), @@ -580,6 +580,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" and opts.randn_source != "NV" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, + "Tiling": "True" if p.tiling else None, **p.extra_generation_params, "Version": program_version() if opts.add_version_to_infotext else None, "User": p.user if opts.add_user_name_to_info else None, @@ -645,6 +646,12 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: seed = get_fixed_seed(p.seed) subseed = get_fixed_seed(p.subseed) + if p.restore_faces is None: + p.restore_faces = opts.face_restoration + + if p.tiling is None: + p.tiling = opts.tiling + modules.sd_hijack.model_hijack.apply_circular(p.tiling) modules.sd_hijack.model_hijack.clear_comments() diff --git a/modules/shared_options.py b/modules/shared_options.py index 7468bc81..f72859d9 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -92,6 +92,7 @@ options_templates.update(options_section(('upscaling', "Upscaling"), { })) options_templates.update(options_section(('face-restoration', "Face restoration"), { + "face_restoration": OptionInfo(False, "Restore faces").info("will use a third-party model on generation result to reconstruct faces"), "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in shared.face_restorers]}), "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"), "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), @@ -138,6 +139,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), + "tiling": OptionInfo(False, "Tiling").info("produce a tileable picture"), })) options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { diff --git a/modules/txt2img.py b/modules/txt2img.py index edad8930..5ea96bba 100644 --- a/modules/txt2img.py +++ 
b/modules/txt2img.py @@ -9,7 +9,7 @@ from modules.ui import plaintext_to_html import gradio as gr -def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): +def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) p = processing.StableDiffusionProcessingTxt2Img( @@ -32,8 +32,6 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step cfg_scale=cfg_scale, width=width, height=height, - restore_faces=restore_faces, - tiling=tiling, enable_hr=enable_hr, denoising_strength=denoising_strength if enable_hr else None, hr_scale=hr_scale, diff --git a/modules/ui.py b/modules/ui.py index cbad3afe..09a826fd 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -432,8 +432,7 @@ def create_ui(): elif category == "checkboxes": with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") + pass elif category == "hires_fix": with InputAccordion(False, label="Hires. 
fix") as enable_hr: @@ -516,8 +515,6 @@ def create_ui(): toprow.ui_styles.dropdown, steps, sampler_name, - restore_faces, - tiling, batch_count, batch_size, cfg_scale, @@ -572,7 +569,6 @@ def create_ui(): (toprow.negative_prompt, "Negative prompt"), (steps, "Steps"), (sampler_name, "Sampler"), - (restore_faces, "Face restoration"), (cfg_scale, "CFG scale"), (seed, "Seed"), (width, "Size-1"), @@ -792,8 +788,7 @@ def create_ui(): elif category == "checkboxes": with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") + pass elif category == "batch": if not opts.dimensions_and_batch_together: @@ -866,8 +861,6 @@ def create_ui(): mask_blur, mask_alpha, inpainting_fill, - restore_faces, - tiling, batch_count, batch_size, cfg_scale, @@ -959,7 +952,6 @@ def create_ui(): (toprow.negative_prompt, "Negative prompt"), (steps, "Steps"), (sampler_name, "Sampler"), - (restore_faces, "Face restoration"), (cfg_scale, "CFG scale"), (image_cfg_scale, "Image CFG scale"), (seed, "Seed"), -- cgit v1.2.3 From 1b3093fe3aedb20aa8d505ceeea7900ac592e6fe Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 15:58:53 +0300 Subject: fix --use-textbox-seed --- modules/processing.py | 12 ++++++++++-- modules/ui.py | 6 +++++- modules/ui_loadsave.py | 5 ++++- 3 files changed, 19 insertions(+), 4 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 68a8f1c6..f06c374a 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -521,7 +521,15 @@ def decode_latent_batch(model, batch, target_device=None, check_for_nans=False): def get_fixed_seed(seed): - if seed is None or seed == '' or seed == -1: + if seed == '' or seed is None: + seed = -1 + elif isinstance(seed, str): + try: + seed = int(seed) + except Exception: + seed = -1 + + if seed == -1: return int(random.randrange(4294967294)) return seed @@ -728,7 +736,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: # strength, which is saved as "Model Strength: 1.0" in the infotext if n == 0: with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file: - processed = Processed(p, [], p.seed, "") + processed = Processed(p, []) file.write(processed.infotext(p, 0)) p.setup_conds() diff --git a/modules/ui.py b/modules/ui.py index b87e95a6..e7433cbd 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -144,7 +144,11 @@ def interrogate_deepbooru(image): def create_seed_inputs(target_interface): with FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): - seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") + if cmd_opts.use_textbox_seed: + seed = gr.Textbox(label='Seed', value="", elem_id=f"{target_interface}_seed") + else: + seed = gr.Number(label='Seed', value=-1, elem_id=f"{target_interface}_seed") + random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') diff --git a/modules/ui_loadsave.py b/modules/ui_loadsave.py index 0052a5cc..99d763e1 100644 --- a/modules/ui_loadsave.py +++ b/modules/ui_loadsave.py @@ -8,7 +8,7 @@ from modules.ui_components import ToolButton class UiLoadsave: - 
"""allows saving and restorig default values for gradio components""" + """allows saving and restoring default values for gradio components""" def __init__(self, filename): self.filename = filename @@ -48,6 +48,9 @@ class UiLoadsave: elif condition and not condition(saved_value): pass else: + if isinstance(x, gr.Textbox) and field == 'value': # due to an undersirable behavior of gr.Textbox, if you give it an int value instead of str, everything dies + saved_value = str(saved_value) + setattr(obj, field, saved_value) if init_field is not None: init_field(saved_value) -- cgit v1.2.3 From 9d78d317ae492db59ebf8b31fda9a049f6c9bd14 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 16:22:10 +0300 Subject: add VAE to infotext --- modules/generation_parameters_copypaste.py | 1 + modules/processing.py | 2 ++ modules/sd_vae.py | 16 +++++++++++++++- 3 files changed, 18 insertions(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index bdff3266..6ace29cf 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -341,6 +341,7 @@ infotext_to_setting_name_mapping = [ ('RNG', 'randn_source'), ('NGMS', 's_min_uncond'), ('Pad conds', 'pad_cond_uncond'), + ('VAE', 'sd_vae'), ('VAE Encoder', 'sd_vae_encode_method'), ('VAE Decoder', 'sd_vae_decode_method'), ('Tiling', 'tiling'), diff --git a/modules/processing.py b/modules/processing.py index f06c374a..44d47e8c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -576,6 +576,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Size": f"{p.width}x{p.height}", "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), "Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra), + "VAE hash": sd_vae.get_loaded_vae_hash() if opts.add_model_hash_to_info else None, + "VAE": sd_vae.get_loaded_vae_name() if opts.add_model_name_to_info else None, "Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])), "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength), "Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"), diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 5ac1ac31..1db01992 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -2,7 +2,7 @@ import os import collections from dataclasses import dataclass -from modules import paths, shared, devices, script_callbacks, sd_models, extra_networks, lowvram, sd_hijack +from modules import paths, shared, devices, script_callbacks, sd_models, extra_networks, lowvram, sd_hijack, hashes import glob from copy import deepcopy @@ -20,6 +20,20 @@ checkpoint_info = None checkpoints_loaded = collections.OrderedDict() +def get_loaded_vae_name(): + if loaded_vae_file is None: + return None + + return os.path.basename(loaded_vae_file) + + +def get_loaded_vae_hash(): + if loaded_vae_file is None: + return None + + return hashes.sha256(loaded_vae_file, 'vae')[0:10] + + def get_base_vae(model): if base_vae is not None and checkpoint_info == model.sd_checkpoint_info and model: return base_vae -- cgit v1.2.3