From fa9370b7411166c19e8e386400dc4e6082f47b2d Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 13 Aug 2023 06:07:30 +0300
Subject: add refiner to StableDiffusionProcessing class

write out correct model name in infotext, rather than the refiner model

---
 modules/processing.py | 38 ++++++++++++++++++++++++++++++--------
 1 file changed, 30 insertions(+), 8 deletions(-)

(limited to 'modules/processing.py')

diff --git a/modules/processing.py b/modules/processing.py
index 6ad105d7..b47ddaa8 100755
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -111,7 +111,7 @@ class StableDiffusionProcessing:
     cached_uc = [None, None]
     cached_c = [None, None]

-    def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = None, tiling: bool = None, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = None, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None):
+    def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = None, tiling: bool = None, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = None, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, refiner_checkpoint: str = None, refiner_switch_at: float = None, script_args: list = None):
         if sampler_index is not None:
             print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)

@@ -153,10 +153,14 @@ class StableDiffusionProcessing:
         self.s_noise = s_noise if s_noise is not None else opts.s_noise
         self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts}
         self.override_settings_restore_afterwards = override_settings_restore_afterwards
+        self.refiner_checkpoint = refiner_checkpoint
+        self.refiner_switch_at = refiner_switch_at
+
         self.is_using_inpainting_conditioning = False
         self.disable_extra_networks = False
         self.token_merging_ratio = 0
         self.token_merging_ratio_hr = 0
+        self.refiner_checkpoint_info = None

         if not seed_enable_extras:
             self.subseed = -1
@@ -191,6 +195,11 @@ class StableDiffusionProcessing:

         self.user = None

+        self.sd_model_name = None
+        self.sd_model_hash = None
+        self.sd_vae_name = None
+        self.sd_vae_hash = None
+
     @property
     def sd_model(self):
         return shared.sd_model
@@ -408,7 +417,10 @@ class Processed:
         self.batch_size = p.batch_size
         self.restore_faces = p.restore_faces
         self.face_restoration_model = opts.face_restoration_model if p.restore_faces else None
-        self.sd_model_hash = shared.sd_model.sd_model_hash
+        self.sd_model_name = p.sd_model_name
+        self.sd_model_hash = p.sd_model_hash
+        self.sd_vae_name = p.sd_vae_name
+        self.sd_vae_hash = p.sd_vae_hash
         self.seed_resize_from_w = p.seed_resize_from_w
         self.seed_resize_from_h = p.seed_resize_from_h
         self.denoising_strength = getattr(p, 'denoising_strength', None)
@@ -459,7 +471,10 @@ class Processed:
             "batch_size": self.batch_size,
             "restore_faces": self.restore_faces,
             "face_restoration_model": self.face_restoration_model,
+            "sd_model_name": self.sd_model_name,
             "sd_model_hash": self.sd_model_hash,
+            "sd_vae_name": self.sd_vae_name,
+            "sd_vae_hash": self.sd_vae_hash,
             "seed_resize_from_w": self.seed_resize_from_w,
             "seed_resize_from_h": self.seed_resize_from_h,
             "denoising_strength": self.denoising_strength,
@@ -578,10 +593,10 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
         "Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index],
         "Face restoration": opts.face_restoration_model if p.restore_faces else None,
         "Size": f"{p.width}x{p.height}",
-        "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
-        "Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra),
-        "VAE hash": p.loaded_vae_hash if opts.add_model_hash_to_info else None,
-        "VAE": p.loaded_vae_name if opts.add_model_name_to_info else None,
+        "Model hash": p.sd_model_hash if opts.add_model_hash_to_info else None,
+        "Model": p.sd_model_name if opts.add_model_name_to_info else None,
+        "VAE hash": p.sd_vae_hash if opts.add_model_hash_to_info else None,
+        "VAE": p.sd_vae_name if opts.add_model_name_to_info else None,
         "Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])),
         "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
         "Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
@@ -670,8 +685,15 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
     if p.tiling is None:
         p.tiling = opts.tiling

-    p.loaded_vae_name = sd_vae.get_loaded_vae_name()
-    p.loaded_vae_hash = sd_vae.get_loaded_vae_hash()
+    if p.refiner_checkpoint not in (None, "", "None"):
+        p.refiner_checkpoint_info = sd_models.get_closet_checkpoint_match(p.refiner_checkpoint)
+        if p.refiner_checkpoint_info is None:
+            raise Exception(f'Could not find checkpoint with name {p.refiner_checkpoint}')
+
+    p.sd_model_name = shared.sd_model.sd_checkpoint_info.name_for_extra
+    p.sd_model_hash = shared.sd_model.sd_model_hash
+    p.sd_vae_name = sd_vae.get_loaded_vae_name()
+    p.sd_vae_hash = sd_vae.get_loaded_vae_hash()

     modules.sd_hijack.model_hijack.apply_circular(p.tiling)
     modules.sd_hijack.model_hijack.clear_comments()
-- cgit v1.2.3

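For orientation, the two new constructor arguments are all an API caller has to touch: `refiner_checkpoint` is resolved against the checkpoint list by `sd_models.get_closet_checkpoint_match()` at the start of `process_images_inner()`, and `refiner_switch_at` is stored on the processing object for the sampling code to act on. A minimal, hypothetical usage sketch follows; the checkpoint title, prompt, and the assumption that webui has already started and loaded a base model are illustrative, not part of the patch:

    from modules import processing

    p = processing.StableDiffusionProcessingTxt2Img(
        prompt="a photo of a cat",
        steps=30,
        do_not_save_samples=True,
        refiner_checkpoint="sd_xl_refiner_1.0",  # any title get_closet_checkpoint_match() can resolve
        refiner_switch_at=0.8,                   # presumably: hand off to the refiner at 80% of sampling
    )

    processed = processing.process_images(p)
    print(processed.info)  # "Model"/"Model hash" now report the base checkpoint, not the refiner

Note that the diff above only records the fields, resolves the refiner checkpoint, and fixes the infotext; it does not itself contain the sampling-time switch.
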
From 599f61a1e0bddf463dd3c6adb84509b3d9db1941 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 13 Aug 2023 08:24:16 +0300
Subject: use dataclass for StableDiffusionProcessing

---
 modules/processing.py         | 318 +++++++++++++++++++++++-------------------
 modules/sd_samplers_common.py |   5 +-
 2 files changed, 176 insertions(+), 147 deletions(-)

(limited to 'modules/processing.py')

diff --git a/modules/processing.py b/modules/processing.py
index b47ddaa8..007a4e05 100755
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1,9 +1,11 @@
+from __future__ import annotations
 import json
 import logging
 import math
 import os
 import sys
 import hashlib
+from dataclasses import dataclass, field

 import torch
 import numpy as np
@@ -11,7 +13,7 @@ from PIL import Image, ImageOps
 import random
 import cv2
 from skimage import exposure
-from typing import Any, Dict, List
+from typing import Any

 import modules.sd_hijack
 from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors, rng
@@ -104,106 +106,126 @@ def txt2img_image_conditioning(sd_model, x, width, height):
         return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)


+@dataclass(repr=False)
 class StableDiffusionProcessing:
-    """
-    The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing
-    """
+    sd_model: object = None
+    outpath_samples: str = None
+    outpath_grids: str = None
+    prompt: str = ""
+    prompt_for_display: str = None
+    negative_prompt: str = ""
+    styles: list[str] = field(default_factory=list)
+    seed: int = -1
+    subseed: int = -1
+    subseed_strength: float = 0
+    seed_resize_from_h: int = -1
+    seed_resize_from_w: int = -1
+    seed_enable_extras: bool = True
+    sampler_name: str = None
+    batch_size: int = 1
+    n_iter: int = 1
+    steps: int = 50
+    cfg_scale: float = 7.0
+    width: int = 512
+    height: int = 512
+    restore_faces: bool = None
+    tiling: bool = None
+    do_not_save_samples: bool = False
+    do_not_save_grid: bool = False
+    extra_generation_params: dict[str, Any] = None
+    overlay_images: list = None
+    eta: float = None
+    do_not_reload_embeddings: bool = False
+    denoising_strength: float = 0
+    ddim_discretize: str = None
+    s_min_uncond: float = None
+    s_churn: float = None
+    s_tmax: float = None
+    s_tmin: float = None
+    s_noise: float = None
+    override_settings: dict[str, Any] = None
+    override_settings_restore_afterwards: bool = True
+    sampler_index: int = None
+    refiner_checkpoint: str = None
+    refiner_switch_at: float = None
+    token_merging_ratio = 0
+    token_merging_ratio_hr = 0
+    disable_extra_networks: bool = False
+
+    script_args: list = None
+
     cached_uc = [None, None]
     cached_c = [None, None]

-    def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = None, tiling: bool = None, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = None, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, refiner_checkpoint: str = None, refiner_switch_at: float = None, script_args: list = None):
-        if sampler_index is not None:
+    sampler: sd_samplers_common.Sampler | None = field(default=None, init=False)
+    is_using_inpainting_conditioning: bool = field(default=False, init=False)
+    paste_to: tuple | None = field(default=None, init=False)
+
+    is_hr_pass: bool = field(default=False, init=False)
+
+    c: tuple = field(default=None, init=False)
+    uc: tuple = field(default=None, init=False)
+
+    rng: rng.ImageRNG | None = field(default=None, init=False)
+    step_multiplier: int = field(default=1, init=False)
+    color_corrections: list = field(default=None, init=False)
+
+    scripts: list = field(default=None, init=False)
+    all_prompts: list = field(default=None, init=False)
+    all_negative_prompts: list = field(default=None, init=False)
+    all_seeds: list = field(default=None, init=False)
+    all_subseeds: list = field(default=None, init=False)
+    iteration: int = field(default=0, init=False)
+    main_prompt: str = field(default=None, init=False)
+    main_negative_prompt: str = field(default=None, init=False)
+
+    prompts: list = field(default=None, init=False)
+    negative_prompts: list = field(default=None, init=False)
+    seeds: list = field(default=None, init=False)
+    subseeds: list = field(default=None, init=False)
+    extra_network_data: dict = field(default=None, init=False)
+
+    user: str = field(default=None, init=False)
+
+    sd_model_name: str = field(default=None, init=False)
+    sd_model_hash: str = field(default=None, init=False)
+    sd_vae_name: str = field(default=None, init=False)
+    sd_vae_hash: str = field(default=None, init=False)
+
+    def __post_init__(self):
+        if self.sampler_index is not None:
             print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)

-        self.outpath_samples: str = outpath_samples
-        self.outpath_grids: str = outpath_grids
-        self.prompt: str = prompt
-        self.prompt_for_display: str = None
-        self.negative_prompt: str = (negative_prompt or "")
-        self.styles: list = styles or []
-        self.seed: int = seed
-        self.subseed: int = subseed
-        self.subseed_strength: float = subseed_strength
-        self.seed_resize_from_h: int = seed_resize_from_h
-        self.seed_resize_from_w: int = seed_resize_from_w
-        self.sampler_name: str = sampler_name
-        self.batch_size: int = batch_size
-        self.n_iter: int = n_iter
-        self.steps: int = steps
-        self.cfg_scale: float = cfg_scale
-        self.width: int = width
-        self.height: int = height
-        self.restore_faces: bool = restore_faces
-        self.tiling: bool = tiling
-        self.do_not_save_samples: bool = do_not_save_samples
-        self.do_not_save_grid: bool = do_not_save_grid
-        self.extra_generation_params: dict = extra_generation_params or {}
-        self.overlay_images = overlay_images
-        self.eta = eta
-        self.do_not_reload_embeddings = do_not_reload_embeddings
-        self.paste_to = None
-        self.color_corrections = None
-        self.denoising_strength: float = denoising_strength
         self.sampler_noise_scheduler_override = None
-        self.ddim_discretize = ddim_discretize or opts.ddim_discretize
-        self.s_min_uncond = s_min_uncond or opts.s_min_uncond
-        self.s_churn = s_churn or opts.s_churn
-        self.s_tmin = s_tmin or opts.s_tmin
-        self.s_tmax = (s_tmax if s_tmax is not None else opts.s_tmax) or float('inf')
-        self.s_noise = s_noise if s_noise is not None else opts.s_noise
-        self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts}
-        self.override_settings_restore_afterwards = override_settings_restore_afterwards
-        self.refiner_checkpoint = refiner_checkpoint
-        self.refiner_switch_at = refiner_switch_at
-
-        self.is_using_inpainting_conditioning = False
-        self.disable_extra_networks = False
-        self.token_merging_ratio = 0
-        self.token_merging_ratio_hr = 0
+        self.s_min_uncond = self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond
+        self.s_churn = self.s_churn if self.s_churn is not None else opts.s_churn
+        self.s_tmin = self.s_tmin if self.s_tmin is not None else opts.s_tmin
+        self.s_tmax = (self.s_tmax if self.s_tmax is not None else opts.s_tmax) or float('inf')
+        self.s_noise = self.s_noise if self.s_noise is not None else opts.s_noise
+
+        self.extra_generation_params = self.extra_generation_params or {}
+        self.override_settings = self.override_settings or {}
+        self.script_args = self.script_args or {}
+
         self.refiner_checkpoint_info = None

-        if not seed_enable_extras:
+        if not self.seed_enable_extras:
             self.subseed = -1
             self.subseed_strength = 0
             self.seed_resize_from_h = 0
             self.seed_resize_from_w = 0

-        self.scripts = None
-        self.script_args = script_args
-        self.all_prompts = None
-        self.all_negative_prompts = None
-        self.all_seeds = None
-        self.all_subseeds = None
-        self.iteration = 0
-        self.is_hr_pass = False
-        self.sampler = None
-        self.main_prompt = None
-        self.main_negative_prompt = None
-
-        self.prompts = None
-        self.negative_prompts = None
-        self.extra_network_data = None
-        self.seeds = None
-        self.subseeds = None
-
-        self.step_multiplier = 1
         self.cached_uc = StableDiffusionProcessing.cached_uc
         self.cached_c = StableDiffusionProcessing.cached_c
-        self.uc = None
-        self.c = None
-        self.rng: rng.ImageRNG = None
-
-        self.user = None
-
-        self.sd_model_name = None
-        self.sd_model_hash = None
-        self.sd_vae_name = None
-        self.sd_vae_hash = None

     @property
     def sd_model(self):
         return shared.sd_model

+    @sd_model.setter
+    def sd_model(self, value):
+        pass
+
     def txt2img_image_conditioning(self, x, width=None, height=None):
         self.is_using_inpainting_conditioning = self.sd_model.model.conditioning_key in {'hybrid', 'concat'}

@@ -932,49 +954,51 @@ def old_hires_fix_first_pass_dimensions(width, height):
     return width, height


+@dataclass(repr=False)
 class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
-    sampler = None
+    enable_hr: bool = False
+    denoising_strength: float = 0.75
+    firstphase_width: int = 0
+    firstphase_height: int = 0
+    hr_scale: float = 2.0
+    hr_upscaler: str = None
+    hr_second_pass_steps: int = 0
+    hr_resize_x: int = 0
+    hr_resize_y: int = 0
+    hr_checkpoint_name: str = None
+    hr_sampler_name: str = None
+    hr_prompt: str = ''
+    hr_negative_prompt: str = ''
+
     cached_hr_uc = [None, None]
     cached_hr_c = [None, None]

-    def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_checkpoint_name: str = None, hr_sampler_name: str = None, hr_prompt: str = '', hr_negative_prompt: str = '', **kwargs):
-        super().__init__(**kwargs)
-        self.enable_hr = enable_hr
-        self.denoising_strength = denoising_strength
-        self.hr_scale = hr_scale
-        self.hr_upscaler = hr_upscaler
-        self.hr_second_pass_steps = hr_second_pass_steps
-        self.hr_resize_x = hr_resize_x
-        self.hr_resize_y = hr_resize_y
-        self.hr_upscale_to_x = hr_resize_x
-        self.hr_upscale_to_y = hr_resize_y
-        self.hr_checkpoint_name = hr_checkpoint_name
-        self.hr_checkpoint_info = None
-        self.hr_sampler_name = hr_sampler_name
-        self.hr_prompt = hr_prompt
-        self.hr_negative_prompt = hr_negative_prompt
-        self.all_hr_prompts = None
-        self.all_hr_negative_prompts = None
-        self.latent_scale_mode = None
-
-        if firstphase_width != 0 or firstphase_height != 0:
+    hr_checkpoint_info: dict = field(default=None, init=False)
+    hr_upscale_to_x: int = field(default=0, init=False)
+    hr_upscale_to_y: int = field(default=0, init=False)
+    truncate_x: int = field(default=0, init=False)
+    truncate_y: int = field(default=0, init=False)
+    applied_old_hires_behavior_to: tuple = field(default=None, init=False)
+    latent_scale_mode: dict = field(default=None, init=False)
+    hr_c: tuple | None = field(default=None, init=False)
+    hr_uc: tuple | None = field(default=None, init=False)
+    all_hr_prompts: list = field(default=None, init=False)
+    all_hr_negative_prompts: list = field(default=None, init=False)
+    hr_prompts: list = field(default=None, init=False)
+    hr_negative_prompts: list = field(default=None, init=False)
+    hr_extra_network_data: list = field(default=None, init=False)
+
+    def __post_init__(self):
+        super().__post_init__()
+
+        if self.firstphase_width != 0 or self.firstphase_height != 0:
             self.hr_upscale_to_x = self.width
             self.hr_upscale_to_y = self.height
-            self.width = firstphase_width
-            self.height = firstphase_height
-
-        self.truncate_x = 0
-        self.truncate_y = 0
-        self.applied_old_hires_behavior_to = None
-
-        self.hr_prompts = None
-        self.hr_negative_prompts = None
-        self.hr_extra_network_data = None
+            self.width = self.firstphase_width
+            self.height = self.firstphase_height

         self.cached_hr_uc = StableDiffusionProcessingTxt2Img.cached_hr_uc
         self.cached_hr_c = StableDiffusionProcessingTxt2Img.cached_hr_c
-        self.hr_c = None
-        self.hr_uc = None

     def calculate_target_resolution(self):
         if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height):
@@ -1252,7 +1276,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):

         return super().get_conds()

-
     def parse_extra_network_prompts(self):
         res = super().parse_extra_network_prompts()

@@ -1265,32 +1288,37 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):

         return res


+@dataclass(repr=False)
 class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
-    sampler = None
-
-    def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = None, mask_blur_x: int = 4, mask_blur_y: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs):
-        super().__init__(**kwargs)
-
-        self.init_images = init_images
-        self.resize_mode: int = resize_mode
-        self.denoising_strength: float = denoising_strength
-        self.image_cfg_scale: float = image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None
-        self.init_latent = None
-        self.image_mask = mask
-        self.latent_mask = None
-        self.mask_for_overlay = None
-        self.mask_blur_x = mask_blur_x
-        self.mask_blur_y = mask_blur_y
-        if mask_blur is not None:
-            self.mask_blur = mask_blur
-        self.inpainting_fill = inpainting_fill
-        self.inpaint_full_res = inpaint_full_res
-        self.inpaint_full_res_padding = inpaint_full_res_padding
-        self.inpainting_mask_invert = inpainting_mask_invert
-        self.initial_noise_multiplier = opts.initial_noise_multiplier if initial_noise_multiplier is None else initial_noise_multiplier
+    init_images: list = None
+    resize_mode: int = 0
+    denoising_strength: float = 0.75
+    image_cfg_scale: float = None
+    mask: Any = None
+    mask_blur_x: int = 4
+    mask_blur_y: int = 4
+    mask_blur: int = None
+    inpainting_fill: int = 0
+    inpaint_full_res: bool = True
+    inpaint_full_res_padding: int = 0
+    inpainting_mask_invert: int = 0
+    initial_noise_multiplier: float = None
+    latent_mask: Image = None
+
+    image_mask: Any = field(default=None, init=False)
+
+    nmask: torch.Tensor = field(default=None, init=False)
+    image_conditioning: torch.Tensor = field(default=None, init=False)
+    init_img_hash: str = field(default=None, init=False)
+    mask_for_overlay: Image = field(default=None, init=False)
+    init_latent: torch.Tensor = field(default=None, init=False)
+
+    def __post_init__(self):
+        super().__post_init__()
+
+        self.image_mask = self.mask
         self.mask = None
-        self.nmask = None
-        self.image_conditioning = None
+        self.initial_noise_multiplier = opts.initial_noise_multiplier if self.initial_noise_multiplier is None else self.initial_noise_multiplier

     @property
     def mask_blur(self):
@@ -1300,15 +1328,13 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):

     @mask_blur.setter
     def mask_blur(self, value):
-        self.mask_blur_x = value
-        self.mask_blur_y = value
-
-    @mask_blur.deleter
-    def mask_blur(self):
-        del self.mask_blur_x
-        del self.mask_blur_y
+        if isinstance(value, int):
+            self.mask_blur_x = value
+            self.mask_blur_y = value

     def init(self, all_prompts, all_seeds, all_subseeds):
+        self.image_cfg_scale: float = self.image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None
+
         self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
         crop_region = None

diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index 380cdd5f..09d1e11e 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -305,5 +305,8 @@ class Sampler:
         current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size]

         return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds)

+    def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+        raise NotImplementedError()
-
+
+    def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+        raise NotImplementedError()
-- cgit v1.2.3

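The conversion keeps the constructors keyword-compatible: callers still pass the same keyword arguments, while option-dependent defaults and runtime-only state move into `__post_init__()` and `field(..., init=False)` declarations. A small self-contained sketch of that pattern (the class and attribute names below are illustrative, not webui code):

    from __future__ import annotations
    from dataclasses import dataclass, field

    @dataclass(repr=False)
    class ProcessingSketch:
        steps: int = 50
        styles: list[str] = field(default_factory=list)       # mutable default needs a factory
        s_churn: float = None                                  # None means "fall back to the global setting"
        is_hr_pass: bool = field(default=False, init=False)    # runtime state, not a constructor argument

        def __post_init__(self):
            # stands in for "opts.s_churn" in the real class
            self.s_churn = self.s_churn if self.s_churn is not None else 0.0

    p = ProcessingSketch(steps=20, styles=["photo"])
    assert p.s_churn == 0.0 and p.is_hr_pass is False

The `sd_model` setter added above presumably exists for the same reason: the generated `__init__` assigns every declared field, including `sd_model`, so the property needs a setter; it is a no-op and reads keep returning `shared.sd_model`.
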
From 69f49c8d394220331eaa6609825b477eed60e0f4 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Sun, 13 Aug 2023 04:40:34 -0400
Subject: Clear sampler before decoding images

More significant VRAM reduction.

---
 modules/processing.py | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'modules/processing.py')

diff --git a/modules/processing.py b/modules/processing.py
index 007a4e05..16104f92 100755
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1192,6 +1192,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):

         sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio())

+        self.sampler = None
+        devices.torch_gc()
+
         decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)

         self.is_hr_pass = False
-- cgit v1.2.3

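The three added lines are the whole change: dropping the last reference to the sampler and forcing a collection pass frees its cached tensors before `decode_latent_batch()` allocates the decoded images. As a rough, illustrative equivalent of what `devices.torch_gc()` does (the helper below is a sketch, not webui code):

    import gc
    import torch

    def free_cuda_memory():
        # collect Python garbage first so dangling tensor wrappers are released,
        # then ask PyTorch to hand cached VRAM blocks back to the driver
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()

    # usage sketch: p.sampler = None; free_cuda_memory(); then decode the latents
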
From d8419762c1454ba51baa710d9ce8e762efc056ef Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 13 Aug 2023 15:07:37 +0300
Subject: Lora: output warnings in UI rather than fail for unfitting loras; switch to logging for error output in console

---
 extensions-builtin/Lora/extra_networks_lora.py | 12 ++++-
 extensions-builtin/Lora/networks.py            | 61 ++++++++++++++++----------
 extensions-builtin/Lora/scripts/lora_script.py |  6 +--
 modules/processing.py                          | 13 +++---
 4 files changed, 58 insertions(+), 34 deletions(-)

(limited to 'modules/processing.py')

diff --git a/extensions-builtin/Lora/extra_networks_lora.py b/extensions-builtin/Lora/extra_networks_lora.py
index ba2945c6..32e32cab 100644
--- a/extensions-builtin/Lora/extra_networks_lora.py
+++ b/extensions-builtin/Lora/extra_networks_lora.py
@@ -1,4 +1,4 @@
-from modules import extra_networks, shared
+from modules import extra_networks, shared, sd_hijack
 import networks


@@ -6,9 +6,14 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
     def __init__(self):
         super().__init__('lora')

+        self.errors = {}
+        """mapping of network names to the number of errors the network had during operation"""
+
     def activate(self, p, params_list):
         additional = shared.opts.sd_lora

+        self.errors.clear()
+
         if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional):
             p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
             params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
@@ -56,4 +61,7 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
             p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes)

     def deactivate(self, p):
-        pass
+        if self.errors:
+            p.comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in self.errors.items()))
+
+            self.errors.clear()
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index ba621139..c252ed9e 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -1,3 +1,4 @@
+import logging
 import os
 import re

@@ -194,7 +195,7 @@ def load_network(name, network_on_disk):
             net.modules[key] = net_module

     if keys_failed_to_match:
-        print(f"Failed to match keys when loading network {network_on_disk.filename}: {keys_failed_to_match}")
+        logging.debug(f"Network {network_on_disk.filename} didn't match keys: {keys_failed_to_match}")

     return net

@@ -207,7 +208,6 @@ def purge_networks_from_memory():
     devices.torch_gc()


-
 def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
     already_loaded = {}

@@ -248,7 +248,7 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No

         if net is None:
             failed_to_load_networks.append(name)
-            print(f"Couldn't find network with name {name}")
+            logging.info(f"Couldn't find network with name {name}")
             continue

         net.te_multiplier = te_multipliers[i] if te_multipliers else 1.0
@@ -257,7 +257,7 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
         loaded_networks.append(net)

     if failed_to_load_networks:
-        sd_hijack.model_hijack.comments.append("Failed to find networks: " + ", ".join(failed_to_load_networks))
+        sd_hijack.model_hijack.comments.append("Networks not found: " + ", ".join(failed_to_load_networks))

     purge_networks_from_memory()

@@ -314,17 +314,22 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
     for net in loaded_networks:
         module = net.modules.get(network_layer_name, None)
         if module is not None and hasattr(self, 'weight'):
-            with torch.no_grad():
-                updown, ex_bias = module.calc_updown(self.weight)
+            try:
+                with torch.no_grad():
+                    updown, ex_bias = module.calc_updown(self.weight)

-            if len(self.weight.shape) == 4 and self.weight.shape[1] == 9:
-                # inpainting model. zero pad updown to make channel[1] 4 to 9
-                updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))
+                if len(self.weight.shape) == 4 and self.weight.shape[1] == 9:
+                    # inpainting model. zero pad updown to make channel[1] 4 to 9
+                    updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))

-            self.weight += updown
-            if ex_bias is not None and getattr(self, 'bias', None) is not None:
-                self.bias += ex_bias
-            continue
+                self.weight += updown
+                if ex_bias is not None and getattr(self, 'bias', None) is not None:
+                    self.bias += ex_bias
+            except RuntimeError as e:
+                logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
+                extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
+
+            continue

         module_q = net.modules.get(network_layer_name + "_q_proj", None)
         module_k = net.modules.get(network_layer_name + "_k_proj", None)
@@ -332,21 +337,28 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
         module_out = net.modules.get(network_layer_name + "_out_proj", None)

         if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out:
-            with torch.no_grad():
-                updown_q = module_q.calc_updown(self.in_proj_weight)
-                updown_k = module_k.calc_updown(self.in_proj_weight)
-                updown_v = module_v.calc_updown(self.in_proj_weight)
-                updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
-                updown_out = module_out.calc_updown(self.out_proj.weight)
-
-            self.in_proj_weight += updown_qkv
-            self.out_proj.weight += updown_out
-            continue
+            try:
+                with torch.no_grad():
+                    updown_q = module_q.calc_updown(self.in_proj_weight)
+                    updown_k = module_k.calc_updown(self.in_proj_weight)
+                    updown_v = module_v.calc_updown(self.in_proj_weight)
+                    updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
+                    updown_out = module_out.calc_updown(self.out_proj.weight)
+
+                self.in_proj_weight += updown_qkv
+                self.out_proj.weight += updown_out
+
+            except RuntimeError as e:
+                logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
+                extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
+
+            continue

         if module is None:
             continue

-        print(f'failed to calculate network weights for layer {network_layer_name}')
+        logging.debug(f"Network {net.name} layer {network_layer_name}: couldn't find supported operation")
+        extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1

     self.network_current_names = wanted_names

@@ -519,6 +531,7 @@ def infotext_pasted(infotext, params):
     if added:
         params["Prompt"] += "\n" + "".join(added)


+extra_network_lora = None
 available_networks = {}
 available_network_aliases = {}
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index dc307f8c..4c6e774a 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -23,9 +23,9 @@ def unload():
 def before_ui():
     ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora())

-    extra_network = extra_networks_lora.ExtraNetworkLora()
-    extra_networks.register_extra_network(extra_network)
-    extra_networks.register_extra_network_alias(extra_network, "lyco")
+    networks.extra_network_lora = extra_networks_lora.ExtraNetworkLora()
+    extra_networks.register_extra_network(networks.extra_network_lora)
+    extra_networks.register_extra_network_alias(networks.extra_network_lora, "lyco")


 if not hasattr(torch.nn, 'Linear_forward_before_network'):
diff --git a/modules/processing.py b/modules/processing.py
index 007a4e05..10749aa2 100755
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -157,6 +157,7 @@ class StableDiffusionProcessing:
     cached_uc = [None, None]
     cached_c = [None, None]

+    comments: dict = None
     sampler: sd_samplers_common.Sampler | None = field(default=None, init=False)
     is_using_inpainting_conditioning: bool = field(default=False, init=False)
     paste_to: tuple | None = field(default=None, init=False)
@@ -196,6 +197,8 @@ class StableDiffusionProcessing:
         if self.sampler_index is not None:
             print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)

+        self.comments = {}
+
         self.sampler_noise_scheduler_override = None
         self.s_min_uncond = self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond
         self.s_churn = self.s_churn if self.s_churn is not None else opts.s_churn
@@ -226,6 +229,9 @@ class StableDiffusionProcessing:
     def sd_model(self, value):
         pass

+    def comment(self, text):
+        self.comments[text] = 1
+
     def txt2img_image_conditioning(self, x, width=None, height=None):
         self.is_using_inpainting_conditioning = self.sd_model.model.conditioning_key in {'hybrid', 'concat'}

@@ -429,7 +435,7 @@ class Processed:
         self.subseed = subseed
         self.subseed_strength = p.subseed_strength
         self.info = info
-        self.comments = comments
+        self.comments = "".join(f"{comment}\n" for comment in p.comments)
         self.width = p.width
         self.height = p.height
         self.sampler_name = p.sampler_name
@@ -720,8 +726,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
     modules.sd_hijack.model_hijack.apply_circular(p.tiling)
     modules.sd_hijack.model_hijack.clear_comments()

-    comments = {}
-
     p.setup_prompts()

     if type(seed) == list:
@@ -801,7 +805,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             p.setup_conds()

             for comment in model_hijack.comments:
-                comments[comment] = 1
+                p.comment(comment)

             p.extra_generation_params.update(model_hijack.extra_generation_params)

@@ -930,7 +934,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
         images_list=output_images,
         seed=p.all_seeds[0],
         info=infotexts[0],
-        comments="".join(f"{comment}\n" for comment in comments),
         subseed=p.all_subseeds[0],
         index_of_first_image=index_of_first_image,
         infotexts=infotexts,
-- cgit v1.2.3

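The net effect of the Lora changes: a network whose tensors do not fit the current model no longer aborts the batch; each failing layer is logged at debug level and counted per network, and `deactivate()` surfaces the totals through the new `p.comment()` API so they reach the UI via `Processed.comments`. A condensed sketch of that collect-then-report pattern (the names below are illustrative, not the extension's API):

    import logging

    errors: dict[str, int] = {}

    def apply_layer(net_name: str, layer_name: str, apply_fn):
        try:
            apply_fn()                         # e.g. add the LoRA update to a layer's weights
        except RuntimeError as e:
            logging.debug(f"Network {net_name} layer {layer_name}: {e}")
            errors[net_name] = errors.get(net_name, 0) + 1   # count instead of raising

    def report(p):
        if errors:
            p.comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in errors.items()))
            errors.clear()
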
From f093c9d39d0fe9951a8f5c570027cecc68778ef2 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 13 Aug 2023 17:31:10 +0300
Subject: fix broken XYZ plot seeds

add new callback for scripts to be used before processing

---
 modules/processing.py              | 32 ++++++++++++++++++++++++++++++--
 modules/processing_scripts/seed.py |  2 +-
 modules/scripts.py                 | 17 ++++++++++++++++-
 3 files changed, 47 insertions(+), 4 deletions(-)

(limited to 'modules/processing.py')

diff --git a/modules/processing.py b/modules/processing.py
index fdf49359..74366655 100755
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -152,7 +152,9 @@ class StableDiffusionProcessing:
     token_merging_ratio_hr = 0
     disable_extra_networks: bool = False

-    script_args: list = None
+    scripts_value: scripts.ScriptRunner = field(default=None, init=False)
+    script_args_value: list = field(default=None, init=False)
+    scripts_setup_complete: bool = field(default=False, init=False)

     cached_uc = [None, None]
     cached_c = [None, None]
@@ -171,7 +173,6 @@ class StableDiffusionProcessing:
     step_multiplier: int = field(default=1, init=False)
     color_corrections: list = field(default=None, init=False)

-    scripts: list = field(default=None, init=False)
     all_prompts: list = field(default=None, init=False)
     all_negative_prompts: list = field(default=None, init=False)
     all_seeds: list = field(default=None, init=False)
@@ -229,6 +230,33 @@ class StableDiffusionProcessing:
     def sd_model(self, value):
         pass

+    @property
+    def scripts(self):
+        return self.scripts_value
+
+    @scripts.setter
+    def scripts(self, value):
+        self.scripts_value = value
+
+        if self.scripts_value and self.script_args_value and not self.scripts_setup_complete:
+            self.setup_scripts()
+
+    @property
+    def script_args(self):
+        return self.script_args_value
+
+    @script_args.setter
+    def script_args(self, value):
+        self.script_args_value = value
+
+        if self.scripts_value and self.script_args_value and not self.scripts_setup_complete:
+            self.setup_scripts()
+
+    def setup_scripts(self):
+        self.scripts_setup_complete = True
+
+        self.scripts.setup_scrips(self)
+
     def comment(self, text):
         self.comments[text] = 1

diff --git a/modules/processing_scripts/seed.py b/modules/processing_scripts/seed.py
index cc90775a..96b44dfb 100644
--- a/modules/processing_scripts/seed.py
+++ b/modules/processing_scripts/seed.py
@@ -58,7 +58,7 @@ class ScriptSeed(scripts.ScriptBuiltin):

         return self.seed, subseed, subseed_strength

-    def before_process(self, p, seed, subseed, subseed_strength):
+    def setup(self, p, seed, subseed, subseed_strength):
         p.seed = seed

         if subseed_strength > 0:
diff --git a/modules/scripts.py b/modules/scripts.py
index c6459b45..d4a9da94 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -106,9 +106,16 @@ class Script:

         pass

+    def setup(self, p, *args):
+        """For AlwaysVisible scripts, this function is called when the processing object is set up, before any processing starts.
+        args contains all values returned by components from ui().
+        """
+        pass
+
+
     def before_process(self, p, *args):
         """
-        This function is called very early before processing begins for AlwaysVisible scripts.
+        This function is called very early during processing begins for AlwaysVisible scripts.
         You can modify the processing object (p) here, inject hooks, etc.
         args contains all values returned by components from ui()
         """
@@ -706,6 +713,14 @@ class ScriptRunner:
             except Exception:
                 errors.report(f"Error running before_hr: {script.filename}", exc_info=True)

+    def setup_scrips(self, p):
+        for script in self.alwayson_scripts:
+            try:
+                script_args = p.script_args[script.args_from:script.args_to]
+                script.setup(p, *script_args)
+            except Exception:
+                errors.report(f"Error running setup: {script.filename}", exc_info=True)
+

 scripts_txt2img: ScriptRunner = None
 scripts_img2img: ScriptRunner = None
-- cgit v1.2.3
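With the new hook in place, an always-on script can read its UI values as soon as the processing object is wired up: `setup_scrips()` runs from the `scripts`/`script_args` setters, before any processing starts, and the built-in seed script now applies its values in `setup()` instead of `before_process()`, which is the fix for the broken XYZ plot seeds. A minimal, hypothetical script using the callback (the option name and what it does are illustrative):

    import gradio as gr
    from modules import scripts

    class ExampleSetupScript(scripts.Script):
        def title(self):
            return "Setup-callback example"

        def show(self, is_img2img):
            return scripts.AlwaysVisible

        def ui(self, is_img2img):
            strength = gr.Slider(minimum=0.0, maximum=1.0, value=0.5, label="Example strength")
            return [strength]

        def setup(self, p, strength):
            # called once via setup_scrips() when p.scripts and p.script_args are assigned,
            # before any processing starts
            p.extra_generation_params["Example strength"] = strength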