40 files changed, 628 insertions, 231 deletions
diff --git a/README.md b/README.md
@@ -17,7 +17,7 @@ A browser interface based on Gradio library for Stable Diffusion.
- a man in a (tuxedo:1.21) - alternative syntax
- select text and press ctrl+up or ctrl+down to automatically adjust attention to selected text (code contributed by anonymous user)
- Loopback, run img2img processing multiple times
-- X/Y plot, a way to draw a 2 dimensional plot of images with different parameters
+- X/Y/Z plot, a way to draw a 3 dimensional plot of images with different parameters
- Textual Inversion
- have as many embeddings as you want and use any names you like for them
- use multiple embeddings with different numbers of vectors per token
diff --git a/configs/instruct-pix2pix.yaml b/configs/instruct-pix2pix.yaml
new file mode 100644
index 00000000..437ddcef
--- /dev/null
+++ b/configs/instruct-pix2pix.yaml
@@ -0,0 +1,99 @@
+# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).
+# See more details in LICENSE.
+
+model:
+  base_learning_rate: 1.0e-04
+  target: modules.models.diffusion.ddpm_edit.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: edited
+    cond_stage_key: edit
+    # image_size: 64
+    # image_size: 32
+    image_size: 16
+    channels: 4
+    cond_stage_trainable: false   # Note: different from the one we trained before
+    conditioning_key: hybrid
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    use_ema: true
+    load_ema: true
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 0 ]
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 8
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+
+data:
+  target: main.DataModuleFromConfig
+  params:
+    batch_size: 128
+    num_workers: 1
+    wrap: false
+    validation:
+      target: edit_dataset.EditDataset
+      params:
+        path: data/clip-filtered-dataset
+        cache_dir: data/
+        cache_name: data_10k
+        split: val
+        min_text_sim: 0.2
+        min_image_sim: 0.75
+        min_direction_sim: 0.2
+        max_samples_per_prompt: 1
+        min_resize_res: 512
+        max_resize_res: 512
+        crop_res: 512
+        output_as_edit: False
+        real_input: True
diff --git a/v2-inference-v.yaml b/configs/v1-inpainting-inference.yaml
index 513cd635..f9eec37d 100644
--- a/v2-inference-v.yaml
+++ b/configs/v1-inpainting-inference.yaml
@@ -1,8 +1,7 @@
 model:
-  base_learning_rate: 1.0e-4
-  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  base_learning_rate: 7.5e-05
+  target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
   params:
-    parameterization: "v"
     linear_start: 0.00085
     linear_end: 0.0120
     num_timesteps_cond: 1
@@ -12,29 +11,36 @@ model:
     cond_stage_key: "txt"
     image_size: 64
     channels: 4
-    cond_stage_trainable: false
-    conditioning_key: crossattn
+    cond_stage_trainable: false   # Note: different from the one we trained before
+    conditioning_key: hybrid   # important
     monitor: val/loss_simple_ema
     scale_factor: 0.18215
-    use_ema: False # we set this to false because this is an inference only config
+    finetune_keys: null
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
 
     unet_config:
       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
       params:
-        use_checkpoint: True
-        use_fp16: True
         image_size: 32 # unused
-        in_channels: 4
+        in_channels: 9  # 4 data + 4 downscaled image + 1 mask
         out_channels: 4
         model_channels: 320
         attention_resolutions: [ 4, 2, 1 ]
         num_res_blocks: 2
         channel_mult: [ 1, 2, 4, 4 ]
-        num_head_channels: 64 # need to fix for flash-attn
+        num_heads: 8
         use_spatial_transformer: True
-        use_linear_in_transformer: True
         transformer_depth: 1
-        context_dim: 1024
+        context_dim: 768
+        use_checkpoint: True
         legacy: False
 
     first_stage_config:
@@ -43,7 +49,6 @@ model:
         embed_dim: 4
         monitor: val/rec_loss
         ddconfig:
-          #attn_type: "vanilla-xformers"
           double_z: true
           z_channels: 4
           resolution: 256
@@ -62,7 +67,4 @@ model:
           target: torch.nn.Identity
 
     cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
-      params:
-        freeze: True
-        layer: "penultimate"
\ No newline at end of file
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
diff --git a/javascript/hints.js b/javascript/hints.js
index 3cf10e20..7b60b25e 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -50,7 +50,7 @@ titles = {
     "None": "Do not do anything special",
     "Prompt matrix": "Separate prompts into parts using vertical pipe character (|) and the script will create a picture for every combination of them (except for the first part, which will be present in all combinations)",
-    "X/Y plot": "Create a grid where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows",
+    "X/Y/Z plot": "Create grid(s) where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows",
     "Custom code": "Run Python code. Advanced user only. Must run program with --allow-code for this to work",
     "Prompt S/R": "Separate a list of words with commas, and the first word will be used as a keyword: script will search for this word in the prompt, and replace it with others",
diff --git a/launch.py b/launch.py
@@ -17,6 +17,37 @@ stored_commit_hash = None
skip_install = False
+def check_python_version():
+ is_windows = platform.system() == "Windows"
+ major = sys.version_info.major
+ minor = sys.version_info.minor
+ micro = sys.version_info.micro
+
+ if is_windows:
+ supported_minors = [10]
+ else:
+ supported_minors = [7, 8, 9, 10, 11]
+
+ if not (major == 3 and minor in supported_minors):
+ import modules.errors
+
+ modules.errors.print_error_explanation(f"""
+INCOMPATIBLE PYTHON VERSION
+
+This program is tested with 3.10.6 Python, but you have {major}.{minor}.{micro}.
+If you encounter an error with "RuntimeError: Couldn't install torch." message,
+or any other error regarding unsuccessful package (library) installation,
+please downgrade (or upgrade) to the latest version of 3.10 Python
+and delete current Python and "venv" folder in WebUI's directory.
+
+You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3109/
+
+{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""}
+
+Use --skip-python-version-check to suppress this warning.
+""")
+
+
def commit_hash():
global stored_commit_hash
@@ -216,6 +247,7 @@ def prepare_environment():
sys.argv, _ = extract_arg(sys.argv, '-f')
sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test')
+ sys.argv, skip_python_version_check = extract_arg(sys.argv, '--skip-python-version-check')
sys.argv, reinstall_xformers = extract_arg(sys.argv, '--reinstall-xformers')
sys.argv, reinstall_torch = extract_arg(sys.argv, '--reinstall-torch')
sys.argv, update_check = extract_arg(sys.argv, '--update-check')
@@ -224,6 +256,9 @@ def prepare_environment():
xformers = '--xformers' in sys.argv
ngrok = '--ngrok' in sys.argv
+ if not skip_python_version_check:
+ check_python_version()
+
commit = commit_hash()
print(f"Python {sys.version}")
diff --git a/modules/api/api.py b/modules/api/api.py
index 25c65e57..eb7b1da5 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -18,7 +18,8 @@ from modules.textual_inversion.textual_inversion import create_embedding, train_
 from modules.textual_inversion.preprocess import preprocess
 from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
 from PIL import PngImagePlugin,Image
-from modules.sd_models import checkpoints_list, find_checkpoint_config
+from modules.sd_models import checkpoints_list
+from modules.sd_models_config import find_checkpoint_config_near_filename
 from modules.realesrgan_model import get_realesrgan_models
 from modules import devices
 from typing import List
@@ -387,7 +388,7 @@ class Api:
         ]
 
     def get_sd_models(self):
-        return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config(x)} for x in checkpoints_list.values()]
+        return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in checkpoints_list.values()]
 
     def get_hypernetworks(self):
         return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
diff --git a/modules/api/models.py b/modules/api/models.py
index 805bd8f7..cba43d3b 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -228,7 +228,7 @@ class SDModelItem(BaseModel):
     hash: Optional[str] = Field(title="Short hash")
     sha256: Optional[str] = Field(title="sha256 hash")
     filename: str = Field(title="Filename")
-    config: str = Field(title="Config file")
+    config: Optional[str] = Field(title="Config file")
 
 class HypernetworkItem(BaseModel):
     name: str = Field(title="Name")
diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py
index ab40d842..01fb7bd8 100644
--- a/modules/codeformer_model.py
+++ b/modules/codeformer_model.py
@@ -8,7 +8,7 @@ import torch
import modules.face_restoration
import modules.shared
from modules import shared, devices, modelloader
-from modules.paths import script_path, models_path
+from modules.paths import models_path
# codeformer people made a choice to include modified basicsr library to their project which makes
# it utterly impossible to use it alongside with other libraries that also use basicsr, like GFPGAN.
diff --git a/modules/devices.py b/modules/devices.py
index 6b36622c..4687944e 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -34,14 +34,18 @@ def get_cuda_device_string():
         return "cuda"
 
-def get_optimal_device():
+def get_optimal_device_name():
     if torch.cuda.is_available():
-        return torch.device(get_cuda_device_string())
+        return get_cuda_device_string()
 
     if has_mps():
-        return torch.device("mps")
+        return "mps"
+
+    return "cpu"
 
-    return cpu
+
+def get_optimal_device():
+    return torch.device(get_optimal_device_name())
 
 
 def get_device_for(task):
@@ -139,6 +143,8 @@ def test_for_nans(x, where):
     else:
         message = "A tensor with all NaNs was produced."
 
+    message += " Use --disable-nan-check commandline argument to disable this check."
+
     raise NansException(message)
diff --git a/modules/extensions.py b/modules/extensions.py
index b522125c..5e12b1aa 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -7,9 +7,11 @@ import git
 
 from modules import paths, shared
extensions = []
-extensions_dir = os.path.join(paths.script_path, "extensions")
+extensions_dir = os.path.join(paths.data_path, "extensions")
extensions_builtin_dir = os.path.join(paths.script_path, "extensions-builtin")
+if not os.path.exists(extensions_dir):
+ os.makedirs(extensions_dir)
def active():
return [x for x in extensions if x.enabled]
diff --git a/modules/extras.py b/modules/extras.py
index 466ecc15..d8ece955 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -6,7 +6,7 @@ import shutil
 import torch
import tqdm
-from modules import shared, images, sd_models, sd_vae
+from modules import shared, images, sd_models, sd_vae, sd_models_config
from modules.ui_common import plaintext_to_html
import gradio as gr
import safetensors.torch
@@ -37,7 +37,7 @@ def run_pnginfo(image):
def create_config(ckpt_result, config_source, a, b, c):
def config(x):
- res = sd_models.find_checkpoint_config(x) if x else None
+ res = sd_models_config.find_checkpoint_config_near_filename(x) if x else None
return res if res != shared.sd_default_config else None
if config_source == 0:
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 46e12dc6..773c5c0e 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -6,7 +6,7 @@ import re
 from pathlib import Path
import gradio as gr
-from modules.shared import script_path
+from modules.paths import data_path
from modules import shared, ui_tempdir, script_callbacks
import tempfile
from PIL import Image
@@ -289,7 +289,7 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
def connect_paste(button, paste_fields, input_comp, jsfunc=None):
def paste_func(prompt):
if not prompt and not shared.cmd_opts.hide_ui_dir_config:
- filename = os.path.join(script_path, "params.txt")
+ filename = os.path.join(data_path, "params.txt")
if os.path.exists(filename):
with open(filename, "r", encoding="utf8") as file:
prompt = file.read()
diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py
index 1e2dbc32..fbe6215a 100644
--- a/modules/gfpgan_model.py
+++ b/modules/gfpgan_model.py
@@ -6,12 +6,11 @@ import facexlib
 import gfpgan
import modules.face_restoration
-from modules import shared, devices, modelloader
-from modules.paths import models_path
+from modules import paths, shared, devices, modelloader
model_dir = "GFPGAN"
user_path = None
-model_path = os.path.join(models_path, model_dir)
+model_path = os.path.join(paths.models_path, model_dir)
model_url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth"
have_gfpgan = False
loaded_gfpgan_model = None
diff --git a/modules/hashes.py b/modules/hashes.py
index b85a7580..819362a3 100644
--- a/modules/hashes.py
+++ b/modules/hashes.py
@@ -4,8 +4,10 @@ import os.path
 import filelock
+from modules.paths import data_path
-cache_filename = "cache.json"
+
+cache_filename = os.path.join(data_path, "cache.json")
cache_data = None
diff --git a/modules/img2img.py b/modules/img2img.py
index 2168c8e2..fe9447c7 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -16,11 +16,16 @@ import modules.images as images
 import modules.scripts
-def process_batch(p, input_dir, output_dir, args):
+def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
processing.fix_seed(p)
images = shared.listfiles(input_dir)
+ inpaint_masks = shared.listfiles(inpaint_mask_dir)
+ is_inpaint_batch = inpaint_mask_dir and len(inpaint_masks) > 0
+ if is_inpaint_batch:
+ print(f"\nInpaint batch is enabled. {len(inpaint_masks)} masks found.")
+
print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")
save_normally = output_dir == ''
@@ -43,6 +48,15 @@ def process_batch(p, input_dir, output_dir, args):
img = ImageOps.exif_transpose(img)
p.init_images = [img] * p.batch_size
+ if is_inpaint_batch:
+ # try to find corresponding mask for an image using simple filename matching
+ mask_image_path = os.path.join(inpaint_mask_dir, os.path.basename(image))
+ # if not found use first one ("same mask for all images" use-case)
+ if not mask_image_path in inpaint_masks:
+ mask_image_path = inpaint_masks[0]
+ mask_image = Image.open(mask_image_path)
+ p.image_mask = mask_image
+
proc = modules.scripts.scripts_img2img.run(p, *args)
if proc is None:
proc = process_images(p)
@@ -59,7 +73,7 @@ def process_batch(p, input_dir, output_dir, args):
processed_image.save(os.path.join(output_dir, filename))
-def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, *args):
+def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, *args):
is_batch = mode == 5
if mode == 0: # img2img
@@ -139,7 +153,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
if is_batch:
assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
- process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, args)
+ process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args)
processed = Processed(p, [], p.seed, "")
else:
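
Note: the mask lookup added to process_batch above is simple filename matching with a fallback; a self-contained sketch of the same rule (hypothetical directory names, not webui code):

import os

def pick_mask(image_path, inpaint_masks, inpaint_mask_dir="masks"):
    # prefer a mask whose basename matches the image; otherwise use the first mask for every image
    candidate = os.path.join(inpaint_mask_dir, os.path.basename(image_path))
    return candidate if candidate in inpaint_masks else inpaint_masks[0]

masks = [os.path.join("masks", "shared.png"), os.path.join("masks", "cat.png")]
print(pick_mask(os.path.join("inputs", "cat.png"), masks))   # masks/cat.png
print(pick_mask(os.path.join("inputs", "dog.png"), masks))   # masks/shared.png (fallback)
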
diff --git a/modules/interrogate.py b/modules/interrogate.py
index c72ff694..cbb80683 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -12,7 +12,7 @@ from torchvision import transforms
 from torchvision.transforms.functional import InterpolationMode
import modules.shared as shared
-from modules import devices, paths, lowvram, modelloader, errors
+from modules import devices, paths, shared, lowvram, modelloader, errors
blip_image_eval_size = 384
clip_model_name = 'ViT-L/14'
diff --git a/modules/paths.py b/modules/paths.py
index 20b3e4d8..d991cc71 100644
--- a/modules/paths.py
+++ b/modules/paths.py
@@ -4,7 +4,15 @@ import sys
 import modules.safe
script_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-models_path = os.path.join(script_path, "models")
+
+# Parse the --data-dir flag first so we can use it as a base for our other argument default values
+parser = argparse.ArgumentParser(add_help=False)
+parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored",)
+cmd_opts_pre = parser.parse_known_args()[0]
+data_path = cmd_opts_pre.data_dir
+models_path = os.path.join(data_path, "models")
+
+# data_path = cmd_opts_pre.data
sys.path.insert(0, script_path)
# search for directory of stable diffusion in following places
diff --git a/modules/processing.py b/modules/processing.py
index cb41288a..5072fc40 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -13,10 +13,11 @@ from skimage import exposure
 from typing import Any, Dict, List, Optional
import modules.sd_hijack
-from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx
+from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
+import modules.paths as paths
import modules.face_restoration
import modules.images as images
import modules.styles
@@ -172,7 +173,7 @@ class StableDiffusionProcessing:
midas_in = torch.from_numpy(transformed["midas_in"][None, ...]).to(device=shared.device)
midas_in = repeat(midas_in, "1 ... -> n ...", n=self.batch_size)
- conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image.to(devices.dtype_unet) if devices.unet_needs_upcast else source_image))
+ conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image.to(devices.dtype_vae) if devices.unet_needs_upcast else source_image))
conditioning_image = conditioning_image.float() if devices.unet_needs_upcast else conditioning_image
conditioning = torch.nn.functional.interpolate(
self.sd_model.depth_model(midas_in),
@@ -217,7 +218,7 @@ class StableDiffusionProcessing:
)
# Encode the new masked image using first stage of network.
- conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image.to(devices.dtype_unet) if devices.unet_needs_upcast else conditioning_image))
+ conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image.to(devices.dtype_vae) if devices.unet_needs_upcast else conditioning_image))
# Create the concatenated conditioning tensor to be fed to `c_concat`
conditioning_mask = torch.nn.functional.interpolate(conditioning_mask, size=latent_image.shape[-2:])
@@ -417,7 +418,7 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see
def decode_first_stage(model, x):
with devices.autocast(disable=x.dtype == devices.dtype_vae):
- x = model.decode_first_stage(x)
+ x = model.decode_first_stage(x.to(devices.dtype_vae) if devices.unet_needs_upcast else x)
return x
@@ -584,7 +585,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if not p.disable_extra_networks:
extra_networks.activate(p, extra_network_data)
- with open(os.path.join(shared.script_path, "params.txt"), "w", encoding="utf8") as file:
+ with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
processed = Processed(p, [], p.seed, "")
file.write(processed.infotext(p, 0))
@@ -658,6 +659,11 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
image = Image.fromarray(x_sample)
+ if p.scripts is not None:
+ pp = scripts.PostprocessImageArgs(image)
+ p.scripts.postprocess_image(p, pp)
+ image = pp.image
+
if p.color_corrections is not None and i < len(p.color_corrections):
if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
@@ -1001,7 +1007,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
image = torch.from_numpy(batch_images)
image = 2. * image - 1.
- image = image.to(device=shared.device, dtype=devices.dtype_unet if devices.unet_needs_upcast else None)
+ image = image.to(device=shared.device, dtype=devices.dtype_vae if devices.unet_needs_upcast else None)
self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
diff --git a/modules/script_loading.py b/modules/script_loading.py
index f93f0951..a7d2203f 100644
--- a/modules/script_loading.py
+++ b/modules/script_loading.py
@@ -1,16 +1,14 @@
 import os
import sys
import traceback
+import importlib.util
from types import ModuleType
def load_module(path):
- with open(path, "r", encoding="utf8") as file:
- text = file.read()
-
- compiled = compile(text, path, 'exec')
- module = ModuleType(os.path.basename(path))
- exec(compiled, module.__dict__)
+ module_spec = importlib.util.spec_from_file_location(os.path.basename(path), path)
+ module = importlib.util.module_from_spec(module_spec)
+ module_spec.loader.exec_module(module)
return module
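
Note: the rewritten load_module relies on the standard importlib.util machinery instead of compile()/exec(), so loaded scripts get a proper module spec. A usage sketch (hypothetical script path, same pattern as the code above):

import importlib.util
import os

def load_module(path):
    module_spec = importlib.util.spec_from_file_location(os.path.basename(path), path)
    module = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(module)
    return module

# mod = load_module("/path/to/extensions/example/scripts/example.py")
# mod.__file__ and mod.__spec__ are set, unlike with the old exec()-based loader
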
diff --git a/modules/scripts.py b/modules/scripts.py
index 03907a63..6e9dc0c0 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -6,12 +6,16 @@ from collections import namedtuple
 import gradio as gr
-from modules.processing import StableDiffusionProcessing
from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing
AlwaysVisible = object()
+class PostprocessImageArgs:
+ def __init__(self, image):
+ self.image = image
+
+
class Script:
filename = None
args_from = None
@@ -65,7 +69,7 @@ class Script:
args contains all values returned by components from ui()
"""
- raise NotImplementedError()
+ pass
def process(self, p, *args):
"""
@@ -100,6 +104,13 @@ class Script:
pass
+ def postprocess_image(self, p, pp: PostprocessImageArgs, *args):
+ """
+ Called for every image after it has been generated.
+ """
+
+ pass
+
def postprocess(self, p, processed, *args):
"""
This function is called after processing ends for AlwaysVisible scripts.
@@ -247,11 +258,15 @@ class ScriptRunner:
self.infotext_fields = []
def initialize_scripts(self, is_img2img):
+ from modules import scripts_auto_postprocessing
+
self.scripts.clear()
self.alwayson_scripts.clear()
self.selectable_scripts.clear()
- for script_class, path, basedir, script_module in scripts_data:
+ auto_processing_scripts = scripts_auto_postprocessing.create_auto_preprocessing_script_data()
+
+ for script_class, path, basedir, script_module in auto_processing_scripts + scripts_data:
script = script_class()
script.filename = path
script.is_txt2img = not is_img2img
@@ -332,7 +347,7 @@ class ScriptRunner:
return inputs
- def run(self, p: StableDiffusionProcessing, *args):
+ def run(self, p, *args):
script_index = args[0]
if script_index == 0:
@@ -386,6 +401,15 @@ class ScriptRunner:
print(f"Error running postprocess_batch: {script.filename}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
+ def postprocess_image(self, p, pp: PostprocessImageArgs):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.postprocess_image(p, pp, *script_args)
+ except Exception:
+ print(f"Error running postprocess_batch: {script.filename}", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+
def before_component(self, component, **kwargs):
for script in self.scripts:
try:
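
Note: the new postprocess_image hook lets an always-on script replace each image right after it is generated, before color correction and saving. A minimal sketch of a script using it (hypothetical extension code assuming the webui environment; grayscale conversion is only an example):

from PIL import ImageOps

from modules import scripts

class GrayscaleExample(scripts.Script):
    def title(self):
        return "Grayscale example"

    def show(self, is_img2img):
        return scripts.AlwaysVisible

    def ui(self, is_img2img):
        return []

    def postprocess_image(self, p, pp: scripts.PostprocessImageArgs, *args):
        # pp.image is the finished PIL image; whatever is left here is what gets saved
        pp.image = ImageOps.grayscale(pp.image).convert("RGB")
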
diff --git a/modules/scripts_auto_postprocessing.py b/modules/scripts_auto_postprocessing.py
new file mode 100644
index 00000000..30d6d658
--- /dev/null
+++ b/modules/scripts_auto_postprocessing.py
@@ -0,0 +1,42 @@
+from modules import scripts, scripts_postprocessing, shared
+
+
+class ScriptPostprocessingForMainUI(scripts.Script):
+ def __init__(self, script_postproc):
+ self.script: scripts_postprocessing.ScriptPostprocessing = script_postproc
+ self.postprocessing_controls = None
+
+ def title(self):
+ return self.script.name
+
+ def show(self, is_img2img):
+ return scripts.AlwaysVisible
+
+ def ui(self, is_img2img):
+ self.postprocessing_controls = self.script.ui()
+ return self.postprocessing_controls.values()
+
+ def postprocess_image(self, p, script_pp, *args):
+ args_dict = {k: v for k, v in zip(self.postprocessing_controls, args)}
+
+ pp = scripts_postprocessing.PostprocessedImage(script_pp.image)
+ pp.info = {}
+ self.script.process(pp, **args_dict)
+ p.extra_generation_params.update(pp.info)
+ script_pp.image = pp.image
+
+
+def create_auto_preprocessing_script_data():
+ from modules import scripts
+
+ res = []
+
+ for name in shared.opts.postprocessing_enable_in_main_ui:
+ script = next(iter([x for x in scripts.postprocessing_scripts_data if x.script_class.name == name]), None)
+ if script is None:
+ continue
+
+ constructor = lambda s=script: ScriptPostprocessingForMainUI(s.script_class())
+ res.append(scripts.ScriptClassData(script_class=constructor, path=script.path, basedir=script.basedir, module=script.module))
+
+ return res
diff --git a/modules/scripts_postprocessing.py b/modules/scripts_postprocessing.py
index 25de02d0..ce0ebb61 100644
--- a/modules/scripts_postprocessing.py
+++ b/modules/scripts_postprocessing.py
@@ -46,6 +46,8 @@ class ScriptPostprocessing:
 pass
+
+
def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
try:
res = func(*args, **kwargs)
@@ -68,6 +70,9 @@ class ScriptPostprocessingRunner:
script: ScriptPostprocessing = script_class()
script.filename = path
+ if script.name == "Simple Upscale":
+ continue
+
self.scripts.append(script)
def create_script_ui(self, script, inputs):
@@ -87,12 +92,11 @@ class ScriptPostprocessingRunner:
import modules.scripts
self.initialize_scripts(modules.scripts.postprocessing_scripts_data)
- scripts_order = [x.lower().strip() for x in shared.opts.postprocessing_scipts_order.split(",")]
+ scripts_order = shared.opts.postprocessing_operation_order
def script_score(name):
- name = name.lower()
for i, possible_match in enumerate(scripts_order):
- if possible_match in name:
+ if possible_match == name:
return i
return len(self.scripts)
@@ -145,3 +149,4 @@ class ScriptPostprocessingRunner:
def image_changed(self):
for script in self.scripts_in_preferred_order():
script.image_changed()
+
diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py
index 31d2c898..478cd499 100644
--- a/modules/sd_hijack_inpainting.py
+++ b/modules/sd_hijack_inpainting.py
@@ -96,15 +96,6 @@ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=F
     return x_prev, pred_x0, e_t
 
-def should_hijack_inpainting(checkpoint_info):
-    from modules import sd_models
-
-    ckpt_basename = os.path.basename(checkpoint_info.filename).lower()
-    cfg_basename = os.path.basename(sd_models.find_checkpoint_config(checkpoint_info)).lower()
-
-    return "inpainting" in ckpt_basename and not "inpainting" in cfg_basename
-
-
 def do_inpainting_hijack():
     # p_sample_plms is needed because PLMS can't work with dicts as conditionings
diff --git a/modules/sd_hijack_ip2p.py b/modules/sd_hijack_ip2p.py
index 635f015f..3c727d3b 100644
--- a/modules/sd_hijack_ip2p.py
+++ b/modules/sd_hijack_ip2p.py
@@ -5,9 +5,9 @@ import gc
 import time
 
 def should_hijack_ip2p(checkpoint_info):
-    from modules import sd_models
+    from modules import sd_models_config
 
     ckpt_basename = os.path.basename(checkpoint_info.filename).lower()
-    cfg_basename = os.path.basename(sd_models.find_checkpoint_config(checkpoint_info)).lower()
+    cfg_basename = os.path.basename(sd_models_config.find_checkpoint_config_near_filename(checkpoint_info)).lower()
 
     return "pix2pix" in ckpt_basename and not "pix2pix" in cfg_basename
diff --git a/modules/sd_hijack_utils.py b/modules/sd_hijack_utils.py
index f81b169a..f8684475 100644
--- a/modules/sd_hijack_utils.py
+++ b/modules/sd_hijack_utils.py
@@ -5,7 +5,7 @@ class CondFunc:
 self = super(CondFunc, cls).__new__(cls)
if isinstance(orig_func, str):
func_path = orig_func.split('.')
- for i in range(len(func_path)-2, -1, -1):
+ for i in range(len(func_path)-1, -1, -1):
try:
resolved_obj = importlib.import_module('.'.join(func_path[:i]))
break
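
Note: the changed range start matters because the loop looks for the longest importable prefix of the dotted path; starting at len(func_path)-2, the full module path (everything up to the attribute being wrapped) was never tried, which breaks targets whose second-to-last component is a submodule rather than an attribute of its parent. A standalone sketch of the corrected resolution walk (illustrative only, not the full CondFunc):

import importlib

def resolve(dotted_name):
    func_path = dotted_name.split('.')
    # try the longest importable prefix first, then keep shortening it
    for i in range(len(func_path) - 1, -1, -1):
        try:
            resolved_obj = importlib.import_module('.'.join(func_path[:i]))
            break
        except ImportError:
            pass
    # walk the remaining components as attributes
    for attr in func_path[i:]:
        resolved_obj = getattr(resolved_obj, attr)
    return resolved_obj

print(resolve("os.path.join"))  # <function join ...>
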
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 7072eb2e..b2d48a51 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -2,8 +2,6 @@ import collections
 import os.path
import sys
import gc
-import time
-from collections import namedtuple
import torch
import re
import safetensors.torch
@@ -14,13 +12,13 @@ import ldm.modules.midas as midas
from ldm.util import instantiate_from_config
-from modules import shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes
+from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config
from modules.paths import models_path
-from modules.sd_hijack_inpainting import do_inpainting_hijack, should_hijack_inpainting
-from modules.sd_hijack_ip2p import should_hijack_ip2p
+from modules.sd_hijack_inpainting import do_inpainting_hijack
+from modules.timer import Timer
model_dir = "Stable-diffusion"
-model_path = os.path.abspath(os.path.join(models_path, model_dir))
+model_path = os.path.abspath(os.path.join(paths.models_path, model_dir))
checkpoints_list = {}
checkpoint_alisases = {}
@@ -99,17 +97,6 @@ def checkpoint_tiles():
return sorted([x.title for x in checkpoints_list.values()], key=alphanumeric_key)
-def find_checkpoint_config(info):
- if info is None:
- return shared.cmd_opts.config
-
- config = os.path.splitext(info.filename)[0] + ".yaml"
- if os.path.exists(config):
- return config
-
- return shared.cmd_opts.config
-
-
def list_models():
checkpoints_list.clear()
checkpoint_alisases.clear()
@@ -215,9 +202,7 @@ def get_state_dict_from_checkpoint(pl_sd):
def read_state_dict(checkpoint_file, print_global_state=False, map_location=None):
_, extension = os.path.splitext(checkpoint_file)
if extension.lower() == ".safetensors":
- device = map_location or shared.weight_load_location
- if device is None:
- device = devices.get_cuda_device_string() if torch.cuda.is_available() else "cpu"
+ device = map_location or shared.weight_load_location or devices.get_optimal_device_name()
pl_sd = safetensors.torch.load_file(checkpoint_file, device=device)
else:
pl_sd = torch.load(checkpoint_file, map_location=map_location or shared.weight_load_location)
@@ -229,60 +214,74 @@ def read_state_dict(checkpoint_file, print_global_state=False, map_location=None
return sd
-def load_model_weights(model, checkpoint_info: CheckpointInfo):
+def get_checkpoint_state_dict(checkpoint_info: CheckpointInfo, timer):
+ sd_model_hash = checkpoint_info.calculate_shorthash()
+ timer.record("calculate hash")
+
+ if checkpoint_info in checkpoints_loaded:
+ # use checkpoint cache
+ print(f"Loading weights [{sd_model_hash}] from cache")
+ return checkpoints_loaded[checkpoint_info]
+
+ print(f"Loading weights [{sd_model_hash}] from {checkpoint_info.filename}")
+ res = read_state_dict(checkpoint_info.filename)
+ timer.record("load weights from disk")
+
+ return res
+
+
+def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer):
title = checkpoint_info.title
sd_model_hash = checkpoint_info.calculate_shorthash()
+ timer.record("calculate hash")
+
if checkpoint_info.title != title:
shared.opts.data["sd_model_checkpoint"] = checkpoint_info.title
- cache_enabled = shared.opts.sd_checkpoint_cache > 0
+ if state_dict is None:
+ state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
- if cache_enabled and checkpoint_info in checkpoints_loaded:
- # use checkpoint cache
- print(f"Loading weights [{sd_model_hash}] from cache")
- model.load_state_dict(checkpoints_loaded[checkpoint_info])
- else:
- # load from file
- print(f"Loading weights [{sd_model_hash}] from {checkpoint_info.filename}")
+ model.load_state_dict(state_dict, strict=False)
+ del state_dict
+ timer.record("apply weights to model")
- sd = read_state_dict(checkpoint_info.filename)
- model.load_state_dict(sd, strict=False)
- del sd
-
- if cache_enabled:
- # cache newly loaded model
- checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
+ if shared.opts.sd_checkpoint_cache > 0:
+ # cache newly loaded model
+ checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
+
+ if shared.cmd_opts.opt_channelslast:
+ model.to(memory_format=torch.channels_last)
+ timer.record("apply channels_last")
- if shared.cmd_opts.opt_channelslast:
- model.to(memory_format=torch.channels_last)
+ if not shared.cmd_opts.no_half:
+ vae = model.first_stage_model
+ depth_model = getattr(model, 'depth_model', None)
- if not shared.cmd_opts.no_half:
- vae = model.first_stage_model
- depth_model = getattr(model, 'depth_model', None)
+ # with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
+ if shared.cmd_opts.no_half_vae:
+ model.first_stage_model = None
+ # with --upcast-sampling, don't convert the depth model weights to float16
+ if shared.cmd_opts.upcast_sampling and depth_model:
+ model.depth_model = None
- # with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
- if shared.cmd_opts.no_half_vae:
- model.first_stage_model = None
- # with --upcast-sampling, don't convert the depth model weights to float16
- if shared.cmd_opts.upcast_sampling and depth_model:
- model.depth_model = None
+ model.half()
+ model.first_stage_model = vae
+ if depth_model:
+ model.depth_model = depth_model
- model.half()
- model.first_stage_model = vae
- if depth_model:
- model.depth_model = depth_model
+ timer.record("apply half()")
- devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
- devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
- devices.dtype_unet = model.model.diffusion_model.dtype
- devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
+ devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
+ devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
+ devices.dtype_unet = model.model.diffusion_model.dtype
+ devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
- model.first_stage_model.to(devices.dtype_vae)
+ model.first_stage_model.to(devices.dtype_vae)
+ timer.record("apply dtype to VAE")
# clean up cache if limit is reached
- if cache_enabled:
- while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache + 1: # we need to count the current model
- checkpoints_loaded.popitem(last=False) # LRU
+ while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
+ checkpoints_loaded.popitem(last=False)
model.sd_model_hash = sd_model_hash
model.sd_model_checkpoint = checkpoint_info.filename
@@ -295,6 +294,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo):
sd_vae.clear_loaded_vae()
vae_file, vae_source = sd_vae.resolve_vae(checkpoint_info.filename)
sd_vae.load_vae(model, vae_file, vae_source)
+ timer.record("load VAE")
def enable_midas_autodownload():
@@ -307,7 +307,7 @@ def enable_midas_autodownload():
location automatically.
"""
- midas_path = os.path.join(models_path, 'midas')
+ midas_path = os.path.join(paths.models_path, 'midas')
# stable-diffusion-stability-ai hard-codes the midas model path to
# a location that differs from where other scripts using this model look.
@@ -340,24 +340,20 @@ def enable_midas_autodownload():
midas.api.load_model = load_model_wrapper
-class Timer:
- def __init__(self):
- self.start = time.time()
+def repair_config(sd_config):
+
+ if not hasattr(sd_config.model.params, "use_ema"):
+ sd_config.model.params.use_ema = False
- def elapsed(self):
- end = time.time()
- res = end - self.start
- self.start = end
- return res
+ if shared.cmd_opts.no_half:
+ sd_config.model.params.unet_config.params.use_fp16 = False
+ elif shared.cmd_opts.upcast_sampling:
+ sd_config.model.params.unet_config.params.use_fp16 = True
-def load_model(checkpoint_info=None):
+def load_model(checkpoint_info=None, already_loaded_state_dict=None, time_taken_to_load_state_dict=None):
from modules import lowvram, sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint()
- checkpoint_config = find_checkpoint_config(checkpoint_info)
-
- if checkpoint_config != shared.cmd_opts.config:
- print(f"Loading config from: {checkpoint_config}")
if shared.sd_model:
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
@@ -365,38 +361,27 @@ def load_model(checkpoint_info=None):
gc.collect()
devices.torch_gc()
- sd_config = OmegaConf.load(checkpoint_config)
-
- if should_hijack_inpainting(checkpoint_info):
- # Hardcoded config for now...
- sd_config.model.target = "ldm.models.diffusion.ddpm.LatentInpaintDiffusion"
- sd_config.model.params.conditioning_key = "hybrid"
- sd_config.model.params.unet_config.params.in_channels = 9
- sd_config.model.params.finetune_keys = None
-
- if should_hijack_ip2p(checkpoint_info):
- sd_config.model.target = "modules.models.diffusion.ddpm_edit.LatentDiffusion"
- sd_config.model.params.conditioning_key = "hybrid"
- sd_config.model.params.first_stage_key = "edited"
- sd_config.model.params.cond_stage_key = "edit"
- sd_config.model.params.image_size = 16
- sd_config.model.params.unet_config.params.in_channels = 8
- sd_config.model.params.unet_config.params.out_channels = 4
+ do_inpainting_hijack()
- if not hasattr(sd_config.model.params, "use_ema"):
- sd_config.model.params.use_ema = False
+ timer = Timer()
- do_inpainting_hijack()
+ if already_loaded_state_dict is not None:
+ state_dict = already_loaded_state_dict
+ else:
+ state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
- if shared.cmd_opts.no_half:
- sd_config.model.params.unet_config.params.use_fp16 = False
- elif shared.cmd_opts.upcast_sampling:
- sd_config.model.params.unet_config.params.use_fp16 = True
+ checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
- timer = Timer()
+ timer.record("find config")
- sd_model = None
+ sd_config = OmegaConf.load(checkpoint_config)
+ repair_config(sd_config)
+
+ timer.record("load config")
+ print(f"Creating model from config: {checkpoint_config}")
+
+ sd_model = None
try:
with sd_disable_initialization.DisableInitialization():
sd_model = instantiate_from_config(sd_config.model)
@@ -407,29 +392,35 @@ def load_model(checkpoint_info=None):
print('Failed to create model quickly; will retry using slow method.', file=sys.stderr)
sd_model = instantiate_from_config(sd_config.model)
- elapsed_create = timer.elapsed()
+ sd_model.used_config = checkpoint_config
- load_model_weights(sd_model, checkpoint_info)
+ timer.record("create model")
- elapsed_load_weights = timer.elapsed()
+ load_model_weights(sd_model, checkpoint_info, state_dict, timer)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
else:
sd_model.to(shared.device)
+ timer.record("move model to device")
+
sd_hijack.model_hijack.hijack(sd_model)
+ timer.record("hijack")
+
sd_model.eval()
shared.sd_model = sd_model
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True) # Reload embeddings after model load as they may or may not fit the model
+ timer.record("load textual inversion embeddings")
+
script_callbacks.model_loaded_callback(sd_model)
- elapsed_the_rest = timer.elapsed()
+ timer.record("scripts callbacks")
- print(f"Model loaded in {elapsed_create + elapsed_load_weights + elapsed_the_rest:.1f}s ({elapsed_create:.1f}s create model, {elapsed_load_weights:.1f}s load weights).")
+ print(f"Model loaded in {timer.summary()}.")
return sd_model
@@ -440,6 +431,7 @@ def reload_model_weights(sd_model=None, info=None):
if not sd_model:
sd_model = shared.sd_model
+
if sd_model is None: # previous model load failed
current_checkpoint_info = None
else:
@@ -447,38 +439,44 @@ def reload_model_weights(sd_model=None, info=None):
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
- checkpoint_config = find_checkpoint_config(current_checkpoint_info)
+ if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+ lowvram.send_everything_to_cpu()
+ else:
+ sd_model.to(devices.cpu)
- if current_checkpoint_info is None or checkpoint_config != find_checkpoint_config(checkpoint_info) or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info) or should_hijack_ip2p(checkpoint_info) != should_hijack_ip2p(sd_model.sd_checkpoint_info):
- del sd_model
- checkpoints_loaded.clear()
- load_model(checkpoint_info)
- return shared.sd_model
+ sd_hijack.model_hijack.undo_hijack(sd_model)
- if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
- lowvram.send_everything_to_cpu()
- else:
- sd_model.to(devices.cpu)
+ timer = Timer()
- sd_hijack.model_hijack.undo_hijack(sd_model)
+ state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
- timer = Timer()
+ checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
+
+ timer.record("find config")
+
+ if sd_model is None or checkpoint_config != sd_model.used_config:
+ del sd_model
+ checkpoints_loaded.clear()
+ load_model(checkpoint_info, already_loaded_state_dict=state_dict, time_taken_to_load_state_dict=timer.records["load weights from disk"])
+ return shared.sd_model
try:
- load_model_weights(sd_model, checkpoint_info)
+ load_model_weights(sd_model, checkpoint_info, state_dict, timer)
except Exception as e:
print("Failed to load checkpoint, restoring previous")
- load_model_weights(sd_model, current_checkpoint_info)
+ load_model_weights(sd_model, current_checkpoint_info, None, timer)
raise
finally:
sd_hijack.model_hijack.hijack(sd_model)
+ timer.record("hijack")
+
script_callbacks.model_loaded_callback(sd_model)
+ timer.record("script callbacks")
if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
sd_model.to(devices.device)
+ timer.record("move model to device")
- elapsed = timer.elapsed()
-
- print(f"Weights loaded in {elapsed:.1f}s.")
+ print(f"Weights loaded in {timer.summary()}.")
return sd_model
diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py
new file mode 100644
index 00000000..00217990
--- /dev/null
+++ b/modules/sd_models_config.py
@@ -0,0 +1,71 @@
+import re
+import os
+
+from modules import shared, paths
+
+sd_configs_path = shared.sd_configs_path
+sd_repo_configs_path = os.path.join(paths.paths['Stable Diffusion'], "configs", "stable-diffusion")
+
+
+config_default = shared.sd_default_config
+config_sd2 = os.path.join(sd_repo_configs_path, "v2-inference.yaml")
+config_sd2v = os.path.join(sd_repo_configs_path, "v2-inference-v.yaml")
+config_sd2_inpainting = os.path.join(sd_repo_configs_path, "v2-inpainting-inference.yaml")
+config_depth_model = os.path.join(sd_repo_configs_path, "v2-midas-inference.yaml")
+config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml")
+config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml")
+config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml")
+
+re_parametrization_v = re.compile(r'-v\b')
+
+
+def guess_model_config_from_state_dict(sd, filename):
+ fn = os.path.basename(filename)
+
+ sd2_cond_proj_weight = sd.get('cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight', None)
+ diffusion_model_input = sd.get('model.diffusion_model.input_blocks.0.0.weight', None)
+
+ if sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias', None) is not None:
+ return config_depth_model
+
+ if sd2_cond_proj_weight is not None and sd2_cond_proj_weight.shape[1] == 1024:
+ if diffusion_model_input.shape[1] == 9:
+ return config_sd2_inpainting
+ elif re.search(re_parametrization_v, fn):
+ return config_sd2v
+ else:
+ return config_sd2
+
+ if diffusion_model_input is not None:
+ if diffusion_model_input.shape[1] == 9:
+ return config_inpainting
+ if diffusion_model_input.shape[1] == 8:
+ return config_instruct_pix2pix
+
+ if sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight', None) is not None:
+ return config_alt_diffusion
+
+ return config_default
+
+
+def find_checkpoint_config(state_dict, info):
+ if info is None:
+ return guess_model_config_from_state_dict(state_dict, "")
+
+ config = find_checkpoint_config_near_filename(info)
+ if config is not None:
+ return config
+
+ return guess_model_config_from_state_dict(state_dict, info.filename)
+
+
+def find_checkpoint_config_near_filename(info):
+ if info is None:
+ return None
+
+ config = os.path.splitext(info.filename)[0] + ".yaml"
+ if os.path.exists(config):
+ return config
+
+ return None
+
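
Note: find_checkpoint_config now falls back to inspecting the state dict itself: a 1024-wide cond_stage projection marks an SD 2.x model, a 9-channel UNet input conv marks an inpainting model, 8 channels marks instruct-pix2pix, and a RoBERTa embedding table marks Alt-Diffusion. A toy sketch of the same shape-based dispatch using dummy tensors (string labels stand in for the real yaml paths):

import torch

def guess(sd, filename=""):
    sd2_proj = sd.get('cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight')
    unet_in = sd.get('model.diffusion_model.input_blocks.0.0.weight')

    if sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias') is not None:
        return "sd2-depth"
    if sd2_proj is not None and sd2_proj.shape[1] == 1024:
        if unet_in.shape[1] == 9:
            return "sd2-inpainting"
        return "sd2-v" if "-v" in filename else "sd2"
    if unet_in is not None:
        if unet_in.shape[1] == 9:
            return "sd1-inpainting"
        if unet_in.shape[1] == 8:
            return "instruct-pix2pix"
    if sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight') is not None:
        return "alt-diffusion"
    return "sd1"

# a 9-channel first conv is the giveaway for an SD1 inpainting checkpoint
fake_sd = {'model.diffusion_model.input_blocks.0.0.weight': torch.zeros(320, 9, 3, 3)}
print(guess(fake_sd))  # sd1-inpainting
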
diff --git a/modules/sd_vae.py b/modules/sd_vae.py
index 4ce238b8..9b00f76e 100644
--- a/modules/sd_vae.py
+++ b/modules/sd_vae.py
@@ -3,13 +3,12 @@ import safetensors.torch
 import os
 import collections
 from collections import namedtuple
-from modules import shared, devices, script_callbacks, sd_models
-from modules.paths import models_path
+from modules import paths, shared, devices, script_callbacks, sd_models
 import glob
 from copy import deepcopy
 
-vae_path = os.path.abspath(os.path.join(models_path, "VAE"))
+vae_path = os.path.abspath(os.path.join(paths.models_path, "VAE"))
 vae_ignore_keys = {"model_ema.decay", "model_ema.num_updates"}
 vae_dict = {}
diff --git a/modules/shared.py b/modules/shared.py
index 6a0b96cb..474fcc42 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -13,17 +13,19 @@ import modules.interrogate
 import modules.memmon
import modules.styles
import modules.devices as devices
-from modules import localization, sd_vae, extensions, script_loading, errors, ui_components
-from modules.paths import models_path, script_path, sd_path
+from modules import localization, extensions, script_loading, errors, ui_components, shared_items
+from modules.paths import models_path, script_path, data_path
demo = None
-sd_default_config = os.path.join(script_path, "configs/v1-inference.yaml")
+sd_configs_path = os.path.join(script_path, "configs")
+sd_default_config = os.path.join(sd_configs_path, "v1-inference.yaml")
sd_model_file = os.path.join(script_path, 'model.ckpt')
default_sd_model_file = sd_model_file
parser = argparse.ArgumentParser()
+parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored",)
parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints")
@@ -34,7 +36,7 @@ parser.add_argument("--no-half", action='store_true', help="do not switch the mo parser.add_argument("--no-half-vae", action='store_true', help="do not switch the VAE model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
-parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
+parser.add_argument("--embeddings-dir", type=str, default=os.path.join(data_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--textual-inversion-templates-dir", type=str, default=os.path.join(script_path, 'textual_inversion_templates'), help="directory with textual inversion templates")
parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory")
parser.add_argument("--localizations-dir", type=str, default=os.path.join(script_path, 'localizations'), help="localizations directory")
@@ -73,16 +75,16 @@ parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for sp parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
-parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(script_path, 'ui-config.json'))
+parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(data_path, 'ui-config.json'))
parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
parser.add_argument("--freeze-settings", action='store_true', help="disable editing settings", default=False)
-parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json'))
+parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(data_path, 'config.json'))
parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--gradio-img2img-tool", type=str, help='does not do anything')
parser.add_argument("--gradio-inpaint-tool", type=str, help="does not do anything")
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
-parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv'))
+parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(data_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None)
parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
@@ -264,12 +266,6 @@ interrogator = modules.interrogate.InterrogateModels("interrogate")
face_restorers = []
-
-def realesrgan_models_names():
- import modules.realesrgan_model
- return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]
-
-
class OptionInfo:
def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None):
self.default = default
@@ -360,7 +356,7 @@ options_templates.update(options_section(('saving-to-dirs', "Saving to a directo
options_templates.update(options_section(('upscaling', "Upscaling"), {
"ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
- "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}),
+ "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}),
"upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}),
}))
@@ -397,7 +393,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
"sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
- "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": ["Automatic", "None"] + list(sd_vae.vae_dict)}, refresh=sd_vae.refresh_vae_list),
+ "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list),
"sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"),
"inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}),
@@ -483,7 +479,8 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
}))
options_templates.update(options_section(('postprocessing', "Postprocessing"), {
- 'postprocessing_scipts_order': OptionInfo("upscale, gfpgan, codeformer", "Postprocessing operation order"),
+ 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
+ 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
}))
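For orientation: the old free-text postprocessing_scipts_order option is replaced here by two multiselect options, backed by the new DropdownMulti component and shared_items helpers added further down. A minimal sketch of reading them from code, assuming the webui environment (the variable names and the final print are illustrative only):

from modules import shared

# both options are ordinary shared.opts entries holding lists of postprocessing script names
enabled = shared.opts.postprocessing_enable_in_main_ui
order = shared.opts.postprocessing_operation_order      # preferred execution order; may be empty
if not enabled:
    print("no extra postprocessing operations enabled in the txt2img/img2img tabs")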
diff --git a/modules/shared_items.py b/modules/shared_items.py
new file mode 100644
index 00000000..8b5ec96d
--- /dev/null
+++ b/modules/shared_items.py
@@ -0,0 +1,23 @@
+
+
+def realesrgan_models_names():
+ import modules.realesrgan_model
+ return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]
+
+
+def postprocessing_scripts():
+ import modules.scripts
+
+ return modules.scripts.scripts_postproc.scripts
+
+
+def sd_vae_items():
+ import modules.sd_vae
+
+ return ["Automatic", "None"] + list(modules.sd_vae.vae_dict)
+
+
+def refresh_vae_list():
+ import modules.sd_vae
+
+ return modules.sd_vae.refresh_vae_list()
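The helpers in the new modules/shared_items.py import their heavier dependencies (realesrgan_model, scripts, sd_vae) inside each function, so shared.py can reference them without module-level imports. A minimal sketch of how a lazy choices lambda in shared.py would defer that import until the settings UI actually asks for the values (the lambda itself is illustrative; the real wiring is in the options shown above):

from modules import shared_items

# modules.realesrgan_model is only imported when this lambda is evaluated,
# e.g. when the settings page builds the Real-ESRGAN checkbox group,
# not when shared.py itself is first imported
realesrgan_choices = lambda: {"choices": shared_items.realesrgan_models_names()}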
diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py
index c0ac11d3..2239cb84 100644
--- a/modules/textual_inversion/preprocess.py
+++ b/modules/textual_inversion/preprocess.py
@@ -6,8 +6,7 @@ import sys
import tqdm
import time
-from modules import shared, images, deepbooru
-from modules.paths import models_path
+from modules import paths, shared, images, deepbooru
from modules.shared import opts, cmd_opts
from modules.textual_inversion import autocrop
@@ -199,7 +198,7 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
dnn_model_path = None
try:
- dnn_model_path = autocrop.download_and_cache_models(os.path.join(models_path, "opencv"))
+ dnn_model_path = autocrop.download_and_cache_models(os.path.join(paths.models_path, "opencv"))
except Exception as e:
print("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", e)
diff --git a/modules/timer.py b/modules/timer.py
new file mode 100644
index 00000000..57a4f17a
--- /dev/null
+++ b/modules/timer.py
@@ -0,0 +1,35 @@
+import time
+
+
+class Timer:
+ def __init__(self):
+ self.start = time.time()
+ self.records = {}
+ self.total = 0
+
+ def elapsed(self):
+ end = time.time()
+ res = end - self.start
+ self.start = end
+ return res
+
+ def record(self, category, extra_time=0):
+ e = self.elapsed()
+ if category not in self.records:
+ self.records[category] = 0
+
+ self.records[category] += e + extra_time
+ self.total += e + extra_time
+
+ def summary(self):
+ res = f"{self.total:.1f}s"
+
+ additions = [x for x in self.records.items() if x[1] >= 0.1]
+ if not additions:
+ return res
+
+ res += " ("
+ res += ", ".join([f"{category}: {time_taken:.1f}s" for category, time_taken in additions])
+ res += ")"
+
+ return res
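Timer accumulates wall-clock time per category and its summary() only lists categories that took 0.1s or more. A small usage sketch, assuming the module above is importable; the sleep calls just stand in for real work:

import time
from modules.timer import Timer

timer = Timer()
time.sleep(0.3)                # stand-in for loading model weights
timer.record("load weights")
time.sleep(0.05)               # under the 0.1s threshold, so omitted from the breakdown
timer.record("hijack")
print(timer.summary())         # prints something like "0.4s (load weights: 0.3s)"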
diff --git a/modules/ui.py b/modules/ui.py
index 85ae62c7..9f4cfda1 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -21,7 +21,7 @@ from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_grad
from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing
from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
-from modules.paths import script_path
+from modules.paths import script_path, data_path
from modules.shared import opts, cmd_opts, restricted_opts
@@ -91,6 +91,7 @@ save_style_symbol = '\U0001f4be' # 💾
apply_style_symbol = '\U0001f4cb' # 📋
clear_prompt_symbol = '\U0001F5D1' # 🗑️
extra_networks_symbol = '\U0001F3B4' # 🎴
+switch_values_symbol = '\U000021C5' # ⇅
def plaintext_to_html(text):
@@ -466,6 +467,7 @@ def create_ui():
height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height")
if opts.dimensions_and_batch_together:
+ res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn")
with gr.Column(elem_id="txt2img_column_batch"):
batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
@@ -567,6 +569,8 @@ def create_ui():
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
+ res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height])
+
txt_prompt_img.change(
fn=modules.images.image_data,
inputs=[
@@ -691,9 +695,15 @@ def create_ui():
with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
- gr.HTML(f"<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.<br>Use an empty output directory to save pictures normally instead of writing to the output directory.{hidden}</p>")
+ gr.HTML(
+ f"<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
+ f"<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
+ f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
+ f"{hidden}</p>"
+ )
img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir")
img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir")
+ img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir")
def copy_image(img):
if isinstance(img, dict) and 'image' in img:
@@ -728,6 +738,7 @@ def create_ui():
height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
if opts.dimensions_and_batch_together:
+ res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn")
with gr.Column(elem_id="img2img_column_batch"):
batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")
@@ -838,6 +849,7 @@ def create_ui():
inpainting_mask_invert,
img2img_batch_input_dir,
img2img_batch_output_dir,
+ img2img_batch_inpaint_mask_dir
] + custom_inputs,
outputs=[
img2img_gallery,
@@ -865,6 +877,7 @@ def create_ui():
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
+ res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height])
img2img_interrogate.click(
fn=lambda *args: process_interrogate(interrogate, *args),
@@ -1497,8 +1510,8 @@ def create_ui():
with open(cssfile, "r", encoding="utf8") as file:
css += file.read() + "\n"
- if os.path.exists(os.path.join(script_path, "user.css")):
- with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
+ if os.path.exists(os.path.join(data_path, "user.css")):
+ with open(os.path.join(data_path, "user.css"), "r", encoding="utf8") as file:
css += file.read() + "\n"
if not cmd_opts.no_progressbar_hiding:
diff --git a/modules/ui_components.py b/modules/ui_components.py
index 9aec3097..284ca0cf 100644
--- a/modules/ui_components.py
+++ b/modules/ui_components.py
@@ -48,3 +48,11 @@ class FormColorPicker(gr.ColorPicker, gr.components.FormComponent):
def get_block_name(self):
return "colorpicker"
+
+class DropdownMulti(gr.Dropdown):
+ """Same as gr.Dropdown but always multiselect"""
+ def __init__(self, **kwargs):
+ super().__init__(multiselect=True, **kwargs)
+
+ def get_block_name(self):
+ return "dropdown"
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index 742e745e..66a41865 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -132,7 +132,7 @@ def install_extension_from_url(dirname, url):
normalized_url = normalize_git_url(url)
assert len([x for x in extensions.extensions if normalize_git_url(x.remote) == normalized_url]) == 0, 'Extension with this URL is already installed'
- tmpdir = os.path.join(paths.script_path, "tmp", dirname)
+ tmpdir = os.path.join(paths.data_path, "tmp", dirname)
try:
shutil.rmtree(tmpdir, True)
diff --git a/modules/upscaler.py b/modules/upscaler.py
index a5bf5acb..e2eaa730 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -11,7 +11,6 @@ from modules import modelloader, shared
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
NEAREST = (Image.Resampling.NEAREST if hasattr(Image, 'Resampling') else Image.NEAREST)
-from modules.paths import models_path
class Upscaler:
@@ -39,7 +38,7 @@ class Upscaler:
self.mod_scale = None
if self.model_path is None and self.name:
- self.model_path = os.path.join(models_path, self.name)
+ self.model_path = os.path.join(shared.models_path, self.name)
if self.model_path and create_dirs:
os.makedirs(self.model_path, exist_ok=True)
@@ -143,4 +142,4 @@ class UpscalerNearest(Upscaler):
def __init__(self, dirname=None):
super().__init__(False)
self.name = "Nearest"
- self.scalers = [UpscalerData("Nearest", None, self)]
\ No newline at end of file
+ self.scalers = [UpscalerData("Nearest", None, self)]
diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py
index 095d29b2..8842bd91 100644
--- a/scripts/postprocessing_upscale.py
+++ b/scripts/postprocessing_upscale.py
@@ -104,3 +104,28 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
def image_changed(self):
upscale_cache.clear()
+
+
+class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale):
+ name = "Simple Upscale"
+ order = 900
+
+ def ui(self):
+ with FormRow():
+ upscaler_name = gr.Dropdown(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
+ upscale_by = gr.Slider(minimum=0.05, maximum=8.0, step=0.05, label="Upscale by", value=2)
+
+ return {
+ "upscale_by": upscale_by,
+ "upscaler_name": upscaler_name,
+ }
+
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_by=2.0, upscaler_name=None):
+ if upscaler_name is None or upscaler_name == "None":
+ return
+
+ upscaler1 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_name]), None)
+ assert upscaler1, f'could not find upscaler named {upscaler_name}'
+
+ pp.image = self.upscale(pp.image, pp.info, upscaler1, 0, upscale_by, 0, 0, False)
+ pp.info[f"Postprocess upscaler"] = upscaler1.name
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index 828c2d12..f0116055 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -123,7 +123,7 @@ def apply_vae(p, x, xs):
def apply_styles(p: StableDiffusionProcessingTxt2Img, x: str, _):
- p.styles = x.split(',')
+ p.styles.extend(x.split(','))
def format_value_add_label(p, opt, x):
@@ -499,7 +499,7 @@ class Script(scripts.Script):
image_cell_count = p.n_iter * p.batch_size
cell_console_text = f"; {image_cell_count} images per cell" if image_cell_count > 1 else ""
plural_s = 's' if len(zs) > 1 else ''
- print(f"X/Y plot will create {len(xs) * len(ys) * len(zs) * image_cell_count} images on {len(zs)} {len(xs)}x{len(ys)} grid{plural_s}{cell_console_text}. (Total steps to process: {total_steps})")
+ print(f"X/Y/Z plot will create {len(xs) * len(ys) * len(zs) * image_cell_count} images on {len(zs)} {len(xs)}x{len(ys)} grid{plural_s}{cell_console_text}. (Total steps to process: {total_steps})")
shared.total_tqdm.updateTotal(total_steps)
grid_infotext = [None]
@@ -533,6 +533,7 @@ class Script(scripts.Script):
return Processed(p, [], p.seed, "")
pc = copy(p)
+ pc.styles = pc.styles[:]
x_opt.apply(pc, x, xs)
y_opt.apply(pc, y, ys)
z_opt.apply(pc, z, zs)
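The added pc.styles = pc.styles[:] line matters because apply_styles now extends the styles list in place instead of replacing it; without the copy, every grid cell would keep appending to the same list object shared with p. A standalone sketch of the aliasing issue the copy avoids (plain Python, not webui code):

base_styles = ["base style"]

cell_styles = base_styles          # what copy(p) alone gives: a second reference to the same list
cell_styles.extend(["cell style"])
print(base_styles)                 # ['base style', 'cell style'] - the change leaks into later cells

cell_styles = base_styles[:]       # what pc.styles = pc.styles[:] does: shallow-copy the list first
cell_styles.extend(["another"])
print(base_styles)                 # ['base style', 'cell style'] - the shared list stays untouched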
@@ -164,7 +164,7 @@
min-height: 3.2em;
}
-#txt2img_styles ul, #img2img_styles ul{
+ul.list-none{
max-height: 35em;
z-index: 2000;
}
@@ -714,9 +714,6 @@ footer {
white-space: nowrap;
min-width: auto;
}
-#txt2img_hires_fix{
- margin-left: -0.8em;
-}
#img2img_copy_to_img2img, #img2img_copy_to_sketch, #img2img_copy_to_inpaint, #img2img_copy_to_inpaint_sketch{
margin-left: 0em;
@@ -744,7 +741,6 @@ footer {
.dark .gr-compact{
background-color: rgb(31 41 55 / var(--tw-bg-opacity));
- margin-left: 0.8em;
}
.gr-compact{
@@ -15,7 +15,6 @@ logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not
from modules import import_hook, errors, extra_networks
from modules import extra_networks_hypernet, ui_extra_networks_hypernets, ui_extra_networks_textual_inversion
from modules.call_queue import wrap_queued_call, queue_lock, wrap_gradio_gpu_call
-from modules.paths import script_path
import torch