Diffstat (limited to 'modules')
-rw-r--r--  modules/api/api.py                              |   5
-rw-r--r--  modules/api/models.py                           |   2
-rw-r--r--  modules/codeformer_model.py                     |   2
-rw-r--r--  modules/devices.py                              |  25
-rw-r--r--  modules/extensions.py                           |   4
-rw-r--r--  modules/extra_networks_hypernet.py              |   8
-rw-r--r--  modules/extras.py                               |  21
-rw-r--r--  modules/generation_parameters_copypaste.py      |   4
-rw-r--r--  modules/gfpgan_model.py                         |   5
-rw-r--r--  modules/hashes.py                               |   4
-rw-r--r--  modules/img2img.py                              |  22
-rw-r--r--  modules/interrogate.py                          |   2
-rw-r--r--  modules/paths.py                                |  10
-rw-r--r--  modules/processing.py                           |  33
-rw-r--r--  modules/realesrgan_model.py                     |   2
-rw-r--r--  modules/script_loading.py                       |  10
-rw-r--r--  modules/scripts.py                              |  32
-rw-r--r--  modules/scripts_auto_postprocessing.py          |  42
-rw-r--r--  modules/scripts_postprocessing.py               |  11
-rw-r--r--  modules/sd_hijack.py                            |   4
-rw-r--r--  modules/sd_hijack_inpainting.py                 |   9
-rw-r--r--  modules/sd_hijack_ip2p.py                       |   4
-rw-r--r--  modules/sd_hijack_unet.py                       |   8
-rw-r--r--  modules/sd_hijack_utils.py                      |   2
-rw-r--r--  modules/sd_models.py                            | 242
-rw-r--r--  modules/sd_models_config.py                     | 112
-rw-r--r--  modules/sd_samplers.py                          |   2
-rw-r--r--  modules/sd_vae.py                               |   5
-rw-r--r--  modules/shared.py                               |  34
-rw-r--r--  modules/shared_items.py                         |  23
-rw-r--r--  modules/textual_inversion/preprocess.py         |   5
-rw-r--r--  modules/textual_inversion/textual_inversion.py  |   2
-rw-r--r--  modules/timer.py                                |  35
-rw-r--r--  modules/ui.py                                   |  25
-rw-r--r--  modules/ui_components.py                        |   8
-rw-r--r--  modules/ui_extensions.py                        |  30
-rw-r--r--  modules/upscaler.py                             |   5
37 files changed, 562 insertions, 237 deletions
diff --git a/modules/api/api.py b/modules/api/api.py
index 25c65e57..eb7b1da5 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -18,7 +18,8 @@ from modules.textual_inversion.textual_inversion import create_embedding, train_
from modules.textual_inversion.preprocess import preprocess
from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
from PIL import PngImagePlugin,Image
-from modules.sd_models import checkpoints_list, find_checkpoint_config
+from modules.sd_models import checkpoints_list
+from modules.sd_models_config import find_checkpoint_config_near_filename
from modules.realesrgan_model import get_realesrgan_models
from modules import devices
from typing import List
@@ -387,7 +388,7 @@ class Api:
]
def get_sd_models(self):
- return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config(x)} for x in checkpoints_list.values()]
+ return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in checkpoints_list.values()]
def get_hypernetworks(self):
return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
diff --git a/modules/api/models.py b/modules/api/models.py
index 805bd8f7..cba43d3b 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -228,7 +228,7 @@ class SDModelItem(BaseModel):
hash: Optional[str] = Field(title="Short hash")
sha256: Optional[str] = Field(title="sha256 hash")
filename: str = Field(title="Filename")
- config: str = Field(title="Config file")
+ config: Optional[str] = Field(title="Config file")
class HypernetworkItem(BaseModel):
name: str = Field(title="Name")
diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py
index ab40d842..01fb7bd8 100644
--- a/modules/codeformer_model.py
+++ b/modules/codeformer_model.py
@@ -8,7 +8,7 @@ import torch
import modules.face_restoration
import modules.shared
from modules import shared, devices, modelloader
-from modules.paths import script_path, models_path
+from modules.paths import models_path
# codeformer people made a choice to include modified basicsr library to their project which makes
# it utterly impossible to use it alongside with other libraries that also use basicsr, like GFPGAN.
diff --git a/modules/devices.py b/modules/devices.py
index 6b36622c..655ca1d3 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -34,14 +34,18 @@ def get_cuda_device_string():
return "cuda"
-def get_optimal_device():
+def get_optimal_device_name():
    if torch.cuda.is_available():
-        return torch.device(get_cuda_device_string())
+        return get_cuda_device_string()
    if has_mps():
-        return torch.device("mps")
+        return "mps"
+
+    return "cpu"
-    return cpu
+
+def get_optimal_device():
+    return torch.device(get_optimal_device_name())
def get_device_for(task):
@@ -83,6 +87,14 @@ dtype_unet = torch.float16
unet_needs_upcast = False
+def cond_cast_unet(input):
+ return input.to(dtype_unet) if unet_needs_upcast else input
+
+
+def cond_cast_float(input):
+ return input.float() if unet_needs_upcast else input
+
+
def randn(seed, shape):
torch.manual_seed(seed)
if device.type == 'mps':
@@ -139,6 +151,8 @@ def test_for_nans(x, where):
else:
message = "A tensor with all NaNs was produced."
+ message += " Use --disable-nan-check commandline argument to disable this check."
+
raise NansException(message)
@@ -193,6 +207,3 @@ if has_mps():
cumsum_needs_bool_fix = not torch.BoolTensor([True,True]).to(device=torch.device("mps"), dtype=torch.int64).equal(torch.BoolTensor([True,False]).to(torch.device("mps")).cumsum(0))
torch.cumsum = lambda input, *args, **kwargs: ( cumsum_fix(input, orig_cumsum, *args, **kwargs) )
torch.Tensor.cumsum = lambda self, *args, **kwargs: ( cumsum_fix(self, orig_Tensor_cumsum, *args, **kwargs) )
- orig_narrow = torch.narrow
- torch.narrow = lambda *args, **kwargs: ( orig_narrow(*args, **kwargs).clone() )
-
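The devices.py changes split device selection into a string-returning get_optimal_device_name() plus a thin torch.device wrapper, and add cond_cast_unet/cond_cast_float so callers no longer repeat the unet_needs_upcast conditional inline (processing.py below migrates to them). A minimal usage sketch, assuming an ordinary float32 tensor pipeline:

    import torch
    from modules import devices

    latent = torch.randn(1, 4, 64, 64)
    unet_in = devices.cond_cast_unet(latent)     # fp16 only when upcast mode is active
    restored = devices.cond_cast_float(unet_in)  # back to fp32 under the same flag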
diff --git a/modules/extensions.py b/modules/extensions.py
index b522125c..5e12b1aa 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -7,9 +7,11 @@ import git
from modules import paths, shared
extensions = []
-extensions_dir = os.path.join(paths.script_path, "extensions")
+extensions_dir = os.path.join(paths.data_path, "extensions")
extensions_builtin_dir = os.path.join(paths.script_path, "extensions-builtin")
+if not os.path.exists(extensions_dir):
+ os.makedirs(extensions_dir)
def active():
return [x for x in extensions if x.enabled]
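With extensions now living under the user-configurable data_path rather than the install directory, the folder may not exist yet, hence the makedirs guard. The existence check can be folded into the call itself; a one-line equivalent:

    os.makedirs(extensions_dir, exist_ok=True)  # no-op if the directory already exists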
diff --git a/modules/extra_networks_hypernet.py b/modules/extra_networks_hypernet.py
index ff279a1f..d3a4d7ad 100644
--- a/modules/extra_networks_hypernet.py
+++ b/modules/extra_networks_hypernet.py
@@ -1,4 +1,4 @@
-from modules import extra_networks
+from modules import extra_networks, shared
from modules.hypernetworks import hypernetwork
@@ -7,6 +7,12 @@ class ExtraNetworkHypernet(extra_networks.ExtraNetwork):
super().__init__('hypernet')
def activate(self, p, params_list):
+ additional = shared.opts.sd_hypernetwork
+
+ if additional != "" and additional in shared.hypernetworks and len([x for x in params_list if x.items[0] == additional]) == 0:
+ p.all_prompts = [x + f"<hypernet:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
+ params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
+
names = []
multipliers = []
for params in params_list:
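activate() now injects the hypernetwork selected in settings (shared.opts.sd_hypernetwork) whenever the prompt does not already reference it, by appending an extra-network tag to every prompt. Illustrative values only — "foo" stands in for the configured hypernetwork name and 1.0 for extra_networks_default_multiplier:

    prompt = "a castle at dusk"
    prompt += "<hypernet:foo:1.0>"  # appended when no matching tag is already in params_list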
diff --git a/modules/extras.py b/modules/extras.py
index 36123aa5..d8ece955 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -6,7 +6,7 @@ import shutil
import torch
import tqdm
-from modules import shared, images, sd_models, sd_vae
+from modules import shared, images, sd_models, sd_vae, sd_models_config
from modules.ui_common import plaintext_to_html
import gradio as gr
import safetensors.torch
@@ -37,7 +37,7 @@ def run_pnginfo(image):
def create_config(ckpt_result, config_source, a, b, c):
def config(x):
- res = sd_models.find_checkpoint_config(x) if x else None
+ res = sd_models_config.find_checkpoint_config_near_filename(x) if x else None
return res if res != shared.sd_default_config else None
if config_source == 0:
@@ -132,6 +132,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
tertiary_model_info = sd_models.checkpoints_list[tertiary_model_name] if theta_func1 else None
result_is_inpainting_model = False
+ result_is_instruct_pix2pix_model = False
if theta_func2:
shared.state.textinfo = f"Loading B"
@@ -185,14 +186,19 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
if a.shape != b.shape and a.shape[0:1] + a.shape[2:] == b.shape[0:1] + b.shape[2:]:
if a.shape[1] == 4 and b.shape[1] == 9:
raise RuntimeError("When merging inpainting model with a normal one, A must be the inpainting model.")
+ if a.shape[1] == 4 and b.shape[1] == 8:
+ raise RuntimeError("When merging instruct-pix2pix model with a normal one, A must be the instruct-pix2pix model.")
- assert a.shape[1] == 9 and b.shape[1] == 4, f"Bad dimensions for merged layer {key}: A={a.shape}, B={b.shape}"
-
- theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)
- result_is_inpainting_model = True
+ if a.shape[1] == 8 and b.shape[1] == 4:  # if we have an instruct-pix2pix model...
+ theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)  # merge only the channels the models have in common; otherwise we get a dimension-mismatch error
+ result_is_instruct_pix2pix_model = True
+ else:
+ assert a.shape[1] == 9 and b.shape[1] == 4, f"Bad dimensions for merged layer {key}: A={a.shape}, B={b.shape}"
+ theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)
+ result_is_inpainting_model = True
else:
theta_0[key] = theta_func2(a, b, multiplier)
-
+
theta_0[key] = to_half(theta_0[key], save_as_half)
shared.state.sampling_step += 1
@@ -226,6 +232,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
filename = filename_generator() if custom_name == '' else custom_name
filename += ".inpainting" if result_is_inpainting_model else ""
+ filename += ".instruct-pix2pix" if result_is_instruct_pix2pix_model else ""
filename += "." + checkpoint_format
output_modelname = os.path.join(ckpt_dir, filename)
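The merger now handles instruct-pix2pix checkpoints, whose UNet input convolution takes 8 channels (4 latent plus 4 for the encoded edit image) versus 4 in a standard model, so only the first 4 input channels overlap and can be interpolated. A self-contained sketch of the sliced merge, assuming theta_func2 is plain linear interpolation and using a hypothetical layer shape:

    import torch

    def weighted_sum(a, b, multiplier):
        return (1 - multiplier) * a + multiplier * b

    a = torch.randn(320, 8, 3, 3)  # instruct-pix2pix input conv weight (illustrative shape)
    b = torch.randn(320, 4, 3, 3)  # standard model input conv weight
    merged = a.clone()
    merged[:, 0:4, :, :] = weighted_sum(a[:, 0:4, :, :], b, 0.5)  # shared channels only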
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 53f1a865..3c098e0d 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -6,7 +6,7 @@ import re
from pathlib import Path
import gradio as gr
-from modules.shared import script_path
+from modules.paths import data_path
from modules import shared, ui_tempdir, script_callbacks
import tempfile
from PIL import Image
@@ -289,7 +289,7 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
def connect_paste(button, paste_fields, input_comp, jsfunc=None):
def paste_func(prompt):
if not prompt and not shared.cmd_opts.hide_ui_dir_config:
- filename = os.path.join(script_path, "params.txt")
+ filename = os.path.join(data_path, "params.txt")
if os.path.exists(filename):
with open(filename, "r", encoding="utf8") as file:
prompt = file.read()
diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py
index 1e2dbc32..fbe6215a 100644
--- a/modules/gfpgan_model.py
+++ b/modules/gfpgan_model.py
@@ -6,12 +6,11 @@ import facexlib
import gfpgan
import modules.face_restoration
-from modules import shared, devices, modelloader
-from modules.paths import models_path
+from modules import paths, shared, devices, modelloader
model_dir = "GFPGAN"
user_path = None
-model_path = os.path.join(models_path, model_dir)
+model_path = os.path.join(paths.models_path, model_dir)
model_url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth"
have_gfpgan = False
loaded_gfpgan_model = None
diff --git a/modules/hashes.py b/modules/hashes.py
index b85a7580..819362a3 100644
--- a/modules/hashes.py
+++ b/modules/hashes.py
@@ -4,8 +4,10 @@ import os.path
import filelock
+from modules.paths import data_path
-cache_filename = "cache.json"
+
+cache_filename = os.path.join(data_path, "cache.json")
cache_data = None
diff --git a/modules/img2img.py b/modules/img2img.py
index 2168c8e2..3ecb6146 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -16,11 +16,18 @@ import modules.images as images
import modules.scripts
-def process_batch(p, input_dir, output_dir, args):
+def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
processing.fix_seed(p)
images = shared.listfiles(input_dir)
+ is_inpaint_batch = False
+ if inpaint_mask_dir:
+ inpaint_masks = shared.listfiles(inpaint_mask_dir)
+ is_inpaint_batch = len(inpaint_masks) > 0
+ if is_inpaint_batch:
+ print(f"\nInpaint batch is enabled. {len(inpaint_masks)} masks found.")
+
print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")
save_normally = output_dir == ''
@@ -43,6 +50,15 @@ def process_batch(p, input_dir, output_dir, args):
img = ImageOps.exif_transpose(img)
p.init_images = [img] * p.batch_size
+ if is_inpaint_batch:
+ # try to find corresponding mask for an image using simple filename matching
+ mask_image_path = os.path.join(inpaint_mask_dir, os.path.basename(image))
+ # if not found use first one ("same mask for all images" use-case)
+ if mask_image_path not in inpaint_masks:
+ mask_image_path = inpaint_masks[0]
+ mask_image = Image.open(mask_image_path)
+ p.image_mask = mask_image
+
proc = modules.scripts.scripts_img2img.run(p, *args)
if proc is None:
proc = process_images(p)
@@ -59,7 +75,7 @@ def process_batch(p, input_dir, output_dir, args):
processed_image.save(os.path.join(output_dir, filename))
-def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, *args):
+def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, *args):
is_batch = mode == 5
if mode == 0: # img2img
@@ -139,7 +155,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
if is_batch:
assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
- process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, args)
+ process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args)
processed = Processed(p, [], p.seed, "")
else:
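Batch img2img gains an optional inpaint mask directory: each image gets the mask whose filename matches its own, falling back to the first mask when no match exists (the "same mask for all images" case). A standalone sketch of that matching rule, with hypothetical paths:

    import os

    inpaint_masks = ["/masks/cat.png", "/masks/dog.png"]

    def mask_for(image_path, mask_dir="/masks"):
        candidate = os.path.join(mask_dir, os.path.basename(image_path))
        return candidate if candidate in inpaint_masks else inpaint_masks[0]

    assert mask_for("/images/dog.png") == "/masks/dog.png"   # filename match
    assert mask_for("/images/bird.png") == "/masks/cat.png"  # fallback to first mask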
diff --git a/modules/interrogate.py b/modules/interrogate.py
index c72ff694..cbb80683 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -12,7 +12,7 @@ from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
import modules.shared as shared
-from modules import devices, paths, lowvram, modelloader, errors
+from modules import devices, paths, shared, lowvram, modelloader, errors
blip_image_eval_size = 384
clip_model_name = 'ViT-L/14'
diff --git a/modules/paths.py b/modules/paths.py
index 20b3e4d8..d991cc71 100644
--- a/modules/paths.py
+++ b/modules/paths.py
@@ -4,7 +4,15 @@ import sys
import modules.safe
script_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-models_path = os.path.join(script_path, "models")
+
+# Parse the --data-dir flag first so we can use it as a base for our other argument default values
+parser = argparse.ArgumentParser(add_help=False)
+parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored",)
+cmd_opts_pre = parser.parse_known_args()[0]
+data_path = cmd_opts_pre.data_dir
+models_path = os.path.join(data_path, "models")
+
+# data_path = cmd_opts_pre.data
sys.path.insert(0, script_path)
# search for directory of stable diffusion in following places
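paths.py pre-parses --data-dir with a throwaway parser before the main one runs, so that defaults such as models_path can be derived from it; parse_known_args ignores every flag the throwaway parser does not know about instead of erroring. A self-contained sketch of the two-stage parse, with illustrative argument values:

    import argparse
    import os

    pre = argparse.ArgumentParser(add_help=False)
    pre.add_argument("--data-dir", type=str, default=os.getcwd())
    known, _rest = pre.parse_known_args(["--data-dir", "/srv/sd", "--some-later-flag"])
    models_path = os.path.join(known.data_dir, "models")  # "/srv/sd/models"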
diff --git a/modules/processing.py b/modules/processing.py
index 9e5a2f38..afab6790 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -13,10 +13,11 @@ from skimage import exposure
from typing import Any, Dict, List, Optional
import modules.sd_hijack
-from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx
+from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
+import modules.paths as paths
import modules.face_restoration
import modules.images as images
import modules.styles
@@ -172,8 +173,7 @@ class StableDiffusionProcessing:
midas_in = torch.from_numpy(transformed["midas_in"][None, ...]).to(device=shared.device)
midas_in = repeat(midas_in, "1 ... -> n ...", n=self.batch_size)
- conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image.to(devices.dtype_unet) if devices.unet_needs_upcast else source_image))
- conditioning_image = conditioning_image.float() if devices.unet_needs_upcast else conditioning_image
+ conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image))
conditioning = torch.nn.functional.interpolate(
self.sd_model.depth_model(midas_in),
size=conditioning_image.shape[2:],
@@ -185,7 +185,12 @@ class StableDiffusionProcessing:
conditioning = 2. * (conditioning - depth_min) / (depth_max - depth_min) - 1.
return conditioning
- def inpainting_image_conditioning(self, source_image, latent_image, image_mask = None):
+ def edit_image_conditioning(self, source_image):
+ conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image))
+
+ return conditioning_image
+
+ def inpainting_image_conditioning(self, source_image, latent_image, image_mask=None):
self.is_using_inpainting_conditioning = True
# Handle the different mask inputs
@@ -212,7 +217,7 @@ class StableDiffusionProcessing:
)
# Encode the new masked image using first stage of network.
- conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image.to(devices.dtype_unet) if devices.unet_needs_upcast else conditioning_image))
+ conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image))
# Create the concatenated conditioning tensor to be fed to `c_concat`
conditioning_mask = torch.nn.functional.interpolate(conditioning_mask, size=latent_image.shape[-2:])
@@ -223,13 +228,18 @@ class StableDiffusionProcessing:
return image_conditioning
def img2img_image_conditioning(self, source_image, latent_image, image_mask=None):
+ source_image = devices.cond_cast_float(source_image)
+
# HACK: Using introspection as the Depth2Image model doesn't appear to uniquely
# identify itself with a field common to all models. The conditioning_key is also hybrid.
if isinstance(self.sd_model, LatentDepth2ImageDiffusion):
- return self.depth2img_image_conditioning(source_image.float() if devices.unet_needs_upcast else source_image)
+ return self.depth2img_image_conditioning(source_image)
+
+ if self.sd_model.cond_stage_key == "edit":
+ return self.edit_image_conditioning(source_image)
if self.sampler.conditioning_key in {'hybrid', 'concat'}:
- return self.inpainting_image_conditioning(source_image.float() if devices.unet_needs_upcast else source_image, latent_image, image_mask=image_mask)
+ return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
# Dummy zero conditioning if we're not using inpainting or depth model.
return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
@@ -576,7 +586,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if not p.disable_extra_networks:
extra_networks.activate(p, extra_network_data)
- with open(os.path.join(shared.script_path, "params.txt"), "w", encoding="utf8") as file:
+ with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
processed = Processed(p, [], p.seed, "")
file.write(processed.infotext(p, 0))
@@ -650,6 +660,11 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
image = Image.fromarray(x_sample)
+ if p.scripts is not None:
+ pp = scripts.PostprocessImageArgs(image)
+ p.scripts.postprocess_image(p, pp)
+ image = pp.image
+
if p.color_corrections is not None and i < len(p.color_corrections):
if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
@@ -993,7 +1008,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
image = torch.from_numpy(batch_images)
image = 2. * image - 1.
- image = image.to(device=shared.device, dtype=devices.dtype_unet if devices.unet_needs_upcast else None)
+ image = image.to(shared.device)
self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
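Besides routing the fp16/fp32 casts through devices.cond_cast_float and adding edit_image_conditioning for instruct-pix2pix ("edit") models, processing.py gains a per-image hook: after each image is generated, always-on scripts may replace it via PostprocessImageArgs before color correction and saving. A hedged sketch of a script using the hook — the class shape follows the modules/scripts.py changes shown below; the grayscale behavior is purely illustrative:

    from modules import scripts

    class GrayscaleExample(scripts.Script):
        def title(self):
            return "Grayscale output (example)"

        def show(self, is_img2img):
            return scripts.AlwaysVisible  # the hook runs only for always-on scripts

        def postprocess_image(self, p, pp, *args):
            # pp.image is a PIL image; mutate it to alter the saved result
            pp.image = pp.image.convert("L").convert("RGB")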
diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py
index 47f70251..aad4a629 100644
--- a/modules/realesrgan_model.py
+++ b/modules/realesrgan_model.py
@@ -46,7 +46,7 @@ class UpscalerRealESRGAN(Upscaler):
scale=info.scale,
model_path=info.local_data_path,
model=info.model(),
- half=not cmd_opts.no_half,
+ half=not cmd_opts.no_half and not cmd_opts.upcast_sampling,
tile=opts.ESRGAN_tile,
tile_pad=opts.ESRGAN_tile_overlap,
)
diff --git a/modules/script_loading.py b/modules/script_loading.py
index f93f0951..a7d2203f 100644
--- a/modules/script_loading.py
+++ b/modules/script_loading.py
@@ -1,16 +1,14 @@
import os
import sys
import traceback
+import importlib.util
from types import ModuleType
def load_module(path):
- with open(path, "r", encoding="utf8") as file:
- text = file.read()
-
- compiled = compile(text, path, 'exec')
- module = ModuleType(os.path.basename(path))
- exec(compiled, module.__dict__)
+ module_spec = importlib.util.spec_from_file_location(os.path.basename(path), path)
+ module = importlib.util.module_from_spec(module_spec)
+ module_spec.loader.exec_module(module)
return module
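load_module() drops the manual read/compile/exec in favor of importlib, which sets module metadata (__spec__, __loader__, __file__) correctly and handles source encoding for free. A runnable demonstration of the same pattern against a throwaway file:

    import importlib.util
    import os
    import tempfile

    # Write a throwaway script, then load it the way the new load_module does.
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
        f.write("def greet():\n    return 'hi'\n")

    spec = importlib.util.spec_from_file_location(os.path.basename(f.name), f.name)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    print(module.greet(), module.__file__)  # behaves like a regular imported module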
diff --git a/modules/scripts.py b/modules/scripts.py
index eefdfdd4..24056a12 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -6,12 +6,16 @@ from collections import namedtuple
import gradio as gr
-from modules.processing import StableDiffusionProcessing
from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing
AlwaysVisible = object()
+class PostprocessImageArgs:
+ def __init__(self, image):
+ self.image = image
+
+
class Script:
filename = None
args_from = None
@@ -65,7 +69,7 @@ class Script:
args contains all values returned by components from ui()
"""
- raise NotImplementedError()
+ pass
def process(self, p, *args):
"""
@@ -100,6 +104,13 @@ class Script:
pass
+ def postprocess_image(self, p, pp: PostprocessImageArgs, *args):
+ """
+ Called for every image after it has been generated.
+ """
+
+ pass
+
def postprocess(self, p, processed, *args):
"""
This function is called after processing ends for AlwaysVisible scripts.
@@ -247,11 +258,15 @@ class ScriptRunner:
self.infotext_fields = []
def initialize_scripts(self, is_img2img):
+ from modules import scripts_auto_postprocessing
+
self.scripts.clear()
self.alwayson_scripts.clear()
self.selectable_scripts.clear()
- for script_class, path, basedir, script_module in scripts_data:
+ auto_processing_scripts = scripts_auto_postprocessing.create_auto_preprocessing_script_data()
+
+ for script_class, path, basedir, script_module in auto_processing_scripts + scripts_data:
script = script_class()
script.filename = path
script.is_txt2img = not is_img2img
@@ -346,7 +361,7 @@ class ScriptRunner:
return inputs
- def run(self, p: StableDiffusionProcessing, *args):
+ def run(self, p, *args):
script_index = args[0]
if script_index == 0:
@@ -400,6 +415,15 @@ class ScriptRunner:
print(f"Error running postprocess_batch: {script.filename}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
+ def postprocess_image(self, p, pp: PostprocessImageArgs):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.postprocess_image(p, pp, *script_args)
+ except Exception: