Diffstat (limited to 'scripts')
-rw-r--r--  scripts/prompt_matrix.py | 19
-rw-r--r--  scripts/sd_upscale.py    | 15
-rw-r--r--  scripts/xy_grid.py       | 46
3 files changed, 62 insertions(+), 18 deletions(-)
diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py
index 4d1e152d..4c79eaef 100644
--- a/scripts/prompt_matrix.py
+++ b/scripts/prompt_matrix.py
@@ -18,7 +18,7 @@ def draw_xy_grid(xs, ys, x_label, y_label, cell):
ver_texts = [[images.GridAnnotation(y_label(y))] for y in ys]
hor_texts = [[images.GridAnnotation(x_label(x))] for x in xs]
- first_pocessed = None
+ first_processed = None
state.job_count = len(xs) * len(ys)
@@ -27,17 +27,17 @@ def draw_xy_grid(xs, ys, x_label, y_label, cell):
state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
processed = cell(x, y)
- if first_pocessed is None:
- first_pocessed = processed
+ if first_processed is None:
+ first_processed = processed
res.append(processed.images[0])
grid = images.image_grid(res, rows=len(ys))
grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)
- first_pocessed.images = [grid]
+ first_processed.images = [grid]
- return first_pocessed
+ return first_processed
class Script(scripts.Script):
@@ -46,10 +46,11 @@ class Script(scripts.Script):
def ui(self, is_img2img):
put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False)
+ different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False)
- return [put_at_start]
+ return [put_at_start, different_seeds]
- def run(self, p, put_at_start):
+ def run(self, p, put_at_start, different_seeds):
modules.processing.fix_seed(p)
original_prompt = p.prompt[0] if type(p.prompt) == list else p.prompt
@@ -73,7 +74,7 @@ class Script(scripts.Script):
print(f"Prompt matrix will create {len(all_prompts)} images using a total of {p.n_iter} batches.")
p.prompt = all_prompts
- p.seed = [p.seed for _ in all_prompts]
+ p.seed = [p.seed + (i if different_seeds else 0) for i in range(len(all_prompts))]
p.prompt_for_display = original_prompt
processed = process_images(p)
@@ -84,6 +85,6 @@ class Script(scripts.Script):
processed.infotexts.insert(0, processed.infotexts[0])
if opts.grid_save:
- images.save_image(processed.images[0], p.outpath_grids, "prompt_matrix", prompt=original_prompt, seed=processed.seed, grid=True, p=p)
+ images.save_image(processed.images[0], p.outpath_grids, "prompt_matrix", extension=opts.grid_format, prompt=original_prompt, seed=processed.seed, grid=True, p=p)
return processed
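
Illustrative note: the new "Use different seed for each picture" checkbox changes how the per-image seed list is built. A minimal sketch of that logic follows; base_seed and prompts are placeholder values, not part of the actual script.

# Sketch of the seed-list construction added in prompt_matrix.py above.
base_seed = 1000
prompts = ["a cat", "a cat, oil painting", "a cat, sketch", "a cat, oil painting, sketch"]

def build_seeds(different_seeds: bool) -> list[int]:
    # One seed per generated image; offset by index only when the new
    # "Use different seed for each picture" checkbox is enabled.
    return [base_seed + (i if different_seeds else 0) for i in range(len(prompts))]

print(build_seeds(False))  # [1000, 1000, 1000, 1000]
print(build_seeds(True))   # [1000, 1001, 1002, 1003]
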
diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py
index 01074291..28bd96b3 100644
--- a/scripts/sd_upscale.py
+++ b/scripts/sd_upscale.py
@@ -17,13 +17,14 @@ class Script(scripts.Script):
return is_img2img
def ui(self, is_img2img):
- info = gr.HTML("<p style=\"margin-bottom:0.75em\">Will upscale the image to twice the dimensions; use width and height sliders to set tile size</p>")
+ info = gr.HTML("<p style=\"margin-bottom:0.75em\">Will upscale the image by the selected scale factor; use width and height sliders to set tile size</p>")
overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64)
+ scale_factor = gr.Slider(minimum=1, maximum=4, step=1, label='Scale Factor', value=2)
upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
- return [info, overlap, upscaler_index]
+ return [info, overlap, upscaler_index, scale_factor]
- def run(self, p, _, overlap, upscaler_index):
+ def run(self, p, _, overlap, upscaler_index, scale_factor):
processing.fix_seed(p)
upscaler = shared.sd_upscalers[upscaler_index]
@@ -34,9 +35,9 @@ class Script(scripts.Script):
seed = p.seed
init_img = p.init_images[0]
-
- if(upscaler.name != "None"):
- img = upscaler.scaler.upscale(init_img, 2, upscaler.data_path)
+
+ if (upscaler.name != "None"):
+ img = upscaler.scaler.upscale(init_img, scale_factor, upscaler.data_path)
else:
img = init_img
@@ -69,7 +70,7 @@ class Script(scripts.Script):
work_results = []
for i in range(batch_count):
p.batch_size = batch_size
- p.init_images = work[i*batch_size:(i+1)*batch_size]
+ p.init_images = work[i * batch_size:(i + 1) * batch_size]
state.job = f"Batch {i + 1 + n * batch_count} out of {state.job_count}"
processed = processing.process_images(p)
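
Illustrative note: the reformatted slice above walks the list of upscaled tiles in fixed-size batches. A stand-alone sketch, with work, batch_size and batch_count as stand-ins for the script's locals and integers standing in for image tiles:

# Sketch of the batching loop in sd_upscale.py.
import math

work = list(range(10))   # pretend these are 10 image tiles
batch_size = 4
batch_count = math.ceil(len(work) / batch_size)

for i in range(batch_count):
    batch = work[i * batch_size:(i + 1) * batch_size]
    print(f"batch {i + 1}/{batch_count}: {batch}")
# batch 1/3: [0, 1, 2, 3]
# batch 2/3: [4, 5, 6, 7]
# batch 3/3: [8, 9]
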
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 0f27deda..3e0b2805 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -10,13 +10,16 @@ import numpy as np
import modules.scripts as scripts
import gradio as gr
-from modules import images, sd_samplers
+from modules import images, paths, sd_samplers
from modules.hypernetworks import hypernetwork
from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.sd_samplers
import modules.sd_models
+import modules.sd_vae
+import glob
+import os
import re
@@ -114,6 +117,38 @@ def apply_clip_skip(p, x, xs):
opts.data["CLIP_stop_at_last_layers"] = x
+def apply_upscale_latent_space(p, x, xs):
+ if x.lower().strip() != '0':
+ opts.data["use_scale_latent_for_hires_fix"] = True
+ else:
+ opts.data["use_scale_latent_for_hires_fix"] = False
+
+
+def find_vae(name: str):
+ if name.lower() in ['auto', 'none']:
+ return name
+ else:
+ vae_path = os.path.abspath(os.path.join(paths.models_path, 'VAE'))
+ found = glob.glob(os.path.join(vae_path, f'**/{name}.*pt'), recursive=True)
+ if found:
+ return found[0]
+ else:
+ return 'auto'
+
+
+def apply_vae(p, x, xs):
+ if x.lower().strip() == 'none':
+ modules.sd_vae.reload_vae_weights(shared.sd_model, vae_file='None')
+ else:
+ found = find_vae(x)
+ if found:
+ v = modules.sd_vae.reload_vae_weights(shared.sd_model, vae_file=found)
+
+
+def apply_styles(p: StableDiffusionProcessingTxt2Img, x: str, _):
+ p.styles = x.split(',')
+
+
def format_value_add_label(p, opt, x):
if type(x) == float:
x = round(x, 8)
@@ -167,7 +202,10 @@ axis_options = [
AxisOption("Eta", float, apply_field("eta"), format_value_add_label, None),
AxisOption("Clip skip", int, apply_clip_skip, format_value_add_label, None),
AxisOption("Denoising", float, apply_field("denoising_strength"), format_value_add_label, None),
+ AxisOption("Upscale latent space for hires.", str, apply_upscale_latent_space, format_value_add_label, None),
AxisOption("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight"), format_value_add_label, None),
+ AxisOption("VAE", str, apply_vae, format_value_add_label, None),
+ AxisOption("Styles", str, apply_styles, format_value_add_label, None),
]
@@ -229,14 +267,18 @@ class SharedSettingsStackHelper(object):
self.CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers
self.hypernetwork = opts.sd_hypernetwork
self.model = shared.sd_model
+ self.use_scale_latent_for_hires_fix = opts.use_scale_latent_for_hires_fix
+ self.vae = opts.sd_vae
def __exit__(self, exc_type, exc_value, tb):
modules.sd_models.reload_model_weights(self.model)
+ modules.sd_vae.reload_vae_weights(self.model, vae_file=find_vae(self.vae))
hypernetwork.load_hypernetwork(self.hypernetwork)
hypernetwork.apply_strength()
opts.data["CLIP_stop_at_last_layers"] = self.CLIP_stop_at_last_layers
+ opts.data["use_scale_latent_for_hires_fix"] = self.use_scale_latent_for_hires_fix
re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
@@ -383,6 +425,6 @@ class Script(scripts.Script):
)
if opts.grid_save:
- images.save_image(processed.images[0], p.outpath_grids, "xy_grid", prompt=p.prompt, seed=processed.seed, grid=True, p=p)
+ images.save_image(processed.images[0], p.outpath_grids, "xy_grid", extension=opts.grid_format, prompt=p.prompt, seed=processed.seed, grid=True, p=p)
return processed
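
Illustrative note: the VAE axis added in xy_grid.py resolves the name typed on the axis to a file via a recursive glob under the models/VAE directory, falling back to 'auto' when nothing matches. A self-contained sketch of that lookup; vae_dir is a hypothetical path here, whereas the real code derives it from modules.paths.models_path:

# Sketch of the VAE name resolution introduced above.
import glob
import os

def find_vae_sketch(name: str, vae_dir: str = "models/VAE") -> str:
    # 'auto' and 'none' are passed through unchanged, as in the diff.
    if name.lower() in ("auto", "none"):
        return name
    # Match files whose extension ends in "pt" (e.g. .pt, .ckpt) anywhere
    # under the VAE directory.
    found = glob.glob(os.path.join(vae_dir, f"**/{name}.*pt"), recursive=True)
    return found[0] if found else "auto"

print(find_vae_sketch("auto"))         # auto
print(find_vae_sketch("missing-vae"))  # auto (fallback when no file matches)
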