author    AUTOMATIC1111 <16777216c@gmail.com>  2022-10-06 17:30:29 +0000
committer GitHub <noreply@github.com>          2022-10-06 17:30:29 +0000
commit    ab4ddbf333eef170804ef8de67001f77c8fdd64c (patch)
tree      21cb1109f8eae463aa4066eec0926cd71ab81740 /scripts
parent    2a7f48cdb8dcf9acb02610cccae0d1ee5d260bc2 (diff)
parent    cf7c784fcc0c84a8a4edd8d3aca4dda4c7025c43 (diff)
Merge branch 'master' into gallery-styling
Diffstat (limited to 'scripts')
-rw-r--r--  scripts/img2imgalt.py         3
-rw-r--r--  scripts/outpainting_mk_2.py  45
-rw-r--r--  scripts/sd_upscale.py         6
-rw-r--r--  scripts/xy_grid.py           46
4 files changed, 54 insertions(+), 46 deletions(-)
diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py
index 0ef137f7..f9894cb0 100644
--- a/scripts/img2imgalt.py
+++ b/scripts/img2imgalt.py
@@ -8,7 +8,6 @@ import gradio as gr
from modules import processing, shared, sd_samplers, prompt_parser
from modules.processing import Processed
-from modules.sd_samplers import samplers
from modules.shared import opts, cmd_opts, state
import torch
@@ -159,7 +158,7 @@ class Script(scripts.Script):
combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5)
- sampler = samplers[p.sampler_index].constructor(p.sd_model)
+ sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, p.sampler_index, p.sd_model)
sigmas = sampler.model_wrap.get_sigmas(p.steps)
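
The img2imgalt change above replaces a direct constructor call with a shared helper. A minimal sketch of what a helper like create_sampler_with_index plausibly does (only the names visible in the diff are taken from the source; the body here is an assumption, not the repository's implementation):

    def create_sampler_with_index(list_of_samplers, index, model):
        # look up the sampler entry by index, as the removed inline code did
        config = list_of_samplers[index]
        # construct the sampler from the model, the same call the old line made directly
        return config.constructor(model)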
diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py
index 9719bb8f..a6468e09 100644
--- a/scripts/outpainting_mk_2.py
+++ b/scripts/outpainting_mk_2.py
@@ -11,46 +11,8 @@ from modules import images, processing, devices
from modules.processing import Processed, process_images
from modules.shared import opts, cmd_opts, state
-# https://github.com/parlance-zz/g-diffuser-bot
-def expand(x, dir, amount, power=0.75):
- is_left = dir == 3
- is_right = dir == 1
- is_up = dir == 0
- is_down = dir == 2
-
- if is_left or is_right:
- noise = np.zeros((x.shape[0], amount, 3), dtype=float)
- indexes = np.random.random((x.shape[0], amount)) ** power * (1 - np.arange(amount) / amount)
- if is_right:
- indexes = 1 - indexes
- indexes = (indexes * (x.shape[1] - 1)).astype(int)
-
- for row in range(x.shape[0]):
- if is_left:
- noise[row] = x[row][indexes[row]]
- else:
- noise[row] = np.flip(x[row][indexes[row]], axis=0)
-
- x = np.concatenate([noise, x] if is_left else [x, noise], axis=1)
- return x
-
- if is_up or is_down:
- noise = np.zeros((amount, x.shape[1], 3), dtype=float)
- indexes = np.random.random((x.shape[1], amount)) ** power * (1 - np.arange(amount) / amount)
- if is_down:
- indexes = 1 - indexes
- indexes = (indexes * x.shape[0] - 1).astype(int)
-
- for row in range(x.shape[1]):
- if is_up:
- noise[:, row] = x[:, row][indexes[row]]
- else:
- noise[:, row] = np.flip(x[:, row][indexes[row]], axis=0)
-
- x = np.concatenate([noise, x] if is_up else [x, noise], axis=0)
- return x
-
+# this function is taken from https://github.com/parlance-zz/g-diffuser-bot
def get_matched_noise(_np_src_image, np_mask_rgb, noise_q=1, color_variation=0.05):
# helper fft routines that keep ortho normalization and auto-shift before and after fft
def _fft2(data):
@@ -123,8 +85,11 @@ def get_matched_noise(_np_src_image, np_mask_rgb, noise_q=1, color_variation=0.05):
src_dist = np.absolute(src_fft)
src_phase = src_fft / src_dist
+ # create a generator with a static seed to make outpainting deterministic / only follow global seed
+ rng = np.random.default_rng(0)
+
noise_window = _get_gaussian_window(width, height, mode=1) # start with simple gaussian noise
- noise_rgb = np.random.random_sample((width, height, num_channels))
+ noise_rgb = rng.random((width, height, num_channels))
noise_grey = (np.sum(noise_rgb, axis=2) / 3.)
noise_rgb *= color_variation # the colorfulness of the starting noise is blended to greyscale with a parameter
for c in range(num_channels):
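
The outpainting_mk_2 hunk swaps module-level np.random calls for a Generator seeded with a constant, so the fill noise no longer depends on global RNG state. A small self-contained sketch of that determinism property (plain NumPy, not code from the repository):

    import numpy as np

    # two generators seeded identically produce identical noise arrays,
    # so repeated outpainting runs reproduce the same fill pattern
    rng_a = np.random.default_rng(0)
    rng_b = np.random.default_rng(0)
    assert np.array_equal(rng_a.random((4, 4, 3)), rng_b.random((4, 4, 3)))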
diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py
index 2653e2d4..cb37ff7e 100644
--- a/scripts/sd_upscale.py
+++ b/scripts/sd_upscale.py
@@ -34,7 +34,11 @@ class Script(scripts.Script):
seed = p.seed
init_img = p.init_images[0]
- img = upscaler.scaler.upscale(init_img, 2, upscaler.data_path)
+
+ if(upscaler.name != "None"):
+ img = upscaler.scaler.upscale(init_img, 2, upscaler.data_path)
+ else:
+ img = init_img
devices.torch_gc()
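
The sd_upscale hunk guards the upscaling call so that selecting "None" passes the original image through unchanged. A hedged sketch of that control flow as a standalone function (the upscaler attributes match the diff; the function name is illustrative):

    def maybe_upscale(upscaler, init_img):
        # only invoke the scaler when a real upscaler is selected
        if upscaler.name != "None":
            return upscaler.scaler.upscale(init_img, 2, upscaler.data_path)
        # otherwise keep the original image and skip the upscaling pass
        return init_img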
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 146663b0..6344e612 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -1,7 +1,9 @@
from collections import namedtuple
from copy import copy
+from itertools import permutations, chain
import random
-
+import csv
+from io import StringIO
from PIL import Image
import numpy as np
@@ -29,6 +31,31 @@ def apply_prompt(p, x, xs):
p.negative_prompt = p.negative_prompt.replace(xs[0], x)
+def apply_order(p, x, xs):
+ token_order = []
+
+    # Initially grab the tokens from the prompt, so they can be replaced in order of earliest seen
+ for token in x:
+ token_order.append((p.prompt.find(token), token))
+
+ token_order.sort(key=lambda t: t[0])
+
+ prompt_parts = []
+
+ # Split the prompt up, taking out the tokens
+ for _, token in token_order:
+ n = p.prompt.find(token)
+ prompt_parts.append(p.prompt[0:n])
+ p.prompt = p.prompt[n + len(token):]
+
+ # Rebuild the prompt with the tokens in the order we want
+ prompt_tmp = ""
+ for idx, part in enumerate(prompt_parts):
+ prompt_tmp += part
+ prompt_tmp += x[idx]
+ p.prompt = prompt_tmp + p.prompt
+
+
samplers_dict = {}
for i, sampler in enumerate(modules.sd_samplers.samplers):
samplers_dict[sampler.name.lower()] = i
@@ -60,16 +87,26 @@ def format_value_add_label(p, opt, x):
def format_value(p, opt, x):
if type(x) == float:
x = round(x, 8)
-
return x
+
+def format_value_join_list(p, opt, x):
+ return ", ".join(x)
+
+
def do_nothing(p, x, xs):
pass
+
def format_nothing(p, opt, x):
return ""
+def str_permutations(x):
+ """dummy function for specifying it in AxisOption's type when you want to get a list of permutations"""
+ return x
+
+
AxisOption = namedtuple("AxisOption", ["label", "type", "apply", "format_value"])
AxisOptionImg2Img = namedtuple("AxisOptionImg2Img", ["label", "type", "apply", "format_value"])
@@ -82,6 +119,7 @@ axis_options = [
AxisOption("Steps", int, apply_field("steps"), format_value_add_label),
AxisOption("CFG Scale", float, apply_field("cfg_scale"), format_value_add_label),
AxisOption("Prompt S/R", str, apply_prompt, format_value),
+ AxisOption("Prompt order", str_permutations, apply_order, format_value_join_list),
AxisOption("Sampler", str, apply_sampler, format_value),
AxisOption("Checkpoint name", str, apply_checkpoint, format_value),
AxisOption("Sigma Churn", float, apply_field("s_churn"), format_value_add_label),
@@ -159,7 +197,7 @@ class Script(scripts.Script):
if opt.label == 'Nothing':
return [0]
- valslist = [x.strip() for x in vals.split(",")]
+ valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals)))]
if opt.type == int:
valslist_ext = []
@@ -206,6 +244,8 @@ class Script(scripts.Script):
valslist_ext.append(val)
valslist = valslist_ext
+ elif opt.type == str_permutations:
+ valslist = list(permutations(valslist))
valslist = [opt.type(x) for x in valslist]
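
The xy_grid changes introduce two mechanisms: CSV-aware splitting of the axis value list and a permutation axis. A short standalone sketch of both, using only the stdlib calls imported in the diff (the example strings are made up):

    import csv
    from io import StringIO
    from itertools import permutations, chain

    # csv.reader lets a quoted value contain commas, which a plain str.split(",") could not
    vals = 'red hair,"detailed, intricate",smiling'
    valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals)))]
    # -> ['red hair', 'detailed, intricate', 'smiling']

    # a str_permutations axis expands the value list into every ordering of the tokens
    orderings = list(permutations(valslist))
    # -> 6 tuples, one per ordering of the three tokens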