From a9e979977a8e3999b01b6a086bb1332ab7ab308b Mon Sep 17 00:00:00 2001 From: Artem Zagidulin Date: Wed, 2 Nov 2022 19:05:01 +0300 Subject: process_one --- modules/scripts.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'modules/scripts.py') diff --git a/modules/scripts.py b/modules/scripts.py index 533db45c..9f82efea 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -70,6 +70,13 @@ class Script: pass + def process_one(self, p, *args): + """ + Same as process(), but called for every iteration + """ + + pass + def postprocess(self, p, processed, *args): """ This function is called after processing ends for AlwaysVisible scripts. @@ -294,6 +301,15 @@ class ScriptRunner: print(f"Error running process: {script.filename}", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) + def process_one(self, p): + for script in self.alwayson_scripts: + try: + script_args = p.script_args[script.args_from:script.args_to] + script.process_one(p, *script_args) + except Exception: + print(f"Error running process_one: {script.filename}", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + def postprocess(self, p, processed): for script in self.alwayson_scripts: try: -- cgit v1.2.3 From de64146ad2fc2030a4cd3545676f9e18c93b8b18 Mon Sep 17 00:00:00 2001 From: Artem Zagidulin Date: Wed, 2 Nov 2022 21:30:50 +0300 Subject: add number of itter --- modules/processing.py | 2 +- modules/scripts.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'modules/scripts.py') diff --git a/modules/processing.py b/modules/processing.py index 72a2ee4e..17f4a5ec 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -510,7 +510,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: break if p.scripts is not None: - p.scripts.process_one(p) + p.scripts.process_one(p, n) with devices.autocast(): uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps) diff --git a/modules/scripts.py b/modules/scripts.py index 9f82efea..7aa0d56a 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -70,7 +70,7 @@ class Script: pass - def process_one(self, p, *args): + def process_one(self, p, n, *args): """ Same as process(), but called for every iteration """ @@ -301,11 +301,11 @@ class ScriptRunner: print(f"Error running process: {script.filename}", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) - def process_one(self, p): + def process_one(self, p, n): for script in self.alwayson_scripts: try: script_args = p.script_args[script.args_from:script.args_to] - script.process_one(p, *script_args) + script.process_one(p, n, *script_args) except Exception: print(f"Error running process_one: {script.filename}", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) -- cgit v1.2.3 From 4dd898b8c15e342f817d3fb1c8dc9f2d5d111022 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 4 Nov 2022 08:38:11 +0300 Subject: do not mess with components' visibility for scripts; instead create group components and show/hide those; this will break scripts that create invisible components and rely on UI but the earlier i make this change the better --- modules/scripts.py | 34 ++++++++++++++++++---------------- scripts/custom_code.py | 2 +- scripts/outpainting_mk_2.py | 2 +- scripts/poor_mans_outpainting.py | 4 ++-- scripts/prompts_from_file.py | 10 +++++----- scripts/sd_upscale.py | 4 ++-- scripts/xy_grid.py | 8 ++++---- 7 files changed, 33 insertions(+), 31 
deletions(-) (limited to 'modules/scripts.py') diff --git a/modules/scripts.py b/modules/scripts.py index 533db45c..28ce07f4 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -18,6 +18,9 @@ class Script: args_to = None alwayson = False + """A gr.Group component that has all script's UI inside it""" + group = None + infotext_fields = None """if set in ui(), this is a list of pairs of gradio component + text; the text will be used when parsing infotext to set the value for the component; see ui.py's txt2img_paste_fields for an example @@ -218,8 +221,6 @@ class ScriptRunner: for control in controls: control.custom_script_source = os.path.basename(script.filename) - if not script.alwayson: - control.visible = False if script.infotext_fields is not None: self.infotext_fields += script.infotext_fields @@ -229,40 +230,41 @@ class ScriptRunner: script.args_to = len(inputs) for script in self.alwayson_scripts: - with gr.Group(): + with gr.Group() as group: create_script_ui(script, inputs, inputs_alwayson) + script.group = group + dropdown = gr.Dropdown(label="Script", elem_id="script_list", choices=["None"] + self.titles, value="None", type="index") dropdown.save_to_config = True inputs[0] = dropdown for script in self.selectable_scripts: - create_script_ui(script, inputs, inputs_alwayson) + with gr.Group(visible=False) as group: + create_script_ui(script, inputs, inputs_alwayson) + + script.group = group def select_script(script_index): - if 0 < script_index <= len(self.selectable_scripts): - script = self.selectable_scripts[script_index-1] - args_from = script.args_from - args_to = script.args_to - else: - args_from = 0 - args_to = 0 + selected_script = self.selectable_scripts[script_index - 1] if script_index>0 else None - return [ui.gr_show(True if i == 0 else args_from <= i < args_to or is_alwayson) for i, is_alwayson in enumerate(inputs_alwayson)] + return [gr.update(visible=selected_script == s) for s in self.selectable_scripts] def init_field(title): + """called when an initial value is set from ui-config.json to show script's UI components""" + if title == 'None': return + script_index = self.titles.index(title) - script = self.selectable_scripts[script_index] - for i in range(script.args_from, script.args_to): - inputs[i].visible = True + self.selectable_scripts[script_index].group.visible = True dropdown.init_field = init_field + dropdown.change( fn=select_script, inputs=[dropdown], - outputs=inputs + outputs=[script.group for script in self.selectable_scripts] ) return inputs diff --git a/scripts/custom_code.py b/scripts/custom_code.py index a9b10c09..22e7b77a 100644 --- a/scripts/custom_code.py +++ b/scripts/custom_code.py @@ -14,7 +14,7 @@ class Script(scripts.Script): return cmd_opts.allow_code def ui(self, is_img2img): - code = gr.Textbox(label="Python code", visible=False, lines=1) + code = gr.Textbox(label="Python code", lines=1) return [code] diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index 2afd4aa5..cf71cb92 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -132,7 +132,7 @@ class Script(scripts.Script): info = gr.HTML("
Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8
") pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128) - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, visible=False) + mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8) direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down']) noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0) color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05) diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py index b0469110..ea45beb0 100644 --- a/scripts/poor_mans_outpainting.py +++ b/scripts/poor_mans_outpainting.py @@ -22,8 +22,8 @@ class Script(scripts.Script): return None pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128) - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, visible=False) - inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", visible=False) + mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4) + inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index") direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down']) return [pixels, mask_blur, inpainting_fill, direction] diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index d187cd9c..3388bc77 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -83,13 +83,14 @@ def cmdargs(line): def load_prompt_file(file): - if (file is None): + if file is None: lines = [] else: lines = [x.strip() for x in file.decode('utf8', errors='ignore').split("\n")] return None, "\n".join(lines), gr.update(lines=7) + class Script(scripts.Script): def title(self): return "Prompts from file or textbox" @@ -107,9 +108,9 @@ class Script(scripts.Script): # We don't shrink back to 1, because that causes the control to ignore [enter], and it may # be unclear to the user that shift-enter is needed. prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt]) - return [checkbox_iterate, checkbox_iterate_batch, file, prompt_txt] + return [checkbox_iterate, checkbox_iterate_batch, prompt_txt] - def run(self, p, checkbox_iterate, checkbox_iterate_batch, file, prompt_txt: str): + def run(self, p, checkbox_iterate, checkbox_iterate_batch, prompt_txt: str): lines = [x.strip() for x in prompt_txt.splitlines()] lines = [x for x in lines if len(x) > 0] @@ -157,5 +158,4 @@ class Script(scripts.Script): if checkbox_iterate: p.seed = p.seed + (p.batch_size * p.n_iter) - - return Processed(p, images, p.seed, "") \ No newline at end of file + return Processed(p, images, p.seed, "") diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index cb37ff7e..01074291 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -18,8 +18,8 @@ class Script(scripts.Script): def ui(self, is_img2img): info = gr.HTML("
Will upscale the image to twice the dimensions; use width and height sliders to set tile size
") - overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, visible=False) - upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", visible=False) + overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64) + upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index") return [info, overlap, upscaler_index] diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index f5255786..417ed0d4 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -263,12 +263,12 @@ class Script(scripts.Script): current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img] with gr.Row(): - x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, visible=False, type="index", elem_id="x_type") - x_values = gr.Textbox(label="X values", visible=False, lines=1) + x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id="x_type") + x_values = gr.Textbox(label="X values", lines=1) with gr.Row(): - y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, visible=False, type="index", elem_id="y_type") - y_values = gr.Textbox(label="Y values", visible=False, lines=1) + y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, type="index", elem_id="y_type") + y_values = gr.Textbox(label="Y values", lines=1) draw_legend = gr.Checkbox(label='Draw legend', value=True) include_lone_images = gr.Checkbox(label='Include Separate Images', value=False) -- cgit v1.2.3 From eeb07330131012c0294afb79165b90270679b9c7 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 4 Nov 2022 11:21:40 +0300 Subject: change process_one virtual function for script to process_batch, add extra args and docs --- modules/processing.py | 2 +- modules/scripts.py | 16 +++++++++++----- 2 files changed, 12 insertions(+), 6 deletions(-) (limited to 'modules/scripts.py') diff --git a/modules/processing.py b/modules/processing.py index e20d8fc4..03c9143d 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -502,7 +502,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: break if p.scripts is not None: - p.scripts.process_one(p, n) + p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds) with devices.autocast(): uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps) diff --git a/modules/scripts.py b/modules/scripts.py index 75e47cd2..366c90d7 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -73,9 +73,15 @@ class Script: pass - def process_one(self, p, n, *args): + def process_batch(self, p, *args, **kwargs): """ - Same as process(), but called for every iteration + Same as process(), but called for every batch. 
+ + **kwargs will have those items: + - batch_number - index of current batch, from 0 to number of batches-1 + - prompts - list of prompts for current batch; you can change contents of this list but changing the number of entries will likely break things + - seeds - list of seeds for current batch + - subseeds - list of subseeds for current batch """ pass @@ -303,13 +309,13 @@ class ScriptRunner: print(f"Error running process: {script.filename}", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) - def process_one(self, p, n): + def process_batch(self, p, **kwargs): for script in self.alwayson_scripts: try: script_args = p.script_args[script.args_from:script.args_to] - script.process_one(p, n, *script_args) + script.process_batch(p, *script_args, **kwargs) except Exception: - print(f"Error running process_one: {script.filename}", file=sys.stderr) + print(f"Error running process_batch: {script.filename}", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) def postprocess(self, p, processed): -- cgit v1.2.3 From a2a1a2f7270a865175f64475229838a8d64509ea Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 6 Nov 2022 09:02:25 +0300 Subject: add ability to create extensions that add localizations --- javascript/ui.js | 2 ++ modules/localization.py | 6 ++++++ modules/scripts.py | 1 - modules/shared.py | 2 -- modules/ui.py | 3 +-- webui.py | 9 +++++---- 6 files changed, 14 insertions(+), 9 deletions(-) (limited to 'modules/scripts.py') diff --git a/javascript/ui.js b/javascript/ui.js index 7e116465..95cfd106 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -208,4 +208,6 @@ function update_token_counter(button_id) { function restart_reload(){ document.body.innerHTML='
Reloading...
'; setTimeout(function(){location.reload()},2000) + + return [] } diff --git a/modules/localization.py b/modules/localization.py index b1810cda..f6a6f2fb 100644 --- a/modules/localization.py +++ b/modules/localization.py @@ -3,6 +3,7 @@ import os import sys import traceback + localizations = {} @@ -16,6 +17,11 @@ def list_localizations(dirname): localizations[fn] = os.path.join(dirname, file) + from modules import scripts + for file in scripts.list_scripts("localizations", ".json"): + fn, ext = os.path.splitext(file.filename) + localizations[fn] = file.path + def localization_js(current_localization_name): fn = localizations.get(current_localization_name, None) diff --git a/modules/scripts.py b/modules/scripts.py index 366c90d7..637b2329 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -3,7 +3,6 @@ import sys import traceback from collections import namedtuple -import modules.ui as ui import gradio as gr from modules.processing import StableDiffusionProcessing diff --git a/modules/shared.py b/modules/shared.py index 70b998ff..e8bacd3c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -221,8 +221,6 @@ interrogator = modules.interrogate.InterrogateModels("interrogate") face_restorers = [] -localization.list_localizations(cmd_opts.localizations_dir) - def realesrgan_models_names(): import modules.realesrgan_model diff --git a/modules/ui.py b/modules/ui.py index 76ca9b07..23643c22 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1563,11 +1563,10 @@ def create_ui(wrap_gradio_gpu_call): shared.state.need_restart = True restart_gradio.click( - fn=request_restart, + _js='restart_reload', inputs=[], outputs=[], - _js='restart_reload' ) if column is not None: diff --git a/webui.py b/webui.py index a5a520f0..4342a962 100644 --- a/webui.py +++ b/webui.py @@ -10,7 +10,7 @@ from fastapi.middleware.gzip import GZipMiddleware from modules.paths import script_path -from modules import devices, sd_samplers, upscaler, extensions +from modules import devices, sd_samplers, upscaler, extensions, localization import modules.codeformer_model as codeformer import modules.extras import modules.face_restoration @@ -28,9 +28,7 @@ import modules.txt2img import modules.script_callbacks import modules.ui -from modules import devices from modules import modelloader -from modules.paths import script_path from modules.shared import cmd_opts import modules.hypernetworks.hypernetwork @@ -64,6 +62,7 @@ def wrap_gradio_gpu_call(func, extra_outputs=None): def initialize(): extensions.list_extensions() + localization.list_localizations(cmd_opts.localizations_dir) if cmd_opts.ui_debug_mode: shared.sd_upscalers = upscaler.UpscalerLanczos().scalers @@ -99,7 +98,6 @@ def initialize(): else: print("Running with TLS") - # make the program just exit at ctrl+c without waiting for anything def sigint_handler(sig, frame): print(f'Interrupted with signal {sig} in {frame}') @@ -185,6 +183,9 @@ def webui(): print('Reloading extensions') extensions.list_extensions() + + localization.list_localizations(cmd_opts.localizations_dir) + print('Reloading custom scripts') modules.scripts.reload_scripts() print('Reloading modules: modules.ui') -- cgit v1.2.3 From 893191cab24cc3511135495d6d2c8d81f5ec63a3 Mon Sep 17 00:00:00 2001 From: Tong Zeng Date: Thu, 10 Nov 2022 10:34:03 +0800 Subject: fix a bug in list_files_with_name --- modules/scripts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/scripts.py') diff --git a/modules/scripts.py b/modules/scripts.py index 637b2329..22d8908b 100644 --- 
a/modules/scripts.py +++ b/modules/scripts.py @@ -140,7 +140,7 @@ def list_files_with_name(filename): continue path = os.path.join(dirpath, filename) - if os.path.isfile(filename): + if os.path.isfile(path): res.append(path) return res -- cgit v1.2.3 From a1a376331c9ecbbee77b86daeaba44587cc56557 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 12 Nov 2022 10:56:06 +0300 Subject: make existing script loading and new preload code use same code for loading modules limit extension preload scripts to just one file named preload.py --- modules/extensions.py | 21 --------------------- modules/script_loading.py | 34 ++++++++++++++++++++++++++++++++++ modules/scripts.py | 46 +++++++++++++++++----------------------------- modules/shared.py | 5 ++--- 4 files changed, 53 insertions(+), 53 deletions(-) create mode 100644 modules/script_loading.py (limited to 'modules/scripts.py') diff --git a/modules/extensions.py b/modules/extensions.py index 544f3580..94ce479a 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -1,7 +1,6 @@ import os import sys import traceback -from importlib.machinery import SourceFileLoader import git @@ -85,23 +84,3 @@ def list_extensions(): extension = Extension(name=dirname, path=path, enabled=dirname not in shared.opts.disabled_extensions) extensions.append(extension) - -def preload_extensions(parser): - if not os.path.isdir(extensions_dir): - return - - for dirname in sorted(os.listdir(extensions_dir)): - path = os.path.join(extensions_dir, dirname) - if not os.path.isdir(path): - continue - for file in os.listdir(path): - if "preload.py" in file: - full_file = os.path.join(path, file) - print(f"Got preload file: {full_file}") - - try: - ext = SourceFileLoader("preload", full_file).load_module() - parser = ext.preload(parser) - except Exception as e: - print(f"Exception preloading script: {e}") - return parser \ No newline at end of file diff --git a/modules/script_loading.py b/modules/script_loading.py new file mode 100644 index 00000000..f93f0951 --- /dev/null +++ b/modules/script_loading.py @@ -0,0 +1,34 @@ +import os +import sys +import traceback +from types import ModuleType + + +def load_module(path): + with open(path, "r", encoding="utf8") as file: + text = file.read() + + compiled = compile(text, path, 'exec') + module = ModuleType(os.path.basename(path)) + exec(compiled, module.__dict__) + + return module + + +def preload_extensions(extensions_dir, parser): + if not os.path.isdir(extensions_dir): + return + + for dirname in sorted(os.listdir(extensions_dir)): + preload_script = os.path.join(extensions_dir, dirname, "preload.py") + if not os.path.isfile(preload_script): + continue + + try: + module = load_module(preload_script) + if hasattr(module, 'preload'): + module.preload(parser) + + except Exception: + print(f"Error running preload() for {preload_script}", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) diff --git a/modules/scripts.py b/modules/scripts.py index 22d8908b..986b1914 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -6,7 +6,7 @@ from collections import namedtuple import gradio as gr from modules.processing import StableDiffusionProcessing -from modules import shared, paths, script_callbacks, extensions +from modules import shared, paths, script_callbacks, extensions, script_loading AlwaysVisible = object() @@ -161,13 +161,7 @@ def load_scripts(): sys.path = [scriptfile.basedir] + sys.path current_basedir = scriptfile.basedir - with open(scriptfile.path, "r", encoding="utf8") as file: 
- text = file.read() - - from types import ModuleType - compiled = compile(text, scriptfile.path, 'exec') - module = ModuleType(scriptfile.filename) - exec(compiled, module.__dict__) + module = script_loading.load_module(scriptfile.path) for key, script_class in module.__dict__.items(): if type(script_class) == type and issubclass(script_class, Script): @@ -328,27 +322,21 @@ class ScriptRunner: def reload_sources(self, cache): for si, script in list(enumerate(self.scripts)): - with open(script.filename, "r", encoding="utf8") as file: - args_from = script.args_from - args_to = script.args_to - filename = script.filename - text = file.read() - - from types import ModuleType - - module = cache.get(filename, None) - if module is None: - compiled = compile(text, filename, 'exec') - module = ModuleType(script.filename) - exec(compiled, module.__dict__) - cache[filename] = module - - for key, script_class in module.__dict__.items(): - if type(script_class) == type and issubclass(script_class, Script): - self.scripts[si] = script_class() - self.scripts[si].filename = filename - self.scripts[si].args_from = args_from - self.scripts[si].args_to = args_to + args_from = script.args_from + args_to = script.args_to + filename = script.filename + + module = cache.get(filename, None) + if module is None: + module = script_loading.load_module(script.filename) + cache[filename] = module + + for key, script_class in module.__dict__.items(): + if type(script_class) == type and issubclass(script_class, Script): + self.scripts[si] = script_class() + self.scripts[si].filename = filename + self.scripts[si].args_from = args_from + self.scripts[si].args_to = args_to scripts_txt2img = ScriptRunner() diff --git a/modules/shared.py b/modules/shared.py index 17132e42..6936cbe0 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -3,7 +3,6 @@ import datetime import json import os import sys -from collections import OrderedDict import time import gradio as gr @@ -15,7 +14,7 @@ import modules.memmon import modules.sd_models import modules.styles import modules.devices as devices -from modules import sd_samplers, sd_models, localization, sd_vae, extensions +from modules import sd_samplers, sd_models, localization, sd_vae, extensions, script_loading from modules.hypernetworks import hypernetwork from modules.paths import models_path, script_path, sd_path @@ -91,7 +90,7 @@ parser.add_argument("--tls-keyfile", type=str, help="Partially enables TLS, requ parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, requires --tls-keyfile to fully function", default=None) parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None) -extensions.preload_extensions(parser) +script_loading.preload_extensions(extensions.extensions_dir, parser) cmd_opts = parser.parse_args() -- cgit v1.2.3 From 3596af07493ab7981ef92074f979eeee8fa624c4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 19 Nov 2022 19:10:17 +0300 Subject: Add API for scripts to add elements anywhere in UI. 
--- modules/script_callbacks.py | 35 +++++++++++++++++++++++ modules/scripts.py | 69 +++++++++++++++++++++++++++++++++++++++++++-- modules/ui.py | 12 ++++++-- 3 files changed, 111 insertions(+), 5 deletions(-) (limited to 'modules/scripts.py') diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index f19e164c..8e22f875 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -61,6 +61,8 @@ callback_map = dict( callbacks_before_image_saved=[], callbacks_image_saved=[], callbacks_cfg_denoiser=[], + callbacks_before_component=[], + callbacks_after_component=[], ) @@ -137,6 +139,22 @@ def cfg_denoiser_callback(params: CFGDenoiserParams): report_exception(c, 'cfg_denoiser_callback') +def before_component_callback(component, **kwargs): + for c in callback_map['callbacks_before_component']: + try: + c.callback(component, **kwargs) + except Exception: + report_exception(c, 'before_component_callback') + + +def after_component_callback(component, **kwargs): + for c in callback_map['callbacks_after_component']: + try: + c.callback(component, **kwargs) + except Exception: + report_exception(c, 'after_component_callback') + + def add_callback(callbacks, fun): stack = [x for x in inspect.stack() if x.filename != __file__] filename = stack[0].filename if len(stack) > 0 else 'unknown file' @@ -220,3 +238,20 @@ def on_cfg_denoiser(callback): - params: CFGDenoiserParams - parameters to be passed to the inner model and sampling state details. """ add_callback(callback_map['callbacks_cfg_denoiser'], callback) + + +def on_before_component(callback): + """register a function to be called before a component is created. + The callback is called with arguments: + - component - gradio component that is about to be created. + - **kwargs - args to gradio.components.IOComponent.__init__ function + + Use elem_id/label fields of kwargs to figure out which component it is. + This can be useful to inject your own components somewhere in the middle of vanilla UI. + """ + add_callback(callback_map['callbacks_before_component'], callback) + + +def on_after_component(callback): + """register a function to be called after a component is created. See on_before_component for more.""" + add_callback(callback_map['callbacks_after_component'], callback) diff --git a/modules/scripts.py b/modules/scripts.py index 986b1914..b934d881 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -17,6 +17,9 @@ class Script: args_to = None alwayson = False + is_txt2img = False + is_img2img = False + """A gr.Group component that has all script's UI inside it""" group = None @@ -93,6 +96,23 @@ class Script: pass + def before_component(self, component, **kwargs): + """ + Called before a component is created. + Use elem_id/label fields of kwargs to figure out which component it is. + This can be useful to inject your own components somewhere in the middle of vanilla UI. + You can return created components in the ui() function to add them to the list of arguments for your processing functions + """ + + pass + + def after_component(self, component, **kwargs): + """ + Called after a component is created. Same as above. 
+ """ + + pass + def describe(self): """unused""" return "" @@ -195,12 +215,18 @@ class ScriptRunner: self.titles = [] self.infotext_fields = [] - def setup_ui(self, is_img2img): + def initialize_scripts(self, is_img2img): + self.scripts.clear() + self.alwayson_scripts.clear() + self.selectable_scripts.clear() + for script_class, path, basedir in scripts_data: script = script_class() script.filename = path + script.is_txt2img = not is_img2img + script.is_img2img = is_img2img - visibility = script.show(is_img2img) + visibility = script.show(script.is_img2img) if visibility == AlwaysVisible: self.scripts.append(script) @@ -211,6 +237,7 @@ class ScriptRunner: self.scripts.append(script) self.selectable_scripts.append(script) + def setup_ui(self): self.titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in self.selectable_scripts] inputs = [None] @@ -220,7 +247,7 @@ class ScriptRunner: script.args_from = len(inputs) script.args_to = len(inputs) - controls = wrap_call(script.ui, script.filename, "ui", is_img2img) + controls = wrap_call(script.ui, script.filename, "ui", script.is_img2img) if controls is None: return @@ -320,6 +347,22 @@ class ScriptRunner: print(f"Error running postprocess: {script.filename}", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) + def before_component(self, component, **kwargs): + for script in self.scripts: + try: + script.before_component(component, **kwargs) + except Exception: + print(f"Error running before_component: {script.filename}", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + + def after_component(self, component, **kwargs): + for script in self.scripts: + try: + script.after_component(component, **kwargs) + except Exception: + print(f"Error running after_component: {script.filename}", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + def reload_sources(self, cache): for si, script in list(enumerate(self.scripts)): args_from = script.args_from @@ -341,6 +384,7 @@ class ScriptRunner: scripts_txt2img = ScriptRunner() scripts_img2img = ScriptRunner() +scripts_current: ScriptRunner = None def reload_script_body_only(): @@ -357,3 +401,22 @@ def reload_scripts(): scripts_txt2img = ScriptRunner() scripts_img2img = ScriptRunner() + +def IOComponent_init(self, *args, **kwargs): + if scripts_current is not None: + scripts_current.before_component(self, **kwargs) + + script_callbacks.before_component_callback(self, **kwargs) + + res = original_IOComponent_init(self, *args, **kwargs) + + script_callbacks.after_component_callback(self, **kwargs) + + if scripts_current is not None: + scripts_current.after_component(self, **kwargs) + + return res + + +original_IOComponent_init = gr.components.IOComponent.__init__ +gr.components.IOComponent.__init__ = IOComponent_init diff --git a/modules/ui.py b/modules/ui.py index bb090c62..a5953fce 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -695,6 +695,9 @@ def create_ui(wrap_gradio_gpu_call): parameters_copypaste.reset() + modules.scripts.scripts_current = modules.scripts.scripts_txt2img + modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) + with gr.Blocks(analytics_enabled=False) as txt2img_interface: txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, token_counter, token_button = create_toprow(is_img2img=False) dummy_component = gr.Label(visible=False) @@ -737,7 +740,7 @@ def 
create_ui(wrap_gradio_gpu_call): seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs() with gr.Group(): - custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False) + custom_inputs = modules.scripts.scripts_txt2img.setup_ui() txt2img_gallery, generation_info, html_info = create_output_panel("txt2img", opts.outdir_txt2img_samples) parameters_copypaste.bind_buttons({"txt2img": txt2img_paste}, None, txt2img_prompt) @@ -846,6 +849,9 @@ def create_ui(wrap_gradio_gpu_call): token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter]) + modules.scripts.scripts_current = modules.scripts.scripts_img2img + modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) + with gr.Blocks(analytics_enabled=False) as img2img_interface: img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, token_counter, token_button = create_toprow(is_img2img=True) @@ -916,7 +922,7 @@ def create_ui(wrap_gradio_gpu_call): seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs() with gr.Group(): - custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True) + custom_inputs = modules.scripts.scripts_img2img.setup_ui() img2img_gallery, generation_info, html_info = create_output_panel("img2img", opts.outdir_img2img_samples) parameters_copypaste.bind_buttons({"img2img": img2img_paste}, None, img2img_prompt) @@ -1065,6 +1071,8 @@ def create_ui(wrap_gradio_gpu_call): parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields) parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields) + modules.scripts.scripts_current = None + with gr.Blocks(analytics_enabled=False) as extras_interface: with gr.Row().style(equal_height=False): with gr.Column(variant='panel'): -- cgit v1.2.3 From 991e2dcee9d6baa66b5c0b1969c4c07407be933a Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 10 Dec 2022 14:54:02 +0300 Subject: remove NSFW filter and its dependency; if you still want it, find it in the extensions section --- modules/processing.py | 7 +++---- modules/safety.py | 42 ------------------------------------------ modules/scripts.py | 20 ++++++++++++++++++++ modules/shared.py | 1 - requirements.txt | 1 - requirements_versions.txt | 1 - 6 files changed, 23 insertions(+), 49 deletions(-) delete mode 100644 modules/safety.py (limited to 'modules/scripts.py') diff --git a/modules/processing.py b/modules/processing.py index 81400d14..056c9322 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -13,7 +13,7 @@ from skimage import exposure from typing import Any, Dict, List, Optional import modules.sd_hijack -from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste +from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks from modules.sd_hijack import model_hijack from modules.shared import opts, cmd_opts, state import modules.shared as shared @@ -571,9 +571,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: devices.torch_gc() - if opts.filter_nsfw: - import modules.safety as safety - x_samples_ddim = modules.safety.censor_batch(x_samples_ddim) + 
if p.scripts is not None: + p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n) for i, x_sample in enumerate(x_samples_ddim): x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2) diff --git a/modules/safety.py b/modules/safety.py deleted file mode 100644 index cff4b278..00000000 --- a/modules/safety.py +++ /dev/null @@ -1,42 +0,0 @@ -import torch -from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker -from transformers import AutoFeatureExtractor -from PIL import Image - -import modules.shared as shared - -safety_model_id = "CompVis/stable-diffusion-safety-checker" -safety_feature_extractor = None -safety_checker = None - -def numpy_to_pil(images): - """ - Convert a numpy image or a batch of images to a PIL image. - """ - if images.ndim == 3: - images = images[None, ...] - images = (images * 255).round().astype("uint8") - pil_images = [Image.fromarray(image) for image in images] - - return pil_images - -# check and replace nsfw content -def check_safety(x_image): - global safety_feature_extractor, safety_checker - - if safety_feature_extractor is None: - safety_feature_extractor = AutoFeatureExtractor.from_pretrained(safety_model_id) - safety_checker = StableDiffusionSafetyChecker.from_pretrained(safety_model_id) - - safety_checker_input = safety_feature_extractor(numpy_to_pil(x_image), return_tensors="pt") - x_checked_image, has_nsfw_concept = safety_checker(images=x_image, clip_input=safety_checker_input.pixel_values) - - return x_checked_image, has_nsfw_concept - - -def censor_batch(x): - x_samples_ddim_numpy = x.cpu().permute(0, 2, 3, 1).numpy() - x_checked_image, has_nsfw_concept = check_safety(x_samples_ddim_numpy) - x = torch.from_numpy(x_checked_image).permute(0, 3, 1, 2) - - return x diff --git a/modules/scripts.py b/modules/scripts.py index b934d881..23ca195d 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -88,6 +88,17 @@ class Script: pass + def postprocess_batch(self, p, *args, **kwargs): + """ + Same as process_batch(), but called for every batch after it has been generated. + + **kwargs will have same items as process_batch, and also: + - batch_number - index of current batch, from 0 to number of batches-1 + - images - torch tensor with all generated images, with values ranging from 0 to 1; + """ + + pass + def postprocess(self, p, processed, *args): """ This function is called after processing ends for AlwaysVisible scripts. @@ -347,6 +358,15 @@ class ScriptRunner: print(f"Error running postprocess: {script.filename}", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) + def postprocess_batch(self, p, images, **kwargs): + for script in self.alwayson_scripts: + try: + script_args = p.script_args[script.args_from:script.args_to] + script.postprocess_batch(p, *script_args, images=images, **kwargs) + except Exception: + print(f"Error running postprocess_batch: {script.filename}", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + def before_component(self, component, **kwargs): for script in self.scripts: try: diff --git a/modules/shared.py b/modules/shared.py index 44922c91..272267c1 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -367,7 +367,6 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. 
Can be useful to reproduce old seeds."), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), "comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }), - "filter_nsfw": OptionInfo(False, "Filter NSFW content"), 'CLIP_stop_at_last_layers': OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}), "random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}), })) diff --git a/requirements.txt b/requirements.txt index 05818aa6..678acb4d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,5 @@ accelerate basicsr -diffusers fairscale==0.4.4 fonts font-roboto diff --git a/requirements_versions.txt b/requirements_versions.txt index 035fa82f..185cd066 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -1,5 +1,4 @@ transformers==4.19.2 -diffusers==0.3.0 accelerate==0.12.0 basicsr==1.4.2 gfpgan==1.3.8 -- cgit v1.2.3 From c0355caefe3d82e304e6d832699d581fc8f9fbf9 Mon Sep 17 00:00:00 2001 From: Jim Hays Date: Wed, 14 Dec 2022 21:01:32 -0500 Subject: Fix various typos --- README.md | 4 ++-- javascript/contextMenus.js | 24 ++++++++++++------------ javascript/progressbar.js | 12 ++++++------ javascript/ui.js | 2 +- modules/api/api.py | 18 +++++++++--------- modules/api/models.py | 2 +- modules/images.py | 4 ++-- modules/processing.py | 14 +++++++------- modules/safe.py | 4 ++-- modules/scripts.py | 4 ++-- modules/sd_hijack_inpainting.py | 6 +++--- modules/sd_hijack_unet.py | 2 +- modules/textual_inversion/dataset.py | 10 +++++----- modules/textual_inversion/textual_inversion.py | 16 ++++++++-------- scripts/prompt_matrix.py | 10 +++++----- webui.py | 4 ++-- 16 files changed, 68 insertions(+), 68 deletions(-) (limited to 'modules/scripts.py') diff --git a/README.md b/README.md index 55990581..556000fb 100644 --- a/README.md +++ b/README.md @@ -82,8 +82,8 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web - Use VAEs - Estimated completion time in progress bar - API -- Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML. -- via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip images embds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients)) +- Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML. 
+- via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip images embeds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients)) - [Stable Diffusion 2.0](https://github.com/Stability-AI/stablediffusion) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20) for instructions ## Installation and Running diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js index fe67c42e..11bcce1b 100644 --- a/javascript/contextMenus.js +++ b/javascript/contextMenus.js @@ -9,7 +9,7 @@ contextMenuInit = function(){ function showContextMenu(event,element,menuEntries){ let posx = event.clientX + document.body.scrollLeft + document.documentElement.scrollLeft; - let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop; + let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop; let oldMenu = gradioApp().querySelector('#context-menu') if(oldMenu){ @@ -61,15 +61,15 @@ contextMenuInit = function(){ } - function appendContextMenuOption(targetEmementSelector,entryName,entryFunction){ - - currentItems = menuSpecs.get(targetEmementSelector) - + function appendContextMenuOption(targetElementSelector,entryName,entryFunction){ + + currentItems = menuSpecs.get(targetElementSelector) + if(!currentItems){ currentItems = [] - menuSpecs.set(targetEmementSelector,currentItems); + menuSpecs.set(targetElementSelector,currentItems); } - let newItem = {'id':targetEmementSelector+'_'+uid(), + let newItem = {'id':targetElementSelector+'_'+uid(), 'name':entryName, 'func':entryFunction, 'isNew':true} @@ -97,7 +97,7 @@ contextMenuInit = function(){ if(source.id && source.id.indexOf('check_progress')>-1){ return } - + let oldMenu = gradioApp().querySelector('#context-menu') if(oldMenu){ oldMenu.remove() @@ -117,7 +117,7 @@ contextMenuInit = function(){ }) }); eventListenerApplied=true - + } return [appendContextMenuOption, removeContextMenuOption, addContextMenuEventListener] @@ -152,8 +152,8 @@ addContextMenuEventListener = initResponse[2]; generateOnRepeat('#img2img_generate','#img2img_interrupt'); }) - let cancelGenerateForever = function(){ - clearInterval(window.generateOnRepeatInterval) + let cancelGenerateForever = function(){ + clearInterval(window.generateOnRepeatInterval) } appendContextMenuOption('#txt2img_interrupt','Cancel generate forever',cancelGenerateForever) @@ -162,7 +162,7 @@ addContextMenuEventListener = initResponse[2]; appendContextMenuOption('#img2img_generate', 'Cancel generate forever',cancelGenerateForever) appendContextMenuOption('#roll','Roll three', - function(){ + function(){ let rollbutton = get_uiCurrentTabContent().querySelector('#roll'); setTimeout(function(){rollbutton.click()},100) setTimeout(function(){rollbutton.click()},200) diff --git a/javascript/progressbar.js b/javascript/progressbar.js index d58737c4..d6323ed9 100644 --- a/javascript/progressbar.js +++ b/javascript/progressbar.js @@ -3,7 +3,7 @@ global_progressbars = {} galleries = {} galleryObservers = {} -// this tracks laumnches of window.setTimeout for progressbar to prevent starting a new timeout when the previous is still running +// this tracks launches of window.setTimeout for progressbar to prevent starting a new timeout when the previous is still running timeoutIds = {} function 
check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip, id_interrupt, id_preview, id_gallery){ @@ -20,21 +20,21 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip var skip = id_skip ? gradioApp().getElementById(id_skip) : null var interrupt = gradioApp().getElementById(id_interrupt) - + if(opts.show_progress_in_title && progressbar && progressbar.offsetParent){ if(progressbar.innerText){ let newtitle = '[' + progressbar.innerText.trim() + '] Stable Diffusion'; if(document.title != newtitle){ - document.title = newtitle; + document.title = newtitle; } }else{ let newtitle = 'Stable Diffusion' if(document.title != newtitle){ - document.title = newtitle; + document.title = newtitle; } } } - + if(progressbar!= null && progressbar != global_progressbars[id_progressbar]){ global_progressbars[id_progressbar] = progressbar @@ -63,7 +63,7 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip skip.style.display = "none" } interrupt.style.display = "none" - + //disconnect observer once generation finished, so user can close selected image if they want if (galleryObservers[id_gallery]) { galleryObservers[id_gallery].disconnect(); diff --git a/javascript/ui.js b/javascript/ui.js index 2cb280e5..587dd782 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -100,7 +100,7 @@ function create_submit_args(args){ // As it is currently, txt2img and img2img send back the previous output args (txt2img_gallery, generation_info, html_info) whenever you generate a new image. // This can lead to uploading a huge gallery of previously generated images, which leads to an unnecessary delay between submitting and beginning to generate. - // I don't know why gradio is seding outputs along with inputs, but we can prevent sending the image gallery here, which seems to be an issue for some. + // I don't know why gradio is sending outputs along with inputs, but we can prevent sending the image gallery here, which seems to be an issue for some. 
// If gradio at some point stops sending outputs, this may break something if(Array.isArray(res[res.length - 3])){ res[res.length - 3] = null diff --git a/modules/api/api.py b/modules/api/api.py index 89935a70..33845045 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -67,10 +67,10 @@ def encode_pil_to_base64(image): class Api: def __init__(self, app: FastAPI, queue_lock: Lock): if shared.cmd_opts.api_auth: - self.credenticals = dict() + self.credentials = dict() for auth in shared.cmd_opts.api_auth.split(","): user, password = auth.split(":") - self.credenticals[user] = password + self.credentials[user] = password self.router = APIRouter() self.app = app @@ -93,7 +93,7 @@ class Api: self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[HypernetworkItem]) self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[FaceRestorerItem]) self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[RealesrganItem]) - self.add_api_route("/sdapi/v1/prompt-styles", self.get_promp_styles, methods=["GET"], response_model=List[PromptStyleItem]) + self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[PromptStyleItem]) self.add_api_route("/sdapi/v1/artist-categories", self.get_artists_categories, methods=["GET"], response_model=List[str]) self.add_api_route("/sdapi/v1/artists", self.get_artists, methods=["GET"], response_model=List[ArtistItem]) @@ -102,9 +102,9 @@ class Api: return self.app.add_api_route(path, endpoint, dependencies=[Depends(self.auth)], **kwargs) return self.app.add_api_route(path, endpoint, **kwargs) - def auth(self, credenticals: HTTPBasicCredentials = Depends(HTTPBasic())): - if credenticals.username in self.credenticals: - if compare_digest(credenticals.password, self.credenticals[credenticals.username]): + def auth(self, credentials: HTTPBasicCredentials = Depends(HTTPBasic())): + if credentials.username in self.credentials: + if compare_digest(credentials.password, self.credentials[credentials.username]): return True raise HTTPException(status_code=401, detail="Incorrect username or password", headers={"WWW-Authenticate": "Basic"}) @@ -239,7 +239,7 @@ class Api: def interrogateapi(self, interrogatereq: InterrogateRequest): image_b64 = interrogatereq.image if image_b64 is None: - raise HTTPException(status_code=404, detail="Image not found") + raise HTTPException(status_code=404, detail="Image not found") img = decode_base64_to_image(image_b64) img = img.convert('RGB') @@ -252,7 +252,7 @@ class Api: processed = deepbooru.model.tag(img) else: raise HTTPException(status_code=404, detail="Model not found") - + return InterrogateResponse(caption=processed) def interruptapi(self): @@ -308,7 +308,7 @@ class Api: def get_realesrgan_models(self): return [{"name":x.name,"path":x.data_path, "scale":x.scale} for x in get_realesrgan_models(None)] - def get_promp_styles(self): + def get_prompt_styles(self): styleList = [] for k in shared.prompt_styles.styles: style = shared.prompt_styles.styles[k] diff --git a/modules/api/models.py b/modules/api/models.py index f77951fc..a22bc6b3 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -128,7 +128,7 @@ class ExtrasBaseRequest(BaseModel): upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=4, description="By how much to upscale the image, only used when resize_mode=0.") upscaling_resize_w: int 
= Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.") upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. Only used when resize_mode=1.") - upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the choosen size?") + upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the chosen size?") upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}") upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}") extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.") diff --git a/modules/images.py b/modules/images.py index 8146f580..93a14289 100644 --- a/modules/images.py +++ b/modules/images.py @@ -429,7 +429,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i The directory to save the image. Note, the option `save_to_dirs` will make the image to be saved into a sub directory. basename (`str`): The base filename which will be applied to `filename pattern`. - seed, prompt, short_filename, + seed, prompt, short_filename, extension (`str`): Image file extension, default is `png`. pngsectionname (`str`): @@ -590,7 +590,7 @@ def read_info_from_image(image): Negative prompt: {json_info["uc"]} Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337""" except Exception: - print(f"Error parsing NovelAI iamge generation parameters:", file=sys.stderr) + print(f"Error parsing NovelAI image generation parameters:", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) return geninfo, items diff --git a/modules/processing.py b/modules/processing.py index 24c537d1..fe7f4faf 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -147,11 +147,11 @@ class StableDiffusionProcessing(): # The "masked-image" in this case will just be all zeros since the entire image is masked. image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device) - image_conditioning = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image_conditioning)) + image_conditioning = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image_conditioning)) # Add the fake full 1s mask to the first dimension. image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0) - image_conditioning = image_conditioning.to(x.dtype) + image_conditioning = image_conditioning.to(x.dtype) return image_conditioning @@ -199,7 +199,7 @@ class StableDiffusionProcessing(): source_image * (1.0 - conditioning_mask), getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) ) - + # Encode the new masked image using first stage of network. 
conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image)) @@ -537,7 +537,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: for n in range(p.n_iter): if state.skipped: state.skipped = False - + if state.interrupted: break @@ -612,7 +612,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: image.info["parameters"] = text output_images.append(image) - del x_samples_ddim + del x_samples_ddim devices.torch_gc() @@ -704,7 +704,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2] - """saves image before applying hires fix, if enabled in options; takes as an arguyment either an image or batch with latent space images""" + """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images""" def save_intermediate(image, index): if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix: return @@ -720,7 +720,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear") - # Avoid making the inpainting conditioning unless necessary as + # Avoid making the inpainting conditioning unless necessary as # this does need some extra compute to decode / encode the image again. if getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) < 1.0: image_conditioning = self.img2img_image_conditioning(decode_first_stage(self.sd_model, samples), samples) diff --git a/modules/safe.py b/modules/safe.py index 10460ad0..20e9d2fa 100644 --- a/modules/safe.py +++ b/modules/safe.py @@ -80,7 +80,7 @@ def check_pt(filename, extra_handler): # new pytorch format is a zip file with zipfile.ZipFile(filename) as z: check_zip_filenames(filename, z.namelist()) - + # find filename of data.pkl in zip file: '/data.pkl' data_pkl_filenames = [f for f in z.namelist() if data_pkl_re.match(f)] if len(data_pkl_filenames) == 0: @@ -108,7 +108,7 @@ def load(filename, *args, **kwargs): def load_with_extra(filename, extra_handler=None, *args, **kwargs): """ - this functon is intended to be used by extensions that want to load models with + this function is intended to be used by extensions that want to load models with some extra classes in them that the usual unpickler would find suspicious. Use the extra_handler argument to specify a function that takes module and field name as text, diff --git a/modules/scripts.py b/modules/scripts.py index 23ca195d..722f8685 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -36,7 +36,7 @@ class Script: def ui(self, is_img2img): """this function should create gradio UI elements. See https://gradio.app/docs/#components The return value should be an array of all components that are used in processing. - Values of those returned componenbts will be passed to run() and process() functions. + Values of those returned components will be passed to run() and process() functions. 
""" pass @@ -47,7 +47,7 @@ class Script: This function should return: - False if the script should not be shown in UI at all - - True if the script should be shown in UI if it's scelected in the scripts drowpdown + - True if the script should be shown in UI if it's selected in the scripts dropdown - script.AlwaysVisible if the script should be shown in UI at all times """ diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 938f9a58..d72f83fd 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -209,7 +209,7 @@ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=F else: x_in = torch.cat([x] * 2) t_in = torch.cat([t] * 2) - + if isinstance(c, dict): assert isinstance(unconditional_conditioning, dict) c_in = dict() @@ -278,7 +278,7 @@ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=F x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) return x_prev, pred_x0, e_t - + # ================================================================================================= # Monkey patch LatentInpaintDiffusion to load the checkpoint with a proper config. # Adapted from: @@ -326,7 +326,7 @@ def do_inpainting_hijack(): # most of this stuff seems to no longer be needed because it is already included into SD2.0 # LatentInpaintDiffusion remains because SD2.0's LatentInpaintDiffusion can't be loaded without specifying a checkpoint # p_sample_plms is needed because PLMS can't work with dicts as conditionings - # this file should be cleaned up later if weverything tuens out to work fine + # this file should be cleaned up later if everything turns out to work fine # ldm.models.diffusion.ddpm.get_unconditional_conditioning = get_unconditional_conditioning ldm.models.diffusion.ddpm.LatentInpaintDiffusion = LatentInpaintDiffusion diff --git a/modules/sd_hijack_unet.py b/modules/sd_hijack_unet.py index 1b9d7757..18daf8c1 100644 --- a/modules/sd_hijack_unet.py +++ b/modules/sd_hijack_unet.py @@ -4,7 +4,7 @@ import torch class TorchHijackForUnet: """ This is torch, but with cat that resizes tensors to appropriate dimensions if they do not match; - this makes it possible to create pictures with dimensions that are muliples of 8 rather than 64 + this makes it possible to create pictures with dimensions that are multiples of 8 rather than 64 """ def __getattr__(self, item): diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index 2dc64c3c..88d68c76 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -28,9 +28,9 @@ class DatasetEntry: class PersonalizedBase(Dataset): - def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once'): + def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once'): re_word = re.compile(shared.opts.dataset_filename_word_regex) if len(shared.opts.dataset_filename_word_regex) > 0 else None - + self.placeholder_token = placeholder_token self.width = width @@ -50,14 +50,14 @@ class PersonalizedBase(Dataset): self.image_paths = [os.path.join(data_root, file_path) for file_path in 
os.listdir(data_root)] - + self.shuffle_tags = shuffle_tags self.tag_drop_out = tag_drop_out print("Preparing dataset...") for path in tqdm.tqdm(self.image_paths): if shared.state.interrupted: - raise Exception("inturrupted") + raise Exception("interrupted") try: image = Image.open(path).convert('RGB').resize((self.width, self.height), PIL.Image.BICUBIC) except Exception: @@ -144,7 +144,7 @@ class PersonalizedDataLoader(DataLoader): self.collate_fn = collate_wrapper_random else: self.collate_fn = collate_wrapper - + class BatchLoader: def __init__(self, data): diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index e28c357a..daf3997b 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -133,7 +133,7 @@ class EmbeddingDatabase: process_file(fullfn, fn) except Exception: - print(f"Error loading emedding {fn}:", file=sys.stderr) + print(f"Error loading embedding {fn}:", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) continue @@ -194,7 +194,7 @@ def write_loss(log_directory, filename, step, epoch_len, values): csv_writer.writeheader() epoch = (step - 1) // epoch_len - epoch_step = (step - 1) % epoch_len + epoch_step = (step - 1) % epoch_len csv_writer.writerow({ "step": step, @@ -270,9 +270,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ # dataset loading may take a while, so input validations and early returns should be done before this shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." old_parallel_processing_allowed = shared.parallel_processing_allowed - + pin_memory = shared.opts.pin_memory - + ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method) latent_sampling_method = ds.latent_sampling_method @@ -295,12 +295,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ loss_step = 0 _loss_step = 0 #internal - + last_saved_file = "" last_saved_image = "" forced_filename = "" embedding_yet_to_be_embedded = False - + pbar = tqdm.tqdm(total=steps - initial_step) try: for i in range((steps-initial_step) * gradient_step): @@ -327,10 +327,10 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ c = shared.sd_model.cond_stage_model(batch.cond_text) loss = shared.sd_model(x, c)[0] / gradient_step del x - + _loss_step += loss.item() scaler.scale(loss).backward() - + # go back until we reach gradient accumulation steps if (j + 1) % gradient_step != 0: continue diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index c53ca28c..4c79eaef 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -18,7 +18,7 @@ def draw_xy_grid(xs, ys, x_label, y_label, cell): ver_texts = [[images.GridAnnotation(y_label(y))] for y in ys] hor_texts = [[images.GridAnnotation(x_label(x))] for x in xs] - first_pocessed = None + first_processed = None state.job_count = len(xs) * len(ys) @@ -27,17 +27,17 @@ def draw_xy_grid(xs, ys, x_label, y_label, cell): state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * 
len(ys)}" processed = cell(x, y) - if first_pocessed is None: - first_pocessed = processed + if first_processed is None: + first_processed = processed res.append(processed.images[0]) grid = images.image_grid(res, rows=len(ys)) grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts) - first_pocessed.images = [grid] + first_processed.images = [grid] - return first_pocessed + return first_processed class Script(scripts.Script): diff --git a/webui.py b/webui.py index c2d0c6be..4b32e77d 100644 --- a/webui.py +++ b/webui.py @@ -153,8 +153,8 @@ def webui(): # gradio uses a very open CORS policy via app.user_middleware, which makes it possible for # an attacker to trick the user into opening a malicious HTML page, which makes a request to the - # running web ui and do whatever the attcker wants, including installing an extension and - # runnnig its code. We disable this here. Suggested by RyotaK. + # running web ui and do whatever the attacker wants, including installing an extension and + # running its code. We disable this here. Suggested by RyotaK. app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware'] setup_cors(app) -- cgit v1.2.3
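For orientation, the scripts.py docstrings corrected in the hunk above describe the custom-script interface: ui() returns the gradio components whose values are passed on, in order, to run() and process(), while show() returns False, True, or AlwaysVisible. The following is a minimal illustrative sketch of that interface only; it is not part of this patch series, it assumes the webui environment where modules.scripts and modules.processing are importable, and the component name, label, and extra-parameter key are made up for the example.

    # illustrative sketch -- not part of this patch series
    import gradio as gr

    import modules.scripts as scripts
    from modules.processing import process_images


    class Script(scripts.Script):
        def title(self):
            # Name shown in the "Script" dropdown.
            return "Example script"

        def show(self, is_img2img):
            # True: selectable from the dropdown; returning scripts.AlwaysVisible
            # would instead keep the script's UI visible at all times.
            return True

        def ui(self, is_img2img):
            # Components returned here are passed, in order, as *args to run()/process().
            strength = gr.Slider(minimum=0.0, maximum=1.0, value=0.5, label="Strength")
            return [strength]

        def run(self, p, strength):
            # p is the processing object; strength carries the slider's value.
            p.extra_generation_params["Example strength"] = strength
            return process_images(p)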