From 0cc05fc492a9360d3b2f1b3f64c7d74f9041f74e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 21 May 2023 00:41:41 +0300
Subject: work on startup profile display

---
 webui.py | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

(limited to 'webui.py')

diff --git a/webui.py b/webui.py
index a76e377c..940966eb 100644
--- a/webui.py
+++ b/webui.py
@@ -20,7 +20,7 @@ logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not
 
 from modules import paths, timer, import_hook, errors  # noqa: F401
 
-startup_timer = timer.Timer()
+startup_timer = timer.startup_timer
 
 import torch
 import pytorch_lightning   # noqa: F401 # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them
@@ -269,8 +269,8 @@ def initialize_rest(*, reload_script_modules=False):
     localization.list_localizations(cmd_opts.localizations_dir)
 
-    modules.scripts.load_scripts()
-    startup_timer.record("load scripts")
+    with startup_timer.subcategory("load scripts"):
+        modules.scripts.load_scripts()
 
     if reload_script_modules:
         for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
@@ -416,9 +416,12 @@ def webui():
         ui_extra_networks.add_pages_to_demo(app)
 
-        modules.script_callbacks.app_started_callback(shared.demo, app)
-        startup_timer.record("scripts app_started_callback")
+        startup_timer.record("add APIs")
+
+        with startup_timer.subcategory("app_started_callback"):
+            modules.script_callbacks.app_started_callback(shared.demo, app)
+        timer.startup_record = startup_timer.dump()
         print(f"Startup time: {startup_timer.summary()}.")
 
         if cmd_opts.subpath:
@@ -443,6 +446,7 @@ def webui():
             # If we catch a keyboard interrupt, we want to stop the server and exit.
             shared.demo.close()
             break
 
+        print('Restarting UI...')
         shared.demo.close()
         time.sleep(0.5)
-- cgit v1.2.3
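
The commit above switches webui.py to the shared timer.startup_timer and wraps script loading in startup_timer.subcategory(...), so the final "Startup time:" summary can attribute a slow phase to its sub-steps. As a rough illustration of the pattern (a minimal sketch only; the real modules/timer.py is more elaborate and its internals are not shown in this patch):

    import time
    from contextlib import contextmanager


    class Timer:
        def __init__(self):
            self.start = time.time()
            self.records = {}

        def elapsed(self):
            now = time.time()
            res, self.start = now - self.start, now
            return res

        def record(self, category):
            # add the time since the last checkpoint to the named category
            self.records[category] = self.records.get(category, 0) + self.elapsed()

        @contextmanager
        def subcategory(self, name):
            # group everything done inside the block under a single category,
            # which is how "load scripts" and "app_started_callback" are timed above
            self.elapsed()  # reset the checkpoint before entering the block
            try:
                yield
            finally:
                self.record(name)

        def summary(self):
            total = sum(self.records.values())
            parts = ", ".join(f"{k}: {v:.1f}s" for k, v in self.records.items())
            return f"{total:.1f}s ({parts})"


    startup_timer = Timer()  # module-level instance, mirroring timer.startup_timer
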
+ """ + + x = torch.zeros((1, 1)).to(device, dtype) + linear = torch.nn.Linear(1, 1).to(device, dtype) + linear(x) + + x = torch.zeros((1, 1, 3, 3)).to(device, dtype) + conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype) + conv2d(x) diff --git a/webui.py b/webui.py index d4402f55..07c70c46 100644 --- a/webui.py +++ b/webui.py @@ -20,7 +20,7 @@ import logging logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) -from modules import paths, timer, import_hook, errors # noqa: F401 +from modules import paths, timer, import_hook, errors, devices # noqa: F401 startup_timer = timer.Timer() @@ -295,6 +295,8 @@ def initialize_rest(*, reload_script_modules=False): # (when reloading, this does nothing) Thread(target=lambda: shared.sd_model).start() + Thread(target=devices.first_time_calculation).start() + shared.reload_hypernetworks() startup_timer.record("reload hypernetworks") -- cgit v1.2.3 From 47b669bc9ff3df73f58b675abaffbdfd84771a67 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 22 May 2023 09:53:24 +0300 Subject: Upgrade Gradio, remove docs URL hack --- requirements.txt | 2 +- requirements_versions.txt | 2 +- webui.py | 15 ++++----------- 3 files changed, 6 insertions(+), 13 deletions(-) (limited to 'webui.py') diff --git a/requirements.txt b/requirements.txt index 34e4520d..a464447b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ blendmodes accelerate basicsr gfpgan -gradio==3.31.0 +gradio==3.32.0 numpy omegaconf opencv-contrib-python diff --git a/requirements_versions.txt b/requirements_versions.txt index de501fda..31b179a9 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -3,7 +3,7 @@ transformers==4.25.1 accelerate==0.18.0 basicsr==1.4.2 gfpgan==1.3.8 -gradio==3.31.0 +gradio==3.32.0 numpy==1.23.5 Pillow==9.5.0 realesrgan==0.3.0 diff --git a/webui.py b/webui.py index d4402f55..2d2c1134 100644 --- a/webui.py +++ b/webui.py @@ -370,17 +370,6 @@ def webui(): gradio_auth_creds = list(get_gradio_auth_creds()) or None - # this restores the missing /docs endpoint - if launch_api and not hasattr(FastAPI, 'original_setup'): - # TODO: replace this with `launch(app_kwargs=...)` if https://github.com/gradio-app/gradio/pull/4282 gets merged - def fastapi_setup(self): - self.docs_url = "/docs" - self.redoc_url = "/redoc" - self.original_setup() - - FastAPI.original_setup = FastAPI.setup - FastAPI.setup = fastapi_setup - app, local_url, share_url = shared.demo.launch( share=cmd_opts.share, server_name=server_name, @@ -393,6 +382,10 @@ def webui(): inbrowser=cmd_opts.autolaunch, prevent_thread_lock=True, allowed_paths=cmd_opts.gradio_allowed_path, + app_kwargs={ + "docs_url": "/docs", + "redoc_url": "/redoc", + }, ) if cmd_opts.add_stop_route: app.add_route("/_stop", stop_route, methods=["POST"]) -- cgit v1.2.3 From a6e653be26cc05f4438145fa0082816e9fbbf5fc Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 23 May 2023 18:02:09 +0300 Subject: possible fix for empty list of optimizations #10605 --- modules/sd_hijack.py | 21 +++++++++++++++------ webui.py | 17 ++++++++++++++--- 2 files changed, 29 insertions(+), 9 deletions(-) (limited to 'webui.py') diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 08d31080..f93df0a6 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -48,6 +48,11 @@ def apply_optimizations(): undo_optimizations() + if len(optimizers) == 0: + # a script can access the model very early, and optimizations would not be 
From a6e653be26cc05f4438145fa0082816e9fbbf5fc Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 23 May 2023 18:02:09 +0300
Subject: possible fix for empty list of optimizations #10605

---
 modules/sd_hijack.py | 21 +++++++++++++++------
 webui.py | 17 ++++++++++++++---
 2 files changed, 29 insertions(+), 9 deletions(-)

(limited to 'webui.py')

diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 08d31080..f93df0a6 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -48,6 +48,11 @@ def apply_optimizations():
 
     undo_optimizations()
 
+    if len(optimizers) == 0:
+        # a script can access the model very early, and optimizations would not be filled by then
+        current_optimizer = None
+        return ''
+
     ldm.modules.diffusionmodules.model.nonlinearity = silu
     ldm.modules.diffusionmodules.openaimodel.th = sd_hijack_unet.th
@@ -67,8 +72,9 @@ def apply_optimizations():
         matching_optimizer = optimizers[0]
 
     if matching_optimizer is not None:
-        print(f"Applying optimization: {matching_optimizer.name}")
+        print(f"Applying optimization: {matching_optimizer.name}... ", end='')
         matching_optimizer.apply()
+        print("done.")
         current_optimizer = matching_optimizer
         return current_optimizer.name
     else:
@@ -149,6 +155,13 @@ class StableDiffusionModelHijack:
     def __init__(self):
         self.embedding_db.add_embedding_dir(cmd_opts.embeddings_dir)
 
+    def apply_optimizations(self):
+        try:
+            self.optimization_method = apply_optimizations()
+        except Exception as e:
+            errors.display(e, "applying cross attention optimization")
+            undo_optimizations()
+
     def hijack(self, m):
         if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation:
             model_embeddings = m.cond_stage_model.roberta.embeddings
@@ -168,11 +181,7 @@ class StableDiffusionModelHijack:
         if m.cond_stage_key == "edit":
             sd_hijack_unet.hijack_ddpm_edit()
 
-        try:
-            self.optimization_method = apply_optimizations()
-        except Exception as e:
-            errors.display(e, "applying cross attention optimization")
-            undo_optimizations()
+        self.apply_optimizations()
 
         self.clip = m.cond_stage_model
 
diff --git a/webui.py b/webui.py
index 6933473d..f9210f41 100644
--- a/webui.py
+++ b/webui.py
@@ -291,9 +291,20 @@ def initialize_rest(*, reload_script_modules=False):
     modules.sd_hijack.list_optimizers()
     startup_timer.record("scripts list_optimizers")
 
-    # load model in parallel to other startup stuff
-    # (when reloading, this does nothing)
-    Thread(target=lambda: shared.sd_model).start()
+    def load_model():
+        """
+        Accesses shared.sd_model property to load model.
+        After it's available, if it has been loaded before this access by some extension,
+        its optimization may be None because the list of optimizers has not been filled
+        by that time, so we apply optimization again.
+        """
+
+        shared.sd_model  # noqa: B018
+
+        if modules.sd_hijack.current_optimizer is None:
+            modules.sd_hijack.apply_optimizations()
+
+    Thread(target=load_model).start()
 
     Thread(target=devices.first_time_calculation).start()
-- cgit v1.2.3
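
The a6e653be commit above handles the case where some extension forces the model to load while sd_hijack.optimizers is still empty, leaving current_optimizer as None; the load_model() thread then re-applies the optimization once the list exists. A hypothetical extension script showing how that early access can happen (the file name is illustrative and not part of the webui):

    # extensions/example/scripts/early_model_access.py  (hypothetical)
    # Extension scripts are imported by modules.scripts.load_scripts(), which runs
    # earlier in initialize_rest() than modules.sd_hijack.list_optimizers().
    from modules import shared

    # Reading the property triggers checkpoint loading and hijack() while the
    # optimizer list is still empty, so apply_optimizations() returns '' and
    # current_optimizer stays None until load_model() re-applies it.
    _ = shared.sd_model
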
+ """ + + shared.sd_model # noqa: B018 + + if modules.sd_hijack.current_optimizer is None: + modules.sd_hijack.apply_optimizations() + + Thread(target=load_model).start() Thread(target=devices.first_time_calculation).start() -- cgit v1.2.3 From 339b5315700a469f4a9f0d5afc08ca2aca60c579 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 27 May 2023 15:47:33 +0300 Subject: custom unet support --- modules/processing.py | 4 +- modules/script_callbacks.py | 20 ++++++++++ modules/sd_hijack.py | 20 +++++++--- modules/sd_models.py | 4 +- modules/sd_unet.py | 92 +++++++++++++++++++++++++++++++++++++++++++++ modules/shared.py | 1 + modules/shared_items.py | 11 ++++++ webui.py | 4 ++ 8 files changed, 148 insertions(+), 8 deletions(-) create mode 100644 modules/sd_unet.py (limited to 'webui.py') diff --git a/modules/processing.py b/modules/processing.py index 29a3743f..b75f2515 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -13,7 +13,7 @@ from skimage import exposure from typing import Any, Dict, List import modules.sd_hijack -from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common +from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet from modules.sd_hijack import model_hijack from modules.shared import opts, cmd_opts, state import modules.shared as shared @@ -674,6 +674,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN": sd_vae_approx.model() + sd_unet.apply_unet() + if state.job_count == -1: state.job_count = p.n_iter diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 40f388a5..d2728e12 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -111,6 +111,7 @@ callback_map = dict( callbacks_before_ui=[], callbacks_on_reload=[], callbacks_list_optimizers=[], + callbacks_list_unets=[], ) @@ -271,6 +272,18 @@ def list_optimizers_callback(): return res +def list_unets_callback(): + res = [] + + for c in callback_map['callbacks_list_unets']: + try: + c.callback(res) + except Exception: + report_exception(c, 'list_unets') + + return res + + def add_callback(callbacks, fun): stack = [x for x in inspect.stack() if x.filename != __file__] filename = stack[0].filename if len(stack) > 0 else 'unknown file' @@ -430,3 +443,10 @@ def on_list_optimizers(callback): to it.""" add_callback(callback_map['callbacks_list_optimizers'], callback) + + +def on_list_unets(callback): + """register a function to be called when UI is making a list of alternative options for unet. 
+    The function will be called with one argument, a list, and shall add objects of type modules.sd_unet.SdUnetOption to it."""
+
+    add_callback(callback_map['callbacks_list_unets'], callback)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index f93df0a6..487dfd60 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -3,7 +3,7 @@ from torch.nn.functional import silu
 from types import MethodType
 
 import modules.textual_inversion.textual_inversion
-from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors
+from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors, sd_unet
 from modules.hypernetworks import hypernetwork
 from modules.shared import cmd_opts
 from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr
@@ -43,7 +43,7 @@ def list_optimizers():
     optimizers.extend(new_optimizers)
 
 
-def apply_optimizations():
+def apply_optimizations(option=None):
     global current_optimizer
 
     undo_optimizations()
@@ -60,7 +60,7 @@ def apply_optimizations():
         current_optimizer.undo()
         current_optimizer = None
 
-    selection = shared.opts.cross_attention_optimization
+    selection = option or shared.opts.cross_attention_optimization
     if selection == "Automatic" and len(optimizers) > 0:
         matching_optimizer = next(iter([x for x in optimizers if x.cmd_opt and getattr(shared.cmd_opts, x.cmd_opt, False)]), optimizers[0])
     else:
@@ -72,12 +72,13 @@ def apply_optimizations():
         matching_optimizer = optimizers[0]
 
     if matching_optimizer is not None:
-        print(f"Applying optimization: {matching_optimizer.name}... ", end='')
+        print(f"Applying attention optimization: {matching_optimizer.name}... ", end='')
         matching_optimizer.apply()
         print("done.")
         current_optimizer = matching_optimizer
         return current_optimizer.name
     else:
+        print("Disabling attention optimization")
         return ''
 
@@ -155,9 +156,9 @@ class StableDiffusionModelHijack:
     def __init__(self):
         self.embedding_db.add_embedding_dir(cmd_opts.embeddings_dir)
 
-    def apply_optimizations(self):
+    def apply_optimizations(self, option=None):
         try:
-            self.optimization_method = apply_optimizations()
+            self.optimization_method = apply_optimizations(option)
         except Exception as e:
             errors.display(e, "applying cross attention optimization")
             undo_optimizations()
@@ -194,6 +195,11 @@ class StableDiffusionModelHijack:
 
         self.layers = flatten(m)
 
+        if not hasattr(ldm.modules.diffusionmodules.openaimodel, 'copy_of_UNetModel_forward_for_webui'):
+            ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui = ldm.modules.diffusionmodules.openaimodel.UNetModel.forward
+
+        ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = sd_unet.UNetModel_forward
+
     def undo_hijack(self, m):
         if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation:
             m.cond_stage_model = m.cond_stage_model.wrapped
@@ -215,6 +221,8 @@ class StableDiffusionModelHijack:
         self.layers = None
         self.clip = None
 
+        ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui
+
     def apply_circular(self, enable):
         if self.circular_enabled == enable:
             return
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 91b3eb11..835bc016 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -14,7 +14,7 @@ import ldm.modules.midas as midas
 
 from ldm.util import instantiate_from_config
 
-from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config
+from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet
 from modules.sd_hijack_inpainting import do_inpainting_hijack
 from modules.timer import Timer
 import tomesd
@@ -532,6 +532,8 @@ def reload_model_weights(sd_model=None, info=None):
     if sd_model.sd_model_checkpoint == checkpoint_info.filename:
         return
 
+    sd_unet.apply_unet("None")
+
     if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
         lowvram.send_everything_to_cpu()
     else:
diff --git a/modules/sd_unet.py b/modules/sd_unet.py
new file mode 100644
index 00000000..6d708ad2
--- /dev/null
+++ b/modules/sd_unet.py
@@ -0,0 +1,92 @@
+import torch.nn
+import ldm.modules.diffusionmodules.openaimodel
+
+from modules import script_callbacks, shared, devices
+
+unet_options = []
+current_unet_option = None
+current_unet = None
+
+
+def list_unets():
+    new_unets = script_callbacks.list_unets_callback()
+
+    unet_options.clear()
+    unet_options.extend(new_unets)
+
+
+def get_unet_option(option=None):
+    option = option or shared.opts.sd_unet
+
+    if option == "None":
+        return None
+
+    if option == "Automatic":
+        name = shared.sd_model.sd_checkpoint_info.model_name
+
+        options = [x for x in unet_options if x.model_name == name]
+
+        option = options[0].label if options else "None"
+
+    return next(iter([x for x in unet_options if x.label == option]), None)
+
+
+def apply_unet(option=None):
+    global current_unet_option
+    global current_unet
+
+    new_option = get_unet_option(option)
+    if new_option == current_unet_option:
+        return
+
+    if current_unet is not None:
+        print(f"Deactivating unet: {current_unet.option.label}")
+        current_unet.deactivate()
+
+    current_unet_option = new_option
+    if current_unet_option is None:
+        current_unet = None
+
+        if not (shared.cmd_opts.lowvram or shared.cmd_opts.medvram):
+            shared.sd_model.model.diffusion_model.to(devices.device)
+
+        return
+
+    shared.sd_model.model.diffusion_model.to(devices.cpu)
+    devices.torch_gc()
+
+    current_unet = current_unet_option.create_unet()
+    current_unet.option = current_unet_option
+    print(f"Activating unet: {current_unet.option.label}")
+    current_unet.activate()
+
+
+class SdUnetOption:
+    model_name = None
+    """name of related checkpoint - this option will be selected automatically for unet if the name of checkpoint matches this"""
+
+    label = None
+    """name of the unet in UI"""
+
+    def create_unet(self):
+        """returns SdUnet object to be used as a Unet instead of built-in unet when making pictures"""
+        raise NotImplementedError()
+
+
+class SdUnet(torch.nn.Module):
+    def forward(self, x, timesteps, context, *args, **kwargs):
+        raise NotImplementedError()
+
+    def activate(self):
+        pass
+
+    def deactivate(self):
+        pass
+
+
+def UNetModel_forward(self, x, timesteps=None, context=None, *args, **kwargs):
+    if current_unet is not None:
+        return current_unet.forward(x, timesteps, context, *args, **kwargs)
+
+    return ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui(self, x, timesteps, context, *args, **kwargs)
+
diff --git a/modules/shared.py b/modules/shared.py
index 0897f937..a5e7824a 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -403,6 +403,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
     "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
     "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"),
     "sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"),
+    "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"),
     "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
     "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}),
     "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
diff --git a/modules/shared_items.py b/modules/shared_items.py
index 2a8713c8..7f306a06 100644
--- a/modules/shared_items.py
+++ b/modules/shared_items.py
@@ -29,3 +29,14 @@ def cross_attention_optimizations():
 
     return ["Automatic"] + [x.title() for x in modules.sd_hijack.optimizers] + ["None"]
 
+
+def sd_unet_items():
+    import modules.sd_unet
+
+    return ["Automatic"] + [x.label for x in modules.sd_unet.unet_options] + ["None"]
+
+
+def refresh_unet_list():
+    import modules.sd_unet
+
+    modules.sd_unet.list_unets()
+
diff --git a/webui.py b/webui.py
index f9210f41..1e3ff061 100644
--- a/webui.py
+++ b/webui.py
@@ -58,6 +58,7 @@ import modules.sd_hijack
 import modules.sd_hijack_optimizations
 import modules.sd_models
 import modules.sd_vae
+import modules.sd_unet
 import modules.txt2img
 import modules.script_callbacks
 import modules.textual_inversion.textual_inversion
@@ -291,6 +292,9 @@ def initialize_rest(*, reload_script_modules=False):
     modules.sd_hijack.list_optimizers()
     startup_timer.record("scripts list_optimizers")
 
+    modules.sd_unet.list_unets()
+    startup_timer.record("scripts list_unets")
+
     def load_model():
         """
         Accesses shared.sd_model property to load model.
-- cgit v1.2.3
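
The new modules/sd_unet.py above defines the extension-facing surface: an SdUnetOption describes an entry for the "SD Unet" dropdown, its create_unet() returns an SdUnet whose forward() replaces the built-in UNetModel forward, and extensions register options through script_callbacks.on_list_unets. A hedged sketch of what such an extension script could look like (the checkpoint name and class names are made up; a real backend such as a TensorRT engine would do its work in create_unet(), activate() and forward()):

    # hypothetical extension script registering an alternative unet
    import ldm.modules.diffusionmodules.openaimodel as openaimodel

    from modules import script_callbacks, sd_unet, shared


    class ExampleUnet(sd_unet.SdUnet):
        def forward(self, x, timesteps, context, *args, **kwargs):
            # a real implementation would run its own engine here; this pass-through
            # just calls the original forward that sd_unet saved before hijacking
            return openaimodel.copy_of_UNetModel_forward_for_webui(
                shared.sd_model.model.diffusion_model, x, timesteps, context, *args, **kwargs
            )

        def activate(self):
            pass  # load engines / allocate buffers here

        def deactivate(self):
            pass  # release resources here


    class ExampleUnetOption(sd_unet.SdUnetOption):
        model_name = "v1-5-pruned-emaonly"  # assumed checkpoint name; used by the "Automatic" setting
        label = "Example: pass-through unet"

        def create_unet(self):
            return ExampleUnet()


    def add_example_unet(unets):
        unets.append(ExampleUnetOption())


    script_callbacks.on_list_unets(add_example_unet)
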
From cf07983a6e5aa2cf131a75e5b974c25c171a7126 Mon Sep 17 00:00:00 2001
From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com>
Date: Sun, 28 May 2023 20:42:19 +0800
Subject: Upgrade xformers

---
 modules/launch_utils.py | 2 +-
 webui.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'webui.py')

diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 35a52310..6eb3ea11 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -223,7 +223,7 @@ def prepare_environment():
     torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}")
     requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
 
-    xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17')
+    xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
     gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "https://github.com/TencentARC/GFPGAN/archive/8d2447a2d918f8eba5a4a01463fd48e45126a379.zip")
     clip_package = os.environ.get('CLIP_PACKAGE', "https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip")
     openclip_package = os.environ.get('OPENCLIP_PACKAGE', "https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip")
diff --git a/webui.py b/webui.py
index 1e3ff061..3df2cd1a 100644
--- a/webui.py
+++ b/webui.py
@@ -135,7 +135,7 @@ there are reports of issues with training tab on the latest version.
 
 Use --skip-version-check commandline argument to disable this check.
         """.strip())
 
-    expected_xformers_version = "0.0.17"
+    expected_xformers_version = "0.0.20"
     if shared.xformers_available:
         import xformers
 
-- cgit v1.2.3

From 2bbe3f5f0aceeacde365e6da9f31125c35bd95ee Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Fri, 2 Jun 2023 16:51:15 +0900
Subject: remove redundant call list_optimizers()

---
 webui.py | 1 -
 1 file changed, 1 deletion(-)

(limited to 'webui.py')

diff --git a/webui.py b/webui.py
index 828259b8..f2e1a8e0 100644
--- a/webui.py
+++ b/webui.py
@@ -469,7 +469,6 @@ def webui():
         startup_timer.record("scripts unloaded callback")
         initialize_rest(reload_script_modules=True)
 
-        modules.script_callbacks.on_list_optimizers(modules.sd_hijack_optimizations.list_optimizers)
         modules.sd_hijack.list_optimizers()
         startup_timer.record("scripts list_optimizers")
 
-- cgit v1.2.3

From 8f8405274c6a642050e540325caac7c094536a09 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Fri, 2 Jun 2023 17:18:42 +0900
Subject: remove redundant

---
 webui.py | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'webui.py')

diff --git a/webui.py b/webui.py
index f2e1a8e0..254fada3 100644
--- a/webui.py
+++ b/webui.py
@@ -469,9 +469,6 @@ def webui():
         startup_timer.record("scripts unloaded callback")
         initialize_rest(reload_script_modules=True)
 
-        modules.sd_hijack.list_optimizers()
-        startup_timer.record("scripts list_optimizers")
-
-- cgit v1.2.3

From 1411a6e74b2fa07ecfc2117d774520f957651145 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Tue, 6 Jun 2023 00:25:28 +0900
Subject: rework-disable-autolaunch

---
 modules/launch_utils.py | 1 +
 webui.py | 5 +----
 2 files changed, 2 insertions(+), 4 deletions(-)

(limited to 'webui.py')

diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index af8d8b37..59008385 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -247,6 +247,7 @@ def prepare_environment():
     try:
         # the existance of this file is a signal to webui.sh/bat that webui needs to be restarted when it stops execution
         os.remove(os.path.join(script_path, "tmp", "restart"))
+        os.environ.setdefault('SD_WEBUI_DISABLE_AUTOLAUNCH', '1')
     except OSError:
         pass
 
diff --git a/webui.py b/webui.py
index 254fada3..1ee0e41d 100644
--- a/webui.py
+++ b/webui.py
@@ -396,7 +396,7 @@ def webui():
             ssl_verify=cmd_opts.disable_tls_verify,
             debug=cmd_opts.gradio_debug,
             auth=gradio_auth_creds,
-            inbrowser=cmd_opts.autolaunch,
+            inbrowser=cmd_opts.autolaunch and os.getenv('SD_WEBUI_DISABLE_AUTOLAUNCH') != '1',
             prevent_thread_lock=True,
             allowed_paths=cmd_opts.gradio_allowed_path,
             app_kwargs={
@@ -407,9 +407,6 @@ def webui():
         if cmd_opts.add_stop_route:
             app.add_route("/_stop", stop_route, methods=["POST"])
 
-        # after initial launch, disable --autolaunch for subsequent restarts
-        cmd_opts.autolaunch = False
-
         startup_timer.record("gradio launch")
 
         # gradio uses a very open CORS policy via app.user_middleware, which makes it possible for
-- cgit v1.2.3

From eaace155cebeb4e713fc6f232261eeed6b958736 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Tue, 6 Jun 2023 02:47:18 +0900
Subject: restore old disable --autolaunch

---
 webui.py | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'webui.py')

diff --git a/webui.py b/webui.py
index 1ee0e41d..58482cf4 100644
--- a/webui.py
+++ b/webui.py
@@ -407,6 +407,9 @@ def webui():
         if cmd_opts.add_stop_route:
             app.add_route("/_stop", stop_route, methods=["POST"])
 
+        # after initial launch, disable --autolaunch for subsequent restarts
+        cmd_opts.autolaunch = False
+
         startup_timer.record("gradio launch")
 
         # gradio uses a very open CORS policy via app.user_middleware, which makes it possible for
-- cgit v1.2.3

From c2808f3040babbb5b9456d15aa2a9354c1c64d23 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Tue, 6 Jun 2023 02:52:05 +0900
Subject: SD_WEBUI_RESTARTING

---
 modules/launch_utils.py | 2 +-
 webui.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'webui.py')

diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 59008385..609a181e 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -247,7 +247,7 @@ def prepare_environment():
     try:
         # the existance of this file is a signal to webui.sh/bat that webui needs to be restarted when it stops execution
         os.remove(os.path.join(script_path, "tmp", "restart"))
-        os.environ.setdefault('SD_WEBUI_DISABLE_AUTOLAUNCH', '1')
+        os.environ.setdefault('SD_WEBUI_RESTARTING ', '1')
     except OSError:
         pass
 
diff --git a/webui.py b/webui.py
index 58482cf4..136d036d 100644
--- a/webui.py
+++ b/webui.py
@@ -396,7 +396,7 @@ def webui():
             ssl_verify=cmd_opts.disable_tls_verify,
             debug=cmd_opts.gradio_debug,
             auth=gradio_auth_creds,
-            inbrowser=cmd_opts.autolaunch and os.getenv('SD_WEBUI_DISABLE_AUTOLAUNCH') != '1',
+            inbrowser=cmd_opts.autolaunch and os.getenv('SD_WEBUI_RESTARTING ') != '1',
             prevent_thread_lock=True,
             allowed_paths=cmd_opts.gradio_allowed_path,
             app_kwargs={
-- cgit v1.2.3
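
The last three commits first move autolaunch suppression into an environment variable, then restore the old in-process cmd_opts.autolaunch = False fallback, and finally rename the variable to SD_WEBUI_RESTARTING: the launcher sets it when it finds the tmp/restart marker, and webui.py skips opening a browser while it is set. A simplified sketch of the two sides of that handshake (not the full launch_utils.py/webui.py code):

    import os


    def mark_restart_if_needed(script_path="."):
        try:
            # the restart marker is left behind by webui.sh/bat when the server asks to restart
            os.remove(os.path.join(script_path, "tmp", "restart"))
            # note: the key really does carry a trailing space in commit c2808f30
            os.environ.setdefault('SD_WEBUI_RESTARTING ', '1')
        except OSError:
            pass


    def should_open_browser(autolaunch: bool) -> bool:
        # webui.py feeds this into gradio's launch(inbrowser=...): open the browser on
        # the first start only, not when the UI is coming back up after a restart
        return autolaunch and os.getenv('SD_WEBUI_RESTARTING ') != '1'
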