Diffstat (limited to 'modules')
-rw-r--r--   modules/api/api.py            |  89
-rw-r--r--   modules/api/models.py         |  77
-rw-r--r--   modules/extensions.py         |   7
-rw-r--r--   modules/extras.py             |   5
-rw-r--r--   modules/processing.py         |  11
-rw-r--r--   modules/script_callbacks.py   |  69
-rw-r--r--   modules/scripts.py            |  22
-rw-r--r--   modules/sd_models.py          |  21
-rw-r--r--   modules/shared.py             |  34
-rw-r--r--   modules/ui.py                 |  11
-rw-r--r--   modules/ui_extensions.py      |   2
-rw-r--r--   modules/upscaler.py           |  12
12 files changed, 282 insertions, 78 deletions
diff --git a/modules/api/api.py b/modules/api/api.py
index 71c9c160..8a7ab2f5 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -2,14 +2,17 @@ import base64
 import io
 import time
 import uvicorn
-from gradio.processing_utils import decode_base64_to_file, decode_base64_to_image
-from fastapi import APIRouter, Depends, HTTPException
+from threading import Lock
+from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image
+from fastapi import APIRouter, Depends, FastAPI, HTTPException
 import modules.shared as shared
 from modules.api.models import *
 from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
-from modules.sd_samplers import all_samplers, sample_to_image, samples_to_image_grid
+from modules.sd_samplers import all_samplers
 from modules.extras import run_extras, run_pnginfo
-
+from modules.sd_models import checkpoints_list
+from modules.realesrgan_model import get_realesrgan_models
+from typing import List
 
 def upscaler_to_index(name: str):
     try:
@@ -37,7 +40,7 @@ def encode_pil_to_base64(image):
 
 class Api:
-    def __init__(self, app, queue_lock):
+    def __init__(self, app: FastAPI, queue_lock: Lock):
         self.router = APIRouter()
         self.app = app
         self.queue_lock = queue_lock
@@ -48,6 +51,18 @@ class Api:
         self.app.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse)
         self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse)
         self.app.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"])
+        self.app.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=OptionsModel)
+        self.app.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"])
+        self.app.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=FlagsModel)
+        self.app.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[SamplerItem])
+        self.app.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[UpscalerItem])
+        self.app.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[SDModelItem])
+        self.app.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[HypernetworkItem])
+        self.app.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[FaceRestorerItem])
+        self.app.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[RealesrganItem])
+        self.app.add_api_route("/sdapi/v1/prompt-styles", self.get_promp_styles, methods=["GET"], response_model=List[PromptStyleItem])
+        self.app.add_api_route("/sdapi/v1/artist-categories", self.get_artists_categories, methods=["GET"], response_model=List[str])
+        self.app.add_api_route("/sdapi/v1/artists", self.get_artists, methods=["GET"], response_model=List[ArtistItem])
 
     def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
         sampler_index = sampler_to_index(txt2imgreq.sampler_index)
@@ -190,6 +205,70 @@ class Api:
         shared.state.interrupt()
 
         return {}
+
+    def get_config(self):
+        options = {}
+        for key in shared.opts.data.keys():
+            metadata = shared.opts.data_labels.get(key)
+            if(metadata is not None):
+                options.update({key: shared.opts.data.get(key, shared.opts.data_labels.get(key).default)})
+            else:
+                options.update({key: shared.opts.data.get(key, None)})
+
+        return options
+
+    def set_config(self, req: OptionsModel):
+        # currently req has all options fields even if you send a dict like { "send_seed": false }, which means it will
+        # overwrite all options with default values.
+        raise RuntimeError('Setting options via API is not supported')
+
+        reqDict = vars(req)
+        for o in reqDict:
+            setattr(shared.opts, o, reqDict[o])
+
+        shared.opts.save(shared.config_filename)
+        return
+
+    def get_cmd_flags(self):
+        return vars(shared.cmd_opts)
+
+    def get_samplers(self):
+        return [{"name":sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in all_samplers]
+
+    def get_upscalers(self):
+        upscalers = []
+
+        for upscaler in shared.sd_upscalers:
+            u = upscaler.scaler
+            upscalers.append({"name":u.name, "model_name":u.model_name, "model_path":u.model_path, "model_url":u.model_url})
+
+        return upscalers
+
+    def get_sd_models(self):
+        return [{"title":x.title, "model_name":x.model_name, "hash":x.hash, "filename": x.filename, "config": x.config} for x in checkpoints_list.values()]
+
+    def get_hypernetworks(self):
+        return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
+
+    def get_face_restorers(self):
+        return [{"name":x.name(), "cmd_dir": getattr(x, "cmd_dir", None)} for x in shared.face_restorers]
+
+    def get_realesrgan_models(self):
+        return [{"name":x.name,"path":x.data_path, "scale":x.scale} for x in get_realesrgan_models(None)]
+
+    def get_promp_styles(self):
+        styleList = []
+        for k in shared.prompt_styles.styles:
+            style = shared.prompt_styles.styles[k]
+            styleList.append({"name":style[0], "prompt": style[1], "negative_prompr": style[2]})
+
+        return styleList
+
+    def get_artists_categories(self):
+        return shared.artist_db.cats
+
+    def get_artists(self):
+        return [{"name":x[0], "score":x[1], "category":x[2]} for x in shared.artist_db.artists]
 
     def launch(self, server_name, port):
         self.app.include_router(self.router)
diff --git a/modules/api/models.py b/modules/api/models.py
index 68fb45c6..a44c5ddd 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -1,11 +1,11 @@
 import inspect
-from click import prompt
 from pydantic import BaseModel, Field, create_model
 from typing import Any, Optional
 from typing_extensions import Literal
 from inflection import underscore
 from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
-from modules.shared import sd_upscalers
+from modules.shared import sd_upscalers, opts, parser
+from typing import List
 
 API_NOT_ALLOWED = [
     "self",
@@ -109,12 +109,12 @@ StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator(
 ).generate_model()
 
 class TextToImageResponse(BaseModel):
-    images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
+    images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
     parameters: dict
     info: str
 
 class ImageToImageResponse(BaseModel):
-    images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
+    images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
     parameters: dict
     info: str
@@ -147,10 +147,10 @@ class FileData(BaseModel):
     name: str = Field(title="File name")
 
 class ExtrasBatchImagesRequest(ExtrasBaseRequest):
-    imageList: list[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings")
+    imageList: List[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings")
 
 class ExtrasBatchImagesResponse(ExtraBaseResponse):
-    images: list[str] = Field(title="Images", description="The generated images in base64 format.")
+    images: List[str] = Field(title="Images", description="The generated images in base64 format.")
 
 class PNGInfoRequest(BaseModel):
     image: str = Field(title="Image", description="The base64 encoded PNG image")
@@ -166,3 +166,68 @@ class ProgressResponse(BaseModel):
     eta_relative: float = Field(title="ETA in secs")
     state: dict = Field(title="State", description="The current state snapshot")
     current_image: str = Field(default=None, title="Current image", description="The current image in base64 format. opts.show_progress_every_n_steps is required for this to work.")
+
+fields = {}
+for key, value in opts.data.items():
+    metadata = opts.data_labels.get(key)
+    optType = opts.typemap.get(type(value), type(value))
+
+    if (metadata is not None):
+        fields.update({key: (Optional[optType], Field(
+            default=metadata.default ,description=metadata.label))})
+    else:
+        fields.update({key: (Optional[optType], Field())})
+
+OptionsModel = create_model("Options", **fields)
+
+flags = {}
+_options = vars(parser)['_option_string_actions']
+for key in _options:
+    if(_options[key].dest != 'help'):
+        flag = _options[key]
+        _type = str
+        if _options[key].default is not None: _type = type(_options[key].default)
+        flags.update({flag.dest: (_type,Field(default=flag.default, description=flag.help))})
+
+FlagsModel = create_model("Flags", **flags)
+
+class SamplerItem(BaseModel):
+    name: str = Field(title="Name")
+    aliases: list[str] = Field(title="Aliases")
+    options: dict[str, str] = Field(title="Options")
+
+class UpscalerItem(BaseModel):
+    name: str = Field(title="Name")
+    model_name: Optional[str] = Field(title="Model Name")
+    model_path: Optional[str] = Field(title="Path")
+    model_url: Optional[str] = Field(title="URL")
+
+class SDModelItem(BaseModel):
+    title: str = Field(title="Title")
+    model_name: str = Field(title="Model Name")
+    hash: str = Field(title="Hash")
+    filename: str = Field(title="Filename")
+    config: str = Field(title="Config file")
+
+class HypernetworkItem(BaseModel):
+    name: str = Field(title="Name")
+    path: Optional[str] = Field(title="Path")
+
+class FaceRestorerItem(BaseModel):
+    name: str = Field(title="Name")
+    cmd_dir: Optional[str] = Field(title="Path")
+
+class RealesrganItem(BaseModel):
+    name: str = Field(title="Name")
+    path: Optional[str] = Field(title="Path")
+    scale: Optional[int] = Field(title="Scale")
+
+class PromptStyleItem(BaseModel):
+    name: str = Field(title="Name")
+    prompt: Optional[str] = Field(title="Prompt")
+    negative_prompt: Optional[str] = Field(title="Negative Prompt")
+
+class ArtistItem(BaseModel):
+    name: str = Field(title="Name")
+    score: float = Field(title="Score")
+    category: str = Field(title="Category")
\ No newline at end of file
diff --git a/modules/extensions.py b/modules/extensions.py
index 897af96e..8e0977fd 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -34,8 +34,11 @@ class Extension:
         if repo is None or repo.bare:
self.remote = None
else:
- self.remote = next(repo.remote().urls, None)
- self.status = 'unknown'
+ try:
+ self.remote = next(repo.remote().urls, None)
+ self.status = 'unknown'
+ except Exception:
+ self.remote = None
def list_files(self, subdir, extension):
from modules import scripts
diff --git a/modules/extras.py b/modules/extras.py
index 8e2ab35c..71b93a06 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -136,12 +136,13 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
 def run_upscalers_blend(params: List[UpscaleParams], image: Image.Image, info: str) -> Tuple[Image.Image, str]:
blended_result: Image.Image = None
+ image_hash: str = hash(np.array(image.getdata()).tobytes())
for upscaler in params:
upscale_args = (upscaler.upscaler_idx, upscaling_resize, resize_mode,
upscaling_resize_w, upscaling_resize_h, upscaling_crop)
- cache_key = LruCache.Key(image_hash=hash(np.array(image.getdata()).tobytes()),
+ cache_key = LruCache.Key(image_hash=image_hash,
info_hash=hash(info),
- args_hash=hash((upscale_args, upscale_first)))
+ args_hash=hash(upscale_args))
cached_entry = cached_images.get(cache_key)
if cached_entry is None:
res = upscale(image, *upscale_args)
diff --git a/modules/processing.py b/modules/processing.py
index a46e592d..03c9143d 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -501,6 +501,9 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
 if len(prompts) == 0:
break
+ if p.scripts is not None:
+ p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds)
+
with devices.autocast():
uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps)
c = prompt_parser.get_multicond_learned_conditioning(shared.sd_model, prompts, p.steps)
@@ -665,17 +668,17 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
 images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, suffix="-before-highres-fix")
if opts.use_scale_latent_for_hires_fix:
+ for i in range(samples.shape[0]):
+ save_intermediate(samples, i)
+
samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
-
+
# Avoid making the inpainting conditioning unless necessary as
# this does need some extra compute to decode / encode the image again.
if getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) < 1.0:
image_conditioning = self.img2img_image_conditioning(decode_first_stage(self.sd_model, samples), samples)
else:
image_conditioning = self.txt2img_image_conditioning(samples)
-
- for i in range(samples.shape[0]):
- save_intermediate(samples, i)
else:
decoded_samples = decode_first_stage(self.sd_model, samples)
lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index c28e220e..74dfb880 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -46,25 +46,23 @@ class CFGDenoiserParams:
 ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"])
-callbacks_app_started = []
-callbacks_model_loaded = []
-callbacks_ui_tabs = []
-callbacks_ui_settings = []
-callbacks_before_image_saved = []
-callbacks_image_saved = []
-callbacks_cfg_denoiser = []
+callback_map = dict(
+ callbacks_app_started=[],
+ callbacks_model_loaded=[],
+ callbacks_ui_tabs=[],
+ callbacks_ui_settings=[],
+ callbacks_before_image_saved=[],
+ callbacks_image_saved=[],
+ callbacks_cfg_denoiser=[]
+)
def clear_callbacks():
- callbacks_model_loaded.clear()
- callbacks_ui_tabs.clear()
- callbacks_ui_settings.clear()
- callbacks_before_image_saved.clear()
- callbacks_image_saved.clear()
- callbacks_cfg_denoiser.clear()
+ for callback_list in callback_map.values():
+ callback_list.clear()
def app_started_callback(demo: Optional[Blocks], app: FastAPI):
- for c in callbacks_app_started:
+ for c in callback_map['callbacks_app_started']:
try:
c.callback(demo, app)
except Exception:
@@ -72,7 +70,7 @@ def app_started_callback(demo: Optional[Blocks], app: FastAPI):
 def model_loaded_callback(sd_model):
- for c in callbacks_model_loaded:
+ for c in callback_map['callbacks_model_loaded']:
try:
c.callback(sd_model)
except Exception:
@@ -82,7 +80,7 @@ def model_loaded_callback(sd_model):
 def ui_tabs_callback():
res = []
- for c in callbacks_ui_tabs:
+ for c in callback_map['callbacks_ui_tabs']:
try:
res += c.callback() or []
except Exception:
@@ -92,7 +90,7 @@ def ui_tabs_callback():
 def ui_settings_callback():
- for c in callbacks_ui_settings:
+ for c in callback_map['callbacks_ui_settings']:
try:
c.callback()
except Exception:
@@ -100,7 +98,7 @@ def ui_settings_callback():
 def before_image_saved_callback(params: ImageSaveParams):
- for c in callbacks_before_image_saved:
+ for c in callback_map['callbacks_before_image_saved']:
try:
c.callback(params)
except Exception:
@@ -108,7 +106,7 @@ def before_image_saved_callback(params: ImageSaveParams):
 def image_saved_callback(params: ImageSaveParams):
- for c in callbacks_image_saved:
+ for c in callback_map['callbacks_image_saved']:
try:
c.callback(params)
except Exception:
@@ -116,7 +114,7 @@ def image_saved_callback(params: ImageSaveParams):
 def cfg_denoiser_callback(params: CFGDenoiserParams):
- for c in callbacks_cfg_denoiser:
+ for c in callback_map['callbacks_cfg_denoiser']:
try:
c.callback(params)
except Exception:
@@ -129,17 +127,33 @@ def add_callback(callbacks, fun):
 callbacks.append(ScriptCallback(filename, fun))
+
+def remove_current_script_callbacks():
+ stack = [x for x in inspect.stack() if x.filename != __file__]
+ filename = stack[0].filename if len(stack) > 0 else 'unknown file'
+ if filename == 'unknown file':
+ return
+ for callback_list in callback_map.values():
+ for callback_to_remove in [cb for cb in callback_list if cb.script == filename]:
+ callback_list.remove(callback_to_remove)
+
+
+def remove_callbacks_for_function(callback_func):
+ for callback_list in callback_map.values():
+ for callback_to_remove in [cb for cb in callback_list if cb.callback == callback_func]:
+ callback_list.remove(callback_to_remove)
+
def on_app_started(callback):
"""register a function to be called when the webui started, the gradio `Block` component and
fastapi `FastAPI` object are passed as the arguments"""
- add_callback(callbacks_app_started, callback)
+ add_callback(callback_map['callbacks_app_started'], callback)
def on_model_loaded(callback):
"""register a function to be called when the stable diffusion model is created; the model is
passed as an argument"""
- add_callback(callbacks_model_loaded, callback)
+ add_callback(callback_map['callbacks_model_loaded'], callback)
def on_ui_tabs(callback):
@@ -152,13 +166,13 @@ def on_ui_tabs(callback):
 title is tab text displayed to user in the UI
elem_id is HTML id for the tab
"""
- add_callback(callbacks_ui_tabs, callback)
+ add_callback(callback_map['callbacks_ui_tabs'], callback)
def on_ui_settings(callback):
"""register a function to be called before UI settings are populated; add your settings
by using shared.opts.add_option(shared.OptionInfo(...)) """
- add_callback(callbacks_ui_settings, callback)
+ add_callback(callback_map['callbacks_ui_settings'], callback)
def on_before_image_saved(callback):
@@ -166,7 +180,7 @@ def on_before_image_saved(callback):
 The callback is called with one argument:
- params: ImageSaveParams - parameters the image is to be saved with. You can change fields in this object.
"""
- add_callback(callbacks_before_image_saved, callback)
+ add_callback(callback_map['callbacks_before_image_saved'], callback)
def on_image_saved(callback):
@@ -174,7 +188,7 @@ def on_image_saved(callback):
 The callback is called with one argument:
- params: ImageSaveParams - parameters the image was saved with. Changing fields in this object does nothing.
"""
- add_callback(callbacks_image_saved, callback)
+ add_callback(callback_map['callbacks_image_saved'], callback)
def on_cfg_denoiser(callback):
@@ -182,5 +196,4 @@ def on_cfg_denoiser(callback):
 The callback is called with one argument:
- params: CFGDenoiserParams - parameters to be passed to the inner model and sampling state details.
"""
- add_callback(callbacks_cfg_denoiser, callback)
-
+ add_callback(callback_map['callbacks_cfg_denoiser'], callback)
diff --git a/modules/scripts.py b/modules/scripts.py
index 28ce07f4..366c90d7 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -73,6 +73,19 @@ class Script:
 pass
+ def process_batch(self, p, *args, **kwargs):
+ """
+ Same as process(), but called for every batch.
+
+ **kwargs will have those items:
+ - batch_number - index of current batch, from 0 to number of batches-1
+ - prompts - list of prompts for current batch; you can change contents of this list but changing the number of entries will likely break things
+ - seeds - list of seeds for current batch
+ - subseeds - list of subseeds for current batch
+ """
+
+ pass
+
def postprocess(self, p, processed, *args):
"""
This function is called after processing ends for AlwaysVisible scripts.
@@ -296,6 +309,15 @@ class ScriptRunner:
 print(f"Error running process: {script.filename}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
+ def process_batch(self, p, **kwargs):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.process_batch(p, *script_args, **kwargs)
+ except Exception:
+ print(f"Error running process_batch: {script.filename}", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+
def postprocess(self, p, processed):
for script in self.alwayson_scripts:
try:
diff --git a/modules/sd_models.py b/modules/sd_models.py
index ae427a5c..34c57bfa 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -163,11 +163,13 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
 checkpoint_file = checkpoint_info.filename
sd_model_hash = checkpoint_info.hash
- vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
+ if shared.opts.sd_checkpoint_cache > 0 and hasattr(model, "sd_checkpoint_info"):
+ sd_vae.restore_base_vae(model)
+ checkpoints_loaded[model.sd_checkpoint_info] = model.state_dict().copy()
- checkpoint_key = checkpoint_info
+ vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
- if checkpoint_key not in checkpoints_loaded:
+ if checkpoint_info not in checkpoints_loaded:
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
@@ -197,18 +199,15 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
 model.first_stage_model.to(devices.dtype_vae)
- if shared.opts.sd_checkpoint_cache > 0:
- # if PR #4035 were to get merged, restore base VAE first before caching
- checkpoints_loaded[checkpoint_key] = model.state_dict().copy()
- while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
- checkpoints_loaded.popitem(last=False) # LRU
-
else:
vae_name = sd_vae.get_filename(vae_file) if vae_file else None
vae_message = f" with {vae_name} VAE" if vae_name else ""
print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
- checkpoints_loaded.move_to_end(checkpoint_key)
- model.load_state_dict(checkpoints_loaded[checkpoint_key])
+ model.load_state_dict(checkpoints_loaded[checkpoint_info])
+
+ if shared.opts.sd_checkpoint_cache > 0:
+ while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
+ checkpoints_loaded.popitem(last=False) # LRU
model.sd_model_hash = sd_model_hash
model.sd_model_checkpoint = checkpoint_file
diff --git a/modules/shared.py b/modules/shared.py
index 6e7a02e0..71587557 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -44,6 +44,7 @@ parser.add_argument("--precision", type=str, help="evaluate at this precision",
 parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us")
+parser.add_argument("--enable-insecure-extension-access", action='store_true', help="enable extensions tab regardless of other options")
parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
@@ -85,6 +86,9 @@ parser.add_argument("--nowebui", action='store_true', help="use api=True to laun
 parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
parser.add_argument("--administrator", action='store_true', help="Administrator rights", default=False)
+parser.add_argument("--tls-keyfile", type=str, help="Partially enables TLS, requires --tls-certfile to fully function", default=None)
+parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, requires --tls-keyfile to fully function", default=None)
+parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None)
cmd_opts = parser.parse_args()
restricted_opts = {
@@ -99,7 +103,7 @@ restricted_opts = {
 "outdir_save",
}
-cmd_opts.disable_extension_access = cmd_opts.share or cmd_opts.listen
+cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen) and not cmd_opts.enable_insecure_extension_access
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_swinir, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
(devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer'])
@@ -146,6 +150,9 @@ class State:
 self.interrupted = True
def nextjob(self):
+ if opts.show_progress_every_n_steps == -1:
+ self.do_set_current_image()
+
self.job_no += 1
self.sampling_step = 0
self.current_image_sampling_step = 0
@@ -186,17 +193,21 @@ class State:
 """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
def set_current_image(self):
+ if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.show_progress_every_n_steps > 0:
+ self.do_set_current_image()
+
+ def do_set_current_image(self):
if not parallel_processing_allowed:
return
+ if self.current_latent is None:
+ return
+
+ if opts.show_progress_grid:
+ self.current_image = sd_samplers.samples_to_image_grid(self.current_latent)
+ else:
+ self.current_image = sd_samplers.sample_to_image(self.current_latent)
- if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and self.current_latent is not None:
- if opts.show_progress_grid:
- self.current_image = sd_samplers.samples_to_image_grid(self.current_latent)
- else:
- self.current_image = sd_samplers.sample_to_image(self.current_latent)
-
- self.current_image_sampling_step = self.sampling_step
-
+ self.current_image_sampling_step = self.sampling_step
state = State()
@@ -352,7 +363,7 @@ options_templates.update(options_section(('interrogate', "Interrogate Options"),
 options_templates.update(options_section(('ui', "User interface"), {
"show_progressbar": OptionInfo(True, "Show progressbar"),
- "show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
+ "show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set to 0 to disable. Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}),
"show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
"return_grid": OptionInfo(True, "Show grid in results for web"),
"do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
@@ -399,7 +410,8 @@ class Options:
 if key in self.data or key in self.data_labels:
assert not cmd_opts.freeze_settings, "changing settings is disabled"
- comp_args = opts.data_labels[key].component_args
+ info = opts.data_labels.get(key, None)
+ comp_args = info.component_args if info else None
if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
raise RuntimeError(f"not possible to set {key} because it is restricted")
diff --git a/modules/ui.py b/modules/ui.py
index 633b56ef..4c2829af 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -276,7 +276,7 @@ def check_progress_call(id_part):
 image = gr_show(False)
preview_visibility = gr_show(False)
- if opts.show_progress_every_n_steps > 0:
+ if opts.show_progress_every_n_steps != 0:
shared.state.set_current_image()
image = shared.state.current_image
@@ -1439,8 +1439,7 @@ def create_ui(wrap_gradio_gpu_call):
 changed = 0
for key, value, comp in zip(opts.data_labels.keys(), args, components):
- if comp != dummy_component and not opts.same_type(value, opts.data_labels[key].default):
- return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}", opts.dumpjson()
+ assert comp == dummy_component or opts.same_type(value, opts.data_labels[key].default), f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}"
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if comp == dummy_component:
@@ -1458,7 +1457,7 @@ def create_ui(wrap_gradio_gpu_call):
 opts.save(shared.config_filename)
- return f'{changed} settings changed.', opts.dumpjson()
+ return opts.dumpjson(), f'{changed} settings changed.'
def run_settings_single(value, key):
if not opts.same_type(value, opts.data_labels[key].default):
@@ -1622,9 +1621,9 @@ def create_ui(wrap_gradio_gpu_call):
 text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
- fn=run_settings,
+ fn=wrap_gradio_call(run_settings, extra_outputs=[gr.update()]),
inputs=components,
- outputs=[result, text_settings],
+ outputs=[text_settings, result],
)
for i, k, item in quicksettings_list:
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index ab807722..a81de9a7 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -86,7 +86,7 @@ def extension_table():
 code += f"""
<tr>
<td><label><input class="gr-check-radio gr-checkbox" name="enable_{html.escape(ext.name)}" type="checkbox" {'checked="checked"' if ext.enabled else ''}>{html.escape(ext.name)}</label></td>
- <td><a href="{html.escape(ext.remote or '')}">{html.escape(ext.remote or '')}</a></td>
+ <td><a href="{html.escape(ext.remote or '')}" target="_blank">{html.escape(ext.remote or '')}</a></td>
<td{' class="extension_status"' if ext.remote is not None else ''}>{ext_status}</td>
</tr>
"""
diff --git a/modules/upscaler.py b/modules/upscaler.py
index 83fde7ca..c4e6e6bd 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -57,10 +57,18 @@ class Upscaler:
         self.scale = scale
         dest_w = img.width * scale
         dest_h = img.height * scale
+
         for i in range(3):
-            if img.width > dest_w and img.height > dest_h:
-                break
+            shape = (img.width, img.height)
+
             img = self.do_upscale(img, selected_model)
+
+            if shape == (img.width, img.height):
+                break
+
+            if img.width >= dest_w and img.height >= dest_h:
+                break
+
         if img.width != dest_w or img.height != dest_h:
             img = img.resize((int(dest_w), int(dest_h)), resample=LANCZOS)