From efe98ca0900bdf1098a5a957f576b86e4ddebbea Mon Sep 17 00:00:00 2001 From: Acncagua Slt Date: Wed, 3 May 2023 00:44:16 +0900 Subject: Initialize the upscalers Add modelloader.load_upscalers to def initialize() --- webui.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'webui.py') diff --git a/webui.py b/webui.py index 357bf4c1..dd385a38 100644 --- a/webui.py +++ b/webui.py @@ -185,6 +185,9 @@ def initialize(): modules.scripts.load_scripts() startup_timer.record("load scripts") + modelloader.load_upscalers() + #startup_timer.record("load upscalers") #Is this necessary? I don't know. + modules.sd_vae.refresh_vae_list() startup_timer.record("refresh VAE") -- cgit v1.2.3 From 5427b7128db9656038bebc0a0240ee122b98e6b9 Mon Sep 17 00:00:00 2001 From: mouhao Date: Sun, 7 May 2023 20:54:48 +0800 Subject: Update webui.py Fix missing /docs endpoint in newer gradio versions Newer versions of gradio (>=3.27.1) have removed the /docs endpoint by default. This commit adds it back to enable accessing the API documentation. --- webui.py | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'webui.py') diff --git a/webui.py b/webui.py index 357bf4c1..a3dfdcee 100644 --- a/webui.py +++ b/webui.py @@ -286,6 +286,11 @@ def api_only(): print(f"Startup time: {startup_timer.summary()}.") api.launch(server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1", port=cmd_opts.port if cmd_opts.port else 7861) +# patch in url for api docs +def my_setup(self): + self.docs_url = "/docs" + self.redoc_url = "/redoc" + self.orig_setup() def webui(): launch_api = cmd_opts.api @@ -313,6 +318,9 @@ def webui(): for line in file.readlines(): gradio_auth_creds += [x.strip() for x in line.split(',') if x.strip()] + if launch_api: + FastAPI.orig_setup = FastAPI.setup + setattr(FastAPI, "setup", my_setup) app, local_url, share_url = shared.demo.launch( share=cmd_opts.share, server_name=server_name, -- cgit v1.2.3 From 160780283a991f56b5671402f2cd3830f8f2a66b Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 8 May 2023 07:57:17 +0300 Subject: put all code for /docs in same place and make it work properly with UI reloads --- webui.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'webui.py') diff --git a/webui.py b/webui.py index 8ffb0844..3ecc3f07 100644 --- a/webui.py +++ b/webui.py @@ -280,12 +280,6 @@ def api_only(): print(f"Startup time: {startup_timer.summary()}.") api.launch(server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1", port=cmd_opts.port if cmd_opts.port else 7861) -# patch in url for api docs -def my_setup(self): - self.docs_url = "/docs" - self.redoc_url = "/redoc" - self.orig_setup() - def webui(): launch_api = cmd_opts.api initialize() @@ -312,9 +306,16 @@ def webui(): for line in file.readlines(): gradio_auth_creds += [x.strip() for x in line.split(',') if x.strip()] - if launch_api: - FastAPI.orig_setup = FastAPI.setup - setattr(FastAPI, "setup", my_setup) + # this restores the missing /docs endpoint + if launch_api and not hasattr(FastAPI, 'original_setup'): + def fastapi_setup(self): + self.docs_url = "/docs" + self.redoc_url = "/redoc" + self.original_setup() + + FastAPI.original_setup = FastAPI.setup + FastAPI.setup = fastapi_setup + app, local_url, share_url = shared.demo.launch( share=cmd_opts.share, server_name=server_name, -- cgit v1.2.3 From 2b96a7b694d3392f76940dfe5df895a2833400fb Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 8 May 2023 16:46:35 +0300 Subject: add links to wiki for filename pattern 
settings add extended info for quicksettings setting --- javascript/ui_settings_hints.js | 41 +++++++++++++++++++++++++++++++++++++++++ modules/ui.py | 14 ++++++++++++++ style.css | 16 ++++++++++++++++ webui.py | 1 + 4 files changed, 72 insertions(+) create mode 100644 javascript/ui_settings_hints.js (limited to 'webui.py') diff --git a/javascript/ui_settings_hints.js b/javascript/ui_settings_hints.js new file mode 100644 index 00000000..87a289d3 --- /dev/null +++ b/javascript/ui_settings_hints.js @@ -0,0 +1,41 @@ +// various hints and extra info for the settings tab + +onUiLoaded(function(){ + createLink = function(elem_id, text, href){ + var a = document.createElement('A') + a.textContent = text + a.target = '_blank'; + + elem = gradioApp().querySelector('#'+elem_id) + elem.insertBefore(a, elem.querySelector('label')) + + return a + } + + createLink("setting_samples_filename_pattern", "[wiki] ").href = "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory" + createLink("setting_directories_filename_pattern", "[wiki] ").href = "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory" + + createLink("setting_quicksettings_list", "[info] ").addEventListener("click", function(event){ + requestGet("./internal/quicksettings-hint", {}, function(data){ + var table = document.createElement('table') + table.className = 'settings-value-table' + + data.forEach(function(obj){ + var tr = document.createElement('tr') + var td = document.createElement('td') + td.textContent = obj.name + tr.appendChild(td) + + var td = document.createElement('td') + td.textContent = obj.label + tr.appendChild(td) + + table.appendChild(tr) + }) + + popup(table); + }) + }); +}) + + diff --git a/modules/ui.py b/modules/ui.py index 883d37e7..842c57f7 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1944,3 +1944,17 @@ gradio: {gr.__version__}  •  checkpoint: N/A """ + + +def setup_ui_api(app): + from pydantic import BaseModel, Field + from typing import List + + class QuicksettingsHint(BaseModel): + name: str = Field(title="Name of the quicksettings field") + label: str = Field(title="Label of the quicksettings field") + + def quicksettings_hint(): + return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()] + + app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint]) diff --git a/style.css b/style.css index 57ddba0e..b823c7dd 100644 --- a/style.css +++ b/style.css @@ -125,6 +125,10 @@ div.gradio-html.min{ text-decoration: none; } +a{ + font-weight: bold; + cursor: pointer; +} /* general styled components */ @@ -397,6 +401,18 @@ div#extras_scale_to_tab div.form{ margin: 0 1.2em; } +table.settings-value-table{ + background: white; + border-collapse: collapse; + margin: 1em; + border: 4px solid white; +} + +table.settings-value-table td{ + padding: 0.4em; + border: 1px solid #ccc; + max-width: 36em; +} /* live preview */ .progressDiv{ diff --git a/webui.py b/webui.py index f770db54..727ebd31 100644 --- a/webui.py +++ b/webui.py @@ -345,6 +345,7 @@ def webui(): setup_middleware(app) modules.progress.setup_progress_api(app) + modules.ui.setup_ui_api(app) if launch_api: create_api(app) -- cgit v1.2.3 From 762265eab58cdb8f2d6398769bab43d8b8db0075 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 07:52:45 +0300 Subject: autofixes from ruff --- extensions-builtin/LDSR/ldsr_model_arch.py | 1 - 
extensions-builtin/LDSR/sd_hijack_autoencoder.py | 2 +- modules/api/api.py | 14 +++++++------- modules/extras.py | 4 ++-- modules/images.py | 4 ++-- modules/img2img.py | 2 +- modules/prompt_parser.py | 2 +- modules/realesrgan_model.py | 2 +- modules/sd_disable_initialization.py | 2 +- modules/sd_hijack.py | 4 ++-- modules/sd_hijack_ip2p.py | 2 +- modules/sd_hijack_optimizations.py | 1 - modules/sd_models.py | 6 +++--- modules/textual_inversion/textual_inversion.py | 2 +- modules/ui.py | 13 ++++++------- modules/ui_extensions.py | 2 +- modules/ui_extra_networks.py | 2 +- pyproject.toml | 4 +++- scripts/outpainting_mk_2.py | 2 +- scripts/postprocessing_upscale.py | 6 +++--- scripts/xyz_grid.py | 2 +- webui.py | 2 +- 22 files changed, 40 insertions(+), 41 deletions(-) (limited to 'webui.py') diff --git a/extensions-builtin/LDSR/ldsr_model_arch.py b/extensions-builtin/LDSR/ldsr_model_arch.py index bc11cc6e..2339de7f 100644 --- a/extensions-builtin/LDSR/ldsr_model_arch.py +++ b/extensions-builtin/LDSR/ldsr_model_arch.py @@ -110,7 +110,6 @@ class LDSR: diffusion_steps = int(steps) eta = 1.0 - down_sample_method = 'Lanczos' gc.collect() if torch.cuda.is_available: diff --git a/extensions-builtin/LDSR/sd_hijack_autoencoder.py b/extensions-builtin/LDSR/sd_hijack_autoencoder.py index 8e03c7f8..db2231dd 100644 --- a/extensions-builtin/LDSR/sd_hijack_autoencoder.py +++ b/extensions-builtin/LDSR/sd_hijack_autoencoder.py @@ -165,7 +165,7 @@ class VQModel(pl.LightningModule): def validation_step(self, batch, batch_idx): log_dict = self._validation_step(batch, batch_idx) with self.ema_scope(): - log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema") + self._validation_step(batch, batch_idx, suffix="_ema") return log_dict def _validation_step(self, batch, batch_idx, suffix=""): diff --git a/modules/api/api.py b/modules/api/api.py index 9bb95dfd..d47c39fc 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -60,7 +60,7 @@ def decode_base64_to_image(encoding): try: image = Image.open(BytesIO(base64.b64decode(encoding))) return image - except Exception as err: + except Exception: raise HTTPException(status_code=500, detail="Invalid encoded image") def encode_pil_to_base64(image): @@ -264,11 +264,11 @@ class Api: if request.alwayson_scripts and (len(request.alwayson_scripts) > 0): for alwayson_script_name in request.alwayson_scripts.keys(): alwayson_script = self.get_script(alwayson_script_name, script_runner) - if alwayson_script == None: + if alwayson_script is None: raise HTTPException(status_code=422, detail=f"always on script {alwayson_script_name} not found") # Selectable script in always on script param check - if alwayson_script.alwayson == False: - raise HTTPException(status_code=422, detail=f"Cannot have a selectable script in the always on scripts params") + if alwayson_script.alwayson is False: + raise HTTPException(status_code=422, detail="Cannot have a selectable script in the always on scripts params") # always on script with no arg should always run so you don't really need to add them to the requests if "args" in request.alwayson_scripts[alwayson_script_name]: # min between arg length in scriptrunner and arg length in the request @@ -310,7 +310,7 @@ class Api: p.outpath_samples = opts.outdir_txt2img_samples shared.state.begin() - if selectable_scripts != None: + if selectable_scripts is not None: p.script_args = script_args processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here else: @@ -367,7 +367,7 @@ class Api: p.outpath_samples = 
opts.outdir_img2img_samples shared.state.begin() - if selectable_scripts != None: + if selectable_scripts is not None: p.script_args = script_args processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here else: @@ -642,7 +642,7 @@ class Api: sd_hijack.apply_optimizations() shared.state.end() return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") - except AssertionError as msg: + except AssertionError: shared.state.end() return TrainResponse(info=f"train embedding error: {error}") diff --git a/modules/extras.py b/modules/extras.py index ff4e9c4e..eb4f0b42 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -136,14 +136,14 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ result_is_instruct_pix2pix_model = False if theta_func2: - shared.state.textinfo = f"Loading B" + shared.state.textinfo = "Loading B" print(f"Loading {secondary_model_info.filename}...") theta_1 = sd_models.read_state_dict(secondary_model_info.filename, map_location='cpu') else: theta_1 = None if theta_func1: - shared.state.textinfo = f"Loading C" + shared.state.textinfo = "Loading C" print(f"Loading {tertiary_model_info.filename}...") theta_2 = sd_models.read_state_dict(tertiary_model_info.filename, map_location='cpu') diff --git a/modules/images.py b/modules/images.py index a41965ab..3d5d76cc 100644 --- a/modules/images.py +++ b/modules/images.py @@ -409,13 +409,13 @@ class FilenameGenerator: time_format = args[0] if len(args) > 0 and args[0] != "" else self.default_time_format try: time_zone = pytz.timezone(args[1]) if len(args) > 1 else None - except pytz.exceptions.UnknownTimeZoneError as _: + except pytz.exceptions.UnknownTimeZoneError: time_zone = None time_zone_time = time_datetime.astimezone(time_zone) try: formatted_time = time_zone_time.strftime(time_format) - except (ValueError, TypeError) as _: + except (ValueError, TypeError): formatted_time = time_zone_time.strftime(self.default_time_format) return sanitize_filename_part(formatted_time, replace_spaces=False) diff --git a/modules/img2img.py b/modules/img2img.py index 9fc3a698..cdae301a 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -59,7 +59,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): # try to find corresponding mask for an image using simple filename matching mask_image_path = os.path.join(inpaint_mask_dir, os.path.basename(image)) # if not found use first one ("same mask for all images" use-case) - if not mask_image_path in inpaint_masks: + if mask_image_path not in inpaint_masks: mask_image_path = inpaint_masks[0] mask_image = Image.open(mask_image_path) p.image_mask = mask_image diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 69665372..e084e948 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -92,7 +92,7 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): def get_schedule(prompt): try: tree = schedule_parser.parse(prompt) - except lark.exceptions.LarkError as e: + except lark.exceptions.LarkError: if 0: import traceback traceback.print_exc() diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py index efd7fca5..9ec1adf2 100644 --- a/modules/realesrgan_model.py +++ b/modules/realesrgan_model.py @@ -134,6 +134,6 @@ def get_realesrgan_models(scaler): ), ] return models - except Exception as e: + except Exception: print("Error making Real-ESRGAN models list:", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) diff --git 
a/modules/sd_disable_initialization.py b/modules/sd_disable_initialization.py index c4a09d15..9fc89dc6 100644 --- a/modules/sd_disable_initialization.py +++ b/modules/sd_disable_initialization.py @@ -61,7 +61,7 @@ class DisableInitialization: if res is None: res = original(url, *args, local_files_only=False, **kwargs) return res - except Exception as e: + except Exception: return original(url, *args, local_files_only=False, **kwargs) def transformers_utils_hub_get_from_cache(url, *args, local_files_only=False, **kwargs): diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index f4bb0266..d8135211 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -118,7 +118,7 @@ def weighted_forward(sd_model, x, c, w, *args, **kwargs): try: #Delete temporary weights if appended del sd_model._custom_loss_weight - except AttributeError as e: + except AttributeError: pass #If we have an old loss function, reset the loss function to the original one @@ -133,7 +133,7 @@ def apply_weighted_forward(sd_model): def undo_weighted_forward(sd_model): try: del sd_model.weighted_forward - except AttributeError as e: + except AttributeError: pass diff --git a/modules/sd_hijack_ip2p.py b/modules/sd_hijack_ip2p.py index 3c727d3b..41ed54a2 100644 --- a/modules/sd_hijack_ip2p.py +++ b/modules/sd_hijack_ip2p.py @@ -10,4 +10,4 @@ def should_hijack_ip2p(checkpoint_info): ckpt_basename = os.path.basename(checkpoint_info.filename).lower() cfg_basename = os.path.basename(sd_models_config.find_checkpoint_config_near_filename(checkpoint_info)).lower() - return "pix2pix" in ckpt_basename and not "pix2pix" in cfg_basename + return "pix2pix" in ckpt_basename and "pix2pix" not in cfg_basename diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index f10865cd..b623d53d 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -296,7 +296,6 @@ def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_ if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes: # the big matmul fits into our memory limit; do everything in 1 chunk, # i.e. 
send it down the unchunked fast-path - query_chunk_size = q_tokens kv_chunk_size = k_tokens with devices.without_autocast(disable=q.dtype == v.dtype): diff --git a/modules/sd_models.py b/modules/sd_models.py index 36f643e1..11c1a344 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -239,7 +239,7 @@ def read_metadata_from_safetensors(filename): if isinstance(v, str) and v[0:1] == '{': try: res[k] = json.loads(v) - except Exception as e: + except Exception: pass return res @@ -467,7 +467,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None): try: with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd): sd_model = instantiate_from_config(sd_config.model) - except Exception as e: + except Exception: pass if sd_model is None: @@ -544,7 +544,7 @@ def reload_model_weights(sd_model=None, info=None): try: load_model_weights(sd_model, checkpoint_info, state_dict, timer) - except Exception as e: + except Exception: print("Failed to load checkpoint, restoring previous") load_model_weights(sd_model, current_checkpoint_info, None, timer) raise diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 4368eb63..f753b75f 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -603,7 +603,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st try: vectorSize = list(data['string_to_param'].values())[0].shape[0] - except Exception as e: + except Exception: vectorSize = '?' checkpoint = sd_models.select_checkpoint() diff --git a/modules/ui.py b/modules/ui.py index d02f6e82..2171f3aa 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -246,7 +246,7 @@ def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: all_seeds = gen_info.get('all_seeds', [-1]) res = all_seeds[index if 0 <= index < len(all_seeds) else 0] - except json.decoder.JSONDecodeError as e: + except json.decoder.JSONDecodeError: if gen_info_string != '': print("Error parsing JSON generation info:", file=sys.stderr) print(gen_info_string, file=sys.stderr) @@ -736,8 +736,8 @@ def create_ui(): with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: hidden = '
Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' gr.HTML( - f"
Process images in a directory on the same machine where the server is running." + - f"
Use an empty output directory to save pictures normally instead of writing to the output directory." + + "
Process images in a directory on the same machine where the server is running." + + "
Use an empty output directory to save pictures normally instead of writing to the output directory." + + f"
Add inpaint batch mask directory to enable inpaint batch processing." f"{hidden}
" ) @@ -746,7 +746,6 @@ def create_ui(): img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] - img2img_image_inputs = [init_img, sketch, init_img_with_mask, inpaint_color_sketch] for i, tab in enumerate(img2img_tabs): tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) @@ -1290,8 +1289,8 @@ def create_ui(): with gr.Column(elem_id='ti_gallery_container'): ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) - ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) - ti_progress = gr.HTML(elem_id="ti_progress", value="") + gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) + gr.HTML(elem_id="ti_progress", value="") ti_outcome = gr.HTML(elem_id="ti_error", value="") create_embedding.click( @@ -1668,7 +1667,7 @@ def create_ui(): interface.render() if os.path.exists(os.path.join(script_path, "notification.mp3")): - audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) + gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) footer = shared.html("footer.html") footer = footer.format(versions=versions_html()) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index d9faf85a..ed70abe5 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -490,7 +490,7 @@ def create_ui(): config_states.list_config_states() with gr.Blocks(analytics_enabled=False) as ui: - with gr.Tabs(elem_id="tabs_extensions") as tabs: + with gr.Tabs(elem_id="tabs_extensions"): with gr.TabItem("Installed", id="installed"): with gr.Row(elem_id="extensions_installed_top"): diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 8c3dea56..49e06289 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -263,7 +263,7 @@ def create_ui(container, button, tabname): ui.stored_extra_pages = pages_in_preferred_order(extra_pages.copy()) ui.tabname = tabname - with gr.Tabs(elem_id=tabname+"_extra_tabs") as tabs: + with gr.Tabs(elem_id=tabname+"_extra_tabs"): for page in ui.stored_extra_pages: page_id = page.title.lower().replace(" ", "_") diff --git a/pyproject.toml b/pyproject.toml index 9e9662ad..1e164abc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,9 @@ ignore = [ "E501", - "E731" + "E731", + "E402", # Module level import not at top of file + "F401" # Module imported but unused ] exclude = ["extensions"] diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index 670bb8ac..b10fed6c 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -72,7 +72,7 @@ def get_matched_noise(_np_src_image, np_mask_rgb, noise_q=1, color_variation=0.0 height = _np_src_image.shape[1] num_channels = _np_src_image.shape[2] - np_src_image = _np_src_image[:] * (1. - np_mask_rgb) + _np_src_image[:] * (1. - np_mask_rgb) np_mask_grey = (np.sum(np_mask_rgb, axis=2) / 3.) 
img_mask = np_mask_grey > 1e-6 ref_mask = np_mask_grey < 1e-3 diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index ef1186ac..edb70ac0 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -98,13 +98,13 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): assert upscaler2 or (upscaler_2_name is None), f'could not find upscaler named {upscaler_2_name}' upscaled_image = self.upscale(pp.image, pp.info, upscaler1, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop) - pp.info[f"Postprocess upscaler"] = upscaler1.name + pp.info["Postprocess upscaler"] = upscaler1.name if upscaler2 and upscaler_2_visibility > 0: second_upscale = self.upscale(pp.image, pp.info, upscaler2, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop) upscaled_image = Image.blend(upscaled_image, second_upscale, upscaler_2_visibility) - pp.info[f"Postprocess upscaler 2"] = upscaler2.name + pp.info["Postprocess upscaler 2"] = upscaler2.name pp.image = upscaled_image @@ -134,4 +134,4 @@ class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale): assert upscaler1, f'could not find upscaler named {upscaler_name}' pp.image = self.upscale(pp.image, pp.info, upscaler1, 0, upscale_by, 0, 0, False) - pp.info[f"Postprocess upscaler"] = upscaler1.name + pp.info["Postprocess upscaler"] = upscaler1.name diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index a725d74a..2ff42ef8 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -316,7 +316,7 @@ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend return Processed(p, []) z_count = len(zs) - sub_grids = [None] * z_count + for i in range(z_count): start_index = (i * len(xs) * len(ys)) + i end_index = start_index + len(xs) * len(ys) diff --git a/webui.py b/webui.py index 727ebd31..ec3d2aba 100644 --- a/webui.py +++ b/webui.py @@ -360,7 +360,7 @@ def webui(): if cmd_opts.subpath: redirector = FastAPI() redirector.get("/") - mounted_app = gradio.mount_gradio_app(redirector, shared.demo, path=f"/{cmd_opts.subpath}") + gradio.mount_gradio_app(redirector, shared.demo, path=f"/{cmd_opts.subpath}") wait_on_server(shared.demo) print('Restarting UI...') -- cgit v1.2.3 From f741a98baccae100fcfb40c017b5c35c5cba1b0c Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 08:43:42 +0300 Subject: imports cleanup for ruff --- extensions-builtin/Lora/lora.py | 1 - extensions-builtin/ScuNET/scripts/scunet_model.py | 1 - extensions-builtin/SwinIR/scripts/swinir_model.py | 3 +-- modules/codeformer/codeformer_arch.py | 4 +--- modules/codeformer/vqgan_arch.py | 2 -- modules/codeformer_model.py | 4 +--- modules/config_states.py | 2 +- modules/esrgan_model.py | 2 +- modules/esrgan_model_arch.py | 1 - modules/extensions.py | 1 - modules/generation_parameters_copypaste.py | 4 ---- modules/hypernetworks/hypernetwork.py | 3 +-- modules/hypernetworks/ui.py | 2 -- modules/images.py | 2 +- modules/img2img.py | 5 +---- modules/mac_specific.py | 1 - modules/modelloader.py | 1 - modules/models/diffusion/uni_pc/uni_pc.py | 1 - modules/processing.py | 5 ++--- modules/sd_hijack.py | 2 +- modules/sd_hijack_inpainting.py | 6 ------ modules/sd_hijack_ip2p.py | 5 +---- modules/sd_hijack_xlmr.py | 2 -- modules/sd_models.py | 2 +- modules/sd_models_config.py | 1 - modules/sd_samplers_kdiffusion.py | 1 - modules/sd_vae.py | 3 --- modules/shared.py | 3 --- modules/styles.py | 9 --------- 
modules/textual_inversion/autocrop.py | 4 +--- modules/textual_inversion/image_embedding.py | 2 +- modules/textual_inversion/preprocess.py | 4 ---- modules/textual_inversion/textual_inversion.py | 1 - modules/txt2img.py | 9 +++------ modules/ui.py | 5 ++--- modules/ui_extra_networks.py | 1 - modules/ui_postprocessing.py | 2 +- modules/upscaler.py | 2 -- modules/xlmr.py | 2 +- pyproject.toml | 11 +++++++---- scripts/custom_code.py | 2 +- scripts/outpainting_mk_2.py | 4 ++-- scripts/poor_mans_outpainting.py | 4 ++-- scripts/prompt_matrix.py | 7 ++----- scripts/prompts_from_file.py | 5 +---- scripts/sd_upscale.py | 4 ++-- scripts/xyz_grid.py | 6 ++---- webui.py | 2 +- 48 files changed, 42 insertions(+), 114 deletions(-) (limited to 'webui.py') diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py index ba1293df..0ab43229 100644 --- a/extensions-builtin/Lora/lora.py +++ b/extensions-builtin/Lora/lora.py @@ -1,4 +1,3 @@ -import glob import os import re import torch diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py index c7fd5739..aa2fdb3a 100644 --- a/extensions-builtin/ScuNET/scripts/scunet_model.py +++ b/extensions-builtin/ScuNET/scripts/scunet_model.py @@ -13,7 +13,6 @@ import modules.upscaler from modules import devices, modelloader from scunet_model_arch import SCUNet as net from modules.shared import opts -from modules import images class UpscalerScuNET(modules.upscaler.Upscaler): diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py index d77c3a92..55dd94ab 100644 --- a/extensions-builtin/SwinIR/scripts/swinir_model.py +++ b/extensions-builtin/SwinIR/scripts/swinir_model.py @@ -1,4 +1,3 @@ -import contextlib import os import numpy as np @@ -8,7 +7,7 @@ from basicsr.utils.download_util import load_file_from_url from tqdm import tqdm from modules import modelloader, devices, script_callbacks, shared -from modules.shared import cmd_opts, opts, state +from modules.shared import opts, state from swinir_model_arch import SwinIR as net from swinir_model_arch_v2 import Swin2SR as net2 from modules.upscaler import Upscaler, UpscalerData diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py index f1a7cf09..00c407de 100644 --- a/modules/codeformer/codeformer_arch.py +++ b/modules/codeformer/codeformer_arch.py @@ -1,14 +1,12 @@ # this file is copied from CodeFormer repository. 
Please see comment in modules/codeformer_model.py import math -import numpy as np import torch from torch import nn, Tensor import torch.nn.functional as F -from typing import Optional, List +from typing import Optional from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock -from basicsr.utils import get_root_logger from basicsr.utils.registry import ARCH_REGISTRY def calc_mean_std(feat, eps=1e-5): diff --git a/modules/codeformer/vqgan_arch.py b/modules/codeformer/vqgan_arch.py index e7293683..820e6b12 100644 --- a/modules/codeformer/vqgan_arch.py +++ b/modules/codeformer/vqgan_arch.py @@ -5,11 +5,9 @@ VQGAN code, adapted from the original created by the Unleashing Transformers aut https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py ''' -import numpy as np import torch import torch.nn as nn import torch.nn.functional as F -import copy from basicsr.utils import get_root_logger from basicsr.utils.registry import ARCH_REGISTRY diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py index 8d84bbc9..8e56cb89 100644 --- a/modules/codeformer_model.py +++ b/modules/codeformer_model.py @@ -33,11 +33,9 @@ def setup_model(dirname): try: from torchvision.transforms.functional import normalize from modules.codeformer.codeformer_arch import CodeFormer - from basicsr.utils.download_util import load_file_from_url - from basicsr.utils import imwrite, img2tensor, tensor2img + from basicsr.utils import img2tensor, tensor2img from facelib.utils.face_restoration_helper import FaceRestoreHelper from facelib.detection.retinaface import retinaface - from modules.shared import cmd_opts net_class = CodeFormer diff --git a/modules/config_states.py b/modules/config_states.py index 2ea00929..8f1ff428 100644 --- a/modules/config_states.py +++ b/modules/config_states.py @@ -14,7 +14,7 @@ from collections import OrderedDict import git from modules import shared, extensions -from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path, config_states_dir +from modules.paths_internal import script_path, config_states_dir all_config_states = OrderedDict() diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index f4369257..85aa6934 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -6,7 +6,7 @@ from PIL import Image from basicsr.utils.download_util import load_file_from_url import modules.esrgan_model_arch as arch -from modules import shared, modelloader, images, devices +from modules import modelloader, images, devices from modules.upscaler import Upscaler, UpscalerData from modules.shared import opts diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py index 7f8bc7c0..4de9dd8d 100644 --- a/modules/esrgan_model_arch.py +++ b/modules/esrgan_model_arch.py @@ -2,7 +2,6 @@ from collections import OrderedDict import math -import functools import torch import torch.nn as nn import torch.nn.functional as F diff --git a/modules/extensions.py b/modules/extensions.py index 34d9d654..829f8cd9 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -3,7 +3,6 @@ import sys import traceback import time -from datetime import datetime import git from modules import shared diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index fe8b18b2..f1c59c46 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -1,15 +1,11 @@ import base64 -import html import io -import math import os import re -from pathlib import 
Path import gradio as gr from modules.paths import data_path from modules import shared, ui_tempdir, script_callbacks -import tempfile from PIL import Image re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)' diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 1fc49537..9fe749b7 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -1,4 +1,3 @@ -import csv import datetime import glob import html @@ -18,7 +17,7 @@ from modules.textual_inversion.learn_schedule import LearnRateScheduler from torch import einsum from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_ -from collections import defaultdict, deque +from collections import deque from statistics import stdev, mean diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py index 76599f5a..be168736 100644 --- a/modules/hypernetworks/ui.py +++ b/modules/hypernetworks/ui.py @@ -1,6 +1,4 @@ import html -import os -import re import gradio as gr import modules.hypernetworks.hypernetwork diff --git a/modules/images.py b/modules/images.py index 5eb6d855..7392cb8b 100644 --- a/modules/images.py +++ b/modules/images.py @@ -19,7 +19,7 @@ import json import hashlib from modules import sd_samplers, shared, script_callbacks, errors -from modules.shared import opts, cmd_opts +from modules.shared import opts LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) diff --git a/modules/img2img.py b/modules/img2img.py index 32b1ecd6..d704bf90 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -1,12 +1,9 @@ -import math import os -import sys -import traceback import numpy as np from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError -from modules import devices, sd_samplers +from modules import sd_samplers from modules.generation_parameters_copypaste import create_override_settings_dict from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images from modules.shared import opts, state diff --git a/modules/mac_specific.py b/modules/mac_specific.py index 40ce2101..5c2f92a1 100644 --- a/modules/mac_specific.py +++ b/modules/mac_specific.py @@ -1,6 +1,5 @@ import torch import platform -from modules import paths from modules.sd_hijack_utils import CondFunc from packaging import version diff --git a/modules/modelloader.py b/modules/modelloader.py index cf685000..92ada694 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -1,4 +1,3 @@ -import glob import os import shutil import importlib diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py index 11b330bc..a4c4ef4e 100644 --- a/modules/models/diffusion/uni_pc/uni_pc.py +++ b/modules/models/diffusion/uni_pc/uni_pc.py @@ -1,5 +1,4 @@ import torch -import torch.nn.functional as F import math from tqdm.auto import trange diff --git a/modules/processing.py b/modules/processing.py index 6f5233c1..c3932d6b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -2,7 +2,6 @@ import json import math import os import sys -import warnings import hashlib import torch @@ -11,10 +10,10 @@ from PIL import Image, ImageFilter, ImageOps import random import cv2 from skimage import exposure -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List import modules.sd_hijack -from modules import devices, prompt_parser, masking, sd_samplers, lowvram, 
generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts +from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts from modules.sd_hijack import model_hijack from modules.shared import opts, cmd_opts, state import modules.shared as shared diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index d8135211..81573b78 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -3,7 +3,7 @@ from torch.nn.functional import silu from types import MethodType import modules.textual_inversion.textual_inversion -from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint +from modules import devices, sd_hijack_optimizations, shared from modules.hypernetworks import hypernetwork from modules.shared import cmd_opts from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 55a2ce4d..344d75c8 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -1,15 +1,9 @@ -import os import torch -from einops import repeat -from omegaconf import ListConfig - import ldm.models.diffusion.ddpm import ldm.models.diffusion.ddim import ldm.models.diffusion.plms -from ldm.models.diffusion.ddpm import LatentDiffusion -from ldm.models.diffusion.plms import PLMSSampler from ldm.models.diffusion.ddim import DDIMSampler, noise_like from ldm.models.diffusion.sampling_util import norm_thresholding diff --git a/modules/sd_hijack_ip2p.py b/modules/sd_hijack_ip2p.py index 41ed54a2..6fe6b6ff 100644 --- a/modules/sd_hijack_ip2p.py +++ b/modules/sd_hijack_ip2p.py @@ -1,8 +1,5 @@ -import collections import os.path -import sys -import gc -import time + def should_hijack_ip2p(checkpoint_info): from modules import sd_models_config diff --git a/modules/sd_hijack_xlmr.py b/modules/sd_hijack_xlmr.py index 4ac51c38..28528329 100644 --- a/modules/sd_hijack_xlmr.py +++ b/modules/sd_hijack_xlmr.py @@ -1,8 +1,6 @@ -import open_clip.tokenizer import torch from modules import sd_hijack_clip, devices -from modules.shared import opts class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords): diff --git a/modules/sd_models.py b/modules/sd_models.py index 11c1a344..1c09c709 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -565,7 +565,7 @@ def reload_model_weights(sd_model=None, info=None): def unload_model_weights(sd_model=None, info=None): - from modules import lowvram, devices, sd_hijack + from modules import devices, sd_hijack timer = Timer() if model_data.sd_model: diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py index 7a79925a..9bfe1237 100644 --- a/modules/sd_models_config.py +++ b/modules/sd_models_config.py @@ -1,4 +1,3 @@ -import re import os import torch diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 0fc9f456..3b8e9622 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -1,7 +1,6 @@ from collections import deque import torch import inspect -import einops import k_diffusion.sampling from modules import prompt_parser, devices, sd_samplers_common diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 521e485a..b7176125 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -1,8 +1,5 @@ -import torch -import safetensors.torch import os import collections -from collections import namedtuple from 
modules import paths, shared, devices, script_callbacks, sd_models import glob from copy import deepcopy diff --git a/modules/shared.py b/modules/shared.py index 4631965b..44cd2c0c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -1,12 +1,9 @@ -import argparse import datetime import json import os import sys import time -import requests -from PIL import Image import gradio as gr import tqdm diff --git a/modules/styles.py b/modules/styles.py index 11642075..c22769cf 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -1,18 +1,9 @@ -# We need this so Python doesn't complain about the unknown StableDiffusionProcessing-typehint at runtime -from __future__ import annotations - import csv import os import os.path import typing -import collections.abc as abc -import tempfile import shutil -if typing.TYPE_CHECKING: - # Only import this when code is being type-checked, it doesn't have any effect at runtime - from .processing import StableDiffusionProcessing - class PromptStyle(typing.NamedTuple): name: str diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index d7d8d2e3..7770d22f 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -1,10 +1,8 @@ import cv2 import requests import os -from collections import defaultdict -from math import log, sqrt import numpy as np -from PIL import Image, ImageDraw +from PIL import ImageDraw GREEN = "#0F0" BLUE = "#00F" diff --git a/modules/textual_inversion/image_embedding.py b/modules/textual_inversion/image_embedding.py index 5593f88c..ee0e850a 100644 --- a/modules/textual_inversion/image_embedding.py +++ b/modules/textual_inversion/image_embedding.py @@ -2,7 +2,7 @@ import base64 import json import numpy as np import zlib -from PIL import Image, PngImagePlugin, ImageDraw, ImageFont +from PIL import Image, ImageDraw, ImageFont from fonts.ttf import Roboto import torch from modules.shared import opts diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index da0bcb26..d0cad09e 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -1,13 +1,9 @@ import os from PIL import Image, ImageOps import math -import platform -import sys import tqdm -import time from modules import paths, shared, images, deepbooru -from modules.shared import opts, cmd_opts from modules.textual_inversion import autocrop diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index f753b75f..9ed9ba45 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -1,7 +1,6 @@ import os import sys import traceback -import inspect from collections import namedtuple import torch diff --git a/modules/txt2img.py b/modules/txt2img.py index 16841d0f..f022381c 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -1,18 +1,15 @@ import modules.scripts -from modules import sd_samplers +from modules import sd_samplers, processing from modules.generation_parameters_copypaste import create_override_settings_dict -from modules.processing import StableDiffusionProcessing, Processed, StableDiffusionProcessingTxt2Img, \ - StableDiffusionProcessingImg2Img, process_images from modules.shared import opts, cmd_opts import modules.shared as shared -import modules.processing as processing from modules.ui import plaintext_to_html def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, 
sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, override_settings_texts, *args): override_settings = create_override_settings_dict(override_settings_texts) - p = StableDiffusionProcessingTxt2Img( + p = processing.StableDiffusionProcessingTxt2Img( sd_model=shared.sd_model, outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples, outpath_grids=opts.outdir_grids or opts.outdir_txt2img_grids, @@ -53,7 +50,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step processed = modules.scripts.scripts_txt2img.run(p, *args) if processed is None: - processed = process_images(p) + processed = processing.process_images(p) p.close() diff --git a/modules/ui.py b/modules/ui.py index 6beda76f..f7e57593 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -14,10 +14,10 @@ from PIL import Image, PngImagePlugin from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing, progress -from modules.ui_components import FormRow, FormColumn, FormGroup, ToolButton, FormHTML +from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path, data_path -from modules.shared import opts, cmd_opts, restricted_opts +from modules.shared import opts, cmd_opts import modules.codeformer_model import modules.generation_parameters_copypaste as parameters_copypaste @@ -28,7 +28,6 @@ import modules.shared as shared import modules.styles import modules.textual_inversion.ui from modules import prompt_parser -from modules.images import save_image from modules.sd_hijack import model_hijack from modules.sd_samplers import samplers, samplers_for_img2img from modules.textual_inversion import textual_inversion diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 49e06289..800e467a 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -1,4 +1,3 @@ -import glob import os.path import urllib.parse from pathlib import Path diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index f25639e5..c7dc1154 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -1,5 +1,5 @@ import gradio as gr -from modules import scripts_postprocessing, scripts, shared, gfpgan_model, codeformer_model, ui_common, postprocessing, call_queue +from modules import scripts, shared, ui_common, postprocessing, call_queue import modules.generation_parameters_copypaste as parameters_copypaste diff --git a/modules/upscaler.py b/modules/upscaler.py index 0ad4fe99..777593b0 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -2,8 +2,6 @@ import os from abc import abstractmethod import PIL -import numpy as np -import torch from PIL import Image import modules.shared diff --git a/modules/xlmr.py b/modules/xlmr.py index beab3fdf..e056c3f6 100644 --- a/modules/xlmr.py +++ b/modules/xlmr.py @@ -1,4 +1,4 @@ -from transformers import BertPreTrainedModel,BertModel,BertConfig +from transformers import BertPreTrainedModel, BertConfig import torch.nn as 
nn import torch from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaConfig diff --git a/pyproject.toml b/pyproject.toml index 1e164abc..9caa9ba2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,13 @@ [tool.ruff] +exclude = ["extensions"] + ignore = [ "E501", - "E731", - "E402", # Module level import not at top of file - "F401" # Module imported but unused + + "F401", # Module imported but unused ] -exclude = ["extensions"] + +[tool.ruff.per-file-ignores] +"webui.py" = ["E402"] # Module level import not at top of file \ No newline at end of file diff --git a/scripts/custom_code.py b/scripts/custom_code.py index f36a3675..cc6f0d49 100644 --- a/scripts/custom_code.py +++ b/scripts/custom_code.py @@ -4,7 +4,7 @@ import ast import copy from modules.processing import Processed -from modules.shared import opts, cmd_opts, state +from modules.shared import cmd_opts def convertExpr2Expression(expr): diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index b10fed6c..665dbe89 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -7,9 +7,9 @@ import modules.scripts as scripts import gradio as gr from PIL import Image, ImageDraw -from modules import images, processing, devices +from modules import images from modules.processing import Processed, process_images -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state # this function is taken from https://github.com/parlance-zz/g-diffuser-bot diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py index ddcbd2d3..c0bbecc1 100644 --- a/scripts/poor_mans_outpainting.py +++ b/scripts/poor_mans_outpainting.py @@ -4,9 +4,9 @@ import modules.scripts as scripts import gradio as gr from PIL import Image, ImageDraw -from modules import images, processing, devices +from modules import images, devices from modules.processing import Processed, process_images -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state class Script(scripts.Script): diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index e9b11517..fb06beab 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -1,14 +1,11 @@ import math -from collections import namedtuple -from copy import copy -import random import modules.scripts as scripts import gradio as gr from modules import images -from modules.processing import process_images, Processed -from modules.shared import opts, cmd_opts, state +from modules.processing import process_images +from modules.shared import opts, state import modules.sd_samplers diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 76dc5778..149bc85f 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -1,6 +1,4 @@ import copy -import math -import os import random import sys import traceback @@ -11,8 +9,7 @@ import gradio as gr from modules import sd_samplers from modules.processing import Processed, process_images -from PIL import Image -from modules.shared import opts, cmd_opts, state +from modules.shared import state def process_string_tag(tag): diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index 332d76d9..d873a09c 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -4,9 +4,9 @@ import modules.scripts as scripts import gradio as gr from PIL import Image -from modules import processing, shared, sd_samplers, images, devices +from modules import processing, shared, images, devices from 
modules.processing import Processed -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state class Script(scripts.Script): diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 2ff42ef8..332e0ecd 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -10,15 +10,13 @@ import numpy as np import modules.scripts as scripts import gradio as gr -from modules import images, paths, sd_samplers, processing, sd_models, sd_vae +from modules import images, sd_samplers, processing, sd_models, sd_vae from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state import modules.shared as shared import modules.sd_samplers import modules.sd_models import modules.sd_vae -import glob -import os import re from modules.ui_components import ToolButton diff --git a/webui.py b/webui.py index ec3d2aba..48277075 100644 --- a/webui.py +++ b/webui.py @@ -43,7 +43,7 @@ if ".dev" in torch.__version__ or "+git" in torch.__version__: torch.__long_version__ = torch.__version__ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) -from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states +from modules import shared, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states import modules.codeformer_model as codeformer import modules.face_restoration import modules.gfpgan_model as gfpgan -- cgit v1.2.3 From 4b854806d98cf5ccd48e5cd99c172613da7937f0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 09:02:23 +0300 Subject: F401 fixes for ruff --- extensions-builtin/LDSR/scripts/ldsr_model.py | 4 ++-- modules/cmd_args.py | 2 +- modules/deepbooru.py | 1 - modules/extensions.py | 2 +- modules/gfpgan_model.py | 2 +- modules/models/diffusion/uni_pc/__init__.py | 2 +- modules/paths.py | 4 ++-- modules/realesrgan_model.py | 6 +++--- modules/script_loading.py | 1 - modules/sd_hijack_inpainting.py | 2 +- modules/sd_models.py | 4 +--- modules/sd_samplers.py | 2 +- modules/shared.py | 2 +- modules/ui.py | 4 ++-- modules/upscaler.py | 2 +- pyproject.toml | 9 +++++---- webui.py | 8 ++++---- 17 files changed, 27 insertions(+), 30 deletions(-) (limited to 'webui.py') diff --git a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py index e8dc083c..fbbe9005 100644 --- a/extensions-builtin/LDSR/scripts/ldsr_model.py +++ b/extensions-builtin/LDSR/scripts/ldsr_model.py @@ -7,8 +7,8 @@ from basicsr.utils.download_util import load_file_from_url from modules.upscaler import Upscaler, UpscalerData from ldsr_model_arch import LDSR from modules import shared, script_callbacks -import sd_hijack_autoencoder -import sd_hijack_ddpm_v1 +import sd_hijack_autoencoder # noqa: F401 +import sd_hijack_ddpm_v1 # noqa: F401 class UpscalerLDSR(Upscaler): diff --git a/modules/cmd_args.py b/modules/cmd_args.py index d906a571..e01ca655 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -1,6 +1,6 @@ import argparse import os -from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file +from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file # noqa: F401 parser = argparse.ArgumentParser() diff --git a/modules/deepbooru.py 
b/modules/deepbooru.py index 122fce7f..1c4554a2 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -2,7 +2,6 @@ import os import re import torch -from PIL import Image import numpy as np from modules import modelloader, paths, deepbooru_model, devices, images, shared diff --git a/modules/extensions.py b/modules/extensions.py index 829f8cd9..bc2c0450 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -6,7 +6,7 @@ import time import git from modules import shared -from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path +from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401 extensions = [] diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py index fbe6215a..0131dea4 100644 --- a/modules/gfpgan_model.py +++ b/modules/gfpgan_model.py @@ -78,7 +78,7 @@ def setup_model(dirname): try: from gfpgan import GFPGANer - from facexlib import detection, parsing + from facexlib import detection, parsing # noqa: F401 global user_path global have_gfpgan global gfpgan_constructor diff --git a/modules/models/diffusion/uni_pc/__init__.py b/modules/models/diffusion/uni_pc/__init__.py index e1265e3f..dbb35964 100644 --- a/modules/models/diffusion/uni_pc/__init__.py +++ b/modules/models/diffusion/uni_pc/__init__.py @@ -1 +1 @@ -from .sampler import UniPCSampler +from .sampler import UniPCSampler # noqa: F401 diff --git a/modules/paths.py b/modules/paths.py index acf1894b..5f6474c0 100644 --- a/modules/paths.py +++ b/modules/paths.py @@ -1,8 +1,8 @@ import os import sys -from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir +from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir # noqa: F401 -import modules.safe +import modules.safe # noqa: F401 # data_path = cmd_opts_pre.data diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py index 9ec1adf2..c24d8dbb 100644 --- a/modules/realesrgan_model.py +++ b/modules/realesrgan_model.py @@ -17,9 +17,9 @@ class UpscalerRealESRGAN(Upscaler): self.user_path = path super().__init__() try: - from basicsr.archs.rrdbnet_arch import RRDBNet - from realesrgan import RealESRGANer - from realesrgan.archs.srvgg_arch import SRVGGNetCompact + from basicsr.archs.rrdbnet_arch import RRDBNet # noqa: F401 + from realesrgan import RealESRGANer # noqa: F401 + from realesrgan.archs.srvgg_arch import SRVGGNetCompact # noqa: F401 self.enable = True self.scalers = [] scalers = self.load_models(path) diff --git a/modules/script_loading.py b/modules/script_loading.py index a7d2203f..57b15862 100644 --- a/modules/script_loading.py +++ b/modules/script_loading.py @@ -2,7 +2,6 @@ import os import sys import traceback import importlib.util -from types import ModuleType def load_module(path): diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 344d75c8..058575b7 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -4,7 +4,7 @@ import ldm.models.diffusion.ddpm import ldm.models.diffusion.ddim import ldm.models.diffusion.plms -from ldm.models.diffusion.ddim import DDIMSampler, noise_like +from ldm.models.diffusion.ddim import noise_like from ldm.models.diffusion.sampling_util import norm_thresholding diff --git a/modules/sd_models.py b/modules/sd_models.py index 1c09c709..d1e946a5 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -15,7 +15,6 @@ import ldm.modules.midas as midas from 
ldm.util import instantiate_from_config from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config -from modules.paths import models_path from modules.sd_hijack_inpainting import do_inpainting_hijack from modules.timer import Timer @@ -87,8 +86,7 @@ class CheckpointInfo: try: # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start. - - from transformers import logging, CLIPModel + from transformers import logging, CLIPModel # noqa: F401 logging.set_verbosity_error() except Exception: diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index ff361f22..4f1bf21d 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -1,7 +1,7 @@ from modules import sd_samplers_compvis, sd_samplers_kdiffusion, shared # imports for functions that previously were here and are used by other modules -from modules.sd_samplers_common import samples_to_image_grid, sample_to_image +from modules.sd_samplers_common import samples_to_image_grid, sample_to_image # noqa: F401 all_samplers = [ *sd_samplers_kdiffusion.samplers_data_k_diffusion, diff --git a/modules/shared.py b/modules/shared.py index 44cd2c0c..7d70f041 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -12,7 +12,7 @@ import modules.memmon import modules.styles import modules.devices as devices from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args -from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir +from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 from ldm.models.diffusion.ddpm import LatentDiffusion demo = None diff --git a/modules/ui.py b/modules/ui.py index f7e57593..782b569d 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -10,10 +10,10 @@ import gradio as gr import gradio.routes import gradio.utils import numpy as np -from PIL import Image, PngImagePlugin +from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing, progress +from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path, data_path diff --git a/modules/upscaler.py b/modules/upscaler.py index 777593b0..e145be30 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -41,7 +41,7 @@ class Upscaler: os.makedirs(self.model_path, exist_ok=True) try: - import cv2 + import cv2 # noqa: F401 self.can_tile = True except Exception: pass diff --git a/pyproject.toml b/pyproject.toml index 9caa9ba2..0883c127 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,13 +1,14 @@ [tool.ruff] +target-version = "py310" + exclude = ["extensions"] ignore = [ - "E501", - - "F401", # Module imported but unused + "E501", # Line too long + "E731", # Do not assign a `lambda` expression, use a `def` ] [tool.ruff.per-file-ignores] -"webui.py" = 
["E402"] # Module level import not at top of file \ No newline at end of file +"webui.py" = ["E402"] # Module level import not at top of file diff --git a/webui.py b/webui.py index 48277075..5d5e80b5 100644 --- a/webui.py +++ b/webui.py @@ -16,12 +16,12 @@ from packaging import version import logging logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) -from modules import paths, timer, import_hook, errors +from modules import paths, timer, import_hook, errors # noqa: F401 startup_timer = timer.Timer() import torch -import pytorch_lightning # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them +import pytorch_lightning # noqa: F401 # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning") warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision") @@ -31,12 +31,12 @@ startup_timer.record("import torch") import gradio startup_timer.record("import gradio") -import ldm.modules.encoders.modules +import ldm.modules.encoders.modules # noqa: F401 startup_timer.record("import ldm") from modules import extra_networks, ui_extra_networks_checkpoints from modules import extra_networks_hypernet, ui_extra_networks_hypernets, ui_extra_networks_textual_inversion -from modules.call_queue import wrap_queued_call, queue_lock, wrap_gradio_gpu_call +from modules.call_queue import wrap_queued_call, queue_lock # Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors if ".dev" in torch.__version__ or "+git" in torch.__version__: -- cgit v1.2.3 From 8aa87c564a79965013715d56a5f90d2a34d5d6ee Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 23:41:08 +0300 Subject: add UI to edit defaults allow setting defaults for elements in extensions' tabs fix a problem with ESRGAN upscalers disappearing after UI reload implicit change: HTML element id for train tab from tab_ti to tab_train (will this break things?) 
--- modules/modelloader.py | 27 +++---- modules/ui.py | 122 +++++------------------------ modules/ui_loadsave.py | 208 +++++++++++++++++++++++++++++++++++++++++++++++++ style.css | 4 + webui.py | 6 +- 5 files changed, 242 insertions(+), 125 deletions(-) create mode 100644 modules/ui_loadsave.py (limited to 'webui.py') diff --git a/modules/modelloader.py b/modules/modelloader.py index 25612bf8..2a479bcb 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -116,20 +116,6 @@ def move_files(src_path: str, dest_path: str, ext_filter: str = None): pass -builtin_upscaler_classes = [] -forbidden_upscaler_classes = set() - - -def list_builtin_upscalers(): - builtin_upscaler_classes.clear() - builtin_upscaler_classes.extend(Upscaler.__subclasses__()) - -def forbid_loaded_nonbuiltin_upscalers(): - for cls in Upscaler.__subclasses__(): - if cls not in builtin_upscaler_classes: - forbidden_upscaler_classes.add(cls) - - def load_upscalers(): # We can only do this 'magic' method to dynamically load upscalers if they are referenced, # so we'll try to import any _model.py files before looking in __subclasses__ @@ -145,10 +131,17 @@ def load_upscalers(): datas = [] commandline_options = vars(shared.cmd_opts) - for cls in Upscaler.__subclasses__(): - if cls in forbidden_upscaler_classes: - continue + # some of upscaler classes will not go away after reloading their modules, and we'll end + # up with two copies of those classes. The newest copy will always be the last in the list, + # so we go from end to beginning and ignore duplicates + used_classes = {} + for cls in reversed(Upscaler.__subclasses__()): + classname = str(cls) + if classname not in used_classes: + used_classes[classname] = cls + + for cls in reversed(used_classes.values()): name = cls.__name__ cmd_name = f"{name.lower().replace('upscaler', '')}_models_path" scaler = cls(commandline_options.get(cmd_name, None)) diff --git a/modules/ui.py b/modules/ui.py index 7ee99473..1efb656a 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -13,7 +13,7 @@ import numpy as np from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress +from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path, data_path @@ -86,16 +86,6 @@ def send_gradio_gallery_to_image(x): return None return image_from_url_text(x[0]) -def visit(x, func, path=""): - if hasattr(x, 'children'): - if isinstance(x, gr.Tabs) and x.elem_id is not None: - # Tabs element can't have a label, have to use elem_id instead - func(f"{path}/Tabs@{x.elem_id}", x) - for c in x.children: - visit(c, func, path) - elif x.label is not None: - func(f"{path}/{x.label}", x) - def add_style(name: str, prompt: str, negative_prompt: str): if name is None: @@ -1471,6 +1461,8 @@ def create_ui(): return res + loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file) + components = [] component_dict = {} shared.settings_components = component_dict @@ -1558,6 +1550,9 @@ def create_ui(): current_row.__exit__() current_tab.__exit__() + with gr.TabItem("Defaults", id="defaults", elem_id="settings_tab_defaults"): + loadsave.create_ui() + with 
gr.TabItem("Actions", id="actions", elem_id="settings_tab_actions"): request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications") download_localization = gr.Button(value='Download localization template', elem_id="download_localization") @@ -1631,7 +1626,7 @@ def create_ui(): (extras_interface, "Extras", "extras"), (pnginfo_interface, "PNG Info", "pnginfo"), (modelmerger_interface, "Checkpoint Merger", "modelmerger"), - (train_interface, "Train", "ti"), + (train_interface, "Train", "train"), ] interfaces += script_callbacks.ui_tabs_callback() @@ -1659,6 +1654,16 @@ def create_ui(): with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): interface.render() + for interface, _label, ifid in interfaces: + if ifid in ["extensions", "settings"]: + continue + + loadsave.add_block(interface, ifid) + + loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs) + + loadsave.setup_ui() + if os.path.exists(os.path.join(script_path, "notification.mp3")): gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) @@ -1747,97 +1752,8 @@ def create_ui(): ] ) - ui_config_file = cmd_opts.ui_config_file - ui_settings = {} - settings_count = len(ui_settings) - error_loading = False - - try: - if os.path.exists(ui_config_file): - with open(ui_config_file, "r", encoding="utf8") as file: - ui_settings = json.load(file) - except Exception: - error_loading = True - print("Error loading settings:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - - def loadsave(path, x): - def apply_field(obj, field, condition=None, init_field=None): - key = f"{path}/{field}" - - if getattr(obj, 'custom_script_source', None) is not None: - key = f"customscript/{obj.custom_script_source}/{key}" - - if getattr(obj, 'do_not_save_to_config', False): - return - - saved_value = ui_settings.get(key, None) - if saved_value is None: - ui_settings[key] = getattr(obj, field) - elif condition and not condition(saved_value): - pass - - # this warning is generally not useful; - # print(f'Warning: Bad ui setting value: {key}: {saved_value}; Default value "{getattr(obj, field)}" will be used instead.') - else: - setattr(obj, field, saved_value) - if init_field is not None: - init_field(saved_value) - - if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number, gr.Dropdown, ToolButton] and x.visible: - apply_field(x, 'visible') - - if type(x) == gr.Slider: - apply_field(x, 'value') - apply_field(x, 'minimum') - apply_field(x, 'maximum') - apply_field(x, 'step') - - if type(x) == gr.Radio: - apply_field(x, 'value', lambda val: val in x.choices) - - if type(x) == gr.Checkbox: - apply_field(x, 'value') - - if type(x) == gr.Textbox: - apply_field(x, 'value') - - if type(x) == gr.Number: - apply_field(x, 'value') - - if type(x) == gr.Dropdown: - def check_dropdown(val): - if getattr(x, 'multiselect', False): - return all(value in x.choices for value in val) - else: - return val in x.choices - - apply_field(x, 'value', check_dropdown, getattr(x, 'init_field', None)) - - def check_tab_id(tab_id): - tab_items = list(filter(lambda e: isinstance(e, gr.TabItem), x.children)) - if type(tab_id) == str: - tab_ids = [t.id for t in tab_items] - return tab_id in tab_ids - elif type(tab_id) == int: - return tab_id >= 0 and tab_id < len(tab_items) - else: - return False - - if type(x) == gr.Tabs: - apply_field(x, 'selected', check_tab_id) - - visit(txt2img_interface, loadsave, "txt2img") - visit(img2img_interface, loadsave, 
"img2img") - visit(extras_interface, loadsave, "extras") - visit(modelmerger_interface, loadsave, "modelmerger") - visit(train_interface, loadsave, "train") - - loadsave(f"webui/Tabs@{tabs.elem_id}", tabs) - - if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)): - with open(ui_config_file, "w", encoding="utf8") as file: - json.dump(ui_settings, file, indent=4) + loadsave.dump_defaults() + demo.ui_loadsave = loadsave # Required as a workaround for change() event not triggering when loading values from ui-config.json interp_description.value = update_interp_description(interp_method.value) diff --git a/modules/ui_loadsave.py b/modules/ui_loadsave.py new file mode 100644 index 00000000..728fec9e --- /dev/null +++ b/modules/ui_loadsave.py @@ -0,0 +1,208 @@ +import json +import os + +import gradio as gr + +from modules import errors +from modules.ui_components import ToolButton + + +class UiLoadsave: + """allows saving and restorig default values for gradio components""" + + def __init__(self, filename): + self.filename = filename + self.ui_settings = {} + self.component_mapping = {} + self.error_loading = False + self.finalized_ui = False + + self.ui_defaults_view = None + self.ui_defaults_apply = None + self.ui_defaults_review = None + + try: + if os.path.exists(self.filename): + self.ui_settings = self.read_from_file() + except Exception as e: + self.error_loading = True + errors.display(e, "loading settings") + + def add_component(self, path, x): + """adds component to the registry of tracked components""" + + assert not self.finalized_ui + + def apply_field(obj, field, condition=None, init_field=None): + key = f"{path}/{field}" + + if getattr(obj, 'custom_script_source', None) is not None: + key = f"customscript/{obj.custom_script_source}/{key}" + + if getattr(obj, 'do_not_save_to_config', False): + return + + saved_value = self.ui_settings.get(key, None) + if saved_value is None: + self.ui_settings[key] = getattr(obj, field) + elif condition and not condition(saved_value): + pass + else: + setattr(obj, field, saved_value) + if init_field is not None: + init_field(saved_value) + + if field == 'value' and key not in self.component_mapping: + self.component_mapping[key] = x + + if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number, gr.Dropdown, ToolButton] and x.visible: + apply_field(x, 'visible') + + if type(x) == gr.Slider: + apply_field(x, 'value') + apply_field(x, 'minimum') + apply_field(x, 'maximum') + apply_field(x, 'step') + + if type(x) == gr.Radio: + apply_field(x, 'value', lambda val: val in x.choices) + + if type(x) == gr.Checkbox: + apply_field(x, 'value') + + if type(x) == gr.Textbox: + apply_field(x, 'value') + + if type(x) == gr.Number: + apply_field(x, 'value') + + if type(x) == gr.Dropdown: + def check_dropdown(val): + if getattr(x, 'multiselect', False): + return all(value in x.choices for value in val) + else: + return val in x.choices + + apply_field(x, 'value', check_dropdown, getattr(x, 'init_field', None)) + + def check_tab_id(tab_id): + tab_items = list(filter(lambda e: isinstance(e, gr.TabItem), x.children)) + if type(tab_id) == str: + tab_ids = [t.id for t in tab_items] + return tab_id in tab_ids + elif type(tab_id) == int: + return 0 <= tab_id < len(tab_items) + else: + return False + + if type(x) == gr.Tabs: + apply_field(x, 'selected', check_tab_id) + + def add_block(self, x, path=""): + """adds all components inside a gradio block x to the registry of tracked components""" + + if hasattr(x, 
'children'): + if isinstance(x, gr.Tabs) and x.elem_id is not None: + # Tabs element can't have a label, have to use elem_id instead + self.add_component(f"{path}/Tabs@{x.elem_id}", x) + for c in x.children: + self.add_block(c, path) + elif x.label is not None: + self.add_component(f"{path}/{x.label}", x) + + def read_from_file(self): + with open(self.filename, "r", encoding="utf8") as file: + return json.load(file) + + def write_to_file(self, current_ui_settings): + with open(self.filename, "w", encoding="utf8") as file: + json.dump(current_ui_settings, file, indent=4) + + def dump_defaults(self): + """saves default values to a file unless tjhe file is present and there was an error loading default values at start""" + + if self.error_loading and os.path.exists(self.filename): + return + + self.write_to_file(self.ui_settings) + + def iter_changes(self, current_ui_settings, values): + """ + given a dictionary with defaults from a file and current values from gradio elements, returns + an iterator over tuples of values that are not the same between the file and the current; + tuple contents are: path, old value, new value + """ + + for (path, component), new_value in zip(self.component_mapping.items(), values): + old_value = current_ui_settings.get(path) + + choices = getattr(component, 'choices', None) + if isinstance(new_value, int) and choices: + if new_value >= len(choices): + continue + + new_value = choices[new_value] + + if new_value == old_value: + continue + + if old_value is None and new_value == '' or new_value == []: + continue + + yield path, old_value, new_value + + def ui_view(self, *values): + text = [""] + + for path, old_value, new_value in self.iter_changes(self.read_from_file(), values): + if old_value is None: + old_value = "None" + + text.append(f"") + + if len(text) == 1: + text.append("") + + text.append("") + return "".join(text) + + def ui_apply(self, *values): + num_changed = 0 + + current_ui_settings = self.read_from_file() + + for path, _, new_value in self.iter_changes(current_ui_settings.copy(), values): + num_changed += 1 + current_ui_settings[path] = new_value + + if num_changed == 0: + return "No changes." + + self.write_to_file(current_ui_settings) + + return f"Wrote {num_changed} changes." + + def create_ui(self): + """creates ui elements for editing defaults UI, without adding any logic to them""" + + gr.HTML( + f"This page allows you to change default values in UI elements on other tabs.
" + f"Make your changes, press 'View changes' to review the changed default values,
" + f"then press 'Apply' to write them to {self.filename}.
" + f"New defaults will apply after you restart the UI.
" + ) + + with gr.Row(): + self.ui_defaults_view = gr.Button(value='View changes', elem_id="ui_defaults_view", variant="secondary") + self.ui_defaults_apply = gr.Button(value='Apply', elem_id="ui_defaults_apply", variant="primary") + + self.ui_defaults_review = gr.HTML("") + + def setup_ui(self): + """adds logic to elements created with create_ui; all add_block class must be made before this""" + + assert not self.finalized_ui + self.finalized_ui = True + + self.ui_defaults_view.click(fn=self.ui_view, inputs=list(self.component_mapping.values()), outputs=[self.ui_defaults_review]) + self.ui_defaults_apply.click(fn=self.ui_apply, inputs=list(self.component_mapping.values()), outputs=[self.ui_defaults_review]) diff --git a/style.css b/style.css index b823c7dd..4ac919b5 100644 --- a/style.css +++ b/style.css @@ -414,6 +414,10 @@ table.settings-value-table td{ max-width: 36em; } +.ui-defaults-none{ + color: #aaa !important; +} + /* live preview */ .progressDiv{ position: relative; diff --git a/webui.py b/webui.py index 5d5e80b5..2eecfaa0 100644 --- a/webui.py +++ b/webui.py @@ -181,14 +181,11 @@ def initialize(): gfpgan.setup_model(cmd_opts.gfpgan_models_path) startup_timer.record("setup gfpgan") - modelloader.list_builtin_upscalers() - startup_timer.record("list builtin upscalers") - modules.scripts.load_scripts() startup_timer.record("load scripts") modelloader.load_upscalers() - #startup_timer.record("load upscalers") #Is this necessary? I don't know. + startup_timer.record("load upscalers") modules.sd_vae.refresh_vae_list() startup_timer.record("refresh VAE") @@ -388,7 +385,6 @@ def webui(): localization.list_localizations(cmd_opts.localizations_dir) - modelloader.forbid_loaded_nonbuiltin_upscalers() modules.scripts.reload_scripts() startup_timer.record("load scripts") -- cgit v1.2.3 From 87c3aa7389cea993710c4182f5314e5cea0ad4c6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 11 May 2023 10:09:29 +0300 Subject: return wrap_gradio_gpu_call to webui.py for extensions --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'webui.py') diff --git a/webui.py b/webui.py index 2eecfaa0..293a16cc 100644 --- a/webui.py +++ b/webui.py @@ -36,7 +36,7 @@ startup_timer.record("import ldm") from modules import extra_networks, ui_extra_networks_checkpoints from modules import extra_networks_hypernet, ui_extra_networks_hypernets, ui_extra_networks_textual_inversion -from modules.call_queue import wrap_queued_call, queue_lock +from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, queue_lock # noqa: F401 # Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors if ".dev" in torch.__version__ or "+git" in torch.__version__: -- cgit v1.2.3
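The other half of the 'add UI to edit defaults' commit, the new `modules/ui_loadsave.py`, drives the Defaults settings tab: `add_block` and `add_component` register every tracked gradio element under a path key, and `iter_changes` diffs the values currently shown in the UI against the saved `ui-config.json`, mapping integer dropdown selections back to their choice labels and skipping no-op edits before `ui_apply` writes the result back. A simplified, framework-free sketch of that comparison follows; the `FakeDropdown` class, paths, and sample values are inventions for illustration, while the real helper operates on live gradio components registered through `add_component`:

```python
# Simplified sketch of UiLoadsave.iter_changes: compare saved defaults against the
# values currently shown in the UI, yielding (path, old, new) for real changes only.

class FakeDropdown:
    """Stand-in for a gradio dropdown; only the .choices attribute matters here."""
    def __init__(self, choices):
        self.choices = choices


def iter_changes(component_mapping, saved, values):
    for (path, component), new_value in zip(component_mapping.items(), values):
        old_value = saved.get(path)

        # Integer values for components that expose .choices are treated as indices
        # into the choice list, as in the original helper.
        choices = getattr(component, "choices", None)
        if isinstance(new_value, int) and choices:
            if new_value >= len(choices):
                continue
            new_value = choices[new_value]

        if new_value == old_value:
            continue
        if old_value is None and new_value in ("", []):
            continue  # nothing saved and nothing meaningful entered

        yield path, old_value, new_value


components = {
    "txt2img/Sampling method/value": FakeDropdown(["Euler a", "DPM++ 2M"]),
    "txt2img/Width/value": object(),
}
saved = {"txt2img/Sampling method/value": "Euler a", "txt2img/Width/value": 512}

for change in iter_changes(components, saved, values=[1, 768]):
    print(change)
# ('txt2img/Sampling method/value', 'Euler a', 'DPM++ 2M')
# ('txt2img/Width/value', 512, 768)
```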