From fa9d2ac2ff7cf6fbc73525190bd7fde724ec1fb3 Mon Sep 17 00:00:00 2001
From: Jared Deckard
Date: Wed, 14 Jun 2023 13:53:13 -0500
Subject: Fix gradio special args in the call queue

---
 modules/call_queue.py | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'modules/call_queue.py')

diff --git a/modules/call_queue.py b/modules/call_queue.py
index 447bb764..64ebf868 100644
--- a/modules/call_queue.py
+++ b/modules/call_queue.py
@@ -1,3 +1,4 @@
+from functools import wraps
 import html
 import sys
 import threading
@@ -20,6 +21,7 @@ def wrap_queued_call(func):
 
 
 def wrap_gradio_gpu_call(func, extra_outputs=None):
+    @wraps(func)
     def f(*args, **kwargs):
 
         # if the first argument is a string that says "task(...)", it is treated as a job id
@@ -47,6 +49,7 @@ def wrap_gradio_gpu_call(func, extra_outputs=None):
 
 
 def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
+    @wraps(func)
     def f(*args, extra_outputs_array=extra_outputs, **kwargs):
         run_memmon = shared.opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled and add_stats
         if run_memmon:
--
cgit v1.2.3
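The @wraps fix matters because gradio resolves its special arguments (such as a gr.Request parameter) by inspecting the signature of the callable it is handed; a bare inner wrapper that only exposes *args, **kwargs hides the original parameters from that inspection. A minimal standard-library sketch of the mechanism, where the handler below is a hypothetical stand-in rather than webui code:

    import inspect
    from functools import wraps

    def plain_wrapper(func):
        def f(*args, **kwargs):
            return func(*args, **kwargs)
        return f

    def wraps_wrapper(func):
        @wraps(func)  # copies __name__/__doc__ and sets __wrapped__ on f
        def f(*args, **kwargs):
            return func(*args, **kwargs)
        return f

    def handler(prompt: str, request=None):  # hypothetical event handler
        return prompt

    # Without @wraps the wrapper hides the real parameters:
    print(inspect.signature(plain_wrapper(handler)))   # (*args, **kwargs)
    # With @wraps, inspect.signature follows __wrapped__ to the original:
    print(inspect.signature(wraps_wrapper(handler)))   # (prompt: str, request=None)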
From f44feb6a10aacc6a5ff4c9275fba2546b2858935 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Fri, 30 Jun 2023 13:11:31 +0300
Subject: Add job argument to State.begin()

---
 modules/api/api.py        | 14 +++++++-------
 modules/call_queue.py     |  2 +-
 modules/extras.py         |  3 +--
 modules/interrogate.py    |  3 +--
 modules/postprocessing.py |  3 +--
 modules/shared.py         |  4 ++--
 6 files changed, 13 insertions(+), 16 deletions(-)

(limited to 'modules/call_queue.py')

diff --git a/modules/api/api.py b/modules/api/api.py
index 279c384a..3ea099ad 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -327,7 +327,7 @@ class Api:
             p.outpath_grids = opts.outdir_txt2img_grids
             p.outpath_samples = opts.outdir_txt2img_samples
 
-            shared.state.begin()
+            shared.state.begin(job="scripts_txt2img")
             if selectable_scripts is not None:
                 p.script_args = script_args
                 processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
@@ -384,7 +384,7 @@ class Api:
             p.outpath_grids = opts.outdir_img2img_grids
             p.outpath_samples = opts.outdir_img2img_samples
 
-            shared.state.begin()
+            shared.state.begin(job="scripts_img2img")
             if selectable_scripts is not None:
                 p.script_args = script_args
                 processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
@@ -599,7 +599,7 @@ class Api:
 
     def create_embedding(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="create_embedding")
             filename = create_embedding(**args) # create empty embedding
             sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() # reload embeddings so new one can be immediately used
             shared.state.end()
@@ -610,7 +610,7 @@ class Api:
 
     def create_hypernetwork(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="create_hypernetwork")
             filename = create_hypernetwork(**args) # create empty embedding
             shared.state.end()
             return models.CreateResponse(info=f"create hypernetwork filename: {filename}")
@@ -620,7 +620,7 @@ class Api:
 
     def preprocess(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="preprocess")
             preprocess(**args) # quick operation unless blip/booru interrogation is enabled
             shared.state.end()
             return models.PreprocessResponse(info = 'preprocess complete')
@@ -636,7 +636,7 @@ class Api:
 
     def train_embedding(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="train_embedding")
             apply_optimizations = shared.opts.training_xattention_optimizations
             error = None
             filename = ''
@@ -657,7 +657,7 @@ class Api:
 
     def train_hypernetwork(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="train_hypernetwork")
             shared.loaded_hypernetworks = []
             apply_optimizations = shared.opts.training_xattention_optimizations
             error = None
diff --git a/modules/call_queue.py b/modules/call_queue.py
index 69bf63d2..3b94f8a4 100644
--- a/modules/call_queue.py
+++ b/modules/call_queue.py
@@ -30,7 +30,7 @@ def wrap_gradio_gpu_call(func, extra_outputs=None):
             id_task = None
 
         with queue_lock:
-            shared.state.begin()
+            shared.state.begin(job=id_task)
             progress.start_task(id_task)
 
             try:
diff --git a/modules/extras.py b/modules/extras.py
index 830b53aa..e9c0263e 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -73,8 +73,7 @@ def to_half(tensor, enable):
 
 
 def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format, config_source, bake_in_vae, discard_weights, save_metadata):
-    shared.state.begin()
-    shared.state.job = 'model-merge'
+    shared.state.begin(job="model-merge")
 
     def fail(message):
         shared.state.textinfo = message
diff --git a/modules/interrogate.py b/modules/interrogate.py
index 9b2c5b60..a3ae1dd5 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -184,8 +184,7 @@ class InterrogateModels:
     def interrogate(self, pil_image):
         res = ""
-        shared.state.begin()
-        shared.state.job = 'interrogate'
+        shared.state.begin(job="interrogate")
 
         try:
             if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
                 lowvram.send_everything_to_cpu()
diff --git a/modules/postprocessing.py b/modules/postprocessing.py
index 736315e2..544b2f72 100644
--- a/modules/postprocessing.py
+++ b/modules/postprocessing.py
@@ -9,8 +9,7 @@ from modules.shared import opts
 def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True):
     devices.torch_gc()
 
-    shared.state.begin()
-    shared.state.job = 'extras'
+    shared.state.begin(job="extras")
 
     image_data = []
     image_names = []
diff --git a/modules/shared.py b/modules/shared.py
index 203ee1b9..7df2879c 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -173,7 +173,7 @@ class State:
 
         return obj
 
-    def begin(self):
+    def begin(self, job: str = "(unknown)"):
         self.sampling_step = 0
         self.job_count = -1
         self.processing_has_refined_job_count = False
@@ -187,7 +187,7 @@ class State:
         self.interrupted = False
         self.textinfo = None
         self.time_start = time.time()
-
+        self.job = job
         devices.torch_gc()
 
     def end(self):
--
cgit v1.2.3
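The job argument collapses what used to be two statements at every call site (shared.state.begin() followed by shared.state.job = '...') into a single call, so the label can no longer be forgotten or set late. A trimmed-down sketch of the pattern, not the full webui State class:

    import time

    class State:
        job = ""
        job_count = -1
        time_start = None

        def begin(self, job: str = "(unknown)"):
            self.job_count = -1
            self.time_start = time.time()
            self.job = job  # the label is now set inside begin() itself

        def end(self):
            self.job = ""
            self.job_count = 0

    state = State()

    # old pattern: state.begin(); state.job = "interrogate"
    # new pattern:
    state.begin(job="interrogate")
    print(state.job)  # interrogate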
From 95ee0cb18817df3c4fae2e7ba7063b79b0c60b9c Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Fri, 14 Jul 2023 22:51:58 +0300
Subject: restyle time taken/VRAM display

---
 javascript/hints.js   |  2 --
 modules/call_queue.py | 18 +++++++++++++-----
 style.css             | 17 ++++++++++++++---
 3 files changed, 27 insertions(+), 10 deletions(-)

(limited to 'modules/call_queue.py')

diff --git a/javascript/hints.js b/javascript/hints.js
index dc75ce31..41201b2f 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -84,8 +84,6 @@ var titles = {
     "Checkpoint name": "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.",
     "Inpainting conditioning mask strength": "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.",
 
-    "vram": "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).",
-
     "Eta noise seed delta": "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.",
 
     "Filename word regex": "This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.",
diff --git a/modules/call_queue.py b/modules/call_queue.py
index 3b94f8a4..61aa240f 100644
--- a/modules/call_queue.py
+++ b/modules/call_queue.py
@@ -85,9 +85,9 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
         elapsed = time.perf_counter() - t
         elapsed_m = int(elapsed // 60)
         elapsed_s = elapsed % 60
-        elapsed_text = f"{elapsed_s:.2f}s"
+        elapsed_text = f"{elapsed_s:.1f} sec."
         if elapsed_m > 0:
-            elapsed_text = f"{elapsed_m}m "+elapsed_text
+            elapsed_text = f"{elapsed_m} min. "+elapsed_text
 
         if run_memmon:
             mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
@@ -95,14 +95,22 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
             reserved_peak = mem_stats['reserved_peak']
             sys_peak = mem_stats['system_peak']
             sys_total = mem_stats['total']
-            sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
+            sys_pct = sys_peak/max(sys_total, 1) * 100
 
-            vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
+            toltip_a = "Active: peak amount of video memory used during generation (excluding cached data)"
+            toltip_r = "Reserved: total amout of video memory allocated by the Torch library "
+            toltip_sys = "System: peak amout of video memory allocated by all running programs, out of total capacity"
+
+            text_a = f"A: <span class='measurement'>{active_peak/1024:.2f} GB</span>"
+            text_r = f"R: <span class='measurement'>{reserved_peak/1024:.2f} GB</span>"
+            text_sys = f"Sys: <span class='measurement'>{sys_peak/1024:.1f}/{sys_total/1024:g} GB</span> ({sys_pct:.1f}%)"
+
+            vram_html = f"<p class='vram'><abbr title='{toltip_a}'>{text_a}</abbr>, <abbr title='{toltip_r}'>{text_r}</abbr>, <abbr title='{toltip_sys}'>{text_sys}</abbr></p>"
         else:
             vram_html = ''
 
         # last item is always HTML
-        res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed_text}</p>{vram_html}</div>"
+        res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr><span class='measurement'>{elapsed_text}</span></p>{vram_html}</div>"
 
         return tuple(res)
diff --git a/style.css b/style.css
index 5073f0f0..27ea6467 100644
--- a/style.css
+++ b/style.css
@@ -230,17 +230,28 @@ button.custom-button{
 .performance {
     font-size: 0.85em;
     color: #444;
+    display: flex;
 }
 
 .performance p{
     display: inline-block;
 }
 
-.performance .time {
-    margin-right: 0;
+.performance p.time, .performance p.vram, .performance p.time abbr, .performance p.vram abbr {
+    margin-bottom: 0;
+    color: var(--block-title-text-color);
 }
 
-.performance .vram {
+.performance p.time {
+}
+
+.performance p.vram {
+    margin-left: auto;
+}
+
+.performance .measurement{
+    color: var(--body-text-color);
+    font-weight: bold;
 }
 
 #txt2img_generate, #img2img_generate {
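The restyled readout swaps the raw MiB dump for GB figures with tooltips and a bold "measurement" style. A small sketch of just the number formatting, using hypothetical peak/total values and leaving the HTML markup out; the mem_stats values are MiB, produced by the ceiling division -(v // -(1024*1024)) over byte counts:

    # Hypothetical peak/total values in MiB, standing in for shared.mem_mon output.
    active_peak, reserved_peak = 3210, 4096
    sys_peak, sys_total = 5120, 8192
    sys_pct = sys_peak / max(sys_total, 1) * 100

    text_a = f"A: {active_peak/1024:.2f} GB"
    text_r = f"R: {reserved_peak/1024:.2f} GB"
    text_sys = f"Sys: {sys_peak/1024:.1f}/{sys_total/1024:g} GB ({sys_pct:.1f}%)"

    print(", ".join([text_a, text_r, text_sys]))
    # prints: A: 3.13 GB, R: 4.00 GB, Sys: 5.0/8 GB (62.5%)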
--
cgit v1.2.3