From d3c86e5178725b11a4679097f0aefb0a9fc90014 Mon Sep 17 00:00:00 2001
From: Jared Deckard
Date: Wed, 14 Jun 2023 14:03:44 -0500
Subject: Note the Gradio user in the Exif data

---
 modules/txt2img.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'modules/txt2img.py')

diff --git a/modules/txt2img.py b/modules/txt2img.py
index 2e7d202d..6aa79f23 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -4,10 +4,10 @@ from modules.generation_parameters_copypaste import create_override_settings_dic
 from modules.shared import opts, cmd_opts
 import modules.shared as shared
 from modules.ui import plaintext_to_html
+import gradio as gr
 
 
-
-def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, *args):
+def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args):
     override_settings = create_override_settings_dict(override_settings_texts)
 
     p = processing.StableDiffusionProcessingTxt2Img(
@@ -48,6 +48,8 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
     p.scripts = modules.scripts.scripts_txt2img
     p.script_args = args
 
+    p.user = request.username
+
     if cmd_opts.enable_console_prompts:
         print(f"\ntxt2img: {prompt}", file=shared.progress_print_out)
 
-- cgit v1.2.3


From 44c27ebc7393ea793245aa565ace6c9bf1313980 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Mon, 10 Jul 2023 20:08:23 +0300
Subject: Use closing() with processing classes everywhere

Follows up on #11569
---
 modules/hypernetworks/hypernetwork.py          |  6 ++++--
 modules/img2img.py                             | 20 ++++++++++----------
 modules/textual_inversion/textual_inversion.py |  6 ++++--
 modules/txt2img.py                             | 11 ++++++-----
 4 files changed, 24 insertions(+), 19 deletions(-)

(limited to 'modules/txt2img.py')

diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 51941c11..79670b87 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -3,6 +3,7 @@ import glob
 import html
 import os
 import inspect
+from contextlib import closing
 
 import modules.textual_inversion.dataset
 import torch
@@ -711,8 +712,9 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
 
             preview_text = p.prompt
 
-            processed = processing.process_images(p)
-            image = processed.images[0] if len(processed.images) > 0 else None
+            with closing(p):
+                processed = processing.process_images(p)
+                image = processed.images[0] if len(processed.images) > 0 else None
 
             if unload:
                 shared.sd_model.cond_stage_model.to(devices.cpu)
diff --git a/modules/img2img.py b/modules/img2img.py
index ef87eb0f..4d9a02cc 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -1,4 +1,5 @@
 import os
+from contextlib import closing
 from pathlib import Path
 
 import numpy as np
@@ -217,18 +218,17 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
     if mask:
         p.extra_generation_params["Mask blur"] = mask_blur
 
-    if is_batch:
-        assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
+    with closing(p):
+        if is_batch:
+            assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
 
-        process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)
+            process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)
 
-        processed = Processed(p, [], p.seed, "")
-    else:
-        processed = modules.scripts.scripts_img2img.run(p, *args)
-        if processed is None:
-            processed = process_images(p)
-
-    p.close()
+            processed = Processed(p, [], p.seed, "")
+        else:
+            processed = modules.scripts.scripts_img2img.run(p, *args)
+            if processed is None:
+                processed = process_images(p)
 
     shared.total_tqdm.clear()
 
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index bb6f211c..cbe975b7 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -1,5 +1,6 @@
 import os
 from collections import namedtuple
+from contextlib import closing
 
 import torch
 import tqdm
@@ -584,8 +585,9 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
 
             preview_text = p.prompt
 
-            processed = processing.process_images(p)
-            image = processed.images[0] if len(processed.images) > 0 else None
+            with closing(p):
+                processed = processing.process_images(p)
+                image = processed.images[0] if len(processed.images) > 0 else None
 
             if unload:
                 shared.sd_model.first_stage_model.to(devices.cpu)
diff --git a/modules/txt2img.py b/modules/txt2img.py
index 6aa79f23..d0be2e73 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -1,3 +1,5 @@
+from contextlib import closing
+
 import modules.scripts
 from modules import sd_samplers, processing
 from modules.generation_parameters_copypaste import create_override_settings_dict
@@ -53,12 +55,11 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
     if cmd_opts.enable_console_prompts:
         print(f"\ntxt2img: {prompt}", file=shared.progress_print_out)
 
-    processed = modules.scripts.scripts_txt2img.run(p, *args)
-
-    if processed is None:
-        processed = processing.process_images(p)
+    with closing(p):
+        processed = modules.scripts.scripts_txt2img.run(p, *args)
 
-    p.close()
+        if processed is None:
+            processed = processing.process_images(p)
 
     shared.total_tqdm.clear()
 
-- cgit v1.2.3


From 127635409a7959f6c057a68ccb8e70734cbaf9f3 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sat, 15 Jul 2023 08:07:25 +0300
Subject: add padding and identification to generation log section (Failed to find Loras, Used embeddings, etc...)
---
 modules/img2img.py   |  2 +-
 modules/txt2img.py   |  2 +-
 modules/ui.py        |  3 +--
 modules/ui_common.py |  9 +++++----
 style.css            | 16 ++++++++++------
 5 files changed, 18 insertions(+), 14 deletions(-)

(limited to 'modules/txt2img.py')

diff --git a/modules/img2img.py b/modules/img2img.py
index 664e2688..a811e7a4 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -240,4 +240,4 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
     if opts.do_not_show_images:
         processed.images = []
 
-    return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments)
+    return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments, classname="comments")
diff --git a/modules/txt2img.py b/modules/txt2img.py
index d0be2e73..29d94e8c 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -70,4 +70,4 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
     if opts.do_not_show_images:
         processed.images = []
 
-    return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments)
+    return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments, classname="comments")
diff --git a/modules/ui.py b/modules/ui.py
index 39d226ad..07ecee7b 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -83,8 +83,7 @@ detect_image_size_symbol = '\U0001F4D0' # 📐
 up_down_symbol = '\u2195\ufe0f' # ↕️
 
 
-def plaintext_to_html(text):
-    return ui_common.plaintext_to_html(text)
+plaintext_to_html = ui_common.plaintext_to_html
 
 
 def send_gradio_gallery_to_image(x):
diff --git a/modules/ui_common.py b/modules/ui_common.py
index 57c2d0ad..11eb2a4b 100644
--- a/modules/ui_common.py
+++ b/modules/ui_common.py
@@ -29,9 +29,10 @@ def update_generation_info(generation_info, html_info, img_index):
     return html_info, gr.update()
 
 
-def plaintext_to_html(text):
-    text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
-    return text
+def plaintext_to_html(text, classname=None):
+    content = "<br>\n".join(html.escape(x) for x in text.split('\n'))
+
+    return f"<p class='{classname}'>{content}</p>" if classname else f"<p>{content}</p>"
 
 
 def save_files(js_data, images, do_make_zip, index):
@@ -157,7 +158,7 @@ Requested path was: {f}
 
             with gr.Group():
                 html_info = gr.HTML(elem_id=f'html_info_{tabname}', elem_classes="infotext")
-                html_log = gr.HTML(elem_id=f'html_log_{tabname}')
+                html_log = gr.HTML(elem_id=f'html_log_{tabname}', elem_classes="html-log")
 
                 generation_info = gr.Textbox(visible=False, elem_id=f'generation_info_{tabname}')
                 if tabname == 'txt2img' or tabname == 'img2img':
diff --git a/style.css b/style.css
index 27ea6467..a424067f 100644
--- a/style.css
+++ b/style.css
@@ -227,29 +227,33 @@ button.custom-button{
     align-self: end;
 }
 
-.performance {
+.html-log .comments{
+    padding-top: 0.5em;
+}
+
+.html-log .performance {
     font-size: 0.85em;
     color: #444;
     display: flex;
 }
 
-.performance p{
+.html-log .performance p{
     display: inline-block;
 }
 
-.performance p.time, .performance p.vram, .performance p.time abbr, .performance p.vram abbr {
+.html-log .performance p.time, .performance p.vram, .performance p.time abbr, .performance p.vram abbr {
     margin-bottom: 0;
     color: var(--block-title-text-color);
 }
 
-.performance p.time {
+.html-log .performance p.time {
 }
 
-.performance p.vram {
+.html-log .performance p.vram {
     margin-left: auto;
 }
 
-.performance .measurement{
+.html-log .performance .measurement{
     color: var(--body-text-color);
     font-weight: bold;
 }
-- cgit v1.2.3