From b1c6e39620dd398ae6a2cb1e9236b65a7294cf59 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Thu, 29 Jun 2023 19:25:34 +0900
Subject: starts left

---
 style.css | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

(limited to 'style.css')

diff --git a/style.css b/style.css
index e1df716f..5073f0f0 100644
--- a/style.css
+++ b/style.css
@@ -704,11 +704,24 @@ table.popup-table .link{
     margin: 0;
 }
 
-#available_extensions .date_added{
-    opacity: 0.85;
+#available_extensions .info{
+    margin: 0.5em 0;
+    display: flex;
+    margin-top: auto;
+    opacity: 0.80;
     font-size: 90%;
 }
 
+#available_extensions .date_added{
+    margin-right: auto;
+    display: inline-block;
+}
+
+#available_extensions .star_count{
+    margin-left: auto;
+    display: inline-block;
+}
+
 /* replace original footer with ours */
 footer {
--
cgit v1.2.3


From 95ee0cb18817df3c4fae2e7ba7063b79b0c60b9c Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Fri, 14 Jul 2023 22:51:58 +0300
Subject: restyle time taken/VRAM display

---
 javascript/hints.js   |  2 --
 modules/call_queue.py | 18 +++++++++++++-----
 style.css             | 17 ++++++++++++++---
 3 files changed, 27 insertions(+), 10 deletions(-)

(limited to 'style.css')

diff --git a/javascript/hints.js b/javascript/hints.js
index dc75ce31..41201b2f 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -84,8 +84,6 @@ var titles = {
     "Checkpoint name": "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.",
     "Inpainting conditioning mask strength": "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.",
 
-    "vram": "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).",
-
     "Eta noise seed delta": "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.",
     "Filename word regex": "This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.",
 
diff --git a/modules/call_queue.py b/modules/call_queue.py
index 3b94f8a4..61aa240f 100644
--- a/modules/call_queue.py
+++ b/modules/call_queue.py
@@ -85,9 +85,9 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
         elapsed = time.perf_counter() - t
         elapsed_m = int(elapsed // 60)
         elapsed_s = elapsed % 60
-        elapsed_text = f"{elapsed_s:.2f}s"
+        elapsed_text = f"{elapsed_s:.1f} sec."
         if elapsed_m > 0:
-            elapsed_text = f"{elapsed_m}m "+elapsed_text
+            elapsed_text = f"{elapsed_m} min. "+elapsed_text
 
         if run_memmon:
             mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
@@ -95,14 +95,22 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
             reserved_peak = mem_stats['reserved_peak']
             sys_peak = mem_stats['system_peak']
             sys_total = mem_stats['total']
-            sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
+            sys_pct = sys_peak/max(sys_total, 1) * 100
 
-            vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
+            toltip_a = "Active: peak amount of video memory used during generation (excluding cached data)"
+            toltip_r = "Reserved: total amout of video memory allocated by the Torch library"
+            toltip_sys = "System: peak amout of video memory allocated by all running programs, out of total capacity"
+
+            text_a = f"<abbr title='{toltip_a}'>A</abbr>: <span class='measurement'>{active_peak/1024:.2f} GB</span>"
+            text_r = f"<abbr title='{toltip_r}'>R</abbr>: <span class='measurement'>{reserved_peak/1024:.2f} GB</span>"
+            text_sys = f"<abbr title='{toltip_sys}'>Sys</abbr>: <span class='measurement'>{sys_peak/1024:.1f}/{sys_total/1024:g} GB</span> ({sys_pct:.1f}%)"
+
+            vram_html = f"<p class='vram'>{text_a}, <wbr>{text_r}, <wbr>{text_sys}</p>"
         else:
             vram_html = ''
 
-        res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed_text}</p>{vram_html}</div>"
+        res[-1] += f"<div class='performance'><p class='time'>Time taken: <span class='measurement'>{elapsed_text}</span></p>{vram_html}</div>"
 
         return tuple(res)
 
     return f
diff --git a/modules/ui_common.py b/modules/ui_common.py
--- a/modules/ui_common.py
+++ b/modules/ui_common.py
-def plaintext_to_html(text):
-    text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
-    return text
+def plaintext_to_html(text, classname=None):
+    content = "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')])
+
+    return f"<p class='{classname}'>{content}</p>" if classname else f"<p>{content}</p>"
 
 
 def save_files(js_data, images, do_make_zip, index):
@@ -157,7 +158,7 @@ Requested path was: {f}
 
             with gr.Group():
                 html_info = gr.HTML(elem_id=f'html_info_{tabname}', elem_classes="infotext")
-                html_log = gr.HTML(elem_id=f'html_log_{tabname}')
+                html_log = gr.HTML(elem_id=f'html_log_{tabname}', elem_classes="html-log")
 
                 generation_info = gr.Textbox(visible=False, elem_id=f'generation_info_{tabname}')
                 if tabname == 'txt2img' or tabname == 'img2img':
diff --git a/style.css b/style.css
index 27ea6467..a424067f 100644
--- a/style.css
+++ b/style.css
@@ -227,29 +227,33 @@ button.custom-button{
     align-self: end;
 }
 
-.performance {
+.html-log .comments{
+    padding-top: 0.5em;
+}
+
+.html-log .performance {
     font-size: 0.85em;
     color: #444;
     display: flex;
 }
 
-.performance p{
+.html-log .performance p{
     display: inline-block;
 }
 
-.performance p.time, .performance p.vram, .performance p.time abbr, .performance p.vram abbr {
+.html-log .performance p.time, .performance p.vram, .performance p.time abbr, .performance p.vram abbr {
     margin-bottom: 0;
     color: var(--block-title-text-color);
 }
 
-.performance p.time {
+.html-log .performance p.time {
 }
 
-.performance p.vram {
+.html-log .performance p.vram {
     margin-left: auto;
 }
 
-.performance .measurement{
+.html-log .performance .measurement{
     color: var(--body-text-color);
     font-weight: bold;
 }
--
cgit v1.2.3


From 2b1bae0d755c2d5201f6a6aadeadb5588208d43f Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sat, 15 Jul 2023 08:41:22 +0300
Subject: add textual inversion hashes to infotext

---
 modules/processing.py                          |  7 ++++---
 modules/sd_hijack.py                           |  5 ++++-
 modules/sd_hijack_clip.py                      | 15 ++++++++++++---
 modules/shared.py                              |  1 +
 modules/textual_inversion/textual_inversion.py |  9 ++++++++-
 style.css                                      |  4 ++++
 6 files changed, 33 insertions(+), 8 deletions(-)

(limited to 'style.css')

diff --git a/modules/processing.py b/modules/processing.py
index cd568a20..49441e77 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -732,9 +732,10 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
 
             p.setup_conds()
 
-            if len(model_hijack.comments) > 0:
-                for comment in model_hijack.comments:
-                    comments[comment] = 1
+            for comment in model_hijack.comments:
+                comments[comment] = 1
+
+            p.extra_generation_params.update(model_hijack.extra_generation_params)
 
             if p.n_iter > 1:
                 shared.state.job = f"Batch {n+1} out of {p.n_iter}"
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 3b6f95ce..6b5aae4b 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -147,7 +147,6 @@ def undo_weighted_forward(sd_model):
 
 class StableDiffusionModelHijack:
     fixes = None
-    comments = []
     layers = None
     circular_enabled = False
     clip = None
@@ -156,6 +155,9 @@ class StableDiffusionModelHijack:
     embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase()
 
     def __init__(self):
+        self.extra_generation_params = {}
+        self.comments = []
+
         self.embedding_db.add_embedding_dir(cmd_opts.embeddings_dir)
 
     def apply_optimizations(self, option=None):
@@ -236,6 +238,7 @@ class StableDiffusionModelHijack:
 
     def clear_comments(self):
         self.comments = []
+        self.extra_generation_params = {}
 
     def get_prompt_lengths(self, text):
         if self.clip is None:
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index 3b5a7666..c1d780a3 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -229,9 +229,18 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
             z = self.process_tokens(tokens, multipliers)
             zs.append(z)
 
-        if len(used_embeddings) > 0:
-            embeddings_list = ", ".join([f'{name} [{embedding.checksum()}]' for name, embedding in used_embeddings.items()])
-            self.hijack.comments.append(f"Used embeddings: {embeddings_list}")
+        if opts.textual_inversion_add_hashes_to_infotext and used_embeddings:
+            hashes = []
+            for name, embedding in used_embeddings.items():
+                shorthash = embedding.shorthash
+                if not shorthash:
+                    continue
+
+                name = name.replace(":", "").replace(",", "")
+                hashes.append(f"{name}: {shorthash}")
+
+            if hashes:
+                self.hijack.extra_generation_params["TI hashes"] = ", ".join(hashes)
 
         return torch.hstack(zs)
 
diff --git a/modules/shared.py b/modules/shared.py
index 48478a68..a32fd4ed 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -472,6 +472,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), {
     "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"),
     "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"),
     "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_restart(),
+    "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"),
     "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks),
 }))
 
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index cbe975b7..38e072a8 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -13,7 +13,7 @@ import numpy as np
 from PIL import Image, PngImagePlugin
 from torch.utils.tensorboard import SummaryWriter
 
-from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors
+from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors, hashes
 import modules.textual_inversion.dataset
 from modules.textual_inversion.learn_schedule import LearnRateScheduler
 
@@ -49,6 +49,8 @@ class Embedding:
         self.sd_checkpoint_name = None
         self.optimizer_state_dict = None
         self.filename = None
+        self.hash = None
+        self.shorthash = None
 
     def save(self, filename):
         embedding_data = {
@@ -82,6 +84,10 @@ class Embedding:
             self.cached_checksum = f'{const_hash(self.vec.reshape(-1) * 100) & 0xffff:04x}'
         return self.cached_checksum
 
+    def set_hash(self, v):
+        self.hash = v
+        self.shorthash = self.hash[0:12]
+
 
 class DirWithTextualInversionEmbeddings:
     def __init__(self, path):
@@ -199,6 +205,7 @@ class EmbeddingDatabase:
         embedding.vectors = vec.shape[0]
         embedding.shape = vec.shape[-1]
         embedding.filename = path
+        embedding.set_hash(hashes.sha256(embedding.filename, "textual_inversion/" + name) or '')
 
         if self.expected_shape == -1 or self.expected_shape == embedding.shape:
             self.register_embedding(embedding, shared.sd_model)
diff --git a/style.css b/style.css
index a424067f..9e13d7fd 100644
--- a/style.css
+++ b/style.css
@@ -231,6 +231,10 @@ button.custom-button{
     padding-top: 0.5em;
 }
 
+.html-log .comments:empty{
+    padding-top: 0;
+}
+
 .html-log .performance {
     font-size: 0.85em;
     color: #444;
--
cgit v1.2.3


From e5d3ae2bf4e9d39c35e6edc96d6449fd42528e55 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sat, 15 Jul 2023 20:39:04 +0300
Subject: user metadata system for custom networks

---
 extensions-builtin/Lora/ui_extra_networks_lora.py |   2 +-
 html/extra-networks-card.html                     |   8 +-
 javascript/extraNetworks.js                       |  37 ++++-
 modules/ui_extra_networks.py                      |  54 ++++++-
 modules/ui_extra_networks_checkpoints.py          |   2 +-
 modules/ui_extra_networks_hypernets.py            |   6 +-
 modules/ui_extra_networks_user_metadata.py        | 169 ++++++++++++++++++++++
 style.css                                         |  56 +++++--
 8 files changed, 300 insertions(+), 34 deletions(-)
 create mode 100644 modules/ui_extra_networks_user_metadata.py

(limited to 'style.css')

diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py
index da49790b..29b16c1c 100644
--- a/extensions-builtin/Lora/ui_extra_networks_lora.py
+++ b/extensions-builtin/Lora/ui_extra_networks_lora.py
@@ -20,7 +20,7 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
 
             yield {
                 "name": name,
-                "filename": path,
+                "filename": lora_on_disk.filename,
                 "preview": self.find_preview(path),
                 "description": self.find_description(path),
                 "search_term": self.search_terms_from_path(lora_on_disk.filename),
diff --git a/html/extra-networks-card.html b/html/extra-networks-card.html
index 68a84c3a..fb787ffe 100644
--- a/html/extra-networks-card.html
+++ b/html/extra-networks-card.html
@@ -1,11 +1,11 @@