author | AUTOMATIC1111 <16777216c@gmail.com> | 2023-07-15 05:41:22 +0000 |
---|---|---|
committer | AUTOMATIC1111 <16777216c@gmail.com> | 2023-07-15 05:41:22 +0000 |
commit | 2b1bae0d755c2d5201f6a6aadeadb5588208d43f (patch) | |
tree | acb519133f2d9dee1b98a33d1be15d56c97ef4d9 /modules | |
parent | 127635409a7959f6c057a68ccb8e70734cbaf9f3 (diff) | |
download | stable-diffusion-webui-gfx803-2b1bae0d755c2d5201f6a6aadeadb5588208d43f.tar.gz stable-diffusion-webui-gfx803-2b1bae0d755c2d5201f6a6aadeadb5588208d43f.tar.bz2 stable-diffusion-webui-gfx803-2b1bae0d755c2d5201f6a6aadeadb5588208d43f.zip |
add textual inversion hashes to infotext
Diffstat (limited to 'modules')
-rw-r--r-- | modules/processing.py | 7
-rw-r--r-- | modules/sd_hijack.py | 5
-rw-r--r-- | modules/sd_hijack_clip.py | 15
-rw-r--r-- | modules/shared.py | 1
-rw-r--r-- | modules/textual_inversion/textual_inversion.py | 9
5 files changed, 29 insertions, 8 deletions
diff --git a/modules/processing.py b/modules/processing.py
index cd568a20..49441e77 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -732,9 +732,10 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
p.setup_conds()
- if len(model_hijack.comments) > 0:
- for comment in model_hijack.comments:
- comments[comment] = 1
+ for comment in model_hijack.comments:
+ comments[comment] = 1
+
+ p.extra_generation_params.update(model_hijack.extra_generation_params)
if p.n_iter > 1:
shared.state.job = f"Batch {n+1} out of {p.n_iter}"
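Note on the hunk above: `model_hijack.extra_generation_params` is copied into `p.extra_generation_params`, which webui later serializes into the image's generation-parameters text ("infotext"). Below is a minimal illustrative sketch of that serialization idea, using a hypothetical `format_extra_params` helper and made-up values rather than webui's actual `create_infotext`:

```python
# Illustrative sketch only -- not the actual webui implementation.
# Shows how a dict of extra generation parameters might be folded
# into an infotext-style "Key: value, Key: value" string.

def format_extra_params(extra_generation_params: dict) -> str:
    """Render extra generation parameters as comma-separated Key: value pairs."""
    return ", ".join(f"{k}: {v}" for k, v in extra_generation_params.items())

# Hypothetical example values:
params = {"TI hashes": "my-style-embedding: 1a2b3c4d5e6f", "Seed": 12345}
print(format_extra_params(params))
# TI hashes: my-style-embedding: 1a2b3c4d5e6f, Seed: 12345
```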
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 3b6f95ce..6b5aae4b 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -147,7 +147,6 @@ def undo_weighted_forward(sd_model):
class StableDiffusionModelHijack:
fixes = None
- comments = []
layers = None
circular_enabled = False
clip = None
@@ -156,6 +155,9 @@ class StableDiffusionModelHijack:
embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase()
def __init__(self):
+ self.extra_generation_params = {}
+ self.comments = []
+
self.embedding_db.add_embedding_dir(cmd_opts.embeddings_dir)
def apply_optimizations(self, option=None):
@@ -236,6 +238,7 @@ class StableDiffusionModelHijack:
def clear_comments(self):
self.comments = []
+ self.extra_generation_params = {}
def get_prompt_lengths(self, text):
if self.clip is None:
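Note on the hunks above: `comments` moves from a class attribute to an instance attribute created in `__init__`, next to the new `extra_generation_params` dict, and `clear_comments` now resets both. A toy sketch (not webui code) of why a mutable class attribute is worth avoiding here:

```python
# Toy example, not webui code: why mutable class attributes are risky.

class SharedComments:
    comments = []           # one list shared by every instance

class OwnComments:
    def __init__(self):
        self.comments = []  # fresh list per instance

a, b = SharedComments(), SharedComments()
a.comments.append("warning")
print(b.comments)  # ['warning'] -- leaked across instances

c, d = OwnComments(), OwnComments()
c.comments.append("warning")
print(d.comments)  # [] -- isolated, as intended
```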
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index 3b5a7666..c1d780a3 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -229,9 +229,18 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
z = self.process_tokens(tokens, multipliers)
zs.append(z)
- if len(used_embeddings) > 0:
- embeddings_list = ", ".join([f'{name} [{embedding.checksum()}]' for name, embedding in used_embeddings.items()])
- self.hijack.comments.append(f"Used embeddings: {embeddings_list}")
+ if opts.textual_inversion_add_hashes_to_infotext and used_embeddings:
+ hashes = []
+ for name, embedding in used_embeddings.items():
+ shorthash = embedding.shorthash
+ if not shorthash:
+ continue
+
+ name = name.replace(":", "").replace(",", "")
+ hashes.append(f"{name}: {shorthash}")
+
+ if hashes:
+ self.hijack.extra_generation_params["TI hashes"] = ", ".join(hashes)
return torch.hstack(zs)
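Note on the hunk above: each used embedding with a known short hash contributes a `name: shorthash` pair to a single "TI hashes" entry, with `:` and `,` stripped from names so the comma-separated infotext stays parseable. A self-contained sketch of that formatting, with made-up embedding names and hashes:

```python
# Minimal sketch of the "TI hashes" formatting above, with made-up data.

def format_ti_hashes(used_embeddings: dict) -> str:
    """used_embeddings maps embedding name -> short hash (or '' if unknown)."""
    hashes = []
    for name, shorthash in used_embeddings.items():
        if not shorthash:
            continue  # skip embeddings whose hash was never computed
        # strip characters that would confuse "Key: value, ..." parsing
        name = name.replace(":", "").replace(",", "")
        hashes.append(f"{name}: {shorthash}")
    return ", ".join(hashes)

print(format_ti_hashes({"my-style-embedding": "1a2b3c4d5e6f", "unknown-emb": ""}))
# my-style-embedding: 1a2b3c4d5e6f
```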
diff --git a/modules/shared.py b/modules/shared.py
index 48478a68..a32fd4ed 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -472,6 +472,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), {
"extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"),
"extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"),
"ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_restart(),
+ "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"),
"sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks),
}))
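Note on the hunk above: the new `textual_inversion_add_hashes_to_infotext` option defaults to True and is read through `shared.opts`, as in the sd_hijack_clip.py check earlier. A trivial standalone sketch of that gating pattern, using a stand-in options object instead of webui's `shared.opts`:

```python
# Stand-in for shared.opts, purely to illustrate how the new flag gates behavior.
from types import SimpleNamespace

opts = SimpleNamespace(textual_inversion_add_hashes_to_infotext=True)

used_embeddings = {"my-style-embedding": "1a2b3c4d5e6f"}  # made-up data

if opts.textual_inversion_add_hashes_to_infotext and used_embeddings:
    print("would record TI hashes in the infotext")
```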
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index cbe975b7..38e072a8 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -13,7 +13,7 @@ import numpy as np
from PIL import Image, PngImagePlugin
from torch.utils.tensorboard import SummaryWriter
-from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors
+from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors, hashes
import modules.textual_inversion.dataset
from modules.textual_inversion.learn_schedule import LearnRateScheduler
@@ -49,6 +49,8 @@ class Embedding:
self.sd_checkpoint_name = None
self.optimizer_state_dict = None
self.filename = None
+ self.hash = None
+ self.shorthash = None
def save(self, filename):
embedding_data = {
@@ -82,6 +84,10 @@ class Embedding:
self.cached_checksum = f'{const_hash(self.vec.reshape(-1) * 100) & 0xffff:04x}'
return self.cached_checksum
+ def set_hash(self, v):
+ self.hash = v
+ self.shorthash = self.hash[0:12]
+
class DirWithTextualInversionEmbeddings:
def __init__(self, path):
@@ -199,6 +205,7 @@ class EmbeddingDatabase:
embedding.vectors = vec.shape[0]
embedding.shape = vec.shape[-1]
embedding.filename = path
+ embedding.set_hash(hashes.sha256(embedding.filename, "textual_inversion/" + name) or '')
if self.expected_shape == -1 or self.expected_shape == embedding.shape:
self.register_embedding(embedding, shared.sd_model)
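Note on the hunks above: `set_hash` stores the embedding file's full SHA-256 plus a 12-character short hash, computed via `hashes.sha256` (which also caches results under a `textual_inversion/<name>` label). A self-contained sketch of the same idea using only `hashlib`; the embedding path below is a placeholder:

```python
# Sketch of computing an embedding file's hash and short hash with hashlib.
# Path is illustrative; webui's hashes.sha256 additionally caches results.
import hashlib

def file_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

full_hash = file_sha256("embeddings/my-embedding.pt")  # placeholder path
shorthash = full_hash[0:12]  # same truncation as Embedding.set_hash
print(f"my-embedding: {shorthash}")
```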