From 9e846083b702a498fdb60accd72f075fa26701d9 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Fri, 14 Oct 2022 14:50:25 +0100
Subject: add vector size to embed text

---
 modules/textual_inversion/textual_inversion.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

(limited to 'modules/textual_inversion/textual_inversion.py')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index e754747e..6f549d62 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -327,10 +327,16 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
                 info.add_text("sd-ti-embedding", embedding_to_b64(data))
 
                 title = "<{}>".format(data.get('name', '???'))
+
+                try:
+                    vectorSize = list(data['string_to_param'].values())[0].shape[0]
+                except Exception as e:
+                    vectorSize = '?'
+
                 checkpoint = sd_models.select_checkpoint()
                 footer_left = checkpoint.model_name
                 footer_mid = '[{}]'.format(checkpoint.hash)
-                footer_right = '{}'.format(embedding.step)
+                footer_right = 'v{} {}s'.format(vectorSize, embedding.step)
 
                 captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right)
                 captioned_image = insert_image_data_embed(captioned_image, data)
--
cgit v1.2.3


From 939f16529a72fe48c2ce3ef31bdaba785925a33c Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Fri, 14 Oct 2022 14:55:05 +0100
Subject: only save 1 image per embedding

---
 modules/textual_inversion/textual_inversion.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'modules/textual_inversion/textual_inversion.py')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 6f549d62..1d697c90 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -242,6 +242,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
 
     last_saved_file = ""
     last_saved_image = ""
+    embedding_yet_to_be_embedded = False
 
     ititial_step = embedding.step or 0
     if ititial_step > steps:
@@ -281,6 +282,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
         if embedding.step > 0 and embedding_dir is not None and embedding.step % save_embedding_every == 0:
             last_saved_file = os.path.join(embedding_dir, f'{embedding_name}-{embedding.step}.pt')
             embedding.save(last_saved_file)
+            embedding_yet_to_be_embedded = True
 
         write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), {
             "loss": f"{losses.mean():.7f}",
@@ -318,7 +320,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
 
             shared.state.current_image = image
 
-            if save_image_with_stored_embedding and os.path.exists(last_saved_file):
+            if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded:
 
                 last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{embedding.step}.png')
 
@@ -342,6 +344,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
                 captioned_image = insert_image_data_embed(captioned_image, data)
 
                 captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info)
+                embedding_yet_to_be_embedded = False
 
             image.save(last_saved_image)
 
--
cgit v1.2.3
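The vector count that the first commit adds to the preview caption comes straight out of the saved embedding dict. Below is a minimal standalone sketch of that lookup, not part of the patches themselves: the dict literal, the 4 x 768 tensor shape, and the step count are invented stand-ins for what torch.load() returns for a textual-inversion .pt file.

import torch

# Hypothetical embedding dict: one token mapped to a [num_vectors, embedding_dim]
# parameter tensor, mimicking the layout the patch indexes into.
data = {'string_to_param': {'*': torch.zeros(4, 768)}}

try:
    # First dimension of the parameter tensor = number of vectors per token.
    vector_size = list(data['string_to_param'].values())[0].shape[0]
except Exception:
    vector_size = '?'  # placeholder when the dict is missing or shaped unexpectedly

# Caption format introduced here; the final commit in this series flips it to '{}v {}s'.
print('v{} {}s'.format(vector_size, 500))  # -> "v4 500s" for a hypothetical 500-step run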
From 9a1dcd78edbf9caf68b9e6286d7b5ca81500e243 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Fri, 14 Oct 2022 18:14:02 +0100
Subject: add webp for embed load

---
 modules/textual_inversion/textual_inversion.py | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'modules/textual_inversion/textual_inversion.py')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 1d697c90..c07bffc3 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -96,6 +96,10 @@ class EmbeddingDatabase:
                 else:
                     data = extract_image_data_embed(embed_image)
                     name = data.get('name', name)
+            elif filename.upper().endswith('.WEBP'):
+                embed_image = Image.open(path)
+                data = extract_image_data_embed(embed_image)
+                name = data.get('name', name)
             else:
                 data = torch.load(path, map_location="cpu")
 
--
cgit v1.2.3


From ddf6899df0cf87d4da77cb2ce223061f4a5edf18 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Fri, 14 Oct 2022 18:23:20 +0100
Subject: generalise to popular lossless formats

---
 modules/textual_inversion/textual_inversion.py | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

(limited to 'modules/textual_inversion/textual_inversion.py')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index c07bffc3..b99df3b1 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -88,18 +88,14 @@ class EmbeddingDatabase:
 
             data = []
 
-            if filename.upper().endswith('.PNG'):
+            if os.path.splitext(filename.upper())[-1] in ['.PNG', '.WEBP', '.JXL', '.AVIF']:
                 embed_image = Image.open(path)
-                if 'sd-ti-embedding' in embed_image.text:
+                if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text:
                     data = embedding_from_b64(embed_image.text['sd-ti-embedding'])
                     name = data.get('name', name)
                 else:
                     data = extract_image_data_embed(embed_image)
                     name = data.get('name', name)
-            elif filename.upper().endswith('.WEBP'):
-                embed_image = Image.open(path)
-                data = extract_image_data_embed(embed_image)
-                name = data.get('name', name)
             else:
                 data = torch.load(path, map_location="cpu")
 
--
cgit v1.2.3


From b6e3b96dab94a00f51725f9cc977eebc6b4072ab Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 15 Oct 2022 15:17:21 +0100
Subject: Change vector size footer label

---
 modules/textual_inversion/textual_inversion.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules/textual_inversion/textual_inversion.py')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index b99df3b1..2ed345b1 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -338,7 +338,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
                 checkpoint = sd_models.select_checkpoint()
                 footer_left = checkpoint.model_name
                 footer_mid = '[{}]'.format(checkpoint.hash)
-                footer_right = 'v{} {}s'.format(vectorSize, embedding.step)
+                footer_right = '{}v {}s'.format(vectorSize, embedding.step)
 
                 captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right)
                 captioned_image = insert_image_data_embed(captioned_image, data)
--
cgit v1.2.3
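Taken together, the two embedding-load commits above collapse the image handling into a single branch: any of the listed lossless containers may carry the embedding either as a base64 payload in a text chunk or embedded in the pixel data, and everything else is loaded as a torch checkpoint. A condensed sketch of the resulting dispatch follows; the function name is invented for illustration, and the module's own helpers (embedding_from_b64, extract_image_data_embed) are passed in as arguments rather than imported.

import os

import torch
from PIL import Image

# Extensions accepted by the generalised image branch.
LOSSLESS_IMAGE_EXTS = ('.PNG', '.WEBP', '.JXL', '.AVIF')


def read_embedding(path, filename, embedding_from_b64, extract_image_data_embed):
    """Return (name, data) for an embedding stored in an image or a tensor file."""
    name = os.path.splitext(filename)[0]

    if os.path.splitext(filename.upper())[-1] in LOSSLESS_IMAGE_EXTS:
        embed_image = Image.open(path)
        # Prefer the base64 payload in a text chunk; formats whose PIL objects
        # have no .text mapping fall through to pixel-embedded data.
        if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text:
            data = embedding_from_b64(embed_image.text['sd-ti-embedding'])
        else:
            data = extract_image_data_embed(embed_image)
        name = data.get('name', name)
    else:
        # Anything else is treated as a plain torch checkpoint.
        data = torch.load(path, map_location="cpu")

    return name, data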