From 10137589336199d6185ff3f255d611ff8f3edb88 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Sun, 28 May 2023 14:41:44 +0300
Subject: Mark caption_image_overlay's textfont as deprecated; fix #10778

---
 modules/textual_inversion/image_embedding.py | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

(limited to 'modules/textual_inversion')

diff --git a/modules/textual_inversion/image_embedding.py b/modules/textual_inversion/image_embedding.py
index 5858a55f..81cff7bf 100644
--- a/modules/textual_inversion/image_embedding.py
+++ b/modules/textual_inversion/image_embedding.py
@@ -1,8 +1,10 @@
 import base64
 import json
+import warnings
+
 import numpy as np
 import zlib
-from PIL import Image, ImageDraw, ImageFont
+from PIL import Image, ImageDraw

 import torch

@@ -129,14 +131,17 @@ def extract_image_data_embed(image):


 def caption_image_overlay(srcimage, title, footerLeft, footerMid, footerRight, textfont=None):
+    from modules.images import get_font
+    if textfont:
+        warnings.warn(
+            'passing in a textfont to caption_image_overlay is deprecated and does nothing',
+            DeprecationWarning,
+            stacklevel=2,
+        )
     from math import cos

     image = srcimage.copy()
     fontsize = 32
-    if textfont is None:
-        from modules.images import get_font
-        textfont = get_font(fontsize)
-
     factor = 1.5
     gradient = Image.new('RGBA', (1, image.size[1]), color=(0, 0, 0, 0))
     for y in range(image.size[1]):
@@ -147,12 +152,12 @@ def caption_image_overlay(srcimage, title, footerLeft, footerMid, footerRight, t

     draw = ImageDraw.Draw(image)

-    font = ImageFont.truetype(textfont, fontsize)
+    font = get_font(fontsize)
     padding = 10

     _, _, w, h = draw.textbbox((0, 0), title, font=font)
     fontsize = min(int(fontsize * (((image.size[0]*0.75)-(padding*4))/w)), 72)
-    font = ImageFont.truetype(textfont, fontsize)
+    font = get_font(fontsize)
     _, _, w, h = draw.textbbox((0, 0), title, font=font)
     draw.text((padding, padding), title, anchor='lt', font=font, fill=(255, 255, 255, 230))

@@ -163,7 +168,7 @@ def caption_image_overlay(srcimage, title, footerLeft, footerMid, footerRight, t
     _, _, w, h = draw.textbbox((0, 0), footerRight, font=font)
     fontsize_right = min(int(fontsize * (((image.size[0]/3)-(padding))/w)), 72)

-    font = ImageFont.truetype(textfont, min(fontsize_left, fontsize_mid, fontsize_right))
+    font = get_font(min(fontsize_left, fontsize_mid, fontsize_right))

     draw.text((padding, image.size[1]-padding), footerLeft, anchor='ls', font=font, fill=(255, 255, 255, 230))
     draw.text((image.size[0]/2, image.size[1]-padding), footerMid, anchor='ms', font=font, fill=(255, 255, 255, 230))
--
cgit v1.2.3

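For illustration only, not part of the commit above: a self-contained sketch of the deprecation pattern the hunk introduces. The overlay function below is a stand-in for caption_image_overlay so the snippet runs without the webui; the real function keeps drawing the caption and simply ignores the argument, and the font name passed here is arbitrary.

import warnings


def overlay(srcimage, title, textfont=None):
    if textfont:
        # stacklevel=2 attributes the warning to the caller, matching the patch.
        warnings.warn(
            'passing in a textfont to overlay is deprecated and does nothing',
            DeprecationWarning,
            stacklevel=2,
        )
    return srcimage  # stand-in: the real function draws the caption here


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    overlay(object(), 'title', textfont='Roboto.ttf')

assert any(issubclass(w.category, DeprecationWarning) for w in caught)
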
From 4635f31270d1b5d41ad63815cb400b1ca73ea859 Mon Sep 17 00:00:00 2001
From: klimaleksus
Date: Mon, 29 May 2023 01:09:59 +0500
Subject: Refactor EmbeddingDatabase.register_embedding() to allow unregistering

---
 modules/textual_inversion/textual_inversion.py | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

(limited to 'modules/textual_inversion')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index d489ed1e..cbf94498 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -120,16 +120,29 @@ class EmbeddingDatabase:
         self.embedding_dirs.clear()

     def register_embedding(self, embedding, model):
-        self.word_embeddings[embedding.name] = embedding
-
-        ids = model.cond_stage_model.tokenize([embedding.name])[0]
+        return self.register_embedding_by_name(embedding, model, embedding.name)

+    def register_embedding_by_name(self, embedding, model, name):
+        ids = model.cond_stage_model.tokenize([name])[0]
         first_id = ids[0]
         if first_id not in self.ids_lookup:
             self.ids_lookup[first_id] = []
-
-        self.ids_lookup[first_id] = sorted(self.ids_lookup[first_id] + [(ids, embedding)], key=lambda x: len(x[0]), reverse=True)
-
+        if name in self.word_embeddings:
+            # remove old one from the lookup list
+            lookup = [x for x in self.ids_lookup[first_id] if x[1].name!=name]
+        else:
+            lookup = self.ids_lookup[first_id]
+        if embedding is not None:
+            lookup += [(ids, embedding)]
+        self.ids_lookup[first_id] = sorted(lookup, key=lambda x: len(x[0]), reverse=True)
+        if embedding is None:
+            # unregister embedding with specified name
+            if name in self.word_embeddings:
+                del self.word_embeddings[name]
+            if len(self.ids_lookup[first_id])==0:
+                del self.ids_lookup[first_id]
+            return None
+        self.word_embeddings[name] = embedding
         return embedding

     def get_expected_shape(self):
--
cgit v1.2.3

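For illustration only, not part of the commit above: a sketch of how the refactored method can be used from other code. The names db, model, emb and the helper swap_embedding are placeholders for this note (db would be an EmbeddingDatabase instance, such as the one the webui keeps on sd_hijack.model_hijack.embedding_db); only register_embedding_by_name itself comes from the patch.

def swap_embedding(db, model, name, emb=None):
    if emb is None:
        # Passing embedding=None unregisters `name`: it is removed from
        # word_embeddings and its ids_lookup bucket is deleted once empty.
        return db.register_embedding_by_name(None, model, name)
    # With a real embedding this behaves like register_embedding(), except that
    # the registered name no longer has to equal embedding.name.
    return db.register_embedding_by_name(emb, model, name)
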
From 00dfe27f59727407c5b408a80ff2a262934df495 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Mon, 29 May 2023 08:54:13 +0300
Subject: Add & use modules.errors.print_error where currently printing exception info by hand

---
 modules/textual_inversion/textual_inversion.py | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

(limited to 'modules/textual_inversion')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index d489ed1e..a040a988 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -1,6 +1,4 @@
 import os
-import sys
-import traceback
 from collections import namedtuple

 import torch
@@ -16,6 +14,7 @@ from torch.utils.tensorboard import SummaryWriter

 from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint
 import modules.textual_inversion.dataset
+from modules.errors import print_error
 from modules.textual_inversion.learn_schedule import LearnRateScheduler

 from modules.textual_inversion.image_embedding import embedding_to_b64, embedding_from_b64, insert_image_data_embed, extract_image_data_embed, caption_image_overlay
@@ -207,8 +206,7 @@ class EmbeddingDatabase:

                     self.load_from_file(fullfn, fn)
                 except Exception:
-                    print(f"Error loading embedding {fn}:", file=sys.stderr)
-                    print(traceback.format_exc(), file=sys.stderr)
+                    print_error(f"Error loading embedding {fn}", exc_info=True)
                     continue

     def load_textual_inversion_embeddings(self, force_reload=False):
@@ -632,8 +630,7 @@ Last saved image: {html.escape(last_saved_image)}<br/>
         filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
         save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True)
     except Exception:
-        print(traceback.format_exc(), file=sys.stderr)
-        pass
+        print_error("Error training embedding", exc_info=True)
     finally:
         pbar.leave = False
         pbar.close()
--
cgit v1.2.3

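For illustration only, not part of the commit above: the modules/errors.py half of this commit is hidden by the "limited to 'modules/textual_inversion'" filter, so the print_error defined below is a local stand-in that only mirrors the old hand-written stderr printing, not the repo's actual implementation; the calling pattern is what the hunks above switch to, and the filename in the message is made up.

import sys
import traceback


def print_error(message, *, exc_info=False):
    # Stand-in: print the message to stderr and, when exc_info is set, append
    # the current exception's traceback, as the replaced print() calls did.
    print(message, file=sys.stderr)
    if exc_info:
        print(traceback.format_exc(), file=sys.stderr)


try:
    raise ValueError("simulated bad embedding file")
except Exception:
    print_error("Error loading embedding demo.pt", exc_info=True)
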
From 05933840f0676dd1a90a7e2ad3f2a0672624b2cd Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 31 May 2023 19:56:37 +0300
Subject: rename print_error to report, use it with together with package name

---
 modules/textual_inversion/textual_inversion.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

(limited to 'modules/textual_inversion')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index b3dcb140..8da050ca 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -12,9 +12,8 @@ import numpy as np
 from PIL import Image, PngImagePlugin
 from torch.utils.tensorboard import SummaryWriter

-from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint
+from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors
 import modules.textual_inversion.dataset
-from modules.errors import print_error
 from modules.textual_inversion.learn_schedule import LearnRateScheduler

 from modules.textual_inversion.image_embedding import embedding_to_b64, embedding_from_b64, insert_image_data_embed, extract_image_data_embed, caption_image_overlay
@@ -219,7 +218,7 @@ class EmbeddingDatabase:

                     self.load_from_file(fullfn, fn)
                 except Exception:
-                    print_error(f"Error loading embedding {fn}", exc_info=True)
+                    errors.report(f"Error loading embedding {fn}", exc_info=True)
                     continue

     def load_textual_inversion_embeddings(self, force_reload=False):
@@ -643,7 +642,7 @@ Last saved image: {html.escape(last_saved_image)}<br/>
         filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
         save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True)
     except Exception:
-        print_error("Error training embedding", exc_info=True)
+        errors.report("Error training embedding", exc_info=True)
     finally:
         pbar.leave = False
         pbar.close()
--
cgit v1.2.3
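
For illustration only, not part of the commit above: after the rename, call sites go through the errors module namespace instead of an imported function name. A sketch of the resulting pattern, assuming the webui repository is on the import path; the ValueError and the embedding filename are invented for the example.

from modules import errors

fn = "demo-embedding.pt"
try:
    raise ValueError("simulated failure while loading an embedding")
except Exception:
    # Same call shape as the hunks above: a message plus exc_info=True to
    # include the active exception's traceback in the report.
    errors.report(f"Error loading embedding {fn}", exc_info=True)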