path: root/modules/textual_inversion/textual_inversion.py
author    AUTOMATIC <16777216c@gmail.com>    2023-05-13 05:16:37 +0000
committer AUTOMATIC <16777216c@gmail.com>    2023-05-13 05:16:37 +0000
commit    b08500cec8a791ef20082628b49b17df833f5dda (patch)
tree      2d09f3ca93139f082b88463f3a2a43a4ac45526f /modules/textual_inversion/textual_inversion.py
parent    5ab7f213bec2f816f9c5644becb32eb72c8ffb89 (diff)
parent    231562ea13e4f697953bdbabd6b76b22a88c587b (diff)
Merge branch 'release_candidate'
Diffstat (limited to 'modules/textual_inversion/textual_inversion.py')
-rw-r--r--  modules/textual_inversion/textual_inversion.py  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 379df243..4368eb63 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -69,7 +69,7 @@ class Embedding:
                 'hash': self.checksum(),
                 'optimizer_state_dict': self.optimizer_state_dict,
             }
-            torch.save(optimizer_saved_dict, filename + '.optim')
+            torch.save(optimizer_saved_dict, f"{filename}.optim")

     def checksum(self):
         if self.cached_checksum is not None:
@@ -437,8 +437,8 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
     optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate, weight_decay=0.0)
     if shared.opts.save_optimizer_state:
         optimizer_state_dict = None
-        if os.path.exists(filename + '.optim'):
-            optimizer_saved_dict = torch.load(filename + '.optim', map_location='cpu')
+        if os.path.exists(f"{filename}.optim"):
+            optimizer_saved_dict = torch.load(f"{filename}.optim", map_location='cpu')
             if embedding.checksum() == optimizer_saved_dict.get('hash', None):
                 optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)

@@ -599,7 +599,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
                         data = torch.load(last_saved_file)
                         info.add_text("sd-ti-embedding", embedding_to_b64(data))

-                        title = "<{}>".format(data.get('name', '???'))
+                        title = f"<{data.get('name', '???')}>"

                         try:
                             vectorSize = list(data['string_to_param'].values())[0].shape[0]
@@ -608,8 +608,8 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
                         checkpoint = sd_models.select_checkpoint()

                         footer_left = checkpoint.model_name
-                        footer_mid = '[{}]'.format(checkpoint.shorthash)
-                        footer_right = '{}v {}s'.format(vectorSize, steps_done)
+                        footer_mid = f'[{checkpoint.shorthash}]'
+                        footer_right = f'{vectorSize}v {steps_done}s'

                         captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right)
                         captioned_image = insert_image_data_embed(captioned_image, data)
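
The hunks above are pure f-string conversions; the optimizer-state checkpointing behaviour itself is unchanged. For readers unfamiliar with that pattern, the following is a minimal, self-contained sketch of it: the helper names save_optimizer_state and load_optimizer_state are hypothetical and not part of this repository; only the "<filename>.optim" file layout and the checksum guard mirror the code touched by the diff.

# Sketch only, not the repository's API: optimizer state is written next to
# the embedding file as "<filename>.optim" and restored only when the stored
# hash matches the embedding checksum (i.e. the embedding was not replaced).
import os
import torch


def save_optimizer_state(filename, optimizer, checksum):
    # Hypothetical helper mirroring Embedding.save().
    optimizer_saved_dict = {
        'hash': checksum,
        'optimizer_state_dict': optimizer.state_dict(),
    }
    torch.save(optimizer_saved_dict, f"{filename}.optim")


def load_optimizer_state(filename, optimizer, checksum):
    # Hypothetical helper mirroring the guard in train_embedding().
    optim_path = f"{filename}.optim"
    if not os.path.exists(optim_path):
        return False
    optimizer_saved_dict = torch.load(optim_path, map_location='cpu')
    if optimizer_saved_dict.get('hash', None) != checksum:
        return False  # embedding changed on disk; the saved state is stale
    optimizer.load_state_dict(optimizer_saved_dict['optimizer_state_dict'])
    return True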