author    | Melan <alexleander91@gmail.com> | 2022-10-20 20:37:16 +0000
committer | Melan <alexleander91@gmail.com> | 2022-10-20 20:37:16 +0000
commit    | 8f5912984794c4c69e429c4636e984854d911b6a (patch)
tree      | a5c45c4fc4d212c38f2d0b70cd013d0b2ce69354 /modules/textual_inversion/textual_inversion.py
parent    | a6d593a6b51dc6a8443f2aa5c24caa391a04cd56 (diff)
Some changes to the tensorboard code and hypernetwork support
Diffstat (limited to 'modules/textual_inversion/textual_inversion.py')
-rw-r--r-- | modules/textual_inversion/textual_inversion.py | 45
1 file changed, 27 insertions(+), 18 deletions(-)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index ec8176bf..b1dc2596 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -201,19 +201,30 @@ def write_loss(log_directory, filename, step, epoch_len, values):
             **values,
         })
+def tensorboard_setup(log_directory):
+    os.makedirs(os.path.join(log_directory, "tensorboard"), exist_ok=True)
+    return SummaryWriter(
+            log_dir=os.path.join(log_directory, "tensorboard"),
+            flush_secs=shared.opts.training_tensorboard_flush_every)
+
+def tensorboard_add(tensorboard_writer, loss, global_step, step, learn_rate, epoch_num):
+    tensorboard_add_scaler(tensorboard_writer, "Loss/train", loss, global_step)
+    tensorboard_add_scaler(tensorboard_writer, f"Loss/train/epoch-{epoch_num}", loss, step)
+    tensorboard_add_scaler(tensorboard_writer, "Learn rate/train", learn_rate, global_step)
+    tensorboard_add_scaler(tensorboard_writer, f"Learn rate/train/epoch-{epoch_num}", learn_rate, step)
+
 def tensorboard_add_scaler(tensorboard_writer, tag, value, step):
-    if shared.opts.training_enable_tensorboard:
-        tensorboard_writer.add_scalar(tag=tag,
-            scalar_value=value, global_step=step)
+    tensorboard_writer.add_scalar(tag=tag,
+        scalar_value=value, global_step=step)
 
 def tensorboard_add_image(tensorboard_writer, tag, pil_image, step):
-    if shared.opts.training_enable_tensorboard:
-        # Convert a pil image to a torch tensor
-        img_tensor = torch.as_tensor(np.array(pil_image, copy=True))
-        img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0], len(pil_image.getbands()))
-        img_tensor = img_tensor.permute((2, 0, 1))
+    # Convert a pil image to a torch tensor
+    img_tensor = torch.as_tensor(np.array(pil_image, copy=True))
+    img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0],
+        len(pil_image.getbands()))
+    img_tensor = img_tensor.permute((2, 0, 1))
 
-        tensorboard_writer.add_image(tag, img_tensor, global_step=step)
+    tensorboard_writer.add_image(tag, img_tensor, global_step=step)
 def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
     assert embedding_name, 'embedding not selected'
@@ -268,10 +279,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
     optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate)
 
     if shared.opts.training_enable_tensorboard:
-        os.makedirs(os.path.join(log_directory, "tensorboard"), exist_ok=True)
-        tensorboard_writer = SummaryWriter(
-                log_dir=os.path.join(log_directory, "tensorboard"),
-                flush_secs=shared.opts.training_tensorboard_flush_every)
+        tensorboard_writer = tensorboard_setup(log_directory)
 
     pbar = tqdm.tqdm(enumerate(ds), total=steps-initial_step)
     for i, entries in pbar:
@@ -308,10 +316,8 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
             embedding_yet_to_be_embedded = True
 
         if shared.opts.training_enable_tensorboard:
-            tensorboard_add_scaler(tensorboard_writer, "Loss/train", losses.mean(), embedding.step)
-            tensorboard_add_scaler(tensorboard_writer, f"Loss/train/epoch-{epoch_num}", losses.mean(), epoch_step)
-            tensorboard_add_scaler(tensorboard_writer, "Learn rate/train", scheduler.learn_rate, embedding.step)
-            tensorboard_add_scaler(tensorboard_writer, f"Learn rate/train/epoch-{epoch_num}", scheduler.learn_rate, epoch_step)
+            tensorboard_add(tensorboard_writer, loss=losses.mean(), global_step=embedding.step,
+                step=epoch_step, learn_rate=scheduler.learn_rate, epoch_num=epoch_num)
 
         write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), {
             "loss": f"{losses.mean():.7f}",
@@ -377,7 +383,10 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
                 embedding_yet_to_be_embedded = False
 
             image.save(last_saved_image)
-            tensorboard_add_image(tensorboard_writer, f"Validation at epoch {epoch_num}", image, embedding.step)
+
+            if shared.opts.training_enable_tensorboard and shared.opts.training_tensorboard_save_images:
+                tensorboard_add_image(tensorboard_writer, f"Validation at epoch {epoch_num}",
+                    image, embedding.step)
 
             last_saved_image += f", prompt: {preview_text}"
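For readers who want to try the extracted helpers outside the webui, below is a minimal, self-contained sketch of the same call pattern using torch.utils.tensorboard directly. The log directory, step counts, loss values, and learn rate are made up for illustration; in the actual code the writer is created once per training run and the calls are gated behind shared.opts.training_enable_tensorboard (and shared.opts.training_tensorboard_save_images for image logging), as the hunks above show.

import os

import numpy as np
import torch
from PIL import Image
from torch.utils.tensorboard import SummaryWriter

# Hypothetical stand-ins for the shared.opts values used in the commit.
log_directory = "textual_inversion_logs"
flush_every = 120

# Same setup as tensorboard_setup(): one writer under <log_directory>/tensorboard.
os.makedirs(os.path.join(log_directory, "tensorboard"), exist_ok=True)
writer = SummaryWriter(log_dir=os.path.join(log_directory, "tensorboard"),
                       flush_secs=flush_every)

# Same scalars as tensorboard_add(): global curves plus per-epoch curves.
epoch_len = 5                               # made-up dataset length
for global_step in range(1, 11):            # stand-in for embedding.step
    loss = 1.0 / global_step                # made-up loss value
    epoch_num, epoch_step = divmod(global_step - 1, epoch_len)
    writer.add_scalar(tag="Loss/train", scalar_value=loss, global_step=global_step)
    writer.add_scalar(tag=f"Loss/train/epoch-{epoch_num}", scalar_value=loss, global_step=epoch_step)
    writer.add_scalar(tag="Learn rate/train", scalar_value=5e-3, global_step=global_step)
    writer.add_scalar(tag=f"Learn rate/train/epoch-{epoch_num}", scalar_value=5e-3, global_step=epoch_step)

# Same PIL -> HWC tensor -> CHW permute path as tensorboard_add_image().
pil_image = Image.new("RGB", (64, 64), color="gray")   # made-up preview image
img_tensor = torch.as_tensor(np.array(pil_image, copy=True))
img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0], len(pil_image.getbands()))
img_tensor = img_tensor.permute((2, 0, 1))
writer.add_image("Validation at epoch 0", img_tensor, global_step=10)

writer.close()

Note the design choice in the diff: the enable checks move out of tensorboard_add_scaler and tensorboard_add_image and up to the call sites, so the helpers no longer consult shared.opts on every call, which presumably is what makes them reusable from the hypernetwork training code the commit message mentions.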