author | Muhammad Rizqi Nur <rizqinur2010@gmail.com> | 2022-10-28 03:31:27 +0000
committer | Muhammad Rizqi Nur <rizqinur2010@gmail.com> | 2022-10-28 03:31:27 +0000
commit | 1618df41bad092e068c61bf510b1e20856821ad5 (patch)
tree | c2a76e626fa09517a0b8488e321e5721ce5f3bf6
parent | a133042c669f666763f5da0f4440abdc839db653 (diff)
Gradient clipping for textual embedding
-rw-r--r-- | modules/textual_inversion/textual_inversion.py | 11
-rw-r--r-- | modules/ui.py | 2
2 files changed, 12 insertions, 1 deletion
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index ff002d3e..7bad73a6 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -206,7 +206,7 @@ def write_loss(log_directory, filename, step, epoch_len, values):
    })
-def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, clip_grad_mode, clip_grad_value, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
assert embedding_name, 'embedding not selected'
shared.state.textinfo = "Initializing textual inversion training..."
@@ -256,6 +256,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
    if ititial_step > steps:
return embedding, filename
+ clip_grad_mode_value = clip_grad_mode == "value"
+ clip_grad_mode_norm = clip_grad_mode == "norm"
+
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate)
@@ -280,6 +283,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
        optimizer.zero_grad()
loss.backward()
+
+ if clip_grad_mode_value:
+ torch.nn.utils.clip_grad_value_(embedding.vec, clip_value=clip_grad_value)
+ elif clip_grad_mode_norm:
+ torch.nn.utils.clip_grad_norm_(embedding.vec, max_norm=clip_grad_value)
+
optimizer.step()
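
For reference, a minimal standalone sketch of what the two clipping modes added above do, using a single trainable tensor in place of embedding.vec; the tensor shape, loss, and clip value below are made-up placeholders and not taken from the commit:

```python
import torch

# Stand-in for embedding.vec (shape is illustrative).
vec = torch.nn.Parameter(torch.randn(2, 768))
optimizer = torch.optim.AdamW([vec], lr=5e-3)

clip_grad_mode = "norm"   # "value", "norm", or anything else to disable clipping
clip_grad_value = 0.1     # placeholder threshold

optimizer.zero_grad()
loss = (vec ** 2).mean()  # placeholder loss
loss.backward()

if clip_grad_mode == "value":
    # Clamp each gradient element into [-clip_grad_value, clip_grad_value].
    torch.nn.utils.clip_grad_value_(vec, clip_value=clip_grad_value)
elif clip_grad_mode == "norm":
    # Rescale the whole gradient so its L2 norm is at most clip_grad_value.
    torch.nn.utils.clip_grad_norm_(vec, max_norm=clip_grad_value)

optimizer.step()
```

Value clipping bounds individual gradient components, while norm clipping preserves the gradient's direction and only shrinks its overall magnitude.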
diff --git a/modules/ui.py b/modules/ui.py
index ba5e92a7..97de7da2 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1409,6 +1409,8 @@ def create_ui(wrap_gradio_gpu_call):
                    training_width,
training_height,
steps,
+ clip_grad_mode,
+ clip_grad_value,
create_image_every,
save_embedding_every,
template_file,
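
The modules/ui.py hunk only forwards the two new values into train_embedding; the components that define clip_grad_mode and clip_grad_value are not shown in this excerpt. A hypothetical sketch of how such inputs could be declared in Gradio (component types, choices, and defaults are assumptions, not taken from the commit):

```python
import gradio as gr

with gr.Blocks() as demo:
    # Hypothetical UI inputs matching the new train_embedding parameters.
    clip_grad_mode = gr.Radio(label="Gradient clipping",
                              choices=["disabled", "value", "norm"],
                              value="disabled")
    clip_grad_value = gr.Number(label="Gradient clip value", value=0.1)
```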