author    | flamelaw <flamelaw.com3d2@gmail.com> | 2022-11-26 15:35:44 +0000
committer | flamelaw <flamelaw.com3d2@gmail.com> | 2022-11-26 15:35:44 +0000
commit    | 755df94b2aa62eabd96f900e0dd7ddc83c2f692c
tree      | ee02b0a05e868e6e1234f469d1d503bf96d3ccb8
parent    | 1bd57cc9791e2e742f72a3d74d589f2c289e8e92
set TI AdamW default weight decay to 0
-rw-r--r-- | modules/textual_inversion/textual_inversion.py | 2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index fee08e33..b9b1394f 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -283,7 +283,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
     shared.sd_model.first_stage_model.to(devices.cpu)
     embedding.vec.requires_grad = True
-    optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate)
+    optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate, weight_decay=0.0)
     scaler = torch.cuda.amp.GradScaler()
     batch_size = ds.batch_size
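Why this one-parameter change matters (a note on the commit, not part of it): torch.optim.AdamW applies a decoupled weight decay of 1e-2 by default, so without the explicit argument every optimizer step shrinks the trained embedding vector toward zero regardless of the gradient. Passing weight_decay=0.0 disables that decay for the single trained tensor. A minimal standalone sketch of the two constructions; the embedding shape and learning rate here are hypothetical placeholders, not values from the webui:

import torch

# Hypothetical stand-in for embedding.vec: one 768-dim token embedding.
vec = torch.zeros(1, 768, requires_grad=True)

# Before this commit: weight_decay silently defaults to 1e-2,
# pulling the embedding toward zero on every step.
opt_before = torch.optim.AdamW([vec], lr=5e-3)

# As of this commit: decay disabled, so updates are driven only by
# the training gradient.
opt_after = torch.optim.AdamW([vec], lr=5e-3, weight_decay=0.0)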