| author    | Shondoit <shondoit@gmail.com> | 2023-01-12 14:34:11 +0000 |
| committer | Shondoit <shondoit@gmail.com> | 2023-02-15 09:03:59 +0000 |
| commit    | bc50936745e1a349afdc28cf1540109ba20bc71a (patch) |
| tree      | 10c5a91caf895e58dffbf5a2d5d30924651e0bbf |
| parent    | 21642000b33a3069e3408ea1a50239006176badb (diff) |
Call weighted_forward during training
| -rw-r--r-- | modules/hypernetworks/hypernetwork.py          | 3 |
| -rw-r--r-- | modules/textual_inversion/textual_inversion.py | 3 |

2 files changed, 4 insertions, 2 deletions
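The change replaces the plain `shared.sd_model(x, c)` call with `shared.sd_model.weighted_forward(x, c, w)` in both training loops, so each sample in the batch contributes to the loss in proportion to its weight `w = batch.weight`. The `weighted_forward` helper itself is defined outside this diff; as a rough illustration of the idea only, a per-sample weighting step could look like the sketch below (the function name `apply_sample_weights` and the unreduced-loss input are assumptions for illustration, not code from this repository):

    import torch

    def apply_sample_weights(per_sample_loss: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
        # per_sample_loss: unreduced diffusion loss, one value per batch element ([B] or [B, ...])
        # w: per-sample weights taken from batch.weight ([B])
        w = w.to(per_sample_loss.device, dtype=per_sample_loss.dtype)
        while w.dim() < per_sample_loss.dim():
            w = w.unsqueeze(-1)          # broadcast over any remaining dimensions
        return (per_sample_loss * w).mean()

With all weights equal to 1.0 this sketch reduces to the ordinary mean loss, which is what the unweighted `shared.sd_model(x, c)` call computed before.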
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 825a93b2..9c79b7d0 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -640,13 +640,14 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
                 with devices.autocast():
                     x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
+                    w = batch.weight.to(devices.device, non_blocking=pin_memory)
                     if tag_drop_out != 0 or shuffle_tags:
                         shared.sd_model.cond_stage_model.to(devices.device)
                         c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, non_blocking=pin_memory)
                         shared.sd_model.cond_stage_model.to(devices.cpu)
                     else:
                         c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory)
-                    loss = shared.sd_model(x, c)[0] / gradient_step
+                    loss = shared.sd_model.weighted_forward(x, c, w)[0] / gradient_step
                     del x
                     del c
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index a1a406c2..8853c868 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -480,6 +480,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
                 with devices.autocast():
                     x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
+                    w = batch.weight.to(devices.device, non_blocking=pin_memory)
                     c = shared.sd_model.cond_stage_model(batch.cond_text)
                     if is_training_inpainting_model:
@@ -490,7 +491,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
                     else:
                         cond = c
-                    loss = shared.sd_model(x, cond)[0] / gradient_step
+                    loss = shared.sd_model.weighted_forward(x, cond, w)[0] / gradient_step
                     del x
                     _loss_step += loss.item()
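For context, `batch.weight` is expected to be provided by the training dataset alongside `batch.latent_sample` and the conditioning text, and it is moved to the device the same way the latents are. The dataset batch and collate function below are purely hypothetical stand-ins (only the field name `weight` comes from the diff); they show one way such a per-image weight could be attached to a batch:

    import torch
    from dataclasses import dataclass

    @dataclass
    class Batch:
        latent_sample: torch.Tensor   # [B, C, H, W] image latents
        cond_text: list               # prompts for the conditioning model
        weight: torch.Tensor          # [B] per-sample loss weights

    def collate(entries):
        # Each entry is assumed to carry a precomputed latent, a prompt, and an
        # optional float weight (defaulting to 1.0 when none is specified).
        return Batch(
            latent_sample=torch.stack([e["latent"] for e in entries]),
            cond_text=[e["prompt"] for e in entries],
            weight=torch.tensor([float(e.get("weight", 1.0)) for e in entries]),
        )

A batch built this way can be consumed exactly as in the hunks above: move `batch.weight` to the device and pass it as the third argument to `weighted_forward`.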