path: root/modules/textual_inversion/textual_inversion.py
author	AUTOMATIC <16777216c@gmail.com>	2022-10-02 19:59:01 +0000
committer	AUTOMATIC <16777216c@gmail.com>	2022-10-02 19:59:01 +0000
commit	6785331e22d6a488fbf5905fab56d7fec867e038 (patch)
tree	111f08d07e3dfc82a02857155cccdeefe7afdb70	/modules/textual_inversion/textual_inversion.py
parent	c7543d4940da672d970124ae8f2fec9de7bdc1da (diff)
keep textual inversion dataset latents in CPU memory to save a bit of VRAM
Diffstat (limited to 'modules/textual_inversion/textual_inversion.py')
-rw-r--r--	modules/textual_inversion/textual_inversion.py	3
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index d4e250d8..8686f534 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -212,7 +212,10 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps,
 
         with torch.autocast("cuda"):
             c = cond_model([text])
+
+            x = x.to(devices.device)
             loss = shared.sd_model(x.unsqueeze(0), c)[0]
+            del x
 
             losses[embedding.step % losses.shape[0]] = loss.item()
 
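The pattern behind this change is to keep the dataset's latents in CPU memory between steps, copy only the current latent to the GPU for the forward pass, and delete the GPU copy immediately afterwards. A minimal standalone sketch of that pattern is shown below; the names device, model, latent_cpu, and training_step are illustrative stand-ins, not the webui's own (in the repo the device comes from modules.devices and the model from modules.shared.sd_model):

    import torch

    # Hypothetical stand-in for modules.devices.device in the webui.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def training_step(model, latent_cpu, cond):
        # Latents stay in CPU memory between steps; only the one being
        # trained on right now is copied to the GPU, saving VRAM.
        x = latent_cpu.to(device)
        loss = model(x.unsqueeze(0), cond)[0]
        del x  # drop the GPU copy as soon as the forward pass is done
        return loss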