| | | |
|---|---|---|
| author | AUTOMATIC <16777216c@gmail.com> | 2023-01-01 21:38:09 +0000 |
| committer | AUTOMATIC <16777216c@gmail.com> | 2023-01-01 21:38:09 +0000 |
| commit | 311354c0bb8930ea939d6aa6b3edd50c69301320 (patch) | |
| tree | 2ab21b1228b45e3feae54cf2fe51a01930f17c8a /modules/textual_inversion/textual_inversion.py | |
| parent | e672cfb07418a1a3130d3bf21c14a0d3819f81fb (diff) | |
fix the issue with training on SD2.0
Diffstat (limited to 'modules/textual_inversion/textual_inversion.py')
-rw-r--r-- | modules/textual_inversion/textual_inversion.py | 3 |
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 66f40367..1e5722e7 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -282,7 +282,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
     return embedding, filename
     scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
-    # dataset loading may take a while, so input validations and early returns should be done before this 
+    # dataset loading may take a while, so input validations and early returns should be done before this
     shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
     old_parallel_processing_allowed = shared.parallel_processing_allowed
@@ -310,7 +310,6 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
     loss_step = 0
     _loss_step = 0 #internal
-
     last_saved_file = "<none>"
     last_saved_image = "<none>"
     forced_filename = "<none>"
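
For context on the code around these hunks: `LearnRateScheduler(learn_rate, steps, initial_step)` consumes the webui's stepped learn-rate syntax, where `learn_rate` can be a single value or a schedule such as `0.005:100, 1e-3:1000, 1e-5` (use each rate until its step boundary). The textual_inversion.py portion of this commit appears to be whitespace cleanup only; the functional SD2.0 fix presumably sits in the other files excluded by this filtered diffstat. Below is a minimal standalone sketch of such a stepped schedule, assuming hypothetical helper names `parse_schedule` and `rate_at`; it illustrates the idea and is not the webui implementation.

```python
# Minimal sketch (not the webui code): parse a stepped learn-rate schedule
# such as "0.005:100, 1e-3:1000, 1e-5" and look up the rate for a step.
# parse_schedule / rate_at are hypothetical names used only for illustration.

def parse_schedule(spec: str, total_steps: int):
    """Turn 'rate:step, rate:step, ..., rate' into [(rate, last_step), ...]."""
    pairs = []
    for chunk in spec.split(","):
        parts = chunk.strip().split(":")
        rate = float(parts[0])
        end = int(parts[1]) if len(parts) > 1 else total_steps  # last entry runs to the end
        pairs.append((rate, end))
    return pairs


def rate_at(pairs, step: int) -> float:
    """Return the learn rate in effect at a given global step."""
    for rate, end in pairs:
        if step < end:
            return rate
    return pairs[-1][0]  # past the schedule: keep the final rate


schedule = parse_schedule("0.005:100, 1e-3:1000, 1e-5", total_steps=2000)
print(rate_at(schedule, 50), rate_at(schedule, 500), rate_at(schedule, 1500))
# 0.005 0.001 1e-05
```

In the real training loop the scheduler roughly applies the current rate by updating the optimizer's parameter-group learning rates as the step count crosses each boundary; the sketch above only shows the lookup side of that behavior.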