author     AngelBottomless <aria1th@naver.com>    2023-09-05 13:38:02 +0000
committer  GitHub <noreply@github.com>            2023-09-05 13:38:02 +0000
commit     47033afa5c08e72b622348b0bcfd71fd1a66e2cb (patch)
tree       2f44a975e9372cdf4397d097d4a9aafb06c4c80e
parent     de5bb4ca88df44362c9263de7334b30156540e21 (diff)
Fix preview for textual inversion training
-rw-r--r--  modules/textual_inversion/textual_inversion.py | 4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index aa79dc09..401a0a2a 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -386,7 +386,7 @@ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, dat
     assert log_directory, "Log directory is empty"
 
 
-def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_name, preview_cfg_scale, preview_seed, preview_width, preview_height):
     from modules import processing
 
     save_embedding_every = save_embedding_every or 0
@@ -590,7 +590,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
                     p.prompt = preview_prompt
                     p.negative_prompt = preview_negative_prompt
                     p.steps = preview_steps
-                    p.sampler_name = sd_samplers.samplers[preview_sampler_index].name
+                    p.sampler_name = sd_samplers.samplers_map[preview_sampler_name.lower()]
                     p.cfg_scale = preview_cfg_scale
                     p.seed = preview_seed
                     p.width = preview_width
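The change replaces an index-based lookup into sd_samplers.samplers with a name-based lookup through sd_samplers.samplers_map, so the preview sampler no longer depends on the position of an entry in the sampler list. Below is a minimal, self-contained sketch of that lookup pattern; the registry entries and the resolve_sampler helper are illustrative assumptions, not the webui's actual sd_samplers module.

# Minimal sketch of name-based sampler resolution (illustrative only; the real
# registry lives in modules/sd_samplers and holds sampler objects, not tuples).
samplers = [
    ("Euler a", ["k_euler_a"]),   # hypothetical entries: (display name, aliases)
    ("Euler", ["k_euler"]),
    ("DPM++ 2M", ["k_dpmpp_2m"]),
]

# Build a lowercase name/alias -> canonical display name map, the role that
# sd_samplers.samplers_map plays in the patched line.
samplers_map = {}
for name, aliases in samplers:
    samplers_map[name.lower()] = name
    for alias in aliases:
        samplers_map[alias.lower()] = name

def resolve_sampler(preview_sampler_name: str) -> str:
    # Keying on the name stays correct if the samplers list is reordered or
    # filtered, which an integer preview_sampler_index would not survive.
    return samplers_map[preview_sampler_name.lower()]

print(resolve_sampler("Euler a"))   # Euler a
print(resolve_sampler("K_EULER"))   # Euler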