path: root/modules/hypernetworks
author     TinkTheBoush <TinkTheBoush@github.com>    2022-11-04 10:39:03 +0000
committer  TinkTheBoush <TinkTheBoush@github.com>    2022-11-04 10:39:03 +0000
commit     821e2b883dbb42a187bc37379175cd55b7cd7e81 (patch)
tree       dae5c1757dbdc7130ebe16012c6d1bcf36f37223 /modules/hypernetworks
parent     af6fba247553e670ef5e2dcc1866279f9f065d6d (diff)
Move the shuffle_tags option into the Training settings
Diffstat (limited to 'modules/hypernetworks')
-rw-r--r--  modules/hypernetworks/hypernetwork.py  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 7630fb81..a11e01d6 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -331,7 +331,7 @@ def report_statistics(loss_info:dict):
-def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, shuffle_tags, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
     # images allows training previews to have infotext. Importing it at the top causes a circular import problem.
     from modules import images
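The in-function import above is the standard way to break an import cycle: the name is resolved when the function runs, after both modules have finished loading. A minimal sketch of the pattern, using hypothetical modules a and b (not part of this repo), where b also imports a at load time:

# a.py (hypothetical)
def render_preview():
    # Deferred import: by the time this runs, b has finished loading,
    # so the a -> b -> a cycle never occurs during module import.
    from b import save_image
    save_image("preview.png")

# b.py (hypothetical)
import a  # safe at load time: a no longer needs b until render_preview() is called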
@@ -376,7 +376,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
     # dataset loading may take a while, so input validations and early returns should be done before this
     shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
     with torch.autocast("cuda"):
-        ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, shuffle_tags=shuffle_tags, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size)
+        ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size)
     if unload:
         shared.sd_model.cond_stage_model.to(devices.cpu)
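This diff only removes the shuffle_tags parameter from the training path; per the commit title, the option presumably moves to the shared Training settings and is read from there by the dataset code instead of being threaded through every call. A hedged sketch of what the receiving side could look like (the shared.opts.shuffle_tags attribute and the helper name are assumptions, not shown in this commit):

import random

from modules import shared  # webui's global options object

def apply_tag_shuffle(caption):
    # Hypothetical helper: assumes the option now lives on shared.opts,
    # toggled from the Training settings page rather than passed as an argument.
    if not getattr(shared.opts, "shuffle_tags", False):
        return caption
    tags = [tag.strip() for tag in caption.split(",")]
    random.shuffle(tags)
    return ", ".join(tags)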