author      dan <guaneec@gmail.com>    2023-01-07 17:36:00 +0000
committer   dan <guaneec@gmail.com>    2023-01-07 18:57:36 +0000
commit      72497895b9b1948f86d9309fe897cbb70c20ba7e (patch)
tree        d4ee04160f107036da878db8f24b58a562fe7055
parent      669fb18d5222f53ae48abe0f30393d846c50ad91 (diff)
Move batchsize check
 modules/hypernetworks/hypernetwork.py | 2 +-
 modules/textual_inversion/dataset.py  | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index dba52841..32c67ccc 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -456,7 +456,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
     pin_memory = shared.opts.pin_memory
- ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)
+ ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize)
if shared.opts.save_training_settings_to_txt:
saved_params = dict(
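
The hunk above only threads the new keyword argument through the call site; the parameter itself is consumed by the dataset constructor changed in the next hunk. Roughly, the relationship looks like the trimmed illustration below (most of the real keyword arguments are omitted; this is not the upstream signature):

    class PersonalizedBase(Dataset):
        # Illustration only: the real constructor takes many more keyword arguments.
        def __init__(self, data_root, width, height, batch_size=1, varsize=False, **kwargs):
            if varsize:
                # the check moved by this commit now runs before any image is loaded
                assert batch_size == 1, 'variable img size must have batch size 1'
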
diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py
index 7f8a314f..bcad6848 100644
--- a/modules/textual_inversion/dataset.py
+++ b/modules/textual_inversion/dataset.py
@@ -46,6 +46,8 @@ class PersonalizedBase(Dataset):
     assert data_root, 'dataset directory not specified'
assert os.path.isdir(data_root), "Dataset directory doesn't exist"
assert os.listdir(data_root), "Dataset directory is empty"
+ if varsize:
+ assert batch_size == 1, 'variable img size must have batch size 1'
self.image_paths = [os.path.join(data_root, file_path) for file_path in os.listdir(data_root)]
@@ -61,8 +63,6 @@ class PersonalizedBase(Dataset):
     image = Image.open(path).convert('RGB')
if not varsize:
image = image.resize((width, height), PIL.Image.BICUBIC)
- else:
- assert batch_size == 1, 'variable img size must have batch size 1'
except Exception:
continue
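
The assertion exists because a DataLoader can only collate a batch from tensors that share a shape; with varsize enabled the images keep their native resolutions, so any batch larger than one would fail when default_collate tries to stack them. Moving the check out of the per-image loop also lifts it out of the surrounding try/except Exception block, where the AssertionError would have been caught and the image silently skipped instead of stopping the run. A self-contained sketch of the constraint, using plain torch rather than code from this repository:

    import torch
    from torch.utils.data import DataLoader, Dataset

    class VariableSizeImages(Dataset):
        """Toy dataset whose items are image tensors of differing spatial sizes."""

        def __init__(self, sizes, varsize=True, batch_size=1):
            # Same up-front guard that this commit moves into the dataset constructor.
            if varsize:
                assert batch_size == 1, 'variable img size must have batch size 1'
            self.sizes = sizes

        def __len__(self):
            return len(self.sizes)

        def __getitem__(self, i):
            h, w = self.sizes[i]
            return torch.zeros(3, h, w)  # stand-in for a loaded, un-resized image

    ds = VariableSizeImages([(512, 512), (640, 448)], varsize=True, batch_size=1)

    # batch_size=1 works: each batch holds a single image, so no stacking across sizes.
    for batch in DataLoader(ds, batch_size=1):
        print(batch.shape)

    # batch_size=2 would raise inside default_collate, because torch.stack cannot
    # combine a 3x512x512 tensor with a 3x640x448 one.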