Diffstat (limited to 'modules/textual_inversion')
-rw-r--r--  modules/textual_inversion/dataset.py           | 2
-rw-r--r--  modules/textual_inversion/textual_inversion.py | 6
2 files changed, 6 insertions, 2 deletions
diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py
index 1568b2b8..af9fbcf2 100644
--- a/modules/textual_inversion/dataset.py
+++ b/modules/textual_inversion/dataset.py
@@ -115,7 +115,7 @@ class PersonalizedBase(Dataset):
                     weight /= weight.mean()
                 elif use_weight:
                     # If an image does not have an alpha channel, add an all-ones weight map anyway so we can stack it later
-                    weight = torch.ones([channels] + latent_size)
+                    weight = torch.ones(latent_sample.shape)
                 else:
                     weight = None
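
The dataset.py hunk replaces the hand-assembled shape [channels] + latent_size with latent_sample.shape, so the fallback all-ones weight map always matches the latent sample it will later be stacked with. A minimal sketch of the effect follows; the names channels, latent_size and latent_sample come from the hunk, but the concrete values below are only illustrative assumptions.

import torch

# Stand-in values; the real channels / latent_size / latent_sample come from the
# dataset code above, the numbers here are assumptions for illustration only.
channels = 4
latent_size = [64, 64]
latent_sample = torch.randn(channels, *latent_size)

# Old form: the shape is assembled by hand and must be kept in sync manually.
weight_old = torch.ones([channels] + latent_size)

# New form: the weight map simply mirrors whatever shape the latent sample has,
# so it stacks cleanly with alpha-derived weight maps of the same shape.
weight_new = torch.ones(latent_sample.shape)

assert weight_old.shape == weight_new.shape == latent_sample.shape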
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index c63c7d1d..d2e62e58 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -152,7 +152,11 @@ class EmbeddingDatabase:
                 name = data.get('name', name)
             else:
                 data = extract_image_data_embed(embed_image)
-                name = data.get('name', name)
+                if data:
+                    name = data.get('name', name)
+                else:
+                    # if data is None, this is not an embedding, just a preview image
+                    return
         elif ext in ['.BIN', '.PT']:
             data = torch.load(path, map_location="cpu")
         elif ext in ['.SAFETENSORS']:
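
The textual_inversion.py hunk guards against extract_image_data_embed() returning None for an image that carries no embedded data; previously the unconditional data.get('name', name) would raise AttributeError on such preview images. A minimal, hypothetical sketch of the same guard pattern follows; load_embed_name is not a real webui function, just an illustration of the added branch.

from typing import Optional

def load_embed_name(data: Optional[dict], fallback: str) -> Optional[str]:
    # Sketch of the added guard: if the image carried no embedded data,
    # treat it as a preview image and skip it instead of calling .get() on None.
    if data:
        return data.get('name', fallback)
    return None

# A preview image with no embedded payload is now skipped instead of raising.
assert load_embed_name(None, 'my-embedding') is None
assert load_embed_name({'name': 'style-x'}, 'my-embedding') == 'style-x'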