commit 56c3f94ba30d76bf680db9bc765624b9a143d769
Merge: 952effa8b1 073c0ebba3
Author:     AUTOMATIC1111 <16777216c@gmail.com>
AuthorDate: 2023-08-04 05:05:21 +0000
Commit:     GitHub <noreply@github.com>
CommitDate: 2023-08-04 05:05:21 +0000

    Merge branch 'dev' into dev
Diffstat (limited to 'modules/sd_hijack_clip.py')
 modules/sd_hijack_clip.py | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index 5443e609..8f29057a 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -161,7 +161,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
                     position += 1
                     continue
 
-                emb_len = int(embedding.vec.shape[0])
+                emb_len = int(embedding.vectors)
                 if len(chunk.tokens) + emb_len > self.chunk_length:
                     next_chunk()
 
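The hunk above swaps `embedding.vec.shape[0]` for `embedding.vectors`. A plausible reading (an assumption, not confirmed by this diff alone): with SDXL support, an embedding's `vec` can be a dict of per-text-encoder tensors rather than a single tensor, so `shape[0]` is no longer valid, while the `vectors` attribute records the token count directly. A minimal sketch of that situation, with a hypothetical stand-in for the webui's Embedding class:

    import torch

    # Hypothetical Embedding stand-in, illustrating why the token count must
    # live in its own attribute once `vec` can be a dict of tensors.
    class Embedding:
        def __init__(self, vec):
            self.vec = vec
            tensor = next(iter(vec.values())) if isinstance(vec, dict) else vec
            self.vectors = tensor.shape[0]  # number of prompt tokens occupied

    single = Embedding(torch.zeros(4, 768))                    # SD1/SD2 style
    dual = Embedding({"clip_l": torch.zeros(4, 768),
                      "clip_g": torch.zeros(4, 1280)})         # SDXL style

    print(single.vectors, dual.vectors)  # 4 4
    # dual.vec.shape[0] would raise AttributeError; `vectors` still works.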
@@ -245,6 +245,8 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
                 hashes.append(f"{name}: {shorthash}")
 
             if hashes:
+                if self.hijack.extra_generation_params.get("TI hashes"):
+                    hashes.append(self.hijack.extra_generation_params.get("TI hashes"))
                 self.hijack.extra_generation_params["TI hashes"] = ", ".join(hashes)
 
         if getattr(self.wrapped, 'return_pooled', False):
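The second hunk stops the "TI hashes" infotext entry from being overwritten when this module runs more than once per generation (for instance once per SDXL text encoder, though the diff itself does not say so): any value already stored is folded into the new list before joining. A standalone sketch of the merge behaviour, using a plain dict in place of `extra_generation_params`:

    def record_ti_hashes(extra_generation_params, hashes):
        # Fold in hashes recorded by an earlier pass instead of overwriting.
        if extra_generation_params.get("TI hashes"):
            hashes.append(extra_generation_params.get("TI hashes"))
        extra_generation_params["TI hashes"] = ", ".join(hashes)

    params = {}
    record_ti_hashes(params, ["emb-a: 1111aaaa"])  # first pass
    record_ti_hashes(params, ["emb-b: 2222bbbb"])  # second pass keeps the first
    print(params["TI hashes"])  # emb-b: 2222bbbb, emb-a: 1111aaaa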
@@ -270,12 +272,17 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
 
         z = self.encode_with_transformers(tokens)
 
+        pooled = getattr(z, 'pooled', None)
+
         # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
         batch_multipliers = torch.asarray(batch_multipliers).to(devices.device)
         original_mean = z.mean()
-        z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
+        z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
         new_mean = z.mean()
-        z *= (original_mean / new_mean)
+        z = z * (original_mean / new_mean)
+
+        if pooled is not None:
+            z.pooled = pooled
 
         return z
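The third hunk replaces the in-place `z *= ...` with out-of-place multiplication and explicitly saves and restores `pooled`. The reason the restore is needed: `z * ...` returns a brand-new tensor object, and an ad-hoc Python attribute such as the `pooled` output attached by `encode_with_transformers` does not follow the data into that new object. A short demonstration of the effect:

    import torch

    z = torch.ones(2, 77, 768)
    z.pooled = torch.ones(2, 768)   # ad-hoc attribute, as the webui attaches it

    pooled = getattr(z, 'pooled', None)
    z = z * 0.5                     # out-of-place: fresh tensor, attribute lost
    print(hasattr(z, 'pooled'))     # False

    if pooled is not None:
        z.pooled = pooled           # re-attach, as the patch does
    print(hasattr(z, 'pooled'))     # True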