| author | AUTOMATIC1111 <16777216c@gmail.com> | 2023-07-18 15:20:22 +0000 |
|---|---|---|
| committer | AUTOMATIC1111 <16777216c@gmail.com> | 2023-07-18 15:20:22 +0000 |
| commit | eb7c9b58fc2fbab205d4bc9f708800870dcda3fb (patch) | |
| tree | 337bc9e4e6793aa072c2e2a8c10e3a3f7daf6a95 /modules/sd_hijack_open_clip.py | |
| parent | f865d3e11647dfd6c7b2cdf90dde24680e58acd8 (diff) | |
| parent | 7f7db1700bda40ba3171a49b6a4ef38f868b7d0a (diff) | |
Merge branch 'dev' into release_candidate
Diffstat (limited to 'modules/sd_hijack_open_clip.py')
-rw-r--r-- | modules/sd_hijack_open_clip.py | 36 |
1 file changed, 35 insertions, 1 deletion
diff --git a/modules/sd_hijack_open_clip.py b/modules/sd_hijack_open_clip.py
index f733e852..bb0b96c7 100644
--- a/modules/sd_hijack_open_clip.py
+++ b/modules/sd_hijack_open_clip.py
@@ -32,6 +32,40 @@ class FrozenOpenCLIPEmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase):
     def encode_embedding_init_text(self, init_text, nvpt):
         ids = tokenizer.encode(init_text)
         ids = torch.asarray([ids], device=devices.device, dtype=torch.int)
-        embedded = self.wrapped.model.token_embedding.wrapped(ids).squeeze(0)
+        embedded = self.wrapped.model.token_embedding.wrapped(ids.to(self.wrapped.model.token_embedding.wrapped.weight.device)).squeeze(0)
+
+        return embedded
+
+
+class FrozenOpenCLIPEmbedder2WithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase):
+    def __init__(self, wrapped, hijack):
+        super().__init__(wrapped, hijack)
+
+        self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ',</w>'][0]
+        self.id_start = tokenizer.encoder["<start_of_text>"]
+        self.id_end = tokenizer.encoder["<end_of_text>"]
+        self.id_pad = 0
+
+    def tokenize(self, texts):
+        assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip'
+
+        tokenized = [tokenizer.encode(text) for text in texts]
+
+        return tokenized
+
+    def encode_with_transformers(self, tokens):
+        d = self.wrapped.encode_with_transformer(tokens)
+        z = d[self.wrapped.layer]
+
+        pooled = d.get("pooled")
+        if pooled is not None:
+            z.pooled = pooled
+
+        return z
+
+    def encode_embedding_init_text(self, init_text, nvpt):
+        ids = tokenizer.encode(init_text)
+        ids = torch.asarray([ids], device=devices.device, dtype=torch.int)
+        embedded = self.wrapped.model.token_embedding.wrapped(ids.to(self.wrapped.model.token_embedding.wrapped.weight.device)).squeeze(0)
+
+        return embedded
 
         return embedded
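
The one functional change repeated in both classes is the `.to(...)` call in `encode_embedding_init_text`: the token ids are moved to the device of the embedding table's own weights before the lookup, rather than trusting that the global `devices.device` matches, which it may not when the embedding has been offloaded. A minimal sketch of the failure mode and the fix, using a stand-in `nn.Embedding` (the sizes and names below are illustrative, not from the webui codebase):

    import torch
    import torch.nn as nn

    # Stand-in for self.wrapped.model.token_embedding.wrapped: an embedding
    # table whose weights may live on a different device (e.g. CPU after
    # offloading) than the globally configured devices.device.
    embedding = nn.Embedding(49408, 1280)  # vocab size / width chosen for illustration

    ids = torch.asarray([[320, 1125]], dtype=torch.long)  # token ids, possibly on another device

    # Indexing with ids on the wrong device raises a device-mismatch error;
    # moving them to the weight's device first, as the patch does, works
    # regardless of where the table ended up.
    embedded = embedding(ids.to(embedding.weight.device)).squeeze(0)
    print(embedded.shape)  # torch.Size([2, 1280])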
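The new class's `encode_with_transformers` also shows how the pooled text embedding is carried alongside the hidden states: `encode_with_transformer` is expected to return a dict of per-layer outputs plus an optional "pooled" entry, and the pooled vector is attached to the selected layer's tensor as a plain Python attribute. A toy sketch of that pattern (the stub function, dict keys, and shapes are assumptions for illustration):

    import torch

    def encode_with_transformer_stub(tokens):
        # Illustrative stand-in for self.wrapped.encode_with_transformer:
        # returns hidden states keyed by layer plus a pooled sentence vector.
        hidden = torch.randn(tokens.shape[0], tokens.shape[1], 1280)
        return {"penultimate": hidden, "pooled": hidden[:, 0]}

    d = encode_with_transformer_stub(torch.zeros(1, 77, dtype=torch.long))
    z = d["penultimate"]    # the layer named by self.wrapped.layer
    pooled = d.get("pooled")
    if pooled is not None:
        # torch.Tensor instances accept ad-hoc attributes, so downstream
        # code can read z.pooled without changing the return type.
        z.pooled = pooled
    print(z.shape, z.pooled.shape)  # torch.Size([1, 77, 1280]) torch.Size([1, 1280])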