diff options
author | zhaohu xing <32668889+920232796@users.noreply.github.com> | 2022-11-30 02:13:17 +0000 |
---|---|---|
committer | GitHub <noreply@github.com> | 2022-11-30 02:13:17 +0000 |
commit | 0831ab476c626eb796b609acf8771177692bfab7 (patch) | |
tree | ebae98ea40ecc5b34497424bee19310e9fac4068 /modules/sd_hijack_open_clip.py | |
parent | ee3f5ea3eeb31f1ed72e2f0cbed2c00a782497d8 (diff) | |
parent | 4b3c5bc24bffdf429c463a465763b3077fe55eb8 (diff) | |
download | stable-diffusion-webui-gfx803-0831ab476c626eb796b609acf8771177692bfab7.tar.gz stable-diffusion-webui-gfx803-0831ab476c626eb796b609acf8771177692bfab7.tar.bz2 stable-diffusion-webui-gfx803-0831ab476c626eb796b609acf8771177692bfab7.zip |
Merge branch 'master' into master
Diffstat (limited to 'modules/sd_hijack_open_clip.py')
-rw-r--r-- | modules/sd_hijack_open_clip.py | 37 |
1 file changed, 37 insertions, 0 deletions
diff --git a/modules/sd_hijack_open_clip.py b/modules/sd_hijack_open_clip.py
new file mode 100644
index 00000000..f733e852
--- /dev/null
+++ b/modules/sd_hijack_open_clip.py
@@ -0,0 +1,37 @@
+import open_clip.tokenizer
+import torch
+
+from modules import sd_hijack_clip, devices
+from modules.shared import opts
+
+tokenizer = open_clip.tokenizer._tokenizer
+
+
+class FrozenOpenCLIPEmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase):
+ def __init__(self, wrapped, hijack):
+ super().__init__(wrapped, hijack)
+
+ self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ',</w>'][0]
+ self.id_start = tokenizer.encoder["<start_of_text>"]
+ self.id_end = tokenizer.encoder["<end_of_text>"]
+ self.id_pad = 0
+
+ def tokenize(self, texts):
+ assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip'
+
+ tokenized = [tokenizer.encode(text) for text in texts]
+
+ return tokenized
+
+ def encode_with_transformers(self, tokens):
+ # set self.wrapped.layer_idx here according to opts.CLIP_stop_at_last_layers
+ z = self.wrapped.encode_with_transformer(tokens)
+
+ return z
+
+ def encode_embedding_init_text(self, init_text, nvpt):
+ ids = tokenizer.encode(init_text)
+ ids = torch.asarray([ids], device=devices.device, dtype=torch.int)
+ embedded = self.wrapped.model.token_embedding.wrapped(ids).squeeze(0)
+
+ return embedded
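For context (not part of the diff above): a minimal sketch of the open_clip tokenizer calls the new module builds on, useful for inspecting the special-token ids that `FrozenOpenCLIPEmbedderWithCustomWords.__init__` looks up. The example prompt is an illustrative assumption, not taken from the commit.

```python
import open_clip.tokenizer

# Same module-level tokenizer instance the hijack module uses.
tokenizer = open_clip.tokenizer._tokenizer

# Special ids resolved in __init__: the word-final comma (used for
# comma-aware prompt chunking) and the start/end markers that frame
# each 77-token chunk.
comma_token = tokenizer.encoder[',</w>']
id_start = tokenizer.encoder["<start_of_text>"]
id_end = tokenizer.encoder["<end_of_text>"]

# tokenize() in the wrapper returns these raw ids unchanged; start/end
# markers and padding are presumably added later by the shared base
# class in sd_hijack_clip (hence id_pad = 0 above).
ids = tokenizer.encode("a photo of a cat, high quality")  # illustrative prompt
print(ids)
print(comma_token, id_start, id_end)
```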