diff options
author | AUTOMATIC <16777216c@gmail.com> | 2022-12-31 23:41:15 +0000 |
---|---|---|
committer | AUTOMATIC <16777216c@gmail.com> | 2022-12-31 23:41:15 +0000 |
commit | 210449b374d522c94a67fe54289a9eb515933a9f (patch) | |
tree | 908eb58b7e4b9149d6c14c811f741f6cbf0b023c | |
parent | 29a3a7eb13478297bc7093971b48827ab8246f45 (diff) | |
download | stable-diffusion-webui-gfx803-210449b374d522c94a67fe54289a9eb515933a9f.tar.gz stable-diffusion-webui-gfx803-210449b374d522c94a67fe54289a9eb515933a9f.tar.bz2 stable-diffusion-webui-gfx803-210449b374d522c94a67fe54289a9eb515933a9f.zip |
fix 'RuntimeError: Expected all tensors to be on the same device' error preventing models from loading on lowvram/medvram.
-rw-r--r-- | modules/sd_hijack_clip.py | 2 |
1 file changed, 1 insertion, 1 deletion
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index 6ec50cca..ca92b142 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -298,6 +298,6 @@ class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase): def encode_embedding_init_text(self, init_text, nvpt):
embedding_layer = self.wrapped.transformer.text_model.embeddings
ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"]
- embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0)
+ embedded = embedding_layer.token_embedding.wrapped(ids.to(embedding_layer.token_embedding.wrapped.weight.device)).squeeze(0)
return embedded
|