diff options
author | AUTOMATIC1111 <16777216c@gmail.com> | 2023-07-14 06:56:01 +0000 |
---|---|---|
committer | AUTOMATIC1111 <16777216c@gmail.com> | 2023-07-14 06:56:01 +0000 |
commit | 9a3f35b028a8026291679c35e1df5b2aea327a1d (patch) | |
tree | efb86fcbcbe81b1a993cf90cd42b6e3d816b5974 | |
parent | abb948dab09841571dd24c6be9ff9d6b212778ea (diff) | |
download | stable-diffusion-webui-gfx803-9a3f35b028a8026291679c35e1df5b2aea327a1d.tar.gz stable-diffusion-webui-gfx803-9a3f35b028a8026291679c35e1df5b2aea327a1d.tar.bz2 stable-diffusion-webui-gfx803-9a3f35b028a8026291679c35e1df5b2aea327a1d.zip |
repair medvram and lowvram
-rw-r--r-- | modules/lowvram.py | 4 | ||||
-rw-r--r-- | modules/sd_hijack_open_clip.py | 4 |
2 files changed, 5 insertions, 3 deletions
diff --git a/modules/lowvram.py b/modules/lowvram.py
index da4f33a8..6bbc11eb 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -100,7 +100,9 @@ def setup_for_low_vram(sd_model, use_medvram):
         sd_model.depth_model.register_forward_pre_hook(send_me_to_gpu)
if sd_model.embedder:
sd_model.embedder.register_forward_pre_hook(send_me_to_gpu)
- parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model
+
+ if hasattr(sd_model, 'cond_stage_model'):
+ parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model
if use_medvram:
sd_model.model.register_forward_pre_hook(send_me_to_gpu)
diff --git a/modules/sd_hijack_open_clip.py b/modules/sd_hijack_open_clip.py
index fcf5ad07..bb0b96c7 100644
--- a/modules/sd_hijack_open_clip.py
+++ b/modules/sd_hijack_open_clip.py
@@ -32,7 +32,7 @@ class FrozenOpenCLIPEmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase):
     def encode_embedding_init_text(self, init_text, nvpt):
ids = tokenizer.encode(init_text)
ids = torch.asarray([ids], device=devices.device, dtype=torch.int)
- embedded = self.wrapped.model.token_embedding.wrapped(ids).squeeze(0)
+ embedded = self.wrapped.model.token_embedding.wrapped(ids.to(self.wrapped.model.token_embedding.wrapped.weight.device)).squeeze(0)
return embedded
@@ -66,6 +66,6 @@ class FrozenOpenCLIPEmbedder2WithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase):
     def encode_embedding_init_text(self, init_text, nvpt):
ids = tokenizer.encode(init_text)
ids = torch.asarray([ids], device=devices.device, dtype=torch.int)
- embedded = self.wrapped.model.token_embedding.wrapped(ids).squeeze(0)
+ embedded = self.wrapped.model.token_embedding.wrapped(ids.to(self.wrapped.model.token_embedding.wrapped.weight.device)).squeeze(0)
return embedded
|