| author | Zac Liu <liuguang@baai.ac.cn> | 2022-11-30 07:02:02 +0000 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2022-11-30 07:02:02 +0000 |
| commit | 231fb72872191ffa8c446af1577c9003b3d19d4f (patch) | |
| tree | 5c31e75a3934327331d5636bd6ef1420c3ba32fe /modules/sd_hijack.py | |
| parent | a39a57cb1f5964d9af2b541f7b352576adeeac0f (diff) | |
| parent | 52cc83d36b7663a77b79fd2258d2ca871af73e55 (diff) | |
Merge pull request #2 from 920232796/master
fix bugs
Diffstat (limited to 'modules/sd_hijack.py')
-rw-r--r-- | modules/sd_hijack.py | 15 |
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 3ec3f98a..edb8b420 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -28,7 +28,7 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At
# new memory efficient cross attention blocks do not support hypernets and we already
# have memory efficient cross attention anyway, so this disables SD2.0's memory efficient cross attention
ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.CrossAttention
-ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention
+# ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention
# silence new console spam from SD2
ldm.modules.attention.print = lambda *args: None
@@ -82,7 +82,12 @@ class StableDiffusionModelHijack:
def hijack(self, m):
- if type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenCLIPEmbedder:
+ if shared.text_model_name == "XLMR-Large":
+ model_embeddings = m.cond_stage_model.roberta.embeddings
+ model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.word_embeddings, self)
+ m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
+
+ elif type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenCLIPEmbedder:
model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self)
m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
@@ -91,11 +96,7 @@ class StableDiffusionModelHijack:
m.cond_stage_model.model.token_embedding = EmbeddingsWithFixes(m.cond_stage_model.model.token_embedding, self)
m.cond_stage_model = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
apply_optimizations()
- elif shared.text_model_name == "XLMR-Large":
- model_embeddings = m.cond_stage_model.roberta.embeddings
- model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.word_embeddings, self)
- m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
-
+
self.clip = m.cond_stage_model
fix_checkpoint()
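
The first hunk works by two module-level monkey-patches: aliasing SD2.0's `MemoryEfficientCrossAttention` class to the plain `CrossAttention` (whose path the webui already optimizes and which supports hypernetworks), and shadowing the module's `print` to drop console spam. A minimal sketch of that pattern, using a stand-in module rather than the real `ldm.modules.attention`:

```python
# Sketch of the monkey-patch pattern from the first hunk. The module and
# classes here are stand-ins; the real targets are ldm.modules.attention.*.
import types

attention = types.ModuleType("attention")  # stand-in for ldm.modules.attention

class CrossAttention:                      # plain attention; hypernets work here
    pass

class MemoryEfficientCrossAttention:       # SD2.0's memory-efficient path
    pass

attention.CrossAttention = CrossAttention
attention.MemoryEfficientCrossAttention = MemoryEfficientCrossAttention
attention.print = print

# 1) Alias the memory-efficient class to the plain one: any code that looks up
#    attention.MemoryEfficientCrossAttention now builds plain CrossAttention.
attention.MemoryEfficientCrossAttention = attention.CrossAttention

# 2) Shadow the module-level print with a no-op; functions defined inside the
#    module resolve print through its globals, so their console output vanishes.
attention.print = lambda *args: None

assert attention.MemoryEfficientCrossAttention is attention.CrossAttention
attention.print("silently dropped")
```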
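The remaining hunks are the actual bug fix: the XLMR-Large branch moves to the front of `hijack()`'s dispatch chain. That branch is keyed on `shared.text_model_name` rather than on the type of `cond_stage_model`, and the two model families expose their embedding tables at different attribute paths. A hedged, self-contained sketch of that dispatch (`SimpleNamespace` stand-ins, not the webui's classes):

```python
# Sketch of the attribute-path difference behind the reordered dispatch.
# The objects below are SimpleNamespace stand-ins for the real models.
from types import SimpleNamespace

def embedding_table(cond_stage_model, text_model_name):
    """Pick the table that EmbeddingsWithFixes would wrap."""
    if text_model_name == "XLMR-Large":
        # RoBERTa layout: roberta.embeddings.word_embeddings
        return cond_stage_model.roberta.embeddings.word_embeddings
    # CLIP layout: transformer.text_model.embeddings.token_embedding
    return cond_stage_model.transformer.text_model.embeddings.token_embedding

xlmr = SimpleNamespace(roberta=SimpleNamespace(
    embeddings=SimpleNamespace(word_embeddings="xlmr word embeddings")))
clip = SimpleNamespace(transformer=SimpleNamespace(text_model=SimpleNamespace(
    embeddings=SimpleNamespace(token_embedding="clip token embeddings"))))

assert embedding_table(xlmr, "XLMR-Large") == "xlmr word embeddings"
assert embedding_table(clip, "FrozenCLIP") == "clip token embeddings"
```

Putting the name check first guarantees an XLMR model is wrapped via the RoBERTa path before any of the CLIP/OpenCLIP type checks run.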