diff options
author | AUTOMATIC1111 <16777216c@gmail.com> | 2022-12-03 07:19:51 +0000 |
---|---|---|
committer | GitHub <noreply@github.com> | 2022-12-03 07:19:51 +0000 |
commit | c9a2cfdf2a53d37c2de1908423e4f548088667ef (patch) | |
tree | eabdca1b7665e0ee00f130e9f8544ffd23e474a2 /modules/lowvram.py | |
parent | 39541d7725bc42f456a604b07c50aba503a5a09a (diff) | |
parent | 5cd5a672f7889dcc018c3873ec557d645ebe35d0 (diff) | |
download | stable-diffusion-webui-gfx803-c9a2cfdf2a53d37c2de1908423e4f548088667ef.tar.gz stable-diffusion-webui-gfx803-c9a2cfdf2a53d37c2de1908423e4f548088667ef.tar.bz2 stable-diffusion-webui-gfx803-c9a2cfdf2a53d37c2de1908423e4f548088667ef.zip |
Merge branch 'master' into racecond_fix
Diffstat (limited to 'modules/lowvram.py')
-rw-r--r-- | modules/lowvram.py | 8 |
1 file changed, 8 insertions, 0 deletions
diff --git a/modules/lowvram.py b/modules/lowvram.py index a4652cb1..aa464a95 100644 --- a/modules/lowvram.py +++ b/modules/lowvram.py @@ -51,6 +51,10 @@ def setup_for_low_vram(sd_model, use_medvram): send_me_to_gpu(first_stage_model, None)
return first_stage_model_decode(z)
+ # for SD1, cond_stage_model is CLIP and its NN is in the transformer field, but for SD2, it's open clip, and it's in the model field
+ if hasattr(sd_model.cond_stage_model, 'model'):
+ sd_model.cond_stage_model.transformer = sd_model.cond_stage_model.model
+
# remove three big modules, cond, first_stage, and unet, from the model and then
# send the model to GPU. Then put modules back. The modules will remain on CPU.
stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.model
@@ -65,6 +69,10 @@ def setup_for_low_vram(sd_model, use_medvram): sd_model.first_stage_model.decode = first_stage_model_decode_wrap
parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model
+ if hasattr(sd_model.cond_stage_model, 'model'):
+ sd_model.cond_stage_model.model = sd_model.cond_stage_model.transformer
+ del sd_model.cond_stage_model.transformer
+
if use_medvram:
sd_model.model.register_forward_pre_hook(send_me_to_gpu)
else:
|