author     AUTOMATIC1111 <16777216c@gmail.com>  2023-01-04 14:40:19 +0000
committer  GitHub <noreply@github.com>          2023-01-04 14:40:19 +0000
commit     da5c1e8a732c173ed8ccda9fa32f9a194ff91ab6 (patch)
tree       a2eec9c47e820e7ab351337f73c99d874b4b904f /modules/lowvram.py
parent     cffc240a7327ae60671ff533469fc4ed4bf605de (diff)
parent     47df0849019abac6722c49512f4dd2285bff5b7d (diff)
download   stable-diffusion-webui-gfx803-da5c1e8a732c173ed8ccda9fa32f9a194ff91ab6.tar.gz
           stable-diffusion-webui-gfx803-da5c1e8a732c173ed8ccda9fa32f9a194ff91ab6.tar.bz2
           stable-diffusion-webui-gfx803-da5c1e8a732c173ed8ccda9fa32f9a194ff91ab6.zip
Merge branch 'master' into inpaint_textual_inversion
Diffstat (limited to 'modules/lowvram.py')
-rw-r--r--  modules/lowvram.py  20
1 file changed, 15 insertions, 5 deletions
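
The hunk below extends `setup_for_low_vram` in two ways. First, SD2's OpenCLIP text encoder keeps its network in `cond_stage_model.model` rather than SD1's `cond_stage_model.transformer`, so the patch temporarily aliases `model` to `transformer` (and undoes the alias afterwards) so the existing hook-registration code covers both. Second, the optional `depth_model` joins the set of large modules that are stripped out before the model is moved to the GPU and then paged in on demand. Below is a minimal sketch of the aliasing trick only, using made-up stand-in classes rather than the webui's real encoder wrappers:

```python
# Stand-in classes for illustration only; the real objects are the webui's CLIP/OpenCLIP wrappers.
class Sd1ClipEncoder:
    def __init__(self):
        self.transformer = object()   # SD1: the network lives in .transformer

class Sd2OpenClipEncoder:
    def __init__(self):
        self.model = object()         # SD2: the network lives in .model

def with_transformer_alias(cond_stage_model, register):
    # Give SD2's encoder a temporary .transformer alias so one code path can hook both variants.
    if hasattr(cond_stage_model, 'model'):
        cond_stage_model.transformer = cond_stage_model.model
    register(cond_stage_model.transformer)
    # Restore the original attribute layout once the hook is in place.
    if hasattr(cond_stage_model, 'model'):
        cond_stage_model.model = cond_stage_model.transformer
        del cond_stage_model.transformer

# Works the same for either encoder; `register` stands in for register_forward_pre_hook(...).
with_transformer_alias(Sd2OpenClipEncoder(), register=print)
```
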
diff --git a/modules/lowvram.py b/modules/lowvram.py
index a4652cb1..042a0254 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -51,20 +51,30 @@ def setup_for_low_vram(sd_model, use_medvram):
         send_me_to_gpu(first_stage_model, None)
         return first_stage_model_decode(z)
 
-    # remove three big modules, cond, first_stage, and unet from the model and then
+    # for SD1, cond_stage_model is CLIP and its NN is in the transformer field, but for SD2, it's open clip, and it's in the model field
+    if hasattr(sd_model.cond_stage_model, 'model'):
+        sd_model.cond_stage_model.transformer = sd_model.cond_stage_model.model
+
+    # remove four big modules, cond, first_stage, depth (if applicable), and unet from the model and then
     # send the model to GPU. Then put modules back. the modules will be in CPU.
-    stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.model
-    sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.model = None, None, None
+    stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, getattr(sd_model, 'depth_model', None), sd_model.model
+    sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.model = None, None, None, None
     sd_model.to(devices.device)
-    sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.model = stored
+    sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.model = stored
 
-    # register hooks for the first two models
+    # register hooks for the first three models
     sd_model.cond_stage_model.transformer.register_forward_pre_hook(send_me_to_gpu)
     sd_model.first_stage_model.register_forward_pre_hook(send_me_to_gpu)
     sd_model.first_stage_model.encode = first_stage_model_encode_wrap
     sd_model.first_stage_model.decode = first_stage_model_decode_wrap
+    if sd_model.depth_model:
+        sd_model.depth_model.register_forward_pre_hook(send_me_to_gpu)
     parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model
 
+    if hasattr(sd_model.cond_stage_model, 'model'):
+        sd_model.cond_stage_model.model = sd_model.cond_stage_model.transformer
+        del sd_model.cond_stage_model.transformer
+
     if use_medvram:
         sd_model.model.register_forward_pre_hook(send_me_to_gpu)
     else:
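
For context on why registering a forward pre-hook is enough: in the low-VRAM scheme all of the big submodules stay on the CPU, and a shared pre-hook moves the one that is about to run onto the GPU while evicting whichever module was resident before. Below is a minimal, self-contained sketch of that pattern, assuming plain `nn.Linear` stand-ins rather than the actual cond/first-stage/depth/unet modules; the webui's real `send_me_to_gpu` also maps submodules to their owners via the `parents` dict, which is omitted here:

```python
import torch
import torch.nn as nn

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
module_in_gpu = None  # the single big module currently resident on the GPU


def send_me_to_gpu(module, _inputs):
    """Forward pre-hook: move `module` to the GPU, push the previous resident back to the CPU."""
    global module_in_gpu
    if module_in_gpu is module:
        return
    if module_in_gpu is not None:
        module_in_gpu.to('cpu')   # free the VRAM held by the previously active module
    module.to(device)
    module_in_gpu = module


# Keep the big modules on the CPU and let them migrate lazily when they are called.
big_a = nn.Linear(4096, 4096)
big_b = nn.Linear(4096, 4096)
for m in (big_a, big_b):
    m.register_forward_pre_hook(send_me_to_gpu)

x = torch.randn(1, 4096, device=device)
y = big_a(x)   # the hook moves big_a to the GPU here; big_b stays on the CPU until it runs
z = big_b(y)   # now big_b is moved in and big_a is evicted back to the CPU
```

The patch above simply extends this bookkeeping: `depth_model` (present in depth-guided SD models) gets the same hook, and SD2's `cond_stage_model.model` is briefly exposed as `.transformer` so the existing registration line covers it as well.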