path: root/modules/lowvram.py
author    ParityError <36368048+ParityError@users.noreply.github.com>  2023-03-29 01:29:59 +0000
committer GitHub <noreply@github.com>  2023-03-29 01:29:59 +0000
commit    f69acfe9a4c09a5c2299e0cc2d5bdcd7a6e62285 (patch)
tree      01852aeac922029273e08a90c683d63c6fc169cd /modules/lowvram.py
parent    fb68d93b6a579a424919b22682cf067ce9a8e13f (diff)
parent    3856ada5cc9ac4124e20ff311ce7aa77330845d9 (diff)
Merge branch 'AUTOMATIC1111:master' into master
Diffstat (limited to 'modules/lowvram.py')
-rw-r--r--  modules/lowvram.py | 10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/modules/lowvram.py b/modules/lowvram.py
index 042a0254..e254cc13 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -55,12 +55,12 @@ def setup_for_low_vram(sd_model, use_medvram):
if hasattr(sd_model.cond_stage_model, 'model'):
sd_model.cond_stage_model.transformer = sd_model.cond_stage_model.model
- # remove four big modules, cond, first_stage, depth (if applicable), and unet from the model and then
+ # remove several big modules: cond, first_stage, depth/embedder (if applicable), and unet from the model and then
# send the model to GPU. Then put modules back. the modules will be in CPU.
- stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, getattr(sd_model, 'depth_model', None), sd_model.model
- sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.model = None, None, None, None
+ stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, getattr(sd_model, 'depth_model', None), getattr(sd_model, 'embedder', None), sd_model.model
+ sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.embedder, sd_model.model = None, None, None, None, None
sd_model.to(devices.device)
- sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.model = stored
+ sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.embedder, sd_model.model = stored
# register hooks for the first three models
sd_model.cond_stage_model.transformer.register_forward_pre_hook(send_me_to_gpu)
@@ -69,6 +69,8 @@ def setup_for_low_vram(sd_model, use_medvram):
sd_model.first_stage_model.decode = first_stage_model_decode_wrap
if sd_model.depth_model:
sd_model.depth_model.register_forward_pre_hook(send_me_to_gpu)
+ if sd_model.embedder:
+ sd_model.embedder.register_forward_pre_hook(send_me_to_gpu)
parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model
if hasattr(sd_model.cond_stage_model, 'model'):
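For context, the hook registrations in this patch rely on PyTorch forward pre-hooks: each large module stays on the CPU until it is actually needed, and the hook swaps it onto the GPU (evicting whichever module was resident before) just before its forward pass runs. The sketch below is a minimal, self-contained illustration of that pattern; the stand-in Linear modules and the simplified send_me_to_gpu are assumptions for illustration, not the repository's exact implementation (the real lowvram.py additionally tracks parent modules and wraps the first-stage encode/decode).

# Minimal sketch of the on-demand GPU pattern the hooks above rely on.
# Each "big" module lives on the CPU until its forward pre-hook fires; the
# hook then moves it to the GPU and evicts whichever module was there before.
# Stand-in modules and this simplified send_me_to_gpu are illustrative only.
import torch
import torch.nn as nn

gpu = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
cpu = torch.device("cpu")

module_in_gpu = None  # the single large module currently resident on the GPU


def send_me_to_gpu(module, _inputs):
    """Forward pre-hook: swap `module` onto the GPU before its forward runs."""
    global module_in_gpu
    if module is module_in_gpu:
        return
    if module_in_gpu is not None:
        module_in_gpu.to(cpu)   # evict the previously active module
    module.to(gpu)              # bring the requested module in
    module_in_gpu = module


# Stand-ins for the big modules lowvram.py detaches and re-attaches:
# cond_stage_model.transformer, first_stage_model, depth_model/embedder, unet.
cond, first_stage, unet = nn.Linear(8, 8), nn.Linear(8, 8), nn.Linear(8, 8)
for m in (cond, first_stage, unet):
    m.to(cpu)
    m.register_forward_pre_hook(send_me_to_gpu)

x = torch.randn(1, 8, device=gpu)
y = unet(first_stage(cond(x)))  # each call moves only the module it needs onto the GPU

In the patched function above, the same send_me_to_gpu hook is attached to depth_model when present, and, with this change, also to embedder when that attribute exists, so SD 2.x depth models and unCLIP-style embedders participate in the same CPU/GPU swapping as the other large modules.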