author     AUTOMATIC1111 <16777216c@gmail.com>  2023-03-28 16:39:20 +0000
committer  GitHub <noreply@github.com>          2023-03-28 16:39:20 +0000
commit     f1db987e6a3d16e278ea44979b43e585d893237e (patch)
tree       c130645b2c8de1c528b69e613c28aed860664c12 /modules/lowvram.py
parent     e49c479819760af910eae4881619c7f119359a5f (diff)
parent     1f08600345298fac0bcb66cc215a81875a84d7b9 (diff)
Merge pull request #8958 from MrCheeze/variations-model
Add support for the unclip (Variations) models, unclip-h and unclip-l
Diffstat (limited to 'modules/lowvram.py')
-rw-r--r--  modules/lowvram.py  10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/modules/lowvram.py b/modules/lowvram.py
index 042a0254..e254cc13 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -55,12 +55,12 @@ def setup_for_low_vram(sd_model, use_medvram):
     if hasattr(sd_model.cond_stage_model, 'model'):
         sd_model.cond_stage_model.transformer = sd_model.cond_stage_model.model
 
-    # remove four big modules, cond, first_stage, depth (if applicable), and unet from the model and then
+    # remove several big modules: cond, first_stage, depth/embedder (if applicable), and unet from the model and then
     # send the model to GPU. Then put modules back. the modules will be in CPU.
-    stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, getattr(sd_model, 'depth_model', None), sd_model.model
-    sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.model = None, None, None, None
+    stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, getattr(sd_model, 'depth_model', None), getattr(sd_model, 'embedder', None), sd_model.model
+    sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.embedder, sd_model.model = None, None, None, None, None
     sd_model.to(devices.device)
-    sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.model = stored
+    sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.embedder, sd_model.model = stored
 
     # register hooks for those the first three models
     sd_model.cond_stage_model.transformer.register_forward_pre_hook(send_me_to_gpu)
@@ -69,6 +69,8 @@ def setup_for_low_vram(sd_model, use_medvram):
     sd_model.first_stage_model.decode = first_stage_model_decode_wrap
     if sd_model.depth_model:
         sd_model.depth_model.register_forward_pre_hook(send_me_to_gpu)
+    if sd_model.embedder:
+        sd_model.embedder.register_forward_pre_hook(send_me_to_gpu)
     parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model
 
     if hasattr(sd_model.cond_stage_model, 'model'):
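
The patch gives the unclip embedder the same treatment as the other large submodules: it is detached while the rest of the model moves to the GPU, reattached on the CPU, and then given a forward pre-hook that pulls it onto the GPU only when it is about to run. Below is a minimal sketch of that hook pattern using a toy module and a hypothetical module_in_gpu tracker; it illustrates torch's register_forward_pre_hook mechanism and is not the webui's actual send_me_to_gpu implementation.

# Minimal sketch of on-demand GPU offloading via a forward pre-hook.
# Assumes a CUDA device is available; `module_in_gpu` and the hook name are
# illustrative stand-ins, not the names used by modules/lowvram.py itself.
import torch
import torch.nn as nn

gpu = torch.device("cuda")
cpu = torch.device("cpu")
module_in_gpu = None  # the single module currently resident on the GPU


def send_to_gpu_hook(module, _inputs):
    # Runs right before `module.forward`: evict whatever module was on the GPU,
    # then move this module's weights over so the forward pass can run there.
    global module_in_gpu
    if module_in_gpu is module:
        return
    if module_in_gpu is not None:
        module_in_gpu.to(cpu)
    module.to(gpu)
    module_in_gpu = module


# Toy stand-ins for the big submodules kept on the CPU between uses.
encoder = nn.Linear(8, 8).to(cpu)
decoder = nn.Linear(8, 8).to(cpu)
for m in (encoder, decoder):
    m.register_forward_pre_hook(send_to_gpu_hook)

x = torch.randn(1, 8, device=gpu)
h = encoder(x)   # hook moves `encoder` to the GPU before its forward runs
y = decoder(h)   # `encoder` is evicted back to the CPU, `decoder` moves in

Only one of the hooked modules occupies VRAM at a time, which is why adding the embedder to the stored/restored tuple and registering a hook for it is all the low-VRAM path needs for the unclip models.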