| author | captin411 <captindave@gmail.com> | 2022-10-25 20:22:27 +0000 |
|---|---|---|
| committer | captin411 <captindave@gmail.com> | 2022-10-25 20:22:27 +0000 |
| commit | 6629446a2f9bb3ade1c271854aae1530ba1a8cc3 (patch) | |
| tree | ad7cfd2b3f0208c24da64c7f08e0550e783228ec /modules/lowvram.py | |
| parent | 3e6c2420c1177e9e79f2b566a5a7795b7416e34a (diff) | |
| parent | 3e15f8e0f5cc87507f77546d92435670644dbd18 (diff) | |
Merge branch 'master' into focal-point-cropping
Diffstat (limited to 'modules/lowvram.py')
| -rw-r--r-- | modules/lowvram.py | 9 |
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/modules/lowvram.py b/modules/lowvram.py
index 7eba1349..f327c3df 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -1,9 +1,8 @@
import torch
-from modules.devices import get_optimal_device
+from modules import devices
module_in_gpu = None
cpu = torch.device("cpu")
-device = gpu = get_optimal_device()
def send_everything_to_cpu():
@@ -33,7 +32,7 @@ def setup_for_low_vram(sd_model, use_medvram):
if module_in_gpu is not None:
module_in_gpu.to(cpu)
- module.to(gpu)
+ module.to(devices.device)
module_in_gpu = module
# see below for register_forward_pre_hook;
@@ -51,7 +50,7 @@ def setup_for_low_vram(sd_model, use_medvram):
# send the model to GPU. Then put modules back. the modules will be in CPU.
stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.model
sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.model = None, None, None
- sd_model.to(device)
+ sd_model.to(devices.device)
sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.model = stored
# register hooks for those the first two models
@@ -70,7 +69,7 @@ def setup_for_low_vram(sd_model, use_medvram):
# so that only one of them is in GPU at a time
stored = diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed
diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed = None, None, None, None
- sd_model.model.to(device)
+ sd_model.model.to(devices.device)
diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed = stored
# install hooks for bits of third model
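For context, the `send_me_to_gpu` hook touched in the second hunk is the core of the low-VRAM mode: every submodule stays on the CPU and is promoted to the GPU only when its forward pass is about to run, evicting whichever submodule was resident before it. Below is a minimal sketch of that forward-pre-hook pattern; it is not the webui code, and the `TwoStage` model and its layers are made up purely for illustration.

```python
import torch
import torch.nn as nn

# Stand-in for devices.device in the diff above.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
cpu = torch.device("cpu")

module_in_gpu = None  # the single submodule currently resident on the GPU


def send_me_to_gpu(module, _inputs):
    """Forward pre-hook: evict the previous submodule, then move this one to the device."""
    global module_in_gpu
    if module_in_gpu is module:
        return
    if module_in_gpu is not None:
        module_in_gpu.to(cpu)
    module.to(device)
    module_in_gpu = module


class TwoStage(nn.Module):
    """Hypothetical two-stage model used only to demonstrate the hook."""
    def __init__(self):
        super().__init__()
        self.encoder = nn.Linear(64, 64)
        self.decoder = nn.Linear(64, 64)

    def forward(self, x):
        return self.decoder(self.encoder(x))


model = TwoStage()  # parameters start out on the CPU
for sub in (model.encoder, model.decoder):
    sub.register_forward_pre_hook(send_me_to_gpu)

# At most one of encoder/decoder is resident on the GPU at any point in the pass.
out = model(torch.randn(1, 64, device=device))
```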
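The remaining hunks rely on a second trick: to move only the lightweight wrapper parts of `sd_model` to `devices.device`, the heavy submodules are temporarily detached (set to `None`), the model is sent to the device, and the submodules are then re-attached while still resident on the CPU. A rough sketch of the same idea, again with a hypothetical model rather than the webui classes:

```python
import torch
import torch.nn as nn

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class BigModel(nn.Module):
    """Hypothetical model: one heavy block plus a small head."""
    def __init__(self):
        super().__init__()
        self.big_block = nn.Linear(4096, 4096)  # should stay on the CPU for now
        self.small_head = nn.Linear(4096, 16)   # cheap enough to move


model = BigModel()

# Stash the heavy submodule, move everything else, then restore it.
stored = model.big_block
model.big_block = None      # nn.Module allows detaching a registered child this way
model.to(device)            # only small_head actually moves; None children are skipped
model.big_block = stored    # re-attached, parameters still on the CPU

print(model.small_head.weight.device)  # the chosen device
print(model.big_block.weight.device)   # cpu
```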