From 50b5504401e50b6c94eba41b37fe212b2f27b792 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 22 Oct 2022 14:04:14 +0300
Subject: remove parsing command line from devices.py

---
 modules/lowvram.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

(limited to 'modules/lowvram.py')

diff --git a/modules/lowvram.py b/modules/lowvram.py
index 7eba1349..f327c3df 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -1,9 +1,8 @@
 import torch
-from modules.devices import get_optimal_device
+from modules import devices

 module_in_gpu = None
 cpu = torch.device("cpu")
-device = gpu = get_optimal_device()


 def send_everything_to_cpu():
@@ -33,7 +32,7 @@ def setup_for_low_vram(sd_model, use_medvram):
         if module_in_gpu is not None:
             module_in_gpu.to(cpu)

-        module.to(gpu)
+        module.to(devices.device)
         module_in_gpu = module

     # see below for register_forward_pre_hook;
@@ -51,7 +50,7 @@ def setup_for_low_vram(sd_model, use_medvram):
     # send the model to GPU. Then put modules back. the modules will be in CPU.
     stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.model
     sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.model = None, None, None
-    sd_model.to(device)
+    sd_model.to(devices.device)
     sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.model = stored

     # register hooks for those the first two models
@@ -70,7 +69,7 @@ def setup_for_low_vram(sd_model, use_medvram):
     # so that only one of them is in GPU at a time
     stored = diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed
     diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed = None, None, None, None
-    sd_model.model.to(device)
+    sd_model.model.to(devices.device)
     diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed = stored

     # install hooks for bits of third model
--
cgit v1.2.3


From af758e97fa2c4c853042f121af4e974be01e6696 Mon Sep 17 00:00:00 2001
From: Jairo Correa
Date: Tue, 1 Nov 2022 04:01:49 -0300
Subject: Unload sd_model before loading the other

---
 modules/lowvram.py    | 21 +++++++++++++--------
 modules/processing.py |  3 +++
 modules/sd_hijack.py  |  4 ++++
 modules/sd_models.py  | 14 +++++++++++++-
 webui.py              |  2 +-
 5 files changed, 34 insertions(+), 10 deletions(-)

(limited to 'modules/lowvram.py')

diff --git a/modules/lowvram.py b/modules/lowvram.py
index f327c3df..a4652cb1 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -38,13 +38,18 @@ def setup_for_low_vram(sd_model, use_medvram):
     # see below for register_forward_pre_hook;
     # first_stage_model does not use forward(), it uses encode/decode, so register_forward_pre_hook is
     # useless here, and we just replace those methods
-    def first_stage_model_encode_wrap(self, encoder, x):
-        send_me_to_gpu(self, None)
-        return encoder(x)

-    def first_stage_model_decode_wrap(self, decoder, z):
-        send_me_to_gpu(self, None)
-        return decoder(z)
+    first_stage_model = sd_model.first_stage_model
+    first_stage_model_encode = sd_model.first_stage_model.encode
+    first_stage_model_decode = sd_model.first_stage_model.decode
+
+    def first_stage_model_encode_wrap(x):
+        send_me_to_gpu(first_stage_model, None)
+        return first_stage_model_encode(x)
+
+    def first_stage_model_decode_wrap(z):
+        send_me_to_gpu(first_stage_model, None)
+        return first_stage_model_decode(z)

     # remove three big modules, cond, first_stage, and unet from the model and then
     # send the model to GPU. Then put modules back. the modules will be in CPU.
@@ -56,8 +61,8 @@ def setup_for_low_vram(sd_model, use_medvram):
     # register hooks for those the first two models
     sd_model.cond_stage_model.transformer.register_forward_pre_hook(send_me_to_gpu)
     sd_model.first_stage_model.register_forward_pre_hook(send_me_to_gpu)
-    sd_model.first_stage_model.encode = lambda x, en=sd_model.first_stage_model.encode: first_stage_model_encode_wrap(sd_model.first_stage_model, en, x)
-    sd_model.first_stage_model.decode = lambda z, de=sd_model.first_stage_model.decode: first_stage_model_decode_wrap(sd_model.first_stage_model, de, z)
+    sd_model.first_stage_model.encode = first_stage_model_encode_wrap
+    sd_model.first_stage_model.decode = first_stage_model_decode_wrap
     parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model

     if use_medvram:
diff --git a/modules/processing.py b/modules/processing.py
index b1df4918..57d3a523 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -597,6 +597,9 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
     if p.scripts is not None:
         p.scripts.postprocess(p, res)

+    p.sd_model = None
+    p.sampler = None
+
     return res


diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 0f10828e..bc49d235 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -94,6 +94,10 @@ class StableDiffusionModelHijack:
         if type(model_embeddings.token_embedding) == EmbeddingsWithFixes:
             model_embeddings.token_embedding = model_embeddings.token_embedding.wrapped

+        self.layers = None
+        self.circular_enabled = False
+        self.clip = None
+
     def apply_circular(self, enable):
         if self.circular_enabled == enable:
             return
diff --git a/modules/sd_models.py b/modules/sd_models.py
index f86dc3ed..90007da3 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -1,6 +1,7 @@
 import collections
 import os.path
 import sys
+import gc
 from collections import namedtuple
 import torch
 import re
@@ -220,6 +221,12 @@ def load_model(checkpoint_info=None):
     if checkpoint_info.config != shared.cmd_opts.config:
         print(f"Loading config from: {checkpoint_info.config}")

+    if shared.sd_model:
+        sd_hijack.model_hijack.undo_hijack(shared.sd_model)
+        shared.sd_model = None
+        gc.collect()
+        devices.torch_gc()
+
     sd_config = OmegaConf.load(checkpoint_info.config)

     if should_hijack_inpainting(checkpoint_info):
@@ -233,6 +240,7 @@ def load_model(checkpoint_info=None):
         checkpoint_info = checkpoint_info._replace(config=checkpoint_info.config.replace(".yaml", "-inpainting.yaml"))

     do_inpainting_hijack()
+
     sd_model = instantiate_from_config(sd_config.model)
     load_model_weights(sd_model, checkpoint_info)

@@ -252,14 +260,18 @@ def load_model(checkpoint_info=None):
     return sd_model


-def reload_model_weights(sd_model, info=None):
+def reload_model_weights(sd_model=None, info=None):
     from modules import lowvram, devices, sd_hijack
     checkpoint_info = info or select_checkpoint()

+    if not sd_model:
+        sd_model = shared.sd_model
+
     if sd_model.sd_model_checkpoint == checkpoint_info.filename:
         return

     if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info):
+        del sd_model
         checkpoints_loaded.clear()
         load_model(checkpoint_info)
         return shared.sd_model
diff --git a/webui.py b/webui.py
index 6ff95dc4..9c393e55 100644
--- a/webui.py
+++ b/webui.py
@@ -77,7 +77,7 @@ def initialize():
     modules.scripts.load_scripts()

     modules.sd_models.load_model()
-    shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
+    shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()))
     shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetworks.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)))
     shared.opts.onchange("sd_hypernetwork_strength", modules.hypernetworks.hypernetwork.apply_strength)

--
cgit v1.2.3


From b5050ad2071644f7b4c99660dc66a8a95136102f Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 26 Nov 2022 20:52:16 +0300
Subject: make SD2 compatible with --medvram setting

---
 modules/lowvram.py | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'modules/lowvram.py')

diff --git a/modules/lowvram.py b/modules/lowvram.py
index a4652cb1..aa464a95 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -51,6 +51,10 @@ def setup_for_low_vram(sd_model, use_medvram):
         send_me_to_gpu(first_stage_model, None)
         return first_stage_model_decode(z)

+    # for SD1, cond_stage_model is CLIP and its NN is in the tranformer frield, but for SD2, it's open clip, and it's in model field
+    if hasattr(sd_model.cond_stage_model, 'model'):
+        sd_model.cond_stage_model.transformer = sd_model.cond_stage_model.model
+
     # remove three big modules, cond, first_stage, and unet from the model and then
     # send the model to GPU. Then put modules back. the modules will be in CPU.
     stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.model
@@ -65,6 +69,10 @@ def setup_for_low_vram(sd_model, use_medvram):
     sd_model.first_stage_model.decode = first_stage_model_decode_wrap
     parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model

+    if hasattr(sd_model.cond_stage_model, 'model'):
+        sd_model.cond_stage_model.model = sd_model.cond_stage_model.transformer
+        del sd_model.cond_stage_model.transformer
+
     if use_medvram:
         sd_model.model.register_forward_pre_hook(send_me_to_gpu)
     else:
--
cgit v1.2.3


From a1c8ad88283f7b3e861e4722c71e39bf71eec744 Mon Sep 17 00:00:00 2001
From: MrCheeze
Date: Sat, 10 Dec 2022 11:02:47 -0500
Subject: unload depth model if medvram/lowvram enabled

---
 modules/lowvram.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

(limited to 'modules/lowvram.py')

diff --git a/modules/lowvram.py b/modules/lowvram.py
index aa464a95..042a0254 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -55,18 +55,20 @@ def setup_for_low_vram(sd_model, use_medvram):
     if hasattr(sd_model.cond_stage_model, 'model'):
         sd_model.cond_stage_model.transformer = sd_model.cond_stage_model.model

-    # remove three big modules, cond, first_stage, and unet from the model and then
+    # remove four big modules, cond, first_stage, depth (if applicable), and unet from the model and then
     # send the model to GPU. Then put modules back. the modules will be in CPU.
-    stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.model
-    sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.model = None, None, None
+    stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, getattr(sd_model, 'depth_model', None), sd_model.model
+    sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.model = None, None, None, None
     sd_model.to(devices.device)
-    sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.model = stored
+    sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.model = stored

-    # register hooks for those the first two models
+    # register hooks for those the first three models
     sd_model.cond_stage_model.transformer.register_forward_pre_hook(send_me_to_gpu)
     sd_model.first_stage_model.register_forward_pre_hook(send_me_to_gpu)
     sd_model.first_stage_model.encode = first_stage_model_encode_wrap
     sd_model.first_stage_model.decode = first_stage_model_decode_wrap
+    if sd_model.depth_model:
+        sd_model.depth_model.register_forward_pre_hook(send_me_to_gpu)
     parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model

     if hasattr(sd_model.cond_stage_model, 'model'):
--
cgit v1.2.3
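
All four patches above touch the same low-VRAM trick: setup_for_low_vram() keeps at most one large sub-module on the GPU at a time by attaching a forward pre-hook that pulls the called module onto the GPU and evicts whichever module occupied it before. Below is a minimal, self-contained sketch of that mechanism using plain torch.nn modules; the hook and variable names mirror modules/lowvram.py, but the toy encoder/decoder modules and the device selection are illustrative stand-ins, not the webui's actual model.

    import torch
    import torch.nn as nn

    # stand-ins for modules.devices.device and the webui's cpu constant
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    cpu = torch.device("cpu")

    module_in_gpu = None  # the single module currently allowed on the GPU


    def send_me_to_gpu(module, _forward_input):
        """forward_pre_hook: move `module` to the GPU, evicting the previous occupant to the CPU."""
        global module_in_gpu

        if module_in_gpu is module:
            return

        if module_in_gpu is not None:
            module_in_gpu.to(cpu)

        module.to(device)
        module_in_gpu = module


    # two "big" parts standing in for cond_stage_model, first_stage_model,
    # depth_model and the UNet from the patches above
    encoder = nn.Linear(512, 512).to(cpu)
    decoder = nn.Linear(512, 512).to(cpu)

    for m in (encoder, decoder):
        m.register_forward_pre_hook(send_me_to_gpu)  # moved to the GPU lazily, one at a time

    x = torch.randn(1, 512).to(device)
    h = encoder(x)   # hook pulls encoder onto the GPU; decoder stays on the CPU
    y = decoder(h)   # hook evicts encoder and moves decoder onto the GPU

This is also why the later commits only need to register the same hook on depth_model: once a module participates in the scheme, peak VRAM stays bounded by the single largest hooked sub-module.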
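
The "Unload sd_model before loading the other" patch additionally depends on the old checkpoint actually becoming collectable before the new one is instantiated: the hijack is undone, the shared reference is dropped, and only then are the Python garbage collector and the CUDA cache asked to release memory. The sketch below restates that order of operations under stated assumptions; the `state` object and `undo_hijack` callable are placeholders for modules.shared and sd_hijack.model_hijack.undo_hijack, and torch_gc is written out inline as an assumption about what modules/devices.torch_gc does.

    import gc
    import torch


    def torch_gc():
        # assumed equivalent of modules/devices.torch_gc: return cached CUDA blocks to the driver
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()


    def unload_model(state, undo_hijack):
        """Drop every live reference to the old model so its VRAM can be reclaimed.

        `state` stands in for modules.shared and `undo_hijack` for
        sd_hijack.model_hijack.undo_hijack; both names are illustrative.
        """
        if getattr(state, "sd_model", None) is None:
            return

        undo_hijack(state.sd_model)  # remove monkey-patches that still point at the model
        state.sd_model = None        # drop the last strong reference
        gc.collect()                 # free the Python-side objects first
        torch_gc()                   # then let CUDA release the cached memory

Without the undo-hijack step, the patched modules keep references to the old model, gc.collect() cannot free it, and two checkpoints end up resident at once, which appears to be the situation the commit is guarding against.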