author      AUTOMATIC <16777216c@gmail.com>    2022-09-12 08:55:27 +0000
committer   AUTOMATIC <16777216c@gmail.com>    2022-09-12 08:55:27 +0000
commit      9bb20be09092bb6c568f676b63105fb85b0c05cf (patch)
tree        41fd3d246dd3f3f64f8dd17a178146b0896c3cf1
parent      ab0a79cdf40a149442a759226cf990b7f3033b01 (diff)
memory optimization for CLIP interrogator
raised the cfg_scale slider maximum from 15 to 30 (the default value stays at 7.0)
-rw-r--r--   modules/interrogate.py | 28
-rw-r--r--   modules/lowvram.py     | 10
-rw-r--r--   modules/shared.py      |  1
-rw-r--r--   modules/ui.py          |  4
4 files changed, 36 insertions(+), 7 deletions(-)
diff --git a/modules/interrogate.py b/modules/interrogate.py
index 7ebb79fc..1579c042 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -11,7 +11,7 @@
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
import modules.shared as shared
-from modules import devices, paths
+from modules import devices, paths, lowvram
blip_image_eval_size = 384
blip_model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
@@ -75,19 +75,28 @@ class InterrogateModels:
self.dtype = next(self.clip_model.parameters()).dtype
- def unload(self):
+ def send_clip_to_ram(self):
if not shared.opts.interrogate_keep_models_in_memory:
if self.clip_model is not None:
self.clip_model = self.clip_model.to(devices.cpu)
+ def send_blip_to_ram(self):
+ if not shared.opts.interrogate_keep_models_in_memory:
if self.blip_model is not None:
self.blip_model = self.blip_model.to(devices.cpu)
- devices.torch_gc()
+ def unload(self):
+ self.send_clip_to_ram()
+ self.send_blip_to_ram()
+
+ devices.torch_gc()
def rank(self, image_features, text_array, top_count=1):
import clip
+ if shared.opts.interrogate_clip_dict_limit != 0:
+ text_array = text_array[0:int(shared.opts.interrogate_clip_dict_limit)]
+
top_count = min(top_count, len(text_array))
text_tokens = clip.tokenize([text for text in text_array]).to(shared.device)
text_features = self.clip_model.encode_text(text_tokens).type(self.dtype)
@@ -117,16 +126,24 @@ class InterrogateModels:
res = None
try:
+
+ if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+ lowvram.send_everything_to_cpu()
+ devices.torch_gc()
+
self.load()
caption = self.generate_caption(pil_image)
+ self.send_blip_to_ram()
+ devices.torch_gc()
+
res = caption
- images = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(shared.device)
+ clip_image = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(shared.device)
precision_scope = torch.autocast if shared.cmd_opts.precision == "autocast" else contextlib.nullcontext
with torch.no_grad(), precision_scope("cuda"):
- image_features = self.clip_model.encode_image(images).type(self.dtype)
+ image_features = self.clip_model.encode_image(clip_image).type(self.dtype)
image_features /= image_features.norm(dim=-1, keepdim=True)
@@ -146,4 +163,5 @@ class InterrogateModels:
self.unload()
+ res += "<error>"
return res
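
The memory optimization above follows one pattern throughout: move a model's weights to system RAM whenever the model is idle, then call devices.torch_gc() so PyTorch releases the cached VRAM. A minimal standalone sketch of that pattern follows; the OffloadedModel wrapper is hypothetical and not part of this repository.

import torch

def torch_gc():
    # Release VRAM that PyTorch is caching (mirrors what devices.torch_gc does).
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()

class OffloadedModel:
    # Hypothetical wrapper: park a model on the CPU and load it into VRAM
    # only for the duration of a `with` block.
    def __init__(self, model, device="cuda"):
        self.model = model.to("cpu")
        self.device = device

    def __enter__(self):
        self.model.to(self.device)   # page weights into VRAM on demand
        return self.model

    def __exit__(self, *exc):
        self.model.to("cpu")         # send weights back to system RAM
        torch_gc()                   # free the cached VRAM right away

Used this way, BLIP captioning and CLIP ranking would each occupy VRAM only during their own step, which is what the interleaved send_blip_to_ram() and devices.torch_gc() calls in interrogate() achieve inline.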
diff --git a/modules/lowvram.py b/modules/lowvram.py
index 079386c3..7eba1349 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -5,6 +5,16 @@
module_in_gpu = None
cpu = torch.device("cpu")
device = gpu = get_optimal_device()
+
+def send_everything_to_cpu():
+ global module_in_gpu
+
+ if module_in_gpu is not None:
+ module_in_gpu.to(cpu)
+
+ module_in_gpu = None
+
+
def setup_for_low_vram(sd_model, use_medvram):
parents = {}
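
The new send_everything_to_cpu() plugs into lowvram.py's existing scheme: under --lowvram/--medvram, at most one stable-diffusion submodule lives in VRAM at a time, and the module_in_gpu global remembers which one, so evicting "everything" means evicting that single occupant. A simplified sketch of the scheme, where use_module is a hypothetical stand-in for the forward-pass hooks the real module installs:

import torch

cpu = torch.device("cpu")
gpu = torch.device("cuda")
module_in_gpu = None  # the one submodule currently resident in VRAM

def use_module(module):
    # Hypothetical hook, called just before a submodule's forward pass.
    global module_in_gpu
    if module_in_gpu is not module:
        if module_in_gpu is not None:
            module_in_gpu.to(cpu)  # evict the previous occupant
        module.to(gpu)             # page in the submodule about to run
        module_in_gpu = module

def send_everything_to_cpu():
    # Same shape as the function added in the diff: evict whatever is resident.
    global module_in_gpu
    if module_in_gpu is not None:
        module_in_gpu.to(cpu)
    module_in_gpu = None

The interrogator calls this before loading BLIP/CLIP, so on low-memory setups the interrogation models never have to share VRAM with the diffusion model.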
diff --git a/modules/shared.py b/modules/shared.py
index a78c7f89..c7e60706 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -132,6 +132,7 @@ class Options:
"interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum descripton length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum descripton length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
+ "interrogate_clip_dict_limit": OptionInfo(1500, "Interrogate: maximum number of lines in text file (0 = No limit)"),
}
def __init__(self):
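
The new interrogate_clip_dict_limit option feeds the rank() change above: when it is non-zero, each category text file (artists, flavors, and so on) is truncated before tokenization, which caps the size of the CLIP text-encoder batch and therefore its VRAM use. A worked example with hypothetical values:

# Suppose artists.txt yields 5000 candidate labels.
text_array = ["by artist %d" % i for i in range(5000)]

limit = 1500  # the default interrogate_clip_dict_limit from the diff
if limit != 0:
    text_array = text_array[0:int(limit)]

print(len(text_array))  # 1500 -- only these lines are scored against the image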
diff --git a/modules/ui.py b/modules/ui.py
index b9af2c86..e407041c 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -270,7 +270,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
- cfg_scale = gr.Slider(minimum=1.0, maximum=15.0, step=0.5, label='CFG Scale', value=7.0)
+ cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
with gr.Group():
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
@@ -413,7 +413,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
- cfg_scale = gr.Slider(minimum=1.0, maximum=15.0, step=0.5, label='CFG Scale', value=7.0)
+ cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1, visible=False)
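
For reference, cfg_scale is the classifier-free guidance weight: the denoiser is evaluated once with the prompt and once without, and the two predictions are blended, so raising the slider ceiling from 15 to 30 permits much stronger prompt adherence (typically with artifacts at the high end). A sketch of the standard formula, not code from this repository:

import torch

def cfg_denoise(eps_uncond, eps_cond, cfg_scale):
    # Classifier-free guidance: move the prediction away from the unconditional
    # output toward the prompt-conditioned one; cfg_scale = 1.0 means no guidance.
    return eps_uncond + cfg_scale * (eps_cond - eps_uncond)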