Diffstat (limited to 'modules/interrogate.py')
-rw-r--r-- | modules/interrogate.py | 63 |
1 file changed, 44 insertions, 19 deletions
diff --git a/modules/interrogate.py b/modules/interrogate.py
index ed97a58b..06862fcc 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -1,3 +1,4 @@
+import contextlib
 import os
 import sys
 import traceback
@@ -6,12 +7,11 @@ import re

 import torch

-from PIL import Image
 from torchvision import transforms
 from torchvision.transforms.functional import InterpolationMode

 import modules.shared as shared
-from modules import devices, paths
+from modules import devices, paths, lowvram

 blip_image_eval_size = 384
 blip_model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
@@ -26,6 +26,7 @@ class InterrogateModels:
     clip_model = None
     clip_preprocess = None
     categories = None
+    dtype = None

     def __init__(self, content_dir):
         self.categories = []
@@ -60,30 +61,45 @@ class InterrogateModels:
     def load(self):
         if self.blip_model is None:
             self.blip_model = self.load_blip_model()
+            if not shared.cmd_opts.no_half:
+                self.blip_model = self.blip_model.half()

         self.blip_model = self.blip_model.to(shared.device)

         if self.clip_model is None:
             self.clip_model, self.clip_preprocess = self.load_clip_model()
+            if not shared.cmd_opts.no_half:
+                self.clip_model = self.clip_model.half()

         self.clip_model = self.clip_model.to(shared.device)

-    def unload(self):
+        self.dtype = next(self.clip_model.parameters()).dtype
+
+    def send_clip_to_ram(self):
         if not shared.opts.interrogate_keep_models_in_memory:
             if self.clip_model is not None:
                 self.clip_model = self.clip_model.to(devices.cpu)

+    def send_blip_to_ram(self):
+        if not shared.opts.interrogate_keep_models_in_memory:
             if self.blip_model is not None:
                 self.blip_model = self.blip_model.to(devices.cpu)

+    def unload(self):
+        self.send_clip_to_ram()
+        self.send_blip_to_ram()
+
+        devices.torch_gc()
     def rank(self, image_features, text_array, top_count=1):
         import clip

+        if shared.opts.interrogate_clip_dict_limit != 0:
+            text_array = text_array[0:int(shared.opts.interrogate_clip_dict_limit)]
+
         top_count = min(top_count, len(text_array))
-        text_tokens = clip.tokenize([text for text in text_array]).cuda()
-        with torch.no_grad():
-            text_features = self.clip_model.encode_text(text_tokens).float()
+        text_tokens = clip.tokenize([text for text in text_array]).to(shared.device)
+        text_features = self.clip_model.encode_text(text_tokens).type(self.dtype)

         text_features /= text_features.norm(dim=-1, keepdim=True)

         similarity = torch.zeros((1, len(text_array))).to(shared.device)
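The load()/unload() rework above is a VRAM-management pattern: halve the weights unless --no-half is set, record the parameter dtype once, and park idle models in CPU RAM between calls. Below is a minimal standalone sketch of that pattern in plain torch; the nn.Linear model, the device variables, and the empty_cache() call are illustrative stand-ins for the webui's BLIP/CLIP models and its devices.torch_gc() helper, not its actual code.

    # Sketch of the offload pattern: halve on CUDA, record dtype, run,
    # then move weights back to CPU RAM and release cached VRAM.
    import torch
    import torch.nn as nn

    cpu = torch.device("cpu")
    device = torch.device("cuda") if torch.cuda.is_available() else cpu

    model = nn.Linear(16, 16)
    if device.type == "cuda":
        model = model.half()              # mirrors the `not no_half` branch
    model = model.to(device)

    dtype = next(model.parameters()).dtype  # recorded once, like self.dtype

    with torch.no_grad():
        out = model(torch.ones(1, 16, dtype=dtype, device=device))

    model = model.to(cpu)                 # send_*_to_ram(): keep weights, free VRAM
    if torch.cuda.is_available():
        torch.cuda.empty_cache()          # roughly what devices.torch_gc() wraps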
@@ -94,13 +110,12 @@ class InterrogateModels:
         top_probs, top_labels = similarity.cpu().topk(top_count, dim=-1)
         return [(text_array[top_labels[0][i].numpy()], (top_probs[0][i].numpy()*100)) for i in range(top_count)]

-
     def generate_caption(self, pil_image):
         gpu_image = transforms.Compose([
             transforms.Resize((blip_image_eval_size, blip_image_eval_size), interpolation=InterpolationMode.BICUBIC),
             transforms.ToTensor(),
             transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
-        ])(pil_image).unsqueeze(0).to(shared.device)
+        ])(pil_image).unsqueeze(0).type(self.dtype).to(shared.device)

         with torch.no_grad():
             caption = self.blip_model.generate(gpu_image, sample=False, num_beams=shared.opts.interrogate_clip_num_beams, min_length=shared.opts.interrogate_clip_min_length, max_length=shared.opts.interrogate_clip_max_length)
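rank() follows the usual CLIP zero-shot recipe: L2-normalize the image and text features, score them by scaled cosine similarity, softmax, and keep the top-k entries. The similarity accumulation itself falls outside the hunks shown here, so the toy reconstruction below uses random tensors and made-up sizes in place of real CLIP encodings.

    # Toy version of rank(): normalized features, scaled cosine similarity,
    # softmax over candidates, then top-k probabilities and label indices.
    import torch

    image_features = torch.randn(1, 512)   # stand-in for encode_image output
    text_features = torch.randn(6, 512)    # one row per candidate phrase

    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)

    similarity = (100.0 * image_features @ text_features.T).softmax(dim=-1)
    top_probs, top_labels = similarity.topk(3, dim=-1)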
@@ -111,31 +126,41 @@ class InterrogateModels:
         res = None

         try:
+
+            if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+                lowvram.send_everything_to_cpu()
+                devices.torch_gc()
+
             self.load()

             caption = self.generate_caption(pil_image)
+            self.send_blip_to_ram()
+            devices.torch_gc()
+
             res = caption

-            images = self.clip_preprocess(pil_image).unsqueeze(0).to(shared.device)
+            clip_image = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(shared.device)

-            with torch.no_grad():
-                image_features = self.clip_model.encode_image(images).float()
+            precision_scope = torch.autocast if shared.cmd_opts.precision == "autocast" else contextlib.nullcontext
+            with torch.no_grad(), precision_scope("cuda"):
+                image_features = self.clip_model.encode_image(clip_image).type(self.dtype)

-            image_features /= image_features.norm(dim=-1, keepdim=True)
+                image_features /= image_features.norm(dim=-1, keepdim=True)

-            if shared.opts.interrogate_use_builtin_artists:
-                artist = self.rank(image_features, ["by " + artist.name for artist in shared.artist_db.artists])[0]
+                if shared.opts.interrogate_use_builtin_artists:
+                    artist = self.rank(image_features, ["by " + artist.name for artist in shared.artist_db.artists])[0]

-                res += ", " + artist[0]
+                    res += ", " + artist[0]

-            for name, topn, items in self.categories:
-                matches = self.rank(image_features, items, top_count=topn)
-                for match, score in matches:
-                    res += ", " + match
+                for name, topn, items in self.categories:
+                    matches = self.rank(image_features, items, top_count=topn)
+                    for match, score in matches:
+                        res += ", " + match

         except Exception:
             print(f"Error interrogating", file=sys.stderr)
             print(traceback.format_exc(), file=sys.stderr)
+            res += "<error>"

         self.unload()
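The interrogate() rewrite selects precision_scope up front, torch.autocast when --precision autocast is in effect and contextlib.nullcontext otherwise, so a single with statement covers both modes (nullcontext simply accepts and ignores the "cuda" argument as its enter_result). A self-contained sketch of that trick, with use_autocast standing in for the shared.cmd_opts.precision check:

    # One `with` line that either enables mixed precision or does nothing.
    import contextlib
    import torch

    use_autocast = torch.cuda.is_available()
    precision_scope = torch.autocast if use_autocast else contextlib.nullcontext

    device = "cuda" if use_autocast else "cpu"
    with torch.no_grad(), precision_scope("cuda"):
        x = torch.randn(4, 4, device=device)
        y = x @ x    # runs in fp16/bf16 under autocast, fp32 otherwise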