Diffstat (limited to 'modules/interrogate.py')
-rw-r--r--  modules/interrogate.py  41  ++++++++++++++++++++++++-----------------
1 file changed, 24 insertions(+), 17 deletions(-)
diff --git a/modules/interrogate.py b/modules/interrogate.py
index ed97a58b..7ebb79fc 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -1,3 +1,4 @@
+import contextlib
 import os
 import sys
 import traceback
@@ -6,7 +7,6 @@ import re
 
 import torch
-from PIL import Image
 from torchvision import transforms
 from torchvision.transforms.functional import InterpolationMode
 
 import modules.shared as shared
@@ -26,6 +26,7 @@ class InterrogateModels:
     clip_model = None
     clip_preprocess = None
     categories = None
+    dtype = None
 
     def __init__(self, content_dir):
         self.categories = []
@@ -60,14 +61,20 @@ class InterrogateModels:
     def load(self):
         if self.blip_model is None:
             self.blip_model = self.load_blip_model()
+            if not shared.cmd_opts.no_half:
+                self.blip_model = self.blip_model.half()
 
         self.blip_model = self.blip_model.to(shared.device)
 
         if self.clip_model is None:
             self.clip_model, self.clip_preprocess = self.load_clip_model()
+            if not shared.cmd_opts.no_half:
+                self.clip_model = self.clip_model.half()
 
         self.clip_model = self.clip_model.to(shared.device)
 
+        self.dtype = next(self.clip_model.parameters()).dtype
+
     def unload(self):
         if not shared.opts.interrogate_keep_models_in_memory:
             if self.clip_model is not None:
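Note: the load() hunk above converts both models to float16 once at load time (unless --no-half is given) and records the resulting parameter dtype in self.dtype, so later input tensors can be cast to match. A minimal standalone sketch of the same pattern, using a stand-in torch.nn.Linear rather than the real BLIP/CLIP models:

    import torch

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = torch.nn.Linear(4, 4)  # stand-in for the BLIP/CLIP model

    if device.type == "cuda":      # plays the role of `not shared.cmd_opts.no_half`
        model = model.half()       # parameters become float16
    model = model.to(device)

    dtype = next(model.parameters()).dtype  # float16 on GPU here, float32 on CPU

    x = torch.randn(1, 4).type(dtype).to(device)  # inputs cast like `.type(self.dtype)`
    y = model(x)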
@@ -76,14 +83,14 @@ class InterrogateModels:
             if self.blip_model is not None:
                 self.blip_model = self.blip_model.to(devices.cpu)
 
+            devices.torch_gc()
 
     def rank(self, image_features, text_array, top_count=1):
         import clip
 
         top_count = min(top_count, len(text_array))
-        text_tokens = clip.tokenize([text for text in text_array]).cuda()
-        with torch.no_grad():
-            text_features = self.clip_model.encode_text(text_tokens).float()
+        text_tokens = clip.tokenize([text for text in text_array]).to(shared.device)
+        text_features = self.clip_model.encode_text(text_tokens).type(self.dtype)
         text_features /= text_features.norm(dim=-1, keepdim=True)
 
         similarity = torch.zeros((1, len(text_array))).to(shared.device)
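Note: rank() is standard CLIP zero-shot ranking. Image and text features are both L2-normalized, so their matrix product is cosine similarity; scaling by 100 and applying softmax turns the scores into a probability-like ranking. The same arithmetic with toy tensors standing in for real CLIP embeddings:

    import torch

    image_features = torch.randn(1, 512)  # one image embedding
    text_features = torch.randn(3, 512)   # three candidate text embeddings

    # L2-normalize so the dot products below are cosine similarities
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)

    similarity = (100.0 * image_features @ text_features.T).softmax(dim=-1)
    top_probs, top_labels = similarity.topk(2, dim=-1)  # best 2 of 3 candidates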
@@ -94,13 +101,12 @@ class InterrogateModels:
         top_probs, top_labels = similarity.cpu().topk(top_count, dim=-1)
         return [(text_array[top_labels[0][i].numpy()], (top_probs[0][i].numpy()*100)) for i in range(top_count)]
 
-
     def generate_caption(self, pil_image):
         gpu_image = transforms.Compose([
             transforms.Resize((blip_image_eval_size, blip_image_eval_size), interpolation=InterpolationMode.BICUBIC),
             transforms.ToTensor(),
             transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
-        ])(pil_image).unsqueeze(0).to(shared.device)
+        ])(pil_image).unsqueeze(0).type(self.dtype).to(shared.device)
 
         with torch.no_grad():
             caption = self.blip_model.generate(gpu_image, sample=False, num_beams=shared.opts.interrogate_clip_num_beams, min_length=shared.opts.interrogate_clip_min_length, max_length=shared.opts.interrogate_clip_max_length)
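Note: the only functional change in generate_caption() is the `.type(self.dtype)` cast, which makes the preprocessed image tensor match the (possibly halved) BLIP weights. A runnable sketch of the unchanged preprocessing plus that cast, assuming blip_image_eval_size is 384 and using a blank placeholder image:

    import torch
    from PIL import Image
    from torchvision import transforms
    from torchvision.transforms.functional import InterpolationMode

    preprocess = transforms.Compose([
        transforms.Resize((384, 384), interpolation=InterpolationMode.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.48145466, 0.4578275, 0.40821073),
                             (0.26862954, 0.26130258, 0.27577711)),
    ])

    pil_image = Image.new("RGB", (512, 512))  # placeholder input
    gpu_image = preprocess(pil_image).unsqueeze(0).type(torch.float16)  # (1, 3, 384, 384)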
@@ -116,22 +122,23 @@ class InterrogateModels:
             caption = self.generate_caption(pil_image)
             res = caption
 
-            images = self.clip_preprocess(pil_image).unsqueeze(0).to(shared.device)
+            images = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(shared.device)
 
-            with torch.no_grad():
-                image_features = self.clip_model.encode_image(images).float()
+            precision_scope = torch.autocast if shared.cmd_opts.precision == "autocast" else contextlib.nullcontext
+            with torch.no_grad(), precision_scope("cuda"):
+                image_features = self.clip_model.encode_image(images).type(self.dtype)
 
-            image_features /= image_features.norm(dim=-1, keepdim=True)
+                image_features /= image_features.norm(dim=-1, keepdim=True)
 
-            if shared.opts.interrogate_use_builtin_artists:
-                artist = self.rank(image_features, ["by " + artist.name for artist in shared.artist_db.artists])[0]
+                if shared.opts.interrogate_use_builtin_artists:
+                    artist = self.rank(image_features, ["by " + artist.name for artist in shared.artist_db.artists])[0]
 
-                res += ", " + artist[0]
+                    res += ", " + artist[0]
 
-            for name, topn, items in self.categories:
-                matches = self.rank(image_features, items, top_count=topn)
-                for match, score in matches:
-                    res += ", " + match
+                for name, topn, items in self.categories:
+                    matches = self.rank(image_features, items, top_count=topn)
+                    for match, score in matches:
+                        res += ", " + match
 
         except Exception:
             print(f"Error interrogating", file=sys.stderr)