author AUTOMATIC1111 <16777216c@gmail.com> 2023-05-10 18:24:18 +0000
committer GitHub <noreply@github.com> 2023-05-10 18:24:18 +0000
commit 5abecea34cd98537f006c5e9a197acd1fe9db023 (patch)
tree 98248bc21aa4ad9715205f0a65a654532c6cfcc0 /modules/interrogate.py
parent f5ea1e9d928e0d45b3ebcd8ddd1cacbc6a96e184 (diff)
parent 3ec7b705c78b7aca9569c92a419837352c7a4ec6 (diff)
Merge pull request #10259 from AUTOMATIC1111/ruff
Ruff
Diffstat (limited to 'modules/interrogate.py')
-rw-r--r--  modules/interrogate.py | 7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/modules/interrogate.py b/modules/interrogate.py
index 9f7d657f..111b1322 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -11,7 +11,6 @@ import torch.hub
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
-import modules.shared as shared
from modules import devices, paths, shared, lowvram, modelloader, errors
blip_image_eval_size = 384
@@ -160,7 +159,7 @@ class InterrogateModels:
text_array = text_array[0:int(shared.opts.interrogate_clip_dict_limit)]
top_count = min(top_count, len(text_array))
- text_tokens = clip.tokenize([text for text in text_array], truncate=True).to(devices.device_interrogate)
+ text_tokens = clip.tokenize(list(text_array), truncate=True).to(devices.device_interrogate)
text_features = self.clip_model.encode_text(text_tokens).type(self.dtype)
text_features /= text_features.norm(dim=-1, keepdim=True)
@@ -208,8 +207,8 @@ class InterrogateModels:
image_features /= image_features.norm(dim=-1, keepdim=True)
- for name, topn, items in self.categories():
- matches = self.rank(image_features, items, top_count=topn)
+ for cat in self.categories():
+ matches = self.rank(image_features, cat.items, top_count=cat.topn)
for match, score in matches:
if shared.opts.interrogate_return_ranks:
res += f", ({match}:{score/100:.3f})"