path: root/modules/interrogate.py
author    Dynamic <bradje@naver.com>    2022-10-23 13:36:56 +0000
committer GitHub <noreply@github.com>   2022-10-23 13:36:56 +0000
commit    660ae690bd7107b78aac6413e1370f8cd72676bc (patch)
tree      b666cfd0872687ccd293a41d9d0a90fcdfe1ea0a /modules/interrogate.py
parent    21364c5c39b269497944b56dd6664792d779333b (diff)
parent    6bd6154a92eb05c80d66df661a38f8b70cc13729 (diff)
Merge branch 'AUTOMATIC1111:master' into kr-localization
Diffstat (limited to 'modules/interrogate.py')
-rw-r--r--  modules/interrogate.py  12
1 file changed, 9 insertions, 3 deletions
diff --git a/modules/interrogate.py b/modules/interrogate.py
index 64b91eb4..65b05d34 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -28,9 +28,11 @@ class InterrogateModels:
     clip_preprocess = None
     categories = None
     dtype = None
+    running_on_cpu = None
 
     def __init__(self, content_dir):
         self.categories = []
+        self.running_on_cpu = devices.device_interrogate == torch.device("cpu")
 
         if os.path.exists(content_dir):
             for filename in os.listdir(content_dir):
@@ -53,7 +55,11 @@ class InterrogateModels:
     def load_clip_model(self):
         import clip
 
-        model, preprocess = clip.load(clip_model_name)
+        if self.running_on_cpu:
+            model, preprocess = clip.load(clip_model_name, device="cpu")
+        else:
+            model, preprocess = clip.load(clip_model_name)
+
         model.eval()
         model = model.to(devices.device_interrogate)
 
@@ -62,14 +68,14 @@
     def load(self):
         if self.blip_model is None:
            self.blip_model = self.load_blip_model()
-            if not shared.cmd_opts.no_half:
+            if not shared.cmd_opts.no_half and not self.running_on_cpu:
                self.blip_model = self.blip_model.half()
 
        self.blip_model = self.blip_model.to(devices.device_interrogate)
 
        if self.clip_model is None:
            self.clip_model, self.clip_preprocess = self.load_clip_model()
-            if not shared.cmd_opts.no_half:
+            if not shared.cmd_opts.no_half and not self.running_on_cpu:
                self.clip_model = self.clip_model.half()
 
        self.clip_model = self.clip_model.to(devices.device_interrogate)
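
The change boils down to one guard: when the interrogate device is the CPU, load CLIP directly on the CPU and skip the fp16 conversion, since half-precision weights are only useful (and reliably supported) on the GPU. Below is a minimal standalone sketch of that pattern, assuming torch and the OpenAI CLIP package are installed; the load_clip_for_device helper, its default model name, and its device argument are illustrative assumptions, not part of the repository's code.

# Sketch of the CPU/GPU guard introduced by this patch (illustrative, not webui API).
import torch
import clip  # OpenAI CLIP package


def load_clip_for_device(model_name="ViT-L/14", device=None):
    """Load a CLIP model, keeping fp32 weights when running on the CPU."""
    if device is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
    running_on_cpu = torch.device(device) == torch.device("cpu")

    if running_on_cpu:
        # Pass device="cpu" so clip.load() returns fp32 weights instead of
        # placing the model on a GPU that is not being used here.
        model, preprocess = clip.load(model_name, device="cpu")
    else:
        model, preprocess = clip.load(model_name)
        # On a GPU, halving the weights mirrors the webui default
        # (skipped there only when --no-half is passed).
        model = model.half()

    model.eval()
    return model.to(device), preprocess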