diff options
author | Patryk Wychowaniec <pwychowaniec@pm.me> | 2022-10-20 17:22:59 +0000 |
---|---|---|
committer | AUTOMATIC1111 <16777216c@gmail.com> | 2022-10-21 06:52:12 +0000 |
commit | 7157e5d064741fa57ca81a2c6432a651f21ee82f (patch) | |
tree | b55a09468a5611fc90d7487512db3a077b278d0b /modules/interrogate.py | |
parent | 5f4fec307c14dd7f817244ffa92e8a4a64abed0b (diff) | |
download | stable-diffusion-webui-gfx803-7157e5d064741fa57ca81a2c6432a651f21ee82f.tar.gz stable-diffusion-webui-gfx803-7157e5d064741fa57ca81a2c6432a651f21ee82f.tar.bz2 stable-diffusion-webui-gfx803-7157e5d064741fa57ca81a2c6432a651f21ee82f.zip |
interrogate: Fix CLIP-interrogation on CPU
Currently, trying to perform CLIP interrogation on a CPU fails, saying:
```
RuntimeError: "slow_conv2d_cpu" not implemented for 'Half'
```
This merge request fixes this issue by detecting whether the target
device is CPU and, if so, force-enabling `--no-half` and passing
`device="cpu"` to `clip.load()` (which then does some extra tricks to
ensure it works correctly on CPU).
Diffstat (limited to 'modules/interrogate.py')
-rw-r--r-- | modules/interrogate.py | 12 |
1 file changed, 9 insertions, 3 deletions
diff --git a/modules/interrogate.py b/modules/interrogate.py index 64b91eb4..65b05d34 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -28,9 +28,11 @@ class InterrogateModels: clip_preprocess = None
categories = None
dtype = None
+ running_on_cpu = None
def __init__(self, content_dir):
self.categories = []
+ self.running_on_cpu = devices.device_interrogate == torch.device("cpu")
if os.path.exists(content_dir):
for filename in os.listdir(content_dir):
@@ -53,7 +55,11 @@ class InterrogateModels: def load_clip_model(self):
import clip
- model, preprocess = clip.load(clip_model_name)
+ if self.running_on_cpu:
+ model, preprocess = clip.load(clip_model_name, device="cpu")
+ else:
+ model, preprocess = clip.load(clip_model_name)
+
model.eval()
model = model.to(devices.device_interrogate)
@@ -62,14 +68,14 @@ class InterrogateModels: def load(self):
if self.blip_model is None:
self.blip_model = self.load_blip_model()
- if not shared.cmd_opts.no_half:
+ if not shared.cmd_opts.no_half and not self.running_on_cpu:
self.blip_model = self.blip_model.half()
self.blip_model = self.blip_model.to(devices.device_interrogate)
if self.clip_model is None:
self.clip_model, self.clip_preprocess = self.load_clip_model()
- if not shared.cmd_opts.no_half:
+ if not shared.cmd_opts.no_half and not self.running_on_cpu:
self.clip_model = self.clip_model.half()
self.clip_model = self.clip_model.to(devices.device_interrogate)
|