author     Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>  2024-01-09 14:11:44 +0000
committer  Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>  2024-01-09 14:11:44 +0000
commit     209c26a1cb9e4be357ab3c5e7613caf3cbc26183 (patch)
tree       977aded65d45eb72b9daadf63fa69d4960d6e004 /modules/devices.py
parent     6869d95890849c9b209bb66774539bfdf870df2c (diff)
improve efficiency and support more devices
Diffstat (limited to 'modules/devices.py')
-rw-r--r--  modules/devices.py  60
1 file changed, 43 insertions(+), 17 deletions(-)
diff --git a/modules/devices.py b/modules/devices.py
index ff279ac5..6edfb127 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -110,6 +110,7 @@ device_codeformer: torch.device = None
 dtype: torch.dtype = torch.float16
 dtype_vae: torch.dtype = torch.float16
 dtype_unet: torch.dtype = torch.float16
+dtype_inference: torch.dtype = torch.float16
 
 unet_needs_upcast = False
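
The new dtype_inference global separates the dtype weights are stored in from the dtype the surrounding pipeline computes in (for example FP8 storage with FP16 inference). A minimal standalone sketch of that storage/compute split, assuming a PyTorch build with float8 support (torch >= 2.1); the module and dtype choices here are illustrative, not taken from this commit:

    import torch

    storage_dtype = torch.float8_e4m3fn   # compact weight storage (assumes float8 support)
    inference_dtype = torch.float16       # dtype the rest of the pipeline computes in

    linear = torch.nn.Linear(4, 4).to(storage_dtype)   # keep weights in fp8

    x = torch.randn(1, 4, dtype=inference_dtype)
    linear.to(inference_dtype)            # upcast only for the forward pass
    y = linear(x)                         # compute in fp16
    linear.to(storage_dtype)              # drop back to fp8 storage
    assert y.dtype == inference_dtype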
@@ -131,21 +132,49 @@ patch_module_list = [
 ]
 
 
-def manual_cast_forward(self, *args, **kwargs):
-    org_dtype = torch_utils.get_param(self).dtype
-    self.to(dtype)
-    args = [arg.to(dtype) if isinstance(arg, torch.Tensor) else arg for arg in args]
-    kwargs = {k: v.to(dtype) if isinstance(v, torch.Tensor) else v for k, v in kwargs.items()}
-    result = self.org_forward(*args, **kwargs)
-    self.to(org_dtype)
-    return result
+def manual_cast_forward(target_dtype):
+    def forward_wrapper(self, *args, **kwargs):
+        org_dtype = torch_utils.get_param(self).dtype
+        if not target_dtype == org_dtype == dtype_inference:
+            self.to(target_dtype)
+            args = [
+                arg.to(target_dtype)
+                if isinstance(arg, torch.Tensor)
+                else arg
+                for arg in args
+            ]
+            kwargs = {
+                k: v.to(target_dtype)
+                if isinstance(v, torch.Tensor)
+                else v
+                for k, v in kwargs.items()
+            }
+
+        result = self.org_forward(*args, **kwargs)
+        self.to(org_dtype)
+
+        if target_dtype != dtype_inference:
+            if isinstance(result, tuple):
+                result = tuple(
+                    i.to(dtype_inference)
+                    if isinstance(i, torch.Tensor)
+                    else i
+                    for i in result
+                )
+            elif isinstance(result, torch.Tensor):
+                result = result.to(dtype_inference)
+        return result
+    return forward_wrapper
 
 
 @contextlib.contextmanager
-def manual_cast():
+def manual_cast(target_dtype):
     for module_type in patch_module_list:
         org_forward = module_type.forward
-        module_type.forward = manual_cast_forward
+        if module_type == torch.nn.MultiheadAttention and has_xpu():
+            module_type.forward = manual_cast_forward(torch.float32)
+        else:
+            module_type.forward = manual_cast_forward(target_dtype)
         module_type.org_forward = org_forward
     try:
         yield None
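
manual_cast_forward is now a factory that closes over target_dtype, so manual_cast can patch different module classes to different compute dtypes (torch.float32 for torch.nn.MultiheadAttention on XPU, target_dtype for everything else). A self-contained sketch of the same patch-and-restore pattern, using a toy module list in place of the repo's patch_module_list; unlike the real code it does not restore the module's original storage dtype afterwards:

    import contextlib
    import torch

    def casted_forward(target_dtype):
        # Closure mirrors manual_cast_forward: each patched class can get its own dtype.
        def forward_wrapper(self, *args, **kwargs):
            self.to(target_dtype)
            args = [a.to(target_dtype) if isinstance(a, torch.Tensor) else a for a in args]
            return self.org_forward(*args, **kwargs)
        return forward_wrapper

    @contextlib.contextmanager
    def patch_forwards(target_dtype, module_types=(torch.nn.Linear,)):
        for module_type in module_types:
            module_type.org_forward = module_type.forward
            module_type.forward = casted_forward(target_dtype)
        try:
            yield
        finally:
            for module_type in module_types:   # restore even if the body raises
                module_type.forward = module_type.org_forward

    m = torch.nn.Linear(2, 2)                  # fp32 weights by default
    with patch_forwards(torch.float16):
        out = m(torch.randn(1, 2))             # forward runs in fp16
    assert out.dtype == torch.float16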
@@ -161,15 +190,12 @@ def autocast(disable=False):
         return contextlib.nullcontext()
 
     if fp8 and device==cpu:
         return torch.autocast("cpu", dtype=torch.bfloat16, enabled=True)
 
-    if fp8 and (dtype == torch.float32 or shared.cmd_opts.precision == "full" or cuda_no_autocast()):
-        return manual_cast()
-
-    if has_mps() and shared.cmd_opts.precision != "full":
-        return manual_cast()
-
-    if dtype == torch.float32 or shared.cmd_opts.precision == "full":
+    if dtype == torch.float32 and shared.cmd_opts.precision == "full":
         return contextlib.nullcontext()
 
+    if has_xpu() or has_mps() or cuda_no_autocast():
+        return manual_cast(dtype_inference)
+
     return torch.autocast("cuda")
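
With this change the dispatch in autocast() reads top to bottom: return a nullcontext only when the model dtype is float32 and --precision full is set (nothing needs casting), take the manual-cast path on devices where native autocast is missing or unreliable (XPU, MPS, or CUDA with autocast disabled), and fall back to native torch.autocast("cuda") otherwise. A condensed sketch of that ordering, with the device checks reduced to a boolean parameter and manual_cast passed in to stand for the context manager patched above:

    import contextlib
    import torch

    def pick_autocast(dtype, dtype_inference, precision_full, needs_manual_cast, manual_cast):
        # Mirrors the order of checks in devices.autocast() after this commit.
        if dtype == torch.float32 and precision_full:
            return contextlib.nullcontext()      # full precision everywhere: no casting
        if needs_manual_cast:                    # has_xpu() or has_mps() or cuda_no_autocast()
            return manual_cast(dtype_inference)  # patch module forwards by hand
        return torch.autocast("cuda")            # native mixed precision on CUDA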