| author | Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> | 2023-12-03 02:54:54 +0000 |
|---|---|---|
| committer | Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> | 2023-12-03 02:54:54 +0000 |
| commit | 9a15ae2a92e55d614fe515cd0a104d90b854b23f (patch) | |
| tree | 7977ea1ea27cfc1d21e652433f8bbc0faec0ddc9 /modules/xpu_specific.py | |
| parent | 50a21cb09fe3e9ea2d4fe058e0484e192c8a86e3 (diff) | |
| parent | ac02216e540cd581f9169c6c791e55721e3117b0 (diff) | |
Merge branch 'dev' into test-fp8
Diffstat (limited to 'modules/xpu_specific.py')
-rw-r--r-- | modules/xpu_specific.py | 50
1 file changed, 50 insertions(+), 0 deletions(-)
```diff
diff --git a/modules/xpu_specific.py b/modules/xpu_specific.py
new file mode 100644
index 00000000..d933c790
--- /dev/null
+++ b/modules/xpu_specific.py
@@ -0,0 +1,50 @@
+from modules import shared
+from modules.sd_hijack_utils import CondFunc
+
+has_ipex = False
+try:
+    import torch
+    import intel_extension_for_pytorch as ipex  # noqa: F401
+    has_ipex = True
+except Exception:
+    pass
+
+
+def check_for_xpu():
+    return has_ipex and hasattr(torch, 'xpu') and torch.xpu.is_available()
+
+
+def get_xpu_device_string():
+    if shared.cmd_opts.device_id is not None:
+        return f"xpu:{shared.cmd_opts.device_id}"
+    return "xpu"
+
+
+def torch_xpu_gc():
+    with torch.xpu.device(get_xpu_device_string()):
+        torch.xpu.empty_cache()
+
+
+has_xpu = check_for_xpu()
+
+if has_xpu:
+    # W/A for https://github.com/intel/intel-extension-for-pytorch/issues/452: torch.Generator API doesn't support XPU device
+    CondFunc('torch.Generator',
+        lambda orig_func, device=None: torch.xpu.Generator(device),
+        lambda orig_func, device=None: device is not None and device.type == "xpu")
+
+    # W/A for some OPs that could not handle different input dtypes
+    CondFunc('torch.nn.functional.layer_norm',
+        lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs:
+        orig_func(input.to(weight.data.dtype), normalized_shape, weight, *args, **kwargs),
+        lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs:
+        weight is not None and input.dtype != weight.data.dtype)
+    CondFunc('torch.nn.modules.GroupNorm.forward',
+        lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
+        lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
+    CondFunc('torch.nn.modules.linear.Linear.forward',
+        lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
+        lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
+    CondFunc('torch.nn.modules.conv.Conv2d.forward',
+        lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
+        lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
```
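Every hijack in this file follows one pattern: `CondFunc(path, sub_func, cond_func)` replaces the callable at a dotted path with a wrapper that diverts to `sub_func` only when `cond_func` reports that the problematic case applies, and otherwise calls the original unchanged. Below is a minimal sketch of that idea, written from the call sites visible in the diff; `cond_func_sketch` is a hypothetical stand-in for illustration, not the actual `modules.sd_hijack_utils.CondFunc` implementation.

```python
import importlib


def cond_func_sketch(orig_path, sub_func, cond_func):
    """Conditionally monkey-patch the callable at a dotted path (sketch).

    When cond_func(orig, *args, **kwargs) is truthy, sub_func(orig,
    *args, **kwargs) runs in place of the original callable; otherwise
    the original runs unchanged. Assumes orig_path has an importable
    module prefix.
    """
    parts = orig_path.split('.')

    # Import the longest prefix of the path that is a real module,
    # e.g. 'torch.nn.modules' for 'torch.nn.modules.GroupNorm.forward'.
    for i in range(len(parts), 0, -1):
        try:
            owner = importlib.import_module('.'.join(parts[:i]))
            remainder = parts[i:]
            break
        except ImportError:
            continue

    # Walk any remaining attributes (e.g. the GroupNorm class) down to
    # the object that owns the target callable.
    for name in remainder[:-1]:
        owner = getattr(owner, name)
    attr = remainder[-1]
    orig = getattr(owner, attr)

    def wrapper(*args, **kwargs):
        # Divert only when the guard condition holds.
        if cond_func(orig, *args, **kwargs):
            return sub_func(orig, *args, **kwargs)
        return orig(*args, **kwargs)

    setattr(owner, attr, wrapper)
```

Used the same way as the `layer_norm` hijack in the diff, the cast to the weight's dtype happens only when input and weight dtypes actually disagree, so the common matching-dtype path pays no extra conversion cost:

```python
cond_func_sketch(
    'torch.nn.functional.layer_norm',
    lambda orig, input, shape=None, weight=None, *args, **kwargs:
        orig(input.to(weight.data.dtype), shape, weight, *args, **kwargs),
    lambda orig, input, shape=None, weight=None, *args, **kwargs:
        weight is not None and input.dtype != weight.data.dtype,
)
```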