diff options
author | brkirch <brkirch@users.noreply.github.com> | 2023-03-24 07:04:47 +0000 |
---|---|---|
committer | brkirch <brkirch@users.noreply.github.com> | 2023-03-24 08:04:22 +0000 |
commit | 27fe3eb6a9d8f866af8b90dff18f4445124702da (patch) | |
tree | 1ba1eb23105a690cec2e4c8dd391b82a169ccd8b | |
parent | c5142e2fbecb50531a55aa804ea132c5d870858c (diff) | |
download | stable-diffusion-webui-gfx803-27fe3eb6a9d8f866af8b90dff18f4445124702da.tar.gz stable-diffusion-webui-gfx803-27fe3eb6a9d8f866af8b90dff18f4445124702da.tar.bz2 stable-diffusion-webui-gfx803-27fe3eb6a9d8f866af8b90dff18f4445124702da.zip |
Add workaround for MPS layer_norm on PyTorch 2.0
On PyTorch 2.0 with MPS, layer_norm only accepts float32 inputs. This was fixed shortly after 2.0 was finalized, so the workaround can be applied with an exact version match.
-rw-r--r-- | modules/mac_specific.py | 4 |
1 file changed, 3 insertions, 1 deletion
diff --git a/modules/mac_specific.py b/modules/mac_specific.py index 3a170f60..6fe8dea0 100644 --- a/modules/mac_specific.py +++ b/modules/mac_specific.py @@ -54,4 +54,6 @@ if has_mps: CondFunc('torch.cumsum', cumsum_fix_func, None) CondFunc('torch.Tensor.cumsum', cumsum_fix_func, None) CondFunc('torch.narrow', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).clone(), None) - + if version.parse(torch.__version__) == version.parse("2.0"): + # MPS workaround for https://github.com/pytorch/pytorch/issues/96113 + CondFunc('torch.nn.functional.layer_norm', lambda orig_func, x, normalized_shape, weight, bias, eps, **kwargs: orig_func(x.float(), normalized_shape, weight.float() if weight is not None else None, bias.float() if bias is not None else bias, eps).to(x.dtype), lambda *args, **kwargs: len(args) == 6) |