aboutsummaryrefslogtreecommitdiffstats
path: root/modules/devices.py
diff options
context:
space:
mode:
authorAUTOMATIC1111 <16777216c@gmail.com>2023-01-19 10:18:34 +0000
committerGitHub <noreply@github.com>2023-01-19 10:18:34 +0000
commitaa60fc6660df66fd771d94a226928fcd8fe5f0e5 (patch)
treea30063fa1d59c3b46b64805410c6eb2ce766d0fe /modules/devices.py
parent0f9cacaa0e47dceef19c75eb68a6a05cba185dd5 (diff)
parenta255dac4f8c5ee11c15b634563d3df513f1834b4 (diff)
downloadstable-diffusion-webui-gfx803-aa60fc6660df66fd771d94a226928fcd8fe5f0e5.tar.gz
stable-diffusion-webui-gfx803-aa60fc6660df66fd771d94a226928fcd8fe5f0e5.tar.bz2
stable-diffusion-webui-gfx803-aa60fc6660df66fd771d94a226928fcd8fe5f0e5.zip
Merge pull request #6922 from brkirch/cumsum-fix
Improve cumsum fix for MPS
Diffstat (limited to 'modules/devices.py')
-rw-r--r--modules/devices.py11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/modules/devices.py b/modules/devices.py
index 206184fb..524ec7af 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -169,8 +169,10 @@ orig_Tensor_cumsum = torch.Tensor.cumsum
def cumsum_fix(input, cumsum_func, *args, **kwargs):
if input.device.type == 'mps':
output_dtype = kwargs.get('dtype', input.dtype)
- if any(output_dtype == broken_dtype for broken_dtype in [torch.bool, torch.int8, torch.int16, torch.int64]):
+ if output_dtype == torch.int64:
return cumsum_func(input.cpu(), *args, **kwargs).to(input.device)
+ elif cumsum_needs_bool_fix and output_dtype == torch.bool or cumsum_needs_int_fix and (output_dtype == torch.int8 or output_dtype == torch.int16):
+ return cumsum_func(input.to(torch.int32), *args, **kwargs).to(torch.int64)
return cumsum_func(input, *args, **kwargs)
@@ -181,9 +183,10 @@ if has_mps():
torch.nn.functional.layer_norm = layer_norm_fix
torch.Tensor.numpy = numpy_fix
elif version.parse(torch.__version__) > version.parse("1.13.1"):
- if not torch.Tensor([1,2]).to(torch.device("mps")).equal(torch.Tensor([1,1]).to(torch.device("mps")).cumsum(0, dtype=torch.int16)):
- torch.cumsum = lambda input, *args, **kwargs: ( cumsum_fix(input, orig_cumsum, *args, **kwargs) )
- torch.Tensor.cumsum = lambda self, *args, **kwargs: ( cumsum_fix(self, orig_Tensor_cumsum, *args, **kwargs) )
+ cumsum_needs_int_fix = not torch.Tensor([1,2]).to(torch.device("mps")).equal(torch.ShortTensor([1,1]).to(torch.device("mps")).cumsum(0))
+ cumsum_needs_bool_fix = not torch.BoolTensor([True,True]).to(device=torch.device("mps"), dtype=torch.int64).equal(torch.BoolTensor([True,False]).to(torch.device("mps")).cumsum(0))
+ torch.cumsum = lambda input, *args, **kwargs: ( cumsum_fix(input, orig_cumsum, *args, **kwargs) )
+ torch.Tensor.cumsum = lambda self, *args, **kwargs: ( cumsum_fix(self, orig_Tensor_cumsum, *args, **kwargs) )
orig_narrow = torch.narrow
torch.narrow = lambda *args, **kwargs: ( orig_narrow(*args, **kwargs).clone() )