author | AUTOMATIC1111 <16777216c@gmail.com> | 2023-08-24 08:09:04 +0000 |
---|---|---|
committer | AUTOMATIC1111 <16777216c@gmail.com> | 2023-08-24 08:09:04 +0000 |
commit | 189229bbf9276fb73e48c783856b02fc57ab5c9b (patch) | |
tree | 728b1ab97fec6d18a1ec687ba552ca83b0dcf109 /modules/mac_specific.py | |
parent | 31f2be3dcedf85c036c5f784c640208d122b62ed (diff) | |
parent | b6c02174050b2c5dd98bf24c797e85ff269516f5 (diff) | |
Merge branch 'dev' into release_candidate
Diffstat (limited to 'modules/mac_specific.py')
-rw-r--r-- | modules/mac_specific.py | 7 |
1 file changed, 2 insertions(+), 5 deletions(-)
```diff
diff --git a/modules/mac_specific.py b/modules/mac_specific.py
index 9ceb43ba..89256c5b 100644
--- a/modules/mac_specific.py
+++ b/modules/mac_specific.py
@@ -4,6 +4,7 @@ import torch
 import platform
 from modules.sd_hijack_utils import CondFunc
 from packaging import version
+from modules import shared
 
 
 log = logging.getLogger(__name__)
@@ -30,8 +31,7 @@ has_mps = check_for_mps()
 
 def torch_mps_gc() -> None:
     try:
-        from modules.shared import state
-        if state.current_latent is not None:
+        if shared.state.current_latent is not None:
             log.debug("`current_latent` is set, skipping MPS garbage collection")
             return
         from torch.mps import empty_cache
@@ -52,9 +52,6 @@ def cumsum_fix(input, cumsum_func, *args, **kwargs):
 
 
 if has_mps:
-    # MPS fix for randn in torchsde
-    CondFunc('torchsde._brownian.brownian_interval._randn', lambda _, size, dtype, device, seed: torch.randn(size, dtype=dtype, device=torch.device("cpu"), generator=torch.Generator(torch.device("cpu")).manual_seed(int(seed))).to(device), lambda _, size, dtype, device, seed: device.type == 'mps')
-
     if platform.mac_ver()[0].startswith("13.2."):
         # MPS workaround for https://github.com/pytorch/pytorch/issues/95188, thanks to danieldk (https://github.com/explosion/curated-transformers/pull/124)
         CondFunc('torch.nn.functional.linear', lambda _, input, weight, bias: (torch.matmul(input, weight.t()) + bias) if bias is not None else torch.matmul(input, weight.t()), lambda _, input, weight, bias: input.numel() > 10485760)
```
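The first hunk replaces a function-local `from modules.shared import state` with a module-level `from modules import shared` plus an attribute lookup at call time. The commit doesn't spell out the motivation, but a common reason for this pattern is that `from X import name` copies the current binding, so the copy won't see a later reassignment of `X.name`, while `X.name` resolved at call time always will. A minimal, self-contained sketch of the difference; the `shared`/`state` names below are stand-ins for illustration, not the actual webui modules:

```python
import types

# Stand-in for a module whose `state` attribute is assigned during startup.
shared = types.ModuleType("shared")
shared.state = None  # not populated yet

# Style A: copy the binding, as `from modules.shared import state` would.
state = shared.state  # snapshot of the current value: None

# Startup code later rebinds the attribute on the module.
shared.state = types.SimpleNamespace(current_latent="tensor...")

print(state)                        # None -> the copied binding went stale
print(shared.state.current_latent)  # "tensor..." -> live attribute lookup

# Style B: keep the module and resolve the attribute when needed,
# matching the patched `shared.state.current_latent` check in the diff.
def should_skip_gc() -> bool:
    return shared.state is not None and shared.state.current_latent is not None

print(should_skip_gc())  # True
```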
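The deleted hunk drops an older MPS workaround that routed `torchsde`'s Brownian-noise `_randn` through the CPU: draw the values with an explicitly seeded CPU generator, then move the tensor to the target device. The commit doesn't say why the fix is no longer needed, but the trick itself is useful wherever a backend's native seeding is unreliable. A runnable distillation of the removed lambda:

```python
import torch

def seeded_randn(size, dtype, device, seed):
    # Generate on CPU with a dedicated generator seeded from `seed`, so the
    # draw is identical no matter which backend `device` points at, then
    # transfer the result (mirrors the removed CondFunc substitute for
    # torchsde._brownian.brownian_interval._randn).
    cpu = torch.device("cpu")
    generator = torch.Generator(cpu).manual_seed(int(seed))
    return torch.randn(size, dtype=dtype, device=cpu, generator=generator).to(device)

# Same seed -> same values, regardless of target device:
a = seeded_randn((2, 3), torch.float32, torch.device("cpu"), 1234)
b = seeded_randn((2, 3), torch.float32, torch.device("cpu"), 1234)
assert torch.equal(a, b)
```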
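Both `CondFunc` calls in the diff (the removed torchsde one and the surviving `torch.nn.functional.linear` one) follow the same pattern: hijack a function at a dotted path so a substitute runs only when a predicate holds. The sketch below is a simplified reimplementation of that conditional-patching idea, not the actual `modules.sd_hijack_utils.CondFunc`; the real one is a class, but it likewise passes the original function as the first argument to both the substitute and the predicate, which is why the lambdas in the diff take a leading `_`:

```python
import importlib

def cond_func(orig_path: str, sub_func, predicate):
    """Monkey-patch the callable at `orig_path` so that `sub_func(orig, ...)`
    runs when `predicate(orig, ...)` is truthy, and the original runs
    otherwise. Illustrative sketch only."""
    module_path, attr = orig_path.rsplit(".", 1)
    module = importlib.import_module(module_path)
    orig = getattr(module, attr)

    def wrapper(*args, **kwargs):
        if predicate(orig, *args, **kwargs):
            return sub_func(orig, *args, **kwargs)
        return orig(*args, **kwargs)

    setattr(module, attr, wrapper)

# Toy usage: substitute math.pow only for large exponents.
import math
cond_func(
    "math.pow",
    lambda orig, x, y: float("inf"),  # substitute
    lambda orig, x, y: y > 100,       # predicate: when to substitute
)
print(math.pow(2, 3))    # 8.0 -- predicate false, original runs
print(math.pow(2, 200))  # inf -- predicate true, substitute runs
```

The same shape maps directly onto the 13.2 workaround kept by this merge: the predicate is `input.numel() > 10485760`, and the substitute reroutes the affected `linear` calls through `torch.matmul`.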