author    AUTOMATIC1111 <16777216c@gmail.com>    2023-05-10 18:24:18 +0000
committer GitHub <noreply@github.com>            2023-05-10 18:24:18 +0000
commit    5abecea34cd98537f006c5e9a197acd1fe9db023 (patch)
tree      98248bc21aa4ad9715205f0a65a654532c6cfcc0 /modules/sd_hijack.py
parent    f5ea1e9d928e0d45b3ebcd8ddd1cacbc6a96e184 (diff)
parent    3ec7b705c78b7aca9569c92a419837352c7a4ec6 (diff)
Merge pull request #10259 from AUTOMATIC1111/ruff
Ruff
Diffstat (limited to 'modules/sd_hijack.py')
 modules/sd_hijack.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index f4bb0266..e374aeb8 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -3,7 +3,7 @@ from torch.nn.functional import silu
from types import MethodType
import modules.textual_inversion.textual_inversion
-from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint
+from modules import devices, sd_hijack_optimizations, shared
from modules.hypernetworks import hypernetwork
from modules.shared import cmd_opts
from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr
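
This hunk drops sd_hijack_checkpoint from the import list, presumably because nothing in the module references it any longer; that is exactly what Ruff's F401 rule (imported but unused) reports. A minimal, self-contained illustration of the rule, unrelated to the repo's code:

import sys
import os  # F401: 'os' imported but unused -- the same class of dead import removed above

print(sys.version)  # only sys is ever referenced, so the os import is dead code
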
@@ -37,7 +37,7 @@ def apply_optimizations():
optimization_method = None
- can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(getattr(torch.nn.functional, "scaled_dot_product_attention")) # not everyone has torch 2.x to use sdp
+ can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention) # not everyone has torch 2.x to use sdp
if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)):
print("Applying xformers cross attention optimization.")
@@ -118,7 +118,7 @@ def weighted_forward(sd_model, x, c, w, *args, **kwargs):
try:
#Delete temporary weights if appended
del sd_model._custom_loss_weight
- except AttributeError as e:
+ except AttributeError:
pass
#If we have an old loss function, reset the loss function to the original one
@@ -133,7 +133,7 @@ def apply_weighted_forward(sd_model):
def undo_weighted_forward(sd_model):
try:
del sd_model.weighted_forward
- except AttributeError as e:
+ except AttributeError:
pass
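
The last two hunks make the same fix: the bound exception was never used, so "except AttributeError as e:" trips Ruff's F841 rule (local variable assigned but never used), and the binding is dropped. The try/del/except pattern itself can also be written with contextlib.suppress from the standard library; a sketch using a stand-in object (not the repo's code):

import contextlib

class DummyModel:
    pass

model = DummyModel()
model.weighted_forward = lambda *args: None

# Equivalent to:  try: del model.weighted_forward / except AttributeError: pass
with contextlib.suppress(AttributeError):
    del model.weighted_forward

with contextlib.suppress(AttributeError):
    del model.weighted_forward  # attribute already gone; the error is swallowed
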