From 2582a0fd3b3e91c5fba9e5e561cbdf5fee835063 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 18 May 2023 22:48:28 +0300
Subject: make it possible for scripts to add cross attention optimizations
 add UI selection for cross attention optimization

---
 modules/sd_hijack_optimizations.py | 135 ++++++++++++++++++++++++++++++++++++-
 1 file changed, 132 insertions(+), 3 deletions(-)

(limited to 'modules/sd_hijack_optimizations.py')

diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index f00fe55c..1c5b709b 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -9,10 +9,139 @@ from torch import einsum
 from ldm.util import default
 from einops import rearrange
 
-from modules import shared, errors, devices
+from modules import shared, errors, devices, sub_quadratic_attention, script_callbacks
 from modules.hypernetworks import hypernetwork
 
-from .sub_quadratic_attention import efficient_dot_product_attention
+import ldm.modules.attention
+import ldm.modules.diffusionmodules.model
+
+diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.AttnBlock.forward
+
+
+class SdOptimization:
+    def __init__(self, name, label=None, cmd_opt=None):
+        self.name = name
+        self.label = label
+        self.cmd_opt = cmd_opt
+
+    def title(self):
+        if self.label is None:
+            return self.name
+
+        return f"{self.name} - {self.label}"
+
+    def is_available(self):
+        return True
+
+    def priority(self):
+        return 0
+
+    def apply(self):
+        pass
+
+    def undo(self):
+        ldm.modules.attention.CrossAttention.forward = hypernetwork.attention_CrossAttention_forward
+        ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward
+
+
+class SdOptimizationXformers(SdOptimization):
+    def __init__(self):
+        super().__init__("xformers", cmd_opt="xformers")
+
+    def is_available(self):
+        return shared.cmd_opts.force_enable_xformers or (shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0))
+
+    def priority(self):
+        return 100
+
+    def apply(self):
+        ldm.modules.attention.CrossAttention.forward = xformers_attention_forward
+        ldm.modules.diffusionmodules.model.AttnBlock.forward = xformers_attnblock_forward
+
+
+class SdOptimizationSdpNoMem(SdOptimization):
+    def __init__(self, name="sdp-no-mem", label="scaled dot product without memory efficient attention", cmd_opt="opt_sdp_no_mem_attention"):
+        super().__init__(name, label, cmd_opt)
+
+    def is_available(self):
+        return hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention)
+
+    def priority(self):
+        return 90
+
+    def apply(self):
+        ldm.modules.attention.CrossAttention.forward = scaled_dot_product_no_mem_attention_forward
+        ldm.modules.diffusionmodules.model.AttnBlock.forward = sdp_no_mem_attnblock_forward
+
+
+class SdOptimizationSdp(SdOptimizationSdpNoMem):
+    def __init__(self):
+        super().__init__("sdp", "scaled dot product", cmd_opt="opt_sdp_attention")
+
+    def priority(self):
+        return 80
+
+    def apply(self):
+        ldm.modules.attention.CrossAttention.forward = scaled_dot_product_attention_forward
+        ldm.modules.diffusionmodules.model.AttnBlock.forward = sdp_attnblock_forward
+
+
+class SdOptimizationSubQuad(SdOptimization):
+    def __init__(self):
+        super().__init__("sub-quadratic", cmd_opt="opt_sub_quad_attention")
+
+    def priority(self):
+        return 10
+
+    def apply(self):
+        ldm.modules.attention.CrossAttention.forward = sub_quad_attention_forward
+        ldm.modules.diffusionmodules.model.AttnBlock.forward = sub_quad_attnblock_forward
+
+
+class SdOptimizationV1(SdOptimization):
+    def __init__(self):
+        super().__init__("V1", "original v1", cmd_opt="opt_split_attention_v1")
+
+    def priority(self):
+        return 10
+
+    def apply(self):
+        ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward_v1
+
+
+class SdOptimizationInvokeAI(SdOptimization):
+    def __init__(self):
+        super().__init__("InvokeAI", cmd_opt="opt_split_attention_invokeai")
+
+    def priority(self):
+        return 1000 if not torch.cuda.is_available() else 10
+
+    def apply(self):
+        ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward_invokeAI
+
+
+class SdOptimizationDoggettx(SdOptimization):
+    def __init__(self):
+        super().__init__("Doggettx", cmd_opt="opt_split_attention")
+
+    def priority(self):
+        return 20
+
+    def apply(self):
+        ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward
+        ldm.modules.diffusionmodules.model.AttnBlock.forward = cross_attention_attnblock_forward
+
+
+def list_optimizers(res):
+    res.extend([
+        SdOptimizationXformers(),
+        SdOptimizationSdpNoMem(),
+        SdOptimizationSdp(),
+        SdOptimizationSubQuad(),
+        SdOptimizationV1(),
+        SdOptimizationInvokeAI(),
+        SdOptimizationDoggettx(),
+    ])
 
 
 if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers:
@@ -299,7 +428,7 @@ def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_
         kv_chunk_size = k_tokens
 
     with devices.without_autocast(disable=q.dtype == v.dtype):
-        return efficient_dot_product_attention(
+        return sub_quadratic_attention.efficient_dot_product_attention(
            q,
            k,
            v,
--
cgit v1.2.3
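
The commit above only adds the registry side inside sd_hijack_optimizations.py: each optimization becomes an SdOptimization object exposing name, title(), is_available(), priority(), apply() and undo(), and list_optimizers() fills a caller-supplied list. The code that consumes that list (the UI selection mentioned in the subject) lives outside this path-limited diff, so the following is only a sketch of how such a selector could work against the API as it stands in this commit (a later commit in this series turns priority into an attribute/property). The select_optimization helper and the selected_name parameter are illustrative names, not the webui's actual selection code.

# Hypothetical consumer-side sketch, not part of the patch.  It assumes the
# webui environment, where the patched module above is importable; the names
# used here are illustrative only.
from modules import sd_hijack_optimizations


def select_optimization(optimizers, selected_name=None):
    # Keep only optimizations that can actually run on this machine.
    available = [opt for opt in optimizers if opt.is_available()]

    # An explicit choice (e.g. from a UI setting) wins if it matches by name.
    if selected_name is not None:
        for opt in available:
            if opt.name == selected_name:
                return opt

    # Otherwise fall back to the highest-priority optimization.
    return max(available, key=lambda opt: opt.priority(), default=None)


optimizers = []
sd_hijack_optimizations.list_optimizers(optimizers)  # fills the list, as defined in the diff above

chosen = select_optimization(optimizers)
if chosen is not None:
    chosen.apply()  # monkey-patches CrossAttention.forward / AttnBlock.forward
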
From 8a3d232839930376898634f65bd6c16f3a41e5b4 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 19 May 2023 00:03:27 +0300
Subject: fix linter issues

---
 modules/sd_hijack_optimizations.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules/sd_hijack_optimizations.py')

diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 1c5b709b..db1e4367 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -9,7 +9,7 @@ from torch import einsum
 from ldm.util import default
 from einops import rearrange
 
-from modules import shared, errors, devices, sub_quadratic_attention, script_callbacks
+from modules import shared, errors, devices, sub_quadratic_attention
 from modules.hypernetworks import hypernetwork
 
 import ldm.modules.attention
--
cgit v1.2.3


From 1e5afd4fa9774314d649bddb3d18a9a75871902b Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Fri, 19 May 2023 09:17:36 +0300
Subject: Apply suggestions from code review

Co-authored-by: Aarni Koskela
---
 modules/sd_hijack_optimizations.py | 66 ++++++++++++++++----------------------
 1 file changed, 28 insertions(+), 38 deletions(-)

(limited to 'modules/sd_hijack_optimizations.py')

diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index db1e4367..0eb4c525 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -19,10 +19,10 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At
 
 
 class SdOptimization:
-    def __init__(self, name, label=None, cmd_opt=None):
-        self.name = name
-        self.label = label
-        self.cmd_opt = cmd_opt
+    name: str = None
+    label: str | None = None
+    cmd_opt: str | None = None
+    priority: int = 0
 
     def title(self):
         if self.label is None:
@@ -33,9 +33,6 @@ class SdOptimization:
     def is_available(self):
         return True
 
-    def priority(self):
-        return 0
-
     def apply(self):
         pass
 
@@ -45,41 +42,37 @@ class SdOptimization:
 
 
 class SdOptimizationXformers(SdOptimization):
-    def __init__(self):
-        super().__init__("xformers", cmd_opt="xformers")
+    name = "xformers"
+    cmd_opt = "xformers"
+    priority = 100
 
     def is_available(self):
         return shared.cmd_opts.force_enable_xformers or (shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0))
 
-    def priority(self):
-        return 100
-
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = xformers_attention_forward
         ldm.modules.diffusionmodules.model.AttnBlock.forward = xformers_attnblock_forward
 
 
 class SdOptimizationSdpNoMem(SdOptimization):
-    def __init__(self, name="sdp-no-mem", label="scaled dot product without memory efficient attention", cmd_opt="opt_sdp_no_mem_attention"):
-        super().__init__(name, label, cmd_opt)
+    name = "sdp-no-mem"
+    label = "scaled dot product without memory efficient attention"
+    cmd_opt = "opt_sdp_no_mem_attention"
+    priority = 90
 
     def is_available(self):
         return hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention)
 
-    def priority(self):
-        return 90
-
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = scaled_dot_product_no_mem_attention_forward
         ldm.modules.diffusionmodules.model.AttnBlock.forward = sdp_no_mem_attnblock_forward
 
 
 class SdOptimizationSdp(SdOptimizationSdpNoMem):
-    def __init__(self):
-        super().__init__("sdp", "scaled dot product", cmd_opt="opt_sdp_attention")
-
-    def priority(self):
-        return 80
+    name = "sdp"
+    label = "scaled dot product"
+    cmd_opt = "opt_sdp_attention"
+    priority = 80
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = scaled_dot_product_attention_forward
@@ -87,11 +80,9 @@ class SdOptimizationSdp(SdOptimizationSdpNoMem):
 
 
 class SdOptimizationSubQuad(SdOptimization):
-    def __init__(self):
-        super().__init__("sub-quadratic", cmd_opt="opt_sub_quad_attention")
-
-    def priority(self):
-        return 10
+    name = "sub-quadratic"
+    cmd_opt = "opt_sub_quad_attention"
+    priority = 10
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = sub_quad_attention_forward
@@ -99,20 +90,21 @@ class SdOptimizationSubQuad(SdOptimization):
 
 
 class SdOptimizationV1(SdOptimization):
-    def __init__(self):
-        super().__init__("V1", "original v1", cmd_opt="opt_split_attention_v1")
+    name = "V1"
+    label = "original v1"
+    cmd_opt = "opt_split_attention_v1"
+    priority = 10
 
-    def priority(self):
-        return 10
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward_v1
 
 
 class SdOptimizationInvokeAI(SdOptimization):
-    def __init__(self):
-        super().__init__("InvokeAI", cmd_opt="opt_split_attention_invokeai")
+    name = "InvokeAI"
+    cmd_opt = "opt_split_attention_invokeai"
 
+    @property
     def priority(self):
         return 1000 if not torch.cuda.is_available() else 10
 
@@ -121,11 +113,9 @@ class SdOptimizationInvokeAI(SdOptimization):
 
 
 class SdOptimizationDoggettx(SdOptimization):
-    def __init__(self):
-        super().__init__("Doggettx", cmd_opt="opt_split_attention")
-
-    def priority(self):
-        return 20
+    name = "Doggettx"
+    cmd_opt = "opt_split_attention"
+    priority = 20
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward
--
cgit v1.2.3
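
After the review pass in the last commit, an optimization is declared with class attributes (name, label, cmd_opt, priority) plus an apply() that monkey-patches ldm's CrossAttention.forward and AttnBlock.forward, which is what lets scripts add their own cross attention optimizations. A third-party registration might look roughly like the sketch below. The registration hook is assumed to be script_callbacks.on_list_optimizers from the same change set; it is not shown in this file-limited diff, and MyAttentionOpt, my_forward and the chosen priority value are made-up examples.

# Hypothetical extension-side sketch, not part of the patch.
import ldm.modules.attention

from modules import script_callbacks  # on_list_optimizers is assumed from the same change set
from modules.sd_hijack_optimizations import SdOptimization


def my_forward(self, x, context=None, mask=None):
    # A replacement CrossAttention.forward implementation would go here.
    raise NotImplementedError


class MyAttentionOpt(SdOptimization):
    name = "my-attention"
    label = "example third-party attention optimization"
    priority = 5  # below the built-ins, so it is only auto-selected as a fallback

    def apply(self):
        ldm.modules.attention.CrossAttention.forward = my_forward


def add_my_optimization(res):
    # Called with the same list that list_optimizers() in the diff above extends.
    res.append(MyAttentionOpt())


script_callbacks.on_list_optimizers(add_my_optimization)
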