From eed963e97261ee03bffe59e3d343dcf53d82dbfd Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 9 Aug 2023 16:54:49 +0300 Subject: Lora cache in memory --- extensions-builtin/Lora/scripts/lora_script.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'extensions-builtin/Lora/scripts/lora_script.py') diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py index cd28afc9..6ab8b6e7 100644 --- a/extensions-builtin/Lora/scripts/lora_script.py +++ b/extensions-builtin/Lora/scripts/lora_script.py @@ -65,6 +65,7 @@ shared.options_templates.update(shared.options_section(('extra_networks', "Extra "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"), "lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"), "lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}), + "lora_in_memory_limit": shared.OptionInfo(0, "Number of Lora networks to keep cached in memory", gr.Number, {"precision": 0}), })) @@ -121,3 +122,5 @@ def infotext_pasted(infotext, d): script_callbacks.on_infotext_pasted(infotext_pasted) + +shared.opts.onchange("lora_in_memory_limit", networks.purge_networks_from_memory) -- cgit v1.2.3 From bd4da4474bef5c9c1f690c62b971704ee73d2860 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Sun, 13 Aug 2023 02:27:39 +0800 Subject: Add extra norm module into built-in lora ext refer to LyCORIS 1.9.0.dev6 add new option and module for training norm layer (Which is reported to be good for style) --- extensions-builtin/Lora/network.py | 7 ++- extensions-builtin/Lora/network_norm.py | 29 ++++++++++++ extensions-builtin/Lora/networks.py | 64 ++++++++++++++++++++++---- extensions-builtin/Lora/scripts/lora_script.py | 16 +++++++ 4 files changed, 105 insertions(+), 11 deletions(-) create mode 100644 extensions-builtin/Lora/network_norm.py (limited to 'extensions-builtin/Lora/scripts/lora_script.py') diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py index 0a18d69e..b7b89061 100644 --- a/extensions-builtin/Lora/network.py +++ b/extensions-builtin/Lora/network.py @@ -133,7 +133,7 @@ class NetworkModule: return 1.0 - def finalize_updown(self, updown, orig_weight, output_shape): + def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None): if self.bias is not None: updown = updown.reshape(self.bias.shape) updown += self.bias.to(orig_weight.device, dtype=orig_weight.dtype) @@ -145,7 +145,10 @@ class NetworkModule: if orig_weight.size().numel() == updown.size().numel(): updown = updown.reshape(orig_weight.shape) - return updown * self.calc_scale() * self.multiplier() + if ex_bias is None: + ex_bias = 0 + + return updown * self.calc_scale() * self.multiplier(), ex_bias * self.multiplier() def calc_updown(self, target): raise NotImplementedError() diff --git a/extensions-builtin/Lora/network_norm.py b/extensions-builtin/Lora/network_norm.py new file mode 100644 index 00000000..dab8b684 --- /dev/null +++ b/extensions-builtin/Lora/network_norm.py @@ -0,0 +1,29 @@ +import network + + +class ModuleTypeNorm(network.ModuleType): + def create_module(self, net: network.Network, weights: network.NetworkWeights): + if all(x in weights.w for x in ["w_norm", "b_norm"]): + return 
NetworkModuleNorm(net, weights) + + return None + + +class NetworkModuleNorm(network.NetworkModule): + def __init__(self, net: network.Network, weights: network.NetworkWeights): + super().__init__(net, weights) + print("NetworkModuleNorm") + + self.w_norm = weights.w.get("w_norm") + self.b_norm = weights.w.get("b_norm") + + def calc_updown(self, orig_weight): + output_shape = self.w_norm.shape + updown = self.w_norm.to(orig_weight.device, dtype=orig_weight.dtype) + + if self.b_norm is not None: + ex_bias = self.b_norm.to(orig_weight.device, dtype=orig_weight.dtype) + else: + ex_bias = None + + return self.finalize_updown(updown, orig_weight, output_shape, ex_bias) diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 7e3415ac..74cefe43 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -7,6 +7,7 @@ import network_hada import network_ia3 import network_lokr import network_full +import network_norm import torch from typing import Union @@ -19,6 +20,7 @@ module_types = [ network_ia3.ModuleTypeIa3(), network_lokr.ModuleTypeLokr(), network_full.ModuleTypeFull(), + network_norm.ModuleTypeNorm(), ] @@ -31,6 +33,8 @@ suffix_conversion = { "resnets": { "conv1": "in_layers_2", "conv2": "out_layers_3", + "norm1": "in_layers_0", + "norm2": "out_layers_0", "time_emb_proj": "emb_layers_1", "conv_shortcut": "skip_connection", } @@ -258,20 +262,25 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No purge_networks_from_memory() -def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]): +def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]): weights_backup = getattr(self, "network_weights_backup", None) + bias_backup = getattr(self, "network_bias_backup", None) - if weights_backup is None: + if weights_backup is None and bias_backup is None: return - if isinstance(self, torch.nn.MultiheadAttention): - self.in_proj_weight.copy_(weights_backup[0]) - self.out_proj.weight.copy_(weights_backup[1]) - else: - self.weight.copy_(weights_backup) + if weights_backup is not None: + if isinstance(self, torch.nn.MultiheadAttention): + self.in_proj_weight.copy_(weights_backup[0]) + self.out_proj.weight.copy_(weights_backup[1]) + else: + self.weight.copy_(weights_backup) + if bias_backup is not None: + self.bias.copy_(bias_backup) -def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]): + +def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]): """ Applies the currently selected set of networks to the weights of torch layer self. If weights already have this particular set of networks applied, does nothing. 
@@ -294,6 +303,11 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn self.network_weights_backup = weights_backup + bias_backup = getattr(self, "network_bias_backup", None) + if bias_backup is None and getattr(self, 'bias', None) is not None: + bias_backup = self.bias.to(devices.cpu, copy=True) + self.network_bias_backup = bias_backup + if current_names != wanted_names: network_restore_weights_from_backup(self) @@ -301,13 +315,15 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn module = net.modules.get(network_layer_name, None) if module is not None and hasattr(self, 'weight'): with torch.no_grad(): - updown = module.calc_updown(self.weight) + updown, ex_bias = module.calc_updown(self.weight) if len(self.weight.shape) == 4 and self.weight.shape[1] == 9: # inpainting model. zero pad updown to make channel[1] 4 to 9 updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5)) self.weight += updown + if getattr(self, 'bias', None) is not None: + self.bias += ex_bias continue module_q = net.modules.get(network_layer_name + "_q_proj", None) @@ -397,6 +413,36 @@ def network_Conv2d_load_state_dict(self, *args, **kwargs): return torch.nn.Conv2d_load_state_dict_before_network(self, *args, **kwargs) +def network_GroupNorm_forward(self, input): + if shared.opts.lora_functional: + return network_forward(self, input, torch.nn.GroupNorm_forward_before_network) + + network_apply_weights(self) + + return torch.nn.GroupNorm_forward_before_network(self, input) + + +def network_GroupNorm_load_state_dict(self, *args, **kwargs): + network_reset_cached_weight(self) + + return torch.nn.GroupNorm_load_state_dict_before_network(self, *args, **kwargs) + + +def network_LayerNorm_forward(self, input): + if shared.opts.lora_functional: + return network_forward(self, input, torch.nn.LayerNorm_forward_before_network) + + network_apply_weights(self) + + return torch.nn.LayerNorm_forward_before_network(self, input) + + +def network_LayerNorm_load_state_dict(self, *args, **kwargs): + network_reset_cached_weight(self) + + return torch.nn.LayerNorm_load_state_dict_before_network(self, *args, **kwargs) + + def network_MultiheadAttention_forward(self, *args, **kwargs): network_apply_weights(self) diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py index 6ab8b6e7..dc307f8c 100644 --- a/extensions-builtin/Lora/scripts/lora_script.py +++ b/extensions-builtin/Lora/scripts/lora_script.py @@ -40,6 +40,18 @@ if not hasattr(torch.nn, 'Conv2d_forward_before_network'): if not hasattr(torch.nn, 'Conv2d_load_state_dict_before_network'): torch.nn.Conv2d_load_state_dict_before_network = torch.nn.Conv2d._load_from_state_dict +if not hasattr(torch.nn, 'GroupNorm_forward_before_network'): + torch.nn.GroupNorm_forward_before_network = torch.nn.GroupNorm.forward + +if not hasattr(torch.nn, 'GroupNorm_load_state_dict_before_network'): + torch.nn.GroupNorm_load_state_dict_before_network = torch.nn.GroupNorm._load_from_state_dict + +if not hasattr(torch.nn, 'LayerNorm_forward_before_network'): + torch.nn.LayerNorm_forward_before_network = torch.nn.LayerNorm.forward + +if not hasattr(torch.nn, 'LayerNorm_load_state_dict_before_network'): + torch.nn.LayerNorm_load_state_dict_before_network = torch.nn.LayerNorm._load_from_state_dict + if not hasattr(torch.nn, 'MultiheadAttention_forward_before_network'): torch.nn.MultiheadAttention_forward_before_network = torch.nn.MultiheadAttention.forward @@ -50,6 +62,10 @@ 
torch.nn.Linear.forward = networks.network_Linear_forward torch.nn.Linear._load_from_state_dict = networks.network_Linear_load_state_dict torch.nn.Conv2d.forward = networks.network_Conv2d_forward torch.nn.Conv2d._load_from_state_dict = networks.network_Conv2d_load_state_dict +torch.nn.GroupNorm.forward = networks.network_GroupNorm_forward +torch.nn.GroupNorm._load_from_state_dict = networks.network_GroupNorm_load_state_dict +torch.nn.LayerNorm.forward = networks.network_LayerNorm_forward +torch.nn.LayerNorm._load_from_state_dict = networks.network_LayerNorm_load_state_dict torch.nn.MultiheadAttention.forward = networks.network_MultiheadAttention_forward torch.nn.MultiheadAttention._load_from_state_dict = networks.network_MultiheadAttention_load_state_dict -- cgit v1.2.3 From d8419762c1454ba51baa710d9ce8e762efc056ef Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 13 Aug 2023 15:07:37 +0300 Subject: Lora: output warnings in UI rather than fail for unfitting loras; switch to logging for error output in console --- extensions-builtin/Lora/extra_networks_lora.py | 12 ++++- extensions-builtin/Lora/networks.py | 61 ++++++++++++++++---------- extensions-builtin/Lora/scripts/lora_script.py | 6 +-- modules/processing.py | 13 +++--- 4 files changed, 58 insertions(+), 34 deletions(-) (limited to 'extensions-builtin/Lora/scripts/lora_script.py') diff --git a/extensions-builtin/Lora/extra_networks_lora.py b/extensions-builtin/Lora/extra_networks_lora.py index ba2945c6..32e32cab 100644 --- a/extensions-builtin/Lora/extra_networks_lora.py +++ b/extensions-builtin/Lora/extra_networks_lora.py @@ -1,4 +1,4 @@ -from modules import extra_networks, shared +from modules import extra_networks, shared, sd_hijack import networks @@ -6,9 +6,14 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork): def __init__(self): super().__init__('lora') + self.errors = {} + """mapping of network names to the number of errors the network had during operation""" + def activate(self, p, params_list): additional = shared.opts.sd_lora + self.errors.clear() + if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional): p.all_prompts = [x + f"" for x in p.all_prompts] params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier])) @@ -56,4 +61,7 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork): p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes) def deactivate(self, p): - pass + if self.errors: + p.comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in self.errors.items())) + + self.errors.clear() diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index ba621139..c252ed9e 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -1,3 +1,4 @@ +import logging import os import re @@ -194,7 +195,7 @@ def load_network(name, network_on_disk): net.modules[key] = net_module if keys_failed_to_match: - print(f"Failed to match keys when loading network {network_on_disk.filename}: {keys_failed_to_match}") + logging.debug(f"Network {network_on_disk.filename} didn't match keys: {keys_failed_to_match}") return net @@ -207,7 +208,6 @@ def purge_networks_from_memory(): devices.torch_gc() - def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None): already_loaded = {} @@ -248,7 +248,7 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, 
dyn_dims=No if net is None: failed_to_load_networks.append(name) - print(f"Couldn't find network with name {name}") + logging.info(f"Couldn't find network with name {name}") continue net.te_multiplier = te_multipliers[i] if te_multipliers else 1.0 @@ -257,7 +257,7 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No loaded_networks.append(net) if failed_to_load_networks: - sd_hijack.model_hijack.comments.append("Failed to find networks: " + ", ".join(failed_to_load_networks)) + sd_hijack.model_hijack.comments.append("Networks not found: " + ", ".join(failed_to_load_networks)) purge_networks_from_memory() @@ -314,17 +314,22 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn for net in loaded_networks: module = net.modules.get(network_layer_name, None) if module is not None and hasattr(self, 'weight'): - with torch.no_grad(): - updown, ex_bias = module.calc_updown(self.weight) + try: + with torch.no_grad(): + updown, ex_bias = module.calc_updown(self.weight) - if len(self.weight.shape) == 4 and self.weight.shape[1] == 9: - # inpainting model. zero pad updown to make channel[1] 4 to 9 - updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5)) + if len(self.weight.shape) == 4 and self.weight.shape[1] == 9: + # inpainting model. zero pad updown to make channel[1] 4 to 9 + updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5)) - self.weight += updown - if ex_bias is not None and getattr(self, 'bias', None) is not None: - self.bias += ex_bias - continue + self.weight += updown + if ex_bias is not None and getattr(self, 'bias', None) is not None: + self.bias += ex_bias + except RuntimeError as e: + logging.debug(f"Network {net.name} layer {network_layer_name}: {e}") + extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1 + + continue module_q = net.modules.get(network_layer_name + "_q_proj", None) module_k = net.modules.get(network_layer_name + "_k_proj", None) @@ -332,21 +337,28 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn module_out = net.modules.get(network_layer_name + "_out_proj", None) if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out: - with torch.no_grad(): - updown_q = module_q.calc_updown(self.in_proj_weight) - updown_k = module_k.calc_updown(self.in_proj_weight) - updown_v = module_v.calc_updown(self.in_proj_weight) - updown_qkv = torch.vstack([updown_q, updown_k, updown_v]) - updown_out = module_out.calc_updown(self.out_proj.weight) - - self.in_proj_weight += updown_qkv - self.out_proj.weight += updown_out - continue + try: + with torch.no_grad(): + updown_q = module_q.calc_updown(self.in_proj_weight) + updown_k = module_k.calc_updown(self.in_proj_weight) + updown_v = module_v.calc_updown(self.in_proj_weight) + updown_qkv = torch.vstack([updown_q, updown_k, updown_v]) + updown_out = module_out.calc_updown(self.out_proj.weight) + + self.in_proj_weight += updown_qkv + self.out_proj.weight += updown_out + + except RuntimeError as e: + logging.debug(f"Network {net.name} layer {network_layer_name}: {e}") + extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1 + + continue if module is None: continue - print(f'failed to calculate network weights for layer {network_layer_name}') + logging.debug(f"Network {net.name} layer {network_layer_name}: couldn't find supported operation") + extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1 
self.network_current_names = wanted_names @@ -519,6 +531,7 @@ def infotext_pasted(infotext, params): if added: params["Prompt"] += "\n" + "".join(added) +extra_network_lora = None available_networks = {} available_network_aliases = {} diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py index dc307f8c..4c6e774a 100644 --- a/extensions-builtin/Lora/scripts/lora_script.py +++ b/extensions-builtin/Lora/scripts/lora_script.py @@ -23,9 +23,9 @@ def unload(): def before_ui(): ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora()) - extra_network = extra_networks_lora.ExtraNetworkLora() - extra_networks.register_extra_network(extra_network) - extra_networks.register_extra_network_alias(extra_network, "lyco") + networks.extra_network_lora = extra_networks_lora.ExtraNetworkLora() + extra_networks.register_extra_network(networks.extra_network_lora) + extra_networks.register_extra_network_alias(networks.extra_network_lora, "lyco") if not hasattr(torch.nn, 'Linear_forward_before_network'): diff --git a/modules/processing.py b/modules/processing.py index 007a4e05..10749aa2 100755 --- a/modules/processing.py +++ b/modules/processing.py @@ -157,6 +157,7 @@ class StableDiffusionProcessing: cached_uc = [None, None] cached_c = [None, None] + comments: dict = None sampler: sd_samplers_common.Sampler | None = field(default=None, init=False) is_using_inpainting_conditioning: bool = field(default=False, init=False) paste_to: tuple | None = field(default=None, init=False) @@ -196,6 +197,8 @@ class StableDiffusionProcessing: if self.sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) + self.comments = {} + self.sampler_noise_scheduler_override = None self.s_min_uncond = self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond self.s_churn = self.s_churn if self.s_churn is not None else opts.s_churn @@ -226,6 +229,9 @@ class StableDiffusionProcessing: def sd_model(self, value): pass + def comment(self, text): + self.comments[text] = 1 + def txt2img_image_conditioning(self, x, width=None, height=None): self.is_using_inpainting_conditioning = self.sd_model.model.conditioning_key in {'hybrid', 'concat'} @@ -429,7 +435,7 @@ class Processed: self.subseed = subseed self.subseed_strength = p.subseed_strength self.info = info - self.comments = comments + self.comments = "".join(f"{comment}\n" for comment in p.comments) self.width = p.width self.height = p.height self.sampler_name = p.sampler_name @@ -720,8 +726,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: modules.sd_hijack.model_hijack.apply_circular(p.tiling) modules.sd_hijack.model_hijack.clear_comments() - comments = {} - p.setup_prompts() if type(seed) == list: @@ -801,7 +805,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: p.setup_conds() for comment in model_hijack.comments: - comments[comment] = 1 + p.comment(comment) p.extra_generation_params.update(model_hijack.extra_generation_params) @@ -930,7 +934,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: images_list=output_images, seed=p.all_seeds[0], info=infotexts[0], - comments="".join(f"{comment}\n" for comment in comments), subseed=p.all_subseeds[0], index_of_first_image=index_of_first_image, infotexts=infotexts, -- cgit v1.2.3 From f01682ee01e81e8ef84fd6fffe8f7aa17233285d Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 
15 Aug 2023 19:23:27 +0300 Subject: store patches for Lora in a specialized module --- extensions-builtin/Lora/lora_patches.py | 31 +++++++++++++ extensions-builtin/Lora/networks.py | 32 +++++++------ extensions-builtin/Lora/scripts/lora_script.py | 52 ++------------------- modules/patches.py | 64 ++++++++++++++++++++++++++ 4 files changed, 118 insertions(+), 61 deletions(-) create mode 100644 extensions-builtin/Lora/lora_patches.py create mode 100644 modules/patches.py (limited to 'extensions-builtin/Lora/scripts/lora_script.py') diff --git a/extensions-builtin/Lora/lora_patches.py b/extensions-builtin/Lora/lora_patches.py new file mode 100644 index 00000000..b394d8e9 --- /dev/null +++ b/extensions-builtin/Lora/lora_patches.py @@ -0,0 +1,31 @@ +import torch + +import networks +from modules import patches + + +class LoraPatches: + def __init__(self): + self.Linear_forward = patches.patch(__name__, torch.nn.Linear, 'forward', networks.network_Linear_forward) + self.Linear_load_state_dict = patches.patch(__name__, torch.nn.Linear, '_load_from_state_dict', networks.network_Linear_load_state_dict) + self.Conv2d_forward = patches.patch(__name__, torch.nn.Conv2d, 'forward', networks.network_Conv2d_forward) + self.Conv2d_load_state_dict = patches.patch(__name__, torch.nn.Conv2d, '_load_from_state_dict', networks.network_Conv2d_load_state_dict) + self.GroupNorm_forward = patches.patch(__name__, torch.nn.GroupNorm, 'forward', networks.network_GroupNorm_forward) + self.GroupNorm_load_state_dict = patches.patch(__name__, torch.nn.GroupNorm, '_load_from_state_dict', networks.network_GroupNorm_load_state_dict) + self.LayerNorm_forward = patches.patch(__name__, torch.nn.LayerNorm, 'forward', networks.network_LayerNorm_forward) + self.LayerNorm_load_state_dict = patches.patch(__name__, torch.nn.LayerNorm, '_load_from_state_dict', networks.network_LayerNorm_load_state_dict) + self.MultiheadAttention_forward = patches.patch(__name__, torch.nn.MultiheadAttention, 'forward', networks.network_MultiheadAttention_forward) + self.MultiheadAttention_load_state_dict = patches.patch(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict', networks.network_MultiheadAttention_load_state_dict) + + def undo(self): + self.Linear_forward = patches.undo(__name__, torch.nn.Linear, 'forward') + self.Linear_load_state_dict = patches.undo(__name__, torch.nn.Linear, '_load_from_state_dict') + self.Conv2d_forward = patches.undo(__name__, torch.nn.Conv2d, 'forward') + self.Conv2d_load_state_dict = patches.undo(__name__, torch.nn.Conv2d, '_load_from_state_dict') + self.GroupNorm_forward = patches.undo(__name__, torch.nn.GroupNorm, 'forward') + self.GroupNorm_load_state_dict = patches.undo(__name__, torch.nn.GroupNorm, '_load_from_state_dict') + self.LayerNorm_forward = patches.undo(__name__, torch.nn.LayerNorm, 'forward') + self.LayerNorm_load_state_dict = patches.undo(__name__, torch.nn.LayerNorm, '_load_from_state_dict') + self.MultiheadAttention_forward = patches.undo(__name__, torch.nn.MultiheadAttention, 'forward') + self.MultiheadAttention_load_state_dict = patches.undo(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict') + diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 22fdff4a..9fca36b6 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -2,6 +2,7 @@ import logging import os import re +import lora_patches import network import network_lora import network_hada @@ -418,74 +419,74 @@ def network_reset_cached_weight(self: 
Union[torch.nn.Conv2d, torch.nn.Linear]): def network_Linear_forward(self, input): if shared.opts.lora_functional: - return network_forward(self, input, torch.nn.Linear_forward_before_network) + return network_forward(self, input, originals.Linear_forward) network_apply_weights(self) - return torch.nn.Linear_forward_before_network(self, input) + return originals.Linear_forward(self, input) def network_Linear_load_state_dict(self, *args, **kwargs): network_reset_cached_weight(self) - return torch.nn.Linear_load_state_dict_before_network(self, *args, **kwargs) + return originals.Linear_load_state_dict(self, *args, **kwargs) def network_Conv2d_forward(self, input): if shared.opts.lora_functional: - return network_forward(self, input, torch.nn.Conv2d_forward_before_network) + return network_forward(self, input, originals.Conv2d_forward) network_apply_weights(self) - return torch.nn.Conv2d_forward_before_network(self, input) + return originals.Conv2d_forward(self, input) def network_Conv2d_load_state_dict(self, *args, **kwargs): network_reset_cached_weight(self) - return torch.nn.Conv2d_load_state_dict_before_network(self, *args, **kwargs) + return originals.Conv2d_load_state_dict(self, *args, **kwargs) def network_GroupNorm_forward(self, input): if shared.opts.lora_functional: - return network_forward(self, input, torch.nn.GroupNorm_forward_before_network) + return network_forward(self, input, originals.GroupNorm_forward) network_apply_weights(self) - return torch.nn.GroupNorm_forward_before_network(self, input) + return originals.GroupNorm_forward(self, input) def network_GroupNorm_load_state_dict(self, *args, **kwargs): network_reset_cached_weight(self) - return torch.nn.GroupNorm_load_state_dict_before_network(self, *args, **kwargs) + return originals.GroupNorm_load_state_dict(self, *args, **kwargs) def network_LayerNorm_forward(self, input): if shared.opts.lora_functional: - return network_forward(self, input, torch.nn.LayerNorm_forward_before_network) + return network_forward(self, input, originals.LayerNorm_forward) network_apply_weights(self) - return torch.nn.LayerNorm_forward_before_network(self, input) + return originals.LayerNorm_forward(self, input) def network_LayerNorm_load_state_dict(self, *args, **kwargs): network_reset_cached_weight(self) - return torch.nn.LayerNorm_load_state_dict_before_network(self, *args, **kwargs) + return originals.LayerNorm_load_state_dict(self, *args, **kwargs) def network_MultiheadAttention_forward(self, *args, **kwargs): network_apply_weights(self) - return torch.nn.MultiheadAttention_forward_before_network(self, *args, **kwargs) + return originals.MultiheadAttention_forward(self, *args, **kwargs) def network_MultiheadAttention_load_state_dict(self, *args, **kwargs): network_reset_cached_weight(self) - return torch.nn.MultiheadAttention_load_state_dict_before_network(self, *args, **kwargs) + return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs) def list_available_networks(): @@ -552,6 +553,9 @@ def infotext_pasted(infotext, params): if added: params["Prompt"] += "\n" + "".join(added) + +originals: lora_patches.LoraPatches = None + extra_network_lora = None available_networks = {} diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py index 4c6e774a..546fb55e 100644 --- a/extensions-builtin/Lora/scripts/lora_script.py +++ b/extensions-builtin/Lora/scripts/lora_script.py @@ -7,17 +7,14 @@ from fastapi import FastAPI import network import networks import lora # noqa:F401 +import 
lora_patches import extra_networks_lora import ui_extra_networks_lora -from modules import script_callbacks, ui_extra_networks, extra_networks, shared +from modules import script_callbacks, ui_extra_networks, extra_networks, shared, patches + def unload(): - torch.nn.Linear.forward = torch.nn.Linear_forward_before_network - torch.nn.Linear._load_from_state_dict = torch.nn.Linear_load_state_dict_before_network - torch.nn.Conv2d.forward = torch.nn.Conv2d_forward_before_network - torch.nn.Conv2d._load_from_state_dict = torch.nn.Conv2d_load_state_dict_before_network - torch.nn.MultiheadAttention.forward = torch.nn.MultiheadAttention_forward_before_network - torch.nn.MultiheadAttention._load_from_state_dict = torch.nn.MultiheadAttention_load_state_dict_before_network + networks.originals.undo() def before_ui(): @@ -28,46 +25,7 @@ def before_ui(): extra_networks.register_extra_network_alias(networks.extra_network_lora, "lyco") -if not hasattr(torch.nn, 'Linear_forward_before_network'): - torch.nn.Linear_forward_before_network = torch.nn.Linear.forward - -if not hasattr(torch.nn, 'Linear_load_state_dict_before_network'): - torch.nn.Linear_load_state_dict_before_network = torch.nn.Linear._load_from_state_dict - -if not hasattr(torch.nn, 'Conv2d_forward_before_network'): - torch.nn.Conv2d_forward_before_network = torch.nn.Conv2d.forward - -if not hasattr(torch.nn, 'Conv2d_load_state_dict_before_network'): - torch.nn.Conv2d_load_state_dict_before_network = torch.nn.Conv2d._load_from_state_dict - -if not hasattr(torch.nn, 'GroupNorm_forward_before_network'): - torch.nn.GroupNorm_forward_before_network = torch.nn.GroupNorm.forward - -if not hasattr(torch.nn, 'GroupNorm_load_state_dict_before_network'): - torch.nn.GroupNorm_load_state_dict_before_network = torch.nn.GroupNorm._load_from_state_dict - -if not hasattr(torch.nn, 'LayerNorm_forward_before_network'): - torch.nn.LayerNorm_forward_before_network = torch.nn.LayerNorm.forward - -if not hasattr(torch.nn, 'LayerNorm_load_state_dict_before_network'): - torch.nn.LayerNorm_load_state_dict_before_network = torch.nn.LayerNorm._load_from_state_dict - -if not hasattr(torch.nn, 'MultiheadAttention_forward_before_network'): - torch.nn.MultiheadAttention_forward_before_network = torch.nn.MultiheadAttention.forward - -if not hasattr(torch.nn, 'MultiheadAttention_load_state_dict_before_network'): - torch.nn.MultiheadAttention_load_state_dict_before_network = torch.nn.MultiheadAttention._load_from_state_dict - -torch.nn.Linear.forward = networks.network_Linear_forward -torch.nn.Linear._load_from_state_dict = networks.network_Linear_load_state_dict -torch.nn.Conv2d.forward = networks.network_Conv2d_forward -torch.nn.Conv2d._load_from_state_dict = networks.network_Conv2d_load_state_dict -torch.nn.GroupNorm.forward = networks.network_GroupNorm_forward -torch.nn.GroupNorm._load_from_state_dict = networks.network_GroupNorm_load_state_dict -torch.nn.LayerNorm.forward = networks.network_LayerNorm_forward -torch.nn.LayerNorm._load_from_state_dict = networks.network_LayerNorm_load_state_dict -torch.nn.MultiheadAttention.forward = networks.network_MultiheadAttention_forward -torch.nn.MultiheadAttention._load_from_state_dict = networks.network_MultiheadAttention_load_state_dict +networks.originals = lora_patches.LoraPatches() script_callbacks.on_model_loaded(networks.assign_network_names_to_compvis_modules) script_callbacks.on_script_unloaded(unload) diff --git a/modules/patches.py b/modules/patches.py new file mode 100644 index 00000000..348235e7 --- /dev/null +++ 
b/modules/patches.py
@@ -0,0 +1,64 @@
+from collections import defaultdict
+
+
+def patch(key, obj, field, replacement):
+    """Replaces a function in a module or a class.
+
+    Also stores the original function in this module, so it can later be retrieved via original(key, obj, field).
+    If the function is already replaced by this caller (key), an exception is raised -- use undo() before that.
+
+    Arguments:
+        key: identifying information for who is doing the replacement. You can use __name__.
+        obj: the module or the class
+        field: name of the function as a string
+        replacement: the new function
+
+    Returns:
+        the original function
+    """
+
+    patch_key = (obj, field)
+    if patch_key in originals[key]:
+        raise RuntimeError(f"patch for {field} is already applied")
+
+    original_func = getattr(obj, field)
+    originals[key][patch_key] = original_func
+
+    setattr(obj, field, replacement)
+
+    return original_func
+
+
+def undo(key, obj, field):
+    """Undoes the replacement made by patch().
+
+    If the function is not replaced, raises an exception.
+
+    Arguments:
+        key: identifying information for who is doing the replacement. You can use __name__.
+        obj: the module or the class
+        field: name of the function as a string
+
+    Returns:
+        Always None
+    """
+
+    patch_key = (obj, field)
+
+    if patch_key not in originals[key]:
+        raise RuntimeError(f"there is no patch for {field} to undo")
+
+    original_func = originals[key].pop(patch_key)
+    setattr(obj, field, original_func)
+
+    return None
+
+
+def original(key, obj, field):
+    """Returns the original function for the patch created by the patch() function"""
+    patch_key = (obj, field)
+
+    return originals[key].get(patch_key, None)
+
+
+originals = defaultdict(dict)
-- cgit v1.2.3


From 85fcb7b8dfe7b3dd06931943f095c77f1043dc25 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 15 Aug 2023 19:24:55 +0300
Subject: lint

---
 extensions-builtin/Lora/scripts/lora_script.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'extensions-builtin/Lora/scripts/lora_script.py')

diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 546fb55e..ef23968c 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -1,6 +1,5 @@
 import re
 
-import torch
 import gradio as gr
 from fastapi import FastAPI
 
@@ -10,7 +9,7 @@ import lora  # noqa:F401
 import lora_patches
 import extra_networks_lora
 import ui_extra_networks_lora
-from modules import script_callbacks, ui_extra_networks, extra_networks, shared, patches
+from modules import script_callbacks, ui_extra_networks, extra_networks, shared
 
 
 def unload():
-- cgit v1.2.3
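
Usage sketch (not part of the patches above): a minimal example of how the patch()/original()/undo() helpers introduced in modules/patches.py are meant to be used. The Example class and patched_forward function are hypothetical stand-ins for torch.nn.Linear and networks.network_Linear_forward; the module is assumed to be importable as `from modules import patches` when running inside the webui.

from modules import patches   # the patches module introduced in the diff above


class Example:                                   # hypothetical stand-in for a torch.nn layer class
    def forward(self, x):
        return x


def patched_forward(self, x):                    # hypothetical stand-in for networks.network_Linear_forward
    return original_forward(self, x) * 2         # call through to the stored original, then modify the result


# patch() swaps in the replacement and returns the original, which LoraPatches.__init__ keeps as an attribute
original_forward = patches.patch(__name__, Example, 'forward', patched_forward)

assert Example().forward(3) == 6                                       # patched behaviour
assert patches.original(__name__, Example, 'forward') is original_forward

# undo() puts the original back and forgets the stored entry, as LoraPatches.undo() does during unload()
patches.undo(__name__, Example, 'forward')
assert Example().forward(3) == 3                                       # original behaviour restored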
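
A second sketch, in plain PyTorch with made-up sizes and multiplier, of the arithmetic added by the earlier norm-module commit: NetworkModuleNorm.calc_updown takes w_norm as the weight delta and b_norm as the bias delta, finalize_updown scales both by the network multiplier (calc_scale() is assumed to be 1.0 here), and network_apply_weights adds them in place after backing up the originals.

import torch

layer = torch.nn.LayerNorm(4)               # a norm layer of the kind now covered by the hooks
w_norm = torch.full((4,), 0.5)              # stands in for weights.w["w_norm"]
b_norm = torch.full((4,), 0.1)              # stands in for weights.w["b_norm"]
multiplier = 0.8                            # assumed network multiplier

# finalize_updown(): scale both deltas by the multiplier
updown = w_norm * multiplier
ex_bias = b_norm * multiplier

with torch.no_grad():
    # analogous to network_weights_backup / network_bias_backup
    weight_backup = layer.weight.detach().clone()
    bias_backup = layer.bias.detach().clone()

    # network_apply_weights(): add the deltas in place
    layer.weight += updown
    layer.bias += ex_bias

    # network_restore_weights_from_backup(): undo before applying a different set of networks
    layer.weight.copy_(weight_backup)
    layer.bias.copy_(bias_backup)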