From a68f4690307b0f94404b85513429e18ee3936589 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Mon, 24 Jul 2023 17:54:59 -0400
Subject: Fix to parse TE in some LoRAs

---
 extensions-builtin/Lora/networks.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'extensions-builtin/Lora/networks.py')

diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index af8188e3..3a8cfa3b 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -161,7 +161,7 @@ def load_network(name, network_on_disk):
             sd_module = shared.sd_model.network_layer_mapping.get(key, None)
         elif sd_module is None and "lora_te1_text_model" in key_network_without_network_parts:
             key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
-            sd_module = shared.sd_model.network_layer_mapping.get(key, None)
+            sd_module = shared.sd_model.network_layer_mapping.get(key, None) or shared.sd_model.network_layer_mapping.get(key[2:], None)
 
         if sd_module is None:
             keys_failed_to_match[key_network] = key
--
cgit v1.2.3


From d0bf509fa14babebedbaef121ef54599003aa457 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 25 Jul 2023 16:18:10 +0300
Subject: fix for #11963

---
 extensions-builtin/Lora/networks.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

(limited to 'extensions-builtin/Lora/networks.py')

diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 3a8cfa3b..17cbe1bb 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -161,7 +161,12 @@ def load_network(name, network_on_disk):
             sd_module = shared.sd_model.network_layer_mapping.get(key, None)
         elif sd_module is None and "lora_te1_text_model" in key_network_without_network_parts:
             key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
-            sd_module = shared.sd_model.network_layer_mapping.get(key, None) or shared.sd_model.network_layer_mapping.get(key[2:], None)
+            sd_module = shared.sd_model.network_layer_mapping.get(key, None)
+
+            # some SD1 Loras also have correct compvis keys
+            if sd_module is None:
+                key = key_network_without_network_parts.replace("lora_te1_text_model", "transformer_text_model")
+                sd_module = shared.sd_model.network_layer_mapping.get(key, None)
 
         if sd_module is None:
             keys_failed_to_match[key_network] = key
--
cgit v1.2.3
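
Together, these two patches turn the text-encoder key lookup in load_network into a short fallback chain: first the SDXL-style "0_transformer_text_model" spelling, then (for SD1 LoRAs that already carry compvis-style keys) the plain "transformer_text_model" spelling. A standalone sketch of that pattern, assuming only what the hunks above show; the helper name and bare dict argument are illustrative, not part of networks.py:

def find_te_module(layer_mapping, diffusers_key):
    """Try progressively simpler spellings of a LoRA text-encoder key (sketch)."""
    candidates = [
        diffusers_key.replace("lora_te1_text_model", "0_transformer_text_model"),
        diffusers_key.replace("lora_te1_text_model", "transformer_text_model"),
    ]
    for key in candidates:
        module = layer_mapping.get(key, None)
        if module is not None:
            return module
    return None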

From eed963e97261ee03bffe59e3d343dcf53d82dbfd Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 9 Aug 2023 16:54:49 +0300
Subject: Lora cache in memory

---
 extensions-builtin/Lora/networks.py | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

(limited to 'extensions-builtin/Lora/networks.py')

diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 17cbe1bb..bc722e90 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -195,6 +195,15 @@ def load_network(name, network_on_disk):
     return net
 
 
+def purge_networks_from_memory():
+    while len(networks_in_memory) > shared.opts.lora_in_memory_limit and len(networks_in_memory) > 0:
+        name = next(iter(networks_in_memory))
+        networks_in_memory.pop(name, None)
+
+    devices.torch_gc()
+
+
+
 def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
     already_loaded = {}
 
@@ -212,15 +221,19 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
 
     failed_to_load_networks = []
 
-    for i, name in enumerate(names):
+    for i, (network_on_disk, name) in enumerate(zip(networks_on_disk, names)):
         net = already_loaded.get(name, None)
 
-        network_on_disk = networks_on_disk[i]
-
-        if network_on_disk is not None:
+        if net is None:
+            net = networks_in_memory.get(name)
+
+        if net is None or os.path.getmtime(network_on_disk.filename) > net.mtime:
             try:
                 net = load_network(name, network_on_disk)
+
+                networks_in_memory.pop(name, None)
+                networks_in_memory[name] = net
             except Exception as e:
                 errors.display(e, f"loading network {network_on_disk.filename}")
                 continue
@@ -242,6 +255,8 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
     if failed_to_load_networks:
         sd_hijack.model_hijack.comments.append("Failed to find networks: " + ", ".join(failed_to_load_networks))
 
+    purge_networks_from_memory()
+
 
 def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
     weights_backup = getattr(self, "network_weights_backup", None)
@@ -462,6 +477,7 @@ def infotext_pasted(infotext, params):
 available_networks = {}
 available_network_aliases = {}
 loaded_networks = []
+networks_in_memory = {}
 available_network_hash_lookup = {}
 forbidden_network_aliases = {}
 
--
cgit v1.2.3
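
The cache added here leans on dict insertion order: a freshly (re)loaded network is popped and re-inserted so it moves to the back, load_networks reloads an entry whenever the file on disk is newer than the cached net.mtime, and purge_networks_from_memory evicts from the front until the lora_in_memory_limit option is satisfied. The same pattern in isolation, as a sketch; the class and field names are illustrative, not the webui API:

class NetworkCacheSketch:
    """FIFO-style in-memory cache of loaded networks, keyed by name (illustrative)."""

    def __init__(self, limit):
        self.limit = limit
        self.entries = {}  # plain dicts keep insertion order (Python 3.7+)

    def get(self, name, mtime_on_disk):
        entry = self.entries.get(name)
        if entry is None or mtime_on_disk > entry["mtime"]:
            return None  # missing or stale: caller reloads from disk
        return entry["net"]

    def put(self, name, net, mtime_on_disk):
        self.entries.pop(name, None)  # re-inserting moves the entry to the back
        self.entries[name] = {"net": net, "mtime": mtime_on_disk}
        self.purge()

    def purge(self):
        # evict from the front (oldest insertion) until under the limit
        while len(self.entries) > self.limit and len(self.entries) > 0:
            oldest = next(iter(self.entries))
            self.entries.pop(oldest, None)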

From 4fafc34e498130dcbb2d1a44fbc55fdba31e32d4 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Thu, 10 Aug 2023 23:42:58 -0400
Subject: Fix to make LoRA old method setting work

---
 extensions-builtin/Lora/networks.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'extensions-builtin/Lora/networks.py')

diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index bc722e90..7e3415ac 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -357,7 +357,7 @@ def network_forward(module, input, original_forward):
         if module is None:
             continue
 
-        y = module.forward(y, input)
+        y = module.forward(input, y)
 
     return y
 
--
cgit v1.2.3


From bd4da4474bef5c9c1f690c62b971704ee73d2860 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Sun, 13 Aug 2023 02:27:39 +0800
Subject: Add extra norm module into built-in lora ext

refer to LyCORIS 1.9.0.dev6
add new option and module for training norm layer
(Which is reported to be good for style)
---
 extensions-builtin/Lora/networks.py | 64 +++++++++++++++++++++++++++++++------
 1 file changed, 55 insertions(+), 9 deletions(-)

(limited to 'extensions-builtin/Lora/networks.py')

diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 7e3415ac..74cefe43 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -7,6 +7,7 @@ import network_hada
 import network_ia3
 import network_lokr
 import network_full
+import network_norm
 
 import torch
 from typing import Union
@@ -19,6 +20,7 @@ module_types = [
     network_ia3.ModuleTypeIa3(),
     network_lokr.ModuleTypeLokr(),
     network_full.ModuleTypeFull(),
+    network_norm.ModuleTypeNorm(),
 ]
 
 
@@ -31,6 +33,8 @@ suffix_conversion = {
     "resnets": {
         "conv1": "in_layers_2",
         "conv2": "out_layers_3",
+        "norm1": "in_layers_0",
+        "norm2": "out_layers_0",
         "time_emb_proj": "emb_layers_1",
         "conv_shortcut": "skip_connection",
     }
@@ -258,20 +262,25 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
     purge_networks_from_memory()
 
 
-def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
+def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
     weights_backup = getattr(self, "network_weights_backup", None)
+    bias_backup = getattr(self, "network_bias_backup", None)
 
-    if weights_backup is None:
+    if weights_backup is None and bias_backup is None:
         return
 
-    if isinstance(self, torch.nn.MultiheadAttention):
-        self.in_proj_weight.copy_(weights_backup[0])
-        self.out_proj.weight.copy_(weights_backup[1])
-    else:
-        self.weight.copy_(weights_backup)
+    if weights_backup is not None:
+        if isinstance(self, torch.nn.MultiheadAttention):
+            self.in_proj_weight.copy_(weights_backup[0])
+            self.out_proj.weight.copy_(weights_backup[1])
+        else:
+            self.weight.copy_(weights_backup)
 
+    if bias_backup is not None:
+        self.bias.copy_(bias_backup)
 
-def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
+
+def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
     """
     Applies the currently selected set of networks to the weights of torch layer self.
     If weights already have this particular set of networks applied, does nothing.
@@ -294,6 +303,11 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
 
         self.network_weights_backup = weights_backup
 
+    bias_backup = getattr(self, "network_bias_backup", None)
+    if bias_backup is None and getattr(self, 'bias', None) is not None:
+        bias_backup = self.bias.to(devices.cpu, copy=True)
+        self.network_bias_backup = bias_backup
+
     if current_names != wanted_names:
         network_restore_weights_from_backup(self)
 
@@ -301,13 +315,15 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
         module = net.modules.get(network_layer_name, None)
         if module is not None and hasattr(self, 'weight'):
             with torch.no_grad():
-                updown = module.calc_updown(self.weight)
+                updown, ex_bias = module.calc_updown(self.weight)
 
                 if len(self.weight.shape) == 4 and self.weight.shape[1] == 9:
                     # inpainting model. zero pad updown to make channel[1] 4 to 9
                     updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))
 
                 self.weight += updown
+                if getattr(self, 'bias', None) is not None:
+                    self.bias += ex_bias
             continue
 
         module_q = net.modules.get(network_layer_name + "_q_proj", None)
@@ -397,6 +413,36 @@ def network_Conv2d_load_state_dict(self, *args, **kwargs):
     return torch.nn.Conv2d_load_state_dict_before_network(self, *args, **kwargs)
 
 
+def network_GroupNorm_forward(self, input):
+    if shared.opts.lora_functional:
+        return network_forward(self, input, torch.nn.GroupNorm_forward_before_network)
+
+    network_apply_weights(self)
+
+    return torch.nn.GroupNorm_forward_before_network(self, input)
+
+
+def network_GroupNorm_load_state_dict(self, *args, **kwargs):
+    network_reset_cached_weight(self)
+
+    return torch.nn.GroupNorm_load_state_dict_before_network(self, *args, **kwargs)
+
+
+def network_LayerNorm_forward(self, input):
+    if shared.opts.lora_functional:
+        return network_forward(self, input, torch.nn.LayerNorm_forward_before_network)
+
+    network_apply_weights(self)
+
+    return torch.nn.LayerNorm_forward_before_network(self, input)
+
+
+def network_LayerNorm_load_state_dict(self, *args, **kwargs):
+    network_reset_cached_weight(self)
+
+    return torch.nn.LayerNorm_load_state_dict_before_network(self, *args, **kwargs)
+
+
 def network_MultiheadAttention_forward(self, *args, **kwargs):
     network_apply_weights(self)
 
--
cgit v1.2.3
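
network_norm.py itself is not shown in this log; what networks.py now relies on is that every module type's calc_updown() returns an (updown, ex_bias) pair, where updown is added to the layer's weight and ex_bias, if present, to its bias. A minimal sketch of a module honouring that contract, applied to a plain LayerNorm; the class name and the delta convention are illustrative, not the actual ModuleTypeNorm implementation:

import torch
import torch.nn as nn


class NormModuleSketch:
    # Illustrative only -- not the actual network_norm.ModuleTypeNorm code.
    def __init__(self, w_delta, b_delta, multiplier=1.0):
        self.w_delta = w_delta  # trained delta for the norm layer's weight
        self.b_delta = b_delta  # trained delta for the norm layer's bias
        self.multiplier = multiplier

    def calc_updown(self, orig_weight):
        updown = self.w_delta.to(orig_weight) * self.multiplier
        ex_bias = self.b_delta.to(orig_weight) * self.multiplier
        return updown, ex_bias


# the same "+= updown / += ex_bias" step that network_apply_weights performs
layer = nn.LayerNorm(8)
module = NormModuleSketch(torch.full((8,), 0.1), torch.zeros(8))
with torch.no_grad():
    updown, ex_bias = module.calc_updown(layer.weight)
    layer.weight += updown
    if ex_bias is not None and layer.bias is not None:
        layer.bias += ex_bias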

From a2b83050965a1a117f2762d3b5fa8b4841777e8f Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Sun, 13 Aug 2023 02:35:04 +0800
Subject: return None if no ex_bias

---
 extensions-builtin/Lora/networks.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'extensions-builtin/Lora/networks.py')

diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 74cefe43..ba621139 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -322,7 +322,7 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
                     updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))
 
                 self.weight += updown
-                if getattr(self, 'bias', None) is not None:
+                if ex_bias is not None and getattr(self, 'bias', None) is not None:
                     self.bias += ex_bias
             continue
 
--
cgit v1.2.3


From d8419762c1454ba51baa710d9ce8e762efc056ef Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 13 Aug 2023 15:07:37 +0300
Subject: Lora: output warnings in UI rather than fail for unfitting loras;
 switch to logging for error output in console

---
 extensions-builtin/Lora/networks.py | 61 ++++++++++++++++++++++---------------
 1 file changed, 37 insertions(+), 24 deletions(-)

(limited to 'extensions-builtin/Lora/networks.py')

diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index ba621139..c252ed9e 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -1,3 +1,4 @@
+import logging
 import os
 import re
 
@@ -194,7 +195,7 @@ def load_network(name, network_on_disk):
         net.modules[key] = net_module
 
     if keys_failed_to_match:
-        print(f"Failed to match keys when loading network {network_on_disk.filename}: {keys_failed_to_match}")
+        logging.debug(f"Network {network_on_disk.filename} didn't match keys: {keys_failed_to_match}")
 
     return net
 
@@ -207,7 +208,6 @@ def purge_networks_from_memory():
     devices.torch_gc()
 
 
-
 def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
     already_loaded = {}
 
@@ -248,7 +248,7 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
 
         if net is None:
             failed_to_load_networks.append(name)
-            print(f"Couldn't find network with name {name}")
+            logging.info(f"Couldn't find network with name {name}")
             continue
 
@@ -257,7 +257,7 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
         loaded_networks.append(net)
 
     if failed_to_load_networks:
-        sd_hijack.model_hijack.comments.append("Failed to find networks: " + ", ".join(failed_to_load_networks))
+        sd_hijack.model_hijack.comments.append("Networks not found: " + ", ".join(failed_to_load_networks))
 
     purge_networks_from_memory()
 
@@ -314,17 +314,22 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
     for net in loaded_networks:
         module = net.modules.get(network_layer_name, None)
         if module is not None and hasattr(self, 'weight'):
-            with torch.no_grad():
-                updown, ex_bias = module.calc_updown(self.weight)
+            try:
+                with torch.no_grad():
+                    updown, ex_bias = module.calc_updown(self.weight)
 
-                if len(self.weight.shape) == 4 and self.weight.shape[1] == 9:
-                    # inpainting model. zero pad updown to make channel[1] 4 to 9
-                    updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))
+                    if len(self.weight.shape) == 4 and self.weight.shape[1] == 9:
+                        # inpainting model. zero pad updown to make channel[1] 4 to 9
+                        updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))
 
-                self.weight += updown
-                if ex_bias is not None and getattr(self, 'bias', None) is not None:
-                    self.bias += ex_bias
+                    self.weight += updown
+                    if ex_bias is not None and getattr(self, 'bias', None) is not None:
+                        self.bias += ex_bias
+            except RuntimeError as e:
+                logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
+                extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
+
             continue
 
         module_q = net.modules.get(network_layer_name + "_q_proj", None)
         module_k = net.modules.get(network_layer_name + "_k_proj", None)
@@ -332,21 +337,28 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
         module_out = net.modules.get(network_layer_name + "_out_proj", None)
 
         if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out:
-            with torch.no_grad():
-                updown_q = module_q.calc_updown(self.in_proj_weight)
-                updown_k = module_k.calc_updown(self.in_proj_weight)
-                updown_v = module_v.calc_updown(self.in_proj_weight)
-                updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
-                updown_out = module_out.calc_updown(self.out_proj.weight)
-
-                self.in_proj_weight += updown_qkv
-                self.out_proj.weight += updown_out
-                continue
+            try:
+                with torch.no_grad():
+                    updown_q = module_q.calc_updown(self.in_proj_weight)
+                    updown_k = module_k.calc_updown(self.in_proj_weight)
+                    updown_v = module_v.calc_updown(self.in_proj_weight)
+                    updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
+                    updown_out = module_out.calc_updown(self.out_proj.weight)
+
+                    self.in_proj_weight += updown_qkv
+                    self.out_proj.weight += updown_out
+
+            except RuntimeError as e:
+                logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
+                extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
+
+            continue
 
         if module is None:
             continue
 
-        print(f'failed to calculate network weights for layer {network_layer_name}')
+        logging.debug(f"Network {net.name} layer {network_layer_name}: couldn't find supported operation")
+        extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
 
     self.network_current_names = wanted_names
 
@@ -519,6 +531,7 @@ def infotext_pasted(infotext, params):
     if added:
         params["Prompt"] += "\n" + "".join(added)
 
+extra_network_lora = None
 
 available_networks = {}
 available_network_aliases = {}
--
cgit v1.2.3
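
The error handling added here follows a degrade-gracefully pattern: each layer that fails to apply increments a per-network counter instead of aborting generation, and the counts can later be summarized once for the user (the actual UI reporting lives outside networks.py and is not shown in this log). The accumulation pattern in isolation, with illustrative function names:

import logging
from collections import defaultdict

errors = defaultdict(int)  # network name -> number of layers that failed to apply

def apply_layer(net_name, layer_name, apply_fn):
    """Apply one layer's weights; count failures instead of aborting the whole network."""
    try:
        apply_fn()
    except RuntimeError as e:
        logging.debug("Network %s layer %s: %s", net_name, layer_name, e)
        errors[net_name] += 1

def error_summary():
    """One user-facing line per failing network, e.g. for a UI comments list."""
    return [f"network {name}: {count} layer(s) failed to apply" for name, count in errors.items()]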

From d9cc27cb29926c9cc5dce331da8fbaf996cf4973 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Mon, 14 Aug 2023 13:32:51 +0800
Subject: Fix MHA updown err and support ex-bias for no-bias layer

---
 extensions-builtin/Lora/networks.py | 37 +++++++++++++++++++++++++++++--------
 1 file changed, 29 insertions(+), 8 deletions(-)

(limited to 'extensions-builtin/Lora/networks.py')

diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index ba621139..1645b822 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -277,7 +277,15 @@ def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Li
             self.weight.copy_(weights_backup)
 
     if bias_backup is not None:
-        self.bias.copy_(bias_backup)
+        if isinstance(self, torch.nn.MultiheadAttention):
+            self.out_proj.bias.copy_(bias_backup)
+        else:
+            self.bias.copy_(bias_backup)
+    else:
+        if isinstance(self, torch.nn.MultiheadAttention):
+            self.out_proj.bias = None
+        else:
+            self.bias = None
 
 
 def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
@@ -305,7 +313,12 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
 
     bias_backup = getattr(self, "network_bias_backup", None)
     if bias_backup is None and getattr(self, 'bias', None) is not None:
-        bias_backup = self.bias.to(devices.cpu, copy=True)
+        if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None:
+            bias_backup = self.out_proj.bias.to(devices.cpu, copy=True)
+        elif getattr(self, 'bias', None) is not None:
+            bias_backup = self.bias.to(devices.cpu, copy=True)
+        else:
+            bias_backup = None
         self.network_bias_backup = bias_backup
 
     if current_names != wanted_names:
@@ -322,8 +335,11 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
                     updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))
 
                 self.weight += updown
-                if ex_bias is not None and getattr(self, 'bias', None) is not None:
-                    self.bias += ex_bias
+                if ex_bias is not None and hasattr(self, 'bias'):
+                    if self.bias is None:
+                        self.bias = torch.nn.Parameter(ex_bias)
+                    else:
+                        self.bias += ex_bias
             continue
 
         module_q = net.modules.get(network_layer_name + "_q_proj", None)
@@ -333,14 +349,19 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
 
         if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out:
             with torch.no_grad():
-                updown_q = module_q.calc_updown(self.in_proj_weight)
-                updown_k = module_k.calc_updown(self.in_proj_weight)
-                updown_v = module_v.calc_updown(self.in_proj_weight)
+                updown_q, _ = module_q.calc_updown(self.in_proj_weight)
+                updown_k, _ = module_k.calc_updown(self.in_proj_weight)
+                updown_v, _ = module_v.calc_updown(self.in_proj_weight)
                 updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
-                updown_out = module_out.calc_updown(self.out_proj.weight)
+                updown_out, ex_bias = module_out.calc_updown(self.out_proj.weight)
 
                 self.in_proj_weight += updown_qkv
                 self.out_proj.weight += updown_out
+                if ex_bias is not None:
+                    if self.out_proj.bias is None:
+                        self.out_proj.bias = torch.nn.Parameter(ex_bias)
+                    else:
+                        self.out_proj.bias += ex_bias
                 continue
 
         if module is None:
--
cgit v1.2.3
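
The new branches cover layers that start out without a bias: applying an ex_bias has to create a fresh Parameter, and restoring has to set the bias back to None rather than copying into it. A round-trip sketch of that logic on a plain Linear layer, assuming nothing beyond the behaviour visible in the patch:

import torch
import torch.nn as nn

# A Linear layer created without a bias (the "ex-bias for no-bias layer" case).
layer = nn.Linear(4, 4, bias=False)

# backup: remember that there was no bias to begin with
bias_backup = layer.bias.detach().clone() if layer.bias is not None else None

# apply: attach the LoRA-provided extra bias as a new Parameter
ex_bias = torch.zeros(4)
with torch.no_grad():
    if layer.bias is None:
        layer.bias = torch.nn.Parameter(ex_bias)
    else:
        layer.bias += ex_bias

# restore: because the backup was None, the bias must be removed again,
# otherwise the layer would keep a bias it never had
if bias_backup is not None:
    with torch.no_grad():
        layer.bias.copy_(bias_backup)
else:
    layer.bias = None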

From f70ded89365f71d42b6a60a561e8fccfdd25c159 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Mon, 14 Aug 2023 13:53:40 +0800
Subject: remove "if bias exist" check

---
 extensions-builtin/Lora/networks.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'extensions-builtin/Lora/networks.py')

diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 96d14344..22fdff4a 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -312,7 +312,7 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
         self.network_weights_backup = weights_backup
 
     bias_backup = getattr(self, "network_bias_backup", None)
-    if bias_backup is None and getattr(self, 'bias', None) is not None:
+    if bias_backup is None:
         if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None:
             bias_backup = self.out_proj.bias.to(devices.cpu, copy=True)
         elif getattr(self, 'bias', None) is not None:
--
cgit v1.2.3

From f01682ee01e81e8ef84fd6fffe8f7aa17233285d Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 15 Aug 2023 19:23:27 +0300
Subject: store patches for Lora in a specialized module

---
 extensions-builtin/Lora/networks.py | 32 ++++++++++++++++++--------------
 1 file changed, 18 insertions(+), 14 deletions(-)

(limited to 'extensions-builtin/Lora/networks.py')

diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 22fdff4a..9fca36b6 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -2,6 +2,7 @@ import logging
 import os
 import re
 
+import lora_patches
 import network
 import network_lora
 import network_hada
@@ -418,74 +419,74 @@ def network_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
 
 def network_Linear_forward(self, input):
     if shared.opts.lora_functional:
-        return network_forward(self, input, torch.nn.Linear_forward_before_network)
+        return network_forward(self, input, originals.Linear_forward)
 
     network_apply_weights(self)
 
-    return torch.nn.Linear_forward_before_network(self, input)
+    return originals.Linear_forward(self, input)
 
 
 def network_Linear_load_state_dict(self, *args, **kwargs):
     network_reset_cached_weight(self)
 
-    return torch.nn.Linear_load_state_dict_before_network(self, *args, **kwargs)
+    return originals.Linear_load_state_dict(self, *args, **kwargs)
 
 
 def network_Conv2d_forward(self, input):
     if shared.opts.lora_functional:
-        return network_forward(self, input, torch.nn.Conv2d_forward_before_network)
+        return network_forward(self, input, originals.Conv2d_forward)
 
     network_apply_weights(self)
 
-    return torch.nn.Conv2d_forward_before_network(self, input)
+    return originals.Conv2d_forward(self, input)
 
 
 def network_Conv2d_load_state_dict(self, *args, **kwargs):
     network_reset_cached_weight(self)
 
-    return torch.nn.Conv2d_load_state_dict_before_network(self, *args, **kwargs)
+    return originals.Conv2d_load_state_dict(self, *args, **kwargs)
 
 
 def network_GroupNorm_forward(self, input):
     if shared.opts.lora_functional:
-        return network_forward(self, input, torch.nn.GroupNorm_forward_before_network)
+        return network_forward(self, input, originals.GroupNorm_forward)
 
     network_apply_weights(self)
 
-    return torch.nn.GroupNorm_forward_before_network(self, input)
+    return originals.GroupNorm_forward(self, input)
 
 
 def network_GroupNorm_load_state_dict(self, *args, **kwargs):
     network_reset_cached_weight(self)
 
-    return torch.nn.GroupNorm_load_state_dict_before_network(self, *args, **kwargs)
+    return originals.GroupNorm_load_state_dict(self, *args, **kwargs)
 
 
 def network_LayerNorm_forward(self, input):
     if shared.opts.lora_functional:
-        return network_forward(self, input, torch.nn.LayerNorm_forward_before_network)
+        return network_forward(self, input, originals.LayerNorm_forward)
 
     network_apply_weights(self)
 
-    return torch.nn.LayerNorm_forward_before_network(self, input)
+    return originals.LayerNorm_forward(self, input)
 
 
 def network_LayerNorm_load_state_dict(self, *args, **kwargs):
     network_reset_cached_weight(self)
 
-    return torch.nn.LayerNorm_load_state_dict_before_network(self, *args, **kwargs)
+    return originals.LayerNorm_load_state_dict(self, *args, **kwargs)
 
 
 def network_MultiheadAttention_forward(self, *args, **kwargs):
     network_apply_weights(self)
 
-    return torch.nn.MultiheadAttention_forward_before_network(self, *args, **kwargs)
+    return originals.MultiheadAttention_forward(self, *args, **kwargs)
 
 
 def network_MultiheadAttention_load_state_dict(self, *args, **kwargs):
     network_reset_cached_weight(self)
 
-    return torch.nn.MultiheadAttention_load_state_dict_before_network(self, *args, **kwargs)
+    return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs)
 
 
 def list_available_networks():
@@ -552,6 +553,9 @@ def infotext_pasted(infotext, params):
     if added:
         params["Prompt"] += "\n" + "".join(added)
 
+
+originals: lora_patches.LoraPatches = None
+
 extra_network_lora = None
 
 available_networks = {}
--
cgit v1.2.3
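
lora_patches.py is not included in this log; all networks.py needs is an object whose attributes (Linear_forward, Linear_load_state_dict and so on) hold the original, un-patched torch methods so the replacement functions can call straight through to them. A generic sketch of that kind of patch container, assuming only the usage visible above; the class and method names below are illustrative, not the actual LoraPatches API:

class PatchContainerSketch:
    """Swap replacement methods onto torch.nn classes and keep handles to the
    originals so patched code can call straight through (illustrative)."""

    def __init__(self, replacements):
        # replacements: {(torch.nn.Linear, "forward"): new_function, ...}
        self.originals = {}
        for (cls, method_name), new_func in replacements.items():
            self.originals[(cls, method_name)] = getattr(cls, method_name)
            setattr(cls, method_name, new_func)

    def original(self, cls, method_name):
        # in networks.py, attributes like originals.Linear_forward play this role
        return self.originals[(cls, method_name)]

    def undo(self):
        for (cls, method_name), original in self.originals.items():
            setattr(cls, method_name, original)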

From 86221269f98ef9b21a6e6c9d04b86e2fb5cb33d3 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 16 Aug 2023 09:55:35 +0300
Subject: RAM optimization round 2

---
 extensions-builtin/Lora/networks.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'extensions-builtin/Lora/networks.py')

diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 9fca36b6..96f935b2 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -304,7 +304,10 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
     wanted_names = tuple((x.name, x.te_multiplier, x.unet_multiplier, x.dyn_dim) for x in loaded_networks)
 
     weights_backup = getattr(self, "network_weights_backup", None)
-    if weights_backup is None:
+    if weights_backup is None and wanted_names != ():
+        if current_names != ():
+            raise RuntimeError("no backup weights found and current weights are not unchanged")
+
         if isinstance(self, torch.nn.MultiheadAttention):
             weights_backup = (self.in_proj_weight.to(devices.cpu, copy=True), self.out_proj.weight.to(devices.cpu, copy=True))
         else:
--
cgit v1.2.3