From c3eced22fc7b9da4fbb2f55f2d53a7e5e511cfbd Mon Sep 17 00:00:00 2001
From: Leo Mozoloa
Date: Thu, 4 May 2023 16:14:33 +0200
Subject: Fix some Lora's not working

---
 extensions-builtin/Lora/lora.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'extensions-builtin/Lora/lora.py')

diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index 6f246921..bcf36d77 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -165,8 +165,10 @@ def load_lora(name, filename):
             module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
         elif type(sd_module) == torch.nn.MultiheadAttention:
             module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
-        elif type(sd_module) == torch.nn.Conv2d:
+        elif type(sd_module) == torch.nn.Conv2d and weight.shape[2:] == (1, 1):
             module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
+        elif type(sd_module) == torch.nn.Conv2d and weight.shape[2:] == (3, 3):
+            module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (3, 3), bias=False)
         else:
             print(f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}')
             continue
@@ -232,6 +234,8 @@ def lora_calc_updown(lora, module, target):

         if up.shape[2:] == (1, 1) and down.shape[2:] == (1, 1):
             updown = (up.squeeze(2).squeeze(2) @ down.squeeze(2).squeeze(2)).unsqueeze(2).unsqueeze(3)
+        elif up.shape[2:] == (3, 3) or down.shape[2:] == (3, 3):
+            updown = torch.nn.functional.conv2d(down.permute(1, 0, 2, 3), up).permute(1, 0, 2, 3)
         else:
             updown = up @ down

--
cgit v1.2.3

From 2cb3b0be1def43e0d225b45a640592a7999a0d69 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 7 May 2023 08:25:34 +0300
Subject: if present, use Lora's "ss_output_name" field to refer to it in prompt

---
 extensions-builtin/Lora/lora.py | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

(limited to 'extensions-builtin/Lora/lora.py')

diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index 6f246921..e3ca7fa2 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -93,6 +93,7 @@ class LoraOnDisk:
         self.metadata = m

         self.ssmd_cover_images = self.metadata.pop('ssmd_cover_images', None)  # those are cover images and they are too big to display in UI as text
+        self.alias = self.metadata.get('ss_output_name', self.name)


 class LoraModule:
@@ -199,11 +200,11 @@ def load_loras(names, multipliers=None):

     loaded_loras.clear()

-    loras_on_disk = [available_loras.get(name, None) for name in names]
+    loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
     if any([x is None for x in loras_on_disk]):
         list_available_loras()

-        loras_on_disk = [available_loras.get(name, None) for name in names]
+        loras_on_disk = [available_lora_aliases.get(name, None) for name in names]

     for i, name in enumerate(names):
         lora = already_loaded.get(name, None)
@@ -343,6 +344,7 @@ def lora_MultiheadAttention_load_state_dict(self, *args, **kwargs):

 def list_available_loras():
     available_loras.clear()
+    available_lora_aliases.clear()

     os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)

@@ -356,11 +358,16 @@ def list_available_loras():
             continue

         name = os.path.splitext(os.path.basename(filename))[0]
+        entry = LoraOnDisk(name, filename)

-        available_loras[name] = LoraOnDisk(name, filename)
+        available_loras[name] = entry
+
+        available_lora_aliases[name] = entry
+        available_lora_aliases[entry.alias] = entry


 available_loras = {}
+available_lora_aliases = {}
 loaded_loras = []

 list_available_loras()
--
cgit v1.2.3
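For reference, the (3, 3) branch added to lora_calc_updown in the first patch above folds the two low-rank convolution factors into a single delta kernel that can then be added onto the original Conv2d weight. Below is a minimal standalone sketch of that composition; the rank and channel counts are made up for illustration and are not taken from any particular checkpoint.

    import torch
    import torch.nn.functional as F

    # Hypothetical shapes for a 3x3 conv LoRA factorization (illustrative only).
    rank, c_in, c_out = 4, 320, 640
    down = torch.randn(rank, c_in, 3, 3)   # lora_down.weight
    up = torch.randn(c_out, rank, 1, 1)    # lora_up.weight

    # Treat `down` as a batch of per-input-channel rank maps and convolve it with
    # the 1x1 `up` kernel: at every spatial position this performs the same
    # rank-space contraction that `up @ down` performs for Linear layers, giving
    # one (c_out, c_in, 3, 3) delta kernel.
    updown = F.conv2d(down.permute(1, 0, 2, 3), up).permute(1, 0, 2, 3)
    print(updown.shape)  # torch.Size([640, 320, 3, 3])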
From 2473bafa67b2dd0077f752bf23e4bf8f89990a8c Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 8 May 2023 07:28:30 +0300
Subject: read infotext params from the other extension for Lora if it's not active

---
 extensions-builtin/Lora/lora.py | 36 +++++++++++++++++++++++++++++++++++-
 1 file changed, 35 insertions(+), 1 deletion(-)

(limited to 'extensions-builtin/Lora/lora.py')

diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index e3ca7fa2..94ec021b 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -4,7 +4,7 @@ import re
 import torch
 from typing import Union

-from modules import shared, devices, sd_models, errors
+from modules import shared, devices, sd_models, errors, scripts

 metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}

@@ -366,6 +366,40 @@ def list_available_loras():
         available_lora_aliases[entry.alias] = entry


+re_lora_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")
+
+
+def infotext_pasted(infotext, params):
+    if "AddNet Module 1" in [x[1] for x in scripts.scripts_txt2img.infotext_fields]:
+        return  # if the other extension is active, it will handle those fields, no need to do anything
+
+    added = []
+
+    for k, v in params.items():
+        if not k.startswith("AddNet Model "):
+            continue
+
+        num = k[13:]
+
+        if params.get("AddNet Module " + num) != "LoRA":
+            continue
+
+        name = params.get("AddNet Model " + num)
+        if name is None:
+            continue
+
+        m = re_lora_name.match(name)
+        if m:
+            name = m.group(1)
+
+        multiplier = params.get("AddNet Weight A " + num, "1.0")
+
+        added.append(f"<lora:{name}:{multiplier}>")
+
+    if added:
+        params["Prompt"] += "\n" + "".join(added)
+
+
 available_loras = {}
 available_lora_aliases = {}
 loaded_loras = []
--
cgit v1.2.3
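The infotext_pasted hook above translates additional-networks ("AddNet") infotext fields into the built-in <lora:name:weight> prompt syntax when the other extension is not active. Below is a simplified standalone walk-through of that conversion; the parameter values are made up for the example.

    import re

    re_lora_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")

    params = {
        "Prompt": "a photo of a cat",
        "AddNet Module 1": "LoRA",
        "AddNet Model 1": "myLora(ab12cd34)",   # hypothetical model name with hash suffix
        "AddNet Weight A 1": "0.8",
    }

    added = []
    for k in list(params):
        if not k.startswith("AddNet Model "):
            continue
        num = k[13:]
        if params.get("AddNet Module " + num) != "LoRA":
            continue
        name = params["AddNet Model " + num]
        m = re_lora_name.match(name)
        if m:
            name = m.group(1)  # drop the "(hash)" suffix
        multiplier = params.get("AddNet Weight A " + num, "1.0")
        added.append(f"<lora:{name}:{multiplier}>")

    params["Prompt"] += "\n" + "".join(added)
    print(params["Prompt"])
    # a photo of a cat
    # <lora:myLora:0.8>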
From 083dc3c76ab7dbc7b2b04f3396d4f5280b002906 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 8 May 2023 11:33:45 +0300
Subject: directory hiding for extra networks: dirs starting with . will hide their cards on extra network tabs unless specifically searched for

create HTML for extra network pages only on demand
allow directories starting with . to still list their models for lora, checkpoints, etc
keep "search" filter for extra networks when user refreshes the page
---
 extensions-builtin/Lora/lora.py | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

(limited to 'extensions-builtin/Lora/lora.py')

diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index 83c1c6fd..83933639 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -352,11 +352,7 @@ def list_available_loras():

     os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)

-    candidates = \
-        glob.glob(os.path.join(shared.cmd_opts.lora_dir, '**/*.pt'), recursive=True) + \
-        glob.glob(os.path.join(shared.cmd_opts.lora_dir, '**/*.safetensors'), recursive=True) + \
-        glob.glob(os.path.join(shared.cmd_opts.lora_dir, '**/*.ckpt'), recursive=True)
-
+    candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
     for filename in sorted(candidates, key=str.lower):
         if os.path.isdir(filename):
             continue
--
cgit v1.2.3

From ec0da07236d286f37c86f9cd92642e24381dd6a5 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 8 May 2023 12:07:43 +0300
Subject: Lora: add an option to use old method of applying loras

---
 extensions-builtin/Lora/lora.py | 56 ++++++++++++++++++++++++++++++++++++-----
 1 file changed, 50 insertions(+), 6 deletions(-)

(limited to 'extensions-builtin/Lora/lora.py')

diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index 83933639..d488b5ae 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -245,6 +245,19 @@ def lora_calc_updown(lora, module, target):
         return updown


+def lora_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
+    weights_backup = getattr(self, "lora_weights_backup", None)
+
+    if weights_backup is None:
+        return
+
+    if isinstance(self, torch.nn.MultiheadAttention):
+        self.in_proj_weight.copy_(weights_backup[0])
+        self.out_proj.weight.copy_(weights_backup[1])
+    else:
+        self.weight.copy_(weights_backup)
+
+
 def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
     """
     Applies the currently selected set of Loras to the weights of torch layer self.
@@ -269,12 +282,7 @@ def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.Mu
         self.lora_weights_backup = weights_backup

     if current_names != wanted_names:
-        if weights_backup is not None:
-            if isinstance(self, torch.nn.MultiheadAttention):
-                self.in_proj_weight.copy_(weights_backup[0])
-                self.out_proj.weight.copy_(weights_backup[1])
-            else:
-                self.weight.copy_(weights_backup)
+        lora_restore_weights_from_backup(self)

         for lora in loaded_loras:
             module = lora.modules.get(lora_layer_name, None)
@@ -305,12 +313,45 @@ def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.Mu
     setattr(self, "lora_current_names", wanted_names)


+def lora_forward(module, input, original_forward):
+    """
+    Old way of applying Lora by executing operations during layer's forward.
+    Stacking many loras this way results in big performance degradation.
+    """
+
+    if len(loaded_loras) == 0:
+        return original_forward(module, input)
+
+    input = devices.cond_cast_unet(input)
+
+    lora_restore_weights_from_backup(module)
+    lora_reset_cached_weight(module)
+
+    res = original_forward(module, input)
+
+    lora_layer_name = getattr(module, 'lora_layer_name', None)
+    for lora in loaded_loras:
+        module = lora.modules.get(lora_layer_name, None)
+        if module is None:
+            continue
+
+        module.up.to(device=devices.device)
+        module.down.to(device=devices.device)
+
+        res = res + module.up(module.down(input)) * lora.multiplier * (module.alpha / module.up.weight.shape[1] if module.alpha else 1.0)
+
+    return res
+
+
 def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
     setattr(self, "lora_current_names", ())
     setattr(self, "lora_weights_backup", None)


 def lora_Linear_forward(self, input):
+    if shared.opts.lora_functional:
+        return lora_forward(self, input, torch.nn.Linear_forward_before_lora)
+
     lora_apply_weights(self)

     return torch.nn.Linear_forward_before_lora(self, input)
@@ -323,6 +364,9 @@ def lora_Linear_load_state_dict(self, *args, **kwargs):


 def lora_Conv2d_forward(self, input):
+    if shared.opts.lora_functional:
+        return lora_forward(self, input, torch.nn.Conv2d_forward_before_lora)
+
     lora_apply_weights(self)

     return torch.nn.Conv2d_forward_before_lora(self, input)
--
cgit v1.2.3
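The option added above keeps the older "functional" path (extra up/down operations on every forward call, which degrades performance when many loras are stacked) alongside the default path that bakes the LoRA delta into the layer weight once. Below is a toy sketch, with made-up layer sizes, of why the two paths give the same result for a Linear layer.

    import torch

    torch.manual_seed(0)
    d_in, d_out, rank, multiplier, alpha = 16, 8, 4, 0.8, 4.0

    layer = torch.nn.Linear(d_in, d_out, bias=False)
    down = torch.nn.Linear(d_in, rank, bias=False)
    up = torch.nn.Linear(rank, d_out, bias=False)
    scale = multiplier * alpha / rank  # same scaling as multiplier * alpha / up.weight.shape[1]

    x = torch.randn(2, d_in)

    # "functional" path: extra ops during forward
    res_functional = layer(x) + up(down(x)) * scale

    # "merged" path: fold up @ down into the weight once, then run a plain forward
    merged = torch.nn.Linear(d_in, d_out, bias=False)
    with torch.no_grad():
        merged.weight.copy_(layer.weight + up.weight @ down.weight * scale)
    res_merged = merged(x)

    print(torch.allclose(res_functional, res_merged, atol=1e-6))  # True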
From 34a82a345abe89faafbd43fa34f40dd110559071 Mon Sep 17 00:00:00 2001
From: Sayo
Date: Mon, 8 May 2023 19:55:05 +0800
Subject: Add api method to get LoRA models

---
 extensions-builtin/Lora/lora.py | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

(limited to 'extensions-builtin/Lora/lora.py')

diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index d488b5ae..8fc1ddca 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -2,7 +2,9 @@ import glob
 import os
 import re
 import torch
-from typing import Union
+from typing import Union, List, Optional
+from fastapi import FastAPI
+import gradio as gr

 from modules import shared, devices, sd_models, errors, scripts

@@ -443,9 +445,19 @@ def infotext_pasted(infotext, params):
     if added:
         params["Prompt"] += "\n" + "".join(added)


+def api(_: gr.Blocks, app: FastAPI):
+    @app.get("/sdapi/v1/loras")
+    async def getloras():
+        return [{"name": name, "path": available_loras[name].filename, "prompt": ""} for name in available_loras]
+
 available_loras = {}
 available_lora_aliases = {}
 loaded_loras = []

 list_available_loras()
+try:
+    import modules.script_callbacks as script_callbacks
+    script_callbacks.on_app_started(api)
+except:
+    pass
\ No newline at end of file
--
cgit v1.2.3

From f9abe4cddcdc6704be02633d9d5ed9640d6b9008 Mon Sep 17 00:00:00 2001
From: Sayo
Date: Mon, 8 May 2023 20:38:10 +0800
Subject: Add api method to get LoRA models with prompt

---
 extensions-builtin/Lora/lora.py | 13 +++----------
 1 file changed, 3 insertions(+), 10 deletions(-)

(limited to 'extensions-builtin/Lora/lora.py')

diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index 8fc1ddca..05162e41 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -2,9 +2,8 @@ import glob
 import os
 import re
 import torch
-from typing import Union, List, Optional
-from fastapi import FastAPI
-import gradio as gr
+from typing import Union
+import scripts.api as api

 from modules import shared, devices, sd_models, errors, scripts

@@ -445,12 +444,6 @@ def infotext_pasted(infotext, params):
     if added:
         params["Prompt"] += "\n" + "".join(added)

-def api(_: gr.Blocks, app: FastAPI):
-    @app.get("/sdapi/v1/loras")
-    async def getloras():
-        return [{"name": name, "path": available_loras[name].filename, "prompt": ""} for name in available_loras]
-
-
 available_loras = {}
 available_lora_aliases = {}
 loaded_loras = []

 list_available_loras()
 try:
     import modules.script_callbacks as script_callbacks
-    script_callbacks.on_app_started(api)
+    script_callbacks.on_app_started(api.api)
 except:
     pass
\ No newline at end of file
--
cgit v1.2.3
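A possible client-side call for the /sdapi/v1/loras endpoint introduced by the two commits above; this sketch assumes a local webui instance listening on the default port with the API enabled, which may differ from your setup.

    import requests

    # Each entry carries "name", "path" and "prompt" fields, as defined in the patch.
    resp = requests.get("http://127.0.0.1:7860/sdapi/v1/loras", timeout=10)
    for item in resp.json():
        print(item["name"], item["path"])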
From eb95809501068a38f2b6bdb01b6ae5b86ff7ae87 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 9 May 2023 11:25:46 +0300
Subject: rework loras api

---
 extensions-builtin/Lora/lora.py | 6 ------
 1 file changed, 6 deletions(-)

(limited to 'extensions-builtin/Lora/lora.py')

diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index 05162e41..ba1293df 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -3,7 +3,6 @@ import os
 import re
 import torch
 from typing import Union
-import scripts.api as api

 from modules import shared, devices, sd_models, errors, scripts

@@ -449,8 +448,3 @@ available_lora_aliases = {}
 loaded_loras = []

 list_available_loras()
-try:
-    import modules.script_callbacks as script_callbacks
-    script_callbacks.on_app_started(api.api)
-except:
-    pass
\ No newline at end of file
--
cgit v1.2.3
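This last commit removes the route registration from lora.py entirely; because this log is limited to lora.py, it does not show where the API code moved. Purely as a hypothetical illustration of the usual webui callback pattern (the module and function names below are invented for the sketch, not taken from the log), a registration living in a separate extension script could look roughly like this:

    from fastapi import FastAPI
    import gradio as gr

    from modules import script_callbacks
    import lora  # the extension's own module, assumed to be importable from the script


    def api_loras(_: gr.Blocks, app: FastAPI):
        @app.get("/sdapi/v1/loras")
        async def get_loras():
            return [{"name": name, "path": lora.available_loras[name].filename} for name in lora.available_loras]


    # Register the route once the FastAPI app is up.
    script_callbacks.on_app_started(api_loras)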