From 2473bafa67b2dd0077f752bf23e4bf8f89990a8c Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 8 May 2023 07:28:30 +0300
Subject: read infotext params from the other extension for Lora if it's not active

---
 extensions-builtin/Lora/scripts/lora_script.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'extensions-builtin/Lora/scripts/lora_script.py')

diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 3fc38ab9..2f2267a2 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -49,6 +49,7 @@ torch.nn.MultiheadAttention._load_from_state_dict = lora.lora_MultiheadAttention
 script_callbacks.on_model_loaded(lora.assign_lora_names_to_compvis_modules)
 script_callbacks.on_script_unloaded(unload)
 script_callbacks.on_before_ui(before_ui)
+script_callbacks.on_infotext_pasted(lora.infotext_pasted)
 
 
 shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
--
cgit v1.2.3
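A note on the patch above: on_infotext_pasted registers a handler that runs when generation parameters are pasted into the UI, and the handler may rewrite the parsed parameter dict in place. A minimal sketch of a handler with that shape follows; the body and the "AddNet Model" key format are illustrative assumptions, not the actual lora.infotext_pasted implementation, which lives in extensions-builtin/Lora/lora.py.

# Sketch only: shows the callback signature used with on_infotext_pasted.
def infotext_pasted(infotext: str, params: dict) -> None:
    for key, value in list(params.items()):
        # Assumed key format written by the additional-networks extension.
        if key.startswith("AddNet Model "):
            # Hypothetical rewrite: fold the other extension's Lora reference
            # into the prompt so pasted settings take effect without it.
            params["Prompt"] = params.get("Prompt", "") + f" <lora:{value}:1.0>"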
+ """ + + if len(loaded_loras) == 0: + return original_forward(module, input) + + input = devices.cond_cast_unet(input) + + lora_restore_weights_from_backup(module) + lora_reset_cached_weight(module) + + res = original_forward(module, input) + + lora_layer_name = getattr(module, 'lora_layer_name', None) + for lora in loaded_loras: + module = lora.modules.get(lora_layer_name, None) + if module is None: + continue + + module.up.to(device=devices.device) + module.down.to(device=devices.device) + + res = res + module.up(module.down(input)) * lora.multiplier * (module.alpha / module.up.weight.shape[1] if module.alpha else 1.0) + + return res + + def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]): setattr(self, "lora_current_names", ()) setattr(self, "lora_weights_backup", None) def lora_Linear_forward(self, input): + if shared.opts.lora_functional: + return lora_forward(self, input, torch.nn.Linear_forward_before_lora) + lora_apply_weights(self) return torch.nn.Linear_forward_before_lora(self, input) @@ -323,6 +364,9 @@ def lora_Linear_load_state_dict(self, *args, **kwargs): def lora_Conv2d_forward(self, input): + if shared.opts.lora_functional: + return lora_forward(self, input, torch.nn.Conv2d_forward_before_lora) + lora_apply_weights(self) return torch.nn.Conv2d_forward_before_lora(self, input) diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py index 2f2267a2..a67b8a69 100644 --- a/extensions-builtin/Lora/scripts/lora_script.py +++ b/extensions-builtin/Lora/scripts/lora_script.py @@ -55,3 +55,8 @@ script_callbacks.on_infotext_pasted(lora.infotext_pasted) shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), { "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras), })) + + +shared.options_templates.update(shared.options_section(('compatibility', "Compatibility"), { + "lora_functional": shared.OptionInfo(False, "Lora: use old method that takes longer when you have multiple Loras active and produces same results as kohya-ss/sd-webui-additional-networks extension"), +})) -- cgit v1.2.3 From eb95809501068a38f2b6bdb01b6ae5b86ff7ae87 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 9 May 2023 11:25:46 +0300 Subject: rework loras api --- extensions-builtin/Lora/lora.py | 6 ----- extensions-builtin/Lora/scripts/api.py | 31 -------------------------- extensions-builtin/Lora/scripts/lora_script.py | 21 ++++++++++++++++- 3 files changed, 20 insertions(+), 38 deletions(-) delete mode 100644 extensions-builtin/Lora/scripts/api.py (limited to 'extensions-builtin/Lora/scripts/lora_script.py') diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py index 05162e41..ba1293df 100644 --- a/extensions-builtin/Lora/lora.py +++ b/extensions-builtin/Lora/lora.py @@ -3,7 +3,6 @@ import os import re import torch from typing import Union -import scripts.api as api from modules import shared, devices, sd_models, errors, scripts @@ -449,8 +448,3 @@ available_lora_aliases = {} loaded_loras = [] list_available_loras() -try: - import modules.script_callbacks as script_callbacks - script_callbacks.on_app_started(api.api) -except: - pass \ No newline at end of file diff --git a/extensions-builtin/Lora/scripts/api.py b/extensions-builtin/Lora/scripts/api.py deleted file mode 100644 index f1f2e2fc..00000000 --- 
From eb95809501068a38f2b6bdb01b6ae5b86ff7ae87 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 9 May 2023 11:25:46 +0300
Subject: rework loras api

---
 extensions-builtin/Lora/lora.py                |  6 -----
 extensions-builtin/Lora/scripts/api.py         | 31 --------------------------
 extensions-builtin/Lora/scripts/lora_script.py | 21 ++++++++++++++++-
 3 files changed, 20 insertions(+), 38 deletions(-)
 delete mode 100644 extensions-builtin/Lora/scripts/api.py

(limited to 'extensions-builtin/Lora/scripts/lora_script.py')

diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index 05162e41..ba1293df 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -3,7 +3,6 @@ import os
 import re
 import torch
 from typing import Union
-import scripts.api as api
 
 from modules import shared, devices, sd_models, errors, scripts
 
@@ -449,8 +448,3 @@ available_lora_aliases = {}
 loaded_loras = []
 
 list_available_loras()
-try:
-    import modules.script_callbacks as script_callbacks
-    script_callbacks.on_app_started(api.api)
-except:
-    pass
\ No newline at end of file

diff --git a/extensions-builtin/Lora/scripts/api.py b/extensions-builtin/Lora/scripts/api.py
deleted file mode 100644
index f1f2e2fc..00000000
--- a/extensions-builtin/Lora/scripts/api.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from fastapi import FastAPI
-import gradio as gr
-import json
-import os
-import lora
-
-def get_lora_prompts(path):
-    directory, filename = os.path.split(path)
-    name_without_ext = os.path.splitext(filename)[0]
-    new_filename = name_without_ext + '.civitai.info'
-    try:
-        new_path = os.path.join(directory, new_filename)
-        if os.path.exists(new_path):
-            with open(new_path, 'r') as f:
-                data = json.load(f)
-            trained_words = data.get('trainedWords', [])
-            if len(trained_words) > 0:
-                result = ','.join(trained_words)
-                return result
-            else:
-                return ''
-        else:
-            return ''
-    except Exception as e:
-        return ''
-
-def api(_: gr.Blocks, app: FastAPI):
-    @app.get("/sdapi/v1/loras")
-    async def get_loras():
-        return [{"name": name, "path": lora.available_loras[name].filename, "prompt": get_lora_prompts(lora.available_loras[name].filename)} for name in lora.available_loras]
-

diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index a67b8a69..7db971fd 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -1,12 +1,12 @@
 import torch
 import gradio as gr
+from fastapi import FastAPI
 
 import lora
 import extra_networks_lora
 import ui_extra_networks_lora
 from modules import script_callbacks, ui_extra_networks, extra_networks, shared
 
-
 def unload():
     torch.nn.Linear.forward = torch.nn.Linear_forward_before_lora
     torch.nn.Linear._load_from_state_dict = torch.nn.Linear_load_state_dict_before_lora
@@ -60,3 +60,22 @@ shared.options_templates.update(shared.options_section(('extra_networks', "Extra
 shared.options_templates.update(shared.options_section(('compatibility', "Compatibility"), {
     "lora_functional": shared.OptionInfo(False, "Lora: use old method that takes longer when you have multiple Loras active and produces same results as kohya-ss/sd-webui-additional-networks extension"),
 }))
+
+
+def create_lora_json(obj: lora.LoraOnDisk):
+    return {
+        "name": obj.name,
+        "alias": obj.alias,
+        "path": obj.filename,
+        "metadata": obj.metadata,
+    }
+
+
+def api_loras(_: gr.Blocks, app: FastAPI):
+    @app.get("/sdapi/v1/loras")
+    async def get_loras():
+        return [create_lora_json(obj) for obj in lora.available_loras.values()]
+
+
+script_callbacks.on_app_started(api_loras)
+
--
cgit v1.2.3
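With this rework, /sdapi/v1/loras returns name, alias, path, and metadata per entry, replacing the deleted api.py's .civitai.info-derived "prompt" field. A quick client-side check of the new shape, assuming a default local instance launched with --api:

# Hypothetical client snippet; host and port assume a default local webui.
import requests

for entry in requests.get("http://127.0.0.1:7860/sdapi/v1/loras").json():
    # Fields come from create_lora_json in lora_script.py.
    print(entry["name"], entry["alias"], entry["path"])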