author     AUTOMATIC <16777216c@gmail.com>   2023-05-10 18:21:32 +0000
committer  AUTOMATIC <16777216c@gmail.com>   2023-05-10 18:21:32 +0000
commit     3ec7b705c78b7aca9569c92a419837352c7a4ec6 (patch)
tree       98248bc21aa4ad9715205f0a65a654532c6cfcc0 /modules
parent     d25219b7e889cf34bccae9cb88497708796efda2 (diff)
suggestions and fixes from the PR
Diffstat (limited to 'modules')
-rw-r--r--  modules/codeformer/codeformer_arch.py             7
-rw-r--r--  modules/hypernetworks/ui.py                       4
-rw-r--r--  modules/models/diffusion/uni_pc/uni_pc.py         4
-rw-r--r--  modules/scripts_postprocessing.py                 2
-rw-r--r--  modules/sd_hijack_clip.py                         2
-rw-r--r--  modules/shared.py                                 2
-rw-r--r--  modules/textual_inversion/textual_inversion.py    3
-rw-r--r--  modules/ui.py                                     4
8 files changed, 12 insertions, 16 deletions
diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py
index ff1c0b4b..45c70f84 100644
--- a/modules/codeformer/codeformer_arch.py
+++ b/modules/codeformer/codeformer_arch.py
@@ -161,13 +161,10 @@ class Fuse_sft_block(nn.Module):
class CodeFormer(VQAutoEncoder):
    def __init__(self, dim_embd=512, n_head=8, n_layers=9,
                codebook_size=1024, latent_size=256,
-                connect_list=None,
-                fix_modules=None):
+                connect_list=('32', '64', '128', '256'),
+                fix_modules=('quantize', 'generator')):
        super(CodeFormer, self).__init__(512, 64, [1, 2, 2, 4, 4, 8], 'nearest',2, [16], codebook_size)
-        connect_list = connect_list or ['32', '64', '128', '256']
-        fix_modules = fix_modules or ['quantize', 'generator']
-
        if fix_modules is not None:
            for module in fix_modules:
                for param in getattr(self, module).parameters():
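The tuple defaults above sidestep Python's mutable-default-argument pitfall: a default list is created once at definition time and shared across calls, while a tuple is immutable and safe to reuse. A minimal sketch of the difference, with invented function names (not from the repository):

    def append_bad(item, items=[]):    # one shared list for every call
        items.append(item)
        return items

    def append_good(item, items=()):   # immutable default; copy before mutating
        return list(items) + [item]

    append_bad(1)
    print(append_bad(2))    # [1, 2] -- state leaked from the first call
    append_good(1)
    print(append_good(2))   # [2]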
diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py
index e3f9eb13..8b6255e2 100644
--- a/modules/hypernetworks/ui.py
+++ b/modules/hypernetworks/ui.py
@@ -5,13 +5,13 @@ import modules.hypernetworks.hypernetwork
from modules import devices, sd_hijack, shared
not_available = ["hardswish", "multiheadattention"]
-keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available]
+keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict if x not in not_available]
def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None):
    filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure)
-    return gr.Dropdown.update(choices=sorted(shared.hypernetworks.keys())), f"Created: {filename}", ""
+    return gr.Dropdown.update(choices=sorted(shared.hypernetworks)), f"Created: {filename}", ""
def train_hypernetwork(*args):
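Dropping .keys() works because iterating a dict (and testing membership on it) already operates on its keys. A quick sketch with a throwaway activation table (placeholder entries, not the real activation_dict):

    activation_dict = {"relu": None, "gelu": None, "hardswish": None}
    not_available = ["hardswish", "multiheadattention"]

    keys = [x for x in activation_dict if x not in not_available]
    assert keys == [x for x in activation_dict.keys() if x not in not_available]
    print(keys)    # ['relu', 'gelu']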
diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py
index f6c49f87..a227b947 100644
--- a/modules/models/diffusion/uni_pc/uni_pc.py
+++ b/modules/models/diffusion/uni_pc/uni_pc.py
@@ -275,8 +275,8 @@ def model_wrapper(
        A noise prediction model that accepts the noised data and the continuous time as the inputs.
    """
-    model_kwargs = model_kwargs or []
-    classifier_kwargs = classifier_kwargs or []
+    model_kwargs = model_kwargs or {}
+    classifier_kwargs = classifier_kwargs or {}
    def get_model_input_time(t_continuous):
        """
diff --git a/modules/scripts_postprocessing.py b/modules/scripts_postprocessing.py
index 6751406c..bac1335d 100644
--- a/modules/scripts_postprocessing.py
+++ b/modules/scripts_postprocessing.py
@@ -124,7 +124,7 @@ class ScriptPostprocessingRunner:
            script_args = args[script.args_from:script.args_to]
            process_args = {}
-            for (name, component), value in zip(script.controls.items(), script_args): # noqa B007
+            for (name, _component), value in zip(script.controls.items(), script_args):
                process_args[name] = value
            script.process(pp, **process_args)
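Renaming the unused loop variable with a leading underscore expresses the intent directly, so the # noqa B007 suppression is no longer needed. An illustrative snippet with a hypothetical controls mapping:

    controls = {"strength": "slider", "upscale": "checkbox"}   # hypothetical controls
    script_args = (0.75, True)

    process_args = {}
    for (name, _component), value in zip(controls.items(), script_args):
        process_args[name] = value    # _component is intentionally ignored
    print(process_args)               # {'strength': 0.75, 'upscale': True}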
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index c0c350f6..cc6e8c21 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -223,7 +223,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
            self.hijack.fixes = [x.fixes for x in batch_chunk]
            for fixes in self.hijack.fixes:
-                for position, embedding in fixes: # noqa: B007
+                for _position, embedding in fixes:
                    used_embeddings[embedding.name] = embedding
            z = self.process_tokens(tokens, multipliers)
diff --git a/modules/shared.py b/modules/shared.py
index 913c9e63..ac67adc0 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -381,7 +381,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), {
"extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks (px)"),
"extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks (px)"),
"extra_networks_add_text_separator": OptionInfo(" ", "Extra text to add before <...> when adding extra network to prompt"),
- "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(hypernetworks.keys())}, refresh=reload_hypernetworks),
+ "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", hypernetworks]}, refresh=reload_hypernetworks),
}))
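Star-unpacking the dict inside the list literal expands its keys, which is what the replaced ["None"] + list(hypernetworks.keys()) spelled out more verbosely. A small check with placeholder hypernetwork names:

    hypernetworks = {"anime": object(), "paint": object()}    # placeholder registry
    choices = ["None", *hypernetworks]
    assert choices == ["None"] + list(hypernetworks.keys())
    print(choices)    # ['None', 'anime', 'paint']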
options_templates.update(options_section(('ui', "User interface"), {
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 47035332..9e1b2b9a 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -166,8 +166,7 @@ class EmbeddingDatabase:
        # textual inversion embeddings
        if 'string_to_param' in data:
            param_dict = data['string_to_param']
-            if hasattr(param_dict, '_parameters'):
-                param_dict = param_dict._parameters # fix for torch 1.12.1 loading saved file from torch 1.11
+            param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11
            assert len(param_dict) == 1, 'embedding file has multiple terms in it'
            emb = next(iter(param_dict.items()))[1]
        # diffuser concepts
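getattr with a default collapses the hasattr check and the attribute lookup into one expression, leaving param_dict unchanged when the attribute is absent. A rough sketch with an invented stand-in class:

    class LegacyParamDict:                  # stand-in for the torch 1.11-style container
        _parameters = {"emb": [0.1, 0.2]}

    legacy = LegacyParamDict()
    print(getattr(legacy, '_parameters', legacy))    # {'emb': [0.1, 0.2]}

    plain = {"emb": [0.1, 0.2]}                      # no _parameters attribute
    print(getattr(plain, '_parameters', plain) is plain)    # True -- falls through unchanged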
diff --git a/modules/ui.py b/modules/ui.py
index 83bfb7d8..7ee99473 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1230,8 +1230,8 @@ def create_ui():
                        train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
                        create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name")
-                        train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=list(shared.hypernetworks.keys()))
-                        create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks.keys())}, "refresh_train_hypernetwork_name")
+                        train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks))
+                        create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name")
                    with FormRow():
                        embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate")
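sorted() accepts any iterable and already returns a list, so sorted(shared.hypernetworks) covers both the .keys() call and the list() wrapper from the old lines. A one-line check with a throwaway mapping:

    hypernetworks = {"b_style": 1, "a_style": 2}    # placeholder mapping
    assert sorted(hypernetworks) == sorted(list(hypernetworks.keys())) == ["a_style", "b_style"]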