From 762265eab58cdb8f2d6398769bab43d8b8db0075 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 07:52:45 +0300 Subject: autofixes from ruff --- modules/ui_extra_networks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/ui_extra_networks.py') diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 8c3dea56..49e06289 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -263,7 +263,7 @@ def create_ui(container, button, tabname): ui.stored_extra_pages = pages_in_preferred_order(extra_pages.copy()) ui.tabname = tabname - with gr.Tabs(elem_id=tabname+"_extra_tabs") as tabs: + with gr.Tabs(elem_id=tabname+"_extra_tabs"): for page in ui.stored_extra_pages: page_id = page.title.lower().replace(" ", "_") -- cgit v1.2.3 From f741a98baccae100fcfb40c017b5c35c5cba1b0c Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 08:43:42 +0300 Subject: imports cleanup for ruff --- extensions-builtin/Lora/lora.py | 1 - extensions-builtin/ScuNET/scripts/scunet_model.py | 1 - extensions-builtin/SwinIR/scripts/swinir_model.py | 3 +-- modules/codeformer/codeformer_arch.py | 4 +--- modules/codeformer/vqgan_arch.py | 2 -- modules/codeformer_model.py | 4 +--- modules/config_states.py | 2 +- modules/esrgan_model.py | 2 +- modules/esrgan_model_arch.py | 1 - modules/extensions.py | 1 - modules/generation_parameters_copypaste.py | 4 ---- modules/hypernetworks/hypernetwork.py | 3 +-- modules/hypernetworks/ui.py | 2 -- modules/images.py | 2 +- modules/img2img.py | 5 +---- modules/mac_specific.py | 1 - modules/modelloader.py | 1 - modules/models/diffusion/uni_pc/uni_pc.py | 1 - modules/processing.py | 5 ++--- modules/sd_hijack.py | 2 +- modules/sd_hijack_inpainting.py | 6 ------ modules/sd_hijack_ip2p.py | 5 +---- modules/sd_hijack_xlmr.py | 2 -- modules/sd_models.py | 2 +- modules/sd_models_config.py | 1 - modules/sd_samplers_kdiffusion.py | 1 - modules/sd_vae.py | 3 --- modules/shared.py | 3 --- modules/styles.py | 9 --------- modules/textual_inversion/autocrop.py | 4 +--- modules/textual_inversion/image_embedding.py | 2 +- modules/textual_inversion/preprocess.py | 4 ---- modules/textual_inversion/textual_inversion.py | 1 - modules/txt2img.py | 9 +++------ modules/ui.py | 5 ++--- modules/ui_extra_networks.py | 1 - modules/ui_postprocessing.py | 2 +- modules/upscaler.py | 2 -- modules/xlmr.py | 2 +- pyproject.toml | 11 +++++++---- scripts/custom_code.py | 2 +- scripts/outpainting_mk_2.py | 4 ++-- scripts/poor_mans_outpainting.py | 4 ++-- scripts/prompt_matrix.py | 7 ++----- scripts/prompts_from_file.py | 5 +---- scripts/sd_upscale.py | 4 ++-- scripts/xyz_grid.py | 6 ++---- webui.py | 2 +- 48 files changed, 42 insertions(+), 114 deletions(-) (limited to 'modules/ui_extra_networks.py') diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py index ba1293df..0ab43229 100644 --- a/extensions-builtin/Lora/lora.py +++ b/extensions-builtin/Lora/lora.py @@ -1,4 +1,3 @@ -import glob import os import re import torch diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py index c7fd5739..aa2fdb3a 100644 --- a/extensions-builtin/ScuNET/scripts/scunet_model.py +++ b/extensions-builtin/ScuNET/scripts/scunet_model.py @@ -13,7 +13,6 @@ import modules.upscaler from modules import devices, modelloader from scunet_model_arch import SCUNet as net from modules.shared import opts -from modules import images class 
UpscalerScuNET(modules.upscaler.Upscaler): diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py index d77c3a92..55dd94ab 100644 --- a/extensions-builtin/SwinIR/scripts/swinir_model.py +++ b/extensions-builtin/SwinIR/scripts/swinir_model.py @@ -1,4 +1,3 @@ -import contextlib import os import numpy as np @@ -8,7 +7,7 @@ from basicsr.utils.download_util import load_file_from_url from tqdm import tqdm from modules import modelloader, devices, script_callbacks, shared -from modules.shared import cmd_opts, opts, state +from modules.shared import opts, state from swinir_model_arch import SwinIR as net from swinir_model_arch_v2 import Swin2SR as net2 from modules.upscaler import Upscaler, UpscalerData diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py index f1a7cf09..00c407de 100644 --- a/modules/codeformer/codeformer_arch.py +++ b/modules/codeformer/codeformer_arch.py @@ -1,14 +1,12 @@ # this file is copied from CodeFormer repository. Please see comment in modules/codeformer_model.py import math -import numpy as np import torch from torch import nn, Tensor import torch.nn.functional as F -from typing import Optional, List +from typing import Optional from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock -from basicsr.utils import get_root_logger from basicsr.utils.registry import ARCH_REGISTRY def calc_mean_std(feat, eps=1e-5): diff --git a/modules/codeformer/vqgan_arch.py b/modules/codeformer/vqgan_arch.py index e7293683..820e6b12 100644 --- a/modules/codeformer/vqgan_arch.py +++ b/modules/codeformer/vqgan_arch.py @@ -5,11 +5,9 @@ VQGAN code, adapted from the original created by the Unleashing Transformers aut https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py ''' -import numpy as np import torch import torch.nn as nn import torch.nn.functional as F -import copy from basicsr.utils import get_root_logger from basicsr.utils.registry import ARCH_REGISTRY diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py index 8d84bbc9..8e56cb89 100644 --- a/modules/codeformer_model.py +++ b/modules/codeformer_model.py @@ -33,11 +33,9 @@ def setup_model(dirname): try: from torchvision.transforms.functional import normalize from modules.codeformer.codeformer_arch import CodeFormer - from basicsr.utils.download_util import load_file_from_url - from basicsr.utils import imwrite, img2tensor, tensor2img + from basicsr.utils import img2tensor, tensor2img from facelib.utils.face_restoration_helper import FaceRestoreHelper from facelib.detection.retinaface import retinaface - from modules.shared import cmd_opts net_class = CodeFormer diff --git a/modules/config_states.py b/modules/config_states.py index 2ea00929..8f1ff428 100644 --- a/modules/config_states.py +++ b/modules/config_states.py @@ -14,7 +14,7 @@ from collections import OrderedDict import git from modules import shared, extensions -from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path, config_states_dir +from modules.paths_internal import script_path, config_states_dir all_config_states = OrderedDict() diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index f4369257..85aa6934 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -6,7 +6,7 @@ from PIL import Image from basicsr.utils.download_util import load_file_from_url import modules.esrgan_model_arch as arch -from modules import shared, modelloader, images, devices +from modules import 
modelloader, images, devices from modules.upscaler import Upscaler, UpscalerData from modules.shared import opts diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py index 7f8bc7c0..4de9dd8d 100644 --- a/modules/esrgan_model_arch.py +++ b/modules/esrgan_model_arch.py @@ -2,7 +2,6 @@ from collections import OrderedDict import math -import functools import torch import torch.nn as nn import torch.nn.functional as F diff --git a/modules/extensions.py b/modules/extensions.py index 34d9d654..829f8cd9 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -3,7 +3,6 @@ import sys import traceback import time -from datetime import datetime import git from modules import shared diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index fe8b18b2..f1c59c46 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -1,15 +1,11 @@ import base64 -import html import io -import math import os import re -from pathlib import Path import gradio as gr from modules.paths import data_path from modules import shared, ui_tempdir, script_callbacks -import tempfile from PIL import Image re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)' diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 1fc49537..9fe749b7 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -1,4 +1,3 @@ -import csv import datetime import glob import html @@ -18,7 +17,7 @@ from modules.textual_inversion.learn_schedule import LearnRateScheduler from torch import einsum from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_ -from collections import defaultdict, deque +from collections import deque from statistics import stdev, mean diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py index 76599f5a..be168736 100644 --- a/modules/hypernetworks/ui.py +++ b/modules/hypernetworks/ui.py @@ -1,6 +1,4 @@ import html -import os -import re import gradio as gr import modules.hypernetworks.hypernetwork diff --git a/modules/images.py b/modules/images.py index 5eb6d855..7392cb8b 100644 --- a/modules/images.py +++ b/modules/images.py @@ -19,7 +19,7 @@ import json import hashlib from modules import sd_samplers, shared, script_callbacks, errors -from modules.shared import opts, cmd_opts +from modules.shared import opts LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) diff --git a/modules/img2img.py b/modules/img2img.py index 32b1ecd6..d704bf90 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -1,12 +1,9 @@ -import math import os -import sys -import traceback import numpy as np from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError -from modules import devices, sd_samplers +from modules import sd_samplers from modules.generation_parameters_copypaste import create_override_settings_dict from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images from modules.shared import opts, state diff --git a/modules/mac_specific.py b/modules/mac_specific.py index 40ce2101..5c2f92a1 100644 --- a/modules/mac_specific.py +++ b/modules/mac_specific.py @@ -1,6 +1,5 @@ import torch import platform -from modules import paths from modules.sd_hijack_utils import CondFunc from packaging import version diff --git a/modules/modelloader.py b/modules/modelloader.py 
index cf685000..92ada694 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -1,4 +1,3 @@ -import glob import os import shutil import importlib diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py index 11b330bc..a4c4ef4e 100644 --- a/modules/models/diffusion/uni_pc/uni_pc.py +++ b/modules/models/diffusion/uni_pc/uni_pc.py @@ -1,5 +1,4 @@ import torch -import torch.nn.functional as F import math from tqdm.auto import trange diff --git a/modules/processing.py b/modules/processing.py index 6f5233c1..c3932d6b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -2,7 +2,6 @@ import json import math import os import sys -import warnings import hashlib import torch @@ -11,10 +10,10 @@ from PIL import Image, ImageFilter, ImageOps import random import cv2 from skimage import exposure -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List import modules.sd_hijack -from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts +from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts from modules.sd_hijack import model_hijack from modules.shared import opts, cmd_opts, state import modules.shared as shared diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index d8135211..81573b78 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -3,7 +3,7 @@ from torch.nn.functional import silu from types import MethodType import modules.textual_inversion.textual_inversion -from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint +from modules import devices, sd_hijack_optimizations, shared from modules.hypernetworks import hypernetwork from modules.shared import cmd_opts from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 55a2ce4d..344d75c8 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -1,15 +1,9 @@ -import os import torch -from einops import repeat -from omegaconf import ListConfig - import ldm.models.diffusion.ddpm import ldm.models.diffusion.ddim import ldm.models.diffusion.plms -from ldm.models.diffusion.ddpm import LatentDiffusion -from ldm.models.diffusion.plms import PLMSSampler from ldm.models.diffusion.ddim import DDIMSampler, noise_like from ldm.models.diffusion.sampling_util import norm_thresholding diff --git a/modules/sd_hijack_ip2p.py b/modules/sd_hijack_ip2p.py index 41ed54a2..6fe6b6ff 100644 --- a/modules/sd_hijack_ip2p.py +++ b/modules/sd_hijack_ip2p.py @@ -1,8 +1,5 @@ -import collections import os.path -import sys -import gc -import time + def should_hijack_ip2p(checkpoint_info): from modules import sd_models_config diff --git a/modules/sd_hijack_xlmr.py b/modules/sd_hijack_xlmr.py index 4ac51c38..28528329 100644 --- a/modules/sd_hijack_xlmr.py +++ b/modules/sd_hijack_xlmr.py @@ -1,8 +1,6 @@ -import open_clip.tokenizer import torch from modules import sd_hijack_clip, devices -from modules.shared import opts class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords): diff --git a/modules/sd_models.py b/modules/sd_models.py index 11c1a344..1c09c709 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -565,7 +565,7 @@ def reload_model_weights(sd_model=None, 
info=None): def unload_model_weights(sd_model=None, info=None): - from modules import lowvram, devices, sd_hijack + from modules import devices, sd_hijack timer = Timer() if model_data.sd_model: diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py index 7a79925a..9bfe1237 100644 --- a/modules/sd_models_config.py +++ b/modules/sd_models_config.py @@ -1,4 +1,3 @@ -import re import os import torch diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 0fc9f456..3b8e9622 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -1,7 +1,6 @@ from collections import deque import torch import inspect -import einops import k_diffusion.sampling from modules import prompt_parser, devices, sd_samplers_common diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 521e485a..b7176125 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -1,8 +1,5 @@ -import torch -import safetensors.torch import os import collections -from collections import namedtuple from modules import paths, shared, devices, script_callbacks, sd_models import glob from copy import deepcopy diff --git a/modules/shared.py b/modules/shared.py index 4631965b..44cd2c0c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -1,12 +1,9 @@ -import argparse import datetime import json import os import sys import time -import requests -from PIL import Image import gradio as gr import tqdm diff --git a/modules/styles.py b/modules/styles.py index 11642075..c22769cf 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -1,18 +1,9 @@ -# We need this so Python doesn't complain about the unknown StableDiffusionProcessing-typehint at runtime -from __future__ import annotations - import csv import os import os.path import typing -import collections.abc as abc -import tempfile import shutil -if typing.TYPE_CHECKING: - # Only import this when code is being type-checked, it doesn't have any effect at runtime - from .processing import StableDiffusionProcessing - class PromptStyle(typing.NamedTuple): name: str diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index d7d8d2e3..7770d22f 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -1,10 +1,8 @@ import cv2 import requests import os -from collections import defaultdict -from math import log, sqrt import numpy as np -from PIL import Image, ImageDraw +from PIL import ImageDraw GREEN = "#0F0" BLUE = "#00F" diff --git a/modules/textual_inversion/image_embedding.py b/modules/textual_inversion/image_embedding.py index 5593f88c..ee0e850a 100644 --- a/modules/textual_inversion/image_embedding.py +++ b/modules/textual_inversion/image_embedding.py @@ -2,7 +2,7 @@ import base64 import json import numpy as np import zlib -from PIL import Image, PngImagePlugin, ImageDraw, ImageFont +from PIL import Image, ImageDraw, ImageFont from fonts.ttf import Roboto import torch from modules.shared import opts diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index da0bcb26..d0cad09e 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -1,13 +1,9 @@ import os from PIL import Image, ImageOps import math -import platform -import sys import tqdm -import time from modules import paths, shared, images, deepbooru -from modules.shared import opts, cmd_opts from modules.textual_inversion import autocrop diff --git a/modules/textual_inversion/textual_inversion.py 
b/modules/textual_inversion/textual_inversion.py index f753b75f..9ed9ba45 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -1,7 +1,6 @@ import os import sys import traceback -import inspect from collections import namedtuple import torch diff --git a/modules/txt2img.py b/modules/txt2img.py index 16841d0f..f022381c 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -1,18 +1,15 @@ import modules.scripts -from modules import sd_samplers +from modules import sd_samplers, processing from modules.generation_parameters_copypaste import create_override_settings_dict -from modules.processing import StableDiffusionProcessing, Processed, StableDiffusionProcessingTxt2Img, \ - StableDiffusionProcessingImg2Img, process_images from modules.shared import opts, cmd_opts import modules.shared as shared -import modules.processing as processing from modules.ui import plaintext_to_html def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, override_settings_texts, *args): override_settings = create_override_settings_dict(override_settings_texts) - p = StableDiffusionProcessingTxt2Img( + p = processing.StableDiffusionProcessingTxt2Img( sd_model=shared.sd_model, outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples, outpath_grids=opts.outdir_grids or opts.outdir_txt2img_grids, @@ -53,7 +50,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step processed = modules.scripts.scripts_txt2img.run(p, *args) if processed is None: - processed = process_images(p) + processed = processing.process_images(p) p.close() diff --git a/modules/ui.py b/modules/ui.py index 6beda76f..f7e57593 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -14,10 +14,10 @@ from PIL import Image, PngImagePlugin from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing, progress -from modules.ui_components import FormRow, FormColumn, FormGroup, ToolButton, FormHTML +from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path, data_path -from modules.shared import opts, cmd_opts, restricted_opts +from modules.shared import opts, cmd_opts import modules.codeformer_model import modules.generation_parameters_copypaste as parameters_copypaste @@ -28,7 +28,6 @@ import modules.shared as shared import modules.styles import modules.textual_inversion.ui from modules import prompt_parser -from modules.images import save_image from modules.sd_hijack import model_hijack from modules.sd_samplers import samplers, samplers_for_img2img from modules.textual_inversion import textual_inversion diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 49e06289..800e467a 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -1,4 +1,3 @@ -import glob import os.path import urllib.parse from pathlib import Path diff --git 
a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index f25639e5..c7dc1154 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -1,5 +1,5 @@ import gradio as gr -from modules import scripts_postprocessing, scripts, shared, gfpgan_model, codeformer_model, ui_common, postprocessing, call_queue +from modules import scripts, shared, ui_common, postprocessing, call_queue import modules.generation_parameters_copypaste as parameters_copypaste diff --git a/modules/upscaler.py b/modules/upscaler.py index 0ad4fe99..777593b0 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -2,8 +2,6 @@ import os from abc import abstractmethod import PIL -import numpy as np -import torch from PIL import Image import modules.shared diff --git a/modules/xlmr.py b/modules/xlmr.py index beab3fdf..e056c3f6 100644 --- a/modules/xlmr.py +++ b/modules/xlmr.py @@ -1,4 +1,4 @@ -from transformers import BertPreTrainedModel,BertModel,BertConfig +from transformers import BertPreTrainedModel, BertConfig import torch.nn as nn import torch from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaConfig diff --git a/pyproject.toml b/pyproject.toml index 1e164abc..9caa9ba2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,13 @@ [tool.ruff] +exclude = ["extensions"] + ignore = [ "E501", - "E731", - "E402", # Module level import not at top of file - "F401" # Module imported but unused + + "F401", # Module imported but unused ] -exclude = ["extensions"] + +[tool.ruff.per-file-ignores] +"webui.py" = ["E402"] # Module level import not at top of file \ No newline at end of file diff --git a/scripts/custom_code.py b/scripts/custom_code.py index f36a3675..cc6f0d49 100644 --- a/scripts/custom_code.py +++ b/scripts/custom_code.py @@ -4,7 +4,7 @@ import ast import copy from modules.processing import Processed -from modules.shared import opts, cmd_opts, state +from modules.shared import cmd_opts def convertExpr2Expression(expr): diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index b10fed6c..665dbe89 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -7,9 +7,9 @@ import modules.scripts as scripts import gradio as gr from PIL import Image, ImageDraw -from modules import images, processing, devices +from modules import images from modules.processing import Processed, process_images -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state # this function is taken from https://github.com/parlance-zz/g-diffuser-bot diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py index ddcbd2d3..c0bbecc1 100644 --- a/scripts/poor_mans_outpainting.py +++ b/scripts/poor_mans_outpainting.py @@ -4,9 +4,9 @@ import modules.scripts as scripts import gradio as gr from PIL import Image, ImageDraw -from modules import images, processing, devices +from modules import images, devices from modules.processing import Processed, process_images -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state class Script(scripts.Script): diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index e9b11517..fb06beab 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -1,14 +1,11 @@ import math -from collections import namedtuple -from copy import copy -import random import modules.scripts as scripts import gradio as gr from modules import images -from modules.processing import process_images, Processed -from modules.shared import 
opts, cmd_opts, state +from modules.processing import process_images +from modules.shared import opts, state import modules.sd_samplers diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 76dc5778..149bc85f 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -1,6 +1,4 @@ import copy -import math -import os import random import sys import traceback @@ -11,8 +9,7 @@ import gradio as gr from modules import sd_samplers from modules.processing import Processed, process_images -from PIL import Image -from modules.shared import opts, cmd_opts, state +from modules.shared import state def process_string_tag(tag): diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index 332d76d9..d873a09c 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -4,9 +4,9 @@ import modules.scripts as scripts import gradio as gr from PIL import Image -from modules import processing, shared, sd_samplers, images, devices +from modules import processing, shared, images, devices from modules.processing import Processed -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state class Script(scripts.Script): diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 2ff42ef8..332e0ecd 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -10,15 +10,13 @@ import numpy as np import modules.scripts as scripts import gradio as gr -from modules import images, paths, sd_samplers, processing, sd_models, sd_vae +from modules import images, sd_samplers, processing, sd_models, sd_vae from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state import modules.shared as shared import modules.sd_samplers import modules.sd_models import modules.sd_vae -import glob -import os import re from modules.ui_components import ToolButton diff --git a/webui.py b/webui.py index ec3d2aba..48277075 100644 --- a/webui.py +++ b/webui.py @@ -43,7 +43,7 @@ if ".dev" in torch.__version__ or "+git" in torch.__version__: torch.__long_version__ = torch.__version__ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) -from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states +from modules import shared, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states import modules.codeformer_model as codeformer import modules.face_restoration import modules.gfpgan_model as gfpgan -- cgit v1.2.3 From 028d3f6425d85f122027c127fba8bcbf4f66ee75 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 11:05:02 +0300 Subject: ruff auto fixes --- extensions-builtin/LDSR/sd_hijack_autoencoder.py | 4 ++-- extensions-builtin/LDSR/sd_hijack_ddpm_v1.py | 12 ++++++------ extensions-builtin/Lora/lora.py | 12 ++++++------ extensions-builtin/Lora/scripts/lora_script.py | 2 +- modules/config_states.py | 2 +- modules/deepbooru.py | 2 +- modules/devices.py | 2 +- modules/hypernetworks/hypernetwork.py | 2 +- modules/hypernetworks/ui.py | 4 ++-- modules/interrogate.py | 2 +- modules/modelloader.py | 2 +- modules/models/diffusion/ddpm_edit.py | 4 ++-- modules/scripts_auto_postprocessing.py | 2 +- modules/sd_hijack.py | 2 +- modules/sd_hijack_optimizations.py | 14 +++++++------- modules/sd_samplers_compvis.py | 2 +- modules/sd_samplers_kdiffusion.py | 2 +- modules/shared.py | 6 +++--- 
modules/textual_inversion/textual_inversion.py | 2 +- modules/ui.py | 8 ++++---- modules/ui_extra_networks.py | 4 ++-- modules/ui_tempdir.py | 2 +- 22 files changed, 47 insertions(+), 47 deletions(-) (limited to 'modules/ui_extra_networks.py') diff --git a/extensions-builtin/LDSR/sd_hijack_autoencoder.py b/extensions-builtin/LDSR/sd_hijack_autoencoder.py index 6303fed5..f457ca93 100644 --- a/extensions-builtin/LDSR/sd_hijack_autoencoder.py +++ b/extensions-builtin/LDSR/sd_hijack_autoencoder.py @@ -288,5 +288,5 @@ class VQModelInterface(VQModel): dec = self.decoder(quant) return dec -setattr(ldm.models.autoencoder, "VQModel", VQModel) -setattr(ldm.models.autoencoder, "VQModelInterface", VQModelInterface) +ldm.models.autoencoder.VQModel = VQModel +ldm.models.autoencoder.VQModelInterface = VQModelInterface diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py index 4d3f6c56..d8fc30e3 100644 --- a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py @@ -1116,7 +1116,7 @@ class LatentDiffusionV1(DDPMV1): if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + [x[:batch_size] for x in cond[key]] for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] @@ -1215,7 +1215,7 @@ class LatentDiffusionV1(DDPMV1): if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + [x[:batch_size] for x in cond[key]] for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, @@ -1437,7 +1437,7 @@ class Layout2ImgDiffusionV1(LatentDiffusionV1): logs['bbox_image'] = cond_img return logs -setattr(ldm.models.diffusion.ddpm, "DDPMV1", DDPMV1) -setattr(ldm.models.diffusion.ddpm, "LatentDiffusionV1", LatentDiffusionV1) -setattr(ldm.models.diffusion.ddpm, "DiffusionWrapperV1", DiffusionWrapperV1) -setattr(ldm.models.diffusion.ddpm, "Layout2ImgDiffusionV1", Layout2ImgDiffusionV1) +ldm.models.diffusion.ddpm.DDPMV1 = DDPMV1 +ldm.models.diffusion.ddpm.LatentDiffusionV1 = LatentDiffusionV1 +ldm.models.diffusion.ddpm.DiffusionWrapperV1 = DiffusionWrapperV1 +ldm.models.diffusion.ddpm.Layout2ImgDiffusionV1 = Layout2ImgDiffusionV1 diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py index 0ab43229..9795540f 100644 --- a/extensions-builtin/Lora/lora.py +++ b/extensions-builtin/Lora/lora.py @@ -172,7 +172,7 @@ def load_lora(name, filename): else: print(f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}') continue - assert False, f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}' + raise AssertionError(f"Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}") with torch.no_grad(): module.weight.copy_(weight) @@ -184,7 +184,7 @@ def load_lora(name, filename): elif lora_key == "lora_down.weight": lora_module.down = module else: - assert False, f'Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha' + raise AssertionError(f"Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha") if len(keys_failed_to_match) > 0: 
print(f"Failed to match keys when loading Lora {filename}: {keys_failed_to_match}") @@ -202,7 +202,7 @@ def load_loras(names, multipliers=None): loaded_loras.clear() loras_on_disk = [available_lora_aliases.get(name, None) for name in names] - if any([x is None for x in loras_on_disk]): + if any(x is None for x in loras_on_disk): list_available_loras() loras_on_disk = [available_lora_aliases.get(name, None) for name in names] @@ -309,7 +309,7 @@ def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.Mu print(f'failed to calculate lora weights for layer {lora_layer_name}') - setattr(self, "lora_current_names", wanted_names) + self.lora_current_names = wanted_names def lora_forward(module, input, original_forward): @@ -343,8 +343,8 @@ def lora_forward(module, input, original_forward): def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]): - setattr(self, "lora_current_names", ()) - setattr(self, "lora_weights_backup", None) + self.lora_current_names = () + self.lora_weights_backup = None def lora_Linear_forward(self, input): diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py index 7db971fd..b70e2de7 100644 --- a/extensions-builtin/Lora/scripts/lora_script.py +++ b/extensions-builtin/Lora/scripts/lora_script.py @@ -53,7 +53,7 @@ script_callbacks.on_infotext_pasted(lora.infotext_pasted) shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), { - "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras), + "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(lora.available_loras)}, refresh=lora.list_available_loras), })) diff --git a/modules/config_states.py b/modules/config_states.py index 8f1ff428..75da862a 100644 --- a/modules/config_states.py +++ b/modules/config_states.py @@ -35,7 +35,7 @@ def list_config_states(): j["filepath"] = path config_states.append(j) - config_states = list(sorted(config_states, key=lambda cs: cs["created_at"], reverse=True)) + config_states = sorted(config_states, key=lambda cs: cs["created_at"], reverse=True) for cs in config_states: timestamp = time.asctime(time.gmtime(cs["created_at"])) diff --git a/modules/deepbooru.py b/modules/deepbooru.py index 1c4554a2..547e1b4c 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -78,7 +78,7 @@ class DeepDanbooru: res = [] - filtertags = set([x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")]) + filtertags = {x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")} for tag in [x for x in tags if x not in filtertags]: probability = probability_dict[tag] diff --git a/modules/devices.py b/modules/devices.py index c705a3cb..d8a34a0f 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -65,7 +65,7 @@ def enable_tf32(): # enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't # see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407 - if any([torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())]): + if any(torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())): torch.backends.cudnn.benchmark = True torch.backends.cuda.matmul.allow_tf32 = True diff --git a/modules/hypernetworks/hypernetwork.py 
b/modules/hypernetworks/hypernetwork.py index 9fe749b7..6ef0bfdf 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -403,7 +403,7 @@ def attention_CrossAttention_forward(self, x, context=None, mask=None): k = self.to_k(context_k) v = self.to_v(context_v) - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v)) sim = einsum('b i d, b j d -> b i j', q, k) * self.scale diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py index be168736..e3f9eb13 100644 --- a/modules/hypernetworks/ui.py +++ b/modules/hypernetworks/ui.py @@ -5,13 +5,13 @@ import modules.hypernetworks.hypernetwork from modules import devices, sd_hijack, shared not_available = ["hardswish", "multiheadattention"] -keys = list(x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available) +keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available] def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None): filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure) - return gr.Dropdown.update(choices=sorted([x for x in shared.hypernetworks.keys()])), f"Created: {filename}", "" + return gr.Dropdown.update(choices=sorted(shared.hypernetworks.keys())), f"Created: {filename}", "" def train_hypernetwork(*args): diff --git a/modules/interrogate.py b/modules/interrogate.py index 22df9216..a1c8e537 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -159,7 +159,7 @@ class InterrogateModels: text_array = text_array[0:int(shared.opts.interrogate_clip_dict_limit)] top_count = min(top_count, len(text_array)) - text_tokens = clip.tokenize([text for text in text_array], truncate=True).to(devices.device_interrogate) + text_tokens = clip.tokenize(list(text_array), truncate=True).to(devices.device_interrogate) text_features = self.clip_model.encode_text(text_tokens).type(self.dtype) text_features /= text_features.norm(dim=-1, keepdim=True) diff --git a/modules/modelloader.py b/modules/modelloader.py index 92ada694..25612bf8 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -39,7 +39,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None if os.path.islink(full_path) and not os.path.exists(full_path): print(f"Skipping broken symlink: {full_path}") continue - if ext_blacklist is not None and any([full_path.endswith(x) for x in ext_blacklist]): + if ext_blacklist is not None and any(full_path.endswith(x) for x in ext_blacklist): continue if full_path not in output: output.append(full_path) diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py index 611c2b69..09432117 100644 --- a/modules/models/diffusion/ddpm_edit.py +++ b/modules/models/diffusion/ddpm_edit.py @@ -1130,7 +1130,7 @@ class LatentDiffusion(DDPM): if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + [x[:batch_size] for x in cond[key]] for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, 
list) else cond[:batch_size] @@ -1229,7 +1229,7 @@ class LatentDiffusion(DDPM): if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + [x[:batch_size] for x in cond[key]] for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, diff --git a/modules/scripts_auto_postprocessing.py b/modules/scripts_auto_postprocessing.py index 30d6d658..d63078de 100644 --- a/modules/scripts_auto_postprocessing.py +++ b/modules/scripts_auto_postprocessing.py @@ -17,7 +17,7 @@ class ScriptPostprocessingForMainUI(scripts.Script): return self.postprocessing_controls.values() def postprocess_image(self, p, script_pp, *args): - args_dict = {k: v for k, v in zip(self.postprocessing_controls, args)} + args_dict = dict(zip(self.postprocessing_controls, args)) pp = scripts_postprocessing.PostprocessedImage(script_pp.image) pp.info = {} diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 81573b78..e374aeb8 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -37,7 +37,7 @@ def apply_optimizations(): optimization_method = None - can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(getattr(torch.nn.functional, "scaled_dot_product_attention")) # not everyone has torch 2.x to use sdp + can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention) # not everyone has torch 2.x to use sdp if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)): print("Applying xformers cross attention optimization.") diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index b623d53d..a174bbe1 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -49,7 +49,7 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None): v_in = self.to_v(context_v) del context, context_k, context_v, x - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in)) + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in)) del q_in, k_in, v_in dtype = q.dtype @@ -98,7 +98,7 @@ def split_cross_attention_forward(self, x, context=None, mask=None): del context, x - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in)) + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in)) del q_in, k_in, v_in r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) @@ -229,7 +229,7 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None): with devices.without_autocast(disable=not shared.opts.upcast_attn): k = k * self.scale - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v)) r = einsum_op(q, k, v) r = r.to(dtype) return self.to_out(rearrange(r, '(b h) n d -> b n (h d)', h=h)) @@ -334,7 +334,7 @@ def xformers_attention_forward(self, x, context=None, mask=None): k_in = self.to_k(context_k) v_in = self.to_v(context_v) - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in)) + q, k, v = (rearrange(t, 'b n (h d) -> b n h d', h=h) 
for t in (q_in, k_in, v_in)) del q_in, k_in, v_in dtype = q.dtype @@ -460,7 +460,7 @@ def xformers_attnblock_forward(self, x): k = self.k(h_) v = self.v(h_) b, c, h, w = q.shape - q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v)) + q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v)) dtype = q.dtype if shared.opts.upcast_attn: q, k = q.float(), k.float() @@ -482,7 +482,7 @@ def sdp_attnblock_forward(self, x): k = self.k(h_) v = self.v(h_) b, c, h, w = q.shape - q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v)) + q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v)) dtype = q.dtype if shared.opts.upcast_attn: q, k = q.float(), k.float() @@ -506,7 +506,7 @@ def sub_quad_attnblock_forward(self, x): k = self.k(h_) v = self.v(h_) b, c, h, w = q.shape - q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v)) + q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v)) q = q.contiguous() k = k.contiguous() v = v.contiguous() diff --git a/modules/sd_samplers_compvis.py b/modules/sd_samplers_compvis.py index bfcc5574..7427648f 100644 --- a/modules/sd_samplers_compvis.py +++ b/modules/sd_samplers_compvis.py @@ -83,7 +83,7 @@ class VanillaStableDiffusionSampler: conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step) - assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers' + assert all(len(conds) == 1 for conds in conds_list), 'composition via AND is not supported for DDIM/PLMS samplers' cond = tensor # for DDIM, shapes must match, we can't just process cond and uncond independently; diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 3b8e9622..2f733cf5 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -86,7 +86,7 @@ class CFGDenoiser(torch.nn.Module): conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) - assert not is_edit_model or all([len(conds) == 1 for conds in conds_list]), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" + assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" batch_size = len(conds_list) repeats = [len(conds_list[i]) for i in range(batch_size)] diff --git a/modules/shared.py b/modules/shared.py index 7d70f041..e2691585 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -381,7 +381,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), { "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks (px)"), "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks (px)"), "extra_networks_add_text_separator": OptionInfo(" ", "Extra text to add before <...> when adding extra network to prompt"), - "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks), + "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(hypernetworks.keys())}, refresh=reload_hypernetworks), })) options_templates.update(options_section(('ui', "User interface"), 
{ @@ -403,7 +403,7 @@ options_templates.update(options_section(('ui', "User interface"), { "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}), - "hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": [x for x in tab_names]}), + "hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}), "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), @@ -583,7 +583,7 @@ class Options: if item.section not in section_ids: section_ids[item.section] = len(section_ids) - self.data_labels = {k: v for k, v in sorted(settings_items, key=lambda x: section_ids[x[1].section])} + self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section])) def cast_value(self, key, value): """casts an arbitrary to the same type as this setting's value with key diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 9ed9ba45..c37bb2ad 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -167,7 +167,7 @@ class EmbeddingDatabase: if 'string_to_param' in data: param_dict = data['string_to_param'] if hasattr(param_dict, '_parameters'): - param_dict = getattr(param_dict, '_parameters') # fix for torch 1.12.1 loading saved file from torch 1.11 + param_dict = param_dict._parameters # fix for torch 1.12.1 loading saved file from torch 1.11 assert len(param_dict) == 1, 'embedding file has multiple terms in it' emb = next(iter(param_dict.items()))[1] # diffuser concepts diff --git a/modules/ui.py b/modules/ui.py index 782b569d..84d661b2 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1222,7 +1222,7 @@ def create_ui(): ) def get_textual_inversion_template_names(): - return sorted([x for x in textual_inversion.textual_inversion_templates]) + return sorted(textual_inversion.textual_inversion_templates) with gr.Tab(label="Train", id="train"): gr.HTML(value="
Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]
") @@ -1230,8 +1230,8 @@ def create_ui(): train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name") - train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=[x for x in shared.hypernetworks.keys()]) - create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted([x for x in shared.hypernetworks.keys()])}, "refresh_train_hypernetwork_name") + train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=list(shared.hypernetworks.keys())) + create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks.keys())}, "refresh_train_hypernetwork_name") with FormRow(): embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") @@ -1808,7 +1808,7 @@ def create_ui(): if type(x) == gr.Dropdown: def check_dropdown(val): if getattr(x, 'multiselect', False): - return all([value in x.choices for value in val]) + return all(value in x.choices for value in val) else: return val in x.choices diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 800e467a..ab585917 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -26,7 +26,7 @@ def register_page(page): def fetch_file(filename: str = ""): from starlette.responses import FileResponse - if not any([Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs]): + if not any(Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs): raise ValueError(f"File cannot be fetched: {filename}. 
Must be in one of directories registered by extra pages.") ext = os.path.splitext(filename)[1].lower() @@ -326,7 +326,7 @@ def setup_ui(ui, gallery): is_allowed = False for extra_page in ui.stored_extra_pages: - if any([path_is_parent(x, filename) for x in extra_page.allowed_directories_for_previews()]): + if any(path_is_parent(x, filename) for x in extra_page.allowed_directories_for_previews()): is_allowed = True break diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 46fa9cb0..cac73c51 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -23,7 +23,7 @@ def register_tmp_file(gradio, filename): def check_tmp_file(gradio, filename): if hasattr(gradio, 'temp_file_sets'): - return any([filename in fileset for fileset in gradio.temp_file_sets]) + return any(filename in fileset for fileset in gradio.temp_file_sets) if hasattr(gradio, 'temp_dirs'): return any(Path(temp_dir).resolve() in Path(filename).resolve().parents for temp_dir in gradio.temp_dirs) -- cgit v1.2.3 From a5121e7a0623db328a9462d340d389ed6737374a Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 11:37:18 +0300 Subject: fixes for B007 --- extensions-builtin/LDSR/ldsr_model_arch.py | 2 +- extensions-builtin/Lora/lora.py | 2 +- extensions-builtin/ScuNET/scripts/scunet_model.py | 2 +- extensions-builtin/SwinIR/swinir_model_arch.py | 2 +- extensions-builtin/SwinIR/swinir_model_arch_v2.py | 2 +- modules/codeformer_model.py | 2 +- modules/esrgan_model.py | 8 ++------ modules/extra_networks.py | 2 +- modules/generation_parameters_copypaste.py | 2 +- modules/hypernetworks/hypernetwork.py | 12 ++++++------ modules/images.py | 2 +- modules/interrogate.py | 4 ++-- modules/prompt_parser.py | 14 +++++++------- modules/safe.py | 4 ++-- modules/scripts.py | 10 +++++----- modules/scripts_postprocessing.py | 8 ++++---- modules/sd_hijack_clip.py | 2 +- modules/shared.py | 6 +++--- modules/textual_inversion/learn_schedule.py | 2 +- modules/textual_inversion/textual_inversion.py | 10 +++++----- modules/ui.py | 6 +++--- modules/ui_extra_networks.py | 2 +- modules/ui_tempdir.py | 2 +- modules/upscaler.py | 2 +- pyproject.toml | 1 - scripts/prompts_from_file.py | 2 +- scripts/sd_upscale.py | 4 ++-- scripts/xyz_grid.py | 2 +- 28 files changed, 57 insertions(+), 62 deletions(-) (limited to 'modules/ui_extra_networks.py') diff --git a/extensions-builtin/LDSR/ldsr_model_arch.py b/extensions-builtin/LDSR/ldsr_model_arch.py index a5fb8907..27e38549 100644 --- a/extensions-builtin/LDSR/ldsr_model_arch.py +++ b/extensions-builtin/LDSR/ldsr_model_arch.py @@ -88,7 +88,7 @@ class LDSR: x_t = None logs = None - for n in range(n_runs): + for _ in range(n_runs): if custom_shape is not None: x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device) x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0]) diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py index 9795540f..7b56136f 100644 --- a/extensions-builtin/Lora/lora.py +++ b/extensions-builtin/Lora/lora.py @@ -418,7 +418,7 @@ def infotext_pasted(infotext, params): added = [] - for k, v in params.items(): + for k in params: if not k.startswith("AddNet Model "): continue diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py index aa2fdb3a..1f5ea0d3 100644 --- a/extensions-builtin/ScuNET/scripts/scunet_model.py +++ b/extensions-builtin/ScuNET/scripts/scunet_model.py @@ -132,7 +132,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler): 
model = net(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64) model.load_state_dict(torch.load(filename), strict=True) model.eval() - for k, v in model.named_parameters(): + for _, v in model.named_parameters(): v.requires_grad = False model = model.to(device) diff --git a/extensions-builtin/SwinIR/swinir_model_arch.py b/extensions-builtin/SwinIR/swinir_model_arch.py index 75f7bedc..de195d9b 100644 --- a/extensions-builtin/SwinIR/swinir_model_arch.py +++ b/extensions-builtin/SwinIR/swinir_model_arch.py @@ -848,7 +848,7 @@ class SwinIR(nn.Module): H, W = self.patches_resolution flops += H * W * 3 * self.embed_dim * 9 flops += self.patch_embed.flops() - for i, layer in enumerate(self.layers): + for layer in self.layers: flops += layer.flops() flops += H * W * 3 * self.embed_dim * self.embed_dim flops += self.upsample.flops() diff --git a/extensions-builtin/SwinIR/swinir_model_arch_v2.py b/extensions-builtin/SwinIR/swinir_model_arch_v2.py index d4c0b0da..15777af9 100644 --- a/extensions-builtin/SwinIR/swinir_model_arch_v2.py +++ b/extensions-builtin/SwinIR/swinir_model_arch_v2.py @@ -1001,7 +1001,7 @@ class Swin2SR(nn.Module): H, W = self.patches_resolution flops += H * W * 3 * self.embed_dim * 9 flops += self.patch_embed.flops() - for i, layer in enumerate(self.layers): + for layer in self.layers: flops += layer.flops() flops += H * W * 3 * self.embed_dim * self.embed_dim flops += self.upsample.flops() diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py index 8e56cb89..ececdbae 100644 --- a/modules/codeformer_model.py +++ b/modules/codeformer_model.py @@ -94,7 +94,7 @@ def setup_model(dirname): self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5) self.face_helper.align_warp_face() - for idx, cropped_face in enumerate(self.face_helper.cropped_faces): + for cropped_face in self.face_helper.cropped_faces: cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True) normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True) cropped_face_t = cropped_face_t.unsqueeze(0).to(devices.device_codeformer) diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index 85aa6934..a009eb42 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -16,9 +16,7 @@ def mod2normal(state_dict): # this code is copied from https://github.com/victorca25/iNNfer if 'conv_first.weight' in state_dict: crt_net = {} - items = [] - for k, v in state_dict.items(): - items.append(k) + items = list(state_dict) crt_net['model.0.weight'] = state_dict['conv_first.weight'] crt_net['model.0.bias'] = state_dict['conv_first.bias'] @@ -52,9 +50,7 @@ def resrgan2normal(state_dict, nb=23): if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict: re8x = 0 crt_net = {} - items = [] - for k, v in state_dict.items(): - items.append(k) + items = list(state_dict) crt_net['model.0.weight'] = state_dict['conv_first.weight'] crt_net['model.0.bias'] = state_dict['conv_first.bias'] diff --git a/modules/extra_networks.py b/modules/extra_networks.py index 1978673d..f9db41bc 100644 --- a/modules/extra_networks.py +++ b/modules/extra_networks.py @@ -91,7 +91,7 @@ def deactivate(p, extra_network_data): """call deactivate for extra networks in extra_network_data in specified order, then call deactivate for all remaining registered networks""" - for extra_network_name, extra_network_args in extra_network_data.items(): + for extra_network_name in extra_network_data: extra_network = 
extra_network_registry.get(extra_network_name, None) if extra_network is None: continue diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 7fbbe707..b0e945a1 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -247,7 +247,7 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model lines.append(lastline) lastline = '' - for i, line in enumerate(lines): + for line in lines: line = line.strip() if line.startswith("Negative prompt:"): done_with_prompt = True diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 6ef0bfdf..38ef074f 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -177,34 +177,34 @@ class Hypernetwork: def weights(self): res = [] - for k, layers in self.layers.items(): + for layers in self.layers.values(): for layer in layers: res += layer.parameters() return res def train(self, mode=True): - for k, layers in self.layers.items(): + for layers in self.layers.values(): for layer in layers: layer.train(mode=mode) for param in layer.parameters(): param.requires_grad = mode def to(self, device): - for k, layers in self.layers.items(): + for layers in self.layers.values(): for layer in layers: layer.to(device) return self def set_multiplier(self, multiplier): - for k, layers in self.layers.items(): + for layers in self.layers.values(): for layer in layers: layer.multiplier = multiplier return self def eval(self): - for k, layers in self.layers.items(): + for layers in self.layers.values(): for layer in layers: layer.eval() for param in layer.parameters(): @@ -619,7 +619,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi try: sd_hijack_checkpoint.add() - for i in range((steps-initial_step) * gradient_step): + for _ in range((steps-initial_step) * gradient_step): if scheduler.finished: break if shared.state.interrupted: diff --git a/modules/images.py b/modules/images.py index 7392cb8b..c4e98c75 100644 --- a/modules/images.py +++ b/modules/images.py @@ -149,7 +149,7 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0): return ImageFont.truetype(Roboto, fontsize) def draw_texts(drawing, draw_x, draw_y, lines, initial_fnt, initial_fontsize): - for i, line in enumerate(lines): + for line in lines: fnt = initial_fnt fontsize = initial_fontsize while drawing.multiline_textsize(line.text, font=fnt)[0] > line.allowed_width and fontsize > 0: diff --git a/modules/interrogate.py b/modules/interrogate.py index a1c8e537..111b1322 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -207,8 +207,8 @@ class InterrogateModels: image_features /= image_features.norm(dim=-1, keepdim=True) - for name, topn, items in self.categories(): - matches = self.rank(image_features, items, top_count=topn) + for cat in self.categories(): + matches = self.rank(image_features, cat.items, top_count=cat.topn) for match, score in matches: if shared.opts.interrogate_return_ranks: res += f", ({match}:{score/100:.3f})" diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 3a720721..b4aff704 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -143,7 +143,7 @@ def get_learned_conditioning(model, prompts, steps): conds = model.get_learned_conditioning(texts) cond_schedule = [] - for i, (end_at_step, text) in enumerate(prompt_schedule): + for i, (end_at_step, _) in enumerate(prompt_schedule): 
cond_schedule.append(ScheduledPromptConditioning(end_at_step, conds[i])) cache[prompt] = cond_schedule @@ -219,8 +219,8 @@ def reconstruct_cond_batch(c: List[List[ScheduledPromptConditioning]], current_s res = torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype) for i, cond_schedule in enumerate(c): target_index = 0 - for current, (end_at, cond) in enumerate(cond_schedule): - if current_step <= end_at: + for current, entry in enumerate(cond_schedule): + if current_step <= entry.end_at_step: target_index = current break res[i] = cond_schedule[target_index].cond @@ -234,13 +234,13 @@ def reconstruct_multicond_batch(c: MulticondLearnedConditioning, current_step): tensors = [] conds_list = [] - for batch_no, composable_prompts in enumerate(c.batch): + for composable_prompts in c.batch: conds_for_batch = [] - for cond_index, composable_prompt in enumerate(composable_prompts): + for composable_prompt in composable_prompts: target_index = 0 - for current, (end_at, cond) in enumerate(composable_prompt.schedules): - if current_step <= end_at: + for current, entry in enumerate(composable_prompt.schedules): + if current_step <= entry.end_at_step: target_index = current break diff --git a/modules/safe.py b/modules/safe.py index 2d5b972f..1e791c5b 100644 --- a/modules/safe.py +++ b/modules/safe.py @@ -95,11 +95,11 @@ def check_pt(filename, extra_handler): except zipfile.BadZipfile: - # if it's not a zip file, it's an olf pytorch format, with five objects written to pickle + # if it's not a zip file, it's an old pytorch format, with five objects written to pickle with open(filename, "rb") as file: unpickler = RestrictedUnpickler(file) unpickler.extra_handler = extra_handler - for i in range(5): + for _ in range(5): unpickler.load() diff --git a/modules/scripts.py b/modules/scripts.py index d945b89f..0c12ebd5 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -231,7 +231,7 @@ def load_scripts(): syspath = sys.path def register_scripts_from_module(module): - for key, script_class in module.__dict__.items(): + for script_class in module.__dict__.values(): if type(script_class) != type: continue @@ -295,9 +295,9 @@ class ScriptRunner: auto_processing_scripts = scripts_auto_postprocessing.create_auto_preprocessing_script_data() - for script_class, path, basedir, script_module in auto_processing_scripts + scripts_data: - script = script_class() - script.filename = path + for script_data in auto_processing_scripts + scripts_data: + script = script_data.script_class() + script.filename = script_data.path script.is_txt2img = not is_img2img script.is_img2img = is_img2img @@ -492,7 +492,7 @@ class ScriptRunner: module = script_loading.load_module(script.filename) cache[filename] = module - for key, script_class in module.__dict__.items(): + for script_class in module.__dict__.values(): if type(script_class) == type and issubclass(script_class, Script): self.scripts[si] = script_class() self.scripts[si].filename = filename diff --git a/modules/scripts_postprocessing.py b/modules/scripts_postprocessing.py index b11568c0..6751406c 100644 --- a/modules/scripts_postprocessing.py +++ b/modules/scripts_postprocessing.py @@ -66,9 +66,9 @@ class ScriptPostprocessingRunner: def initialize_scripts(self, scripts_data): self.scripts = [] - for script_class, path, basedir, script_module in scripts_data: - script: ScriptPostprocessing = script_class() - script.filename = path + for script_data in scripts_data: + script: ScriptPostprocessing = script_data.script_class() + script.filename = 
script_data.path if script.name == "Simple Upscale": continue @@ -124,7 +124,7 @@ class ScriptPostprocessingRunner: script_args = args[script.args_from:script.args_to] process_args = {} - for (name, component), value in zip(script.controls.items(), script_args): + for (name, component), value in zip(script.controls.items(), script_args): # noqa B007 process_args[name] = value script.process(pp, **process_args) diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index 9fa5c5c5..c0c350f6 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -223,7 +223,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): self.hijack.fixes = [x.fixes for x in batch_chunk] for fixes in self.hijack.fixes: - for position, embedding in fixes: + for position, embedding in fixes: # noqa: B007 used_embeddings[embedding.name] = embedding z = self.process_tokens(tokens, multipliers) diff --git a/modules/shared.py b/modules/shared.py index e2691585..913c9e63 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -211,7 +211,7 @@ class OptionInfo: def options_section(section_identifier, options_dict): - for k, v in options_dict.items(): + for v in options_dict.values(): v.section = section_identifier return options_dict @@ -579,7 +579,7 @@ class Options: section_ids = {} settings_items = self.data_labels.items() - for k, item in settings_items: + for _, item in settings_items: if item.section not in section_ids: section_ids[item.section] = len(section_ids) @@ -740,7 +740,7 @@ def walk_files(path, allowed_extensions=None): if allowed_extensions is not None: allowed_extensions = set(allowed_extensions) - for root, dirs, files in os.walk(path): + for root, _, files in os.walk(path): for filename in files: if allowed_extensions is not None: _, ext = os.path.splitext(filename) diff --git a/modules/textual_inversion/learn_schedule.py b/modules/textual_inversion/learn_schedule.py index fda58898..c56bea45 100644 --- a/modules/textual_inversion/learn_schedule.py +++ b/modules/textual_inversion/learn_schedule.py @@ -12,7 +12,7 @@ class LearnScheduleIterator: self.it = 0 self.maxit = 0 try: - for i, pair in enumerate(pairs): + for pair in pairs: if not pair.strip(): continue tmp = pair.split(':') diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index c37bb2ad..47035332 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -29,7 +29,7 @@ textual_inversion_templates = {} def list_textual_inversion_templates(): textual_inversion_templates.clear() - for root, dirs, fns in os.walk(shared.cmd_opts.textual_inversion_templates_dir): + for root, _, fns in os.walk(shared.cmd_opts.textual_inversion_templates_dir): for fn in fns: path = os.path.join(root, fn) @@ -198,7 +198,7 @@ class EmbeddingDatabase: if not os.path.isdir(embdir.path): return - for root, dirs, fns in os.walk(embdir.path, followlinks=True): + for root, _, fns in os.walk(embdir.path, followlinks=True): for fn in fns: try: fullfn = os.path.join(root, fn) @@ -215,7 +215,7 @@ class EmbeddingDatabase: def load_textual_inversion_embeddings(self, force_reload=False): if not force_reload: need_reload = False - for path, embdir in self.embedding_dirs.items(): + for embdir in self.embedding_dirs.values(): if embdir.has_changed(): need_reload = True break @@ -228,7 +228,7 @@ class EmbeddingDatabase: self.skipped_embeddings.clear() self.expected_shape = self.get_expected_shape() - for path, embdir in 
self.embedding_dirs.items(): + for embdir in self.embedding_dirs.values(): self.load_from_dir(embdir) embdir.update() @@ -469,7 +469,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st try: sd_hijack_checkpoint.add() - for i in range((steps-initial_step) * gradient_step): + for _ in range((steps-initial_step) * gradient_step): if scheduler.finished: break if shared.state.interrupted: diff --git a/modules/ui.py b/modules/ui.py index 84d661b2..83bfb7d8 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -416,7 +416,7 @@ def create_sampler_and_steps_selection(choices, tabname): def ordered_ui_categories(): user_order = {x.strip(): i * 2 + 1 for i, x in enumerate(shared.opts.ui_reorder.split(","))} - for i, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): + for _, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): yield category @@ -1646,7 +1646,7 @@ def create_ui(): with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: with gr.Row(elem_id="quicksettings", variant="compact"): - for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])): + for _i, k, _item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])): component = create_setting_component(k, is_quicksettings=True) component_dict[k] = component @@ -1673,7 +1673,7 @@ def create_ui(): outputs=[text_settings, result], ) - for i, k, item in quicksettings_list: + for _i, k, _item in quicksettings_list: component = component_dict[k] info = opts.data_labels[k] diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index ab585917..2fd82e8e 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -90,7 +90,7 @@ class ExtraNetworksPage: subdirs = {} for parentdir in [os.path.abspath(x) for x in self.allowed_directories_for_previews()]: - for root, dirs, files in os.walk(parentdir): + for root, dirs, _ in os.walk(parentdir): for dirname in dirs: x = os.path.join(root, dirname) diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index cac73c51..f05049e1 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -72,7 +72,7 @@ def cleanup_tmpdr(): if temp_dir == "" or not os.path.isdir(temp_dir): return - for root, dirs, files in os.walk(temp_dir, topdown=False): + for root, _, files in os.walk(temp_dir, topdown=False): for name in files: _, extension = os.path.splitext(name) if extension != ".png": diff --git a/modules/upscaler.py b/modules/upscaler.py index e145be30..8acb6e96 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -55,7 +55,7 @@ class Upscaler: dest_w = int(img.width * scale) dest_h = int(img.height * scale) - for i in range(3): + for _ in range(3): shape = (img.width, img.height) img = self.do_upscale(img, selected_model) diff --git a/pyproject.toml b/pyproject.toml index 346a0cde..c88907be 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,7 +20,6 @@ ignore = [ "I001", # Import block is un-sorted or un-formatted "C901", # Function is too complex "C408", # Rewrite as a literal - "B007", # Loop control variable not used within loop body ] diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 149bc85f..27af5ff6 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -156,7 +156,7 @@ class Script(scripts.Script): images = [] all_prompts = [] infotexts = [] - 
for n, args in enumerate(jobs): + for args in jobs: state.job = f"{state.job_no + 1} out of {state.job_count}" copy_p = copy.copy(p) diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index d873a09c..0b1d3096 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -56,7 +56,7 @@ class Script(scripts.Script): work = [] - for y, h, row in grid.tiles: + for _y, _h, row in grid.tiles: for tiledata in row: work.append(tiledata[2]) @@ -85,7 +85,7 @@ class Script(scripts.Script): work_results += processed.images image_index = 0 - for y, h, row in grid.tiles: + for _y, _h, row in grid.tiles: for tiledata in row: tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (p.width, p.height)) image_index += 1 diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 332e0ecd..38a20381 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -704,7 +704,7 @@ class Script(scripts.Script): if not include_sub_grids: # Done with sub-grids, drop all related information: - for sg in range(z_count): + for _ in range(z_count): del processed.images[1] del processed.all_prompts[1] del processed.all_seeds[1] -- cgit v1.2.3 From cb3f8ff59fe8f142c3ca074b8cbaaf83357f9dc1 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Thu, 11 May 2023 15:55:43 +0000 Subject: Fix symlink scanning --- modules/shared.py | 2 +- modules/ui_extra_networks.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'modules/ui_extra_networks.py') diff --git a/modules/shared.py b/modules/shared.py index f387b5ae..210424ac 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -741,7 +741,7 @@ def walk_files(path, allowed_extensions=None): if allowed_extensions is not None: allowed_extensions = set(allowed_extensions) - for root, _, files in os.walk(path): + for root, _, files in os.walk(path, followlinks=True): for filename in files: if allowed_extensions is not None: _, ext = os.path.splitext(filename) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 2fd82e8e..e35d0bfe 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -90,7 +90,7 @@ class ExtraNetworksPage: subdirs = {} for parentdir in [os.path.abspath(x) for x in self.allowed_directories_for_previews()]: - for root, dirs, _ in os.walk(parentdir): + for root, dirs, _ in os.walk(parentdir, followlinks=True): for dirname in dirs: x = os.path.join(root, dirname) -- cgit v1.2.3 From a423f23d289225a39d6f93a03bfda13eddbb42b7 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 14 May 2023 16:22:40 +0900 Subject: allow jpeg for extra network preview --- modules/ui_extra_networks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules/ui_extra_networks.py') diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index e35d0bfe..0baccf56 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -30,7 +30,7 @@ def fetch_file(filename: str = ""): raise ValueError(f"File cannot be fetched: {filename}. Must be in one of directories registered by extra pages.") ext = os.path.splitext(filename)[1].lower() - if ext not in (".png", ".jpg", ".webp"): + if ext not in (".png", ".jpg", ".jpeg", ".webp"): raise ValueError(f"File cannot be fetched: {filename}. 
Only png and jpg and webp.") # would profit from returning 304 @@ -194,7 +194,7 @@ class ExtraNetworksPage: Find a preview PNG for a given path (without extension) and call link_preview on it. """ - preview_extensions = ["png", "jpg", "webp"] + preview_extensions = ["png", "jpg", "jpeg", "webp"] if shared.opts.samples_format not in preview_extensions: preview_extensions.append(shared.opts.samples_format) -- cgit v1.2.3 From f517838c75014f981ae1c41f1bc776d74daf9a23 Mon Sep 17 00:00:00 2001 From: Keith <1868690+wk5ovc@users.noreply.github.com> Date: Mon, 15 May 2023 10:47:01 +0800 Subject: Fix extra networks save preview image geninfo --- modules/ui_extra_networks.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) (limited to 'modules/ui_extra_networks.py') diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 8c3dea56..9e6e0531 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -334,9 +334,19 @@ def setup_ui(ui, gallery): assert is_allowed, f'writing to {filename} is not allowed' if geninfo: - pnginfo_data = PngImagePlugin.PngInfo() - pnginfo_data.add_text('parameters', geninfo) - image.save(filename, pnginfo=pnginfo_data) + ext = os.path.splitext(filename)[1].lower() + if ext == '.png': + pnginfo_data = PngImagePlugin.PngInfo() + pnginfo_data.add_text('parameters', geninfo) + image.save(filename, pnginfo=pnginfo_data) + elif ext in ('.jpg', '.jpeg', '.webp'): + exif_bytes = piexif.dump({ + 'Exif': {piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(geninfo or '', + encoding='unicode')} + }) + image.save(filename, exif=exif_bytes, quality=shared.opts.jpeg_quality) + else: + image.save(filename) else: image.save(filename) -- cgit v1.2.3 From 0d3a80e2692fb72da9798367b882f45312202f2e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 15 May 2023 20:33:44 +0300 Subject: Show "Loading..." 
for extra networks when displaying for the first time --- modules/ui_extra_networks.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'modules/ui_extra_networks.py') diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 0baccf56..752cf2b8 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -268,7 +268,7 @@ def create_ui(container, button, tabname): with gr.Tab(page.title, id=page_id): elem_id = f"{tabname}_{page_id}_cards_html" - page_elem = gr.HTML('', elem_id=elem_id) + page_elem = gr.HTML('Loading...', elem_id=elem_id) ui.pages.append(page_elem) page_elem.change(fn=lambda: None, _js='function(){applyExtraNetworkFilter(' + json.dumps(tabname) + '); return []}', inputs=[], outputs=[]) @@ -282,13 +282,24 @@ def create_ui(container, button, tabname): def toggle_visibility(is_visible): is_visible = not is_visible - if is_visible and not ui.pages_contents: + return is_visible, gr.update(visible=is_visible), gr.update(variant=("secondary-down" if is_visible else "secondary")) + + def fill_tabs(is_empty): + """Creates HTML for extra networks' tabs when the extra networks button is clicked for the first time.""" + + if not ui.pages_contents: refresh() - return is_visible, gr.update(visible=is_visible), gr.update(variant=("secondary-down" if is_visible else "secondary")), *ui.pages_contents + if is_empty: + return True, *ui.pages_contents + + return True, *[gr.update() for _ in ui.pages_contents] state_visible = gr.State(value=False) - button.click(fn=toggle_visibility, inputs=[state_visible], outputs=[state_visible, container, button, *ui.pages]) + button.click(fn=toggle_visibility, inputs=[state_visible], outputs=[state_visible, container, button], show_progress=False) + + state_empty = gr.State(value=True) + button.click(fn=fill_tabs, inputs=[state_empty], outputs=[state_empty, *ui.pages], show_progress=False) def refresh(): for pg in ui.stored_extra_pages: -- cgit v1.2.3 From f5092164e8e452debc889475dbaa163e4f877fda Mon Sep 17 00:00:00 2001 From: Iheuzio <97270760+Iheuzio@users.noreply.github.com> Date: Wed, 17 May 2023 12:51:54 -0400 Subject: Fix typo in syntax --- modules/ui_extra_networks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules/ui_extra_networks.py') diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 8c3dea56..4f754f43 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -172,7 +172,7 @@ class ExtraNetworksPage: # if this is true, the item must not be show in the default view, and must instead only be # shown when searching for it - serach_only = "/." in local_path or "\\." in local_path + search_only = "/." in local_path or "\\." 
in local_path args = { "style": f"'display: none; {height}{width}{background_image}'", @@ -185,7 +185,7 @@ class ExtraNetworksPage: "save_card_preview": '"' + html.escape(f"""return saveCardPreview(event, {json.dumps(tabname)}, {json.dumps(item["local_preview"])})""") + '"', "search_term": item.get("search_term", ""), "metadata_button": metadata_button, - "serach_only": " search_only" if serach_only else "", + "search_only": " search_only" if search_only else "", } return self.card_page.format(**args) -- cgit v1.2.3 From a6b618d07248267de36f0e8f4a847d997285e272 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 17 May 2023 21:03:41 +0300 Subject: use a single function for saving images with metadata both in extra networks and main mode for #10395 --- modules/images.py | 70 ++++++++++++++++++++++++++------------------ modules/ui_extra_networks.py | 19 ++---------- 2 files changed, 43 insertions(+), 46 deletions(-) (limited to 'modules/ui_extra_networks.py') diff --git a/modules/images.py b/modules/images.py index b2de3662..4e8cd993 100644 --- a/modules/images.py +++ b/modules/images.py @@ -482,6 +482,43 @@ def get_next_sequence_number(path, basename): return result + 1 +def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_pnginfo=None): + if extension is None: + extension = os.path.splitext(filename)[1] + + image_format = Image.registered_extensions()[extension] + + existing_pnginfo = existing_pnginfo or {} + if opts.enable_pnginfo: + existing_pnginfo['parameters'] = geninfo + + if extension.lower() == '.png': + pnginfo_data = PngImagePlugin.PngInfo() + for k, v in (existing_pnginfo or {}).items(): + pnginfo_data.add_text(k, str(v)) + + image.save(filename, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data) + + elif extension.lower() in (".jpg", ".jpeg", ".webp"): + if image.mode == 'RGBA': + image = image.convert("RGB") + elif image.mode == 'I;16': + image = image.point(lambda p: p * 0.0038910505836576).convert("RGB" if extension.lower() == ".webp" else "L") + + image.save(filename, format=image_format, quality=opts.jpeg_quality, lossless=opts.webp_lossless) + + if opts.enable_pnginfo and geninfo is not None: + exif_bytes = piexif.dump({ + "Exif": { + piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(geninfo or "", encoding="unicode") + }, + }) + + piexif.insert(exif_bytes, filename) + else: + image.save(filename, format=image_format, quality=opts.jpeg_quality) + + def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None): """Save an image. 
@@ -566,38 +603,13 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i info = params.pnginfo.get(pnginfo_section_name, None) def _atomically_save_image(image_to_save, filename_without_extension, extension): - # save image with .tmp extension to avoid race condition when another process detects new image in the directory + """ + save image with .tmp extension to avoid race condition when another process detects new image in the directory + """ temp_file_path = f"{filename_without_extension}.tmp" - image_format = Image.registered_extensions()[extension] - - if extension.lower() == '.png': - pnginfo_data = PngImagePlugin.PngInfo() - if opts.enable_pnginfo: - for k, v in params.pnginfo.items(): - pnginfo_data.add_text(k, str(v)) - - image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data) - elif extension.lower() in (".jpg", ".jpeg", ".webp"): - if image_to_save.mode == 'RGBA': - image_to_save = image_to_save.convert("RGB") - elif image_to_save.mode == 'I;16': - image_to_save = image_to_save.point(lambda p: p * 0.0038910505836576).convert("RGB" if extension.lower() == ".webp" else "L") - - image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality, lossless=opts.webp_lossless) - - if opts.enable_pnginfo and info is not None: - exif_bytes = piexif.dump({ - "Exif": { - piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(info or "", encoding="unicode") - }, - }) - - piexif.insert(exif_bytes, temp_file_path) - else: - image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality) + save_image_with_geninfo(image_to_save, info, temp_file_path, extension, params.pnginfo) - # atomically rename the file with correct extension os.replace(temp_file_path, filename_without_extension + extension) fullfn_without_extension, extension = os.path.splitext(params.filename) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 471df23b..c6e45fb1 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -4,7 +4,7 @@ from pathlib import Path from PIL import PngImagePlugin from modules import shared -from modules.images import read_info_from_image +from modules.images import read_info_from_image, save_image_with_geninfo import gradio as gr import json import html @@ -343,22 +343,7 @@ def setup_ui(ui, gallery): assert is_allowed, f'writing to {filename} is not allowed' - if geninfo: - ext = os.path.splitext(filename)[1].lower() - if ext == '.png': - pnginfo_data = PngImagePlugin.PngInfo() - pnginfo_data.add_text('parameters', geninfo) - image.save(filename, pnginfo=pnginfo_data) - elif ext in ('.jpg', '.jpeg', '.webp'): - exif_bytes = piexif.dump({ - 'Exif': {piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(geninfo or '', - encoding='unicode')} - }) - image.save(filename, exif=exif_bytes, quality=shared.opts.jpeg_quality) - else: - image.save(filename) - else: - image.save(filename) + save_image_with_geninfo(image, geninfo, filename) return [page.create_html(ui.tabname) for page in ui.stored_extra_pages] -- cgit v1.2.3 From 8fe9ea7f4d8fd76038db61e1fd2b1cc5927ce108 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 17 May 2023 21:45:26 +0300 Subject: add options to show/hide hidden files and dirs, and to not list models/files in hidden directories --- modules/shared.py | 6 ++++++ modules/ui_extra_networks.py | 17 +++++++++++++++-- 2 files changed, 21 insertions(+), 2 deletions(-) (limited to 
'modules/ui_extra_networks.py') diff --git a/modules/shared.py b/modules/shared.py index 76af8b9c..23563582 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -378,6 +378,7 @@ options_templates.update(options_section(('system', "System"), { "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."), + "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name starts with \".\""), })) options_templates.update(options_section(('training', "Training"), { @@ -446,6 +447,8 @@ options_templates.update(options_section(('interrogate', "Interrogate Options"), })) options_templates.update(options_section(('extra_networks', "Extra Networks"), { + "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."), + "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'), "extra_networks_default_view": OptionInfo("cards", "Default view for Extra Networks", gr.Dropdown, {"choices": ["cards", "thumbs"]}), "extra_networks_default_multiplier": OptionInfo(1.0, "Multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"), @@ -825,4 +828,7 @@ def walk_files(path, allowed_extensions=None): if ext not in allowed_extensions: continue + if not opts.list_hidden_files and ("/." in root or "\\." in root): + continue + yield os.path.join(root, filename) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index c6e45fb1..8669cc1a 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -105,6 +105,9 @@ class ExtraNetworksPage: if not is_empty and not subdir.endswith("/"): subdir = subdir + "/" + if ("/." in subdir or subdir.startswith(".")) and not shared.opts.extra_networks_show_hidden_directories: + continue + subdirs[subdir] = 1 if subdirs: @@ -147,6 +150,10 @@ class ExtraNetworksPage: return [] def create_html_for_item(self, item, tabname): + """ + Create HTML for card item in tab tabname; can return empty string if the item is not meant to be shown. + """ + preview = item.get("preview", None) onclick = item.get("onclick", None) @@ -169,9 +176,15 @@ class ExtraNetworksPage: if filename.startswith(absdir): local_path = filename[len(absdir):] - # if this is true, the item must not be show in the default view, and must instead only be + # if this is true, the item must not be shown in the default view, and must instead only be # shown when searching for it - search_only = "/." in local_path or "\\." in local_path + if shared.opts.extra_networks_hidden_models == "Always": + search_only = False + else: + search_only = "/." in local_path or "\\." in local_path + + if search_only and shared.opts.extra_networks_hidden_models == "Never": + return "" args = { "style": f"'display: none; {height}{width}{background_image}'", -- cgit v1.2.3
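
The save_image_with_geninfo commit above reduces to one dispatch: PNG files carry the generation parameters in a "parameters" text chunk, while JPEG/WebP files carry the same string as an EXIF UserComment built with piexif. The condensed sketch below illustrates only that dispatch, under assumptions not present in the patches: the function name write_geninfo and the file names in the usage comment are placeholders, and the original's RGBA-to-RGB conversion, jpeg_quality/webp_lossless options, and enable_pnginfo check are omitted for brevity.

import piexif
import piexif.helper
from PIL import Image, PngImagePlugin


def write_geninfo(image: Image.Image, geninfo: str, filename: str) -> None:
    # Condensed version of the PNG-vs-EXIF dispatch in save_image_with_geninfo.
    ext = filename.rsplit(".", 1)[-1].lower()

    if ext == "png":
        # PNG supports text chunks; "parameters" is the chunk read back elsewhere in the webui.
        pnginfo = PngImagePlugin.PngInfo()
        pnginfo.add_text("parameters", geninfo)
        image.save(filename, pnginfo=pnginfo)
    elif ext in ("jpg", "jpeg", "webp"):
        # JPEG/WebP have no text chunks, so the string goes into the EXIF UserComment tag instead.
        exif_bytes = piexif.dump({
            "Exif": {piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(geninfo, encoding="unicode")},
        })
        image.save(filename, exif=exif_bytes)
    else:
        image.save(filename)


# Hypothetical usage: re-save an extra-network preview with its prompt embedded.
# write_geninfo(Image.open("card.png").convert("RGB"), "example prompt text", "card.jpeg")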