From 3ba6c3c83c0983a025c7bddc08bb7f49481b3cbb Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Tue, 9 May 2023 22:17:58 +0300 Subject: Fix up string formatting/concatenation to f-strings where feasible --- modules/textual_inversion/autocrop.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'modules/textual_inversion/autocrop.py') diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index 68e1103c..ba1bdcd4 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -111,7 +111,7 @@ def focal_point(im, settings): if corner_centroid is not None: color = BLUE box = corner_centroid.bounding(max_size * corner_centroid.weight) - d.text((box[0], box[1]-15), "Edge: %.02f" % corner_centroid.weight, fill=color) + d.text((box[0], box[1]-15), f"Edge: {corner_centroid.weight:.02f}", fill=color) d.ellipse(box, outline=color) if len(corner_points) > 1: for f in corner_points: @@ -119,7 +119,7 @@ def focal_point(im, settings): if entropy_centroid is not None: color = "#ff0" box = entropy_centroid.bounding(max_size * entropy_centroid.weight) - d.text((box[0], box[1]-15), "Entropy: %.02f" % entropy_centroid.weight, fill=color) + d.text((box[0], box[1]-15), f"Entropy: {entropy_centroid.weight:.02f}", fill=color) d.ellipse(box, outline=color) if len(entropy_points) > 1: for f in entropy_points: @@ -127,7 +127,7 @@ def focal_point(im, settings): if face_centroid is not None: color = RED box = face_centroid.bounding(max_size * face_centroid.weight) - d.text((box[0], box[1]-15), "Face: %.02f" % face_centroid.weight, fill=color) + d.text((box[0], box[1]-15), f"Face: {face_centroid.weight:.02f}", fill=color) d.ellipse(box, outline=color) if len(face_points) > 1: for f in face_points: -- cgit v1.2.3 From 96d6ca4199e7c5eee8d451618de5161cea317c40 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 08:25:25 +0300 Subject: manual fixes for ruff --- extensions-builtin/LDSR/ldsr_model_arch.py | 2 +- extensions-builtin/LDSR/scripts/ldsr_model.py | 3 +- extensions-builtin/LDSR/sd_hijack_autoencoder.py | 10 +- extensions-builtin/LDSR/sd_hijack_ddpm_v1.py | 26 ++--- extensions-builtin/ScuNET/scunet_model_arch.py | 9 +- extensions-builtin/SwinIR/scripts/swinir_model.py | 2 +- modules/api/api.py | 129 +++++++++++----------- modules/api/models.py | 5 +- modules/codeformer/codeformer_arch.py | 2 +- modules/esrgan_model_arch.py | 2 + modules/extra_networks_hypernet.py | 2 +- modules/images.py | 4 +- modules/img2img.py | 1 - modules/interrogate.py | 1 - modules/modelloader.py | 6 +- modules/models/diffusion/ddpm_edit.py | 26 ++--- modules/models/diffusion/uni_pc/sampler.py | 3 +- modules/processing.py | 2 +- modules/prompt_parser.py | 11 +- modules/textual_inversion/autocrop.py | 2 +- modules/ui.py | 8 +- modules/upscaler.py | 2 +- 22 files changed, 129 insertions(+), 129 deletions(-) (limited to 'modules/textual_inversion/autocrop.py') diff --git a/extensions-builtin/LDSR/ldsr_model_arch.py b/extensions-builtin/LDSR/ldsr_model_arch.py index 2339de7f..a5fb8907 100644 --- a/extensions-builtin/LDSR/ldsr_model_arch.py +++ b/extensions-builtin/LDSR/ldsr_model_arch.py @@ -243,7 +243,7 @@ def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True) log["sample_noquant"] = x_sample_noquant log["sample_diff"] = torch.abs(x_sample_noquant - x_sample) - except: + except Exception: pass log["sample"] = x_sample 
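
For reference, the two mechanical patterns these fixes apply most often are %-style formatting replaced by f-strings (the autocrop hunks above) and bare except: narrowed to except Exception: (the ldsr_model_arch hunk above and several files below). A minimal standalone sketch of both, using hypothetical values and a hypothetical helper rather than code from this repository:

    # Hypothetical values for illustration; not taken from the repository.
    weight = 0.8731

    label_old = "Edge: %.02f" % weight   # printf-style, as removed above
    label_new = f"Edge: {weight:.02f}"   # equivalent f-string replacement
    assert label_old == label_new == "Edge: 0.87"

    def empty_cuda_cache_quietly():
        # `except Exception:` still swallows ordinary runtime errors, but unlike
        # a bare `except:` it no longer traps KeyboardInterrupt or SystemExit,
        # which is why linters such as ruff flag the bare form.
        try:
            import torch
            torch.cuda.empty_cache()
        except Exception:
            pass
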
diff --git a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py index da19cff1..e8dc083c 100644 --- a/extensions-builtin/LDSR/scripts/ldsr_model.py +++ b/extensions-builtin/LDSR/scripts/ldsr_model.py @@ -7,7 +7,8 @@ from basicsr.utils.download_util import load_file_from_url from modules.upscaler import Upscaler, UpscalerData from ldsr_model_arch import LDSR from modules import shared, script_callbacks -import sd_hijack_autoencoder, sd_hijack_ddpm_v1 +import sd_hijack_autoencoder +import sd_hijack_ddpm_v1 class UpscalerLDSR(Upscaler): diff --git a/extensions-builtin/LDSR/sd_hijack_autoencoder.py b/extensions-builtin/LDSR/sd_hijack_autoencoder.py index db2231dd..6303fed5 100644 --- a/extensions-builtin/LDSR/sd_hijack_autoencoder.py +++ b/extensions-builtin/LDSR/sd_hijack_autoencoder.py @@ -1,16 +1,21 @@ # The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo # The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo # As the LDSR upscaler relies on VQModel & VQModelInterface, the hijack aims to put them back into the ldm.models.autoencoder - +import numpy as np import torch import pytorch_lightning as pl import torch.nn.functional as F from contextlib import contextmanager + +from torch.optim.lr_scheduler import LambdaLR + +from ldm.modules.ema import LitEma from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer from ldm.modules.diffusionmodules.model import Encoder, Decoder from ldm.util import instantiate_from_config import ldm.models.autoencoder +from packaging import version class VQModel(pl.LightningModule): def __init__(self, @@ -249,7 +254,8 @@ class VQModel(pl.LightningModule): if plot_ema: with self.ema_scope(): xrec_ema, _ = self(x) - if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema) + if x.shape[1] > 3: + xrec_ema = self.to_rgb(xrec_ema) log["reconstructions_ema"] = xrec_ema return log diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py index 5c0488e5..4d3f6c56 100644 --- a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py @@ -450,7 +450,7 @@ class LatentDiffusionV1(DDPMV1): self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 - except: + except Exception: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor @@ -877,16 +877,6 @@ class LatentDiffusionV1(DDPMV1): c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) - def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset - def rescale_bbox(bbox): - x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) - y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) - w = min(bbox[2] / crop_coordinates[2], 1 - x0) - h = min(bbox[3] / crop_coordinates[3], 1 - y0) - return x0, y0, w, h - - return [rescale_bbox(b) for b in bboxes] - def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): @@ -1157,8 +1147,10 @@ class LatentDiffusionV1(DDPMV1): if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) - if callback: callback(i) - if img_callback: img_callback(img, i) + if callback: + callback(i) + if img_callback: + img_callback(img, i) return img, intermediates @torch.no_grad() @@ -1205,8 +1197,10 @@ 
class LatentDiffusionV1(DDPMV1): if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) - if callback: callback(i) - if img_callback: img_callback(img, i) + if callback: + callback(i) + if img_callback: + img_callback(img, i) if return_intermediates: return img, intermediates @@ -1322,7 +1316,7 @@ class LatentDiffusionV1(DDPMV1): if inpaint: # make a simple center square - b, h, w = z.shape[0], z.shape[2], z.shape[3] + h, w = z.shape[2], z.shape[3] mask = torch.ones(N, h, w).to(self.device) # zeros will be filled in mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. diff --git a/extensions-builtin/ScuNET/scunet_model_arch.py b/extensions-builtin/ScuNET/scunet_model_arch.py index 43ca8d36..8028918a 100644 --- a/extensions-builtin/ScuNET/scunet_model_arch.py +++ b/extensions-builtin/ScuNET/scunet_model_arch.py @@ -61,7 +61,9 @@ class WMSA(nn.Module): Returns: output: tensor shape [b h w c] """ - if self.type != 'W': x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2)) + if self.type != 'W': + x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2)) + x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size) h_windows = x.size(1) w_windows = x.size(2) @@ -85,8 +87,9 @@ class WMSA(nn.Module): output = self.linear(output) output = rearrange(output, 'b (w1 w2) (p1 p2) c -> b (w1 p1) (w2 p2) c', w1=h_windows, p1=self.window_size) - if self.type != 'W': output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2), - dims=(1, 2)) + if self.type != 'W': + output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2), dims=(1, 2)) + return output def relative_embedding(self): diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py index e8783bca..d77c3a92 100644 --- a/extensions-builtin/SwinIR/scripts/swinir_model.py +++ b/extensions-builtin/SwinIR/scripts/swinir_model.py @@ -45,7 +45,7 @@ class UpscalerSwinIR(Upscaler): img = upscale(img, model) try: torch.cuda.empty_cache() - except: + except Exception: pass return img diff --git a/modules/api/api.py b/modules/api/api.py index d47c39fc..f52d371b 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -15,7 +15,8 @@ from secrets import compare_digest import modules.shared as shared from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing -from modules.api.models import * +from modules.api import models +from modules.shared import opts from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.textual_inversion.textual_inversion import create_embedding, train_embedding from modules.textual_inversion.preprocess import preprocess @@ -25,20 +26,21 @@ from modules.sd_models import checkpoints_list, unload_model_weights, reload_mod from modules.sd_models_config import find_checkpoint_config_near_filename from modules.realesrgan_model import get_realesrgan_models from modules import devices -from typing import List +from typing import Dict, List, Any import piexif import piexif.helper + def upscaler_to_index(name: str): try: return [x.name.lower() for x in shared.sd_upscalers].index(name.lower()) - except: - raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in sd_upscalers])}") + except Exception: + raise HTTPException(status_code=400, detail=f"Invalid 
upscaler, needs to be one of these: {' , '.join([x.name for x in shared.sd_upscalers])}") def script_name_to_index(name, scripts): try: return [script.title().lower() for script in scripts].index(name.lower()) - except: + except Exception: raise HTTPException(status_code=422, detail=f"Script '{name}' not found") def validate_sampler_name(name): @@ -99,7 +101,7 @@ def api_middleware(app: FastAPI): import starlette # importing just so it can be placed on silent list from rich.console import Console console = Console() - except: + except Exception: import traceback rich_available = False @@ -166,36 +168,36 @@ class Api: self.app = app self.queue_lock = queue_lock api_middleware(self.app) - self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse) - self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse) - self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) - self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) - self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse) - self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse) + self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=models.TextToImageResponse) + self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=models.ImageToImageResponse) + self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=models.ExtrasSingleImageResponse) + self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=models.ExtrasBatchImagesResponse) + self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=models.PNGInfoResponse) + self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=models.ProgressResponse) self.add_api_route("/sdapi/v1/interrogate", self.interrogateapi, methods=["POST"]) self.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"]) self.add_api_route("/sdapi/v1/skip", self.skip, methods=["POST"]) - self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=OptionsModel) + self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=models.OptionsModel) self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"]) - self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=FlagsModel) - self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[SamplerItem]) - self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[UpscalerItem]) - self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[SDModelItem]) - self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[HypernetworkItem]) - self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[FaceRestorerItem]) - self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], 
response_model=List[RealesrganItem]) - self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[PromptStyleItem]) - self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=EmbeddingsResponse) + self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel) + self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[models.SamplerItem]) + self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[models.UpscalerItem]) + self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[models.SDModelItem]) + self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[models.HypernetworkItem]) + self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[models.FaceRestorerItem]) + self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[models.RealesrganItem]) + self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[models.PromptStyleItem]) + self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=models.EmbeddingsResponse) self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"]) - self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=CreateResponse) - self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=CreateResponse) - self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=PreprocessResponse) - self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=TrainResponse) - self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=TrainResponse) - self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=MemoryResponse) + self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse) + self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse) + self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=models.PreprocessResponse) + self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=models.TrainResponse) + self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=models.TrainResponse) + self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=models.MemoryResponse) self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"]) self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"]) - self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=ScriptsList) + self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList) self.default_script_arg_txt2img = [] self.default_script_arg_img2img = [] @@ -224,7 +226,7 @@ class Api: t2ilist = [str(title.lower()) for title in 
scripts.scripts_txt2img.titles] i2ilist = [str(title.lower()) for title in scripts.scripts_img2img.titles] - return ScriptsList(txt2img = t2ilist, img2img = i2ilist) + return models.ScriptsList(txt2img=t2ilist, img2img=i2ilist) def get_script(self, script_name, script_runner): if script_name is None or script_name == "": @@ -276,7 +278,7 @@ class Api: script_args[alwayson_script.args_from + idx] = request.alwayson_scripts[alwayson_script_name]["args"][idx] return script_args - def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): + def text2imgapi(self, txt2imgreq: models.StableDiffusionTxt2ImgProcessingAPI): script_runner = scripts.scripts_txt2img if not script_runner.scripts: script_runner.initialize_scripts(False) @@ -320,9 +322,9 @@ class Api: b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else [] - return TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js()) + return models.TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js()) - def img2imgapi(self, img2imgreq: StableDiffusionImg2ImgProcessingAPI): + def img2imgapi(self, img2imgreq: models.StableDiffusionImg2ImgProcessingAPI): init_images = img2imgreq.init_images if init_images is None: raise HTTPException(status_code=404, detail="Init image not found") @@ -381,9 +383,9 @@ class Api: img2imgreq.init_images = None img2imgreq.mask = None - return ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js()) + return models.ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js()) - def extras_single_image_api(self, req: ExtrasSingleImageRequest): + def extras_single_image_api(self, req: models.ExtrasSingleImageRequest): reqDict = setUpscalers(req) reqDict['image'] = decode_base64_to_image(reqDict['image']) @@ -391,9 +393,9 @@ class Api: with self.queue_lock: result = postprocessing.run_extras(extras_mode=0, image_folder="", input_dir="", output_dir="", save_output=False, **reqDict) - return ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1]) + return models.ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1]) - def extras_batch_images_api(self, req: ExtrasBatchImagesRequest): + def extras_batch_images_api(self, req: models.ExtrasBatchImagesRequest): reqDict = setUpscalers(req) image_list = reqDict.pop('imageList', []) @@ -402,15 +404,15 @@ class Api: with self.queue_lock: result = postprocessing.run_extras(extras_mode=1, image_folder=image_folder, image="", input_dir="", output_dir="", save_output=False, **reqDict) - return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) + return models.ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) - def pnginfoapi(self, req: PNGInfoRequest): + def pnginfoapi(self, req: models.PNGInfoRequest): if(not req.image.strip()): - return PNGInfoResponse(info="") + return models.PNGInfoResponse(info="") image = decode_base64_to_image(req.image.strip()) if image is None: - return PNGInfoResponse(info="") + return models.PNGInfoResponse(info="") geninfo, items = images.read_info_from_image(image) if geninfo is None: @@ -418,13 +420,13 @@ class Api: items = {**{'parameters': geninfo}, **items} - return PNGInfoResponse(info=geninfo, items=items) + return models.PNGInfoResponse(info=geninfo, items=items) - def progressapi(self, req: ProgressRequest = Depends()): + def 
progressapi(self, req: models.ProgressRequest = Depends()): # copy from check_progress_call of ui.py if shared.state.job_count == 0: - return ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo) + return models.ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo) # avoid dividing zero progress = 0.01 @@ -446,9 +448,9 @@ class Api: if shared.state.current_image and not req.skip_current_image: current_image = encode_pil_to_base64(shared.state.current_image) - return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo) + return models.ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo) - def interrogateapi(self, interrogatereq: InterrogateRequest): + def interrogateapi(self, interrogatereq: models.InterrogateRequest): image_b64 = interrogatereq.image if image_b64 is None: raise HTTPException(status_code=404, detail="Image not found") @@ -465,7 +467,7 @@ class Api: else: raise HTTPException(status_code=404, detail="Model not found") - return InterrogateResponse(caption=processed) + return models.InterrogateResponse(caption=processed) def interruptapi(self): shared.state.interrupt() @@ -570,36 +572,36 @@ class Api: filename = create_embedding(**args) # create empty embedding sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() # reload embeddings so new one can be immediately used shared.state.end() - return CreateResponse(info=f"create embedding filename: {filename}") + return models.CreateResponse(info=f"create embedding filename: {filename}") except AssertionError as e: shared.state.end() - return TrainResponse(info=f"create embedding error: {e}") + return models.TrainResponse(info=f"create embedding error: {e}") def create_hypernetwork(self, args: dict): try: shared.state.begin() filename = create_hypernetwork(**args) # create empty embedding shared.state.end() - return CreateResponse(info=f"create hypernetwork filename: {filename}") + return models.CreateResponse(info=f"create hypernetwork filename: {filename}") except AssertionError as e: shared.state.end() - return TrainResponse(info=f"create hypernetwork error: {e}") + return models.TrainResponse(info=f"create hypernetwork error: {e}") def preprocess(self, args: dict): try: shared.state.begin() preprocess(**args) # quick operation unless blip/booru interrogation is enabled shared.state.end() - return PreprocessResponse(info = 'preprocess complete') + return models.PreprocessResponse(info = 'preprocess complete') except KeyError as e: shared.state.end() - return PreprocessResponse(info=f"preprocess error: invalid token: {e}") + return models.PreprocessResponse(info=f"preprocess error: invalid token: {e}") except AssertionError as e: shared.state.end() - return PreprocessResponse(info=f"preprocess error: {e}") + return models.PreprocessResponse(info=f"preprocess error: {e}") except FileNotFoundError as e: shared.state.end() - return PreprocessResponse(info=f'preprocess error: {e}') + return models.PreprocessResponse(info=f'preprocess error: {e}') def train_embedding(self, args: dict): try: @@ -617,10 +619,10 @@ class Api: if not apply_optimizations: sd_hijack.apply_optimizations() shared.state.end() - return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") + return 
models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") except AssertionError as msg: shared.state.end() - return TrainResponse(info=f"train embedding error: {msg}") + return models.TrainResponse(info=f"train embedding error: {msg}") def train_hypernetwork(self, args: dict): try: @@ -641,14 +643,15 @@ class Api: if not apply_optimizations: sd_hijack.apply_optimizations() shared.state.end() - return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") + return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") except AssertionError: shared.state.end() - return TrainResponse(info=f"train embedding error: {error}") + return models.TrainResponse(info=f"train embedding error: {error}") def get_memory(self): try: - import os, psutil + import os + import psutil process = psutil.Process(os.getpid()) res = process.memory_info() # only rss is cross-platform guaranteed so we dont rely on other values ram_total = 100 * res.rss / process.memory_percent() # and total memory is calculated as actual value is not cross-platform safe @@ -675,10 +678,10 @@ class Api: 'events': warnings, } else: - cuda = { 'error': 'unavailable' } + cuda = {'error': 'unavailable'} except Exception as err: - cuda = { 'error': f'{err}' } - return MemoryResponse(ram = ram, cuda = cuda) + cuda = {'error': f'{err}'} + return models.MemoryResponse(ram=ram, cuda=cuda) def launch(self, server_name, port): self.app.include_router(self.router) diff --git a/modules/api/models.py b/modules/api/models.py index 4a70f440..4d291076 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -223,8 +223,9 @@ for key in _options: if(_options[key].dest != 'help'): flag = _options[key] _type = str - if _options[key].default is not None: _type = type(_options[key].default) - flags.update({flag.dest: (_type,Field(default=flag.default, description=flag.help))}) + if _options[key].default is not None: + _type = type(_options[key].default) + flags.update({flag.dest: (_type, Field(default=flag.default, description=flag.help))}) FlagsModel = create_model("Flags", **flags) diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py index 11dcc3ee..f1a7cf09 100644 --- a/modules/codeformer/codeformer_arch.py +++ b/modules/codeformer/codeformer_arch.py @@ -7,7 +7,7 @@ from torch import nn, Tensor import torch.nn.functional as F from typing import Optional, List -from modules.codeformer.vqgan_arch import * +from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock from basicsr.utils import get_root_logger from basicsr.utils.registry import ARCH_REGISTRY diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py index 6071fea7..7f8bc7c0 100644 --- a/modules/esrgan_model_arch.py +++ b/modules/esrgan_model_arch.py @@ -438,9 +438,11 @@ def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias= padding = padding if pad_type == 'zero' else 0 if convtype=='PartialConv2D': + from torchvision.ops import PartialConv2d # this is definitely not going to work, but PartialConv2d doesn't work anyway and this shuts up static analyzer c = PartialConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups) elif convtype=='DeformConv2D': + from torchvision.ops import DeformConv2d # not tested c = DeformConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups) 
elif convtype=='Conv3D': diff --git a/modules/extra_networks_hypernet.py b/modules/extra_networks_hypernet.py index 04f27c9f..aa2a14ef 100644 --- a/modules/extra_networks_hypernet.py +++ b/modules/extra_networks_hypernet.py @@ -1,4 +1,4 @@ -from modules import extra_networks, shared, extra_networks +from modules import extra_networks, shared from modules.hypernetworks import hypernetwork diff --git a/modules/images.py b/modules/images.py index 3d5d76cc..5eb6d855 100644 --- a/modules/images.py +++ b/modules/images.py @@ -472,9 +472,9 @@ def get_next_sequence_number(path, basename): prefix_length = len(basename) for p in os.listdir(path): if p.startswith(basename): - l = os.path.splitext(p[prefix_length:])[0].split('-') # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element) + parts = os.path.splitext(p[prefix_length:])[0].split('-') # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element) try: - result = max(int(l[0]), result) + result = max(int(parts[0]), result) except ValueError: pass diff --git a/modules/img2img.py b/modules/img2img.py index cdae301a..32b1ecd6 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -13,7 +13,6 @@ from modules.shared import opts, state import modules.shared as shared import modules.processing as processing from modules.ui import plaintext_to_html -import modules.images as images import modules.scripts diff --git a/modules/interrogate.py b/modules/interrogate.py index 9f7d657f..22df9216 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -11,7 +11,6 @@ import torch.hub from torchvision import transforms from torchvision.transforms.functional import InterpolationMode -import modules.shared as shared from modules import devices, paths, shared, lowvram, modelloader, errors blip_image_eval_size = 384 diff --git a/modules/modelloader.py b/modules/modelloader.py index cb85ac4f..cf685000 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -108,12 +108,12 @@ def move_files(src_path: str, dest_path: str, ext_filter: str = None): print(f"Moving {file} from {src_path} to {dest_path}.") try: shutil.move(fullpath, dest_path) - except: + except Exception: pass if len(os.listdir(src_path)) == 0: print(f"Removing empty folder: {src_path}") shutil.rmtree(src_path, True) - except: + except Exception: pass @@ -141,7 +141,7 @@ def load_upscalers(): full_model = f"modules.{model_name}_model" try: importlib.import_module(full_model) - except: + except Exception: pass datas = [] diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py index f880bc3c..611c2b69 100644 --- a/modules/models/diffusion/ddpm_edit.py +++ b/modules/models/diffusion/ddpm_edit.py @@ -479,7 +479,7 @@ class LatentDiffusion(DDPM): self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 - except: + except Exception: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor @@ -891,16 +891,6 @@ class LatentDiffusion(DDPM): c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) - def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset - def rescale_bbox(bbox): - x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) - y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) - w = min(bbox[2] / crop_coordinates[2], 1 - x0) - h = min(bbox[3] 
/ crop_coordinates[3], 1 - y0) - return x0, y0, w, h - - return [rescale_bbox(b) for b in bboxes] - def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): @@ -1171,8 +1161,10 @@ class LatentDiffusion(DDPM): if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) - if callback: callback(i) - if img_callback: img_callback(img, i) + if callback: + callback(i) + if img_callback: + img_callback(img, i) return img, intermediates @torch.no_grad() @@ -1219,8 +1211,10 @@ class LatentDiffusion(DDPM): if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) - if callback: callback(i) - if img_callback: img_callback(img, i) + if callback: + callback(i) + if img_callback: + img_callback(img, i) if return_intermediates: return img, intermediates @@ -1337,7 +1331,7 @@ class LatentDiffusion(DDPM): if inpaint: # make a simple center square - b, h, w = z.shape[0], z.shape[2], z.shape[3] + h, w = z.shape[2], z.shape[3] mask = torch.ones(N, h, w).to(self.device) # zeros will be filled in mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. diff --git a/modules/models/diffusion/uni_pc/sampler.py b/modules/models/diffusion/uni_pc/sampler.py index a241c8a7..0a9defa1 100644 --- a/modules/models/diffusion/uni_pc/sampler.py +++ b/modules/models/diffusion/uni_pc/sampler.py @@ -54,7 +54,8 @@ class UniPCSampler(object): if conditioning is not None: if isinstance(conditioning, dict): ctmp = conditioning[list(conditioning.keys())[0]] - while isinstance(ctmp, list): ctmp = ctmp[0] + while isinstance(ctmp, list): + ctmp = ctmp[0] cbs = ctmp.shape[0] if cbs != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") diff --git a/modules/processing.py b/modules/processing.py index 1a76e552..6f5233c1 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -664,7 +664,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if not shared.opts.dont_fix_second_order_samplers_schedule: try: step_multiplier = 2 if sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1 - except: + except Exception: pass uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps * step_multiplier, cached_uc) c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps * step_multiplier, cached_c) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index e084e948..3a720721 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -54,18 +54,21 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): """ def collect_steps(steps, tree): - l = [steps] + res = [steps] + class CollectSteps(lark.Visitor): def scheduled(self, tree): tree.children[-1] = float(tree.children[-1]) if tree.children[-1] < 1: tree.children[-1] *= steps tree.children[-1] = min(steps, int(tree.children[-1])) - l.append(tree.children[-1]) + res.append(tree.children[-1]) + def alternate(self, tree): - l.extend(range(1, steps+1)) + res.extend(range(1, steps+1)) + CollectSteps().visit(tree) - return sorted(set(l)) + return sorted(set(res)) def at_step(step, tree): class AtStep(lark.Transformer): diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index ba1bdcd4..d7d8d2e3 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -185,7 +185,7 @@ def 
image_face_points(im, settings): try: faces = classifier.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE) - except: + except Exception: continue if len(faces) > 0: diff --git a/modules/ui.py b/modules/ui.py index 2171f3aa..6beda76f 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1,15 +1,9 @@ -import html import json -import math import mimetypes import os -import platform -import random import sys -import tempfile -import time import traceback -from functools import partial, reduce +from functools import reduce import warnings import gradio as gr diff --git a/modules/upscaler.py b/modules/upscaler.py index e2eaa730..0ad4fe99 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -45,7 +45,7 @@ class Upscaler: try: import cv2 self.can_tile = True - except: + except Exception: pass @abstractmethod -- cgit v1.2.3 From f741a98baccae100fcfb40c017b5c35c5cba1b0c Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 08:43:42 +0300 Subject: imports cleanup for ruff --- extensions-builtin/Lora/lora.py | 1 - extensions-builtin/ScuNET/scripts/scunet_model.py | 1 - extensions-builtin/SwinIR/scripts/swinir_model.py | 3 +-- modules/codeformer/codeformer_arch.py | 4 +--- modules/codeformer/vqgan_arch.py | 2 -- modules/codeformer_model.py | 4 +--- modules/config_states.py | 2 +- modules/esrgan_model.py | 2 +- modules/esrgan_model_arch.py | 1 - modules/extensions.py | 1 - modules/generation_parameters_copypaste.py | 4 ---- modules/hypernetworks/hypernetwork.py | 3 +-- modules/hypernetworks/ui.py | 2 -- modules/images.py | 2 +- modules/img2img.py | 5 +---- modules/mac_specific.py | 1 - modules/modelloader.py | 1 - modules/models/diffusion/uni_pc/uni_pc.py | 1 - modules/processing.py | 5 ++--- modules/sd_hijack.py | 2 +- modules/sd_hijack_inpainting.py | 6 ------ modules/sd_hijack_ip2p.py | 5 +---- modules/sd_hijack_xlmr.py | 2 -- modules/sd_models.py | 2 +- modules/sd_models_config.py | 1 - modules/sd_samplers_kdiffusion.py | 1 - modules/sd_vae.py | 3 --- modules/shared.py | 3 --- modules/styles.py | 9 --------- modules/textual_inversion/autocrop.py | 4 +--- modules/textual_inversion/image_embedding.py | 2 +- modules/textual_inversion/preprocess.py | 4 ---- modules/textual_inversion/textual_inversion.py | 1 - modules/txt2img.py | 9 +++------ modules/ui.py | 5 ++--- modules/ui_extra_networks.py | 1 - modules/ui_postprocessing.py | 2 +- modules/upscaler.py | 2 -- modules/xlmr.py | 2 +- pyproject.toml | 11 +++++++---- scripts/custom_code.py | 2 +- scripts/outpainting_mk_2.py | 4 ++-- scripts/poor_mans_outpainting.py | 4 ++-- scripts/prompt_matrix.py | 7 ++----- scripts/prompts_from_file.py | 5 +---- scripts/sd_upscale.py | 4 ++-- scripts/xyz_grid.py | 6 ++---- webui.py | 2 +- 48 files changed, 42 insertions(+), 114 deletions(-) (limited to 'modules/textual_inversion/autocrop.py') diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py index ba1293df..0ab43229 100644 --- a/extensions-builtin/Lora/lora.py +++ b/extensions-builtin/Lora/lora.py @@ -1,4 +1,3 @@ -import glob import os import re import torch diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py index c7fd5739..aa2fdb3a 100644 --- a/extensions-builtin/ScuNET/scripts/scunet_model.py +++ b/extensions-builtin/ScuNET/scripts/scunet_model.py @@ -13,7 +13,6 @@ import modules.upscaler from modules import devices, modelloader from scunet_model_arch import SCUNet as net 
from modules.shared import opts -from modules import images class UpscalerScuNET(modules.upscaler.Upscaler): diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py index d77c3a92..55dd94ab 100644 --- a/extensions-builtin/SwinIR/scripts/swinir_model.py +++ b/extensions-builtin/SwinIR/scripts/swinir_model.py @@ -1,4 +1,3 @@ -import contextlib import os import numpy as np @@ -8,7 +7,7 @@ from basicsr.utils.download_util import load_file_from_url from tqdm import tqdm from modules import modelloader, devices, script_callbacks, shared -from modules.shared import cmd_opts, opts, state +from modules.shared import opts, state from swinir_model_arch import SwinIR as net from swinir_model_arch_v2 import Swin2SR as net2 from modules.upscaler import Upscaler, UpscalerData diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py index f1a7cf09..00c407de 100644 --- a/modules/codeformer/codeformer_arch.py +++ b/modules/codeformer/codeformer_arch.py @@ -1,14 +1,12 @@ # this file is copied from CodeFormer repository. Please see comment in modules/codeformer_model.py import math -import numpy as np import torch from torch import nn, Tensor import torch.nn.functional as F -from typing import Optional, List +from typing import Optional from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock -from basicsr.utils import get_root_logger from basicsr.utils.registry import ARCH_REGISTRY def calc_mean_std(feat, eps=1e-5): diff --git a/modules/codeformer/vqgan_arch.py b/modules/codeformer/vqgan_arch.py index e7293683..820e6b12 100644 --- a/modules/codeformer/vqgan_arch.py +++ b/modules/codeformer/vqgan_arch.py @@ -5,11 +5,9 @@ VQGAN code, adapted from the original created by the Unleashing Transformers aut https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py ''' -import numpy as np import torch import torch.nn as nn import torch.nn.functional as F -import copy from basicsr.utils import get_root_logger from basicsr.utils.registry import ARCH_REGISTRY diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py index 8d84bbc9..8e56cb89 100644 --- a/modules/codeformer_model.py +++ b/modules/codeformer_model.py @@ -33,11 +33,9 @@ def setup_model(dirname): try: from torchvision.transforms.functional import normalize from modules.codeformer.codeformer_arch import CodeFormer - from basicsr.utils.download_util import load_file_from_url - from basicsr.utils import imwrite, img2tensor, tensor2img + from basicsr.utils import img2tensor, tensor2img from facelib.utils.face_restoration_helper import FaceRestoreHelper from facelib.detection.retinaface import retinaface - from modules.shared import cmd_opts net_class = CodeFormer diff --git a/modules/config_states.py b/modules/config_states.py index 2ea00929..8f1ff428 100644 --- a/modules/config_states.py +++ b/modules/config_states.py @@ -14,7 +14,7 @@ from collections import OrderedDict import git from modules import shared, extensions -from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path, config_states_dir +from modules.paths_internal import script_path, config_states_dir all_config_states = OrderedDict() diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index f4369257..85aa6934 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -6,7 +6,7 @@ from PIL import Image from basicsr.utils.download_util import load_file_from_url import modules.esrgan_model_arch as arch -from modules 
import shared, modelloader, images, devices +from modules import modelloader, images, devices from modules.upscaler import Upscaler, UpscalerData from modules.shared import opts diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py index 7f8bc7c0..4de9dd8d 100644 --- a/modules/esrgan_model_arch.py +++ b/modules/esrgan_model_arch.py @@ -2,7 +2,6 @@ from collections import OrderedDict import math -import functools import torch import torch.nn as nn import torch.nn.functional as F diff --git a/modules/extensions.py b/modules/extensions.py index 34d9d654..829f8cd9 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -3,7 +3,6 @@ import sys import traceback import time -from datetime import datetime import git from modules import shared diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index fe8b18b2..f1c59c46 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -1,15 +1,11 @@ import base64 -import html import io -import math import os import re -from pathlib import Path import gradio as gr from modules.paths import data_path from modules import shared, ui_tempdir, script_callbacks -import tempfile from PIL import Image re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)' diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 1fc49537..9fe749b7 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -1,4 +1,3 @@ -import csv import datetime import glob import html @@ -18,7 +17,7 @@ from modules.textual_inversion.learn_schedule import LearnRateScheduler from torch import einsum from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_ -from collections import defaultdict, deque +from collections import deque from statistics import stdev, mean diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py index 76599f5a..be168736 100644 --- a/modules/hypernetworks/ui.py +++ b/modules/hypernetworks/ui.py @@ -1,6 +1,4 @@ import html -import os -import re import gradio as gr import modules.hypernetworks.hypernetwork diff --git a/modules/images.py b/modules/images.py index 5eb6d855..7392cb8b 100644 --- a/modules/images.py +++ b/modules/images.py @@ -19,7 +19,7 @@ import json import hashlib from modules import sd_samplers, shared, script_callbacks, errors -from modules.shared import opts, cmd_opts +from modules.shared import opts LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) diff --git a/modules/img2img.py b/modules/img2img.py index 32b1ecd6..d704bf90 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -1,12 +1,9 @@ -import math import os -import sys -import traceback import numpy as np from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError -from modules import devices, sd_samplers +from modules import sd_samplers from modules.generation_parameters_copypaste import create_override_settings_dict from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images from modules.shared import opts, state diff --git a/modules/mac_specific.py b/modules/mac_specific.py index 40ce2101..5c2f92a1 100644 --- a/modules/mac_specific.py +++ b/modules/mac_specific.py @@ -1,6 +1,5 @@ import torch import platform -from modules import paths from modules.sd_hijack_utils import CondFunc from packaging import 
version diff --git a/modules/modelloader.py b/modules/modelloader.py index cf685000..92ada694 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -1,4 +1,3 @@ -import glob import os import shutil import importlib diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py index 11b330bc..a4c4ef4e 100644 --- a/modules/models/diffusion/uni_pc/uni_pc.py +++ b/modules/models/diffusion/uni_pc/uni_pc.py @@ -1,5 +1,4 @@ import torch -import torch.nn.functional as F import math from tqdm.auto import trange diff --git a/modules/processing.py b/modules/processing.py index 6f5233c1..c3932d6b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -2,7 +2,6 @@ import json import math import os import sys -import warnings import hashlib import torch @@ -11,10 +10,10 @@ from PIL import Image, ImageFilter, ImageOps import random import cv2 from skimage import exposure -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List import modules.sd_hijack -from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts +from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts from modules.sd_hijack import model_hijack from modules.shared import opts, cmd_opts, state import modules.shared as shared diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index d8135211..81573b78 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -3,7 +3,7 @@ from torch.nn.functional import silu from types import MethodType import modules.textual_inversion.textual_inversion -from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint +from modules import devices, sd_hijack_optimizations, shared from modules.hypernetworks import hypernetwork from modules.shared import cmd_opts from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 55a2ce4d..344d75c8 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -1,15 +1,9 @@ -import os import torch -from einops import repeat -from omegaconf import ListConfig - import ldm.models.diffusion.ddpm import ldm.models.diffusion.ddim import ldm.models.diffusion.plms -from ldm.models.diffusion.ddpm import LatentDiffusion -from ldm.models.diffusion.plms import PLMSSampler from ldm.models.diffusion.ddim import DDIMSampler, noise_like from ldm.models.diffusion.sampling_util import norm_thresholding diff --git a/modules/sd_hijack_ip2p.py b/modules/sd_hijack_ip2p.py index 41ed54a2..6fe6b6ff 100644 --- a/modules/sd_hijack_ip2p.py +++ b/modules/sd_hijack_ip2p.py @@ -1,8 +1,5 @@ -import collections import os.path -import sys -import gc -import time + def should_hijack_ip2p(checkpoint_info): from modules import sd_models_config diff --git a/modules/sd_hijack_xlmr.py b/modules/sd_hijack_xlmr.py index 4ac51c38..28528329 100644 --- a/modules/sd_hijack_xlmr.py +++ b/modules/sd_hijack_xlmr.py @@ -1,8 +1,6 @@ -import open_clip.tokenizer import torch from modules import sd_hijack_clip, devices -from modules.shared import opts class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords): diff --git a/modules/sd_models.py b/modules/sd_models.py index 11c1a344..1c09c709 100644 --- a/modules/sd_models.py +++ 
b/modules/sd_models.py @@ -565,7 +565,7 @@ def reload_model_weights(sd_model=None, info=None): def unload_model_weights(sd_model=None, info=None): - from modules import lowvram, devices, sd_hijack + from modules import devices, sd_hijack timer = Timer() if model_data.sd_model: diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py index 7a79925a..9bfe1237 100644 --- a/modules/sd_models_config.py +++ b/modules/sd_models_config.py @@ -1,4 +1,3 @@ -import re import os import torch diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 0fc9f456..3b8e9622 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -1,7 +1,6 @@ from collections import deque import torch import inspect -import einops import k_diffusion.sampling from modules import prompt_parser, devices, sd_samplers_common diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 521e485a..b7176125 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -1,8 +1,5 @@ -import torch -import safetensors.torch import os import collections -from collections import namedtuple from modules import paths, shared, devices, script_callbacks, sd_models import glob from copy import deepcopy diff --git a/modules/shared.py b/modules/shared.py index 4631965b..44cd2c0c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -1,12 +1,9 @@ -import argparse import datetime import json import os import sys import time -import requests -from PIL import Image import gradio as gr import tqdm diff --git a/modules/styles.py b/modules/styles.py index 11642075..c22769cf 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -1,18 +1,9 @@ -# We need this so Python doesn't complain about the unknown StableDiffusionProcessing-typehint at runtime -from __future__ import annotations - import csv import os import os.path import typing -import collections.abc as abc -import tempfile import shutil -if typing.TYPE_CHECKING: - # Only import this when code is being type-checked, it doesn't have any effect at runtime - from .processing import StableDiffusionProcessing - class PromptStyle(typing.NamedTuple): name: str diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index d7d8d2e3..7770d22f 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -1,10 +1,8 @@ import cv2 import requests import os -from collections import defaultdict -from math import log, sqrt import numpy as np -from PIL import Image, ImageDraw +from PIL import ImageDraw GREEN = "#0F0" BLUE = "#00F" diff --git a/modules/textual_inversion/image_embedding.py b/modules/textual_inversion/image_embedding.py index 5593f88c..ee0e850a 100644 --- a/modules/textual_inversion/image_embedding.py +++ b/modules/textual_inversion/image_embedding.py @@ -2,7 +2,7 @@ import base64 import json import numpy as np import zlib -from PIL import Image, PngImagePlugin, ImageDraw, ImageFont +from PIL import Image, ImageDraw, ImageFont from fonts.ttf import Roboto import torch from modules.shared import opts diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index da0bcb26..d0cad09e 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -1,13 +1,9 @@ import os from PIL import Image, ImageOps import math -import platform -import sys import tqdm -import time from modules import paths, shared, images, deepbooru -from modules.shared import opts, cmd_opts from 
modules.textual_inversion import autocrop diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index f753b75f..9ed9ba45 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -1,7 +1,6 @@ import os import sys import traceback -import inspect from collections import namedtuple import torch diff --git a/modules/txt2img.py b/modules/txt2img.py index 16841d0f..f022381c 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -1,18 +1,15 @@ import modules.scripts -from modules import sd_samplers +from modules import sd_samplers, processing from modules.generation_parameters_copypaste import create_override_settings_dict -from modules.processing import StableDiffusionProcessing, Processed, StableDiffusionProcessingTxt2Img, \ - StableDiffusionProcessingImg2Img, process_images from modules.shared import opts, cmd_opts import modules.shared as shared -import modules.processing as processing from modules.ui import plaintext_to_html def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, override_settings_texts, *args): override_settings = create_override_settings_dict(override_settings_texts) - p = StableDiffusionProcessingTxt2Img( + p = processing.StableDiffusionProcessingTxt2Img( sd_model=shared.sd_model, outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples, outpath_grids=opts.outdir_grids or opts.outdir_txt2img_grids, @@ -53,7 +50,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step processed = modules.scripts.scripts_txt2img.run(p, *args) if processed is None: - processed = process_images(p) + processed = processing.process_images(p) p.close() diff --git a/modules/ui.py b/modules/ui.py index 6beda76f..f7e57593 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -14,10 +14,10 @@ from PIL import Image, PngImagePlugin from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing, progress -from modules.ui_components import FormRow, FormColumn, FormGroup, ToolButton, FormHTML +from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path, data_path -from modules.shared import opts, cmd_opts, restricted_opts +from modules.shared import opts, cmd_opts import modules.codeformer_model import modules.generation_parameters_copypaste as parameters_copypaste @@ -28,7 +28,6 @@ import modules.shared as shared import modules.styles import modules.textual_inversion.ui from modules import prompt_parser -from modules.images import save_image from modules.sd_hijack import model_hijack from modules.sd_samplers import samplers, samplers_for_img2img from modules.textual_inversion import textual_inversion diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 49e06289..800e467a 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ 
-1,4 +1,3 @@ -import glob import os.path import urllib.parse from pathlib import Path diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index f25639e5..c7dc1154 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -1,5 +1,5 @@ import gradio as gr -from modules import scripts_postprocessing, scripts, shared, gfpgan_model, codeformer_model, ui_common, postprocessing, call_queue +from modules import scripts, shared, ui_common, postprocessing, call_queue import modules.generation_parameters_copypaste as parameters_copypaste diff --git a/modules/upscaler.py b/modules/upscaler.py index 0ad4fe99..777593b0 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -2,8 +2,6 @@ import os from abc import abstractmethod import PIL -import numpy as np -import torch from PIL import Image import modules.shared diff --git a/modules/xlmr.py b/modules/xlmr.py index beab3fdf..e056c3f6 100644 --- a/modules/xlmr.py +++ b/modules/xlmr.py @@ -1,4 +1,4 @@ -from transformers import BertPreTrainedModel,BertModel,BertConfig +from transformers import BertPreTrainedModel, BertConfig import torch.nn as nn import torch from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaConfig diff --git a/pyproject.toml b/pyproject.toml index 1e164abc..9caa9ba2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,13 @@ [tool.ruff] +exclude = ["extensions"] + ignore = [ "E501", - "E731", - "E402", # Module level import not at top of file - "F401" # Module imported but unused + + "F401", # Module imported but unused ] -exclude = ["extensions"] + +[tool.ruff.per-file-ignores] +"webui.py" = ["E402"] # Module level import not at top of file \ No newline at end of file diff --git a/scripts/custom_code.py b/scripts/custom_code.py index f36a3675..cc6f0d49 100644 --- a/scripts/custom_code.py +++ b/scripts/custom_code.py @@ -4,7 +4,7 @@ import ast import copy from modules.processing import Processed -from modules.shared import opts, cmd_opts, state +from modules.shared import cmd_opts def convertExpr2Expression(expr): diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index b10fed6c..665dbe89 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -7,9 +7,9 @@ import modules.scripts as scripts import gradio as gr from PIL import Image, ImageDraw -from modules import images, processing, devices +from modules import images from modules.processing import Processed, process_images -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state # this function is taken from https://github.com/parlance-zz/g-diffuser-bot diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py index ddcbd2d3..c0bbecc1 100644 --- a/scripts/poor_mans_outpainting.py +++ b/scripts/poor_mans_outpainting.py @@ -4,9 +4,9 @@ import modules.scripts as scripts import gradio as gr from PIL import Image, ImageDraw -from modules import images, processing, devices +from modules import images, devices from modules.processing import Processed, process_images -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state class Script(scripts.Script): diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index e9b11517..fb06beab 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -1,14 +1,11 @@ import math -from collections import namedtuple -from copy import copy -import random import modules.scripts as scripts import gradio as gr from modules import 
images -from modules.processing import process_images, Processed -from modules.shared import opts, cmd_opts, state +from modules.processing import process_images +from modules.shared import opts, state import modules.sd_samplers diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 76dc5778..149bc85f 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -1,6 +1,4 @@ import copy -import math -import os import random import sys import traceback @@ -11,8 +9,7 @@ import gradio as gr from modules import sd_samplers from modules.processing import Processed, process_images -from PIL import Image -from modules.shared import opts, cmd_opts, state +from modules.shared import state def process_string_tag(tag): diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index 332d76d9..d873a09c 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -4,9 +4,9 @@ import modules.scripts as scripts import gradio as gr from PIL import Image -from modules import processing, shared, sd_samplers, images, devices +from modules import processing, shared, images, devices from modules.processing import Processed -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state class Script(scripts.Script): diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 2ff42ef8..332e0ecd 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -10,15 +10,13 @@ import numpy as np import modules.scripts as scripts import gradio as gr -from modules import images, paths, sd_samplers, processing, sd_models, sd_vae +from modules import images, sd_samplers, processing, sd_models, sd_vae from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state import modules.shared as shared import modules.sd_samplers import modules.sd_models import modules.sd_vae -import glob -import os import re from modules.ui_components import ToolButton diff --git a/webui.py b/webui.py index ec3d2aba..48277075 100644 --- a/webui.py +++ b/webui.py @@ -43,7 +43,7 @@ if ".dev" in torch.__version__ or "+git" in torch.__version__: torch.__long_version__ = torch.__version__ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) -from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states +from modules import shared, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states import modules.codeformer_model as codeformer import modules.face_restoration import modules.gfpgan_model as gfpgan -- cgit v1.2.3 From 098d2fda5250f9f418fc641bd0f185cb461ee6d9 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 11 May 2023 18:13:35 +0300 Subject: Reindent autocrop with 4 spaces --- modules/textual_inversion/autocrop.py | 202 +++++++++++++++++----------------- 1 file changed, 102 insertions(+), 100 deletions(-) (limited to 'modules/textual_inversion/autocrop.py') diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index 7770d22f..8e667a4d 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -10,63 +10,64 @@ RED = "#F00" def crop_image(im, settings): - """ Intelligently crop an image to the subject matter """ - - scale_by = 1 - if is_landscape(im.width, im.height): - scale_by = settings.crop_height / im.height - elif is_portrait(im.width, im.height): - 
scale_by = settings.crop_width / im.width - elif is_square(im.width, im.height): - if is_square(settings.crop_width, settings.crop_height): - scale_by = settings.crop_width / im.width - elif is_landscape(settings.crop_width, settings.crop_height): - scale_by = settings.crop_width / im.width - elif is_portrait(settings.crop_width, settings.crop_height): - scale_by = settings.crop_height / im.height - - im = im.resize((int(im.width * scale_by), int(im.height * scale_by))) - im_debug = im.copy() - - focus = focal_point(im_debug, settings) - - # take the focal point and turn it into crop coordinates that try to center over the focal - # point but then get adjusted back into the frame - y_half = int(settings.crop_height / 2) - x_half = int(settings.crop_width / 2) - - x1 = focus.x - x_half - if x1 < 0: - x1 = 0 - elif x1 + settings.crop_width > im.width: - x1 = im.width - settings.crop_width - - y1 = focus.y - y_half - if y1 < 0: - y1 = 0 - elif y1 + settings.crop_height > im.height: - y1 = im.height - settings.crop_height - - x2 = x1 + settings.crop_width - y2 = y1 + settings.crop_height - - crop = [x1, y1, x2, y2] - - results = [] - - results.append(im.crop(tuple(crop))) - - if settings.annotate_image: - d = ImageDraw.Draw(im_debug) - rect = list(crop) - rect[2] -= 1 - rect[3] -= 1 - d.rectangle(rect, outline=GREEN) - results.append(im_debug) - if settings.destop_view_image: - im_debug.show() - - return results + """ Intelligently crop an image to the subject matter """ + + scale_by = 1 + if is_landscape(im.width, im.height): + scale_by = settings.crop_height / im.height + elif is_portrait(im.width, im.height): + scale_by = settings.crop_width / im.width + elif is_square(im.width, im.height): + if is_square(settings.crop_width, settings.crop_height): + scale_by = settings.crop_width / im.width + elif is_landscape(settings.crop_width, settings.crop_height): + scale_by = settings.crop_width / im.width + elif is_portrait(settings.crop_width, settings.crop_height): + scale_by = settings.crop_height / im.height + + + im = im.resize((int(im.width * scale_by), int(im.height * scale_by))) + im_debug = im.copy() + + focus = focal_point(im_debug, settings) + + # take the focal point and turn it into crop coordinates that try to center over the focal + # point but then get adjusted back into the frame + y_half = int(settings.crop_height / 2) + x_half = int(settings.crop_width / 2) + + x1 = focus.x - x_half + if x1 < 0: + x1 = 0 + elif x1 + settings.crop_width > im.width: + x1 = im.width - settings.crop_width + + y1 = focus.y - y_half + if y1 < 0: + y1 = 0 + elif y1 + settings.crop_height > im.height: + y1 = im.height - settings.crop_height + + x2 = x1 + settings.crop_width + y2 = y1 + settings.crop_height + + crop = [x1, y1, x2, y2] + + results = [] + + results.append(im.crop(tuple(crop))) + + if settings.annotate_image: + d = ImageDraw.Draw(im_debug) + rect = list(crop) + rect[2] -= 1 + rect[3] -= 1 + d.rectangle(rect, outline=GREEN) + results.append(im_debug) + if settings.destop_view_image: + im_debug.show() + + return results def focal_point(im, settings): corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else [] @@ -86,7 +87,7 @@ def focal_point(im, settings): corner_centroid = None if len(corner_points) > 0: corner_centroid = centroid(corner_points) - corner_centroid.weight = settings.corner_points_weight / weight_pref_total + corner_centroid.weight = settings.corner_points_weight / weight_pref_total pois.append(corner_centroid) entropy_centroid = None @@ -98,7 
+99,7 @@ def focal_point(im, settings): face_centroid = None if len(face_points) > 0: face_centroid = centroid(face_points) - face_centroid.weight = settings.face_points_weight / weight_pref_total + face_centroid.weight = settings.face_points_weight / weight_pref_total pois.append(face_centroid) average_point = poi_average(pois, settings) @@ -132,7 +133,7 @@ def focal_point(im, settings): d.rectangle(f.bounding(4), outline=color) d.ellipse(average_point.bounding(max_size), outline=GREEN) - + return average_point @@ -260,10 +261,11 @@ def image_entropy(im): hist = hist[hist > 0] return -np.log2(hist / hist.sum()).sum() + def centroid(pois): - x = [poi.x for poi in pois] - y = [poi.y for poi in pois] - return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois)) + x = [poi.x for poi in pois] + y = [poi.y for poi in pois] + return PointOfInterest(sum(x) / len(pois), sum(y) / len(pois)) def poi_average(pois, settings): @@ -281,59 +283,59 @@ def poi_average(pois, settings): def is_landscape(w, h): - return w > h + return w > h def is_portrait(w, h): - return h > w + return h > w def is_square(w, h): - return w == h + return w == h def download_and_cache_models(dirname): - download_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true' - model_file_name = 'face_detection_yunet.onnx' + download_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true' + model_file_name = 'face_detection_yunet.onnx' - if not os.path.exists(dirname): - os.makedirs(dirname) + if not os.path.exists(dirname): + os.makedirs(dirname) - cache_file = os.path.join(dirname, model_file_name) - if not os.path.exists(cache_file): - print(f"downloading face detection model from '{download_url}' to '{cache_file}'") - response = requests.get(download_url) - with open(cache_file, "wb") as f: - f.write(response.content) + cache_file = os.path.join(dirname, model_file_name) + if not os.path.exists(cache_file): + print(f"downloading face detection model from '{download_url}' to '{cache_file}'") + response = requests.get(download_url) + with open(cache_file, "wb") as f: + f.write(response.content) - if os.path.exists(cache_file): - return cache_file - return None + if os.path.exists(cache_file): + return cache_file + return None class PointOfInterest: - def __init__(self, x, y, weight=1.0, size=10): - self.x = x - self.y = y - self.weight = weight - self.size = size + def __init__(self, x, y, weight=1.0, size=10): + self.x = x + self.y = y + self.weight = weight + self.size = size - def bounding(self, size): - return [ - self.x - size//2, - self.y - size//2, - self.x + size//2, - self.y + size//2 - ] + def bounding(self, size): + return [ + self.x - size // 2, + self.y - size // 2, + self.x + size // 2, + self.y + size // 2 + ] class Settings: - def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5, annotate_image=False, dnn_model_path=None): - self.crop_width = crop_width - self.crop_height = crop_height - self.corner_points_weight = corner_points_weight - self.entropy_points_weight = entropy_points_weight - self.face_points_weight = face_points_weight - self.annotate_image = annotate_image - self.destop_view_image = False - self.dnn_model_path = dnn_model_path + def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, 
entropy_points_weight=0.5, face_points_weight=0.5, annotate_image=False, dnn_model_path=None): + self.crop_width = crop_width + self.crop_height = crop_height + self.corner_points_weight = corner_points_weight + self.entropy_points_weight = entropy_points_weight + self.face_points_weight = face_points_weight + self.annotate_image = annotate_image + self.destop_view_image = False + self.dnn_model_path = dnn_model_path -- cgit v1.2.3
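
For reference, a minimal usage sketch of the reindented autocrop module follows. It is not part of the patches above: the directory and file names ("models/opencv", "input.png", "cropped.png", "debug.png") and the chosen weight values are illustrative assumptions, while the Settings constructor, download_and_cache_models, and crop_image signatures are taken directly from the diff. It assumes the script is run from the webui root so that the `modules` package is importable.

    # Sketch only: exercises the autocrop API exactly as shown in the reindented file.
    from PIL import Image

    from modules.textual_inversion import autocrop

    # Fetch (or reuse) the cached YuNet face-detection model used for face-weighted cropping.
    # "models/opencv" is an assumed cache directory, not something mandated by the patch.
    model_path = autocrop.download_and_cache_models("models/opencv")

    settings = autocrop.Settings(
        crop_width=512,
        crop_height=512,
        corner_points_weight=0.5,
        entropy_points_weight=0.5,
        face_points_weight=0.9,      # illustrative: bias the focal point toward detected faces
        annotate_image=True,         # also return a debug copy with the crop box drawn on it
        dnn_model_path=model_path,
    )

    im = Image.open("input.png").convert("RGB")
    results = autocrop.crop_image(im, settings)

    results[0].save("cropped.png")   # the 512x512 crop centred on the computed focal point
    if settings.annotate_image:
        results[1].save("debug.png") # annotated copy (green crop rectangle, focal-point markers)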