"
+ p.all_prompts = [f"{prompt}{hypernet_prompt_text}" for prompt in p.all_prompts]
params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
names = []
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 78248ed2..fe8b18b2 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -269,8 +269,8 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
v = v[1:-1] if v[0] == '"' and v[-1] == '"' else v
m = re_imagesize.match(v)
if m is not None:
- res[k+"-1"] = m.group(1)
- res[k+"-2"] = m.group(2)
+ res[f"{k}-1"] = m.group(1)
+ res[f"{k}-2"] = m.group(2)
else:
res[k] = v
diff --git a/modules/hashes.py b/modules/hashes.py
index 83272a07..032120f4 100644
--- a/modules/hashes.py
+++ b/modules/hashes.py
@@ -13,7 +13,7 @@ cache_data = None
def dump_cache():
- with filelock.FileLock(cache_filename+".lock"):
+ with filelock.FileLock(f"{cache_filename}.lock"):
with open(cache_filename, "w", encoding="utf8") as file:
json.dump(cache_data, file, indent=4)
@@ -22,7 +22,7 @@ def cache(subsection):
global cache_data
if cache_data is None:
- with filelock.FileLock(cache_filename+".lock"):
+ with filelock.FileLock(f"{cache_filename}.lock"):
if not os.path.isfile(cache_filename):
cache_data = {}
else:
diff --git a/modules/images.py b/modules/images.py
index 6ceb7c7c..a41965ab 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -467,7 +467,7 @@ def get_next_sequence_number(path, basename):
"""
result = -1
if basename != '':
- basename = basename + "-"
+ basename = f"{basename}-"
prefix_length = len(basename)
for p in os.listdir(path):
@@ -536,7 +536,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
add_number = opts.save_images_add_number or file_decoration == ''
if file_decoration != "" and add_number:
- file_decoration = "-" + file_decoration
+ file_decoration = f"-{file_decoration}"
file_decoration = namegen.apply(file_decoration) + suffix
@@ -566,7 +566,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
def _atomically_save_image(image_to_save, filename_without_extension, extension):
# save image with .tmp extension to avoid race condition when another process detects new image in the directory
- temp_file_path = filename_without_extension + ".tmp"
+ temp_file_path = f"{filename_without_extension}.tmp"
image_format = Image.registered_extensions()[extension]
if extension.lower() == '.png':
@@ -626,7 +626,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
if opts.save_txt and info is not None:
txt_fullfn = f"{fullfn_without_extension}.txt"
with open(txt_fullfn, "w", encoding="utf8") as file:
- file.write(info + "\n")
+ file.write(f"{info}\n")
else:
txt_fullfn = None
diff --git a/modules/interrogate.py b/modules/interrogate.py
index e1665708..9f7d657f 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -28,7 +28,7 @@ def category_types():
def download_default_clip_interrogate_categories(content_dir):
print("Downloading CLIP categories...")
- tmpdir = content_dir + "_tmp"
+ tmpdir = f"{content_dir}_tmp"
category_types = ["artists", "flavors", "mediums", "movements"]
try:
@@ -214,7 +214,7 @@ class InterrogateModels:
if shared.opts.interrogate_return_ranks:
res += f", ({match}:{score/100:.3f})"
else:
- res += ", " + match
+ res += f", {match}"
except Exception:
print("Error interrogating", file=sys.stderr)
diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py
index f3d49c44..f880bc3c 100644
--- a/modules/models/diffusion/ddpm_edit.py
+++ b/modules/models/diffusion/ddpm_edit.py
@@ -223,7 +223,7 @@ class DDPM(pl.LightningModule):
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
- print("Deleting key {} from state_dict.".format(k))
+ print(f"Deleting key {k} from state_dict.")
del sd[k]
missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
sd, strict=False)
@@ -386,7 +386,7 @@ class DDPM(pl.LightningModule):
_, loss_dict_no_ema = self.shared_step(batch)
with self.ema_scope():
_, loss_dict_ema = self.shared_step(batch)
- loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
+ loss_dict_ema = {f"{key}_ema": loss_dict_ema[key] for key in loss_dict_ema}
self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py
index eb5f4e76..11b330bc 100644
--- a/modules/models/diffusion/uni_pc/uni_pc.py
+++ b/modules/models/diffusion/uni_pc/uni_pc.py
@@ -94,7 +94,7 @@ class NoiseScheduleVP:
"""
if schedule not in ['discrete', 'linear', 'cosine']:
- raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(schedule))
+ raise ValueError(f"Unsupported noise schedule {schedule}. The schedule needs to be 'discrete' or 'linear' or 'cosine'")
self.schedule = schedule
if schedule == 'discrete':
@@ -469,7 +469,7 @@ class UniPC:
t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device)
return t
else:
- raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
+ raise ValueError(f"Unsupported skip_type {skip_type}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'")
def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
"""
diff --git a/modules/ngrok.py b/modules/ngrok.py
index 1ad7989b..7a7b4b26 100644
--- a/modules/ngrok.py
+++ b/modules/ngrok.py
@@ -7,8 +7,8 @@ def connect(token, port, region):
else:
if ':' in token:
# token = authtoken:username:password
- account = token.split(':')[1] + ':' + token.split(':')[-1]
- token = token.split(':')[0]
+ token, username, password = token.split(':', 2)
+ account = f"{username}:{password}"
config = conf.PyngrokConfig(
auth_token=token, region=region
diff --git a/modules/paths.py b/modules/paths.py
index 0e1e00e7..acf1894b 100644
--- a/modules/paths.py
+++ b/modules/paths.py
@@ -16,7 +16,7 @@ for possible_sd_path in possible_sd_paths:
sd_path = os.path.abspath(possible_sd_path)
break
-assert sd_path is not None, "Couldn't find Stable Diffusion in any of: " + str(possible_sd_paths)
+assert sd_path is not None, f"Couldn't find Stable Diffusion in any of: {possible_sd_paths}"
path_dirs = [
(sd_path, 'ldm', 'Stable Diffusion', []),
diff --git a/modules/processing.py b/modules/processing.py
index e786791a..1a76e552 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -500,7 +500,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])
- negative_prompt_text = "\nNegative prompt: " + p.all_negative_prompts[index] if p.all_negative_prompts[index] else ""
+ negative_prompt_text = f"\nNegative prompt: {p.all_negative_prompts[index]}" if p.all_negative_prompts[index] else ""
return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip()
@@ -780,7 +780,16 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
devices.torch_gc()
- res = Processed(p, output_images, p.all_seeds[0], infotext(), comments="".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], index_of_first_image=index_of_first_image, infotexts=infotexts)
+ res = Processed(
+ p,
+ images_list=output_images,
+ seed=p.all_seeds[0],
+ info=infotext(),
+ comments="".join(f"\n\n{comment}" for comment in comments),
+ subseed=p.all_subseeds[0],
+ index_of_first_image=index_of_first_image,
+ infotexts=infotexts,
+ )
if p.scripts is not None:
p.scripts.postprocess(p, res)
diff --git a/modules/progress.py b/modules/progress.py
index 5655346b..948e6f00 100644
--- a/modules/progress.py
+++ b/modules/progress.py
@@ -96,7 +96,8 @@ def progressapi(req: ProgressRequest):
if image is not None:
buffered = io.BytesIO()
image.save(buffered, format="png")
- live_preview = 'data:image/png;base64,' + base64.b64encode(buffered.getvalue()).decode("ascii")
+ base64_image = base64.b64encode(buffered.getvalue()).decode('ascii')
+ live_preview = f"data:image/png;base64,{base64_image}"
id_live_preview = shared.state.id_live_preview
else:
live_preview = None
diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py
index d6079433..efd7fca5 100644
--- a/modules/realesrgan_model.py
+++ b/modules/realesrgan_model.py
@@ -28,9 +28,9 @@ class UpscalerRealESRGAN(Upscaler):
for scaler in scalers:
if scaler.local_data_path.startswith("http"):
filename = modelloader.friendly_name(scaler.local_data_path)
- local = next(iter([local_model for local_model in local_model_paths if local_model.endswith(filename + '.pth')]), None)
- if local:
- scaler.local_data_path = local
+ local_model_candidates = [local_model for local_model in local_model_paths if local_model.endswith(f"{filename}.pth")]
+ if local_model_candidates:
+ scaler.local_data_path = local_model_candidates[0]
if scaler.name in opts.realesrgan_enabled_models:
self.scalers.append(scaler)
@@ -47,7 +47,7 @@ class UpscalerRealESRGAN(Upscaler):
info = self.load_model(path)
if not os.path.exists(info.local_data_path):
- print("Unable to load RealESRGAN model: %s" % info.name)
+ print(f"Unable to load RealESRGAN model: {info.name}")
return img
upsampler = RealESRGANer(
diff --git a/modules/scripts.py b/modules/scripts.py
index 4d0bbd66..d945b89f 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -163,7 +163,8 @@ class Script:
"""helper function to generate id for a HTML element, constructs final id out of script name, tab and user-supplied item_id"""
need_tabname = self.show(True) == self.show(False)
- tabname = ('img2img' if self.is_img2img else 'txt2txt') + "_" if need_tabname else ""
+ tabkind = 'img2img' if self.is_img2img else 'txt2txt'
+ tabname = f"{tabkind}_" if need_tabname else ""
title = re.sub(r'[^a-z_0-9]', '', re.sub(r'\s', '_', self.title().lower()))
return f'script_{tabname}{title}_{item_id}'
@@ -526,7 +527,7 @@ def add_classes_to_gradio_component(comp):
this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others
"""
- comp.elem_classes = ["gradio-" + comp.get_block_name(), *(comp.elem_classes or [])]
+ comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])]
if getattr(comp, 'multiselect', False):
comp.elem_classes.append('multiselect')
diff --git a/modules/sd_hijack_clip_old.py b/modules/sd_hijack_clip_old.py
index 6d9fbbe6..a3476e95 100644
--- a/modules/sd_hijack_clip_old.py
+++ b/modules/sd_hijack_clip_old.py
@@ -75,7 +75,8 @@ def forward_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, text
self.hijack.comments += hijack_comments
if len(used_custom_terms) > 0:
- self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms]))
+ embedding_names = ", ".join(f"{word} [{checksum}]" for word, checksum in used_custom_terms)
+ self.hijack.comments.append(f"Used embeddings: {embedding_names}")
self.hijack.fixes = hijack_fixes
return self.process_tokens(remade_batch_tokens, batch_multipliers)
diff --git a/modules/sd_hijack_unet.py b/modules/sd_hijack_unet.py
index 15858263..ca1daf45 100644
--- a/modules/sd_hijack_unet.py
+++ b/modules/sd_hijack_unet.py
@@ -18,7 +18,7 @@ class TorchHijackForUnet:
if hasattr(torch, item):
return getattr(torch, item)
- raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item))
+ raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")
def cat(self, tensors, *args, **kwargs):
if len(tensors) == 2:
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 59adc7cc..36f643e1 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -47,7 +47,7 @@ class CheckpointInfo:
self.model_name = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
self.hash = model_hash(filename)
- self.sha256 = hashes.sha256_from_cache(self.filename, "checkpoint/" + name)
+ self.sha256 = hashes.sha256_from_cache(self.filename, f"checkpoint/{name}")
self.shorthash = self.sha256[0:10] if self.sha256 else None
self.title = name if self.shorthash is None else f'{name} [{self.shorthash}]'
@@ -69,7 +69,7 @@ class CheckpointInfo:
checkpoint_alisases[id] = self
def calculate_shorthash(self):
- self.sha256 = hashes.sha256(self.filename, "checkpoint/" + self.name)
+ self.sha256 = hashes.sha256(self.filename, f"checkpoint/{self.name}")
if self.sha256 is None:
return
diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py
index 9398f528..7a79925a 100644
--- a/modules/sd_models_config.py
+++ b/modules/sd_models_config.py
@@ -111,7 +111,7 @@ def find_checkpoint_config_near_filename(info):
if info is None:
return None
- config = os.path.splitext(info.filename)[0] + ".yaml"
+ config = f"{os.path.splitext(info.filename)[0]}.yaml"
if os.path.exists(config):
return config
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index eb98e599..0fc9f456 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -198,7 +198,7 @@ class TorchHijack:
if hasattr(torch, item):
return getattr(torch, item)
- raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item))
+ raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")
def randn_like(self, x):
if self.sampler_noises:
diff --git a/modules/sd_vae.py b/modules/sd_vae.py
index 9b00f76e..521e485a 100644
--- a/modules/sd_vae.py
+++ b/modules/sd_vae.py
@@ -89,7 +89,7 @@ def refresh_vae_list():
def find_vae_near_checkpoint(checkpoint_file):
checkpoint_path = os.path.splitext(checkpoint_file)[0]
- for vae_location in [checkpoint_path + ".vae.pt", checkpoint_path + ".vae.ckpt", checkpoint_path + ".vae.safetensors"]:
+ for vae_location in [f"{checkpoint_path}.vae.pt", f"{checkpoint_path}.vae.ckpt", f"{checkpoint_path}.vae.safetensors"]:
if os.path.isfile(vae_location):
return vae_location
diff --git a/modules/styles.py b/modules/styles.py
index 9ed85991..11642075 100644
--- a/modules/styles.py
+++ b/modules/styles.py
@@ -74,7 +74,7 @@ class StyleDatabase:
def save_styles(self, path: str) -> None:
# Always keep a backup file around
if os.path.exists(path):
- shutil.copy(path, path + ".bak")
+ shutil.copy(path, f"{path}.bak")
fd = os.open(path, os.O_RDWR|os.O_CREAT)
with os.fdopen(fd, "w", encoding="utf-8-sig", newline='') as file:
diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py
index 68e1103c..ba1bdcd4 100644
--- a/modules/textual_inversion/autocrop.py
+++ b/modules/textual_inversion/autocrop.py
@@ -111,7 +111,7 @@ def focal_point(im, settings):
if corner_centroid is not None:
color = BLUE
box = corner_centroid.bounding(max_size * corner_centroid.weight)
- d.text((box[0], box[1]-15), "Edge: %.02f" % corner_centroid.weight, fill=color)
+ d.text((box[0], box[1]-15), f"Edge: {corner_centroid.weight:.02f}", fill=color)
d.ellipse(box, outline=color)
if len(corner_points) > 1:
for f in corner_points:
@@ -119,7 +119,7 @@ def focal_point(im, settings):
if entropy_centroid is not None:
color = "#ff0"
box = entropy_centroid.bounding(max_size * entropy_centroid.weight)
- d.text((box[0], box[1]-15), "Entropy: %.02f" % entropy_centroid.weight, fill=color)
+ d.text((box[0], box[1]-15), f"Entropy: {entropy_centroid.weight:.02f}", fill=color)
d.ellipse(box, outline=color)
if len(entropy_points) > 1:
for f in entropy_points:
@@ -127,7 +127,7 @@ def focal_point(im, settings):
if face_centroid is not None:
color = RED
box = face_centroid.bounding(max_size * face_centroid.weight)
- d.text((box[0], box[1]-15), "Face: %.02f" % face_centroid.weight, fill=color)
+ d.text((box[0], box[1]-15), f"Face: {face_centroid.weight:.02f}", fill=color)
d.ellipse(box, outline=color)
if len(face_points) > 1:
for f in face_points:
diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py
index af9fbcf2..41610e03 100644
--- a/modules/textual_inversion/dataset.py
+++ b/modules/textual_inversion/dataset.py
@@ -72,7 +72,7 @@ class PersonalizedBase(Dataset):
except Exception:
continue
- text_filename = os.path.splitext(path)[0] + ".txt"
+ text_filename = f"{os.path.splitext(path)[0]}.txt"
filename = os.path.basename(path)
if os.path.exists(text_filename):
diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py
index 4a29151d..da0bcb26 100644
--- a/modules/textual_inversion/preprocess.py
+++ b/modules/textual_inversion/preprocess.py
@@ -63,9 +63,9 @@ def save_pic_with_caption(image, index, params: PreprocessParams, existing_capti
image.save(os.path.join(params.dstdir, f"{basename}.png"))
if params.preprocess_txt_action == 'prepend' and existing_caption:
- caption = existing_caption + ' ' + caption
+ caption = f"{existing_caption} {caption}"
elif params.preprocess_txt_action == 'append' and existing_caption:
- caption = caption + ' ' + existing_caption
+ caption = f"{caption} {existing_caption}"
elif params.preprocess_txt_action == 'copy' and existing_caption:
caption = existing_caption
@@ -174,7 +174,7 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
params.src = filename
existing_caption = None
- existing_caption_filename = os.path.splitext(filename)[0] + '.txt'
+ existing_caption_filename = f"{os.path.splitext(filename)[0]}.txt"
if os.path.exists(existing_caption_filename):
with open(existing_caption_filename, 'r', encoding="utf8") as file:
existing_caption = file.read()
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 379df243..4368eb63 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -69,7 +69,7 @@ class Embedding:
'hash': self.checksum(),
'optimizer_state_dict': self.optimizer_state_dict,
}
- torch.save(optimizer_saved_dict, filename + '.optim')
+ torch.save(optimizer_saved_dict, f"{filename}.optim")
def checksum(self):
if self.cached_checksum is not None:
@@ -437,8 +437,8 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate, weight_decay=0.0)
if shared.opts.save_optimizer_state:
optimizer_state_dict = None
- if os.path.exists(filename + '.optim'):
- optimizer_saved_dict = torch.load(filename + '.optim', map_location='cpu')
+ if os.path.exists(f"{filename}.optim"):
+ optimizer_saved_dict = torch.load(f"{filename}.optim", map_location='cpu')
if embedding.checksum() == optimizer_saved_dict.get('hash', None):
optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
@@ -599,7 +599,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
data = torch.load(last_saved_file)
info.add_text("sd-ti-embedding", embedding_to_b64(data))
- title = "<{}>".format(data.get('name', '???'))
+ title = f"<{data.get('name', '???')}>"
try:
vectorSize = list(data['string_to_param'].values())[0].shape[0]
@@ -608,8 +608,8 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
checkpoint = sd_models.select_checkpoint()
footer_left = checkpoint.model_name
- footer_mid = '[{}]'.format(checkpoint.shorthash)
- footer_right = '{}v {}s'.format(vectorSize, steps_done)
+ footer_mid = f'[{checkpoint.shorthash}]'
+ footer_right = f'{vectorSize}v {steps_done}s'
captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right)
captioned_image = insert_image_data_embed(captioned_image, data)
diff --git a/modules/ui.py b/modules/ui.py
index 34b2aaff..d02f6e82 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -101,7 +101,7 @@ def visit(x, func, path=""):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
- func(path + "/" + str(x.label), x)
+ func(f"{path}/{x.label}", x)
def add_style(name: str, prompt: str, negative_prompt: str):
@@ -166,7 +166,7 @@ def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_di
img = Image.open(image)
filename = os.path.basename(image)
left, _ = os.path.splitext(filename)
- print(interrogation_function(img), file=open(os.path.join(ii_output_dir, left + ".txt"), 'a'))
+ print(interrogation_function(img), file=open(os.path.join(ii_output_dir, f"{left}.txt"), 'a'))
return [gr.update(), None]
@@ -182,29 +182,29 @@ def interrogate_deepbooru(image):
def create_seed_inputs(target_interface):
- with FormRow(elem_id=target_interface + '_seed_row', variant="compact"):
- seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=target_interface + '_seed')
+ with FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"):
+ seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed")
seed.style(container=False)
- random_seed = ToolButton(random_symbol, elem_id=target_interface + '_random_seed', label='Random seed')
- reuse_seed = ToolButton(reuse_symbol, elem_id=target_interface + '_reuse_seed', label='Reuse seed')
+ random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed')
+ reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed')
- seed_checkbox = gr.Checkbox(label='Extra', elem_id=target_interface + '_subseed_show', value=False)
+ seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
- with FormRow(visible=False, elem_id=target_interface + '_subseed_row') as seed_extra_row_1:
+ with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
- subseed = gr.Number(label='Variation seed', value=-1, elem_id=target_interface + '_subseed')
+ subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed")
subseed.style(container=False)
- random_subseed = ToolButton(random_symbol, elem_id=target_interface + '_random_subseed')
- reuse_subseed = ToolButton(reuse_symbol, elem_id=target_interface + '_reuse_subseed')
- subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=target_interface + '_subseed_strength')
+ random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed")
+ reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed")
+ subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength")
with FormRow(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
- seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=target_interface + '_seed_resize_from_w')
- seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=target_interface + '_seed_resize_from_h')
+ seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w")
+ seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h")
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
@@ -765,7 +765,7 @@ def create_ui():
)
button.click(
fn=lambda: None,
- _js="switch_to_"+name.replace(" ", "_"),
+ _js=f"switch_to_{name.replace(' ', '_')}",
inputs=[],
outputs=[],
)
@@ -1462,18 +1462,18 @@ def create_ui():
elif t == bool:
comp = gr.Checkbox
else:
- raise Exception(f'bad options item type: {str(t)} for key {key}')
+ raise Exception(f'bad options item type: {t} for key {key}')
- elem_id = "setting_"+key
+ elem_id = f"setting_{key}"
if info.refresh is not None:
if is_quicksettings:
res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {}))
- create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key)
+ create_refresh_button(res, info.refresh, info.component_args, f"refresh_{key}")
else:
with FormRow():
res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {}))
- create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key)
+ create_refresh_button(res, info.refresh, info.component_args, f"refresh_{key}")
else:
res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {}))
@@ -1545,7 +1545,7 @@ def create_ui():
current_tab.__exit__()
gr.Group()
- current_tab = gr.TabItem(elem_id="settings_{}".format(elem_id), label=text)
+ current_tab = gr.TabItem(elem_id=f"settings_{elem_id}", label=text)
current_tab.__enter__()
current_row = gr.Column(variant='compact')
current_row.__enter__()
@@ -1664,7 +1664,7 @@ def create_ui():
for interface, label, ifid in interfaces:
if label in shared.opts.hidden_tabs:
continue
- with gr.TabItem(label, id=ifid, elem_id='tab_' + ifid):
+ with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"):
interface.render()
if os.path.exists(os.path.join(script_path, "notification.mp3")):
@@ -1771,10 +1771,10 @@ def create_ui():
def loadsave(path, x):
def apply_field(obj, field, condition=None, init_field=None):
- key = path + "/" + field
+ key = f"{path}/{field}"
if getattr(obj, 'custom_script_source', None) is not None:
- key = 'customscript/' + obj.custom_script_source + '/' + key
+ key = f"customscript/{obj.custom_script_source}/{key}"
if getattr(obj, 'do_not_save_to_config', False):
return
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index 99ac8756..d9faf85a 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -61,7 +61,8 @@ def save_config_state(name):
if not name:
name = "Config"
current_config_state["name"] = name
- filename = os.path.join(config_states_dir, datetime.now().strftime("%Y_%m_%d-%H_%M_%S") + "_" + name + ".json")
+ timestamp = datetime.now().strftime('%Y_%m_%d-%H_%M_%S')
+ filename = os.path.join(config_states_dir, f"{timestamp}_{name}.json")
print(f"Saving backup of webui/extension state to {filename}.")
with open(filename, "w", encoding="utf-8") as f:
json.dump(current_config_state, f)
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index 86c05a55..8c3dea56 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -69,7 +69,9 @@ class ExtraNetworksPage:
pass
def link_preview(self, filename):
- return "./sd_extra_networks/thumb?filename=" + urllib.parse.quote(filename.replace('\\', '/')) + "&mtime=" + str(os.path.getmtime(filename))
+ quoted_filename = urllib.parse.quote(filename.replace('\\', '/'))
+ mtime = os.path.getmtime(filename)
+ return f"./sd_extra_networks/thumb?filename={quoted_filename}&mtime={mtime}"
def search_terms_from_path(self, filename, possible_directories=None):
abspath = os.path.abspath(filename)
diff --git a/scripts/custom_code.py b/scripts/custom_code.py
index 4071d86d..f36a3675 100644
--- a/scripts/custom_code.py
+++ b/scripts/custom_code.py
@@ -77,7 +77,7 @@ return process_images(p)
module.display = display
indent = " " * indent_level
- indented = code.replace('\n', '\n' + indent)
+ indented = code.replace('\n', f"\n{indent}")
body = f"""def __webuitemp__():
{indent}{indented}
__webuitemp__()"""
diff --git a/scripts/loopback.py b/scripts/loopback.py
index d3065fe6..ad6609be 100644
--- a/scripts/loopback.py
+++ b/scripts/loopback.py
@@ -84,7 +84,7 @@ class Script(scripts.Script):
p.color_corrections = initial_color_corrections
if append_interrogation != "None":
- p.prompt = original_prompt + ", " if original_prompt != "" else ""
+ p.prompt = f"{original_prompt}, " if original_prompt else ""
if append_interrogation == "CLIP":
p.prompt += shared.interrogator.interrogate(p.init_images[0])
elif append_interrogation == "DeepBooru":
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index 01d97791..a725d74a 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -439,7 +439,7 @@ class Script(scripts.Script):
z_type.change(fn=select_axis, inputs=[z_type,z_values_dropdown], outputs=[fill_z_button,z_values,z_values_dropdown])
def get_dropdown_update_from_params(axis,params):
- val_key = axis + " Values"
+ val_key = f"{axis} Values"
vals = params.get(val_key,"")
valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x]
return gr.update(value = valslist)
--
cgit v1.2.3
From 762265eab58cdb8f2d6398769bab43d8b8db0075 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 10 May 2023 07:52:45 +0300
Subject: autofixes from ruff
---
extensions-builtin/LDSR/ldsr_model_arch.py | 1 -
extensions-builtin/LDSR/sd_hijack_autoencoder.py | 2 +-
modules/api/api.py | 14 +++++++-------
modules/extras.py | 4 ++--
modules/images.py | 4 ++--
modules/img2img.py | 2 +-
modules/prompt_parser.py | 2 +-
modules/realesrgan_model.py | 2 +-
modules/sd_disable_initialization.py | 2 +-
modules/sd_hijack.py | 4 ++--
modules/sd_hijack_ip2p.py | 2 +-
modules/sd_hijack_optimizations.py | 1 -
modules/sd_models.py | 6 +++---
modules/textual_inversion/textual_inversion.py | 2 +-
modules/ui.py | 13 ++++++-------
modules/ui_extensions.py | 2 +-
modules/ui_extra_networks.py | 2 +-
pyproject.toml | 4 +++-
scripts/outpainting_mk_2.py | 2 +-
scripts/postprocessing_upscale.py | 6 +++---
scripts/xyz_grid.py | 2 +-
webui.py | 2 +-
22 files changed, 40 insertions(+), 41 deletions(-)
diff --git a/extensions-builtin/LDSR/ldsr_model_arch.py b/extensions-builtin/LDSR/ldsr_model_arch.py
index bc11cc6e..2339de7f 100644
--- a/extensions-builtin/LDSR/ldsr_model_arch.py
+++ b/extensions-builtin/LDSR/ldsr_model_arch.py
@@ -110,7 +110,6 @@ class LDSR:
diffusion_steps = int(steps)
eta = 1.0
- down_sample_method = 'Lanczos'
gc.collect()
if torch.cuda.is_available:
diff --git a/extensions-builtin/LDSR/sd_hijack_autoencoder.py b/extensions-builtin/LDSR/sd_hijack_autoencoder.py
index 8e03c7f8..db2231dd 100644
--- a/extensions-builtin/LDSR/sd_hijack_autoencoder.py
+++ b/extensions-builtin/LDSR/sd_hijack_autoencoder.py
@@ -165,7 +165,7 @@ class VQModel(pl.LightningModule):
def validation_step(self, batch, batch_idx):
log_dict = self._validation_step(batch, batch_idx)
with self.ema_scope():
- log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema")
+ self._validation_step(batch, batch_idx, suffix="_ema")
return log_dict
def _validation_step(self, batch, batch_idx, suffix=""):
diff --git a/modules/api/api.py b/modules/api/api.py
index 9bb95dfd..d47c39fc 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -60,7 +60,7 @@ def decode_base64_to_image(encoding):
try:
image = Image.open(BytesIO(base64.b64decode(encoding)))
return image
- except Exception as err:
+ except Exception:
raise HTTPException(status_code=500, detail="Invalid encoded image")
def encode_pil_to_base64(image):
@@ -264,11 +264,11 @@ class Api:
if request.alwayson_scripts and (len(request.alwayson_scripts) > 0):
for alwayson_script_name in request.alwayson_scripts.keys():
alwayson_script = self.get_script(alwayson_script_name, script_runner)
- if alwayson_script == None:
+ if alwayson_script is None:
raise HTTPException(status_code=422, detail=f"always on script {alwayson_script_name} not found")
# Selectable script in always on script param check
- if alwayson_script.alwayson == False:
- raise HTTPException(status_code=422, detail=f"Cannot have a selectable script in the always on scripts params")
+ if alwayson_script.alwayson is False:
+ raise HTTPException(status_code=422, detail="Cannot have a selectable script in the always on scripts params")
# always on script with no arg should always run so you don't really need to add them to the requests
if "args" in request.alwayson_scripts[alwayson_script_name]:
# min between arg length in scriptrunner and arg length in the request
@@ -310,7 +310,7 @@ class Api:
p.outpath_samples = opts.outdir_txt2img_samples
shared.state.begin()
- if selectable_scripts != None:
+ if selectable_scripts is not None:
p.script_args = script_args
processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
else:
@@ -367,7 +367,7 @@ class Api:
p.outpath_samples = opts.outdir_img2img_samples
shared.state.begin()
- if selectable_scripts != None:
+ if selectable_scripts is not None:
p.script_args = script_args
processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
else:
@@ -642,7 +642,7 @@ class Api:
sd_hijack.apply_optimizations()
shared.state.end()
return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
- except AssertionError as msg:
+ except AssertionError:
shared.state.end()
return TrainResponse(info=f"train embedding error: {error}")
diff --git a/modules/extras.py b/modules/extras.py
index ff4e9c4e..eb4f0b42 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -136,14 +136,14 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
result_is_instruct_pix2pix_model = False
if theta_func2:
- shared.state.textinfo = f"Loading B"
+ shared.state.textinfo = "Loading B"
print(f"Loading {secondary_model_info.filename}...")
theta_1 = sd_models.read_state_dict(secondary_model_info.filename, map_location='cpu')
else:
theta_1 = None
if theta_func1:
- shared.state.textinfo = f"Loading C"
+ shared.state.textinfo = "Loading C"
print(f"Loading {tertiary_model_info.filename}...")
theta_2 = sd_models.read_state_dict(tertiary_model_info.filename, map_location='cpu')
diff --git a/modules/images.py b/modules/images.py
index a41965ab..3d5d76cc 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -409,13 +409,13 @@ class FilenameGenerator:
time_format = args[0] if len(args) > 0 and args[0] != "" else self.default_time_format
try:
time_zone = pytz.timezone(args[1]) if len(args) > 1 else None
- except pytz.exceptions.UnknownTimeZoneError as _:
+ except pytz.exceptions.UnknownTimeZoneError:
time_zone = None
time_zone_time = time_datetime.astimezone(time_zone)
try:
formatted_time = time_zone_time.strftime(time_format)
- except (ValueError, TypeError) as _:
+ except (ValueError, TypeError):
formatted_time = time_zone_time.strftime(self.default_time_format)
return sanitize_filename_part(formatted_time, replace_spaces=False)
diff --git a/modules/img2img.py b/modules/img2img.py
index 9fc3a698..cdae301a 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -59,7 +59,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
# try to find corresponding mask for an image using simple filename matching
mask_image_path = os.path.join(inpaint_mask_dir, os.path.basename(image))
# if not found use first one ("same mask for all images" use-case)
- if not mask_image_path in inpaint_masks:
+ if mask_image_path not in inpaint_masks:
mask_image_path = inpaint_masks[0]
mask_image = Image.open(mask_image_path)
p.image_mask = mask_image
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index 69665372..e084e948 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -92,7 +92,7 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
def get_schedule(prompt):
try:
tree = schedule_parser.parse(prompt)
- except lark.exceptions.LarkError as e:
+ except lark.exceptions.LarkError:
if 0:
import traceback
traceback.print_exc()
diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py
index efd7fca5..9ec1adf2 100644
--- a/modules/realesrgan_model.py
+++ b/modules/realesrgan_model.py
@@ -134,6 +134,6 @@ def get_realesrgan_models(scaler):
),
]
return models
- except Exception as e:
+ except Exception:
print("Error making Real-ESRGAN models list:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
diff --git a/modules/sd_disable_initialization.py b/modules/sd_disable_initialization.py
index c4a09d15..9fc89dc6 100644
--- a/modules/sd_disable_initialization.py
+++ b/modules/sd_disable_initialization.py
@@ -61,7 +61,7 @@ class DisableInitialization:
if res is None:
res = original(url, *args, local_files_only=False, **kwargs)
return res
- except Exception as e:
+ except Exception:
return original(url, *args, local_files_only=False, **kwargs)
def transformers_utils_hub_get_from_cache(url, *args, local_files_only=False, **kwargs):
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index f4bb0266..d8135211 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -118,7 +118,7 @@ def weighted_forward(sd_model, x, c, w, *args, **kwargs):
try:
#Delete temporary weights if appended
del sd_model._custom_loss_weight
- except AttributeError as e:
+ except AttributeError:
pass
#If we have an old loss function, reset the loss function to the original one
@@ -133,7 +133,7 @@ def apply_weighted_forward(sd_model):
def undo_weighted_forward(sd_model):
try:
del sd_model.weighted_forward
- except AttributeError as e:
+ except AttributeError:
pass
diff --git a/modules/sd_hijack_ip2p.py b/modules/sd_hijack_ip2p.py
index 3c727d3b..41ed54a2 100644
--- a/modules/sd_hijack_ip2p.py
+++ b/modules/sd_hijack_ip2p.py
@@ -10,4 +10,4 @@ def should_hijack_ip2p(checkpoint_info):
ckpt_basename = os.path.basename(checkpoint_info.filename).lower()
cfg_basename = os.path.basename(sd_models_config.find_checkpoint_config_near_filename(checkpoint_info)).lower()
- return "pix2pix" in ckpt_basename and not "pix2pix" in cfg_basename
+ return "pix2pix" in ckpt_basename and "pix2pix" not in cfg_basename
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index f10865cd..b623d53d 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -296,7 +296,6 @@ def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_
if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes:
# the big matmul fits into our memory limit; do everything in 1 chunk,
# i.e. send it down the unchunked fast-path
- query_chunk_size = q_tokens
kv_chunk_size = k_tokens
with devices.without_autocast(disable=q.dtype == v.dtype):
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 36f643e1..11c1a344 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -239,7 +239,7 @@ def read_metadata_from_safetensors(filename):
if isinstance(v, str) and v[0:1] == '{':
try:
res[k] = json.loads(v)
- except Exception as e:
+ except Exception:
pass
return res
@@ -467,7 +467,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
try:
with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd):
sd_model = instantiate_from_config(sd_config.model)
- except Exception as e:
+ except Exception:
pass
if sd_model is None:
@@ -544,7 +544,7 @@ def reload_model_weights(sd_model=None, info=None):
try:
load_model_weights(sd_model, checkpoint_info, state_dict, timer)
- except Exception as e:
+ except Exception:
print("Failed to load checkpoint, restoring previous")
load_model_weights(sd_model, current_checkpoint_info, None, timer)
raise
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 4368eb63..f753b75f 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -603,7 +603,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
try:
vectorSize = list(data['string_to_param'].values())[0].shape[0]
- except Exception as e:
+ except Exception:
vectorSize = '?'
checkpoint = sd_models.select_checkpoint()
diff --git a/modules/ui.py b/modules/ui.py
index d02f6e82..2171f3aa 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -246,7 +246,7 @@ def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info:
all_seeds = gen_info.get('all_seeds', [-1])
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
- except json.decoder.JSONDecodeError as e:
+ except json.decoder.JSONDecodeError:
if gen_info_string != '':
print("Error parsing JSON generation info:", file=sys.stderr)
print(gen_info_string, file=sys.stderr)
@@ -736,8 +736,8 @@ def create_ui():
with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
gr.HTML(
- f"<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
- f"<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
+ "<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
+ "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
f"{hidden}</p>"
)
@@ -746,7 +746,6 @@ def create_ui():
img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir")
img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch]
- img2img_image_inputs = [init_img, sketch, init_img_with_mask, inpaint_color_sketch]
for i, tab in enumerate(img2img_tabs):
tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab])
@@ -1290,8 +1289,8 @@ def create_ui():
with gr.Column(elem_id='ti_gallery_container'):
ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
- ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4)
- ti_progress = gr.HTML(elem_id="ti_progress", value="")
+ gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4)
+ gr.HTML(elem_id="ti_progress", value="")
ti_outcome = gr.HTML(elem_id="ti_error", value="")
create_embedding.click(
@@ -1668,7 +1667,7 @@ def create_ui():
interface.render()
if os.path.exists(os.path.join(script_path, "notification.mp3")):
- audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
+ gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
footer = shared.html("footer.html")
footer = footer.format(versions=versions_html())
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index d9faf85a..ed70abe5 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -490,7 +490,7 @@ def create_ui():
config_states.list_config_states()
with gr.Blocks(analytics_enabled=False) as ui:
- with gr.Tabs(elem_id="tabs_extensions") as tabs:
+ with gr.Tabs(elem_id="tabs_extensions"):
with gr.TabItem("Installed", id="installed"):
with gr.Row(elem_id="extensions_installed_top"):
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index 8c3dea56..49e06289 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -263,7 +263,7 @@ def create_ui(container, button, tabname):
ui.stored_extra_pages = pages_in_preferred_order(extra_pages.copy())
ui.tabname = tabname
- with gr.Tabs(elem_id=tabname+"_extra_tabs") as tabs:
+ with gr.Tabs(elem_id=tabname+"_extra_tabs"):
for page in ui.stored_extra_pages:
page_id = page.title.lower().replace(" ", "_")
diff --git a/pyproject.toml b/pyproject.toml
index 9e9662ad..1e164abc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -2,7 +2,9 @@
ignore = [
"E501",
- "E731"
+ "E731",
+ "E402", # Module level import not at top of file
+ "F401" # Module imported but unused
]
exclude = ["extensions"]
diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py
index 670bb8ac..b10fed6c 100644
--- a/scripts/outpainting_mk_2.py
+++ b/scripts/outpainting_mk_2.py
@@ -72,7 +72,7 @@ def get_matched_noise(_np_src_image, np_mask_rgb, noise_q=1, color_variation=0.0
height = _np_src_image.shape[1]
num_channels = _np_src_image.shape[2]
- np_src_image = _np_src_image[:] * (1. - np_mask_rgb)
+ _np_src_image[:] * (1. - np_mask_rgb)
np_mask_grey = (np.sum(np_mask_rgb, axis=2) / 3.)
img_mask = np_mask_grey > 1e-6
ref_mask = np_mask_grey < 1e-3
diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py
index ef1186ac..edb70ac0 100644
--- a/scripts/postprocessing_upscale.py
+++ b/scripts/postprocessing_upscale.py
@@ -98,13 +98,13 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
assert upscaler2 or (upscaler_2_name is None), f'could not find upscaler named {upscaler_2_name}'
upscaled_image = self.upscale(pp.image, pp.info, upscaler1, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
- pp.info[f"Postprocess upscaler"] = upscaler1.name
+ pp.info["Postprocess upscaler"] = upscaler1.name
if upscaler2 and upscaler_2_visibility > 0:
second_upscale = self.upscale(pp.image, pp.info, upscaler2, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
upscaled_image = Image.blend(upscaled_image, second_upscale, upscaler_2_visibility)
- pp.info[f"Postprocess upscaler 2"] = upscaler2.name
+ pp.info["Postprocess upscaler 2"] = upscaler2.name
pp.image = upscaled_image
@@ -134,4 +134,4 @@ class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale):
assert upscaler1, f'could not find upscaler named {upscaler_name}'
pp.image = self.upscale(pp.image, pp.info, upscaler1, 0, upscale_by, 0, 0, False)
- pp.info[f"Postprocess upscaler"] = upscaler1.name
+ pp.info["Postprocess upscaler"] = upscaler1.name
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index a725d74a..2ff42ef8 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -316,7 +316,7 @@ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend
return Processed(p, [])
z_count = len(zs)
- sub_grids = [None] * z_count
+
for i in range(z_count):
start_index = (i * len(xs) * len(ys)) + i
end_index = start_index + len(xs) * len(ys)
diff --git a/webui.py b/webui.py
index 727ebd31..ec3d2aba 100644
--- a/webui.py
+++ b/webui.py
@@ -360,7 +360,7 @@ def webui():
if cmd_opts.subpath:
redirector = FastAPI()
redirector.get("/")
- mounted_app = gradio.mount_gradio_app(redirector, shared.demo, path=f"/{cmd_opts.subpath}")
+ gradio.mount_gradio_app(redirector, shared.demo, path=f"/{cmd_opts.subpath}")
wait_on_server(shared.demo)
print('Restarting UI...')
--
cgit v1.2.3
From 96d6ca4199e7c5eee8d451618de5161cea317c40 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 10 May 2023 08:25:25 +0300
Subject: manual fixes for ruff
---
extensions-builtin/LDSR/ldsr_model_arch.py | 2 +-
extensions-builtin/LDSR/scripts/ldsr_model.py | 3 +-
extensions-builtin/LDSR/sd_hijack_autoencoder.py | 10 +-
extensions-builtin/LDSR/sd_hijack_ddpm_v1.py | 26 ++---
extensions-builtin/ScuNET/scunet_model_arch.py | 9 +-
extensions-builtin/SwinIR/scripts/swinir_model.py | 2 +-
modules/api/api.py | 129 +++++++++++-----------
modules/api/models.py | 5 +-
modules/codeformer/codeformer_arch.py | 2 +-
modules/esrgan_model_arch.py | 2 +
modules/extra_networks_hypernet.py | 2 +-
modules/images.py | 4 +-
modules/img2img.py | 1 -
modules/interrogate.py | 1 -
modules/modelloader.py | 6 +-
modules/models/diffusion/ddpm_edit.py | 26 ++---
modules/models/diffusion/uni_pc/sampler.py | 3 +-
modules/processing.py | 2 +-
modules/prompt_parser.py | 11 +-
modules/textual_inversion/autocrop.py | 2 +-
modules/ui.py | 8 +-
modules/upscaler.py | 2 +-
22 files changed, 129 insertions(+), 129 deletions(-)
diff --git a/extensions-builtin/LDSR/ldsr_model_arch.py b/extensions-builtin/LDSR/ldsr_model_arch.py
index 2339de7f..a5fb8907 100644
--- a/extensions-builtin/LDSR/ldsr_model_arch.py
+++ b/extensions-builtin/LDSR/ldsr_model_arch.py
@@ -243,7 +243,7 @@ def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize
x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
log["sample_noquant"] = x_sample_noquant
log["sample_diff"] = torch.abs(x_sample_noquant - x_sample)
- except:
+ except Exception:
pass
log["sample"] = x_sample
diff --git a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py
index da19cff1..e8dc083c 100644
--- a/extensions-builtin/LDSR/scripts/ldsr_model.py
+++ b/extensions-builtin/LDSR/scripts/ldsr_model.py
@@ -7,7 +7,8 @@ from basicsr.utils.download_util import load_file_from_url
from modules.upscaler import Upscaler, UpscalerData
from ldsr_model_arch import LDSR
from modules import shared, script_callbacks
-import sd_hijack_autoencoder, sd_hijack_ddpm_v1
+import sd_hijack_autoencoder
+import sd_hijack_ddpm_v1
class UpscalerLDSR(Upscaler):
diff --git a/extensions-builtin/LDSR/sd_hijack_autoencoder.py b/extensions-builtin/LDSR/sd_hijack_autoencoder.py
index db2231dd..6303fed5 100644
--- a/extensions-builtin/LDSR/sd_hijack_autoencoder.py
+++ b/extensions-builtin/LDSR/sd_hijack_autoencoder.py
@@ -1,16 +1,21 @@
# The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo
# The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo
# As the LDSR upscaler relies on VQModel & VQModelInterface, the hijack aims to put them back into the ldm.models.autoencoder
-
+import numpy as np
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
from contextlib import contextmanager
+
+from torch.optim.lr_scheduler import LambdaLR
+
+from ldm.modules.ema import LitEma
from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
from ldm.modules.diffusionmodules.model import Encoder, Decoder
from ldm.util import instantiate_from_config
import ldm.models.autoencoder
+from packaging import version
class VQModel(pl.LightningModule):
def __init__(self,
@@ -249,7 +254,8 @@ class VQModel(pl.LightningModule):
if plot_ema:
with self.ema_scope():
xrec_ema, _ = self(x)
- if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema)
+ if x.shape[1] > 3:
+ xrec_ema = self.to_rgb(xrec_ema)
log["reconstructions_ema"] = xrec_ema
return log
diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
index 5c0488e5..4d3f6c56 100644
--- a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
+++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
@@ -450,7 +450,7 @@ class LatentDiffusionV1(DDPMV1):
self.cond_stage_key = cond_stage_key
try:
self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
- except:
+ except Exception:
self.num_downs = 0
if not scale_by_std:
self.scale_factor = scale_factor
@@ -877,16 +877,6 @@ class LatentDiffusionV1(DDPMV1):
c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
return self.p_losses(x, c, t, *args, **kwargs)
- def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset
- def rescale_bbox(bbox):
- x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
- y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
- w = min(bbox[2] / crop_coordinates[2], 1 - x0)
- h = min(bbox[3] / crop_coordinates[3], 1 - y0)
- return x0, y0, w, h
-
- return [rescale_bbox(b) for b in bboxes]
-
def apply_model(self, x_noisy, t, cond, return_ids=False):
if isinstance(cond, dict):
@@ -1157,8 +1147,10 @@ class LatentDiffusionV1(DDPMV1):
if i % log_every_t == 0 or i == timesteps - 1:
intermediates.append(x0_partial)
- if callback: callback(i)
- if img_callback: img_callback(img, i)
+ if callback:
+ callback(i)
+ if img_callback:
+ img_callback(img, i)
return img, intermediates
@torch.no_grad()
@@ -1205,8 +1197,10 @@ class LatentDiffusionV1(DDPMV1):
if i % log_every_t == 0 or i == timesteps - 1:
intermediates.append(img)
- if callback: callback(i)
- if img_callback: img_callback(img, i)
+ if callback:
+ callback(i)
+ if img_callback:
+ img_callback(img, i)
if return_intermediates:
return img, intermediates
@@ -1322,7 +1316,7 @@ class LatentDiffusionV1(DDPMV1):
if inpaint:
# make a simple center square
- b, h, w = z.shape[0], z.shape[2], z.shape[3]
+ h, w = z.shape[2], z.shape[3]
mask = torch.ones(N, h, w).to(self.device)
# zeros will be filled in
mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
diff --git a/extensions-builtin/ScuNET/scunet_model_arch.py b/extensions-builtin/ScuNET/scunet_model_arch.py
index 43ca8d36..8028918a 100644
--- a/extensions-builtin/ScuNET/scunet_model_arch.py
+++ b/extensions-builtin/ScuNET/scunet_model_arch.py
@@ -61,7 +61,9 @@ class WMSA(nn.Module):
Returns:
output: tensor shape [b h w c]
"""
- if self.type != 'W': x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2))
+ if self.type != 'W':
+ x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2))
+
x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size)
h_windows = x.size(1)
w_windows = x.size(2)
@@ -85,8 +87,9 @@ class WMSA(nn.Module):
output = self.linear(output)
output = rearrange(output, 'b (w1 w2) (p1 p2) c -> b (w1 p1) (w2 p2) c', w1=h_windows, p1=self.window_size)
- if self.type != 'W': output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2),
- dims=(1, 2))
+ if self.type != 'W':
+ output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2), dims=(1, 2))
+
return output
def relative_embedding(self):
diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py
index e8783bca..d77c3a92 100644
--- a/extensions-builtin/SwinIR/scripts/swinir_model.py
+++ b/extensions-builtin/SwinIR/scripts/swinir_model.py
@@ -45,7 +45,7 @@ class UpscalerSwinIR(Upscaler):
img = upscale(img, model)
try:
torch.cuda.empty_cache()
- except:
+ except Exception:
pass
return img
diff --git a/modules/api/api.py b/modules/api/api.py
index d47c39fc..f52d371b 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -15,7 +15,8 @@ from secrets import compare_digest
import modules.shared as shared
from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing
-from modules.api.models import *
+from modules.api import models
+from modules.shared import opts
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
from modules.textual_inversion.preprocess import preprocess
@@ -25,20 +26,21 @@ from modules.sd_models import checkpoints_list, unload_model_weights, reload_mod
from modules.sd_models_config import find_checkpoint_config_near_filename
from modules.realesrgan_model import get_realesrgan_models
from modules import devices
-from typing import List
+from typing import Dict, List, Any
import piexif
import piexif.helper
+
def upscaler_to_index(name: str):
try:
return [x.name.lower() for x in shared.sd_upscalers].index(name.lower())
- except:
- raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in sd_upscalers])}")
+ except Exception:
+ raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in shared.sd_upscalers])}")
def script_name_to_index(name, scripts):
try:
return [script.title().lower() for script in scripts].index(name.lower())
- except:
+ except Exception:
raise HTTPException(status_code=422, detail=f"Script '{name}' not found")
def validate_sampler_name(name):
@@ -99,7 +101,7 @@ def api_middleware(app: FastAPI):
import starlette # importing just so it can be placed on silent list
from rich.console import Console
console = Console()
- except:
+ except Exception:
import traceback
rich_available = False
@@ -166,36 +168,36 @@ class Api:
self.app = app
self.queue_lock = queue_lock
api_middleware(self.app)
- self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse)
- self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse)
- self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse)
- self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse)
- self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse)
- self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse)
+ self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=models.TextToImageResponse)
+ self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=models.ImageToImageResponse)
+ self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=models.ExtrasSingleImageResponse)
+ self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=models.ExtrasBatchImagesResponse)
+ self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=models.PNGInfoResponse)
+ self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=models.ProgressResponse)
self.add_api_route("/sdapi/v1/interrogate", self.interrogateapi, methods=["POST"])
self.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"])
self.add_api_route("/sdapi/v1/skip", self.skip, methods=["POST"])
- self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=OptionsModel)
+ self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=models.OptionsModel)
self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"])
- self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=FlagsModel)
- self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[SamplerItem])
- self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[UpscalerItem])
- self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[SDModelItem])
- self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[HypernetworkItem])
- self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[FaceRestorerItem])
- self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[RealesrganItem])
- self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[PromptStyleItem])
- self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=EmbeddingsResponse)
+ self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel)
+ self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[models.SamplerItem])
+ self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[models.UpscalerItem])
+ self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[models.SDModelItem])
+ self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[models.HypernetworkItem])
+ self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[models.FaceRestorerItem])
+ self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[models.RealesrganItem])
+ self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[models.PromptStyleItem])
+ self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=models.EmbeddingsResponse)
self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"])
- self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=CreateResponse)
- self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=CreateResponse)
- self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=PreprocessResponse)
- self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=TrainResponse)
- self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=TrainResponse)
- self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=MemoryResponse)
+ self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse)
+ self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse)
+ self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=models.PreprocessResponse)
+ self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=models.TrainResponse)
+ self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=models.TrainResponse)
+ self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=models.MemoryResponse)
self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"])
self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"])
- self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=ScriptsList)
+ self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList)
self.default_script_arg_txt2img = []
self.default_script_arg_img2img = []
@@ -224,7 +226,7 @@ class Api:
t2ilist = [str(title.lower()) for title in scripts.scripts_txt2img.titles]
i2ilist = [str(title.lower()) for title in scripts.scripts_img2img.titles]
- return ScriptsList(txt2img = t2ilist, img2img = i2ilist)
+ return models.ScriptsList(txt2img=t2ilist, img2img=i2ilist)
def get_script(self, script_name, script_runner):
if script_name is None or script_name == "":
@@ -276,7 +278,7 @@ class Api:
script_args[alwayson_script.args_from + idx] = request.alwayson_scripts[alwayson_script_name]["args"][idx]
return script_args
- def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
+ def text2imgapi(self, txt2imgreq: models.StableDiffusionTxt2ImgProcessingAPI):
script_runner = scripts.scripts_txt2img
if not script_runner.scripts:
script_runner.initialize_scripts(False)
@@ -320,9 +322,9 @@ class Api:
b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
- return TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js())
+ return models.TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js())
- def img2imgapi(self, img2imgreq: StableDiffusionImg2ImgProcessingAPI):
+ def img2imgapi(self, img2imgreq: models.StableDiffusionImg2ImgProcessingAPI):
init_images = img2imgreq.init_images
if init_images is None:
raise HTTPException(status_code=404, detail="Init image not found")
@@ -381,9 +383,9 @@ class Api:
img2imgreq.init_images = None
img2imgreq.mask = None
- return ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js())
+ return models.ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js())
- def extras_single_image_api(self, req: ExtrasSingleImageRequest):
+ def extras_single_image_api(self, req: models.ExtrasSingleImageRequest):
reqDict = setUpscalers(req)
reqDict['image'] = decode_base64_to_image(reqDict['image'])
@@ -391,9 +393,9 @@ class Api:
with self.queue_lock:
result = postprocessing.run_extras(extras_mode=0, image_folder="", input_dir="", output_dir="", save_output=False, **reqDict)
- return ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1])
+ return models.ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1])
- def extras_batch_images_api(self, req: ExtrasBatchImagesRequest):
+ def extras_batch_images_api(self, req: models.ExtrasBatchImagesRequest):
reqDict = setUpscalers(req)
image_list = reqDict.pop('imageList', [])
@@ -402,15 +404,15 @@ class Api:
with self.queue_lock:
result = postprocessing.run_extras(extras_mode=1, image_folder=image_folder, image="", input_dir="", output_dir="", save_output=False, **reqDict)
- return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1])
+ return models.ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1])
- def pnginfoapi(self, req: PNGInfoRequest):
+ def pnginfoapi(self, req: models.PNGInfoRequest):
if(not req.image.strip()):
- return PNGInfoResponse(info="")
+ return models.PNGInfoResponse(info="")
image = decode_base64_to_image(req.image.strip())
if image is None:
- return PNGInfoResponse(info="")
+ return models.PNGInfoResponse(info="")
geninfo, items = images.read_info_from_image(image)
if geninfo is None:
@@ -418,13 +420,13 @@ class Api:
items = {**{'parameters': geninfo}, **items}
- return PNGInfoResponse(info=geninfo, items=items)
+ return models.PNGInfoResponse(info=geninfo, items=items)
- def progressapi(self, req: ProgressRequest = Depends()):
+ def progressapi(self, req: models.ProgressRequest = Depends()):
# copy from check_progress_call of ui.py
if shared.state.job_count == 0:
- return ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo)
+ return models.ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo)
# avoid dividing zero
progress = 0.01
@@ -446,9 +448,9 @@ class Api:
if shared.state.current_image and not req.skip_current_image:
current_image = encode_pil_to_base64(shared.state.current_image)
- return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo)
+ return models.ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo)
- def interrogateapi(self, interrogatereq: InterrogateRequest):
+ def interrogateapi(self, interrogatereq: models.InterrogateRequest):
image_b64 = interrogatereq.image
if image_b64 is None:
raise HTTPException(status_code=404, detail="Image not found")
@@ -465,7 +467,7 @@ class Api:
else:
raise HTTPException(status_code=404, detail="Model not found")
- return InterrogateResponse(caption=processed)
+ return models.InterrogateResponse(caption=processed)
def interruptapi(self):
shared.state.interrupt()
@@ -570,36 +572,36 @@ class Api:
filename = create_embedding(**args) # create empty embedding
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() # reload embeddings so new one can be immediately used
shared.state.end()
- return CreateResponse(info=f"create embedding filename: {filename}")
+ return models.CreateResponse(info=f"create embedding filename: {filename}")
except AssertionError as e:
shared.state.end()
- return TrainResponse(info=f"create embedding error: {e}")
+ return models.TrainResponse(info=f"create embedding error: {e}")
def create_hypernetwork(self, args: dict):
try:
shared.state.begin()
filename = create_hypernetwork(**args) # create empty embedding
shared.state.end()
- return CreateResponse(info=f"create hypernetwork filename: {filename}")
+ return models.CreateResponse(info=f"create hypernetwork filename: {filename}")
except AssertionError as e:
shared.state.end()
- return TrainResponse(info=f"create hypernetwork error: {e}")
+ return models.TrainResponse(info=f"create hypernetwork error: {e}")
def preprocess(self, args: dict):
try:
shared.state.begin()
preprocess(**args) # quick operation unless blip/booru interrogation is enabled
shared.state.end()
- return PreprocessResponse(info = 'preprocess complete')
+ return models.PreprocessResponse(info = 'preprocess complete')
except KeyError as e:
shared.state.end()
- return PreprocessResponse(info=f"preprocess error: invalid token: {e}")
+ return models.PreprocessResponse(info=f"preprocess error: invalid token: {e}")
except AssertionError as e:
shared.state.end()
- return PreprocessResponse(info=f"preprocess error: {e}")
+ return models.PreprocessResponse(info=f"preprocess error: {e}")
except FileNotFoundError as e:
shared.state.end()
- return PreprocessResponse(info=f'preprocess error: {e}')
+ return models.PreprocessResponse(info=f'preprocess error: {e}')
def train_embedding(self, args: dict):
try:
@@ -617,10 +619,10 @@ class Api:
if not apply_optimizations:
sd_hijack.apply_optimizations()
shared.state.end()
- return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
+ return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
except AssertionError as msg:
shared.state.end()
- return TrainResponse(info=f"train embedding error: {msg}")
+ return models.TrainResponse(info=f"train embedding error: {msg}")
def train_hypernetwork(self, args: dict):
try:
@@ -641,14 +643,15 @@ class Api:
if not apply_optimizations:
sd_hijack.apply_optimizations()
shared.state.end()
- return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
+ return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
except AssertionError:
shared.state.end()
- return TrainResponse(info=f"train embedding error: {error}")
+ return models.TrainResponse(info=f"train embedding error: {error}")
def get_memory(self):
try:
- import os, psutil
+ import os
+ import psutil
process = psutil.Process(os.getpid())
res = process.memory_info() # only rss is cross-platform guaranteed so we dont rely on other values
ram_total = 100 * res.rss / process.memory_percent() # and total memory is calculated as actual value is not cross-platform safe
@@ -675,10 +678,10 @@ class Api:
'events': warnings,
}
else:
- cuda = { 'error': 'unavailable' }
+ cuda = {'error': 'unavailable'}
except Exception as err:
- cuda = { 'error': f'{err}' }
- return MemoryResponse(ram = ram, cuda = cuda)
+ cuda = {'error': f'{err}'}
+ return models.MemoryResponse(ram=ram, cuda=cuda)
def launch(self, server_name, port):
self.app.include_router(self.router)
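Replacing `from modules.api.models import *` with an explicit `models` namespace removes a shadowing hazard; a minimal sketch of the failure mode (toy names, not the real modules):

    # globals().update(...) is effectively what a star-import does at module
    # scope: it rebinds any name that happens to collide.
    shared = "module-level state the code relies on"
    exported = {"shared": "unrelated object exported by another module"}
    globals().update(exported)
    print(shared)  # -> "unrelated object exported by another module"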
diff --git a/modules/api/models.py b/modules/api/models.py
index 4a70f440..4d291076 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -223,8 +223,9 @@ for key in _options:
if(_options[key].dest != 'help'):
flag = _options[key]
_type = str
- if _options[key].default is not None: _type = type(_options[key].default)
- flags.update({flag.dest: (_type,Field(default=flag.default, description=flag.help))})
+ if _options[key].default is not None:
+ _type = type(_options[key].default)
+ flags.update({flag.dest: (_type, Field(default=flag.default, description=flag.help))})
FlagsModel = create_model("Flags", **flags)
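The FlagsModel construction above builds a pydantic model from argparse options at runtime; a self-contained sketch of the same pattern (hypothetical flags, pydantic v1 API):

    from pydantic import Field, create_model

    flags = {
        "port": (int, Field(default=7860, description="server port")),
        "listen": (bool, Field(default=False, description="bind 0.0.0.0")),
    }
    FlagsModel = create_model("Flags", **flags)
    print(FlagsModel().dict())  # {'port': 7860, 'listen': False}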
diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py
index 11dcc3ee..f1a7cf09 100644
--- a/modules/codeformer/codeformer_arch.py
+++ b/modules/codeformer/codeformer_arch.py
@@ -7,7 +7,7 @@ from torch import nn, Tensor
import torch.nn.functional as F
from typing import Optional, List
-from modules.codeformer.vqgan_arch import *
+from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock
from basicsr.utils import get_root_logger
from basicsr.utils.registry import ARCH_REGISTRY
diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py
index 6071fea7..7f8bc7c0 100644
--- a/modules/esrgan_model_arch.py
+++ b/modules/esrgan_model_arch.py
@@ -438,9 +438,11 @@ def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=
padding = padding if pad_type == 'zero' else 0
if convtype=='PartialConv2D':
+ from torchvision.ops import PartialConv2d # this is definitely not going to work, but PartialConv2d doesn't work anyway and this shuts up static analyzer
c = PartialConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
dilation=dilation, bias=bias, groups=groups)
elif convtype=='DeformConv2D':
+ from torchvision.ops import DeformConv2d # not tested
c = DeformConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
dilation=dilation, bias=bias, groups=groups)
elif convtype=='Conv3D':
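The branch-local imports added above defer optional dependencies to call time; a minimal sketch of the pattern (DeformConv2d does exist in torchvision.ops, unlike the PartialConv2d placeholder the in-diff comment disclaims):

    def pick_conv(convtype: str):
        if convtype == 'DeformConv2D':
            # imported only when this branch runs, so module import stays
            # cheap and static analyzers see the name defined before use
            from torchvision.ops import DeformConv2d
            return DeformConv2d
        import torch.nn as nn
        return nn.Conv2d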
diff --git a/modules/extra_networks_hypernet.py b/modules/extra_networks_hypernet.py
index 04f27c9f..aa2a14ef 100644
--- a/modules/extra_networks_hypernet.py
+++ b/modules/extra_networks_hypernet.py
@@ -1,4 +1,4 @@
-from modules import extra_networks, shared, extra_networks
+from modules import extra_networks, shared
from modules.hypernetworks import hypernetwork
diff --git a/modules/images.py b/modules/images.py
index 3d5d76cc..5eb6d855 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -472,9 +472,9 @@ def get_next_sequence_number(path, basename):
prefix_length = len(basename)
for p in os.listdir(path):
if p.startswith(basename):
- l = os.path.splitext(p[prefix_length:])[0].split('-') # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element)
+ parts = os.path.splitext(p[prefix_length:])[0].split('-') # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element)
try:
- result = max(int(l[0]), result)
+ result = max(int(parts[0]), result)
except ValueError:
pass
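Renaming `l` to `parts` resolves ruff's ambiguous-variable-name warning (E741); the parsing it performs, as a standalone sketch with a hypothetical filename:

    import os

    def first_number(filename: str, prefix: str = "sample-") -> int:
        # strip the basename prefix, drop the extension, take the first
        # dash-separated token as the sequence number
        parts = os.path.splitext(filename[len(prefix):])[0].split('-')
        return int(parts[0])

    print(first_number("sample-00042-seed123.png"))  # 42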
diff --git a/modules/img2img.py b/modules/img2img.py
index cdae301a..32b1ecd6 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -13,7 +13,6 @@ from modules.shared import opts, state
import modules.shared as shared
import modules.processing as processing
from modules.ui import plaintext_to_html
-import modules.images as images
import modules.scripts
diff --git a/modules/interrogate.py b/modules/interrogate.py
index 9f7d657f..22df9216 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -11,7 +11,6 @@ import torch.hub
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
-import modules.shared as shared
from modules import devices, paths, shared, lowvram, modelloader, errors
blip_image_eval_size = 384
diff --git a/modules/modelloader.py b/modules/modelloader.py
index cb85ac4f..cf685000 100644
--- a/modules/modelloader.py
+++ b/modules/modelloader.py
@@ -108,12 +108,12 @@ def move_files(src_path: str, dest_path: str, ext_filter: str = None):
print(f"Moving {file} from {src_path} to {dest_path}.")
try:
shutil.move(fullpath, dest_path)
- except:
+ except Exception:
pass
if len(os.listdir(src_path)) == 0:
print(f"Removing empty folder: {src_path}")
shutil.rmtree(src_path, True)
- except:
+ except Exception:
pass
@@ -141,7 +141,7 @@ def load_upscalers():
full_model = f"modules.{model_name}_model"
try:
importlib.import_module(full_model)
- except:
+ except Exception:
pass
datas = []
diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py
index f880bc3c..611c2b69 100644
--- a/modules/models/diffusion/ddpm_edit.py
+++ b/modules/models/diffusion/ddpm_edit.py
@@ -479,7 +479,7 @@ class LatentDiffusion(DDPM):
self.cond_stage_key = cond_stage_key
try:
self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
- except:
+ except Exception:
self.num_downs = 0
if not scale_by_std:
self.scale_factor = scale_factor
@@ -891,16 +891,6 @@ class LatentDiffusion(DDPM):
c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
return self.p_losses(x, c, t, *args, **kwargs)
- def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset
- def rescale_bbox(bbox):
- x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
- y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
- w = min(bbox[2] / crop_coordinates[2], 1 - x0)
- h = min(bbox[3] / crop_coordinates[3], 1 - y0)
- return x0, y0, w, h
-
- return [rescale_bbox(b) for b in bboxes]
-
def apply_model(self, x_noisy, t, cond, return_ids=False):
if isinstance(cond, dict):
@@ -1171,8 +1161,10 @@ class LatentDiffusion(DDPM):
if i % log_every_t == 0 or i == timesteps - 1:
intermediates.append(x0_partial)
- if callback: callback(i)
- if img_callback: img_callback(img, i)
+ if callback:
+ callback(i)
+ if img_callback:
+ img_callback(img, i)
return img, intermediates
@torch.no_grad()
@@ -1219,8 +1211,10 @@ class LatentDiffusion(DDPM):
if i % log_every_t == 0 or i == timesteps - 1:
intermediates.append(img)
- if callback: callback(i)
- if img_callback: img_callback(img, i)
+ if callback:
+ callback(i)
+ if img_callback:
+ img_callback(img, i)
if return_intermediates:
return img, intermediates
@@ -1337,7 +1331,7 @@ class LatentDiffusion(DDPM):
if inpaint:
# make a simple center square
- b, h, w = z.shape[0], z.shape[2], z.shape[3]
+ h, w = z.shape[2], z.shape[3]
mask = torch.ones(N, h, w).to(self.device)
# zeros will be filled in
mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
diff --git a/modules/models/diffusion/uni_pc/sampler.py b/modules/models/diffusion/uni_pc/sampler.py
index a241c8a7..0a9defa1 100644
--- a/modules/models/diffusion/uni_pc/sampler.py
+++ b/modules/models/diffusion/uni_pc/sampler.py
@@ -54,7 +54,8 @@ class UniPCSampler(object):
if conditioning is not None:
if isinstance(conditioning, dict):
ctmp = conditioning[list(conditioning.keys())[0]]
- while isinstance(ctmp, list): ctmp = ctmp[0]
+ while isinstance(ctmp, list):
+ ctmp = ctmp[0]
cbs = ctmp.shape[0]
if cbs != batch_size:
print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
diff --git a/modules/processing.py b/modules/processing.py
index 1a76e552..6f5233c1 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -664,7 +664,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if not shared.opts.dont_fix_second_order_samplers_schedule:
try:
step_multiplier = 2 if sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1
- except:
+ except Exception:
pass
uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps * step_multiplier, cached_uc)
c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps * step_multiplier, cached_c)
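The guarded lookup above doubles the conditioning schedule for second-order samplers, which evaluate the model twice per step; a standalone sketch of the selection logic (sampler aliases copied from the hunk):

    SECOND_ORDER = {'k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde',
                    'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'}

    def step_multiplier(sampler_alias: str) -> int:
        return 2 if sampler_alias in SECOND_ORDER else 1

    print(step_multiplier('k_heun'))   # 2 -> schedule over steps * 2
    print(step_multiplier('k_euler'))  # 1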
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index e084e948..3a720721 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -54,18 +54,21 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
"""
def collect_steps(steps, tree):
- l = [steps]
+ res = [steps]
+
class CollectSteps(lark.Visitor):
def scheduled(self, tree):
tree.children[-1] = float(tree.children[-1])
if tree.children[-1] < 1:
tree.children[-1] *= steps
tree.children[-1] = min(steps, int(tree.children[-1]))
- l.append(tree.children[-1])
+ res.append(tree.children[-1])
+
def alternate(self, tree):
- l.extend(range(1, steps+1))
+ res.extend(range(1, steps+1))
+
CollectSteps().visit(tree)
- return sorted(set(l))
+ return sorted(set(res))
def at_step(step, tree):
class AtStep(lark.Transformer):
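The renamed `res` list collects every step at which the prompt schedule changes; the same logic without the lark visitor, as a hedged standalone sketch:

    def collect_steps(steps: int, schedule_points: list[float]) -> list[int]:
        res = [steps]
        for p in schedule_points:
            if p < 1:          # fractions are relative to total steps
                p *= steps
            res.append(min(steps, int(p)))
        return sorted(set(res))

    print(collect_steps(20, [0.5, 15, 0.25]))  # [5, 10, 15, 20]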
diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py
index ba1bdcd4..d7d8d2e3 100644
--- a/modules/textual_inversion/autocrop.py
+++ b/modules/textual_inversion/autocrop.py
@@ -185,7 +185,7 @@ def image_face_points(im, settings):
try:
faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
- except:
+ except Exception:
continue
if len(faces) > 0:
diff --git a/modules/ui.py b/modules/ui.py
index 2171f3aa..6beda76f 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1,15 +1,9 @@
-import html
import json
-import math
import mimetypes
import os
-import platform
-import random
import sys
-import tempfile
-import time
import traceback
-from functools import partial, reduce
+from functools import reduce
import warnings
import gradio as gr
diff --git a/modules/upscaler.py b/modules/upscaler.py
index e2eaa730..0ad4fe99 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -45,7 +45,7 @@ class Upscaler:
try:
import cv2
self.can_tile = True
- except:
+ except Exception:
pass
@abstractmethod
From f741a98baccae100fcfb40c017b5c35c5cba1b0c Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 10 May 2023 08:43:42 +0300
Subject: imports cleanup for ruff
---
extensions-builtin/Lora/lora.py | 1 -
extensions-builtin/ScuNET/scripts/scunet_model.py | 1 -
extensions-builtin/SwinIR/scripts/swinir_model.py | 3 +--
modules/codeformer/codeformer_arch.py | 4 +---
modules/codeformer/vqgan_arch.py | 2 --
modules/codeformer_model.py | 4 +---
modules/config_states.py | 2 +-
modules/esrgan_model.py | 2 +-
modules/esrgan_model_arch.py | 1 -
modules/extensions.py | 1 -
modules/generation_parameters_copypaste.py | 4 ----
modules/hypernetworks/hypernetwork.py | 3 +--
modules/hypernetworks/ui.py | 2 --
modules/images.py | 2 +-
modules/img2img.py | 5 +----
modules/mac_specific.py | 1 -
modules/modelloader.py | 1 -
modules/models/diffusion/uni_pc/uni_pc.py | 1 -
modules/processing.py | 5 ++---
modules/sd_hijack.py | 2 +-
modules/sd_hijack_inpainting.py | 6 ------
modules/sd_hijack_ip2p.py | 5 +----
modules/sd_hijack_xlmr.py | 2 --
modules/sd_models.py | 2 +-
modules/sd_models_config.py | 1 -
modules/sd_samplers_kdiffusion.py | 1 -
modules/sd_vae.py | 3 ---
modules/shared.py | 3 ---
modules/styles.py | 9 ---------
modules/textual_inversion/autocrop.py | 4 +---
modules/textual_inversion/image_embedding.py | 2 +-
modules/textual_inversion/preprocess.py | 4 ----
modules/textual_inversion/textual_inversion.py | 1 -
modules/txt2img.py | 9 +++------
modules/ui.py | 5 ++---
modules/ui_extra_networks.py | 1 -
modules/ui_postprocessing.py | 2 +-
modules/upscaler.py | 2 --
modules/xlmr.py | 2 +-
pyproject.toml | 11 +++++++----
scripts/custom_code.py | 2 +-
scripts/outpainting_mk_2.py | 4 ++--
scripts/poor_mans_outpainting.py | 4 ++--
scripts/prompt_matrix.py | 7 ++-----
scripts/prompts_from_file.py | 5 +----
scripts/sd_upscale.py | 4 ++--
scripts/xyz_grid.py | 6 ++----
webui.py | 2 +-
48 files changed, 42 insertions(+), 114 deletions(-)
diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index ba1293df..0ab43229 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -1,4 +1,3 @@
-import glob
import os
import re
import torch
diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py
index c7fd5739..aa2fdb3a 100644
--- a/extensions-builtin/ScuNET/scripts/scunet_model.py
+++ b/extensions-builtin/ScuNET/scripts/scunet_model.py
@@ -13,7 +13,6 @@ import modules.upscaler
from modules import devices, modelloader
from scunet_model_arch import SCUNet as net
from modules.shared import opts
-from modules import images
class UpscalerScuNET(modules.upscaler.Upscaler):
diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py
index d77c3a92..55dd94ab 100644
--- a/extensions-builtin/SwinIR/scripts/swinir_model.py
+++ b/extensions-builtin/SwinIR/scripts/swinir_model.py
@@ -1,4 +1,3 @@
-import contextlib
import os
import numpy as np
@@ -8,7 +7,7 @@ from basicsr.utils.download_util import load_file_from_url
from tqdm import tqdm
from modules import modelloader, devices, script_callbacks, shared
-from modules.shared import cmd_opts, opts, state
+from modules.shared import opts, state
from swinir_model_arch import SwinIR as net
from swinir_model_arch_v2 import Swin2SR as net2
from modules.upscaler import Upscaler, UpscalerData
diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py
index f1a7cf09..00c407de 100644
--- a/modules/codeformer/codeformer_arch.py
+++ b/modules/codeformer/codeformer_arch.py
@@ -1,14 +1,12 @@
# this file is copied from CodeFormer repository. Please see comment in modules/codeformer_model.py
import math
-import numpy as np
import torch
from torch import nn, Tensor
import torch.nn.functional as F
-from typing import Optional, List
+from typing import Optional
from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock
-from basicsr.utils import get_root_logger
from basicsr.utils.registry import ARCH_REGISTRY
def calc_mean_std(feat, eps=1e-5):
diff --git a/modules/codeformer/vqgan_arch.py b/modules/codeformer/vqgan_arch.py
index e7293683..820e6b12 100644
--- a/modules/codeformer/vqgan_arch.py
+++ b/modules/codeformer/vqgan_arch.py
@@ -5,11 +5,9 @@ VQGAN code, adapted from the original created by the Unleashing Transformers aut
https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py
'''
-import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
-import copy
from basicsr.utils import get_root_logger
from basicsr.utils.registry import ARCH_REGISTRY
diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py
index 8d84bbc9..8e56cb89 100644
--- a/modules/codeformer_model.py
+++ b/modules/codeformer_model.py
@@ -33,11 +33,9 @@ def setup_model(dirname):
try:
from torchvision.transforms.functional import normalize
from modules.codeformer.codeformer_arch import CodeFormer
- from basicsr.utils.download_util import load_file_from_url
- from basicsr.utils import imwrite, img2tensor, tensor2img
+ from basicsr.utils import img2tensor, tensor2img
from facelib.utils.face_restoration_helper import FaceRestoreHelper
from facelib.detection.retinaface import retinaface
- from modules.shared import cmd_opts
net_class = CodeFormer
diff --git a/modules/config_states.py b/modules/config_states.py
index 2ea00929..8f1ff428 100644
--- a/modules/config_states.py
+++ b/modules/config_states.py
@@ -14,7 +14,7 @@ from collections import OrderedDict
import git
from modules import shared, extensions
-from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path, config_states_dir
+from modules.paths_internal import script_path, config_states_dir
all_config_states = OrderedDict()
diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index f4369257..85aa6934 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -6,7 +6,7 @@ from PIL import Image
from basicsr.utils.download_util import load_file_from_url
import modules.esrgan_model_arch as arch
-from modules import shared, modelloader, images, devices
+from modules import modelloader, images, devices
from modules.upscaler import Upscaler, UpscalerData
from modules.shared import opts
diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py
index 7f8bc7c0..4de9dd8d 100644
--- a/modules/esrgan_model_arch.py
+++ b/modules/esrgan_model_arch.py
@@ -2,7 +2,6 @@
from collections import OrderedDict
import math
-import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
diff --git a/modules/extensions.py b/modules/extensions.py
index 34d9d654..829f8cd9 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -3,7 +3,6 @@ import sys
import traceback
import time
-from datetime import datetime
import git
from modules import shared
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index fe8b18b2..f1c59c46 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -1,15 +1,11 @@
import base64
-import html
import io
-import math
import os
import re
-from pathlib import Path
import gradio as gr
from modules.paths import data_path
from modules import shared, ui_tempdir, script_callbacks
-import tempfile
from PIL import Image
re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)'
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 1fc49537..9fe749b7 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -1,4 +1,3 @@
-import csv
import datetime
import glob
import html
@@ -18,7 +17,7 @@ from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum
from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_
-from collections import defaultdict, deque
+from collections import deque
from statistics import stdev, mean
diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py
index 76599f5a..be168736 100644
--- a/modules/hypernetworks/ui.py
+++ b/modules/hypernetworks/ui.py
@@ -1,6 +1,4 @@
import html
-import os
-import re
import gradio as gr
import modules.hypernetworks.hypernetwork
diff --git a/modules/images.py b/modules/images.py
index 5eb6d855..7392cb8b 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -19,7 +19,7 @@ import json
import hashlib
from modules import sd_samplers, shared, script_callbacks, errors
-from modules.shared import opts, cmd_opts
+from modules.shared import opts
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
diff --git a/modules/img2img.py b/modules/img2img.py
index 32b1ecd6..d704bf90 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -1,12 +1,9 @@
-import math
import os
-import sys
-import traceback
import numpy as np
from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError
-from modules import devices, sd_samplers
+from modules import sd_samplers
from modules.generation_parameters_copypaste import create_override_settings_dict
from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
from modules.shared import opts, state
diff --git a/modules/mac_specific.py b/modules/mac_specific.py
index 40ce2101..5c2f92a1 100644
--- a/modules/mac_specific.py
+++ b/modules/mac_specific.py
@@ -1,6 +1,5 @@
import torch
import platform
-from modules import paths
from modules.sd_hijack_utils import CondFunc
from packaging import version
diff --git a/modules/modelloader.py b/modules/modelloader.py
index cf685000..92ada694 100644
--- a/modules/modelloader.py
+++ b/modules/modelloader.py
@@ -1,4 +1,3 @@
-import glob
import os
import shutil
import importlib
diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py
index 11b330bc..a4c4ef4e 100644
--- a/modules/models/diffusion/uni_pc/uni_pc.py
+++ b/modules/models/diffusion/uni_pc/uni_pc.py
@@ -1,5 +1,4 @@
import torch
-import torch.nn.functional as F
import math
from tqdm.auto import trange
diff --git a/modules/processing.py b/modules/processing.py
index 6f5233c1..c3932d6b 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -2,7 +2,6 @@ import json
import math
import os
import sys
-import warnings
import hashlib
import torch
@@ -11,10 +10,10 @@ from PIL import Image, ImageFilter, ImageOps
import random
import cv2
from skimage import exposure
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List
import modules.sd_hijack
-from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts
+from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index d8135211..81573b78 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -3,7 +3,7 @@ from torch.nn.functional import silu
from types import MethodType
import modules.textual_inversion.textual_inversion
-from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint
+from modules import devices, sd_hijack_optimizations, shared
from modules.hypernetworks import hypernetwork
from modules.shared import cmd_opts
from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr
diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py
index 55a2ce4d..344d75c8 100644
--- a/modules/sd_hijack_inpainting.py
+++ b/modules/sd_hijack_inpainting.py
@@ -1,15 +1,9 @@
-import os
import torch
-from einops import repeat
-from omegaconf import ListConfig
-
import ldm.models.diffusion.ddpm
import ldm.models.diffusion.ddim
import ldm.models.diffusion.plms
-from ldm.models.diffusion.ddpm import LatentDiffusion
-from ldm.models.diffusion.plms import PLMSSampler
from ldm.models.diffusion.ddim import DDIMSampler, noise_like
from ldm.models.diffusion.sampling_util import norm_thresholding
diff --git a/modules/sd_hijack_ip2p.py b/modules/sd_hijack_ip2p.py
index 41ed54a2..6fe6b6ff 100644
--- a/modules/sd_hijack_ip2p.py
+++ b/modules/sd_hijack_ip2p.py
@@ -1,8 +1,5 @@
-import collections
import os.path
-import sys
-import gc
-import time
+
def should_hijack_ip2p(checkpoint_info):
from modules import sd_models_config
diff --git a/modules/sd_hijack_xlmr.py b/modules/sd_hijack_xlmr.py
index 4ac51c38..28528329 100644
--- a/modules/sd_hijack_xlmr.py
+++ b/modules/sd_hijack_xlmr.py
@@ -1,8 +1,6 @@
-import open_clip.tokenizer
import torch
from modules import sd_hijack_clip, devices
-from modules.shared import opts
class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords):
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 11c1a344..1c09c709 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -565,7 +565,7 @@ def reload_model_weights(sd_model=None, info=None):
def unload_model_weights(sd_model=None, info=None):
- from modules import lowvram, devices, sd_hijack
+ from modules import devices, sd_hijack
timer = Timer()
if model_data.sd_model:
diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py
index 7a79925a..9bfe1237 100644
--- a/modules/sd_models_config.py
+++ b/modules/sd_models_config.py
@@ -1,4 +1,3 @@
-import re
import os
import torch
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 0fc9f456..3b8e9622 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -1,7 +1,6 @@
from collections import deque
import torch
import inspect
-import einops
import k_diffusion.sampling
from modules import prompt_parser, devices, sd_samplers_common
diff --git a/modules/sd_vae.py b/modules/sd_vae.py
index 521e485a..b7176125 100644
--- a/modules/sd_vae.py
+++ b/modules/sd_vae.py
@@ -1,8 +1,5 @@
-import torch
-import safetensors.torch
import os
import collections
-from collections import namedtuple
from modules import paths, shared, devices, script_callbacks, sd_models
import glob
from copy import deepcopy
diff --git a/modules/shared.py b/modules/shared.py
index 4631965b..44cd2c0c 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -1,12 +1,9 @@
-import argparse
import datetime
import json
import os
import sys
import time
-import requests
-from PIL import Image
import gradio as gr
import tqdm
diff --git a/modules/styles.py b/modules/styles.py
index 11642075..c22769cf 100644
--- a/modules/styles.py
+++ b/modules/styles.py
@@ -1,18 +1,9 @@
-# We need this so Python doesn't complain about the unknown StableDiffusionProcessing-typehint at runtime
-from __future__ import annotations
-
import csv
import os
import os.path
import typing
-import collections.abc as abc
-import tempfile
import shutil
-if typing.TYPE_CHECKING:
- # Only import this when code is being type-checked, it doesn't have any effect at runtime
- from .processing import StableDiffusionProcessing
-
class PromptStyle(typing.NamedTuple):
name: str
diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py
index d7d8d2e3..7770d22f 100644
--- a/modules/textual_inversion/autocrop.py
+++ b/modules/textual_inversion/autocrop.py
@@ -1,10 +1,8 @@
import cv2
import requests
import os
-from collections import defaultdict
-from math import log, sqrt
import numpy as np
-from PIL import Image, ImageDraw
+from PIL import ImageDraw
GREEN = "#0F0"
BLUE = "#00F"
diff --git a/modules/textual_inversion/image_embedding.py b/modules/textual_inversion/image_embedding.py
index 5593f88c..ee0e850a 100644
--- a/modules/textual_inversion/image_embedding.py
+++ b/modules/textual_inversion/image_embedding.py
@@ -2,7 +2,7 @@ import base64
import json
import numpy as np
import zlib
-from PIL import Image, PngImagePlugin, ImageDraw, ImageFont
+from PIL import Image, ImageDraw, ImageFont
from fonts.ttf import Roboto
import torch
from modules.shared import opts
diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py
index da0bcb26..d0cad09e 100644
--- a/modules/textual_inversion/preprocess.py
+++ b/modules/textual_inversion/preprocess.py
@@ -1,13 +1,9 @@
import os
from PIL import Image, ImageOps
import math
-import platform
-import sys
import tqdm
-import time
from modules import paths, shared, images, deepbooru
-from modules.shared import opts, cmd_opts
from modules.textual_inversion import autocrop
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index f753b75f..9ed9ba45 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -1,7 +1,6 @@
import os
import sys
import traceback
-import inspect
from collections import namedtuple
import torch
diff --git a/modules/txt2img.py b/modules/txt2img.py
index 16841d0f..f022381c 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -1,18 +1,15 @@
import modules.scripts
-from modules import sd_samplers
+from modules import sd_samplers, processing
from modules.generation_parameters_copypaste import create_override_settings_dict
-from modules.processing import StableDiffusionProcessing, Processed, StableDiffusionProcessingTxt2Img, \
- StableDiffusionProcessingImg2Img, process_images
from modules.shared import opts, cmd_opts
import modules.shared as shared
-import modules.processing as processing
from modules.ui import plaintext_to_html
def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, override_settings_texts, *args):
override_settings = create_override_settings_dict(override_settings_texts)
- p = StableDiffusionProcessingTxt2Img(
+ p = processing.StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples,
outpath_grids=opts.outdir_grids or opts.outdir_txt2img_grids,
@@ -53,7 +50,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
processed = modules.scripts.scripts_txt2img.run(p, *args)
if processed is None:
- processed = process_images(p)
+ processed = processing.process_images(p)
p.close()
diff --git a/modules/ui.py b/modules/ui.py
index 6beda76f..f7e57593 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -14,10 +14,10 @@ from PIL import Image, PngImagePlugin
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing, progress
-from modules.ui_components import FormRow, FormColumn, FormGroup, ToolButton, FormHTML
+from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
from modules.paths import script_path, data_path
-from modules.shared import opts, cmd_opts, restricted_opts
+from modules.shared import opts, cmd_opts
import modules.codeformer_model
import modules.generation_parameters_copypaste as parameters_copypaste
@@ -28,7 +28,6 @@ import modules.shared as shared
import modules.styles
import modules.textual_inversion.ui
from modules import prompt_parser
-from modules.images import save_image
from modules.sd_hijack import model_hijack
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.textual_inversion import textual_inversion
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index 49e06289..800e467a 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -1,4 +1,3 @@
-import glob
import os.path
import urllib.parse
from pathlib import Path
diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py
index f25639e5..c7dc1154 100644
--- a/modules/ui_postprocessing.py
+++ b/modules/ui_postprocessing.py
@@ -1,5 +1,5 @@
import gradio as gr
-from modules import scripts_postprocessing, scripts, shared, gfpgan_model, codeformer_model, ui_common, postprocessing, call_queue
+from modules import scripts, shared, ui_common, postprocessing, call_queue
import modules.generation_parameters_copypaste as parameters_copypaste
diff --git a/modules/upscaler.py b/modules/upscaler.py
index 0ad4fe99..777593b0 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -2,8 +2,6 @@ import os
from abc import abstractmethod
import PIL
-import numpy as np
-import torch
from PIL import Image
import modules.shared
diff --git a/modules/xlmr.py b/modules/xlmr.py
index beab3fdf..e056c3f6 100644
--- a/modules/xlmr.py
+++ b/modules/xlmr.py
@@ -1,4 +1,4 @@
-from transformers import BertPreTrainedModel,BertModel,BertConfig
+from transformers import BertPreTrainedModel, BertConfig
import torch.nn as nn
import torch
from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaConfig
diff --git a/pyproject.toml b/pyproject.toml
index 1e164abc..9caa9ba2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,10 +1,13 @@
[tool.ruff]
+exclude = ["extensions"]
+
ignore = [
"E501",
- "E731",
- "E402", # Module level import not at top of file
- "F401" # Module imported but unused
+
+ "F401", # Module imported but unused
]
-exclude = ["extensions"]
+
+[tool.ruff.per-file-ignores]
+"webui.py" = ["E402"] # Module level import not at top of file
\ No newline at end of file
diff --git a/scripts/custom_code.py b/scripts/custom_code.py
index f36a3675..cc6f0d49 100644
--- a/scripts/custom_code.py
+++ b/scripts/custom_code.py
@@ -4,7 +4,7 @@ import ast
import copy
from modules.processing import Processed
-from modules.shared import opts, cmd_opts, state
+from modules.shared import cmd_opts
def convertExpr2Expression(expr):
diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py
index b10fed6c..665dbe89 100644
--- a/scripts/outpainting_mk_2.py
+++ b/scripts/outpainting_mk_2.py
@@ -7,9 +7,9 @@ import modules.scripts as scripts
import gradio as gr
from PIL import Image, ImageDraw
-from modules import images, processing, devices
+from modules import images
from modules.processing import Processed, process_images
-from modules.shared import opts, cmd_opts, state
+from modules.shared import opts, state
# this function is taken from https://github.com/parlance-zz/g-diffuser-bot
diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py
index ddcbd2d3..c0bbecc1 100644
--- a/scripts/poor_mans_outpainting.py
+++ b/scripts/poor_mans_outpainting.py
@@ -4,9 +4,9 @@ import modules.scripts as scripts
import gradio as gr
from PIL import Image, ImageDraw
-from modules import images, processing, devices
+from modules import images, devices
from modules.processing import Processed, process_images
-from modules.shared import opts, cmd_opts, state
+from modules.shared import opts, state
class Script(scripts.Script):
diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py
index e9b11517..fb06beab 100644
--- a/scripts/prompt_matrix.py
+++ b/scripts/prompt_matrix.py
@@ -1,14 +1,11 @@
import math
-from collections import namedtuple
-from copy import copy
-import random
import modules.scripts as scripts
import gradio as gr
from modules import images
-from modules.processing import process_images, Processed
-from modules.shared import opts, cmd_opts, state
+from modules.processing import process_images
+from modules.shared import opts, state
import modules.sd_samplers
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index 76dc5778..149bc85f 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -1,6 +1,4 @@
import copy
-import math
-import os
import random
import sys
import traceback
@@ -11,8 +9,7 @@ import gradio as gr
from modules import sd_samplers
from modules.processing import Processed, process_images
-from PIL import Image
-from modules.shared import opts, cmd_opts, state
+from modules.shared import state
def process_string_tag(tag):
diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py
index 332d76d9..d873a09c 100644
--- a/scripts/sd_upscale.py
+++ b/scripts/sd_upscale.py
@@ -4,9 +4,9 @@ import modules.scripts as scripts
import gradio as gr
from PIL import Image
-from modules import processing, shared, sd_samplers, images, devices
+from modules import processing, shared, images, devices
from modules.processing import Processed
-from modules.shared import opts, cmd_opts, state
+from modules.shared import opts, state
class Script(scripts.Script):
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index 2ff42ef8..332e0ecd 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -10,15 +10,13 @@ import numpy as np
import modules.scripts as scripts
import gradio as gr
-from modules import images, paths, sd_samplers, processing, sd_models, sd_vae
+from modules import images, sd_samplers, processing, sd_models, sd_vae
from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img
-from modules.shared import opts, cmd_opts, state
+from modules.shared import opts, state
import modules.shared as shared
import modules.sd_samplers
import modules.sd_models
import modules.sd_vae
-import glob
-import os
import re
from modules.ui_components import ToolButton
diff --git a/webui.py b/webui.py
index ec3d2aba..48277075 100644
--- a/webui.py
+++ b/webui.py
@@ -43,7 +43,7 @@ if ".dev" in torch.__version__ or "+git" in torch.__version__:
torch.__long_version__ = torch.__version__
torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0)
-from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states
+from modules import shared, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states
import modules.codeformer_model as codeformer
import modules.face_restoration
import modules.gfpgan_model as gfpgan
From 4b854806d98cf5ccd48e5cd99c172613da7937f0 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 10 May 2023 09:02:23 +0300
Subject: F401 fixes for ruff
---
extensions-builtin/LDSR/scripts/ldsr_model.py | 4 ++--
modules/cmd_args.py | 2 +-
modules/deepbooru.py | 1 -
modules/extensions.py | 2 +-
modules/gfpgan_model.py | 2 +-
modules/models/diffusion/uni_pc/__init__.py | 2 +-
modules/paths.py | 4 ++--
modules/realesrgan_model.py | 6 +++---
modules/script_loading.py | 1 -
modules/sd_hijack_inpainting.py | 2 +-
modules/sd_models.py | 4 +---
modules/sd_samplers.py | 2 +-
modules/shared.py | 2 +-
modules/ui.py | 4 ++--
modules/upscaler.py | 2 +-
pyproject.toml | 9 +++++----
webui.py | 8 ++++----
17 files changed, 27 insertions(+), 30 deletions(-)
diff --git a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py
index e8dc083c..fbbe9005 100644
--- a/extensions-builtin/LDSR/scripts/ldsr_model.py
+++ b/extensions-builtin/LDSR/scripts/ldsr_model.py
@@ -7,8 +7,8 @@ from basicsr.utils.download_util import load_file_from_url
from modules.upscaler import Upscaler, UpscalerData
from ldsr_model_arch import LDSR
from modules import shared, script_callbacks
-import sd_hijack_autoencoder
-import sd_hijack_ddpm_v1
+import sd_hijack_autoencoder # noqa: F401
+import sd_hijack_ddpm_v1 # noqa: F401
class UpscalerLDSR(Upscaler):
diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index d906a571..e01ca655 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -1,6 +1,6 @@
import argparse
import os
-from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file
+from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file # noqa: F401
parser = argparse.ArgumentParser()
diff --git a/modules/deepbooru.py b/modules/deepbooru.py
index 122fce7f..1c4554a2 100644
--- a/modules/deepbooru.py
+++ b/modules/deepbooru.py
@@ -2,7 +2,6 @@ import os
import re
import torch
-from PIL import Image
import numpy as np
from modules import modelloader, paths, deepbooru_model, devices, images, shared
diff --git a/modules/extensions.py b/modules/extensions.py
index 829f8cd9..bc2c0450 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -6,7 +6,7 @@ import time
import git
from modules import shared
-from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path
+from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401
extensions = []
diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py
index fbe6215a..0131dea4 100644
--- a/modules/gfpgan_model.py
+++ b/modules/gfpgan_model.py
@@ -78,7 +78,7 @@ def setup_model(dirname):
try:
from gfpgan import GFPGANer
- from facexlib import detection, parsing
+ from facexlib import detection, parsing # noqa: F401
global user_path
global have_gfpgan
global gfpgan_constructor
diff --git a/modules/models/diffusion/uni_pc/__init__.py b/modules/models/diffusion/uni_pc/__init__.py
index e1265e3f..dbb35964 100644
--- a/modules/models/diffusion/uni_pc/__init__.py
+++ b/modules/models/diffusion/uni_pc/__init__.py
@@ -1 +1 @@
-from .sampler import UniPCSampler
+from .sampler import UniPCSampler # noqa: F401
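`# noqa: F401` marks an import as an intentional re-export so ruff keeps it; the pattern in miniature, using a stdlib module as a stand-in:

    # the import exists only so callers can do `from this_module import path`;
    # without the noqa, ruff's F401 would flag it as unused
    from os import path  # noqa: F401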
diff --git a/modules/paths.py b/modules/paths.py
index acf1894b..5f6474c0 100644
--- a/modules/paths.py
+++ b/modules/paths.py
@@ -1,8 +1,8 @@
import os
import sys
-from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir
+from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir # noqa: F401
-import modules.safe
+import modules.safe # noqa: F401
# data_path = cmd_opts_pre.data
diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py
index 9ec1adf2..c24d8dbb 100644
--- a/modules/realesrgan_model.py
+++ b/modules/realesrgan_model.py
@@ -17,9 +17,9 @@ class UpscalerRealESRGAN(Upscaler):
self.user_path = path
super().__init__()
try:
- from basicsr.archs.rrdbnet_arch import RRDBNet
- from realesrgan import RealESRGANer
- from realesrgan.archs.srvgg_arch import SRVGGNetCompact
+ from basicsr.archs.rrdbnet_arch import RRDBNet # noqa: F401
+ from realesrgan import RealESRGANer # noqa: F401
+ from realesrgan.archs.srvgg_arch import SRVGGNetCompact # noqa: F401
self.enable = True
self.scalers = []
scalers = self.load_models(path)
diff --git a/modules/script_loading.py b/modules/script_loading.py
index a7d2203f..57b15862 100644
--- a/modules/script_loading.py
+++ b/modules/script_loading.py
@@ -2,7 +2,6 @@ import os
import sys
import traceback
import importlib.util
-from types import ModuleType
def load_module(path):
diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py
index 344d75c8..058575b7 100644
--- a/modules/sd_hijack_inpainting.py
+++ b/modules/sd_hijack_inpainting.py
@@ -4,7 +4,7 @@ import ldm.models.diffusion.ddpm
import ldm.models.diffusion.ddim
import ldm.models.diffusion.plms
-from ldm.models.diffusion.ddim import DDIMSampler, noise_like
+from ldm.models.diffusion.ddim import noise_like
from ldm.models.diffusion.sampling_util import norm_thresholding
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 1c09c709..d1e946a5 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -15,7 +15,6 @@ import ldm.modules.midas as midas
from ldm.util import instantiate_from_config
from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config
-from modules.paths import models_path
from modules.sd_hijack_inpainting import do_inpainting_hijack
from modules.timer import Timer
@@ -87,8 +86,7 @@ class CheckpointInfo:
try:
# this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
-
- from transformers import logging, CLIPModel
+ from transformers import logging, CLIPModel # noqa: F401
logging.set_verbosity_error()
except Exception:
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index ff361f22..4f1bf21d 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -1,7 +1,7 @@
from modules import sd_samplers_compvis, sd_samplers_kdiffusion, shared
# imports for functions that previously were here and are used by other modules
-from modules.sd_samplers_common import samples_to_image_grid, sample_to_image
+from modules.sd_samplers_common import samples_to_image_grid, sample_to_image # noqa: F401
all_samplers = [
*sd_samplers_kdiffusion.samplers_data_k_diffusion,
diff --git a/modules/shared.py b/modules/shared.py
index 44cd2c0c..7d70f041 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -12,7 +12,7 @@ import modules.memmon
import modules.styles
import modules.devices as devices
from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args
-from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir
+from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401
from ldm.models.diffusion.ddpm import LatentDiffusion
demo = None
diff --git a/modules/ui.py b/modules/ui.py
index f7e57593..782b569d 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -10,10 +10,10 @@ import gradio as gr
import gradio.routes
import gradio.utils
import numpy as np
-from PIL import Image, PngImagePlugin
+from PIL import Image, PngImagePlugin # noqa: F401
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
-from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing, progress
+from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress
from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
from modules.paths import script_path, data_path
diff --git a/modules/upscaler.py b/modules/upscaler.py
index 777593b0..e145be30 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -41,7 +41,7 @@ class Upscaler:
os.makedirs(self.model_path, exist_ok=True)
try:
- import cv2
+ import cv2 # noqa: F401
self.can_tile = True
except Exception:
pass
diff --git a/pyproject.toml b/pyproject.toml
index 9caa9ba2..0883c127 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,13 +1,14 @@
[tool.ruff]
+target-version = "py310"
+
exclude = ["extensions"]
ignore = [
- "E501",
-
- "F401", # Module imported but unused
+ "E501", # Line too long
+ "E731", # Do not assign a `lambda` expression, use a `def`
]
[tool.ruff.per-file-ignores]
-"webui.py" = ["E402"] # Module level import not at top of file
\ No newline at end of file
+"webui.py" = ["E402"] # Module level import not at top of file
diff --git a/webui.py b/webui.py
index 48277075..5d5e80b5 100644
--- a/webui.py
+++ b/webui.py
@@ -16,12 +16,12 @@ from packaging import version
import logging
logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
-from modules import paths, timer, import_hook, errors
+from modules import paths, timer, import_hook, errors # noqa: F401
startup_timer = timer.Timer()
import torch
-import pytorch_lightning # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them
+import pytorch_lightning # noqa: F401 # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them
warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning")
warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision")
@@ -31,12 +31,12 @@ startup_timer.record("import torch")
import gradio
startup_timer.record("import gradio")
-import ldm.modules.encoders.modules
+import ldm.modules.encoders.modules # noqa: F401
startup_timer.record("import ldm")
from modules import extra_networks, ui_extra_networks_checkpoints
from modules import extra_networks_hypernet, ui_extra_networks_hypernets, ui_extra_networks_textual_inversion
-from modules.call_queue import wrap_queued_call, queue_lock, wrap_gradio_gpu_call
+from modules.call_queue import wrap_queued_call, queue_lock
# Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors
if ".dev" in torch.__version__ or "+git" in torch.__version__:
--

From 028d3f6425d85f122027c127fba8bcbf4f66ee75 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 10 May 2023 11:05:02 +0300
Subject: ruff auto fixes
---
extensions-builtin/LDSR/sd_hijack_autoencoder.py | 4 ++--
extensions-builtin/LDSR/sd_hijack_ddpm_v1.py | 12 ++++++------
extensions-builtin/Lora/lora.py | 12 ++++++------
extensions-builtin/Lora/scripts/lora_script.py | 2 +-
modules/config_states.py | 2 +-
modules/deepbooru.py | 2 +-
modules/devices.py | 2 +-
modules/hypernetworks/hypernetwork.py | 2 +-
modules/hypernetworks/ui.py | 4 ++--
modules/interrogate.py | 2 +-
modules/modelloader.py | 2 +-
modules/models/diffusion/ddpm_edit.py | 4 ++--
modules/scripts_auto_postprocessing.py | 2 +-
modules/sd_hijack.py | 2 +-
modules/sd_hijack_optimizations.py | 14 +++++++-------
modules/sd_samplers_compvis.py | 2 +-
modules/sd_samplers_kdiffusion.py | 2 +-
modules/shared.py | 6 +++---
modules/textual_inversion/textual_inversion.py | 2 +-
modules/ui.py | 8 ++++----
modules/ui_extra_networks.py | 4 ++--
modules/ui_tempdir.py | 2 +-
22 files changed, 47 insertions(+), 47 deletions(-)
diff --git a/extensions-builtin/LDSR/sd_hijack_autoencoder.py b/extensions-builtin/LDSR/sd_hijack_autoencoder.py
index 6303fed5..f457ca93 100644
--- a/extensions-builtin/LDSR/sd_hijack_autoencoder.py
+++ b/extensions-builtin/LDSR/sd_hijack_autoencoder.py
@@ -288,5 +288,5 @@ class VQModelInterface(VQModel):
dec = self.decoder(quant)
return dec
-setattr(ldm.models.autoencoder, "VQModel", VQModel)
-setattr(ldm.models.autoencoder, "VQModelInterface", VQModelInterface)
+ldm.models.autoencoder.VQModel = VQModel
+ldm.models.autoencoder.VQModelInterface = VQModelInterface
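The `setattr` rewrites here are pure spelling changes: when the attribute name is a string literal, direct assignment is equivalent (Ruff's B010). A self-contained sketch with a stand-in module:

import types

mod = types.ModuleType("fake_ldm_autoencoder")  # stand-in for ldm.models.autoencoder

class VQModel:
    pass

setattr(mod, "VQModel", VQModel)  # the form B010 flags (literal attribute name)
mod.VQModel = VQModel             # the equivalent direct assignment used after the fix
assert mod.VQModel is VQModel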
diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
index 4d3f6c56..d8fc30e3 100644
--- a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
+++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
@@ -1116,7 +1116,7 @@ class LatentDiffusionV1(DDPMV1):
if cond is not None:
if isinstance(cond, dict):
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+ [x[:batch_size] for x in cond[key]] for key in cond}
else:
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
@@ -1215,7 +1215,7 @@ class LatentDiffusionV1(DDPMV1):
if cond is not None:
if isinstance(cond, dict):
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+ [x[:batch_size] for x in cond[key]] for key in cond}
else:
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
return self.p_sample_loop(cond,
@@ -1437,7 +1437,7 @@ class Layout2ImgDiffusionV1(LatentDiffusionV1):
logs['bbox_image'] = cond_img
return logs
-setattr(ldm.models.diffusion.ddpm, "DDPMV1", DDPMV1)
-setattr(ldm.models.diffusion.ddpm, "LatentDiffusionV1", LatentDiffusionV1)
-setattr(ldm.models.diffusion.ddpm, "DiffusionWrapperV1", DiffusionWrapperV1)
-setattr(ldm.models.diffusion.ddpm, "Layout2ImgDiffusionV1", Layout2ImgDiffusionV1)
+ldm.models.diffusion.ddpm.DDPMV1 = DDPMV1
+ldm.models.diffusion.ddpm.LatentDiffusionV1 = LatentDiffusionV1
+ldm.models.diffusion.ddpm.DiffusionWrapperV1 = DiffusionWrapperV1
+ldm.models.diffusion.ddpm.Layout2ImgDiffusionV1 = Layout2ImgDiffusionV1
diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index 0ab43229..9795540f 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -172,7 +172,7 @@ def load_lora(name, filename):
else:
print(f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}')
continue
- assert False, f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}'
+ raise AssertionError(f"Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}")
with torch.no_grad():
module.weight.copy_(weight)
@@ -184,7 +184,7 @@ def load_lora(name, filename):
elif lora_key == "lora_down.weight":
lora_module.down = module
else:
- assert False, f'Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha'
+ raise AssertionError(f"Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha")
if len(keys_failed_to_match) > 0:
print(f"Failed to match keys when loading Lora {filename}: {keys_failed_to_match}")
@@ -202,7 +202,7 @@ def load_loras(names, multipliers=None):
loaded_loras.clear()
loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
- if any([x is None for x in loras_on_disk]):
+ if any(x is None for x in loras_on_disk):
list_available_loras()
loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
@@ -309,7 +309,7 @@ def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.Mu
print(f'failed to calculate lora weights for layer {lora_layer_name}')
- setattr(self, "lora_current_names", wanted_names)
+ self.lora_current_names = wanted_names
def lora_forward(module, input, original_forward):
@@ -343,8 +343,8 @@ def lora_forward(module, input, original_forward):
def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
- setattr(self, "lora_current_names", ())
- setattr(self, "lora_weights_backup", None)
+ self.lora_current_names = ()
+ self.lora_weights_backup = None
def lora_Linear_forward(self, input):
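The `assert False` rewrites in this file matter beyond style: `python -O` strips assert statements entirely, so `raise AssertionError(...)` is the only form guaranteed to fail at runtime. A short demonstration (the message text is illustrative):

def old_fail(msg):
    assert False, msg             # stripped entirely under `python -O`

def new_fail(msg):
    raise AssertionError(msg)     # raises regardless of optimization flags

try:
    new_fail("Bad Lora layer name: example")
except AssertionError as e:
    print(e)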
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 7db971fd..b70e2de7 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -53,7 +53,7 @@ script_callbacks.on_infotext_pasted(lora.infotext_pasted)
shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
- "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras),
+ "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(lora.available_loras)}, refresh=lora.list_available_loras),
}))
diff --git a/modules/config_states.py b/modules/config_states.py
index 8f1ff428..75da862a 100644
--- a/modules/config_states.py
+++ b/modules/config_states.py
@@ -35,7 +35,7 @@ def list_config_states():
j["filepath"] = path
config_states.append(j)
- config_states = list(sorted(config_states, key=lambda cs: cs["created_at"], reverse=True))
+ config_states = sorted(config_states, key=lambda cs: cs["created_at"], reverse=True)
for cs in config_states:
timestamp = time.asctime(time.gmtime(cs["created_at"]))
diff --git a/modules/deepbooru.py b/modules/deepbooru.py
index 1c4554a2..547e1b4c 100644
--- a/modules/deepbooru.py
+++ b/modules/deepbooru.py
@@ -78,7 +78,7 @@ class DeepDanbooru:
res = []
- filtertags = set([x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")])
+ filtertags = {x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")}
for tag in [x for x in tags if x not in filtertags]:
probability = probability_dict[tag]
diff --git a/modules/devices.py b/modules/devices.py
index c705a3cb..d8a34a0f 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -65,7 +65,7 @@ def enable_tf32():
# enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't
# see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407
- if any([torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())]):
+ if any(torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())):
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = True
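The recurring `any([...])` to `any(...)` rewrites in this commit drop the intermediate list so evaluation can short-circuit at the first hit. A sketch that makes the difference observable:

calls = []

def probe(x):
    calls.append(x)
    return x is None

items = [None, 1, 2, 3]

calls.clear()
any([probe(x) for x in items])   # list comprehension: all four calls happen
assert calls == [None, 1, 2, 3]

calls.clear()
any(probe(x) for x in items)     # generator: stops after the first True
assert calls == [None]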
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 9fe749b7..6ef0bfdf 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -403,7 +403,7 @@ def attention_CrossAttention_forward(self, x, context=None, mask=None):
k = self.to_k(context_k)
v = self.to_v(context_v)
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+ q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v))
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
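Replacing `map(lambda t: ..., (q, k, v))` with a generator expression is behavior-preserving here because the three results are unpacked immediately; a sketch with a stand-in for `rearrange`:

def rearrange_stub(t):
    # stand-in for einops.rearrange(t, 'b n (h d) -> (b h) n d', h=h)
    return t * 2

q, k, v = map(lambda t: rearrange_stub(t), (1, 2, 3))   # old form
q2, k2, v2 = (rearrange_stub(t) for t in (1, 2, 3))     # generator expression
assert (q, k, v) == (q2, k2, v2) == (2, 4, 6)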
diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py
index be168736..e3f9eb13 100644
--- a/modules/hypernetworks/ui.py
+++ b/modules/hypernetworks/ui.py
@@ -5,13 +5,13 @@ import modules.hypernetworks.hypernetwork
from modules import devices, sd_hijack, shared
not_available = ["hardswish", "multiheadattention"]
-keys = list(x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available)
+keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available]
def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None):
filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure)
- return gr.Dropdown.update(choices=sorted([x for x in shared.hypernetworks.keys()])), f"Created: {filename}", ""
+ return gr.Dropdown.update(choices=sorted(shared.hypernetworks.keys())), f"Created: {filename}", ""
def train_hypernetwork(*args):
diff --git a/modules/interrogate.py b/modules/interrogate.py
index 22df9216..a1c8e537 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -159,7 +159,7 @@ class InterrogateModels:
text_array = text_array[0:int(shared.opts.interrogate_clip_dict_limit)]
top_count = min(top_count, len(text_array))
- text_tokens = clip.tokenize([text for text in text_array], truncate=True).to(devices.device_interrogate)
+ text_tokens = clip.tokenize(list(text_array), truncate=True).to(devices.device_interrogate)
text_features = self.clip_model.encode_text(text_tokens).type(self.dtype)
text_features /= text_features.norm(dim=-1, keepdim=True)
diff --git a/modules/modelloader.py b/modules/modelloader.py
index 92ada694..25612bf8 100644
--- a/modules/modelloader.py
+++ b/modules/modelloader.py
@@ -39,7 +39,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None
if os.path.islink(full_path) and not os.path.exists(full_path):
print(f"Skipping broken symlink: {full_path}")
continue
- if ext_blacklist is not None and any([full_path.endswith(x) for x in ext_blacklist]):
+ if ext_blacklist is not None and any(full_path.endswith(x) for x in ext_blacklist):
continue
if full_path not in output:
output.append(full_path)
diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py
index 611c2b69..09432117 100644
--- a/modules/models/diffusion/ddpm_edit.py
+++ b/modules/models/diffusion/ddpm_edit.py
@@ -1130,7 +1130,7 @@ class LatentDiffusion(DDPM):
if cond is not None:
if isinstance(cond, dict):
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+ [x[:batch_size] for x in cond[key]] for key in cond}
else:
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
@@ -1229,7 +1229,7 @@ class LatentDiffusion(DDPM):
if cond is not None:
if isinstance(cond, dict):
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+ [x[:batch_size] for x in cond[key]] for key in cond}
else:
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
return self.p_sample_loop(cond,
diff --git a/modules/scripts_auto_postprocessing.py b/modules/scripts_auto_postprocessing.py
index 30d6d658..d63078de 100644
--- a/modules/scripts_auto_postprocessing.py
+++ b/modules/scripts_auto_postprocessing.py
@@ -17,7 +17,7 @@ class ScriptPostprocessingForMainUI(scripts.Script):
return self.postprocessing_controls.values()
def postprocess_image(self, p, script_pp, *args):
- args_dict = {k: v for k, v in zip(self.postprocessing_controls, args)}
+ args_dict = dict(zip(self.postprocessing_controls, args))
pp = scripts_postprocessing.PostprocessedImage(script_pp.image)
pp.info = {}
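`dict(zip(keys, values))` builds the identical mapping that the comprehension spelled out; a minimal check with illustrative control names:

postprocessing_controls = ["upscaler", "visibility"]  # illustrative names
args = ("Lanczos", 0.75)

assert {k: v for k, v in zip(postprocessing_controls, args)} == dict(zip(postprocessing_controls, args))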
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 81573b78..e374aeb8 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -37,7 +37,7 @@ def apply_optimizations():
optimization_method = None
- can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(getattr(torch.nn.functional, "scaled_dot_product_attention")) # not everyone has torch 2.x to use sdp
+ can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention) # not everyone has torch 2.x to use sdp
if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)):
print("Applying xformers cross attention optimization.")
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index b623d53d..a174bbe1 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -49,7 +49,7 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None):
v_in = self.to_v(context_v)
del context, context_k, context_v, x
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
+ q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in))
del q_in, k_in, v_in
dtype = q.dtype
@@ -98,7 +98,7 @@ def split_cross_attention_forward(self, x, context=None, mask=None):
del context, x
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
+ q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in))
del q_in, k_in, v_in
r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
@@ -229,7 +229,7 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None):
with devices.without_autocast(disable=not shared.opts.upcast_attn):
k = k * self.scale
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+ q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v))
r = einsum_op(q, k, v)
r = r.to(dtype)
return self.to_out(rearrange(r, '(b h) n d -> b n (h d)', h=h))
@@ -334,7 +334,7 @@ def xformers_attention_forward(self, x, context=None, mask=None):
k_in = self.to_k(context_k)
v_in = self.to_v(context_v)
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))
+ q, k, v = (rearrange(t, 'b n (h d) -> b n h d', h=h) for t in (q_in, k_in, v_in))
del q_in, k_in, v_in
dtype = q.dtype
@@ -460,7 +460,7 @@ def xformers_attnblock_forward(self, x):
k = self.k(h_)
v = self.v(h_)
b, c, h, w = q.shape
- q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v))
+ q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v))
dtype = q.dtype
if shared.opts.upcast_attn:
q, k = q.float(), k.float()
@@ -482,7 +482,7 @@ def sdp_attnblock_forward(self, x):
k = self.k(h_)
v = self.v(h_)
b, c, h, w = q.shape
- q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v))
+ q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v))
dtype = q.dtype
if shared.opts.upcast_attn:
q, k = q.float(), k.float()
@@ -506,7 +506,7 @@ def sub_quad_attnblock_forward(self, x):
k = self.k(h_)
v = self.v(h_)
b, c, h, w = q.shape
- q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v))
+ q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v))
q = q.contiguous()
k = k.contiguous()
v = v.contiguous()
diff --git a/modules/sd_samplers_compvis.py b/modules/sd_samplers_compvis.py
index bfcc5574..7427648f 100644
--- a/modules/sd_samplers_compvis.py
+++ b/modules/sd_samplers_compvis.py
@@ -83,7 +83,7 @@ class VanillaStableDiffusionSampler:
conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)
- assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers'
+ assert all(len(conds) == 1 for conds in conds_list), 'composition via AND is not supported for DDIM/PLMS samplers'
cond = tensor
# for DDIM, shapes must match, we can't just process cond and uncond independently;
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 3b8e9622..2f733cf5 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -86,7 +86,7 @@ class CFGDenoiser(torch.nn.Module):
conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
- assert not is_edit_model or all([len(conds) == 1 for conds in conds_list]), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)"
+ assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)"
batch_size = len(conds_list)
repeats = [len(conds_list[i]) for i in range(batch_size)]
diff --git a/modules/shared.py b/modules/shared.py
index 7d70f041..e2691585 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -381,7 +381,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), {
"extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks (px)"),
"extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks (px)"),
"extra_networks_add_text_separator": OptionInfo(" ", "Extra text to add before <...> when adding extra network to prompt"),
- "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks),
+ "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(hypernetworks.keys())}, refresh=reload_hypernetworks),
}))
options_templates.update(options_section(('ui', "User interface"), {
@@ -403,7 +403,7 @@ options_templates.update(options_section(('ui', "User interface"), {
"keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
"keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"),
"quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}),
- "hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": [x for x in tab_names]}),
+ "hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}),
"ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"),
"ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"),
"localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)),
@@ -583,7 +583,7 @@ class Options:
if item.section not in section_ids:
section_ids[item.section] = len(section_ids)
- self.data_labels = {k: v for k, v in sorted(settings_items, key=lambda x: section_ids[x[1].section])}
+ self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section]))
def cast_value(self, key, value):
"""casts an arbitrary to the same type as this setting's value with key
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 9ed9ba45..c37bb2ad 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -167,7 +167,7 @@ class EmbeddingDatabase:
if 'string_to_param' in data:
param_dict = data['string_to_param']
if hasattr(param_dict, '_parameters'):
- param_dict = getattr(param_dict, '_parameters') # fix for torch 1.12.1 loading saved file from torch 1.11
+ param_dict = param_dict._parameters # fix for torch 1.12.1 loading saved file from torch 1.11
assert len(param_dict) == 1, 'embedding file has multiple terms in it'
emb = next(iter(param_dict.items()))[1]
# diffuser concepts
diff --git a/modules/ui.py b/modules/ui.py
index 782b569d..84d661b2 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1222,7 +1222,7 @@ def create_ui():
)
def get_textual_inversion_template_names():
- return sorted([x for x in textual_inversion.textual_inversion_templates])
+ return sorted(textual_inversion.textual_inversion_templates)
with gr.Tab(label="Train", id="train"):
gr.HTML(value="Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]
")
@@ -1230,8 +1230,8 @@ def create_ui():
train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name")
- train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=[x for x in shared.hypernetworks.keys()])
- create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted([x for x in shared.hypernetworks.keys()])}, "refresh_train_hypernetwork_name")
+ train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=list(shared.hypernetworks.keys()))
+ create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks.keys())}, "refresh_train_hypernetwork_name")
with FormRow():
embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate")
@@ -1808,7 +1808,7 @@ def create_ui():
if type(x) == gr.Dropdown:
def check_dropdown(val):
if getattr(x, 'multiselect', False):
- return all([value in x.choices for value in val])
+ return all(value in x.choices for value in val)
else:
return val in x.choices
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index 800e467a..ab585917 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -26,7 +26,7 @@ def register_page(page):
def fetch_file(filename: str = ""):
from starlette.responses import FileResponse
- if not any([Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs]):
+ if not any(Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs):
raise ValueError(f"File cannot be fetched: {filename}. Must be in one of directories registered by extra pages.")
ext = os.path.splitext(filename)[1].lower()
@@ -326,7 +326,7 @@ def setup_ui(ui, gallery):
is_allowed = False
for extra_page in ui.stored_extra_pages:
- if any([path_is_parent(x, filename) for x in extra_page.allowed_directories_for_previews()]):
+ if any(path_is_parent(x, filename) for x in extra_page.allowed_directories_for_previews()):
is_allowed = True
break
diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py
index 46fa9cb0..cac73c51 100644
--- a/modules/ui_tempdir.py
+++ b/modules/ui_tempdir.py
@@ -23,7 +23,7 @@ def register_tmp_file(gradio, filename):
def check_tmp_file(gradio, filename):
if hasattr(gradio, 'temp_file_sets'):
- return any([filename in fileset for fileset in gradio.temp_file_sets])
+ return any(filename in fileset for fileset in gradio.temp_file_sets)
if hasattr(gradio, 'temp_dirs'):
return any(Path(temp_dir).resolve() in Path(filename).resolve().parents for temp_dir in gradio.temp_dirs)
--
From a5121e7a0623db328a9462d340d389ed6737374a Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 10 May 2023 11:37:18 +0300
Subject: fixes for B007
---
extensions-builtin/LDSR/ldsr_model_arch.py | 2 +-
extensions-builtin/Lora/lora.py | 2 +-
extensions-builtin/ScuNET/scripts/scunet_model.py | 2 +-
extensions-builtin/SwinIR/swinir_model_arch.py | 2 +-
extensions-builtin/SwinIR/swinir_model_arch_v2.py | 2 +-
modules/codeformer_model.py | 2 +-
modules/esrgan_model.py | 8 ++------
modules/extra_networks.py | 2 +-
modules/generation_parameters_copypaste.py | 2 +-
modules/hypernetworks/hypernetwork.py | 12 ++++++------
modules/images.py | 2 +-
modules/interrogate.py | 4 ++--
modules/prompt_parser.py | 14 +++++++-------
modules/safe.py | 4 ++--
modules/scripts.py | 10 +++++-----
modules/scripts_postprocessing.py | 8 ++++----
modules/sd_hijack_clip.py | 2 +-
modules/shared.py | 6 +++---
modules/textual_inversion/learn_schedule.py | 2 +-
modules/textual_inversion/textual_inversion.py | 10 +++++-----
modules/ui.py | 6 +++---
modules/ui_extra_networks.py | 2 +-
modules/ui_tempdir.py | 2 +-
modules/upscaler.py | 2 +-
pyproject.toml | 1 -
scripts/prompts_from_file.py | 2 +-
scripts/sd_upscale.py | 4 ++--
scripts/xyz_grid.py | 2 +-
28 files changed, 57 insertions(+), 62 deletions(-)
diff --git a/extensions-builtin/LDSR/ldsr_model_arch.py b/extensions-builtin/LDSR/ldsr_model_arch.py
index a5fb8907..27e38549 100644
--- a/extensions-builtin/LDSR/ldsr_model_arch.py
+++ b/extensions-builtin/LDSR/ldsr_model_arch.py
@@ -88,7 +88,7 @@ class LDSR:
x_t = None
logs = None
- for n in range(n_runs):
+ for _ in range(n_runs):
if custom_shape is not None:
x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])
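B007 flags loop variables the body never reads; the fixes in this patch follow two conventions, renaming the unused binding to `_` and iterating the appropriate dict view instead of `.items()`:

d = {"a": 1, "b": 2}

for _ in range(3):        # repeat-n-times; the index is intentionally unused
    pass

for k in d:               # keys only -- no need for .items()
    assert k in d

for v in d.values():      # values only
    assert isinstance(v, int)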
diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index 9795540f..7b56136f 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -418,7 +418,7 @@ def infotext_pasted(infotext, params):
added = []
- for k, v in params.items():
+ for k in params:
if not k.startswith("AddNet Model "):
continue
diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py
index aa2fdb3a..1f5ea0d3 100644
--- a/extensions-builtin/ScuNET/scripts/scunet_model.py
+++ b/extensions-builtin/ScuNET/scripts/scunet_model.py
@@ -132,7 +132,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
model = net(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64)
model.load_state_dict(torch.load(filename), strict=True)
model.eval()
- for k, v in model.named_parameters():
+ for _, v in model.named_parameters():
v.requires_grad = False
model = model.to(device)
diff --git a/extensions-builtin/SwinIR/swinir_model_arch.py b/extensions-builtin/SwinIR/swinir_model_arch.py
index 75f7bedc..de195d9b 100644
--- a/extensions-builtin/SwinIR/swinir_model_arch.py
+++ b/extensions-builtin/SwinIR/swinir_model_arch.py
@@ -848,7 +848,7 @@ class SwinIR(nn.Module):
H, W = self.patches_resolution
flops += H * W * 3 * self.embed_dim * 9
flops += self.patch_embed.flops()
- for i, layer in enumerate(self.layers):
+ for layer in self.layers:
flops += layer.flops()
flops += H * W * 3 * self.embed_dim * self.embed_dim
flops += self.upsample.flops()
diff --git a/extensions-builtin/SwinIR/swinir_model_arch_v2.py b/extensions-builtin/SwinIR/swinir_model_arch_v2.py
index d4c0b0da..15777af9 100644
--- a/extensions-builtin/SwinIR/swinir_model_arch_v2.py
+++ b/extensions-builtin/SwinIR/swinir_model_arch_v2.py
@@ -1001,7 +1001,7 @@ class Swin2SR(nn.Module):
H, W = self.patches_resolution
flops += H * W * 3 * self.embed_dim * 9
flops += self.patch_embed.flops()
- for i, layer in enumerate(self.layers):
+ for layer in self.layers:
flops += layer.flops()
flops += H * W * 3 * self.embed_dim * self.embed_dim
flops += self.upsample.flops()
diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py
index 8e56cb89..ececdbae 100644
--- a/modules/codeformer_model.py
+++ b/modules/codeformer_model.py
@@ -94,7 +94,7 @@ def setup_model(dirname):
self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)
self.face_helper.align_warp_face()
- for idx, cropped_face in enumerate(self.face_helper.cropped_faces):
+ for cropped_face in self.face_helper.cropped_faces:
cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
cropped_face_t = cropped_face_t.unsqueeze(0).to(devices.device_codeformer)
diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index 85aa6934..a009eb42 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -16,9 +16,7 @@ def mod2normal(state_dict):
# this code is copied from https://github.com/victorca25/iNNfer
if 'conv_first.weight' in state_dict:
crt_net = {}
- items = []
- for k, v in state_dict.items():
- items.append(k)
+ items = list(state_dict)
crt_net['model.0.weight'] = state_dict['conv_first.weight']
crt_net['model.0.bias'] = state_dict['conv_first.bias']
@@ -52,9 +50,7 @@ def resrgan2normal(state_dict, nb=23):
if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict:
re8x = 0
crt_net = {}
- items = []
- for k, v in state_dict.items():
- items.append(k)
+ items = list(state_dict)
crt_net['model.0.weight'] = state_dict['conv_first.weight']
crt_net['model.0.bias'] = state_dict['conv_first.bias']
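Since iterating a dict yields its keys, the three-line accumulation loop collapses to `list(state_dict)`; a quick equivalence check:

state_dict = {"conv_first.weight": 0, "conv_first.bias": 1}

items = []
for k, v in state_dict.items():   # old form; v was never used (B007)
    items.append(k)

assert items == list(state_dict)  # keys, in insertion order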
diff --git a/modules/extra_networks.py b/modules/extra_networks.py
index 1978673d..f9db41bc 100644
--- a/modules/extra_networks.py
+++ b/modules/extra_networks.py
@@ -91,7 +91,7 @@ def deactivate(p, extra_network_data):
"""call deactivate for extra networks in extra_network_data in specified order, then call
deactivate for all remaining registered networks"""
- for extra_network_name, extra_network_args in extra_network_data.items():
+ for extra_network_name in extra_network_data:
extra_network = extra_network_registry.get(extra_network_name, None)
if extra_network is None:
continue
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 7fbbe707..b0e945a1 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -247,7 +247,7 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
lines.append(lastline)
lastline = ''
- for i, line in enumerate(lines):
+ for line in lines:
line = line.strip()
if line.startswith("Negative prompt:"):
done_with_prompt = True
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 6ef0bfdf..38ef074f 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -177,34 +177,34 @@ class Hypernetwork:
def weights(self):
res = []
- for k, layers in self.layers.items():
+ for layers in self.layers.values():
for layer in layers:
res += layer.parameters()
return res
def train(self, mode=True):
- for k, layers in self.layers.items():
+ for layers in self.layers.values():
for layer in layers:
layer.train(mode=mode)
for param in layer.parameters():
param.requires_grad = mode
def to(self, device):
- for k, layers in self.layers.items():
+ for layers in self.layers.values():
for layer in layers:
layer.to(device)
return self
def set_multiplier(self, multiplier):
- for k, layers in self.layers.items():
+ for layers in self.layers.values():
for layer in layers:
layer.multiplier = multiplier
return self
def eval(self):
- for k, layers in self.layers.items():
+ for layers in self.layers.values():
for layer in layers:
layer.eval()
for param in layer.parameters():
@@ -619,7 +619,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
try:
sd_hijack_checkpoint.add()
- for i in range((steps-initial_step) * gradient_step):
+ for _ in range((steps-initial_step) * gradient_step):
if scheduler.finished:
break
if shared.state.interrupted:
diff --git a/modules/images.py b/modules/images.py
index 7392cb8b..c4e98c75 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -149,7 +149,7 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0):
return ImageFont.truetype(Roboto, fontsize)
def draw_texts(drawing, draw_x, draw_y, lines, initial_fnt, initial_fontsize):
- for i, line in enumerate(lines):
+ for line in lines:
fnt = initial_fnt
fontsize = initial_fontsize
while drawing.multiline_textsize(line.text, font=fnt)[0] > line.allowed_width and fontsize > 0:
diff --git a/modules/interrogate.py b/modules/interrogate.py
index a1c8e537..111b1322 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -207,8 +207,8 @@ class InterrogateModels:
image_features /= image_features.norm(dim=-1, keepdim=True)
- for name, topn, items in self.categories():
- matches = self.rank(image_features, items, top_count=topn)
+ for cat in self.categories():
+ matches = self.rank(image_features, cat.items, top_count=cat.topn)
for match, score in matches:
if shared.opts.interrogate_return_ranks:
res += f", ({match}:{score/100:.3f})"
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index 3a720721..b4aff704 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -143,7 +143,7 @@ def get_learned_conditioning(model, prompts, steps):
conds = model.get_learned_conditioning(texts)
cond_schedule = []
- for i, (end_at_step, text) in enumerate(prompt_schedule):
+ for i, (end_at_step, _) in enumerate(prompt_schedule):
cond_schedule.append(ScheduledPromptConditioning(end_at_step, conds[i]))
cache[prompt] = cond_schedule
@@ -219,8 +219,8 @@ def reconstruct_cond_batch(c: List[List[ScheduledPromptConditioning]], current_s
res = torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype)
for i, cond_schedule in enumerate(c):
target_index = 0
- for current, (end_at, cond) in enumerate(cond_schedule):
- if current_step <= end_at:
+ for current, entry in enumerate(cond_schedule):
+ if current_step <= entry.end_at_step:
target_index = current
break
res[i] = cond_schedule[target_index].cond
@@ -234,13 +234,13 @@ def reconstruct_multicond_batch(c: MulticondLearnedConditioning, current_step):
tensors = []
conds_list = []
- for batch_no, composable_prompts in enumerate(c.batch):
+ for composable_prompts in c.batch:
conds_for_batch = []
- for cond_index, composable_prompt in enumerate(composable_prompts):
+ for composable_prompt in composable_prompts:
target_index = 0
- for current, (end_at, cond) in enumerate(composable_prompt.schedules):
- if current_step <= end_at:
+ for current, entry in enumerate(composable_prompt.schedules):
+ if current_step <= entry.end_at_step:
target_index = current
break
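This rewrite works because schedule entries are `ScheduledPromptConditioning` namedtuples (the diff itself reads `entry.end_at_step` and `.cond`), so the unused `cond` no longer needs a binding. A self-contained sketch with the same field names:

from collections import namedtuple

ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at_step", "cond"])
schedule = [ScheduledPromptConditioning(10, "c1"), ScheduledPromptConditioning(20, "c2")]

current_step, target_index = 15, 0
for current, entry in enumerate(schedule):
    if current_step <= entry.end_at_step:  # attribute access; no unused `cond` binding
        target_index = current
        break
assert target_index == 1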
diff --git a/modules/safe.py b/modules/safe.py
index 2d5b972f..1e791c5b 100644
--- a/modules/safe.py
+++ b/modules/safe.py
@@ -95,11 +95,11 @@ def check_pt(filename, extra_handler):
except zipfile.BadZipfile:
- # if it's not a zip file, it's an olf pytorch format, with five objects written to pickle
+ # if it's not a zip file, it's an old pytorch format, with five objects written to pickle
with open(filename, "rb") as file:
unpickler = RestrictedUnpickler(file)
unpickler.extra_handler = extra_handler
- for i in range(5):
+ for _ in range(5):
unpickler.load()
diff --git a/modules/scripts.py b/modules/scripts.py
index d945b89f..0c12ebd5 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -231,7 +231,7 @@ def load_scripts():
syspath = sys.path
def register_scripts_from_module(module):
- for key, script_class in module.__dict__.items():
+ for script_class in module.__dict__.values():
if type(script_class) != type:
continue
@@ -295,9 +295,9 @@ class ScriptRunner:
auto_processing_scripts = scripts_auto_postprocessing.create_auto_preprocessing_script_data()
- for script_class, path, basedir, script_module in auto_processing_scripts + scripts_data:
- script = script_class()
- script.filename = path
+ for script_data in auto_processing_scripts + scripts_data:
+ script = script_data.script_class()
+ script.filename = script_data.path
script.is_txt2img = not is_img2img
script.is_img2img = is_img2img
@@ -492,7 +492,7 @@ class ScriptRunner:
module = script_loading.load_module(script.filename)
cache[filename] = module
- for key, script_class in module.__dict__.items():
+ for script_class in module.__dict__.values():
if type(script_class) == type and issubclass(script_class, Script):
self.scripts[si] = script_class()
self.scripts[si].filename = filename
diff --git a/modules/scripts_postprocessing.py b/modules/scripts_postprocessing.py
index b11568c0..6751406c 100644
--- a/modules/scripts_postprocessing.py
+++ b/modules/scripts_postprocessing.py
@@ -66,9 +66,9 @@ class ScriptPostprocessingRunner:
def initialize_scripts(self, scripts_data):
self.scripts = []
- for script_class, path, basedir, script_module in scripts_data:
- script: ScriptPostprocessing = script_class()
- script.filename = path
+ for script_data in scripts_data:
+ script: ScriptPostprocessing = script_data.script_class()
+ script.filename = script_data.path
if script.name == "Simple Upscale":
continue
@@ -124,7 +124,7 @@ class ScriptPostprocessingRunner:
script_args = args[script.args_from:script.args_to]
process_args = {}
- for (name, component), value in zip(script.controls.items(), script_args):
+ for (name, component), value in zip(script.controls.items(), script_args): # noqa B007
process_args[name] = value
script.process(pp, **process_args)
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index 9fa5c5c5..c0c350f6 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -223,7 +223,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
self.hijack.fixes = [x.fixes for x in batch_chunk]
for fixes in self.hijack.fixes:
- for position, embedding in fixes:
+ for position, embedding in fixes: # noqa: B007
used_embeddings[embedding.name] = embedding
z = self.process_tokens(tokens, multipliers)
diff --git a/modules/shared.py b/modules/shared.py
index e2691585..913c9e63 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -211,7 +211,7 @@ class OptionInfo:
def options_section(section_identifier, options_dict):
- for k, v in options_dict.items():
+ for v in options_dict.values():
v.section = section_identifier
return options_dict
@@ -579,7 +579,7 @@ class Options:
section_ids = {}
settings_items = self.data_labels.items()
- for k, item in settings_items:
+ for _, item in settings_items:
if item.section not in section_ids:
section_ids[item.section] = len(section_ids)
@@ -740,7 +740,7 @@ def walk_files(path, allowed_extensions=None):
if allowed_extensions is not None:
allowed_extensions = set(allowed_extensions)
- for root, dirs, files in os.walk(path):
+ for root, _, files in os.walk(path):
for filename in files:
if allowed_extensions is not None:
_, ext = os.path.splitext(filename)
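`os.walk` yields `(dirpath, dirnames, filenames)` triples, and the `_` placeholder marks whichever element a given walker ignores. A minimal sketch of the `walk_files` shape above:

import os

def walk_files(path):
    # os.walk yields (dirpath, dirnames, filenames); dirnames is unused here
    for root, _, files in os.walk(path):
        for filename in files:
            yield os.path.join(root, filename)

print(next(walk_files("."), None))  # first file found under the current directory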
diff --git a/modules/textual_inversion/learn_schedule.py b/modules/textual_inversion/learn_schedule.py
index fda58898..c56bea45 100644
--- a/modules/textual_inversion/learn_schedule.py
+++ b/modules/textual_inversion/learn_schedule.py
@@ -12,7 +12,7 @@ class LearnScheduleIterator:
self.it = 0
self.maxit = 0
try:
- for i, pair in enumerate(pairs):
+ for pair in pairs:
if not pair.strip():
continue
tmp = pair.split(':')
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index c37bb2ad..47035332 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -29,7 +29,7 @@ textual_inversion_templates = {}
def list_textual_inversion_templates():
textual_inversion_templates.clear()
- for root, dirs, fns in os.walk(shared.cmd_opts.textual_inversion_templates_dir):
+ for root, _, fns in os.walk(shared.cmd_opts.textual_inversion_templates_dir):
for fn in fns:
path = os.path.join(root, fn)
@@ -198,7 +198,7 @@ class EmbeddingDatabase:
if not os.path.isdir(embdir.path):
return
- for root, dirs, fns in os.walk(embdir.path, followlinks=True):
+ for root, _, fns in os.walk(embdir.path, followlinks=True):
for fn in fns:
try:
fullfn = os.path.join(root, fn)
@@ -215,7 +215,7 @@ class EmbeddingDatabase:
def load_textual_inversion_embeddings(self, force_reload=False):
if not force_reload:
need_reload = False
- for path, embdir in self.embedding_dirs.items():
+ for embdir in self.embedding_dirs.values():
if embdir.has_changed():
need_reload = True
break
@@ -228,7 +228,7 @@ class EmbeddingDatabase:
self.skipped_embeddings.clear()
self.expected_shape = self.get_expected_shape()
- for path, embdir in self.embedding_dirs.items():
+ for embdir in self.embedding_dirs.values():
self.load_from_dir(embdir)
embdir.update()
@@ -469,7 +469,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
try:
sd_hijack_checkpoint.add()
- for i in range((steps-initial_step) * gradient_step):
+ for _ in range((steps-initial_step) * gradient_step):
if scheduler.finished:
break
if shared.state.interrupted:
diff --git a/modules/ui.py b/modules/ui.py
index 84d661b2..83bfb7d8 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -416,7 +416,7 @@ def create_sampler_and_steps_selection(choices, tabname):
def ordered_ui_categories():
user_order = {x.strip(): i * 2 + 1 for i, x in enumerate(shared.opts.ui_reorder.split(","))}
- for i, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)):
+ for _, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)):
yield category
@@ -1646,7 +1646,7 @@ def create_ui():
with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Row(elem_id="quicksettings", variant="compact"):
- for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])):
+ for _i, k, _item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])):
component = create_setting_component(k, is_quicksettings=True)
component_dict[k] = component
@@ -1673,7 +1673,7 @@ def create_ui():
outputs=[text_settings, result],
)
- for i, k, item in quicksettings_list:
+ for _i, k, _item in quicksettings_list:
component = component_dict[k]
info = opts.data_labels[k]
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index ab585917..2fd82e8e 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -90,7 +90,7 @@ class ExtraNetworksPage:
subdirs = {}
for parentdir in [os.path.abspath(x) for x in self.allowed_directories_for_previews()]:
- for root, dirs, files in os.walk(parentdir):
+ for root, dirs, _ in os.walk(parentdir):
for dirname in dirs:
x = os.path.join(root, dirname)
diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py
index cac73c51..f05049e1 100644
--- a/modules/ui_tempdir.py
+++ b/modules/ui_tempdir.py
@@ -72,7 +72,7 @@ def cleanup_tmpdr():
if temp_dir == "" or not os.path.isdir(temp_dir):
return
- for root, dirs, files in os.walk(temp_dir, topdown=False):
+ for root, _, files in os.walk(temp_dir, topdown=False):
for name in files:
_, extension = os.path.splitext(name)
if extension != ".png":
diff --git a/modules/upscaler.py b/modules/upscaler.py
index e145be30..8acb6e96 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -55,7 +55,7 @@ class Upscaler:
dest_w = int(img.width * scale)
dest_h = int(img.height * scale)
- for i in range(3):
+ for _ in range(3):
shape = (img.width, img.height)
img = self.do_upscale(img, selected_model)
diff --git a/pyproject.toml b/pyproject.toml
index 346a0cde..c88907be 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -20,7 +20,6 @@ ignore = [
"I001", # Import block is un-sorted or un-formatted
"C901", # Function is too complex
"C408", # Rewrite as a literal
- "B007", # Loop control variable not used within loop body
]
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index 149bc85f..27af5ff6 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -156,7 +156,7 @@ class Script(scripts.Script):
images = []
all_prompts = []
infotexts = []
- for n, args in enumerate(jobs):
+ for args in jobs:
state.job = f"{state.job_no + 1} out of {state.job_count}"
copy_p = copy.copy(p)
diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py
index d873a09c..0b1d3096 100644
--- a/scripts/sd_upscale.py
+++ b/scripts/sd_upscale.py
@@ -56,7 +56,7 @@ class Script(scripts.Script):
work = []
- for y, h, row in grid.tiles:
+ for _y, _h, row in grid.tiles:
for tiledata in row:
work.append(tiledata[2])
@@ -85,7 +85,7 @@ class Script(scripts.Script):
work_results += processed.images
image_index = 0
- for y, h, row in grid.tiles:
+ for _y, _h, row in grid.tiles:
for tiledata in row:
tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (p.width, p.height))
image_index += 1
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index 332e0ecd..38a20381 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -704,7 +704,7 @@ class Script(scripts.Script):
if not include_sub_grids:
# Done with sub-grids, drop all related information:
- for sg in range(z_count):
+ for _ in range(z_count):
del processed.images[1]
del processed.all_prompts[1]
del processed.all_seeds[1]
--
From 3ec7b705c78b7aca9569c92a419837352c7a4ec6 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 10 May 2023 21:21:32 +0300
Subject: suggestions and fixes from the PR
---
extensions-builtin/Lora/scripts/lora_script.py | 2 +-
extensions-builtin/SwinIR/swinir_model_arch.py | 6 +-----
extensions-builtin/SwinIR/swinir_model_arch_v2.py | 11 ++---------
modules/codeformer/codeformer_arch.py | 7 ++-----
modules/hypernetworks/ui.py | 4 ++--
modules/models/diffusion/uni_pc/uni_pc.py | 4 ++--
modules/scripts_postprocessing.py | 2 +-
modules/sd_hijack_clip.py | 2 +-
modules/shared.py | 2 +-
modules/textual_inversion/textual_inversion.py | 3 +--
modules/ui.py | 4 ++--
11 files changed, 16 insertions(+), 31 deletions(-)
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index b70e2de7..13d297d7 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -53,7 +53,7 @@ script_callbacks.on_infotext_pasted(lora.infotext_pasted)
shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
- "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(lora.available_loras)}, refresh=lora.list_available_loras),
+ "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None", *lora.available_loras]}, refresh=lora.list_available_loras),
}))
diff --git a/extensions-builtin/SwinIR/swinir_model_arch.py b/extensions-builtin/SwinIR/swinir_model_arch.py
index de195d9b..73e37cfa 100644
--- a/extensions-builtin/SwinIR/swinir_model_arch.py
+++ b/extensions-builtin/SwinIR/swinir_model_arch.py
@@ -644,17 +644,13 @@ class SwinIR(nn.Module):
"""
def __init__(self, img_size=64, patch_size=1, in_chans=3,
- embed_dim=96, depths=None, num_heads=None,
+ embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6),
window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv',
**kwargs):
super(SwinIR, self).__init__()
-
- depths = depths or [6, 6, 6, 6]
- num_heads = num_heads or [6, 6, 6, 6]
-
num_in_ch = in_chans
num_out_ch = in_chans
num_feat = 64
diff --git a/extensions-builtin/SwinIR/swinir_model_arch_v2.py b/extensions-builtin/SwinIR/swinir_model_arch_v2.py
index 15777af9..3ca9be78 100644
--- a/extensions-builtin/SwinIR/swinir_model_arch_v2.py
+++ b/extensions-builtin/SwinIR/swinir_model_arch_v2.py
@@ -74,12 +74,9 @@ class WindowAttention(nn.Module):
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.,
- pretrained_window_size=None):
+ pretrained_window_size=(0, 0)):
super().__init__()
-
- pretrained_window_size = pretrained_window_size or [0, 0]
-
self.dim = dim
self.window_size = window_size # Wh, Ww
self.pretrained_window_size = pretrained_window_size
@@ -701,17 +698,13 @@ class Swin2SR(nn.Module):
"""
def __init__(self, img_size=64, patch_size=1, in_chans=3,
- embed_dim=96, depths=None, num_heads=None,
+ embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6),
window_size=7, mlp_ratio=4., qkv_bias=True,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv',
**kwargs):
super(Swin2SR, self).__init__()
-
- depths = depths or [6, 6, 6, 6]
- num_heads = num_heads or [6, 6, 6, 6]
-
num_in_ch = in_chans
num_out_ch = in_chans
num_feat = 64
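Promoting the defaults into the signature as tuples is safe because tuples are immutable; the `arg = arg or [...]` dance existed only to dodge Python's shared-mutable-default pitfall. A sketch of both styles and the bug they avoid:

def old_style(depths=None):
    depths = depths or [6, 6, 6, 6]     # guard against a shared mutable default
    return depths

def new_style(depths=(6, 6, 6, 6)):     # tuples are immutable: no guard needed
    return depths

assert list(new_style()) == old_style()

def buggy(history=[]):                  # the pitfall both styles avoid
    history.append(1)
    return history

assert buggy() == [1]
assert buggy() == [1, 1]                # the default list is shared across calls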
diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py
index ff1c0b4b..45c70f84 100644
--- a/modules/codeformer/codeformer_arch.py
+++ b/modules/codeformer/codeformer_arch.py
@@ -161,13 +161,10 @@ class Fuse_sft_block(nn.Module):
class CodeFormer(VQAutoEncoder):
def __init__(self, dim_embd=512, n_head=8, n_layers=9,
codebook_size=1024, latent_size=256,
- connect_list=None,
- fix_modules=None):
+ connect_list=('32', '64', '128', '256'),
+ fix_modules=('quantize', 'generator')):
super(CodeFormer, self).__init__(512, 64, [1, 2, 2, 4, 4, 8], 'nearest',2, [16], codebook_size)
- connect_list = connect_list or ['32', '64', '128', '256']
- fix_modules = fix_modules or ['quantize', 'generator']
-
if fix_modules is not None:
for module in fix_modules:
for param in getattr(self, module).parameters():
diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py
index e3f9eb13..8b6255e2 100644
--- a/modules/hypernetworks/ui.py
+++ b/modules/hypernetworks/ui.py
@@ -5,13 +5,13 @@ import modules.hypernetworks.hypernetwork
from modules import devices, sd_hijack, shared
not_available = ["hardswish", "multiheadattention"]
-keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available]
+keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict if x not in not_available]
def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None):
filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure)
- return gr.Dropdown.update(choices=sorted(shared.hypernetworks.keys())), f"Created: {filename}", ""
+ return gr.Dropdown.update(choices=sorted(shared.hypernetworks)), f"Created: {filename}", ""
def train_hypernetwork(*args):
diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py
index f6c49f87..a227b947 100644
--- a/modules/models/diffusion/uni_pc/uni_pc.py
+++ b/modules/models/diffusion/uni_pc/uni_pc.py
@@ -275,8 +275,8 @@ def model_wrapper(
A noise prediction model that accepts the noised data and the continuous time as the inputs.
"""
- model_kwargs = model_kwargs or []
- classifier_kwargs = classifier_kwargs or []
+ model_kwargs = model_kwargs or {}
+ classifier_kwargs = classifier_kwargs or {}
def get_model_input_time(t_continuous):
"""
diff --git a/modules/scripts_postprocessing.py b/modules/scripts_postprocessing.py
index 6751406c..bac1335d 100644
--- a/modules/scripts_postprocessing.py
+++ b/modules/scripts_postprocessing.py
@@ -124,7 +124,7 @@ class ScriptPostprocessingRunner:
script_args = args[script.args_from:script.args_to]
process_args = {}
- for (name, component), value in zip(script.controls.items(), script_args): # noqa B007
+ for (name, _component), value in zip(script.controls.items(), script_args):
process_args[name] = value
script.process(pp, **process_args)
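This hunk and the sd_hijack_clip one below replace `# noqa: B007` suppressions with an underscore prefix. Ruff's B007 rule flags loop variables that the body never uses; renaming them with a leading underscore documents the intent and satisfies the linter. A small stand-alone example:

    controls = {"scale": 2, "visible": True}
    values = [4, False]

    process_args = {}
    for (name, _component), value in zip(controls.items(), values):
        process_args[name] = value       # _component is deliberately ignored

    # process_args == {"scale": 4, "visible": False}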
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index c0c350f6..cc6e8c21 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -223,7 +223,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
self.hijack.fixes = [x.fixes for x in batch_chunk]
for fixes in self.hijack.fixes:
- for position, embedding in fixes: # noqa: B007
+ for _position, embedding in fixes:
used_embeddings[embedding.name] = embedding
z = self.process_tokens(tokens, multipliers)
diff --git a/modules/shared.py b/modules/shared.py
index 913c9e63..ac67adc0 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -381,7 +381,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), {
"extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks (px)"),
"extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks (px)"),
"extra_networks_add_text_separator": OptionInfo(" ", "Extra text to add before <...> when adding extra network to prompt"),
- "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(hypernetworks.keys())}, refresh=reload_hypernetworks),
+ "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", hypernetworks]}, refresh=reload_hypernetworks),
}))
options_templates.update(options_section(('ui', "User interface"), {
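The rewritten option relies on iterable unpacking inside a list display: `["None", *hypernetworks]` spreads the dict's keys after the literal element, matching the old `["None"] + list(hypernetworks.keys())` form:

    hypernetworks = {"anime": object(), "photo": object()}

    ["None", *hypernetworks]                  # ['None', 'anime', 'photo']
    ["None"] + list(hypernetworks.keys())     # same result, more verbose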
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 47035332..9e1b2b9a 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -166,8 +166,7 @@ class EmbeddingDatabase:
# textual inversion embeddings
if 'string_to_param' in data:
param_dict = data['string_to_param']
- if hasattr(param_dict, '_parameters'):
- param_dict = param_dict._parameters # fix for torch 1.12.1 loading saved file from torch 1.11
+ param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11
assert len(param_dict) == 1, 'embedding file has multiple terms in it'
emb = next(iter(param_dict.items()))[1]
# diffuser concepts
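The three-argument `getattr(obj, name, default)` collapses the old hasattr-then-branch pair into one expression: it returns the attribute when it exists and the fallback otherwise, leaving `param_dict` untouched on torch versions that store a plain dict. A stand-in illustration:

    class Wrapped:
        _parameters = {"token": "tensor"}

    param_dict = Wrapped()
    param_dict = getattr(param_dict, '_parameters', param_dict)   # -> {"token": "tensor"}

    plain = {"token": "tensor"}
    plain = getattr(plain, '_parameters', plain)                  # no attribute; dict unchanged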
diff --git a/modules/ui.py b/modules/ui.py
index 83bfb7d8..7ee99473 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1230,8 +1230,8 @@ def create_ui():
train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name")
- train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=list(shared.hypernetworks.keys()))
- create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks.keys())}, "refresh_train_hypernetwork_name")
+ train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks))
+ create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name")
with FormRow():
embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate")
--
cgit v1.2.3
From 8aa87c564a79965013715d56a5f90d2a34d5d6ee Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 10 May 2023 23:41:08 +0300
Subject: add UI to edit defaults

allow setting defaults for elements in extensions' tabs
fix a problem with ESRGAN upscalers disappearing after UI reload
implicit change: HTML element id for train tab from tab_ti to tab_train (will this break things?)
---
modules/modelloader.py | 27 +++----
modules/ui.py | 122 +++++------------------------
modules/ui_loadsave.py | 208 +++++++++++++++++++++++++++++++++++++++++++++++++
style.css | 4 +
webui.py | 6 +-
5 files changed, 242 insertions(+), 125 deletions(-)
create mode 100644 modules/ui_loadsave.py
diff --git a/modules/modelloader.py b/modules/modelloader.py
index 25612bf8..2a479bcb 100644
--- a/modules/modelloader.py
+++ b/modules/modelloader.py
@@ -116,20 +116,6 @@ def move_files(src_path: str, dest_path: str, ext_filter: str = None):
pass
-builtin_upscaler_classes = []
-forbidden_upscaler_classes = set()
-
-
-def list_builtin_upscalers():
- builtin_upscaler_classes.clear()
- builtin_upscaler_classes.extend(Upscaler.__subclasses__())
-
-def forbid_loaded_nonbuiltin_upscalers():
- for cls in Upscaler.__subclasses__():
- if cls not in builtin_upscaler_classes:
- forbidden_upscaler_classes.add(cls)
-
-
def load_upscalers():
# We can only do this 'magic' method to dynamically load upscalers if they are referenced,
# so we'll try to import any _model.py files before looking in __subclasses__
@@ -145,10 +131,17 @@ def load_upscalers():
datas = []
commandline_options = vars(shared.cmd_opts)
- for cls in Upscaler.__subclasses__():
- if cls in forbidden_upscaler_classes:
- continue
+ # some upscaler classes will not go away after reloading their modules, and we'll end
+ # up with two copies of those classes. The newest copy will always be the last in the list,
+ # so we go from end to beginning and ignore duplicates
+ used_classes = {}
+ for cls in reversed(Upscaler.__subclasses__()):
+ classname = str(cls)
+ if classname not in used_classes:
+ used_classes[classname] = cls
+
+ for cls in reversed(used_classes.values()):
name = cls.__name__
cmd_name = f"{name.lower().replace('upscaler', '')}_models_path"
scaler = cls(commandline_options.get(cmd_name, None))
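The comment above explains the dedup strategy: `Upscaler.__subclasses__()` lists classes in registration order, so the newest copy of a re-imported class comes last. Walking the list in reverse and keeping the first occurrence of each class name keeps only the newest copies, and a second `reversed()` restores the original order. A minimal sketch of the same pattern (hypothetical classes; the redefinition stands in for a module reload):

    class Upscaler:
        pass

    class ESRGAN(Upscaler):
        pass

    stale = ESRGAN             # keep the old class alive, as a leftover module would

    class ESRGAN(Upscaler):    # re-registration after a "reload"
        pass

    used_classes = {}
    for cls in reversed(Upscaler.__subclasses__()):
        if str(cls) not in used_classes:
            used_classes[str(cls)] = cls     # first hit from the end == newest copy

    survivors = list(reversed(used_classes.values()))
    assert survivors == [ESRGAN] and survivors[0] is not stale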
diff --git a/modules/ui.py b/modules/ui.py
index 7ee99473..1efb656a 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -13,7 +13,7 @@ import numpy as np
from PIL import Image, PngImagePlugin # noqa: F401
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
-from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress
+from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave
from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
from modules.paths import script_path, data_path
@@ -86,16 +86,6 @@ def send_gradio_gallery_to_image(x):
return None
return image_from_url_text(x[0])
-def visit(x, func, path=""):
- if hasattr(x, 'children'):
- if isinstance(x, gr.Tabs) and x.elem_id is not None:
- # Tabs element can't have a label, have to use elem_id instead
- func(f"{path}/Tabs@{x.elem_id}", x)
- for c in x.children:
- visit(c, func, path)
- elif x.label is not None:
- func(f"{path}/{x.label}", x)
-
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
@@ -1471,6 +1461,8 @@ def create_ui():
return res
+ loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file)
+
components = []
component_dict = {}
shared.settings_components = component_dict
@@ -1558,6 +1550,9 @@ def create_ui():
current_row.__exit__()
current_tab.__exit__()
+ with gr.TabItem("Defaults", id="defaults", elem_id="settings_tab_defaults"):
+ loadsave.create_ui()
+
with gr.TabItem("Actions", id="actions", elem_id="settings_tab_actions"):
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
download_localization = gr.Button(value='Download localization template', elem_id="download_localization")
@@ -1631,7 +1626,7 @@ def create_ui():
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(modelmerger_interface, "Checkpoint Merger", "modelmerger"),
- (train_interface, "Train", "ti"),
+ (train_interface, "Train", "train"),
]
interfaces += script_callbacks.ui_tabs_callback()
@@ -1659,6 +1654,16 @@ def create_ui():
with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"):
interface.render()
+ for interface, _label, ifid in interfaces:
+ if ifid in ["extensions", "settings"]:
+ continue
+
+ loadsave.add_block(interface, ifid)
+
+ loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs)
+
+ loadsave.setup_ui()
+
if os.path.exists(os.path.join(script_path, "notification.mp3")):
gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
@@ -1747,97 +1752,8 @@ def create_ui():
]
)
- ui_config_file = cmd_opts.ui_config_file
- ui_settings = {}
- settings_count = len(ui_settings)
- error_loading = False
-
- try:
- if os.path.exists(ui_config_file):
- with open(ui_config_file, "r", encoding="utf8") as file:
- ui_settings = json.load(file)
- except Exception:
- error_loading = True
- print("Error loading settings:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
-
- def loadsave(path, x):
- def apply_field(obj, field, condition=None, init_field=None):
- key = f"{path}/{field}"
-
- if getattr(obj, 'custom_script_source', None) is not None:
- key = f"customscript/{obj.custom_script_source}/{key}"
-
- if getattr(obj, 'do_not_save_to_config', False):
- return
-
- saved_value = ui_settings.get(key, None)
- if saved_value is None:
- ui_settings[key] = getattr(obj, field)
- elif condition and not condition(saved_value):
- pass
-
- # this warning is generally not useful;
- # print(f'Warning: Bad ui setting value: {key}: {saved_value}; Default value "{getattr(obj, field)}" will be used instead.')
- else:
- setattr(obj, field, saved_value)
- if init_field is not None:
- init_field(saved_value)
-
- if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number, gr.Dropdown, ToolButton] and x.visible:
- apply_field(x, 'visible')
-
- if type(x) == gr.Slider:
- apply_field(x, 'value')
- apply_field(x, 'minimum')
- apply_field(x, 'maximum')
- apply_field(x, 'step')
-
- if type(x) == gr.Radio:
- apply_field(x, 'value', lambda val: val in x.choices)
-
- if type(x) == gr.Checkbox:
- apply_field(x, 'value')
-
- if type(x) == gr.Textbox:
- apply_field(x, 'value')
-
- if type(x) == gr.Number:
- apply_field(x, 'value')
-
- if type(x) == gr.Dropdown:
- def check_dropdown(val):
- if getattr(x, 'multiselect', False):
- return all(value in x.choices for value in val)
- else:
- return val in x.choices
-
- apply_field(x, 'value', check_dropdown, getattr(x, 'init_field', None))
-
- def check_tab_id(tab_id):
- tab_items = list(filter(lambda e: isinstance(e, gr.TabItem), x.children))
- if type(tab_id) == str:
- tab_ids = [t.id for t in tab_items]
- return tab_id in tab_ids
- elif type(tab_id) == int:
- return tab_id >= 0 and tab_id < len(tab_items)
- else:
- return False
-
- if type(x) == gr.Tabs:
- apply_field(x, 'selected', check_tab_id)
-
- visit(txt2img_interface, loadsave, "txt2img")
- visit(img2img_interface, loadsave, "img2img")
- visit(extras_interface, loadsave, "extras")
- visit(modelmerger_interface, loadsave, "modelmerger")
- visit(train_interface, loadsave, "train")
-
- loadsave(f"webui/Tabs@{tabs.elem_id}", tabs)
-
- if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
- with open(ui_config_file, "w", encoding="utf8") as file:
- json.dump(ui_settings, file, indent=4)
+ loadsave.dump_defaults()
+ demo.ui_loadsave = loadsave
# Required as a workaround for change() event not triggering when loading values from ui-config.json
interp_description.value = update_interp_description(interp_method.value)
diff --git a/modules/ui_loadsave.py b/modules/ui_loadsave.py
new file mode 100644
index 00000000..728fec9e
--- /dev/null
+++ b/modules/ui_loadsave.py
@@ -0,0 +1,208 @@
+import json
+import os
+
+import gradio as gr
+
+from modules import errors
+from modules.ui_components import ToolButton
+
+
+class UiLoadsave:
+ """allows saving and restorig default values for gradio components"""
+
+ def __init__(self, filename):
+ self.filename = filename
+ self.ui_settings = {}
+ self.component_mapping = {}
+ self.error_loading = False
+ self.finalized_ui = False
+
+ self.ui_defaults_view = None
+ self.ui_defaults_apply = None
+ self.ui_defaults_review = None
+
+ try:
+ if os.path.exists(self.filename):
+ self.ui_settings = self.read_from_file()
+ except Exception as e:
+ self.error_loading = True
+ errors.display(e, "loading settings")
+
+ def add_component(self, path, x):
+ """adds component to the registry of tracked components"""
+
+ assert not self.finalized_ui
+
+ def apply_field(obj, field, condition=None, init_field=None):
+ key = f"{path}/{field}"
+
+ if getattr(obj, 'custom_script_source', None) is not None:
+ key = f"customscript/{obj.custom_script_source}/{key}"
+
+ if getattr(obj, 'do_not_save_to_config', False):
+ return
+
+ saved_value = self.ui_settings.get(key, None)
+ if saved_value is None:
+ self.ui_settings[key] = getattr(obj, field)
+ elif condition and not condition(saved_value):
+ pass
+ else:
+ setattr(obj, field, saved_value)
+ if init_field is not None:
+ init_field(saved_value)
+
+ if field == 'value' and key not in self.component_mapping:
+ self.component_mapping[key] = x
+
+ if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number, gr.Dropdown, ToolButton] and x.visible:
+ apply_field(x, 'visible')
+
+ if type(x) == gr.Slider:
+ apply_field(x, 'value')
+ apply_field(x, 'minimum')
+ apply_field(x, 'maximum')
+ apply_field(x, 'step')
+
+ if type(x) == gr.Radio:
+ apply_field(x, 'value', lambda val: val in x.choices)
+
+ if type(x) == gr.Checkbox:
+ apply_field(x, 'value')
+
+ if type(x) == gr.Textbox:
+ apply_field(x, 'value')
+
+ if type(x) == gr.Number:
+ apply_field(x, 'value')
+
+ if type(x) == gr.Dropdown:
+ def check_dropdown(val):
+ if getattr(x, 'multiselect', False):
+ return all(value in x.choices for value in val)
+ else:
+ return val in x.choices
+
+ apply_field(x, 'value', check_dropdown, getattr(x, 'init_field', None))
+
+ def check_tab_id(tab_id):
+ tab_items = list(filter(lambda e: isinstance(e, gr.TabItem), x.children))
+ if type(tab_id) == str:
+ tab_ids = [t.id for t in tab_items]
+ return tab_id in tab_ids
+ elif type(tab_id) == int:
+ return 0 <= tab_id < len(tab_items)
+ else:
+ return False
+
+ if type(x) == gr.Tabs:
+ apply_field(x, 'selected', check_tab_id)
+
+ def add_block(self, x, path=""):
+ """adds all components inside a gradio block x to the registry of tracked components"""
+
+ if hasattr(x, 'children'):
+ if isinstance(x, gr.Tabs) and x.elem_id is not None:
+ # Tabs element can't have a label, have to use elem_id instead
+ self.add_component(f"{path}/Tabs@{x.elem_id}", x)
+ for c in x.children:
+ self.add_block(c, path)
+ elif x.label is not None:
+ self.add_component(f"{path}/{x.label}", x)
+
+ def read_from_file(self):
+ with open(self.filename, "r", encoding="utf8") as file:
+ return json.load(file)
+
+ def write_to_file(self, current_ui_settings):
+ with open(self.filename, "w", encoding="utf8") as file:
+ json.dump(current_ui_settings, file, indent=4)
+
+ def dump_defaults(self):
+ """saves default values to a file unless tjhe file is present and there was an error loading default values at start"""
+
+ if self.error_loading and os.path.exists(self.filename):
+ return
+
+ self.write_to_file(self.ui_settings)
+
+ def iter_changes(self, current_ui_settings, values):
+ """
+ given a dictionary with defaults from a file and current values from gradio elements, returns
+ an iterator over tuples of values that are not the same between the file and the current;
+ tuple contents are: path, old value, new value
+ """
+
+ for (path, component), new_value in zip(self.component_mapping.items(), values):
+ old_value = current_ui_settings.get(path)
+
+ choices = getattr(component, 'choices', None)
+ if isinstance(new_value, int) and choices:
+ if new_value >= len(choices):
+ continue
+
+ new_value = choices[new_value]
+
+ if new_value == old_value:
+ continue
+
+ if (old_value is None and new_value == '') or new_value == []:
+ continue
+
+ yield path, old_value, new_value
+
+ def ui_view(self, *values):
+ text = ["Path | Old value | New value |
"]
+
+ for path, old_value, new_value in self.iter_changes(self.read_from_file(), values):
+ if old_value is None:
+ old_value = "None"
+
+ text.append(f"{path} | {old_value} | {new_value} |
")
+
+ if len(text) == 1:
+ text.append("No changes |
")
+
+ text.append("")
+ return "".join(text)
+
+ def ui_apply(self, *values):
+ num_changed = 0
+
+ current_ui_settings = self.read_from_file()
+
+ for path, _, new_value in self.iter_changes(current_ui_settings.copy(), values):
+ num_changed += 1
+ current_ui_settings[path] = new_value
+
+ if num_changed == 0:
+ return "No changes."
+
+ self.write_to_file(current_ui_settings)
+
+ return f"Wrote {num_changed} changes."
+
+ def create_ui(self):
+ """creates ui elements for editing defaults UI, without adding any logic to them"""
+
+ gr.HTML(
+ f"This page allows you to change default values in UI elements on other tabs.
"
+ f"Make your changes, press 'View changes' to review the changed default values,
"
+ f"then press 'Apply' to write them to {self.filename}.
"
+ f"New defaults will apply after you restart the UI.
"
+ )
+
+ with gr.Row():
+ self.ui_defaults_view = gr.Button(value='View changes', elem_id="ui_defaults_view", variant="secondary")
+ self.ui_defaults_apply = gr.Button(value='Apply', elem_id="ui_defaults_apply", variant="primary")
+
+ self.ui_defaults_review = gr.HTML("")
+
+ def setup_ui(self):
+ """adds logic to elements created with create_ui; all add_block class must be made before this"""
+
+ assert not self.finalized_ui
+ self.finalized_ui = True
+
+ self.ui_defaults_view.click(fn=self.ui_view, inputs=list(self.component_mapping.values()), outputs=[self.ui_defaults_review])
+ self.ui_defaults_apply.click(fn=self.ui_apply, inputs=list(self.component_mapping.values()), outputs=[self.ui_defaults_review])
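In create_ui above, the class is used in three phases: components are registered (directly or via add_block), the Defaults tab is built and wired, and the defaults file is written. A condensed, hypothetical usage sketch (the component and path key are invented for illustration):

    import gradio as gr
    from modules import ui_loadsave

    loadsave = ui_loadsave.UiLoadsave("ui-config.json")

    with gr.Blocks() as demo:
        slider = gr.Slider(label="Steps", minimum=1, maximum=150, value=20)

        # phase 1: track the component under a path key such as "txt2img/Steps"
        loadsave.add_component("txt2img/Steps", slider)

        # phase 2: build the Defaults tab and attach the View/Apply handlers
        loadsave.create_ui()
        loadsave.setup_ui()

    # phase 3: persist defaults unless loading them failed at startup
    loadsave.dump_defaults()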
diff --git a/style.css b/style.css
index b823c7dd..4ac919b5 100644
--- a/style.css
+++ b/style.css
@@ -414,6 +414,10 @@ table.settings-value-table td{
max-width: 36em;
}
+.ui-defaults-none{
+ color: #aaa !important;
+}
+
/* live preview */
.progressDiv{
position: relative;
diff --git a/webui.py b/webui.py
index 5d5e80b5..2eecfaa0 100644
--- a/webui.py
+++ b/webui.py
@@ -181,14 +181,11 @@ def initialize():
gfpgan.setup_model(cmd_opts.gfpgan_models_path)
startup_timer.record("setup gfpgan")
- modelloader.list_builtin_upscalers()
- startup_timer.record("list builtin upscalers")
-
modules.scripts.load_scripts()
startup_timer.record("load scripts")
modelloader.load_upscalers()
- #startup_timer.record("load upscalers") #Is this necessary? I don't know.
+ startup_timer.record("load upscalers")
modules.sd_vae.refresh_vae_list()
startup_timer.record("refresh VAE")
@@ -388,7 +385,6 @@ def webui():
localization.list_localizations(cmd_opts.localizations_dir)
- modelloader.forbid_loaded_nonbuiltin_upscalers()
modules.scripts.reload_scripts()
startup_timer.record("load scripts")
--
cgit v1.2.3
From 49a55b410b66b7dd9be9335d8a2e3a71e4f8b15c Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Thu, 11 May 2023 18:28:15 +0300
Subject: Autofix Ruff W (not W605) (mostly whitespace)
---
extensions-builtin/LDSR/ldsr_model_arch.py | 4 +-
extensions-builtin/LDSR/sd_hijack_ddpm_v1.py | 6 +--
extensions-builtin/ScuNET/scunet_model_arch.py | 2 +-
extensions-builtin/SwinIR/scripts/swinir_model.py | 2 +-
extensions-builtin/SwinIR/swinir_model_arch.py | 2 +-
extensions-builtin/SwinIR/swinir_model_arch_v2.py | 52 +++++++++++------------
launch.py | 2 +-
modules/api/api.py | 4 +-
modules/api/models.py | 2 +-
modules/cmd_args.py | 2 +-
modules/codeformer/codeformer_arch.py | 14 +++---
modules/codeformer/vqgan_arch.py | 38 ++++++++---------
modules/esrgan_model_arch.py | 4 +-
modules/extras.py | 2 +-
modules/hypernetworks/hypernetwork.py | 12 +++---
modules/images.py | 2 +-
modules/mac_specific.py | 4 +-
modules/masking.py | 2 +-
modules/ngrok.py | 4 +-
modules/processing.py | 2 +-
modules/script_callbacks.py | 14 +++---
modules/sd_hijack.py | 12 +++---
modules/sd_hijack_optimizations.py | 32 +++++++-------
modules/sd_models.py | 4 +-
modules/sd_samplers_kdiffusion.py | 18 ++++----
modules/sub_quadratic_attention.py | 2 +-
modules/textual_inversion/dataset.py | 4 +-
modules/textual_inversion/preprocess.py | 2 +-
modules/textual_inversion/textual_inversion.py | 16 +++----
modules/ui.py | 18 ++++----
modules/ui_extensions.py | 6 +--
modules/xlmr.py | 6 +--
pyproject.toml | 5 ++-
scripts/img2imgalt.py | 14 +++---
scripts/loopback.py | 8 ++--
scripts/poor_mans_outpainting.py | 2 +-
scripts/prompt_matrix.py | 2 +-
scripts/prompts_from_file.py | 4 +-
scripts/sd_upscale.py | 2 +-
39 files changed, 167 insertions(+), 166 deletions(-)
diff --git a/extensions-builtin/LDSR/ldsr_model_arch.py b/extensions-builtin/LDSR/ldsr_model_arch.py
index 2173de79..7f450086 100644
--- a/extensions-builtin/LDSR/ldsr_model_arch.py
+++ b/extensions-builtin/LDSR/ldsr_model_arch.py
@@ -130,11 +130,11 @@ class LDSR:
im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)
else:
print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)")
-
+
# pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts
pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size
im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))
-
+
logs = self.run(model["model"], im_padded, diffusion_steps, eta)
sample = logs["sample"]
diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
index 57c02d12..631a08ef 100644
--- a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
+++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
@@ -460,7 +460,7 @@ class LatentDiffusionV1(DDPMV1):
self.instantiate_cond_stage(cond_stage_config)
self.cond_stage_forward = cond_stage_forward
self.clip_denoised = False
- self.bbox_tokenizer = None
+ self.bbox_tokenizer = None
self.restarted_from_ckpt = False
if ckpt_path is not None:
@@ -792,7 +792,7 @@ class LatentDiffusionV1(DDPMV1):
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
# 2. apply model loop over last dim
- if isinstance(self.first_stage_model, VQModelInterface):
+ if isinstance(self.first_stage_model, VQModelInterface):
output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
force_not_quantize=predict_cids or force_not_quantize)
for i in range(z.shape[-1])]
@@ -890,7 +890,7 @@ class LatentDiffusionV1(DDPMV1):
if hasattr(self, "split_input_params"):
assert len(cond) == 1 # todo can only deal with one conditioning atm
- assert not return_ids
+ assert not return_ids
ks = self.split_input_params["ks"] # eg. (128, 128)
stride = self.split_input_params["stride"] # eg. (64, 64)
diff --git a/extensions-builtin/ScuNET/scunet_model_arch.py b/extensions-builtin/ScuNET/scunet_model_arch.py
index 8028918a..b51a8806 100644
--- a/extensions-builtin/ScuNET/scunet_model_arch.py
+++ b/extensions-builtin/ScuNET/scunet_model_arch.py
@@ -265,4 +265,4 @@ class SCUNet(nn.Module):
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
\ No newline at end of file
+ nn.init.constant_(m.weight, 1.0)
diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py
index 55dd94ab..0ba50487 100644
--- a/extensions-builtin/SwinIR/scripts/swinir_model.py
+++ b/extensions-builtin/SwinIR/scripts/swinir_model.py
@@ -150,7 +150,7 @@ def inference(img, model, tile, tile_overlap, window_size, scale):
for w_idx in w_idx_list:
if state.interrupted or state.skipped:
break
-
+
in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile]
out_patch = model(in_patch)
out_patch_mask = torch.ones_like(out_patch)
diff --git a/extensions-builtin/SwinIR/swinir_model_arch.py b/extensions-builtin/SwinIR/swinir_model_arch.py
index 73e37cfa..93b93274 100644
--- a/extensions-builtin/SwinIR/swinir_model_arch.py
+++ b/extensions-builtin/SwinIR/swinir_model_arch.py
@@ -805,7 +805,7 @@ class SwinIR(nn.Module):
def forward(self, x):
H, W = x.shape[2:]
x = self.check_image_size(x)
-
+
self.mean = self.mean.type_as(x)
x = (x - self.mean) * self.img_range
diff --git a/extensions-builtin/SwinIR/swinir_model_arch_v2.py b/extensions-builtin/SwinIR/swinir_model_arch_v2.py
index 3ca9be78..dad22cca 100644
--- a/extensions-builtin/SwinIR/swinir_model_arch_v2.py
+++ b/extensions-builtin/SwinIR/swinir_model_arch_v2.py
@@ -241,7 +241,7 @@ class SwinTransformerBlock(nn.Module):
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
-
+
def calculate_mask(self, x_size):
# calculate attention mask for SW-MSA
H, W = x_size
@@ -263,7 +263,7 @@ class SwinTransformerBlock(nn.Module):
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
- return attn_mask
+ return attn_mask
def forward(self, x, x_size):
H, W = x_size
@@ -288,7 +288,7 @@ class SwinTransformerBlock(nn.Module):
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
else:
attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
-
+
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
@@ -369,7 +369,7 @@ class PatchMerging(nn.Module):
H, W = self.input_resolution
flops = (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
flops += H * W * self.dim // 2
- return flops
+ return flops
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
@@ -447,7 +447,7 @@ class BasicLayer(nn.Module):
nn.init.constant_(blk.norm1.weight, 0)
nn.init.constant_(blk.norm2.bias, 0)
nn.init.constant_(blk.norm2.weight, 0)
-
+
class PatchEmbed(nn.Module):
r""" Image to Patch Embedding
Args:
@@ -492,7 +492,7 @@ class PatchEmbed(nn.Module):
flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
if self.norm is not None:
flops += Ho * Wo * self.embed_dim
- return flops
+ return flops
class RSTB(nn.Module):
"""Residual Swin Transformer Block (RSTB).
@@ -531,7 +531,7 @@ class RSTB(nn.Module):
num_heads=num_heads,
window_size=window_size,
mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias,
+ qkv_bias=qkv_bias,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path,
norm_layer=norm_layer,
@@ -622,7 +622,7 @@ class Upsample(nn.Sequential):
else:
raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
super(Upsample, self).__init__(*m)
-
+
class Upsample_hf(nn.Sequential):
"""Upsample module.
@@ -642,7 +642,7 @@ class Upsample_hf(nn.Sequential):
m.append(nn.PixelShuffle(3))
else:
raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
- super(Upsample_hf, self).__init__(*m)
+ super(Upsample_hf, self).__init__(*m)
class UpsampleOneStep(nn.Sequential):
@@ -667,8 +667,8 @@ class UpsampleOneStep(nn.Sequential):
H, W = self.input_resolution
flops = H * W * self.num_feat * 3 * 9
return flops
-
-
+
+
class Swin2SR(nn.Module):
r""" Swin2SR
@@ -699,7 +699,7 @@ class Swin2SR(nn.Module):
def __init__(self, img_size=64, patch_size=1, in_chans=3,
embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6),
- window_size=7, mlp_ratio=4., qkv_bias=True,
+ window_size=7, mlp_ratio=4., qkv_bias=True,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv',
@@ -764,7 +764,7 @@ class Swin2SR(nn.Module):
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
- qkv_bias=qkv_bias,
+ qkv_bias=qkv_bias,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results
norm_layer=norm_layer,
@@ -776,7 +776,7 @@ class Swin2SR(nn.Module):
)
self.layers.append(layer)
-
+
if self.upsampler == 'pixelshuffle_hf':
self.layers_hf = nn.ModuleList()
for i_layer in range(self.num_layers):
@@ -787,7 +787,7 @@ class Swin2SR(nn.Module):
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
- qkv_bias=qkv_bias,
+ qkv_bias=qkv_bias,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results
norm_layer=norm_layer,
@@ -799,7 +799,7 @@ class Swin2SR(nn.Module):
)
self.layers_hf.append(layer)
-
+
self.norm = norm_layer(self.num_features)
# build the last conv layer in deep feature extraction
@@ -829,10 +829,10 @@ class Swin2SR(nn.Module):
self.conv_aux = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
self.conv_after_aux = nn.Sequential(
nn.Conv2d(3, num_feat, 3, 1, 1),
- nn.LeakyReLU(inplace=True))
+ nn.LeakyReLU(inplace=True))
self.upsample = Upsample(upscale, num_feat)
self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
-
+
elif self.upsampler == 'pixelshuffle_hf':
self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
nn.LeakyReLU(inplace=True))
@@ -846,7 +846,7 @@ class Swin2SR(nn.Module):
nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
nn.LeakyReLU(inplace=True))
self.conv_last_hf = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
-
+
elif self.upsampler == 'pixelshuffledirect':
# for lightweight SR (to save parameters)
self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
@@ -905,7 +905,7 @@ class Swin2SR(nn.Module):
x = self.patch_unembed(x, x_size)
return x
-
+
def forward_features_hf(self, x):
x_size = (x.shape[2], x.shape[3])
x = self.patch_embed(x)
@@ -919,7 +919,7 @@ class Swin2SR(nn.Module):
x = self.norm(x) # B L C
x = self.patch_unembed(x, x_size)
- return x
+ return x
def forward(self, x):
H, W = x.shape[2:]
@@ -951,7 +951,7 @@ class Swin2SR(nn.Module):
x = self.conv_after_body(self.forward_features(x)) + x
x_before = self.conv_before_upsample(x)
x_out = self.conv_last(self.upsample(x_before))
-
+
x_hf = self.conv_first_hf(x_before)
x_hf = self.conv_after_body_hf(self.forward_features_hf(x_hf)) + x_hf
x_hf = self.conv_before_upsample_hf(x_hf)
@@ -977,15 +977,15 @@ class Swin2SR(nn.Module):
x_first = self.conv_first(x)
res = self.conv_after_body(self.forward_features(x_first)) + x_first
x = x + self.conv_last(res)
-
+
x = x / self.img_range + self.mean
if self.upsampler == "pixelshuffle_aux":
return x[:, :, :H*self.upscale, :W*self.upscale], aux
-
+
elif self.upsampler == "pixelshuffle_hf":
x_out = x_out / self.img_range + self.mean
return x_out[:, :, :H*self.upscale, :W*self.upscale], x[:, :, :H*self.upscale, :W*self.upscale], x_hf[:, :, :H*self.upscale, :W*self.upscale]
-
+
else:
return x[:, :, :H*self.upscale, :W*self.upscale]
@@ -1014,4 +1014,4 @@ if __name__ == '__main__':
x = torch.randn((1, 3, height, width))
x = model(x)
- print(x.shape)
\ No newline at end of file
+ print(x.shape)
diff --git a/launch.py b/launch.py
index 670af87c..62b33f14 100644
--- a/launch.py
+++ b/launch.py
@@ -327,7 +327,7 @@ def prepare_environment():
if args.update_all_extensions:
git_pull_recursive(extensions_dir)
-
+
if "--exit" in sys.argv:
print("Exiting because of --exit argument")
exit(0)
diff --git a/modules/api/api.py b/modules/api/api.py
index 594fa655..165985c3 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -227,7 +227,7 @@ class Api:
script_idx = script_name_to_index(script_name, script_runner.selectable_scripts)
script = script_runner.selectable_scripts[script_idx]
return script, script_idx
-
+
def get_scripts_list(self):
t2ilist = [str(title.lower()) for title in scripts.scripts_txt2img.titles]
i2ilist = [str(title.lower()) for title in scripts.scripts_img2img.titles]
@@ -237,7 +237,7 @@ class Api:
def get_script(self, script_name, script_runner):
if script_name is None or script_name == "":
return None, None
-
+
script_idx = script_name_to_index(script_name, script_runner.scripts)
return script_runner.scripts[script_idx]
diff --git a/modules/api/models.py b/modules/api/models.py
index 4d291076..006ccdb7 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -289,4 +289,4 @@ class MemoryResponse(BaseModel):
class ScriptsList(BaseModel):
txt2img: list = Field(default=None,title="Txt2img", description="Titles of scripts (txt2img)")
- img2img: list = Field(default=None,title="Img2img", description="Titles of scripts (img2img)")
\ No newline at end of file
+ img2img: list = Field(default=None,title="Img2img", description="Titles of scripts (img2img)")
diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index e01ca655..f4a4ab36 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -102,4 +102,4 @@ parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gra
parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers")
parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False)
parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False)
-parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy')
\ No newline at end of file
+parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy')
diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py
index 45c70f84..12db6814 100644
--- a/modules/codeformer/codeformer_arch.py
+++ b/modules/codeformer/codeformer_arch.py
@@ -119,7 +119,7 @@ class TransformerSALayer(nn.Module):
tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
-
+
# self attention
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
@@ -159,7 +159,7 @@ class Fuse_sft_block(nn.Module):
@ARCH_REGISTRY.register()
class CodeFormer(VQAutoEncoder):
- def __init__(self, dim_embd=512, n_head=8, n_layers=9,
+ def __init__(self, dim_embd=512, n_head=8, n_layers=9,
codebook_size=1024, latent_size=256,
connect_list=('32', '64', '128', '256'),
fix_modules=('quantize', 'generator')):
@@ -179,14 +179,14 @@ class CodeFormer(VQAutoEncoder):
self.feat_emb = nn.Linear(256, self.dim_embd)
# transformer
- self.ft_layers = nn.Sequential(*[TransformerSALayer(embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0)
+ self.ft_layers = nn.Sequential(*[TransformerSALayer(embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0)
for _ in range(self.n_layers)])
# logits_predict head
self.idx_pred_layer = nn.Sequential(
nn.LayerNorm(dim_embd),
nn.Linear(dim_embd, codebook_size, bias=False))
-
+
self.channels = {
'16': 512,
'32': 256,
@@ -221,7 +221,7 @@ class CodeFormer(VQAutoEncoder):
enc_feat_dict = {}
out_list = [self.fuse_encoder_block[f_size] for f_size in self.connect_list]
for i, block in enumerate(self.encoder.blocks):
- x = block(x)
+ x = block(x)
if i in out_list:
enc_feat_dict[str(x.shape[-1])] = x.clone()
@@ -266,11 +266,11 @@ class CodeFormer(VQAutoEncoder):
fuse_list = [self.fuse_generator_block[f_size] for f_size in self.connect_list]
for i, block in enumerate(self.generator.blocks):
- x = block(x)
+ x = block(x)
if i in fuse_list: # fuse after i-th block
f_size = str(x.shape[-1])
if w>0:
x = self.fuse_convs_dict[f_size](enc_feat_dict[f_size].detach(), x, w)
out = x
# logits doesn't need softmax before cross_entropy loss
- return out, logits, lq_feat
\ No newline at end of file
+ return out, logits, lq_feat
diff --git a/modules/codeformer/vqgan_arch.py b/modules/codeformer/vqgan_arch.py
index b24a0394..09ee6660 100644
--- a/modules/codeformer/vqgan_arch.py
+++ b/modules/codeformer/vqgan_arch.py
@@ -13,7 +13,7 @@ from basicsr.utils.registry import ARCH_REGISTRY
def normalize(in_channels):
return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
-
+
@torch.jit.script
def swish(x):
@@ -210,15 +210,15 @@ class AttnBlock(nn.Module):
# compute attention
b, c, h, w = q.shape
q = q.reshape(b, c, h*w)
- q = q.permute(0, 2, 1)
+ q = q.permute(0, 2, 1)
k = k.reshape(b, c, h*w)
- w_ = torch.bmm(q, k)
+ w_ = torch.bmm(q, k)
w_ = w_ * (int(c)**(-0.5))
w_ = F.softmax(w_, dim=2)
# attend to values
v = v.reshape(b, c, h*w)
- w_ = w_.permute(0, 2, 1)
+ w_ = w_.permute(0, 2, 1)
h_ = torch.bmm(v, w_)
h_ = h_.reshape(b, c, h, w)
@@ -270,18 +270,18 @@ class Encoder(nn.Module):
def forward(self, x):
for block in self.blocks:
x = block(x)
-
+
return x
class Generator(nn.Module):
def __init__(self, nf, emb_dim, ch_mult, res_blocks, img_size, attn_resolutions):
super().__init__()
- self.nf = nf
- self.ch_mult = ch_mult
+ self.nf = nf
+ self.ch_mult = ch_mult
self.num_resolutions = len(self.ch_mult)
self.num_res_blocks = res_blocks
- self.resolution = img_size
+ self.resolution = img_size
self.attn_resolutions = attn_resolutions
self.in_channels = emb_dim
self.out_channels = 3
@@ -315,24 +315,24 @@ class Generator(nn.Module):
blocks.append(nn.Conv2d(block_in_ch, self.out_channels, kernel_size=3, stride=1, padding=1))
self.blocks = nn.ModuleList(blocks)
-
+
def forward(self, x):
for block in self.blocks:
x = block(x)
-
+
return x
-
+
@ARCH_REGISTRY.register()
class VQAutoEncoder(nn.Module):
def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=None, codebook_size=1024, emb_dim=256,
beta=0.25, gumbel_straight_through=False, gumbel_kl_weight=1e-8, model_path=None):
super().__init__()
logger = get_root_logger()
- self.in_channels = 3
- self.nf = nf
- self.n_blocks = res_blocks
+ self.in_channels = 3
+ self.nf = nf
+ self.n_blocks = res_blocks
self.codebook_size = codebook_size
self.embed_dim = emb_dim
self.ch_mult = ch_mult
@@ -363,11 +363,11 @@ class VQAutoEncoder(nn.Module):
self.kl_weight
)
self.generator = Generator(
- self.nf,
+ self.nf,
self.embed_dim,
- self.ch_mult,
- self.n_blocks,
- self.resolution,
+ self.ch_mult,
+ self.n_blocks,
+ self.resolution,
self.attn_resolutions
)
@@ -432,4 +432,4 @@ class VQGANDiscriminator(nn.Module):
raise ValueError('Wrong params!')
def forward(self, x):
- return self.main(x)
\ No newline at end of file
+ return self.main(x)
diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py
index 4de9dd8d..2b9888ba 100644
--- a/modules/esrgan_model_arch.py
+++ b/modules/esrgan_model_arch.py
@@ -105,7 +105,7 @@ class ResidualDenseBlock_5C(nn.Module):
Modified options that can be used:
- "Partial Convolution based Padding" arXiv:1811.11718
- "Spectral normalization" arXiv:1802.05957
- - "ICASSP 2020 - ESRGAN+ : Further Improving ESRGAN" N. C.
+ - "ICASSP 2020 - ESRGAN+ : Further Improving ESRGAN" N. C.
{Rakotonirina} and A. {Rasoanaivo}
"""
@@ -170,7 +170,7 @@ class GaussianNoise(nn.Module):
scale = self.sigma * x.detach() if self.is_relative_detach else self.sigma * x
sampled_noise = self.noise.repeat(*x.size()).normal_() * scale
x = x + sampled_noise
- return x
+ return x
def conv1x1(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
diff --git a/modules/extras.py b/modules/extras.py
index eb4f0b42..bdf9b3b7 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -199,7 +199,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
result_is_inpainting_model = True
else:
theta_0[key] = theta_func2(a, b, multiplier)
-
+
theta_0[key] = to_half(theta_0[key], save_as_half)
shared.state.sampling_step += 1
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 38ef074f..570b5603 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -540,7 +540,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
return hypernetwork, filename
scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
-
+
clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else None
if clip_grad:
clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False)
@@ -593,7 +593,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
print(e)
scaler = torch.cuda.amp.GradScaler()
-
+
batch_size = ds.batch_size
gradient_step = ds.gradient_step
# n steps = batch_size * gradient_step * n image processed
@@ -636,7 +636,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
if clip_grad:
clip_grad_sched.step(hypernetwork.step)
-
+
with devices.autocast():
x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
if use_weight:
@@ -657,14 +657,14 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
_loss_step += loss.item()
scaler.scale(loss).backward()
-
+
# go back until we reach gradient accumulation steps
if (j + 1) % gradient_step != 0:
continue
loss_logging.append(_loss_step)
if clip_grad:
clip_grad(weights, clip_grad_sched.learn_rate)
-
+
scaler.step(optimizer)
scaler.update()
hypernetwork.step += 1
@@ -674,7 +674,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
_loss_step = 0
steps_done = hypernetwork.step + 1
-
+
epoch_num = hypernetwork.step // steps_per_epoch
epoch_step = hypernetwork.step % steps_per_epoch
diff --git a/modules/images.py b/modules/images.py
index 3b8b62d9..b2de3662 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -367,7 +367,7 @@ class FilenameGenerator:
self.seed = seed
self.prompt = prompt
self.image = image
-
+
def hasprompt(self, *args):
lower = self.prompt.lower()
if self.p is None or self.prompt is None:
diff --git a/modules/mac_specific.py b/modules/mac_specific.py
index 5c2f92a1..d74c6b95 100644
--- a/modules/mac_specific.py
+++ b/modules/mac_specific.py
@@ -42,7 +42,7 @@ if has_mps:
# MPS workaround for https://github.com/pytorch/pytorch/issues/79383
CondFunc('torch.Tensor.to', lambda orig_func, self, *args, **kwargs: orig_func(self.contiguous(), *args, **kwargs),
lambda _, self, *args, **kwargs: self.device.type != 'mps' and (args and isinstance(args[0], torch.device) and args[0].type == 'mps' or isinstance(kwargs.get('device'), torch.device) and kwargs['device'].type == 'mps'))
- # MPS workaround for https://github.com/pytorch/pytorch/issues/80800
+ # MPS workaround for https://github.com/pytorch/pytorch/issues/80800
CondFunc('torch.nn.functional.layer_norm', lambda orig_func, *args, **kwargs: orig_func(*([args[0].contiguous()] + list(args[1:])), **kwargs),
lambda _, *args, **kwargs: args and isinstance(args[0], torch.Tensor) and args[0].device.type == 'mps')
# MPS workaround for https://github.com/pytorch/pytorch/issues/90532
@@ -60,4 +60,4 @@ if has_mps:
# MPS workaround for https://github.com/pytorch/pytorch/issues/92311
if platform.processor() == 'i386':
for funcName in ['torch.argmax', 'torch.Tensor.argmax']:
- CondFunc(funcName, lambda _, input, *args, **kwargs: torch.max(input.float() if input.dtype == torch.int64 else input, *args, **kwargs)[1], lambda _, input, *args, **kwargs: input.device.type == 'mps')
\ No newline at end of file
+ CondFunc(funcName, lambda _, input, *args, **kwargs: torch.max(input.float() if input.dtype == torch.int64 else input, *args, **kwargs)[1], lambda _, input, *args, **kwargs: input.device.type == 'mps')
diff --git a/modules/masking.py b/modules/masking.py
index a5c4d2da..be9f84c7 100644
--- a/modules/masking.py
+++ b/modules/masking.py
@@ -4,7 +4,7 @@ from PIL import Image, ImageFilter, ImageOps
def get_crop_region(mask, pad=0):
"""finds a rectangular region that contains all masked ares in an image. Returns (x1, y1, x2, y2) coordinates of the rectangle.
For example, if a user has painted the top-right part of a 512x512 image", the result may be (256, 0, 512, 256)"""
-
+
h, w = mask.shape
crop_left = 0
diff --git a/modules/ngrok.py b/modules/ngrok.py
index 7a7b4b26..67a74e85 100644
--- a/modules/ngrok.py
+++ b/modules/ngrok.py
@@ -13,7 +13,7 @@ def connect(token, port, region):
config = conf.PyngrokConfig(
auth_token=token, region=region
)
-
+
# Guard for existing tunnels
existing = ngrok.get_tunnels(pyngrok_config=config)
if existing:
@@ -24,7 +24,7 @@ def connect(token, port, region):
print(f'ngrok has already been connected to localhost:{port}! URL: {public_url}\n'
'You can use this link after the launch is complete.')
return
-
+
try:
if account is None:
public_url = ngrok.connect(port, pyngrok_config=config, bind_tls=True).public_url
diff --git a/modules/processing.py b/modules/processing.py
index c3932d6b..f902b9df 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -164,7 +164,7 @@ class StableDiffusionProcessing:
self.all_subseeds = None
self.iteration = 0
self.is_hr_pass = False
-
+
@property
def sd_model(self):
diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index 17109732..7d9dd736 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -32,22 +32,22 @@ class CFGDenoiserParams:
def __init__(self, x, image_cond, sigma, sampling_step, total_sampling_steps, text_cond, text_uncond):
self.x = x
"""Latent image representation in the process of being denoised"""
-
+
self.image_cond = image_cond
"""Conditioning image"""
-
+
self.sigma = sigma
"""Current sigma noise step value"""
-
+
self.sampling_step = sampling_step
"""Current Sampling step number"""
-
+
self.total_sampling_steps = total_sampling_steps
"""Total number of sampling steps planned"""
-
+
self.text_cond = text_cond
""" Encoder hidden states of text conditioning from prompt"""
-
+
self.text_uncond = text_uncond
""" Encoder hidden states of text conditioning from negative prompt"""
@@ -240,7 +240,7 @@ def add_callback(callbacks, fun):
callbacks.append(ScriptCallback(filename, fun))
-
+
def remove_current_script_callbacks():
stack = [x for x in inspect.stack() if x.filename != __file__]
filename = stack[0].filename if len(stack) > 0 else 'unknown file'
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index e374aeb8..7e50f1ab 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -34,7 +34,7 @@ def apply_optimizations():
ldm.modules.diffusionmodules.model.nonlinearity = silu
ldm.modules.diffusionmodules.openaimodel.th = sd_hijack_unet.th
-
+
optimization_method = None
can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention) # not everyone has torch 2.x to use sdp
@@ -92,12 +92,12 @@ def fix_checkpoint():
def weighted_loss(sd_model, pred, target, mean=True):
#Calculate the weight normally, but ignore the mean
loss = sd_model._old_get_loss(pred, target, mean=False)
-
+
#Check if we have weights available
weight = getattr(sd_model, '_custom_loss_weight', None)
if weight is not None:
loss *= weight
-
+
#Return the loss, as mean if specified
return loss.mean() if mean else loss
@@ -105,7 +105,7 @@ def weighted_forward(sd_model, x, c, w, *args, **kwargs):
try:
#Temporarily append weights to a place accessible during loss calc
sd_model._custom_loss_weight = w
-
+
#Replace 'get_loss' with a weight-aware one. Otherwise we need to reimplement 'forward' completely
#Keep 'get_loss', but don't overwrite the previous old_get_loss if it's already set
if not hasattr(sd_model, '_old_get_loss'):
@@ -120,7 +120,7 @@ def weighted_forward(sd_model, x, c, w, *args, **kwargs):
del sd_model._custom_loss_weight
except AttributeError:
pass
-
+
#If we have an old loss function, reset the loss function to the original one
if hasattr(sd_model, '_old_get_loss'):
sd_model.get_loss = sd_model._old_get_loss
@@ -184,7 +184,7 @@ class StableDiffusionModelHijack:
def undo_hijack(self, m):
if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation:
- m.cond_stage_model = m.cond_stage_model.wrapped
+ m.cond_stage_model = m.cond_stage_model.wrapped
elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords:
m.cond_stage_model = m.cond_stage_model.wrapped
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index a174bbe1..f00fe55c 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -62,10 +62,10 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None):
end = i + 2
s1 = einsum('b i d, b j d -> b i j', q[i:end], k[i:end])
s1 *= self.scale
-
+
s2 = s1.softmax(dim=-1)
del s1
-
+
r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end])
del s2
del q, k, v
@@ -95,43 +95,43 @@ def split_cross_attention_forward(self, x, context=None, mask=None):
with devices.without_autocast(disable=not shared.opts.upcast_attn):
k_in = k_in * self.scale
-
+
del context, x
-
+
q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in))
del q_in, k_in, v_in
-
+
r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
-
+
mem_free_total = get_available_vram()
-
+
gb = 1024 ** 3
tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size()
modifier = 3 if q.element_size() == 2 else 2.5
mem_required = tensor_size * modifier
steps = 1
-
+
if mem_required > mem_free_total:
steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2)))
# print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
# f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")
-
+
if steps > 64:
max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free')
-
+
slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
for i in range(0, q.shape[1], slice_size):
end = i + slice_size
s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k)
-
+
s2 = s1.softmax(dim=-1, dtype=q.dtype)
del s1
-
+
r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
del s2
-
+
del q, k, v
r1 = r1.to(dtype)
@@ -228,7 +228,7 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None):
with devices.without_autocast(disable=not shared.opts.upcast_attn):
k = k * self.scale
-
+
q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v))
r = einsum_op(q, k, v)
r = r.to(dtype)
@@ -369,7 +369,7 @@ def scaled_dot_product_attention_forward(self, x, context=None, mask=None):
q = q_in.view(batch_size, -1, h, head_dim).transpose(1, 2)
k = k_in.view(batch_size, -1, h, head_dim).transpose(1, 2)
v = v_in.view(batch_size, -1, h, head_dim).transpose(1, 2)
-
+
del q_in, k_in, v_in
dtype = q.dtype
@@ -451,7 +451,7 @@ def cross_attention_attnblock_forward(self, x):
h3 += x
return h3
-
+
def xformers_attnblock_forward(self, x):
try:
h_ = x
diff --git a/modules/sd_models.py b/modules/sd_models.py
index d1e946a5..3316d021 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -165,7 +165,7 @@ def model_hash(filename):
def select_checkpoint():
model_checkpoint = shared.opts.sd_model_checkpoint
-
+
checkpoint_info = checkpoint_alisases.get(model_checkpoint, None)
if checkpoint_info is not None:
return checkpoint_info
@@ -372,7 +372,7 @@ def enable_midas_autodownload():
if not os.path.exists(path):
if not os.path.exists(midas_path):
mkdir(midas_path)
-
+
print(f"Downloading midas model weights for {model_type} to {path}")
request.urlretrieve(midas_urls[model_type], path)
print(f"{model_type} downloaded")
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 2f733cf5..e9e41818 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -93,10 +93,10 @@ class CFGDenoiser(torch.nn.Module):
if shared.sd_model.model.conditioning_key == "crossattn-adm":
image_uncond = torch.zeros_like(image_cond)
- make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": c_crossattn, "c_adm": c_adm}
+ make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": c_crossattn, "c_adm": c_adm}
else:
image_uncond = image_cond
- make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": c_crossattn, "c_concat": [c_concat]}
+ make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": c_crossattn, "c_concat": [c_concat]}
if not is_edit_model:
x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
@@ -316,7 +316,7 @@ class KDiffusionSampler:
sigma_sched = sigmas[steps - t_enc - 1:]
xi = x + noise * sigma_sched[0]
-
+
extra_params_kwargs = self.initialize(p)
parameters = inspect.signature(self.func).parameters
@@ -339,9 +339,9 @@ class KDiffusionSampler:
self.model_wrap_cfg.init_latent = x
self.last_latent = x
extra_args={
- 'cond': conditioning,
- 'image_cond': image_conditioning,
- 'uncond': unconditional_conditioning,
+ 'cond': conditioning,
+ 'image_cond': image_conditioning,
+ 'uncond': unconditional_conditioning,
'cond_scale': p.cfg_scale,
's_min_uncond': self.s_min_uncond
}
@@ -374,9 +374,9 @@ class KDiffusionSampler:
self.last_latent = x
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
- 'cond': conditioning,
- 'image_cond': image_conditioning,
- 'uncond': unconditional_conditioning,
+ 'cond': conditioning,
+ 'image_cond': image_conditioning,
+ 'uncond': unconditional_conditioning,
'cond_scale': p.cfg_scale,
's_min_uncond': self.s_min_uncond
}, disable=False, callback=self.callback_state, **extra_params_kwargs))
diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py
index cc38debd..497568eb 100644
--- a/modules/sub_quadratic_attention.py
+++ b/modules/sub_quadratic_attention.py
@@ -179,7 +179,7 @@ def efficient_dot_product_attention(
chunk_idx,
min(query_chunk_size, q_tokens)
)
-
+
summarize_chunk: SummarizeChunk = partial(_summarize_chunk, scale=scale)
summarize_chunk: SummarizeChunk = partial(checkpoint, summarize_chunk) if use_checkpoint else summarize_chunk
compute_query_chunk_attn: ComputeQueryChunkAttn = partial(
diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py
index 41610e03..b9621fc9 100644
--- a/modules/textual_inversion/dataset.py
+++ b/modules/textual_inversion/dataset.py
@@ -118,7 +118,7 @@ class PersonalizedBase(Dataset):
weight = torch.ones(latent_sample.shape)
else:
weight = None
-
+
if latent_sampling_method == "random":
entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist, weight=weight)
else:
@@ -243,4 +243,4 @@ class BatchLoaderRandom(BatchLoader):
return self
def collate_wrapper_random(batch):
- return BatchLoaderRandom(batch)
\ No newline at end of file
+ return BatchLoaderRandom(batch)
diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py
index d0cad09e..a009d8e8 100644
--- a/modules/textual_inversion/preprocess.py
+++ b/modules/textual_inversion/preprocess.py
@@ -125,7 +125,7 @@ def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, thr
default=None
)
return wh and center_crop(image, *wh)
-
+
def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None):
width = process_width
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 9e1b2b9a..d489ed1e 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -323,16 +323,16 @@ def tensorboard_add(tensorboard_writer, loss, global_step, step, learn_rate, epo
tensorboard_add_scaler(tensorboard_writer, f"Learn rate/train/epoch-{epoch_num}", learn_rate, step)
def tensorboard_add_scaler(tensorboard_writer, tag, value, step):
- tensorboard_writer.add_scalar(tag=tag,
+ tensorboard_writer.add_scalar(tag=tag,
scalar_value=value, global_step=step)
def tensorboard_add_image(tensorboard_writer, tag, pil_image, step):
# Convert a pil image to a torch tensor
img_tensor = torch.as_tensor(np.array(pil_image, copy=True))
- img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0],
+ img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0],
len(pil_image.getbands()))
img_tensor = img_tensor.permute((2, 0, 1))
-
+
tensorboard_writer.add_image(tag, img_tensor, global_step=step)
def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, data_root, template_file, template_filename, steps, save_model_every, create_image_every, log_directory, name="embedding"):
@@ -402,7 +402,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
if initial_step >= steps:
shared.state.textinfo = "Model has already been trained beyond specified max steps"
return embedding, filename
-
+
scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else \
torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \
@@ -412,7 +412,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
# dataset loading may take a while, so input validations and early returns should be done before this
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
old_parallel_processing_allowed = shared.parallel_processing_allowed
-
+
if shared.opts.training_enable_tensorboard:
tensorboard_writer = tensorboard_setup(log_directory)
@@ -439,7 +439,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
optimizer_saved_dict = torch.load(f"{filename}.optim", map_location='cpu')
if embedding.checksum() == optimizer_saved_dict.get('hash', None):
optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
-
+
if optimizer_state_dict is not None:
optimizer.load_state_dict(optimizer_state_dict)
print("Loaded existing optimizer from checkpoint")
@@ -485,7 +485,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
if clip_grad:
clip_grad_sched.step(embedding.step)
-
+
with devices.autocast():
x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
if use_weight:
@@ -513,7 +513,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
# go back until we reach gradient accumulation steps
if (j + 1) % gradient_step != 0:
continue
-
+
if clip_grad:
clip_grad(embedding.vec, clip_grad_sched.learn_rate)
diff --git a/modules/ui.py b/modules/ui.py
index 1efb656a..ff82fff6 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1171,7 +1171,7 @@ def create_ui():
process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight")
process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight")
process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")
-
+
with gr.Column(visible=False) as process_multicrop_col:
gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
with gr.Row():
@@ -1183,7 +1183,7 @@ def create_ui():
with gr.Row():
process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective")
process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold")
-
+
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
@@ -1226,7 +1226,7 @@ def create_ui():
with FormRow():
embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate")
hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate")
-
+
with FormRow():
clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"])
clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False)
@@ -1565,7 +1565,7 @@ def create_ui():
gr.HTML(shared.html("licenses.html"), elem_id="licenses")
gr.Button(value="Show all pages", elem_id="settings_show_all_pages")
-
+
def unload_sd_weights():
modules.sd_models.unload_model_weights()
@@ -1841,15 +1841,15 @@ def versions_html():
return f"""
version: {tag}
- •
+ •
python: {python_version}
- •
+ •
torch: {getattr(torch, '__long_version__',torch.__version__)}
- •
+ •
xformers: {xformers_version}
- •
+ •
gradio: {gr.__version__}
- •
+ •
checkpoint: N/A
"""
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index ed70abe5..af497733 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -467,7 +467,7 @@ def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text="
<td>{html.escape(description)}<p class="info"><span class="date_added">Added: {html.escape(added)}</span></p></td>
<td>{install_code}</td>
-
+
"""
for tag in [x for x in extension_tags if x not in tags]:
@@ -535,9 +535,9 @@ def create_ui():
hide_tags = gr.CheckboxGroup(value=["ads", "localization", "installed"], label="Hide extensions with tags", choices=["script", "ads", "localization", "installed"])
sort_column = gr.Radio(value="newest first", label="Order", choices=["newest first", "oldest first", "a-z", "z-a", "internal order", ], type="index")
- with gr.Row():
+ with gr.Row():
search_extensions_text = gr.Text(label="Search").style(container=False)
-
+
install_result = gr.HTML()
available_extensions_table = gr.HTML()
diff --git a/modules/xlmr.py b/modules/xlmr.py
index e056c3f6..a407a3ca 100644
--- a/modules/xlmr.py
+++ b/modules/xlmr.py
@@ -28,7 +28,7 @@ class BertSeriesModelWithTransformation(BertPreTrainedModel):
config_class = BertSeriesConfig
def __init__(self, config=None, **kargs):
- # modify initialization for autoloading
+ # modify initialization for autoloading
if config is None:
config = XLMRobertaConfig()
config.attention_probs_dropout_prob= 0.1
@@ -74,7 +74,7 @@ class BertSeriesModelWithTransformation(BertPreTrainedModel):
text["attention_mask"] = torch.tensor(
text['attention_mask']).to(device)
features = self(**text)
- return features['projection_state']
+ return features['projection_state']
def forward(
self,
@@ -134,4 +134,4 @@ class BertSeriesModelWithTransformation(BertPreTrainedModel):
class RobertaSeriesModelWithTransformation(BertSeriesModelWithTransformation):
base_model_prefix = 'roberta'
- config_class= RobertaSeriesConfig
\ No newline at end of file
+ config_class= RobertaSeriesConfig
diff --git a/pyproject.toml b/pyproject.toml
index c88907be..d4a1bbf4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,6 +6,7 @@ extend-select = [
"B",
"C",
"I",
+ "W",
]
exclude = [
@@ -20,7 +21,7 @@ ignore = [
"I001", # Import block is un-sorted or un-formatted
"C901", # Function is too complex
"C408", # Rewrite as a literal
-
+ "W605", # invalid escape sequence, messes with some docstrings
]
[tool.ruff.per-file-ignores]
@@ -28,4 +29,4 @@ ignore = [
[tool.ruff.flake8-bugbear]
# Allow default arguments like, e.g., `data: List[str] = fastapi.Query(None)`.
-extend-immutable-calls = ["fastapi.Depends", "fastapi.security.HTTPBasic"]
\ No newline at end of file
+extend-immutable-calls = ["fastapi.Depends", "fastapi.security.HTTPBasic"]
diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py
index bb00fb3f..1e833fa8 100644
--- a/scripts/img2imgalt.py
+++ b/scripts/img2imgalt.py
@@ -149,9 +149,9 @@ class Script(scripts.Script):
sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False, elem_id=self.elem_id("sigma_adjustment"))
return [
- info,
+ info,
override_sampler,
- override_prompt, original_prompt, original_negative_prompt,
+ override_prompt, original_prompt, original_negative_prompt,
override_steps, st,
override_strength,
cfg, randomness, sigma_adjustment,
@@ -191,17 +191,17 @@ class Script(scripts.Script):
self.cache = Cached(rec_noise, cfg, st, lat, original_prompt, original_negative_prompt, sigma_adjustment)
rand_noise = processing.create_random_tensors(p.init_latent.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w, p=p)
-
+
combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5)
-
+
sampler = sd_samplers.create_sampler(p.sampler_name, p.sd_model)
sigmas = sampler.model_wrap.get_sigmas(p.steps)
-
+
noise_dt = combined_noise - (p.init_latent / sigmas[0])
-
+
p.seed = p.seed + 1
-
+
return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning, image_conditioning=p.image_conditioning)
p.sample = sample_extra
diff --git a/scripts/loopback.py b/scripts/loopback.py
index ad6609be..2d5feaf9 100644
--- a/scripts/loopback.py
+++ b/scripts/loopback.py
@@ -14,7 +14,7 @@ class Script(scripts.Script):
def show(self, is_img2img):
return is_img2img
- def ui(self, is_img2img):
+ def ui(self, is_img2img):
loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=self.elem_id("loops"))
final_denoising_strength = gr.Slider(minimum=0, maximum=1, step=0.01, label='Final denoising strength', value=0.5, elem_id=self.elem_id("final_denoising_strength"))
denoising_curve = gr.Dropdown(label="Denoising strength curve", choices=["Aggressive", "Linear", "Lazy"], value="Linear")
@@ -104,7 +104,7 @@ class Script(scripts.Script):
p.seed = processed.seed + 1
p.denoising_strength = calculate_denoising_strength(i + 1)
-
+
if state.skipped:
break
@@ -121,7 +121,7 @@ class Script(scripts.Script):
all_images.append(last_image)
p.inpainting_fill = original_inpainting_fill
-
+
if state.interrupted:
break
@@ -132,7 +132,7 @@ class Script(scripts.Script):
if opts.return_grid:
grids.append(grid)
-
+
all_images = grids + all_images
processed = Processed(p, all_images, initial_seed, initial_info)
diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py
index c0bbecc1..ea0632b6 100644
--- a/scripts/poor_mans_outpainting.py
+++ b/scripts/poor_mans_outpainting.py
@@ -19,7 +19,7 @@ class Script(scripts.Script):
def ui(self, is_img2img):
if not is_img2img:
return None
-
+
pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=self.elem_id("pixels"))
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id=self.elem_id("mask_blur"))
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", elem_id=self.elem_id("inpainting_fill"))
diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py
index fb06beab..88324fe6 100644
--- a/scripts/prompt_matrix.py
+++ b/scripts/prompt_matrix.py
@@ -96,7 +96,7 @@ class Script(scripts.Script):
p.prompt_for_display = positive_prompt
processed = process_images(p)
- grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2))
+ grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2))
grid = images.draw_prompt_matrix(grid, processed.images[0].width, processed.images[0].height, prompt_matrix_parts, margin_size)
processed.images.insert(0, grid)
processed.index_of_first_image = 1
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index 9607077a..2378816f 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -109,7 +109,7 @@ class Script(scripts.Script):
def title(self):
return "Prompts from file or textbox"
- def ui(self, is_img2img):
+ def ui(self, is_img2img):
checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=self.elem_id("checkbox_iterate"))
checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=self.elem_id("checkbox_iterate_batch"))
@@ -166,7 +166,7 @@ class Script(scripts.Script):
proc = process_images(copy_p)
images += proc.images
-
+
if checkbox_iterate:
p.seed = p.seed + (p.batch_size * p.n_iter)
all_prompts += proc.all_prompts
diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py
index 0b1d3096..e614c23b 100644
--- a/scripts/sd_upscale.py
+++ b/scripts/sd_upscale.py
@@ -16,7 +16,7 @@ class Script(scripts.Script):
def show(self, is_img2img):
return is_img2img
- def ui(self, is_img2img):
+ def ui(self, is_img2img):
info = gr.HTML("<p style=\"margin-bottom:0.75em\">Will upscale the image by the selected scale factor; use width and height sliders to set tile size</p>")
overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, elem_id=self.elem_id("overlap"))
scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0, elem_id=self.elem_id("scale_factor"))
--
cgit v1.2.3