From 49db24ce273ebccb598fc13de946d1ac3cad38f8 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 11 May 2023 10:30:29 +0300 Subject: launch.py: Add debugging envvar to see install output --- launch.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index cfc0cffa..136e8855 100644 --- a/launch.py +++ b/launch.py @@ -22,6 +22,9 @@ stored_commit_hash = None stored_git_tag = None dir_repos = "repositories" +# Whether to default to printing command output +default_command_live = (os.environ.get('WEBUI_LAUNCH_LIVE_OUTPUT') == "1") + if 'GRADIO_ANALYTICS_ENABLED' not in os.environ: os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False' @@ -85,7 +88,7 @@ def git_tag(): return stored_git_tag -def run(command, desc=None, errdesc=None, custom_env=None, live=False): +def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_command_live): if desc is not None: print(desc) @@ -135,7 +138,7 @@ def run_python(code, desc=None, errdesc=None): return run(f'"{python}" -c "{code}"', desc, errdesc) -def run_pip(command, desc=None, live=False): +def run_pip(command, desc=None, live=default_command_live): if args.skip_install: return -- cgit v1.2.3 From 875bc270093b4f89b9d0d20a93a5e8ea514b3f6c Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 11 May 2023 10:34:19 +0300 Subject: launch.py: Simplify run() --- launch.py | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index 136e8855..ae75f6fd 100644 --- a/launch.py +++ b/launch.py @@ -88,32 +88,36 @@ def git_tag(): return stored_git_tag -def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_command_live): +def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_command_live) -> str: if desc is not None: print(desc) - if live: - result = subprocess.run(command, shell=True, env=os.environ if custom_env is None else custom_env) - if result.returncode != 0: - raise RuntimeError(f"""{errdesc or 'Error running command'}. -Command: {command} -Error code: {result.returncode}""") + run_kwargs = { + "args": command, + "shell": True, + "env": os.environ if custom_env is None else custom_env, + "encoding": 'utf8', + "errors": 'ignore', + } - return "" + if not live: + run_kwargs["stdout"] = run_kwargs["stderr"] = subprocess.PIPE - result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env) + result = subprocess.run(**run_kwargs) if result.returncode != 0: - - message = f"""{errdesc or 'Error running command'}. 
-Command: {command} -Error code: {result.returncode} -stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else ''} -stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else ''} -""" - raise RuntimeError(message) - - return result.stdout.decode(encoding="utf8", errors="ignore") + error_bits = [ + f"{errdesc or 'Error running command'}.", + f"Command: {command}", + f"Error code: {result.returncode}", + ] + if result.stdout: + error_bits.append(f"stdout: {result.stdout}") + if result.stderr: + error_bits.append(f"stderr: {result.stderr}") + raise RuntimeError("\n".join(error_bits)) + + return (result.stdout or "") def check_run(command): -- cgit v1.2.3 From a09e1e6e18e800191817493253dfc481a07b1868 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 11 May 2023 10:48:48 +0300 Subject: launch.py: Use GitHub archive URLs for gfpgan, clip, openclip instead of git clones --- launch.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index ae75f6fd..fed14d93 100644 --- a/launch.py +++ b/launch.py @@ -248,9 +248,9 @@ def prepare_environment(): requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') - gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") - clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") - openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b") + gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "https://github.com/TencentARC/GFPGAN/archive/8d2447a2d918f8eba5a4a01463fd48e45126a379.zip") + clip_package = os.environ.get('CLIP_PACKAGE', "https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip") + openclip_package = os.environ.get('OPENCLIP_PACKAGE', "https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip") stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git") taming_transformers_repo = os.environ.get('TAMING_TRANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git") -- cgit v1.2.3 From dd3ca9adf7077cb7209b31fd16a40deffc4948ca Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 11 May 2023 10:35:22 +0300 Subject: launch.py: make torch_index_url an envvar --- launch.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index fed14d93..670af87c 100644 --- a/launch.py +++ b/launch.py @@ -244,7 +244,8 @@ def run_extensions_installers(settings_file): def prepare_environment(): - torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url https://download.pytorch.org/whl/cu118") + torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu118") + torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') -- cgit v1.2.3 From 49a55b410b66b7dd9be9335d8a2e3a71e4f8b15c Mon Sep 17 00:00:00 2001 
From: Aarni Koskela Date: Thu, 11 May 2023 18:28:15 +0300 Subject: Autofix Ruff W (not W605) (mostly whitespace) --- extensions-builtin/LDSR/ldsr_model_arch.py | 4 +- extensions-builtin/LDSR/sd_hijack_ddpm_v1.py | 6 +-- extensions-builtin/ScuNET/scunet_model_arch.py | 2 +- extensions-builtin/SwinIR/scripts/swinir_model.py | 2 +- extensions-builtin/SwinIR/swinir_model_arch.py | 2 +- extensions-builtin/SwinIR/swinir_model_arch_v2.py | 52 +++++++++++------------ launch.py | 2 +- modules/api/api.py | 4 +- modules/api/models.py | 2 +- modules/cmd_args.py | 2 +- modules/codeformer/codeformer_arch.py | 14 +++--- modules/codeformer/vqgan_arch.py | 38 ++++++++--------- modules/esrgan_model_arch.py | 4 +- modules/extras.py | 2 +- modules/hypernetworks/hypernetwork.py | 12 +++--- modules/images.py | 2 +- modules/mac_specific.py | 4 +- modules/masking.py | 2 +- modules/ngrok.py | 4 +- modules/processing.py | 2 +- modules/script_callbacks.py | 14 +++--- modules/sd_hijack.py | 12 +++--- modules/sd_hijack_optimizations.py | 32 +++++++------- modules/sd_models.py | 4 +- modules/sd_samplers_kdiffusion.py | 18 ++++---- modules/sub_quadratic_attention.py | 2 +- modules/textual_inversion/dataset.py | 4 +- modules/textual_inversion/preprocess.py | 2 +- modules/textual_inversion/textual_inversion.py | 16 +++---- modules/ui.py | 18 ++++---- modules/ui_extensions.py | 6 +-- modules/xlmr.py | 6 +-- pyproject.toml | 5 ++- scripts/img2imgalt.py | 14 +++--- scripts/loopback.py | 8 ++-- scripts/poor_mans_outpainting.py | 2 +- scripts/prompt_matrix.py | 2 +- scripts/prompts_from_file.py | 4 +- scripts/sd_upscale.py | 2 +- 39 files changed, 167 insertions(+), 166 deletions(-) (limited to 'launch.py') diff --git a/extensions-builtin/LDSR/ldsr_model_arch.py b/extensions-builtin/LDSR/ldsr_model_arch.py index 2173de79..7f450086 100644 --- a/extensions-builtin/LDSR/ldsr_model_arch.py +++ b/extensions-builtin/LDSR/ldsr_model_arch.py @@ -130,11 +130,11 @@ class LDSR: im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS) else: print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)") - + # pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge')) - + logs = self.run(model["model"], im_padded, diffusion_steps, eta) sample = logs["sample"] diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py index 57c02d12..631a08ef 100644 --- a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py @@ -460,7 +460,7 @@ class LatentDiffusionV1(DDPMV1): self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False - self.bbox_tokenizer = None + self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: @@ -792,7 +792,7 @@ class LatentDiffusionV1(DDPMV1): z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. 
apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): + if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] @@ -890,7 +890,7 @@ class LatentDiffusionV1(DDPMV1): if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm - assert not return_ids + assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) diff --git a/extensions-builtin/ScuNET/scunet_model_arch.py b/extensions-builtin/ScuNET/scunet_model_arch.py index 8028918a..b51a8806 100644 --- a/extensions-builtin/ScuNET/scunet_model_arch.py +++ b/extensions-builtin/ScuNET/scunet_model_arch.py @@ -265,4 +265,4 @@ class SCUNet(nn.Module): nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) \ No newline at end of file + nn.init.constant_(m.weight, 1.0) diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py index 55dd94ab..0ba50487 100644 --- a/extensions-builtin/SwinIR/scripts/swinir_model.py +++ b/extensions-builtin/SwinIR/scripts/swinir_model.py @@ -150,7 +150,7 @@ def inference(img, model, tile, tile_overlap, window_size, scale): for w_idx in w_idx_list: if state.interrupted or state.skipped: break - + in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile] out_patch = model(in_patch) out_patch_mask = torch.ones_like(out_patch) diff --git a/extensions-builtin/SwinIR/swinir_model_arch.py b/extensions-builtin/SwinIR/swinir_model_arch.py index 73e37cfa..93b93274 100644 --- a/extensions-builtin/SwinIR/swinir_model_arch.py +++ b/extensions-builtin/SwinIR/swinir_model_arch.py @@ -805,7 +805,7 @@ class SwinIR(nn.Module): def forward(self, x): H, W = x.shape[2:] x = self.check_image_size(x) - + self.mean = self.mean.type_as(x) x = (x - self.mean) * self.img_range diff --git a/extensions-builtin/SwinIR/swinir_model_arch_v2.py b/extensions-builtin/SwinIR/swinir_model_arch_v2.py index 3ca9be78..dad22cca 100644 --- a/extensions-builtin/SwinIR/swinir_model_arch_v2.py +++ b/extensions-builtin/SwinIR/swinir_model_arch_v2.py @@ -241,7 +241,7 @@ class SwinTransformerBlock(nn.Module): attn_mask = None self.register_buffer("attn_mask", attn_mask) - + def calculate_mask(self, x_size): # calculate attention mask for SW-MSA H, W = x_size @@ -263,7 +263,7 @@ class SwinTransformerBlock(nn.Module): attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) - return attn_mask + return attn_mask def forward(self, x, x_size): H, W = x_size @@ -288,7 +288,7 @@ class SwinTransformerBlock(nn.Module): attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C else: attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device)) - + # merge windows attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C @@ -369,7 +369,7 @@ class PatchMerging(nn.Module): H, W = self.input_resolution flops = (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim flops += H * W * self.dim // 2 - return flops + return flops class BasicLayer(nn.Module): """ A basic Swin Transformer layer for one 
stage. @@ -447,7 +447,7 @@ class BasicLayer(nn.Module): nn.init.constant_(blk.norm1.weight, 0) nn.init.constant_(blk.norm2.bias, 0) nn.init.constant_(blk.norm2.weight, 0) - + class PatchEmbed(nn.Module): r""" Image to Patch Embedding Args: @@ -492,7 +492,7 @@ class PatchEmbed(nn.Module): flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1]) if self.norm is not None: flops += Ho * Wo * self.embed_dim - return flops + return flops class RSTB(nn.Module): """Residual Swin Transformer Block (RSTB). @@ -531,7 +531,7 @@ class RSTB(nn.Module): num_heads=num_heads, window_size=window_size, mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, + qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, drop_path=drop_path, norm_layer=norm_layer, @@ -622,7 +622,7 @@ class Upsample(nn.Sequential): else: raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.') super(Upsample, self).__init__(*m) - + class Upsample_hf(nn.Sequential): """Upsample module. @@ -642,7 +642,7 @@ class Upsample_hf(nn.Sequential): m.append(nn.PixelShuffle(3)) else: raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.') - super(Upsample_hf, self).__init__(*m) + super(Upsample_hf, self).__init__(*m) class UpsampleOneStep(nn.Sequential): @@ -667,8 +667,8 @@ class UpsampleOneStep(nn.Sequential): H, W = self.input_resolution flops = H * W * self.num_feat * 3 * 9 return flops - - + + class Swin2SR(nn.Module): r""" Swin2SR @@ -699,7 +699,7 @@ class Swin2SR(nn.Module): def __init__(self, img_size=64, patch_size=1, in_chans=3, embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6), - window_size=7, mlp_ratio=4., qkv_bias=True, + window_size=7, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, ape=False, patch_norm=True, use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv', @@ -764,7 +764,7 @@ class Swin2SR(nn.Module): num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, - qkv_bias=qkv_bias, + qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results norm_layer=norm_layer, @@ -776,7 +776,7 @@ class Swin2SR(nn.Module): ) self.layers.append(layer) - + if self.upsampler == 'pixelshuffle_hf': self.layers_hf = nn.ModuleList() for i_layer in range(self.num_layers): @@ -787,7 +787,7 @@ class Swin2SR(nn.Module): num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, - qkv_bias=qkv_bias, + qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results norm_layer=norm_layer, @@ -799,7 +799,7 @@ class Swin2SR(nn.Module): ) self.layers_hf.append(layer) - + self.norm = norm_layer(self.num_features) # build the last conv layer in deep feature extraction @@ -829,10 +829,10 @@ class Swin2SR(nn.Module): self.conv_aux = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) self.conv_after_aux = nn.Sequential( nn.Conv2d(3, num_feat, 3, 1, 1), - nn.LeakyReLU(inplace=True)) + nn.LeakyReLU(inplace=True)) self.upsample = Upsample(upscale, num_feat) self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) - + elif self.upsampler == 'pixelshuffle_hf': self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)) @@ -846,7 +846,7 @@ class Swin2SR(nn.Module): nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)) 
self.conv_last_hf = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) - + elif self.upsampler == 'pixelshuffledirect': # for lightweight SR (to save parameters) self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch, @@ -905,7 +905,7 @@ class Swin2SR(nn.Module): x = self.patch_unembed(x, x_size) return x - + def forward_features_hf(self, x): x_size = (x.shape[2], x.shape[3]) x = self.patch_embed(x) @@ -919,7 +919,7 @@ class Swin2SR(nn.Module): x = self.norm(x) # B L C x = self.patch_unembed(x, x_size) - return x + return x def forward(self, x): H, W = x.shape[2:] @@ -951,7 +951,7 @@ class Swin2SR(nn.Module): x = self.conv_after_body(self.forward_features(x)) + x x_before = self.conv_before_upsample(x) x_out = self.conv_last(self.upsample(x_before)) - + x_hf = self.conv_first_hf(x_before) x_hf = self.conv_after_body_hf(self.forward_features_hf(x_hf)) + x_hf x_hf = self.conv_before_upsample_hf(x_hf) @@ -977,15 +977,15 @@ class Swin2SR(nn.Module): x_first = self.conv_first(x) res = self.conv_after_body(self.forward_features(x_first)) + x_first x = x + self.conv_last(res) - + x = x / self.img_range + self.mean if self.upsampler == "pixelshuffle_aux": return x[:, :, :H*self.upscale, :W*self.upscale], aux - + elif self.upsampler == "pixelshuffle_hf": x_out = x_out / self.img_range + self.mean return x_out[:, :, :H*self.upscale, :W*self.upscale], x[:, :, :H*self.upscale, :W*self.upscale], x_hf[:, :, :H*self.upscale, :W*self.upscale] - + else: return x[:, :, :H*self.upscale, :W*self.upscale] @@ -1014,4 +1014,4 @@ if __name__ == '__main__': x = torch.randn((1, 3, height, width)) x = model(x) - print(x.shape) \ No newline at end of file + print(x.shape) diff --git a/launch.py b/launch.py index 670af87c..62b33f14 100644 --- a/launch.py +++ b/launch.py @@ -327,7 +327,7 @@ def prepare_environment(): if args.update_all_extensions: git_pull_recursive(extensions_dir) - + if "--exit" in sys.argv: print("Exiting because of --exit argument") exit(0) diff --git a/modules/api/api.py b/modules/api/api.py index 594fa655..165985c3 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -227,7 +227,7 @@ class Api: script_idx = script_name_to_index(script_name, script_runner.selectable_scripts) script = script_runner.selectable_scripts[script_idx] return script, script_idx - + def get_scripts_list(self): t2ilist = [str(title.lower()) for title in scripts.scripts_txt2img.titles] i2ilist = [str(title.lower()) for title in scripts.scripts_img2img.titles] @@ -237,7 +237,7 @@ class Api: def get_script(self, script_name, script_runner): if script_name is None or script_name == "": return None, None - + script_idx = script_name_to_index(script_name, script_runner.scripts) return script_runner.scripts[script_idx] diff --git a/modules/api/models.py b/modules/api/models.py index 4d291076..006ccdb7 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -289,4 +289,4 @@ class MemoryResponse(BaseModel): class ScriptsList(BaseModel): txt2img: list = Field(default=None,title="Txt2img", description="Titles of scripts (txt2img)") - img2img: list = Field(default=None,title="Img2img", description="Titles of scripts (img2img)") \ No newline at end of file + img2img: list = Field(default=None,title="Img2img", description="Titles of scripts (img2img)") diff --git a/modules/cmd_args.py b/modules/cmd_args.py index e01ca655..f4a4ab36 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -102,4 +102,4 @@ parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gra 
parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers") parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False) parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False) -parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy') \ No newline at end of file +parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy') diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py index 45c70f84..12db6814 100644 --- a/modules/codeformer/codeformer_arch.py +++ b/modules/codeformer/codeformer_arch.py @@ -119,7 +119,7 @@ class TransformerSALayer(nn.Module): tgt_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None, query_pos: Optional[Tensor] = None): - + # self attention tgt2 = self.norm1(tgt) q = k = self.with_pos_embed(tgt2, query_pos) @@ -159,7 +159,7 @@ class Fuse_sft_block(nn.Module): @ARCH_REGISTRY.register() class CodeFormer(VQAutoEncoder): - def __init__(self, dim_embd=512, n_head=8, n_layers=9, + def __init__(self, dim_embd=512, n_head=8, n_layers=9, codebook_size=1024, latent_size=256, connect_list=('32', '64', '128', '256'), fix_modules=('quantize', 'generator')): @@ -179,14 +179,14 @@ class CodeFormer(VQAutoEncoder): self.feat_emb = nn.Linear(256, self.dim_embd) # transformer - self.ft_layers = nn.Sequential(*[TransformerSALayer(embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0) + self.ft_layers = nn.Sequential(*[TransformerSALayer(embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0) for _ in range(self.n_layers)]) # logits_predict head self.idx_pred_layer = nn.Sequential( nn.LayerNorm(dim_embd), nn.Linear(dim_embd, codebook_size, bias=False)) - + self.channels = { '16': 512, '32': 256, @@ -221,7 +221,7 @@ class CodeFormer(VQAutoEncoder): enc_feat_dict = {} out_list = [self.fuse_encoder_block[f_size] for f_size in self.connect_list] for i, block in enumerate(self.encoder.blocks): - x = block(x) + x = block(x) if i in out_list: enc_feat_dict[str(x.shape[-1])] = x.clone() @@ -266,11 +266,11 @@ class CodeFormer(VQAutoEncoder): fuse_list = [self.fuse_generator_block[f_size] for f_size in self.connect_list] for i, block in enumerate(self.generator.blocks): - x = block(x) + x = block(x) if i in fuse_list: # fuse after i-th block f_size = str(x.shape[-1]) if w>0: x = self.fuse_convs_dict[f_size](enc_feat_dict[f_size].detach(), x, w) out = x # logits doesn't need softmax before cross_entropy loss - return out, logits, lq_feat \ No newline at end of file + return out, logits, lq_feat diff --git a/modules/codeformer/vqgan_arch.py b/modules/codeformer/vqgan_arch.py index b24a0394..09ee6660 100644 --- a/modules/codeformer/vqgan_arch.py +++ b/modules/codeformer/vqgan_arch.py @@ -13,7 +13,7 @@ from basicsr.utils.registry import ARCH_REGISTRY def normalize(in_channels): return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) - + @torch.jit.script def swish(x): @@ -210,15 +210,15 @@ class AttnBlock(nn.Module): # compute attention b, c, h, w = q.shape q = q.reshape(b, c, h*w) - q = q.permute(0, 2, 1) + q = q.permute(0, 2, 1) k = k.reshape(b, c, h*w) - w_ = torch.bmm(q, k) + w_ = torch.bmm(q, k) w_ = w_ * (int(c)**(-0.5)) w_ = F.softmax(w_, 
dim=2) # attend to values v = v.reshape(b, c, h*w) - w_ = w_.permute(0, 2, 1) + w_ = w_.permute(0, 2, 1) h_ = torch.bmm(v, w_) h_ = h_.reshape(b, c, h, w) @@ -270,18 +270,18 @@ class Encoder(nn.Module): def forward(self, x): for block in self.blocks: x = block(x) - + return x class Generator(nn.Module): def __init__(self, nf, emb_dim, ch_mult, res_blocks, img_size, attn_resolutions): super().__init__() - self.nf = nf - self.ch_mult = ch_mult + self.nf = nf + self.ch_mult = ch_mult self.num_resolutions = len(self.ch_mult) self.num_res_blocks = res_blocks - self.resolution = img_size + self.resolution = img_size self.attn_resolutions = attn_resolutions self.in_channels = emb_dim self.out_channels = 3 @@ -315,24 +315,24 @@ class Generator(nn.Module): blocks.append(nn.Conv2d(block_in_ch, self.out_channels, kernel_size=3, stride=1, padding=1)) self.blocks = nn.ModuleList(blocks) - + def forward(self, x): for block in self.blocks: x = block(x) - + return x - + @ARCH_REGISTRY.register() class VQAutoEncoder(nn.Module): def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=None, codebook_size=1024, emb_dim=256, beta=0.25, gumbel_straight_through=False, gumbel_kl_weight=1e-8, model_path=None): super().__init__() logger = get_root_logger() - self.in_channels = 3 - self.nf = nf - self.n_blocks = res_blocks + self.in_channels = 3 + self.nf = nf + self.n_blocks = res_blocks self.codebook_size = codebook_size self.embed_dim = emb_dim self.ch_mult = ch_mult @@ -363,11 +363,11 @@ class VQAutoEncoder(nn.Module): self.kl_weight ) self.generator = Generator( - self.nf, + self.nf, self.embed_dim, - self.ch_mult, - self.n_blocks, - self.resolution, + self.ch_mult, + self.n_blocks, + self.resolution, self.attn_resolutions ) @@ -432,4 +432,4 @@ class VQGANDiscriminator(nn.Module): raise ValueError('Wrong params!') def forward(self, x): - return self.main(x) \ No newline at end of file + return self.main(x) diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py index 4de9dd8d..2b9888ba 100644 --- a/modules/esrgan_model_arch.py +++ b/modules/esrgan_model_arch.py @@ -105,7 +105,7 @@ class ResidualDenseBlock_5C(nn.Module): Modified options that can be used: - "Partial Convolution based Padding" arXiv:1811.11718 - "Spectral normalization" arXiv:1802.05957 - - "ICASSP 2020 - ESRGAN+ : Further Improving ESRGAN" N. C. + - "ICASSP 2020 - ESRGAN+ : Further Improving ESRGAN" N. C. {Rakotonirina} and A. 
{Rasoanaivo} """ @@ -170,7 +170,7 @@ class GaussianNoise(nn.Module): scale = self.sigma * x.detach() if self.is_relative_detach else self.sigma * x sampled_noise = self.noise.repeat(*x.size()).normal_() * scale x = x + sampled_noise - return x + return x def conv1x1(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) diff --git a/modules/extras.py b/modules/extras.py index eb4f0b42..bdf9b3b7 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -199,7 +199,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ result_is_inpainting_model = True else: theta_0[key] = theta_func2(a, b, multiplier) - + theta_0[key] = to_half(theta_0[key], save_as_half) shared.state.sampling_step += 1 diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 38ef074f..570b5603 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -540,7 +540,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi return hypernetwork, filename scheduler = LearnRateScheduler(learn_rate, steps, initial_step) - + clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else None if clip_grad: clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False) @@ -593,7 +593,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi print(e) scaler = torch.cuda.amp.GradScaler() - + batch_size = ds.batch_size gradient_step = ds.gradient_step # n steps = batch_size * gradient_step * n image processed @@ -636,7 +636,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi if clip_grad: clip_grad_sched.step(hypernetwork.step) - + with devices.autocast(): x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) if use_weight: @@ -657,14 +657,14 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi _loss_step += loss.item() scaler.scale(loss).backward() - + # go back until we reach gradient accumulation steps if (j + 1) % gradient_step != 0: continue loss_logging.append(_loss_step) if clip_grad: clip_grad(weights, clip_grad_sched.learn_rate) - + scaler.step(optimizer) scaler.update() hypernetwork.step += 1 @@ -674,7 +674,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi _loss_step = 0 steps_done = hypernetwork.step + 1 - + epoch_num = hypernetwork.step // steps_per_epoch epoch_step = hypernetwork.step % steps_per_epoch diff --git a/modules/images.py b/modules/images.py index 3b8b62d9..b2de3662 100644 --- a/modules/images.py +++ b/modules/images.py @@ -367,7 +367,7 @@ class FilenameGenerator: self.seed = seed self.prompt = prompt self.image = image - + def hasprompt(self, *args): lower = self.prompt.lower() if self.p is None or self.prompt is None: diff --git a/modules/mac_specific.py b/modules/mac_specific.py index 5c2f92a1..d74c6b95 100644 --- a/modules/mac_specific.py +++ b/modules/mac_specific.py @@ -42,7 +42,7 @@ if has_mps: # MPS workaround for https://github.com/pytorch/pytorch/issues/79383 CondFunc('torch.Tensor.to', lambda orig_func, self, *args, **kwargs: orig_func(self.contiguous(), *args, **kwargs), lambda _, self, *args, **kwargs: self.device.type != 'mps' and (args and isinstance(args[0], torch.device) and args[0].type == 'mps' or isinstance(kwargs.get('device'), torch.device) and 
kwargs['device'].type == 'mps')) - # MPS workaround for https://github.com/pytorch/pytorch/issues/80800 + # MPS workaround for https://github.com/pytorch/pytorch/issues/80800 CondFunc('torch.nn.functional.layer_norm', lambda orig_func, *args, **kwargs: orig_func(*([args[0].contiguous()] + list(args[1:])), **kwargs), lambda _, *args, **kwargs: args and isinstance(args[0], torch.Tensor) and args[0].device.type == 'mps') # MPS workaround for https://github.com/pytorch/pytorch/issues/90532 @@ -60,4 +60,4 @@ if has_mps: # MPS workaround for https://github.com/pytorch/pytorch/issues/92311 if platform.processor() == 'i386': for funcName in ['torch.argmax', 'torch.Tensor.argmax']: - CondFunc(funcName, lambda _, input, *args, **kwargs: torch.max(input.float() if input.dtype == torch.int64 else input, *args, **kwargs)[1], lambda _, input, *args, **kwargs: input.device.type == 'mps') \ No newline at end of file + CondFunc(funcName, lambda _, input, *args, **kwargs: torch.max(input.float() if input.dtype == torch.int64 else input, *args, **kwargs)[1], lambda _, input, *args, **kwargs: input.device.type == 'mps') diff --git a/modules/masking.py b/modules/masking.py index a5c4d2da..be9f84c7 100644 --- a/modules/masking.py +++ b/modules/masking.py @@ -4,7 +4,7 @@ from PIL import Image, ImageFilter, ImageOps def get_crop_region(mask, pad=0): """finds a rectangular region that contains all masked ares in an image. Returns (x1, y1, x2, y2) coordinates of the rectangle. For example, if a user has painted the top-right part of a 512x512 image", the result may be (256, 0, 512, 256)""" - + h, w = mask.shape crop_left = 0 diff --git a/modules/ngrok.py b/modules/ngrok.py index 7a7b4b26..67a74e85 100644 --- a/modules/ngrok.py +++ b/modules/ngrok.py @@ -13,7 +13,7 @@ def connect(token, port, region): config = conf.PyngrokConfig( auth_token=token, region=region ) - + # Guard for existing tunnels existing = ngrok.get_tunnels(pyngrok_config=config) if existing: @@ -24,7 +24,7 @@ def connect(token, port, region): print(f'ngrok has already been connected to localhost:{port}! 
URL: {public_url}\n' 'You can use this link after the launch is complete.') return - + try: if account is None: public_url = ngrok.connect(port, pyngrok_config=config, bind_tls=True).public_url diff --git a/modules/processing.py b/modules/processing.py index c3932d6b..f902b9df 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -164,7 +164,7 @@ class StableDiffusionProcessing: self.all_subseeds = None self.iteration = 0 self.is_hr_pass = False - + @property def sd_model(self): diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 17109732..7d9dd736 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -32,22 +32,22 @@ class CFGDenoiserParams: def __init__(self, x, image_cond, sigma, sampling_step, total_sampling_steps, text_cond, text_uncond): self.x = x """Latent image representation in the process of being denoised""" - + self.image_cond = image_cond """Conditioning image""" - + self.sigma = sigma """Current sigma noise step value""" - + self.sampling_step = sampling_step """Current Sampling step number""" - + self.total_sampling_steps = total_sampling_steps """Total number of sampling steps planned""" - + self.text_cond = text_cond """ Encoder hidden states of text conditioning from prompt""" - + self.text_uncond = text_uncond """ Encoder hidden states of text conditioning from negative prompt""" @@ -240,7 +240,7 @@ def add_callback(callbacks, fun): callbacks.append(ScriptCallback(filename, fun)) - + def remove_current_script_callbacks(): stack = [x for x in inspect.stack() if x.filename != __file__] filename = stack[0].filename if len(stack) > 0 else 'unknown file' diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index e374aeb8..7e50f1ab 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -34,7 +34,7 @@ def apply_optimizations(): ldm.modules.diffusionmodules.model.nonlinearity = silu ldm.modules.diffusionmodules.openaimodel.th = sd_hijack_unet.th - + optimization_method = None can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention) # not everyone has torch 2.x to use sdp @@ -92,12 +92,12 @@ def fix_checkpoint(): def weighted_loss(sd_model, pred, target, mean=True): #Calculate the weight normally, but ignore the mean loss = sd_model._old_get_loss(pred, target, mean=False) - + #Check if we have weights available weight = getattr(sd_model, '_custom_loss_weight', None) if weight is not None: loss *= weight - + #Return the loss, as mean if specified return loss.mean() if mean else loss @@ -105,7 +105,7 @@ def weighted_forward(sd_model, x, c, w, *args, **kwargs): try: #Temporarily append weights to a place accessible during loss calc sd_model._custom_loss_weight = w - + #Replace 'get_loss' with a weight-aware one. 
Otherwise we need to reimplement 'forward' completely #Keep 'get_loss', but don't overwrite the previous old_get_loss if it's already set if not hasattr(sd_model, '_old_get_loss'): @@ -120,7 +120,7 @@ def weighted_forward(sd_model, x, c, w, *args, **kwargs): del sd_model._custom_loss_weight except AttributeError: pass - + #If we have an old loss function, reset the loss function to the original one if hasattr(sd_model, '_old_get_loss'): sd_model.get_loss = sd_model._old_get_loss @@ -184,7 +184,7 @@ class StableDiffusionModelHijack: def undo_hijack(self, m): if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation: - m.cond_stage_model = m.cond_stage_model.wrapped + m.cond_stage_model = m.cond_stage_model.wrapped elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords: m.cond_stage_model = m.cond_stage_model.wrapped diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index a174bbe1..f00fe55c 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -62,10 +62,10 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None): end = i + 2 s1 = einsum('b i d, b j d -> b i j', q[i:end], k[i:end]) s1 *= self.scale - + s2 = s1.softmax(dim=-1) del s1 - + r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end]) del s2 del q, k, v @@ -95,43 +95,43 @@ def split_cross_attention_forward(self, x, context=None, mask=None): with devices.without_autocast(disable=not shared.opts.upcast_attn): k_in = k_in * self.scale - + del context, x - + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in)) del q_in, k_in, v_in - + r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) - + mem_free_total = get_available_vram() - + gb = 1024 ** 3 tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() modifier = 3 if q.element_size() == 2 else 2.5 mem_required = tensor_size * modifier steps = 1 - + if mem_required > mem_free_total: steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2))) # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB " # f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}") - + if steps > 64: max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64 raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). 
' f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free') - + slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] for i in range(0, q.shape[1], slice_size): end = i + slice_size s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) - + s2 = s1.softmax(dim=-1, dtype=q.dtype) del s1 - + r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v) del s2 - + del q, k, v r1 = r1.to(dtype) @@ -228,7 +228,7 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None): with devices.without_autocast(disable=not shared.opts.upcast_attn): k = k * self.scale - + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v)) r = einsum_op(q, k, v) r = r.to(dtype) @@ -369,7 +369,7 @@ def scaled_dot_product_attention_forward(self, x, context=None, mask=None): q = q_in.view(batch_size, -1, h, head_dim).transpose(1, 2) k = k_in.view(batch_size, -1, h, head_dim).transpose(1, 2) v = v_in.view(batch_size, -1, h, head_dim).transpose(1, 2) - + del q_in, k_in, v_in dtype = q.dtype @@ -451,7 +451,7 @@ def cross_attention_attnblock_forward(self, x): h3 += x return h3 - + def xformers_attnblock_forward(self, x): try: h_ = x diff --git a/modules/sd_models.py b/modules/sd_models.py index d1e946a5..3316d021 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -165,7 +165,7 @@ def model_hash(filename): def select_checkpoint(): model_checkpoint = shared.opts.sd_model_checkpoint - + checkpoint_info = checkpoint_alisases.get(model_checkpoint, None) if checkpoint_info is not None: return checkpoint_info @@ -372,7 +372,7 @@ def enable_midas_autodownload(): if not os.path.exists(path): if not os.path.exists(midas_path): mkdir(midas_path) - + print(f"Downloading midas model weights for {model_type} to {path}") request.urlretrieve(midas_urls[model_type], path) print(f"{model_type} downloaded") diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 2f733cf5..e9e41818 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -93,10 +93,10 @@ class CFGDenoiser(torch.nn.Module): if shared.sd_model.model.conditioning_key == "crossattn-adm": image_uncond = torch.zeros_like(image_cond) - make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": c_crossattn, "c_adm": c_adm} + make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": c_crossattn, "c_adm": c_adm} else: image_uncond = image_cond - make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": c_crossattn, "c_concat": [c_concat]} + make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": c_crossattn, "c_concat": [c_concat]} if not is_edit_model: x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) @@ -316,7 +316,7 @@ class KDiffusionSampler: sigma_sched = sigmas[steps - t_enc - 1:] xi = x + noise * sigma_sched[0] - + extra_params_kwargs = self.initialize(p) parameters = inspect.signature(self.func).parameters @@ -339,9 +339,9 @@ class KDiffusionSampler: self.model_wrap_cfg.init_latent = x self.last_latent = x extra_args={ - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale, 's_min_uncond': self.s_min_uncond } @@ -374,9 +374,9 @@ class KDiffusionSampler: self.last_latent = x samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ - 'cond': 
conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale, 's_min_uncond': self.s_min_uncond }, disable=False, callback=self.callback_state, **extra_params_kwargs)) diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py index cc38debd..497568eb 100644 --- a/modules/sub_quadratic_attention.py +++ b/modules/sub_quadratic_attention.py @@ -179,7 +179,7 @@ def efficient_dot_product_attention( chunk_idx, min(query_chunk_size, q_tokens) ) - + summarize_chunk: SummarizeChunk = partial(_summarize_chunk, scale=scale) summarize_chunk: SummarizeChunk = partial(checkpoint, summarize_chunk) if use_checkpoint else summarize_chunk compute_query_chunk_attn: ComputeQueryChunkAttn = partial( diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index 41610e03..b9621fc9 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -118,7 +118,7 @@ class PersonalizedBase(Dataset): weight = torch.ones(latent_sample.shape) else: weight = None - + if latent_sampling_method == "random": entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist, weight=weight) else: @@ -243,4 +243,4 @@ class BatchLoaderRandom(BatchLoader): return self def collate_wrapper_random(batch): - return BatchLoaderRandom(batch) \ No newline at end of file + return BatchLoaderRandom(batch) diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index d0cad09e..a009d8e8 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -125,7 +125,7 @@ def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, thr default=None ) return wh and center_crop(image, *wh) - + def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None): width = process_width diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 9e1b2b9a..d489ed1e 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -323,16 +323,16 @@ def tensorboard_add(tensorboard_writer, loss, global_step, step, learn_rate, epo tensorboard_add_scaler(tensorboard_writer, f"Learn rate/train/epoch-{epoch_num}", learn_rate, step) def tensorboard_add_scaler(tensorboard_writer, tag, value, step): - tensorboard_writer.add_scalar(tag=tag, + tensorboard_writer.add_scalar(tag=tag, scalar_value=value, global_step=step) def tensorboard_add_image(tensorboard_writer, tag, pil_image, step): # Convert a pil image to a torch tensor img_tensor = torch.as_tensor(np.array(pil_image, copy=True)) - img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0], + img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0], len(pil_image.getbands())) 
img_tensor = img_tensor.permute((2, 0, 1)) - + tensorboard_writer.add_image(tag, img_tensor, global_step=step) def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, data_root, template_file, template_filename, steps, save_model_every, create_image_every, log_directory, name="embedding"): @@ -402,7 +402,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st if initial_step >= steps: shared.state.textinfo = "Model has already been trained beyond specified max steps" return embedding, filename - + scheduler = LearnRateScheduler(learn_rate, steps, initial_step) clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else \ torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \ @@ -412,7 +412,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st # dataset loading may take a while, so input validations and early returns should be done before this shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." old_parallel_processing_allowed = shared.parallel_processing_allowed - + if shared.opts.training_enable_tensorboard: tensorboard_writer = tensorboard_setup(log_directory) @@ -439,7 +439,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st optimizer_saved_dict = torch.load(f"{filename}.optim", map_location='cpu') if embedding.checksum() == optimizer_saved_dict.get('hash', None): optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None) - + if optimizer_state_dict is not None: optimizer.load_state_dict(optimizer_state_dict) print("Loaded existing optimizer from checkpoint") @@ -485,7 +485,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st if clip_grad: clip_grad_sched.step(embedding.step) - + with devices.autocast(): x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) if use_weight: @@ -513,7 +513,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st # go back until we reach gradient accumulation steps if (j + 1) % gradient_step != 0: continue - + if clip_grad: clip_grad(embedding.vec, clip_grad_sched.learn_rate) diff --git a/modules/ui.py b/modules/ui.py index 1efb656a..ff82fff6 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1171,7 +1171,7 @@ def create_ui(): process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") - + with gr.Column(visible=False) as process_multicrop_col: gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') with gr.Row(): @@ -1183,7 +1183,7 @@ def create_ui(): with gr.Row(): process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective") process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold") - + with gr.Row(): with gr.Column(scale=3): gr.HTML(value="") @@ -1226,7 +1226,7 @@ def create_ui(): with FormRow(): embedding_learn_rate = gr.Textbox(label='Embedding Learning 
rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate") - + with FormRow(): clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False) @@ -1565,7 +1565,7 @@ def create_ui(): gr.HTML(shared.html("licenses.html"), elem_id="licenses") gr.Button(value="Show all pages", elem_id="settings_show_all_pages") - + def unload_sd_weights(): modules.sd_models.unload_model_weights() @@ -1841,15 +1841,15 @@ def versions_html(): return f""" version: {tag} - •  + • python: {python_version} - •  + • torch: {getattr(torch, '__long_version__',torch.__version__)} - •  + • xformers: {xformers_version} - •  + • gradio: {gr.__version__} - •  + • checkpoint: N/A """ diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index ed70abe5..af497733 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -467,7 +467,7 @@ def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text=" {html.escape(description)}
Added: {html.escape(added)}
{install_code} - + """ for tag in [x for x in extension_tags if x not in tags]: @@ -535,9 +535,9 @@ def create_ui(): hide_tags = gr.CheckboxGroup(value=["ads", "localization", "installed"], label="Hide extensions with tags", choices=["script", "ads", "localization", "installed"]) sort_column = gr.Radio(value="newest first", label="Order", choices=["newest first", "oldest first", "a-z", "z-a", "internal order", ], type="index") - with gr.Row(): + with gr.Row(): search_extensions_text = gr.Text(label="Search").style(container=False) - + install_result = gr.HTML() available_extensions_table = gr.HTML() diff --git a/modules/xlmr.py b/modules/xlmr.py index e056c3f6..a407a3ca 100644 --- a/modules/xlmr.py +++ b/modules/xlmr.py @@ -28,7 +28,7 @@ class BertSeriesModelWithTransformation(BertPreTrainedModel): config_class = BertSeriesConfig def __init__(self, config=None, **kargs): - # modify initialization for autoloading + # modify initialization for autoloading if config is None: config = XLMRobertaConfig() config.attention_probs_dropout_prob= 0.1 @@ -74,7 +74,7 @@ class BertSeriesModelWithTransformation(BertPreTrainedModel): text["attention_mask"] = torch.tensor( text['attention_mask']).to(device) features = self(**text) - return features['projection_state'] + return features['projection_state'] def forward( self, @@ -134,4 +134,4 @@ class BertSeriesModelWithTransformation(BertPreTrainedModel): class RobertaSeriesModelWithTransformation(BertSeriesModelWithTransformation): base_model_prefix = 'roberta' - config_class= RobertaSeriesConfig \ No newline at end of file + config_class= RobertaSeriesConfig diff --git a/pyproject.toml b/pyproject.toml index c88907be..d4a1bbf4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,6 +6,7 @@ extend-select = [ "B", "C", "I", + "W", ] exclude = [ @@ -20,7 +21,7 @@ ignore = [ "I001", # Import block is un-sorted or un-formatted "C901", # Function is too complex "C408", # Rewrite as a literal - + "W605", # invalid escape sequence, messes with some docstrings ] [tool.ruff.per-file-ignores] @@ -28,4 +29,4 @@ ignore = [ [tool.ruff.flake8-bugbear] # Allow default arguments like, e.g., `data: List[str] = fastapi.Query(None)`. 
-extend-immutable-calls = ["fastapi.Depends", "fastapi.security.HTTPBasic"] \ No newline at end of file +extend-immutable-calls = ["fastapi.Depends", "fastapi.security.HTTPBasic"] diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py index bb00fb3f..1e833fa8 100644 --- a/scripts/img2imgalt.py +++ b/scripts/img2imgalt.py @@ -149,9 +149,9 @@ class Script(scripts.Script): sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False, elem_id=self.elem_id("sigma_adjustment")) return [ - info, + info, override_sampler, - override_prompt, original_prompt, original_negative_prompt, + override_prompt, original_prompt, original_negative_prompt, override_steps, st, override_strength, cfg, randomness, sigma_adjustment, @@ -191,17 +191,17 @@ class Script(scripts.Script): self.cache = Cached(rec_noise, cfg, st, lat, original_prompt, original_negative_prompt, sigma_adjustment) rand_noise = processing.create_random_tensors(p.init_latent.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w, p=p) - + combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5) - + sampler = sd_samplers.create_sampler(p.sampler_name, p.sd_model) sigmas = sampler.model_wrap.get_sigmas(p.steps) - + noise_dt = combined_noise - (p.init_latent / sigmas[0]) - + p.seed = p.seed + 1 - + return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning, image_conditioning=p.image_conditioning) p.sample = sample_extra diff --git a/scripts/loopback.py b/scripts/loopback.py index ad6609be..2d5feaf9 100644 --- a/scripts/loopback.py +++ b/scripts/loopback.py @@ -14,7 +14,7 @@ class Script(scripts.Script): def show(self, is_img2img): return is_img2img - def ui(self, is_img2img): + def ui(self, is_img2img): loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=self.elem_id("loops")) final_denoising_strength = gr.Slider(minimum=0, maximum=1, step=0.01, label='Final denoising strength', value=0.5, elem_id=self.elem_id("final_denoising_strength")) denoising_curve = gr.Dropdown(label="Denoising strength curve", choices=["Aggressive", "Linear", "Lazy"], value="Linear") @@ -104,7 +104,7 @@ class Script(scripts.Script): p.seed = processed.seed + 1 p.denoising_strength = calculate_denoising_strength(i + 1) - + if state.skipped: break @@ -121,7 +121,7 @@ class Script(scripts.Script): all_images.append(last_image) p.inpainting_fill = original_inpainting_fill - + if state.interrupted: break @@ -132,7 +132,7 @@ class Script(scripts.Script): if opts.return_grid: grids.append(grid) - + all_images = grids + all_images processed = Processed(p, all_images, initial_seed, initial_info) diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py index c0bbecc1..ea0632b6 100644 --- a/scripts/poor_mans_outpainting.py +++ b/scripts/poor_mans_outpainting.py @@ -19,7 +19,7 @@ class Script(scripts.Script): def ui(self, is_img2img): if not is_img2img: return None - + pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=self.elem_id("pixels")) mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id=self.elem_id("mask_blur")) inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", elem_id=self.elem_id("inpainting_fill")) 
diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index fb06beab..88324fe6 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -96,7 +96,7 @@ class Script(scripts.Script): p.prompt_for_display = positive_prompt processed = process_images(p) - grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2)) + grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2)) grid = images.draw_prompt_matrix(grid, processed.images[0].width, processed.images[0].height, prompt_matrix_parts, margin_size) processed.images.insert(0, grid) processed.index_of_first_image = 1 diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 9607077a..2378816f 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -109,7 +109,7 @@ class Script(scripts.Script): def title(self): return "Prompts from file or textbox" - def ui(self, is_img2img): + def ui(self, is_img2img): checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=self.elem_id("checkbox_iterate")) checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=self.elem_id("checkbox_iterate_batch")) @@ -166,7 +166,7 @@ class Script(scripts.Script): proc = process_images(copy_p) images += proc.images - + if checkbox_iterate: p.seed = p.seed + (p.batch_size * p.n_iter) all_prompts += proc.all_prompts diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index 0b1d3096..e614c23b 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -16,7 +16,7 @@ class Script(scripts.Script): def show(self, is_img2img): return is_img2img - def ui(self, is_img2img): + def ui(self, is_img2img): info = gr.HTML("
Will upscale the image by the selected scale factor; use width and height sliders to set tile size
") overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, elem_id=self.elem_id("overlap")) scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0, elem_id=self.elem_id("scale_factor")) -- cgit v1.2.3 From 681c16dd1e911bdf831b031b0f31aaba41c280f8 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 12 May 2023 22:33:21 +0900 Subject: fix --data-dir for COMMANDLINE_ARGS move reading of COMMANDLINE_ARGS into paths_internal.py so --data-dir can be properly read --- launch.py | 4 ---- modules/paths_internal.py | 5 +++++ 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index 62b33f14..516746ac 100644 --- a/launch.py +++ b/launch.py @@ -3,16 +3,12 @@ import subprocess import os import sys import importlib.util -import shlex import platform import json from modules import cmd_args from modules.paths_internal import script_path, extensions_dir -commandline_args = os.environ.get('COMMANDLINE_ARGS', "") -sys.argv += shlex.split(commandline_args) - args, _ = cmd_args.parser.parse_known_args() python = sys.executable diff --git a/modules/paths_internal.py b/modules/paths_internal.py index a23f6d70..005a9b0a 100644 --- a/modules/paths_internal.py +++ b/modules/paths_internal.py @@ -2,6 +2,11 @@ import argparse import os +import sys +import shlex + +commandline_args = os.environ.get('COMMANDLINE_ARGS', "") +sys.argv += shlex.split(commandline_args) modules_path = os.path.dirname(os.path.realpath(__file__)) script_path = os.path.dirname(modules_path) -- cgit v1.2.3 From 55d222a9f4fb51eeb4c0b0fe4e703d45a39ae7a0 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 11 May 2023 23:00:53 +0300 Subject: launch.py: make git_tag() and commit_hash() work even when WEBUI_LAUNCH_LIVE_OUTPUT --- launch.py | 27 +++++++-------------------- 1 file changed, 7 insertions(+), 20 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index 516746ac..ae67fe5c 100644 --- a/launch.py +++ b/launch.py @@ -5,6 +5,7 @@ import sys import importlib.util import platform import json +from functools import lru_cache from modules import cmd_args from modules.paths_internal import script_path, extensions_dir @@ -14,8 +15,6 @@ args, _ = cmd_args.parser.parse_known_args() python = sys.executable git = os.environ.get('GIT', "git") index_url = os.environ.get('INDEX_URL', "") -stored_commit_hash = None -stored_git_tag = None dir_repos = "repositories" # Whether to default to printing command output @@ -56,32 +55,20 @@ Use --skip-python-version-check to suppress this warning. 
""") +@lru_cache() def commit_hash(): - global stored_commit_hash - - if stored_commit_hash is not None: - return stored_commit_hash - try: - stored_commit_hash = run(f"{git} rev-parse HEAD").strip() + return subprocess.check_output(f"{git} rev-parse HEAD", encoding='utf8').strip() except Exception: - stored_commit_hash = "" - - return stored_commit_hash + return "" +@lru_cache() def git_tag(): - global stored_git_tag - - if stored_git_tag is not None: - return stored_git_tag - try: - stored_git_tag = run(f"{git} describe --tags").strip() + return subprocess.check_output(f"{git} describe --tags", encoding='utf8').strip() except Exception: - stored_git_tag = "" - - return stored_git_tag + return "" def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_command_live) -> str: -- cgit v1.2.3 From 451d255b5859580c4adf99d67760330d58d76446 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 11 May 2023 23:29:34 +0300 Subject: Get rid of check_run + run_python --- launch.py | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index ae67fe5c..578af229 100644 --- a/launch.py +++ b/launch.py @@ -103,11 +103,6 @@ def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_ return (result.stdout or "") -def check_run(command): - result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) - return result.returncode == 0 - - def is_installed(package): try: spec = importlib.util.find_spec(package) @@ -121,10 +116,6 @@ def repo_dir(name): return os.path.join(script_path, dir_repos, name) -def run_python(code, desc=None, errdesc=None): - return run(f'"{python}" -c "{code}"', desc, errdesc) - - def run_pip(command, desc=None, live=default_command_live): if args.skip_install: return @@ -133,8 +124,9 @@ def run_pip(command, desc=None, live=default_command_live): return run(f'"{python}" -m pip {command} --prefer-binary{index_url_line}', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}", live=live) -def check_run_python(code): - return check_run(f'"{python}" -c "{code}"') +def check_run_python(code: str) -> bool: + result = subprocess.run([python, "-c", code], capture_output=True, shell=True) + return result.returncode == 0 def git_clone(url, dir, name, commithash=None): @@ -261,8 +253,11 @@ def prepare_environment(): if args.reinstall_torch or not is_installed("torch") or not is_installed("torchvision"): run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True) - if not args.skip_torch_cuda_test: - run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'") + if not args.skip_torch_cuda_test and not check_run_python("import torch; assert torch.cuda.is_available()"): + raise RuntimeError( + 'Torch is not able to use GPU; ' + 'add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check' + ) if not is_installed("gfpgan"): run_pip(f"install {gfpgan_package}", "gfpgan") -- cgit v1.2.3 From f29c41bf6dd04a721e1dc0a162b042cad269f247 Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Sun, 14 May 2023 22:29:28 +0800 Subject: Modify pytorch command --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index cfc0cffa..b00420d0 100644 --- a/launch.py +++ 
b/launch.py @@ -237,7 +237,7 @@ def run_extensions_installers(settings_file): def prepare_environment(): - torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url https://download.pytorch.org/whl/cu118") + torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 --index-url https://download.pytorch.org/whl/cu118") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') -- cgit v1.2.3 From d9968e61082fb5ed16eedfe8fa43bf6f2e7b76e7 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Sun, 14 May 2023 20:30:02 +0300 Subject: launch.py: Don't involve shell for running Python or Git for output Fixes Linux regression in 451d255b5859580c4adf99d67760330d58d76446 --- launch.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index 578af229..f8380911 100644 --- a/launch.py +++ b/launch.py @@ -58,7 +58,7 @@ Use --skip-python-version-check to suppress this warning. @lru_cache() def commit_hash(): try: - return subprocess.check_output(f"{git} rev-parse HEAD", encoding='utf8').strip() + return subprocess.check_output([git, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip() except Exception: return "" @@ -66,7 +66,7 @@ def commit_hash(): @lru_cache() def git_tag(): try: - return subprocess.check_output(f"{git} describe --tags", encoding='utf8').strip() + return subprocess.check_output([git, "describe", "--tags"], shell=False, encoding='utf8').strip() except Exception: return "" @@ -125,7 +125,7 @@ def run_pip(command, desc=None, live=default_command_live): def check_run_python(code: str) -> bool: - result = subprocess.run([python, "-c", code], capture_output=True, shell=True) + result = subprocess.run([python, "-c", code], capture_output=True, shell=False) return result.returncode == 0 -- cgit v1.2.3 From 9a9557ecfc824c4a005dc79466dba8f5884dbfb1 Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Mon, 15 May 2023 03:00:23 +0800 Subject: Change to extra-index-url --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index b00420d0..034de0a9 100644 --- a/launch.py +++ b/launch.py @@ -237,7 +237,7 @@ def run_extensions_installers(settings_file): def prepare_environment(): - torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 --index-url https://download.pytorch.org/whl/cu118") + torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 --extra-index-url https://download.pytorch.org/whl/cu118") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') -- cgit v1.2.3 From 0d31f20cbd556ea4ba3d8ad9254bcce71c32088c Mon Sep 17 00:00:00 2001 From: bobzilladev Date: Tue, 16 May 2023 13:15:30 -0400 Subject: Use ngrok-py library --- launch.py | 4 ++-- modules/cmd_args.py | 3 ++- modules/ngrok.py | 37 ++++++++++++++----------------------- modules/ui.py | 2 +- 4 files changed, 19 insertions(+), 27 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index cfc0cffa..871d3ea8 100644 --- a/launch.py +++ b/launch.py @@ -294,8 +294,8 @@ def prepare_environment(): elif platform.system() == "Linux": run_pip(f"install 
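Two patches above are worth reading together: @lru_cache memoizes the git queries so they run once per process, and the shell=False fix matters because on POSIX a single command string passed without a shell is treated as one executable name, which is exactly the Linux regression being repaired. Both ideas folded into one sketch (git_output is an invented helper name, not the webui's):

import subprocess
from functools import lru_cache

GIT = "git"

@lru_cache()
def git_output(*args: str) -> str:
    # An argument list with shell=False never involves /bin/sh, so the call
    # behaves the same on Windows and Linux; check_output raises on a
    # non-zero exit code, which the except clause maps to "".
    try:
        return subprocess.check_output([GIT, *args], shell=False, encoding='utf8').strip()
    except Exception:
        return ""

commit = git_output("rev-parse", "HEAD")
tag = git_output("describe", "--tags")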
{xformers_package}", "xformers") - if not is_installed("pyngrok") and args.ngrok: - run_pip("install pyngrok", "ngrok") + if not is_installed("ngrok") and args.ngrok: + run_pip("install ngrok", "ngrok") os.makedirs(os.path.join(script_path, dir_repos), exist_ok=True) diff --git a/modules/cmd_args.py b/modules/cmd_args.py index d906a571..bf18b7b7 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -1,4 +1,5 @@ import argparse +import json import os from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file @@ -39,7 +40,7 @@ parser.add_argument("--precision", type=str, help="evaluate at this precision", parser.add_argument("--upcast-sampling", action='store_true', help="upcast sampling. No effect with --no-half. Usually produces similar results to --no-half with better performance while using less memory.") parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site") parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None) -parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us") +parser.add_argument("--ngrok-options", type=json.loads, help='The options to pass to ngrok in JSON format, e.g.: \'{"authtoken_from_env":true, "basic_auth":"user:password", "oauth_provider":"google", "oauth_allow_emails":"user@asdf.com"}\'', default=dict()) parser.add_argument("--enable-insecure-extension-access", action='store_true', help="enable extensions tab regardless of other options") parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer')) parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN')) diff --git a/modules/ngrok.py b/modules/ngrok.py index 7a7b4b26..caa352d1 100644 --- a/modules/ngrok.py +++ b/modules/ngrok.py @@ -1,6 +1,7 @@ -from pyngrok import ngrok, conf, exception +import ngrok -def connect(token, port, region): +# Connect to ngrok for ingress +def connect(token, port, options): account = None if token is None: token = 'None' @@ -10,28 +11,18 @@ def connect(token, port, region): token, username, password = token.split(':', 2) account = f"{username}:{password}" - config = conf.PyngrokConfig( - auth_token=token, region=region - ) - - # Guard for existing tunnels - existing = ngrok.get_tunnels(pyngrok_config=config) - if existing: - for established in existing: - # Extra configuration in the case that the user is also using ngrok for other tunnels - if established.config['addr'][-4:] == str(port): - public_url = existing[0].public_url - print(f'ngrok has already been connected to localhost:{port}! 
URL: {public_url}\n' - 'You can use this link after the launch is complete.') - return - + # For all options see: https://github.com/ngrok/ngrok-py/blob/main/examples/ngrok-connect-full.py + if not options.get('authtoken_from_env'): + options['authtoken'] = token + if account: + options['basic_auth'] = account + if not options.get('session_metadata'): + options['session_metadata'] = 'stable-diffusion-webui' + try: - if account is None: - public_url = ngrok.connect(port, pyngrok_config=config, bind_tls=True).public_url - else: - public_url = ngrok.connect(port, pyngrok_config=config, bind_tls=True, auth=account).public_url - except exception.PyngrokNgrokError: - print(f'Invalid ngrok authtoken, ngrok connection aborted.\n' + public_url = ngrok.connect(f"127.0.0.1:{port}", **options).url() + except Exception as e: + print(f'Invalid ngrok authtoken? ngrok connection aborted due to: {e}\n' f'Your token: {token}, get the right one on https://dashboard.ngrok.com/get-started/your-authtoken') else: print(f'ngrok connected to localhost:{port}! URL: {public_url}\n' diff --git a/modules/ui.py b/modules/ui.py index f07bcc41..5f5405f0 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -59,7 +59,7 @@ if cmd_opts.ngrok is not None: ngrok.connect( cmd_opts.ngrok, cmd_opts.port if cmd_opts.port is not None else 7860, - cmd_opts.ngrok_region + cmd_opts.ngrok_options ) -- cgit v1.2.3 From 96cba45d71cf25ab2f3bb7d7085347c8b6ef6d7d Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Thu, 18 May 2023 17:29:47 +0800 Subject: Modify xformers instead of pytorch --- launch.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index 034de0a9..de8bde24 100644 --- a/launch.py +++ b/launch.py @@ -237,7 +237,8 @@ def run_extensions_installers(settings_file): def prepare_environment(): - torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 --extra-index-url https://download.pytorch.org/whl/cu118") + torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu118") + torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') @@ -292,7 +293,7 @@ def prepare_environment(): if not is_installed("xformers"): exit(0) elif platform.system() == "Linux": - run_pip(f"install {xformers_package}", "xformers") + run_pip(f"install -U -I --no-deps {xformers_package}", "xformers") if not is_installed("pyngrok") and args.ngrok: run_pip("install pyngrok", "ngrok") -- cgit v1.2.3 From 793a491923ba11adbe1024e0eb0402923165dafa Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 11 May 2023 21:57:43 +0300 Subject: Overhaul tests to use py.test --- .github/workflows/run_tests.yaml | 49 +++++++++++++++++--- .gitignore | 3 +- launch.py | 32 +++++-------- modules/cmd_args.py | 4 +- pyproject.toml | 3 ++ requirements-test.txt | 3 ++ test/basic_features/__init__.py | 0 test/basic_features/extras_test.py | 56 ----------------------- test/basic_features/img2img_test.py | 68 ---------------------------- test/basic_features/txt2img_test.py | 82 --------------------------------- test/basic_features/utils_test.py | 64 -------------------------- test/conftest.py | 17 +++++++ test/server_poll.py | 26 ----------- test/test_extras.py | 35 
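The ngrok-py migration above collapses pyngrok's tunnel bookkeeping into a single connect call driven by a JSON options dict, which is why --ngrok-region becomes --ngrok-options. A sketch of how such a dict flows through (the option values are examples only, and the actual connect call is left as comments so the snippet runs without the ngrok package or a token):

import json

# What a user might pass on the command line:
#   --ngrok-options '{"basic_auth": "user:password", "oauth_provider": "google"}'
raw = '{"basic_auth": "user:password", "session_metadata": "stable-diffusion-webui"}'
options = json.loads(raw)  # cmd_args.py does the same via type=json.loads

# The patched modules/ngrok.py then forwards everything to ngrok-py:
#   import ngrok
#   public_url = ngrok.connect("127.0.0.1:7860", authtoken=token, **options).url()
print(options)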
+++++++++++++++ test/test_img2img.py | 68 ++++++++++++++++++++++++++++ test/test_txt2img.py | 90 +++++++++++++++++++++++++++++++++++++ test/test_utils.py | 33 ++++++++++++++ 17 files changed, 307 insertions(+), 326 deletions(-) create mode 100644 requirements-test.txt delete mode 100644 test/basic_features/__init__.py delete mode 100644 test/basic_features/extras_test.py delete mode 100644 test/basic_features/img2img_test.py delete mode 100644 test/basic_features/txt2img_test.py delete mode 100644 test/basic_features/utils_test.py create mode 100644 test/conftest.py delete mode 100644 test/server_poll.py create mode 100644 test/test_extras.py create mode 100644 test/test_img2img.py create mode 100644 test/test_txt2img.py create mode 100644 test/test_utils.py (limited to 'launch.py') diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml index 0708398b..226cf759 100644 --- a/.github/workflows/run_tests.yaml +++ b/.github/workflows/run_tests.yaml @@ -18,18 +18,53 @@ jobs: cache-dependency-path: | **/requirements*txt launch.py - - name: Run tests - run: python launch.py --tests test --no-half --disable-opt-split-attention --use-cpu all --skip-torch-cuda-test + - name: Install test dependencies + run: pip install wait-for-it -r requirements-test.txt + env: + PIP_DISABLE_PIP_VERSION_CHECK: "1" + PIP_PROGRESS_BAR: "off" + - name: Setup environment + run: python launch.py --skip-torch-cuda-test --exit env: PIP_DISABLE_PIP_VERSION_CHECK: "1" PIP_PROGRESS_BAR: "off" TORCH_INDEX_URL: https://download.pytorch.org/whl/cpu WEBUI_LAUNCH_LIVE_OUTPUT: "1" - - name: Upload main app stdout-stderr + PYTHONUNBUFFERED: "1" + - name: Start test server + run: > + python -m coverage run + --data-file=.coverage.server + launch.py + --skip-prepare-environment + --skip-torch-cuda-test + --test-server + --no-half + --disable-opt-split-attention + --use-cpu all + --add-stop-route + 2>&1 | tee output.txt & + - name: Run tests + run: | + wait-for-it --service 127.0.0.1:7860 -t 600 + python -m pytest -vv --junitxml=test/results.xml --cov . 
--cov-report=xml --verify-base-url test + - name: Kill test server + if: always() + run: curl -vv -XPOST http://127.0.0.1:7860/_stop && sleep 10 + - name: Show coverage + run: | + python -m coverage combine .coverage* + python -m coverage report -i + python -m coverage html -i + - name: Upload main app output + uses: actions/upload-artifact@v3 + if: always() + with: + name: output + path: output.txt + - name: Upload coverage HTML uses: actions/upload-artifact@v3 if: always() with: - name: stdout-stderr - path: | - test/stdout.txt - test/stderr.txt + name: htmlcov + path: htmlcov diff --git a/.gitignore b/.gitignore index 46654d83..09734267 100644 --- a/.gitignore +++ b/.gitignore @@ -35,4 +35,5 @@ notification.mp3 /cache.json* /config_states/ /node_modules -/package-lock.json \ No newline at end of file +/package-lock.json +/.coverage* diff --git a/launch.py b/launch.py index 6e9ca8de..b9b5b709 100644 --- a/launch.py +++ b/launch.py @@ -310,12 +310,8 @@ def prepare_environment(): print("Exiting because of --exit argument") exit(0) - if args.tests and not args.no_tests: - exitcode = tests(args.tests) - exit(exitcode) - -def tests(test_dir): +def configure_for_tests(): if "--api" not in sys.argv: sys.argv.append("--api") if "--ckpt" not in sys.argv: @@ -325,21 +321,8 @@ def tests(test_dir): sys.argv.append("--skip-torch-cuda-test") if "--disable-nan-check" not in sys.argv: sys.argv.append("--disable-nan-check") - if "--no-tests" not in sys.argv: - sys.argv.append("--no-tests") - - print(f"Launching Web UI in another process for testing with arguments: {' '.join(sys.argv[1:])}") os.environ['COMMANDLINE_ARGS'] = "" - with open(os.path.join(script_path, 'test/stdout.txt'), "w", encoding="utf8") as stdout, open(os.path.join(script_path, 'test/stderr.txt'), "w", encoding="utf8") as stderr: - proc = subprocess.Popen([sys.executable, *sys.argv], stdout=stdout, stderr=stderr) - - import test.server_poll - exitcode = test.server_poll.run_tests(proc, test_dir) - - print(f"Stopping Web UI process with id {proc.pid}") - proc.kill() - return exitcode def start(): @@ -351,6 +334,15 @@ def start(): webui.webui() -if __name__ == "__main__": - prepare_environment() +def main(): + if not args.skip_prepare_environment: + prepare_environment() + + if args.test_server: + configure_for_tests() + start() + + +if __name__ == "__main__": + main() diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 7bde161e..8625690b 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -12,8 +12,8 @@ parser.add_argument("--skip-torch-cuda-test", action='store_true', help="launch. 
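The py.test overhaul hinges on the pytest-base-url plugin: pyproject.toml pins base_url to http://127.0.0.1:7860, the workflow's --verify-base-url makes the run fail fast if that server never came up, and each test simply requests the base_url fixture. A minimal test in the new style (it assumes the plugin is installed and the API server is running):

import requests

def test_cmd_flags_endpoint(base_url):
    # base_url is injected by pytest-base-url from pyproject.toml or the
    # --base-url command-line option; no per-test URL constants needed.
    response = requests.get(f"{base_url}/sdapi/v1/cmd-flags")
    assert response.status_code == 200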
parser.add_argument("--reinstall-xformers", action='store_true', help="launch.py argument: install the appropriate version of xformers even if you have some version already installed") parser.add_argument("--reinstall-torch", action='store_true', help="launch.py argument: install the appropriate version of torch even if you have some version already installed") parser.add_argument("--update-check", action='store_true', help="launch.py argument: chck for updates at startup") -parser.add_argument("--tests", type=str, default=None, help="launch.py argument: run tests in the specified directory") -parser.add_argument("--no-tests", action='store_true', help="launch.py argument: do not run tests even if --tests option is specified") +parser.add_argument("--test-server", action='store_true', help="launch.py argument: configure server for testing") +parser.add_argument("--skip-prepare-environment", action='store_true', help="launch.py argument: skip all environment preparation") parser.add_argument("--skip-install", action='store_true', help="launch.py argument: skip installation of packages") parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored") parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",) diff --git a/pyproject.toml b/pyproject.toml index d4a1bbf4..80541a8f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,3 +30,6 @@ ignore = [ [tool.ruff.flake8-bugbear] # Allow default arguments like, e.g., `data: List[str] = fastapi.Query(None)`. extend-immutable-calls = ["fastapi.Depends", "fastapi.security.HTTPBasic"] + +[tool.pytest.ini_options] +base_url = "http://127.0.0.1:7860" diff --git a/requirements-test.txt b/requirements-test.txt new file mode 100644 index 00000000..37838ca2 --- /dev/null +++ b/requirements-test.txt @@ -0,0 +1,3 @@ +pytest-base-url~=2.0 +pytest-cov~=4.0 +pytest~=7.3 diff --git a/test/basic_features/__init__.py b/test/basic_features/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/basic_features/extras_test.py b/test/basic_features/extras_test.py deleted file mode 100644 index 8ed98747..00000000 --- a/test/basic_features/extras_test.py +++ /dev/null @@ -1,56 +0,0 @@ -import os -import unittest -import requests -from gradio.processing_utils import encode_pil_to_base64 -from PIL import Image -from modules.paths import script_path - -class TestExtrasWorking(unittest.TestCase): - def setUp(self): - self.url_extras_single = "http://localhost:7860/sdapi/v1/extra-single-image" - self.extras_single = { - "resize_mode": 0, - "show_extras_results": True, - "gfpgan_visibility": 0, - "codeformer_visibility": 0, - "codeformer_weight": 0, - "upscaling_resize": 2, - "upscaling_resize_w": 128, - "upscaling_resize_h": 128, - "upscaling_crop": True, - "upscaler_1": "None", - "upscaler_2": "None", - "extras_upscaler_2_visibility": 0, - "image": encode_pil_to_base64(Image.open(os.path.join(script_path, r"test/test_files/img2img_basic.png"))) - } - - def test_simple_upscaling_performed(self): - self.extras_single["upscaler_1"] = "Lanczos" - self.assertEqual(requests.post(self.url_extras_single, json=self.extras_single).status_code, 200) - - -class TestPngInfoWorking(unittest.TestCase): - def setUp(self): - self.url_png_info = "http://localhost:7860/sdapi/v1/extra-single-image" - self.png_info = { - "image": encode_pil_to_base64(Image.open(os.path.join(script_path, 
r"test/test_files/img2img_basic.png"))) - } - - def test_png_info_performed(self): - self.assertEqual(requests.post(self.url_png_info, json=self.png_info).status_code, 200) - - -class TestInterrogateWorking(unittest.TestCase): - def setUp(self): - self.url_interrogate = "http://localhost:7860/sdapi/v1/extra-single-image" - self.interrogate = { - "image": encode_pil_to_base64(Image.open(os.path.join(script_path, r"test/test_files/img2img_basic.png"))), - "model": "clip" - } - - def test_interrogate_performed(self): - self.assertEqual(requests.post(self.url_interrogate, json=self.interrogate).status_code, 200) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/basic_features/img2img_test.py b/test/basic_features/img2img_test.py deleted file mode 100644 index 5240ec36..00000000 --- a/test/basic_features/img2img_test.py +++ /dev/null @@ -1,68 +0,0 @@ -import os -import unittest -import requests -from gradio.processing_utils import encode_pil_to_base64 -from PIL import Image -from modules.paths import script_path - - -class TestImg2ImgWorking(unittest.TestCase): - def setUp(self): - self.url_img2img = "http://localhost:7860/sdapi/v1/img2img" - self.simple_img2img = { - "init_images": [encode_pil_to_base64(Image.open(os.path.join(script_path, r"test/test_files/img2img_basic.png")))], - "resize_mode": 0, - "denoising_strength": 0.75, - "mask": None, - "mask_blur": 4, - "inpainting_fill": 0, - "inpaint_full_res": False, - "inpaint_full_res_padding": 0, - "inpainting_mask_invert": False, - "prompt": "example prompt", - "styles": [], - "seed": -1, - "subseed": -1, - "subseed_strength": 0, - "seed_resize_from_h": -1, - "seed_resize_from_w": -1, - "batch_size": 1, - "n_iter": 1, - "steps": 3, - "cfg_scale": 7, - "width": 64, - "height": 64, - "restore_faces": False, - "tiling": False, - "negative_prompt": "", - "eta": 0, - "s_churn": 0, - "s_tmax": 0, - "s_tmin": 0, - "s_noise": 1, - "override_settings": {}, - "sampler_index": "Euler a", - "include_init_images": False - } - - def test_img2img_simple_performed(self): - self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) - - def test_inpainting_masked_performed(self): - self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(os.path.join(script_path, r"test/test_files/img2img_basic.png"))) - self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) - - def test_inpainting_with_inverted_masked_performed(self): - self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(os.path.join(script_path, r"test/test_files/img2img_basic.png"))) - self.simple_img2img["inpainting_mask_invert"] = True - self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) - - def test_img2img_sd_upscale_performed(self): - self.simple_img2img["script_name"] = "sd upscale" - self.simple_img2img["script_args"] = ["", 8, "Lanczos", 2.0] - - self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/basic_features/txt2img_test.py b/test/basic_features/txt2img_test.py deleted file mode 100644 index cb525fbb..00000000 --- a/test/basic_features/txt2img_test.py +++ /dev/null @@ -1,82 +0,0 @@ -import unittest -import requests - - -class TestTxt2ImgWorking(unittest.TestCase): - def setUp(self): - self.url_txt2img = "http://localhost:7860/sdapi/v1/txt2img" - self.simple_txt2img = { - "enable_hr": False, - "denoising_strength": 0, - "firstphase_width": 0, - 
"firstphase_height": 0, - "prompt": "example prompt", - "styles": [], - "seed": -1, - "subseed": -1, - "subseed_strength": 0, - "seed_resize_from_h": -1, - "seed_resize_from_w": -1, - "batch_size": 1, - "n_iter": 1, - "steps": 3, - "cfg_scale": 7, - "width": 64, - "height": 64, - "restore_faces": False, - "tiling": False, - "negative_prompt": "", - "eta": 0, - "s_churn": 0, - "s_tmax": 0, - "s_tmin": 0, - "s_noise": 1, - "sampler_index": "Euler a" - } - - def test_txt2img_simple_performed(self): - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - - def test_txt2img_with_negative_prompt_performed(self): - self.simple_txt2img["negative_prompt"] = "example negative prompt" - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - - def test_txt2img_with_complex_prompt_performed(self): - self.simple_txt2img["prompt"] = "((emphasis)), (emphasis1:1.1), [to:1], [from::2], [from:to:0.3], [alt|alt1]" - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - - def test_txt2img_not_square_image_performed(self): - self.simple_txt2img["height"] = 128 - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - - def test_txt2img_with_hrfix_performed(self): - self.simple_txt2img["enable_hr"] = True - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - - def test_txt2img_with_tiling_performed(self): - self.simple_txt2img["tiling"] = True - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - - def test_txt2img_with_restore_faces_performed(self): - self.simple_txt2img["restore_faces"] = True - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - - def test_txt2img_with_vanilla_sampler_performed(self): - self.simple_txt2img["sampler_index"] = "PLMS" - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - self.simple_txt2img["sampler_index"] = "DDIM" - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - self.simple_txt2img["sampler_index"] = "UniPC" - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - - def test_txt2img_multiple_batches_performed(self): - self.simple_txt2img["n_iter"] = 2 - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - - def test_txt2img_batch_performed(self): - self.simple_txt2img["batch_size"] = 2 - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/basic_features/utils_test.py b/test/basic_features/utils_test.py deleted file mode 100644 index d9e46b5e..00000000 --- a/test/basic_features/utils_test.py +++ /dev/null @@ -1,64 +0,0 @@ -import unittest -import requests - - -class UtilsTests(unittest.TestCase): - def setUp(self): - self.url_options = "http://localhost:7860/sdapi/v1/options" - self.url_cmd_flags = "http://localhost:7860/sdapi/v1/cmd-flags" - self.url_samplers = "http://localhost:7860/sdapi/v1/samplers" - self.url_upscalers = "http://localhost:7860/sdapi/v1/upscalers" - self.url_sd_models = "http://localhost:7860/sdapi/v1/sd-models" - self.url_hypernetworks = "http://localhost:7860/sdapi/v1/hypernetworks" - self.url_face_restorers = "http://localhost:7860/sdapi/v1/face-restorers" - self.url_realesrgan_models = 
"http://localhost:7860/sdapi/v1/realesrgan-models" - self.url_prompt_styles = "http://localhost:7860/sdapi/v1/prompt-styles" - self.url_embeddings = "http://localhost:7860/sdapi/v1/embeddings" - - def test_options_get(self): - self.assertEqual(requests.get(self.url_options).status_code, 200) - - def test_options_write(self): - response = requests.get(self.url_options) - self.assertEqual(response.status_code, 200) - - pre_value = response.json()["send_seed"] - - self.assertEqual(requests.post(self.url_options, json={"send_seed": not pre_value}).status_code, 200) - - response = requests.get(self.url_options) - self.assertEqual(response.status_code, 200) - self.assertEqual(response.json()["send_seed"], not pre_value) - - requests.post(self.url_options, json={"send_seed": pre_value}) - - def test_cmd_flags(self): - self.assertEqual(requests.get(self.url_cmd_flags).status_code, 200) - - def test_samplers(self): - self.assertEqual(requests.get(self.url_samplers).status_code, 200) - - def test_upscalers(self): - self.assertEqual(requests.get(self.url_upscalers).status_code, 200) - - def test_sd_models(self): - self.assertEqual(requests.get(self.url_sd_models).status_code, 200) - - def test_hypernetworks(self): - self.assertEqual(requests.get(self.url_hypernetworks).status_code, 200) - - def test_face_restorers(self): - self.assertEqual(requests.get(self.url_face_restorers).status_code, 200) - - def test_realesrgan_models(self): - self.assertEqual(requests.get(self.url_realesrgan_models).status_code, 200) - - def test_prompt_styles(self): - self.assertEqual(requests.get(self.url_prompt_styles).status_code, 200) - - def test_embeddings(self): - self.assertEqual(requests.get(self.url_embeddings).status_code, 200) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/conftest.py b/test/conftest.py new file mode 100644 index 00000000..0723f62a --- /dev/null +++ b/test/conftest.py @@ -0,0 +1,17 @@ +import os + +import pytest +from PIL import Image +from gradio.processing_utils import encode_pil_to_base64 + +test_files_path = os.path.dirname(__file__) + "/test_files" + + +@pytest.fixture(scope="session") # session so we don't read this over and over +def img2img_basic_image_base64() -> str: + return encode_pil_to_base64(Image.open(os.path.join(test_files_path, "img2img_basic.png"))) + + +@pytest.fixture(scope="session") # session so we don't read this over and over +def mask_basic_image_base64() -> str: + return encode_pil_to_base64(Image.open(os.path.join(test_files_path, "mask_basic.png"))) diff --git a/test/server_poll.py b/test/server_poll.py deleted file mode 100644 index c732630f..00000000 --- a/test/server_poll.py +++ /dev/null @@ -1,26 +0,0 @@ -import unittest -import requests -import time -import os -from modules.paths import script_path - - -def run_tests(proc, test_dir): - timeout_threshold = 240 - start_time = time.time() - while time.time()-start_time < timeout_threshold: - try: - requests.head("http://localhost:7860/") - break - except requests.exceptions.ConnectionError: - if proc.poll() is not None: - break - if proc.poll() is None: - if test_dir is None: - test_dir = os.path.join(script_path, "test") - suite = unittest.TestLoader().discover(test_dir, pattern="*_test.py", top_level_dir=test_dir) - result = unittest.TextTestRunner(verbosity=2).run(suite) - return len(result.failures) + len(result.errors) - else: - print("Launch unsuccessful") - return 1 diff --git a/test/test_extras.py b/test/test_extras.py new file mode 100644 index 00000000..799d9fad --- /dev/null +++ 
b/test/test_extras.py @@ -0,0 +1,35 @@ +import requests + + +def test_simple_upscaling_performed(base_url, img2img_basic_image_base64): + payload = { + "resize_mode": 0, + "show_extras_results": True, + "gfpgan_visibility": 0, + "codeformer_visibility": 0, + "codeformer_weight": 0, + "upscaling_resize": 2, + "upscaling_resize_w": 128, + "upscaling_resize_h": 128, + "upscaling_crop": True, + "upscaler_1": "Lanczos", + "upscaler_2": "None", + "extras_upscaler_2_visibility": 0, + "image": img2img_basic_image_base64, + } + assert requests.post(f"{base_url}/sdapi/v1/extra-single-image", json=payload).status_code == 200 + + +def test_png_info_performed(base_url, img2img_basic_image_base64): + payload = { + "image": img2img_basic_image_base64, + } + assert requests.post(f"{base_url}/sdapi/v1/extra-single-image", json=payload).status_code == 200 + + +def test_interrogate_performed(base_url, img2img_basic_image_base64): + payload = { + "image": img2img_basic_image_base64, + "model": "clip", + } + assert requests.post(f"{base_url}/sdapi/v1/extra-single-image", json=payload).status_code == 200 diff --git a/test/test_img2img.py b/test/test_img2img.py new file mode 100644 index 00000000..117d2d1e --- /dev/null +++ b/test/test_img2img.py @@ -0,0 +1,68 @@ + +import pytest +import requests + + +@pytest.fixture() +def url_img2img(base_url): + return f"{base_url}/sdapi/v1/img2img" + + +@pytest.fixture() +def simple_img2img_request(img2img_basic_image_base64): + return { + "batch_size": 1, + "cfg_scale": 7, + "denoising_strength": 0.75, + "eta": 0, + "height": 64, + "include_init_images": False, + "init_images": [img2img_basic_image_base64], + "inpaint_full_res": False, + "inpaint_full_res_padding": 0, + "inpainting_fill": 0, + "inpainting_mask_invert": False, + "mask": None, + "mask_blur": 4, + "n_iter": 1, + "negative_prompt": "", + "override_settings": {}, + "prompt": "example prompt", + "resize_mode": 0, + "restore_faces": False, + "s_churn": 0, + "s_noise": 1, + "s_tmax": 0, + "s_tmin": 0, + "sampler_index": "Euler a", + "seed": -1, + "seed_resize_from_h": -1, + "seed_resize_from_w": -1, + "steps": 3, + "styles": [], + "subseed": -1, + "subseed_strength": 0, + "tiling": False, + "width": 64, + } + + +def test_img2img_simple_performed(url_img2img, simple_img2img_request): + assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200 + + +def test_inpainting_masked_performed(url_img2img, simple_img2img_request, mask_basic_image_base64): + simple_img2img_request["mask"] = mask_basic_image_base64 + assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200 + + +def test_inpainting_with_inverted_masked_performed(url_img2img, simple_img2img_request, mask_basic_image_base64): + simple_img2img_request["mask"] = mask_basic_image_base64 + simple_img2img_request["inpainting_mask_invert"] = True + assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200 + + +def test_img2img_sd_upscale_performed(url_img2img, simple_img2img_request): + simple_img2img_request["script_name"] = "sd upscale" + simple_img2img_request["script_args"] = ["", 8, "Lanczos", 2.0] + assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200 diff --git a/test/test_txt2img.py b/test/test_txt2img.py new file mode 100644 index 00000000..6eb94f0a --- /dev/null +++ b/test/test_txt2img.py @@ -0,0 +1,90 @@ + +import pytest +import requests + + +@pytest.fixture() +def url_txt2img(base_url): + return f"{base_url}/sdapi/v1/txt2img" + + +@pytest.fixture() +def 
simple_txt2img_request(): + return { + "batch_size": 1, + "cfg_scale": 7, + "denoising_strength": 0, + "enable_hr": False, + "eta": 0, + "firstphase_height": 0, + "firstphase_width": 0, + "height": 64, + "n_iter": 1, + "negative_prompt": "", + "prompt": "example prompt", + "restore_faces": False, + "s_churn": 0, + "s_noise": 1, + "s_tmax": 0, + "s_tmin": 0, + "sampler_index": "Euler a", + "seed": -1, + "seed_resize_from_h": -1, + "seed_resize_from_w": -1, + "steps": 3, + "styles": [], + "subseed": -1, + "subseed_strength": 0, + "tiling": False, + "width": 64, + } + + +def test_txt2img_simple_performed(url_txt2img, simple_txt2img_request): + assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 + + +def test_txt2img_with_negative_prompt_performed(url_txt2img, simple_txt2img_request): + simple_txt2img_request["negative_prompt"] = "example negative prompt" + assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 + + +def test_txt2img_with_complex_prompt_performed(url_txt2img, simple_txt2img_request): + simple_txt2img_request["prompt"] = "((emphasis)), (emphasis1:1.1), [to:1], [from::2], [from:to:0.3], [alt|alt1]" + assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 + + +def test_txt2img_not_square_image_performed(url_txt2img, simple_txt2img_request): + simple_txt2img_request["height"] = 128 + assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 + + +def test_txt2img_with_hrfix_performed(url_txt2img, simple_txt2img_request): + simple_txt2img_request["enable_hr"] = True + assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 + + +def test_txt2img_with_tiling_performed(url_txt2img, simple_txt2img_request): + simple_txt2img_request["tiling"] = True + assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 + + +def test_txt2img_with_restore_faces_performed(url_txt2img, simple_txt2img_request): + simple_txt2img_request["restore_faces"] = True + assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 + + +@pytest.mark.parametrize("sampler", ["PLMS", "DDIM", "UniPC"]) +def test_txt2img_with_vanilla_sampler_performed(url_txt2img, simple_txt2img_request, sampler): + simple_txt2img_request["sampler_index"] = sampler + assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 + + +def test_txt2img_multiple_batches_performed(url_txt2img, simple_txt2img_request): + simple_txt2img_request["n_iter"] = 2 + assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 + + +def test_txt2img_batch_performed(url_txt2img, simple_txt2img_request): + simple_txt2img_request["batch_size"] = 2 + assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200 diff --git a/test/test_utils.py b/test/test_utils.py new file mode 100644 index 00000000..edba0b18 --- /dev/null +++ b/test/test_utils.py @@ -0,0 +1,33 @@ +import pytest +import requests + + +def test_options_write(base_url): + url_options = f"{base_url}/sdapi/v1/options" + response = requests.get(url_options) + assert response.status_code == 200 + + pre_value = response.json()["send_seed"] + + assert requests.post(url_options, json={'send_seed': (not pre_value)}).status_code == 200 + + response = requests.get(url_options) + assert response.status_code == 200 + assert response.json()['send_seed'] == (not pre_value) + + requests.post(url_options, json={"send_seed": pre_value}) + + +@pytest.mark.parametrize("url", 
[ + "sdapi/v1/cmd-flags", + "sdapi/v1/samplers", + "sdapi/v1/upscalers", + "sdapi/v1/sd-models", + "sdapi/v1/hypernetworks", + "sdapi/v1/face-restorers", + "sdapi/v1/realesrgan-models", + "sdapi/v1/prompt-styles", + "sdapi/v1/embeddings", +]) +def test_get_api_url(base_url, url): + assert requests.get(f"{base_url}/{url}").status_code == 200 -- cgit v1.2.3 From 31545abe145ac8833c9c15aa41300fb609dcb128 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 21 May 2023 07:31:39 +0300 Subject: add DPM-Solver++(2M) SDE from new k-diffusion --- launch.py | 2 +- modules/sd_samplers_kdiffusion.py | 14 ++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index b9b5b709..1d504c38 100644 --- a/launch.py +++ b/launch.py @@ -236,7 +236,7 @@ def prepare_environment(): stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf") taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6") - k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "5b3af030dd83e0297272d861c19477735d0317ec") + k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "c9fe758757e022f05ca5a53fa8fac28889e4f1cf") codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af") blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9") diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 552c6c64..1838cf08 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -19,7 +19,8 @@ samplers_k_diffusion = [ ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), - ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True}), + ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), + ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}), ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), @@ -27,7 +28,8 @@ samplers_k_diffusion = [ ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}), ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), - ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True}), + ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), + ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), ] samplers_data_k_diffusion = [ @@ -228,7 +230,7 @@ class KDiffusionSampler: self.sampler_noises = None self.stop_at = None self.eta = None - self.config = None + self.config = None # set by the function calling the 
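A small but telling detail in the sampler patch here: instead of special-casing the 'sample_dpmpp_sde' function name, each sampler's options dict now declares whether it needs a Brownian-tree noise sampler, so the new DPM++ 2M SDE entries opt in with a flag. The lookup pattern, reduced to a self-contained sketch:

samplers_k_diffusion_sketch = [
    # (label, options) -- trimmed to the fields this sketch needs
    ('DPM++ 2M', {}),
    ('DPM++ SDE', {'second_order': True, 'brownian_noise': True}),
    ('DPM++ 2M SDE', {'brownian_noise': True}),
]

def needs_brownian_noise(options: dict) -> bool:
    # New samplers opt in through configuration; no function-name
    # pattern matching is required when another SDE sampler is added.
    return options.get('brownian_noise', False)

for label, options in samplers_k_diffusion_sketch:
    print(label, needs_brownian_noise(options))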
constructor self.last_latent = None self.s_min_uncond = None @@ -337,13 +339,13 @@ class KDiffusionSampler: if 'sigmas' in parameters: extra_params_kwargs['sigmas'] = sigma_sched - if self.funcname == 'sample_dpmpp_sde': + if self.config.options.get('brownian_noise', False): noise_sampler = self.create_noise_sampler(x, sigmas, p) extra_params_kwargs['noise_sampler'] = noise_sampler self.model_wrap_cfg.init_latent = x self.last_latent = x - extra_args={ + extra_args = { 'cond': conditioning, 'image_cond': image_conditioning, 'uncond': unconditional_conditioning, @@ -373,7 +375,7 @@ class KDiffusionSampler: else: extra_params_kwargs['sigmas'] = sigmas - if self.funcname == 'sample_dpmpp_sde': + if self.config.options.get('brownian_noise', False): noise_sampler = self.create_noise_sampler(x, sigmas, p) extra_params_kwargs['noise_sampler'] = noise_sampler -- cgit v1.2.3 From 4b07984d1b92949311a703cfbb7dad5e96a7fb01 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 21 May 2023 16:15:16 +0300 Subject: reworking launch.py: rename --- launch.py | 348 ------------------------------------------------ modules/launch_utils.py | 348 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 348 insertions(+), 348 deletions(-) delete mode 100644 launch.py create mode 100644 modules/launch_utils.py (limited to 'launch.py') diff --git a/launch.py b/launch.py deleted file mode 100644 index 1d504c38..00000000 --- a/launch.py +++ /dev/null @@ -1,348 +0,0 @@ -# this scripts installs necessary requirements and launches main program in webui.py -import subprocess -import os -import sys -import importlib.util -import platform -import json -from functools import lru_cache - -from modules import cmd_args -from modules.paths_internal import script_path, extensions_dir - -args, _ = cmd_args.parser.parse_known_args() - -python = sys.executable -git = os.environ.get('GIT', "git") -index_url = os.environ.get('INDEX_URL', "") -dir_repos = "repositories" - -# Whether to default to printing command output -default_command_live = (os.environ.get('WEBUI_LAUNCH_LIVE_OUTPUT') == "1") - -if 'GRADIO_ANALYTICS_ENABLED' not in os.environ: - os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False' - - -def check_python_version(): - is_windows = platform.system() == "Windows" - major = sys.version_info.major - minor = sys.version_info.minor - micro = sys.version_info.micro - - if is_windows: - supported_minors = [10] - else: - supported_minors = [7, 8, 9, 10, 11] - - if not (major == 3 and minor in supported_minors): - import modules.errors - - modules.errors.print_error_explanation(f""" -INCOMPATIBLE PYTHON VERSION - -This program is tested with 3.10.6 Python, but you have {major}.{minor}.{micro}. -If you encounter an error with "RuntimeError: Couldn't install torch." message, -or any other error regarding unsuccessful package (library) installation, -please downgrade (or upgrade) to the latest version of 3.10 Python -and delete current Python and "venv" folder in WebUI's directory. - -You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3106/ - -{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""} - -Use --skip-python-version-check to suppress this warning. 
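The rename in progress here deletes launch.py wholesale and recreates its contents as modules/launch_utils.py; the patch itself leaves no launch.py behind, so a follow-up has to restore a thin entry point. A plausible sketch of such a stub (this is an assumption about the follow-up, not part of this commit):

# launch.py, hypothetical thin wrapper over modules.launch_utils
from modules import launch_utils

args = launch_utils.args

def main():
    if not args.skip_prepare_environment:
        launch_utils.prepare_environment()
    if args.test_server:
        launch_utils.configure_for_tests()
    launch_utils.start()

if __name__ == "__main__":
    main()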
-""") - - -@lru_cache() -def commit_hash(): - try: - return subprocess.check_output([git, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip() - except Exception: - return "" - - -@lru_cache() -def git_tag(): - try: - return subprocess.check_output([git, "describe", "--tags"], shell=False, encoding='utf8').strip() - except Exception: - return "" - - -def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_command_live) -> str: - if desc is not None: - print(desc) - - run_kwargs = { - "args": command, - "shell": True, - "env": os.environ if custom_env is None else custom_env, - "encoding": 'utf8', - "errors": 'ignore', - } - - if not live: - run_kwargs["stdout"] = run_kwargs["stderr"] = subprocess.PIPE - - result = subprocess.run(**run_kwargs) - - if result.returncode != 0: - error_bits = [ - f"{errdesc or 'Error running command'}.", - f"Command: {command}", - f"Error code: {result.returncode}", - ] - if result.stdout: - error_bits.append(f"stdout: {result.stdout}") - if result.stderr: - error_bits.append(f"stderr: {result.stderr}") - raise RuntimeError("\n".join(error_bits)) - - return (result.stdout or "") - - -def is_installed(package): - try: - spec = importlib.util.find_spec(package) - except ModuleNotFoundError: - return False - - return spec is not None - - -def repo_dir(name): - return os.path.join(script_path, dir_repos, name) - - -def run_pip(command, desc=None, live=default_command_live): - if args.skip_install: - return - - index_url_line = f' --index-url {index_url}' if index_url != '' else '' - return run(f'"{python}" -m pip {command} --prefer-binary{index_url_line}', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}", live=live) - - -def check_run_python(code: str) -> bool: - result = subprocess.run([python, "-c", code], capture_output=True, shell=False) - return result.returncode == 0 - - -def git_clone(url, dir, name, commithash=None): - # TODO clone into temporary dir and move if successful - - if os.path.exists(dir): - if commithash is None: - return - - current_hash = run(f'"{git}" -C "{dir}" rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip() - if current_hash == commithash: - return - - run(f'"{git}" -C "{dir}" fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}") - run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}") - return - - run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}") - - if commithash is not None: - run(f'"{git}" -C "{dir}" checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}") - - -def git_pull_recursive(dir): - for subdir, _, _ in os.walk(dir): - if os.path.exists(os.path.join(subdir, '.git')): - try: - output = subprocess.check_output([git, '-C', subdir, 'pull', '--autostash']) - print(f"Pulled changes for repository in '{subdir}':\n{output.decode('utf-8').strip()}\n") - except subprocess.CalledProcessError as e: - print(f"Couldn't perform 'git pull' on repository in '{subdir}':\n{e.output.decode('utf-8').strip()}\n") - - -def version_check(commit): - try: - import requests - commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json() - if commit != "" and commits['commit']['sha'] != commit: - print("--------------------------------------------------------") - print("| You are not up to date with the most recent release. 
|") - print("| Consider running `git pull` to update. |") - print("--------------------------------------------------------") - elif commits['commit']['sha'] == commit: - print("You are up to date with the most recent release.") - else: - print("Not a git clone, can't perform version check.") - except Exception as e: - print("version check failed", e) - - -def run_extension_installer(extension_dir): - path_installer = os.path.join(extension_dir, "install.py") - if not os.path.isfile(path_installer): - return - - try: - env = os.environ.copy() - env['PYTHONPATH'] = os.path.abspath(".") - - print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env)) - except Exception as e: - print(e, file=sys.stderr) - - -def list_extensions(settings_file): - settings = {} - - try: - if os.path.isfile(settings_file): - with open(settings_file, "r", encoding="utf8") as file: - settings = json.load(file) - except Exception as e: - print(e, file=sys.stderr) - - disabled_extensions = set(settings.get('disabled_extensions', [])) - disable_all_extensions = settings.get('disable_all_extensions', 'none') - - if disable_all_extensions != 'none': - return [] - - return [x for x in os.listdir(extensions_dir) if x not in disabled_extensions] - - -def run_extensions_installers(settings_file): - if not os.path.isdir(extensions_dir): - return - - for dirname_extension in list_extensions(settings_file): - run_extension_installer(os.path.join(extensions_dir, dirname_extension)) - - -def prepare_environment(): - torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu118") - torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}") - requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") - - xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') - gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "https://github.com/TencentARC/GFPGAN/archive/8d2447a2d918f8eba5a4a01463fd48e45126a379.zip") - clip_package = os.environ.get('CLIP_PACKAGE', "https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip") - openclip_package = os.environ.get('OPENCLIP_PACKAGE', "https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip") - - stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git") - taming_transformers_repo = os.environ.get('TAMING_TRANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git") - k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git') - codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git') - blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git') - - stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf") - taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6") - k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "c9fe758757e022f05ca5a53fa8fac28889e4f1cf") - codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af") - blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9") - - if not args.skip_python_version_check: - 
check_python_version() - - commit = commit_hash() - tag = git_tag() - - print(f"Python {sys.version}") - print(f"Version: {tag}") - print(f"Commit hash: {commit}") - - if args.reinstall_torch or not is_installed("torch") or not is_installed("torchvision"): - run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True) - - if not args.skip_torch_cuda_test and not check_run_python("import torch; assert torch.cuda.is_available()"): - raise RuntimeError( - 'Torch is not able to use GPU; ' - 'add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check' - ) - - if not is_installed("gfpgan"): - run_pip(f"install {gfpgan_package}", "gfpgan") - - if not is_installed("clip"): - run_pip(f"install {clip_package}", "clip") - - if not is_installed("open_clip"): - run_pip(f"install {openclip_package}", "open_clip") - - if (not is_installed("xformers") or args.reinstall_xformers) and args.xformers: - if platform.system() == "Windows": - if platform.python_version().startswith("3.10"): - run_pip(f"install -U -I --no-deps {xformers_package}", "xformers", live=True) - else: - print("Installation of xformers is not supported in this version of Python.") - print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness") - if not is_installed("xformers"): - exit(0) - elif platform.system() == "Linux": - run_pip(f"install -U -I --no-deps {xformers_package}", "xformers") - - if not is_installed("ngrok") and args.ngrok: - run_pip("install ngrok", "ngrok") - - os.makedirs(os.path.join(script_path, dir_repos), exist_ok=True) - - git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash) - git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash) - git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash) - git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash) - git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash) - - if not is_installed("lpips"): - run_pip(f"install -r \"{os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}\"", "requirements for CodeFormer") - - if not os.path.isfile(requirements_file): - requirements_file = os.path.join(script_path, requirements_file) - run_pip(f"install -r \"{requirements_file}\"", "requirements") - - run_extensions_installers(settings_file=args.ui_settings_file) - - if args.update_check: - version_check(commit) - - if args.update_all_extensions: - git_pull_recursive(extensions_dir) - - if "--exit" in sys.argv: - print("Exiting because of --exit argument") - exit(0) - - -def configure_for_tests(): - if "--api" not in sys.argv: - sys.argv.append("--api") - if "--ckpt" not in sys.argv: - sys.argv.append("--ckpt") - sys.argv.append(os.path.join(script_path, "test/test_files/empty.pt")) - if "--skip-torch-cuda-test" not in sys.argv: - sys.argv.append("--skip-torch-cuda-test") - if "--disable-nan-check" not in sys.argv: - sys.argv.append("--disable-nan-check") - - os.environ['COMMANDLINE_ARGS'] = "" - - -def start(): - print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}") - import webui - if '--nowebui' in sys.argv: - webui.api_only() - else: - webui.webui() - - -def main(): - if not args.skip_prepare_environment: - prepare_environment() - - 
if args.test_server:
-        configure_for_tests()
-
-    start()
-
-
-if __name__ == "__main__":
-    main()
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
new file mode 100644
index 00000000..1d504c38
--- /dev/null
+++ b/modules/launch_utils.py
@@ -0,0 +1,348 @@
+# this script installs necessary requirements and launches the main program in webui.py
+import subprocess
+import os
+import sys
+import importlib.util
+import platform
+import json
+from functools import lru_cache
+
+from modules import cmd_args
+from modules.paths_internal import script_path, extensions_dir
+
+args, _ = cmd_args.parser.parse_known_args()
+
+python = sys.executable
+git = os.environ.get('GIT', "git")
+index_url = os.environ.get('INDEX_URL', "")
+dir_repos = "repositories"
+
+# Whether to default to printing command output
+default_command_live = (os.environ.get('WEBUI_LAUNCH_LIVE_OUTPUT') == "1")
+
+if 'GRADIO_ANALYTICS_ENABLED' not in os.environ:
+    os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'
+
+
+def check_python_version():
+    is_windows = platform.system() == "Windows"
+    major = sys.version_info.major
+    minor = sys.version_info.minor
+    micro = sys.version_info.micro
+
+    if is_windows:
+        supported_minors = [10]
+    else:
+        supported_minors = [7, 8, 9, 10, 11]
+
+    if not (major == 3 and minor in supported_minors):
+        import modules.errors
+
+        modules.errors.print_error_explanation(f"""
+INCOMPATIBLE PYTHON VERSION
+
+This program is tested with Python 3.10.6, but you have {major}.{minor}.{micro}.
+If you encounter an error with the "RuntimeError: Couldn't install torch." message,
+or any other error regarding an unsuccessful package (library) installation,
+please downgrade (or upgrade) to the latest version of Python 3.10
+and delete the current Python and "venv" folder in WebUI's directory.
+
+You can download Python 3.10 from here: https://www.python.org/downloads/release/python-3106/
+
+{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""}
+
+Use --skip-python-version-check to suppress this warning.
+""") + + +@lru_cache() +def commit_hash(): + try: + return subprocess.check_output([git, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip() + except Exception: + return "" + + +@lru_cache() +def git_tag(): + try: + return subprocess.check_output([git, "describe", "--tags"], shell=False, encoding='utf8').strip() + except Exception: + return "" + + +def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_command_live) -> str: + if desc is not None: + print(desc) + + run_kwargs = { + "args": command, + "shell": True, + "env": os.environ if custom_env is None else custom_env, + "encoding": 'utf8', + "errors": 'ignore', + } + + if not live: + run_kwargs["stdout"] = run_kwargs["stderr"] = subprocess.PIPE + + result = subprocess.run(**run_kwargs) + + if result.returncode != 0: + error_bits = [ + f"{errdesc or 'Error running command'}.", + f"Command: {command}", + f"Error code: {result.returncode}", + ] + if result.stdout: + error_bits.append(f"stdout: {result.stdout}") + if result.stderr: + error_bits.append(f"stderr: {result.stderr}") + raise RuntimeError("\n".join(error_bits)) + + return (result.stdout or "") + + +def is_installed(package): + try: + spec = importlib.util.find_spec(package) + except ModuleNotFoundError: + return False + + return spec is not None + + +def repo_dir(name): + return os.path.join(script_path, dir_repos, name) + + +def run_pip(command, desc=None, live=default_command_live): + if args.skip_install: + return + + index_url_line = f' --index-url {index_url}' if index_url != '' else '' + return run(f'"{python}" -m pip {command} --prefer-binary{index_url_line}', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}", live=live) + + +def check_run_python(code: str) -> bool: + result = subprocess.run([python, "-c", code], capture_output=True, shell=False) + return result.returncode == 0 + + +def git_clone(url, dir, name, commithash=None): + # TODO clone into temporary dir and move if successful + + if os.path.exists(dir): + if commithash is None: + return + + current_hash = run(f'"{git}" -C "{dir}" rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip() + if current_hash == commithash: + return + + run(f'"{git}" -C "{dir}" fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}") + run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}") + return + + run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}") + + if commithash is not None: + run(f'"{git}" -C "{dir}" checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}") + + +def git_pull_recursive(dir): + for subdir, _, _ in os.walk(dir): + if os.path.exists(os.path.join(subdir, '.git')): + try: + output = subprocess.check_output([git, '-C', subdir, 'pull', '--autostash']) + print(f"Pulled changes for repository in '{subdir}':\n{output.decode('utf-8').strip()}\n") + except subprocess.CalledProcessError as e: + print(f"Couldn't perform 'git pull' on repository in '{subdir}':\n{e.output.decode('utf-8').strip()}\n") + + +def version_check(commit): + try: + import requests + commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json() + if commit != "" and commits['commit']['sha'] != commit: + print("--------------------------------------------------------") + print("| You are not up to date with the most recent release. 
|") + print("| Consider running `git pull` to update. |") + print("--------------------------------------------------------") + elif commits['commit']['sha'] == commit: + print("You are up to date with the most recent release.") + else: + print("Not a git clone, can't perform version check.") + except Exception as e: + print("version check failed", e) + + +def run_extension_installer(extension_dir): + path_installer = os.path.join(extension_dir, "install.py") + if not os.path.isfile(path_installer): + return + + try: + env = os.environ.copy() + env['PYTHONPATH'] = os.path.abspath(".") + + print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env)) + except Exception as e: + print(e, file=sys.stderr) + + +def list_extensions(settings_file): + settings = {} + + try: + if os.path.isfile(settings_file): + with open(settings_file, "r", encoding="utf8") as file: + settings = json.load(file) + except Exception as e: + print(e, file=sys.stderr) + + disabled_extensions = set(settings.get('disabled_extensions', [])) + disable_all_extensions = settings.get('disable_all_extensions', 'none') + + if disable_all_extensions != 'none': + return [] + + return [x for x in os.listdir(extensions_dir) if x not in disabled_extensions] + + +def run_extensions_installers(settings_file): + if not os.path.isdir(extensions_dir): + return + + for dirname_extension in list_extensions(settings_file): + run_extension_installer(os.path.join(extensions_dir, dirname_extension)) + + +def prepare_environment(): + torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu118") + torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}") + requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") + + xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') + gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "https://github.com/TencentARC/GFPGAN/archive/8d2447a2d918f8eba5a4a01463fd48e45126a379.zip") + clip_package = os.environ.get('CLIP_PACKAGE', "https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip") + openclip_package = os.environ.get('OPENCLIP_PACKAGE', "https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip") + + stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git") + taming_transformers_repo = os.environ.get('TAMING_TRANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git") + k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git') + codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git') + blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git') + + stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf") + taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6") + k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "c9fe758757e022f05ca5a53fa8fac28889e4f1cf") + codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af") + blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9") + + if not args.skip_python_version_check: + 
check_python_version() + + commit = commit_hash() + tag = git_tag() + + print(f"Python {sys.version}") + print(f"Version: {tag}") + print(f"Commit hash: {commit}") + + if args.reinstall_torch or not is_installed("torch") or not is_installed("torchvision"): + run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True) + + if not args.skip_torch_cuda_test and not check_run_python("import torch; assert torch.cuda.is_available()"): + raise RuntimeError( + 'Torch is not able to use GPU; ' + 'add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check' + ) + + if not is_installed("gfpgan"): + run_pip(f"install {gfpgan_package}", "gfpgan") + + if not is_installed("clip"): + run_pip(f"install {clip_package}", "clip") + + if not is_installed("open_clip"): + run_pip(f"install {openclip_package}", "open_clip") + + if (not is_installed("xformers") or args.reinstall_xformers) and args.xformers: + if platform.system() == "Windows": + if platform.python_version().startswith("3.10"): + run_pip(f"install -U -I --no-deps {xformers_package}", "xformers", live=True) + else: + print("Installation of xformers is not supported in this version of Python.") + print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness") + if not is_installed("xformers"): + exit(0) + elif platform.system() == "Linux": + run_pip(f"install -U -I --no-deps {xformers_package}", "xformers") + + if not is_installed("ngrok") and args.ngrok: + run_pip("install ngrok", "ngrok") + + os.makedirs(os.path.join(script_path, dir_repos), exist_ok=True) + + git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash) + git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash) + git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash) + git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash) + git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash) + + if not is_installed("lpips"): + run_pip(f"install -r \"{os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}\"", "requirements for CodeFormer") + + if not os.path.isfile(requirements_file): + requirements_file = os.path.join(script_path, requirements_file) + run_pip(f"install -r \"{requirements_file}\"", "requirements") + + run_extensions_installers(settings_file=args.ui_settings_file) + + if args.update_check: + version_check(commit) + + if args.update_all_extensions: + git_pull_recursive(extensions_dir) + + if "--exit" in sys.argv: + print("Exiting because of --exit argument") + exit(0) + + +def configure_for_tests(): + if "--api" not in sys.argv: + sys.argv.append("--api") + if "--ckpt" not in sys.argv: + sys.argv.append("--ckpt") + sys.argv.append(os.path.join(script_path, "test/test_files/empty.pt")) + if "--skip-torch-cuda-test" not in sys.argv: + sys.argv.append("--skip-torch-cuda-test") + if "--disable-nan-check" not in sys.argv: + sys.argv.append("--disable-nan-check") + + os.environ['COMMANDLINE_ARGS'] = "" + + +def start(): + print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}") + import webui + if '--nowebui' in sys.argv: + webui.api_only() + else: + webui.webui() + + +def main(): + if not args.skip_prepare_environment: + prepare_environment() + + 
if args.test_server: + configure_for_tests() + + start() + + +if __name__ == "__main__": + main() -- cgit v1.2.3 From f9fe5e5f9d7b7465da75bf8ba470785d2240caf7 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 21 May 2023 16:27:23 +0300 Subject: reworking launch.py: add references to renamed file --- launch.py | 38 ++++++++++++++++++++++++++++++++++++++ modules/launch_utils.py | 14 -------------- 2 files changed, 38 insertions(+), 14 deletions(-) create mode 100644 launch.py (limited to 'launch.py') diff --git a/launch.py b/launch.py new file mode 100644 index 00000000..b103c8f3 --- /dev/null +++ b/launch.py @@ -0,0 +1,38 @@ +from modules import launch_utils + + +args = launch_utils.args +python = launch_utils.python +git = launch_utils.git +index_url = launch_utils.index_url +dir_repos = launch_utils.dir_repos + +commit_hash = launch_utils.commit_hash +git_tag = launch_utils.git_tag + +run = launch_utils.run +is_installed = launch_utils.is_installed +repo_dir = launch_utils.repo_dir + +run_pip = launch_utils.run_pip +check_run_python = launch_utils.check_run_python +git_clone = launch_utils.git_clone +git_pull_recursive = launch_utils.git_pull_recursive +run_extension_installer = launch_utils.run_extension_installer +prepare_environment = launch_utils.prepare_environment +configure_for_tests = launch_utils.configure_for_tests +start = launch_utils.start + + +def main(): + if not args.skip_prepare_environment: + prepare_environment() + + if args.test_server: + configure_for_tests() + + start() + + +if __name__ == "__main__": + main() diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 1d504c38..35a52310 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -332,17 +332,3 @@ def start(): webui.api_only() else: webui.webui() - - -def main(): - if not args.skip_prepare_environment: - prepare_environment() - - if args.test_server: - configure_for_tests() - - start() - - -if __name__ == "__main__": - main() -- cgit v1.2.3
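Taken together, the series above ends with launch.py reduced to a thin shim over modules/launch_utils.py, with all command execution funneled through run(). For readers who want to try the pattern outside the webui tree, below is a minimal, self-contained sketch of that run() helper: envvar-controlled live output, a single subprocess.run() call built from a kwargs dict, and output capture only when not streaming. The names mirror the patches (WEBUI_LAUNCH_LIVE_OUTPUT, default_command_live), but this standalone script is illustrative only, not the shipped module.

import os
import subprocess

# Mirrors WEBUI_LAUNCH_LIVE_OUTPUT from the patches: set it to "1" to stream
# subprocess output directly instead of capturing it.
default_command_live = (os.environ.get('WEBUI_LAUNCH_LIVE_OUTPUT') == "1")


def run(command, desc=None, errdesc=None, custom_env=None, live=default_command_live) -> str:
    if desc is not None:
        print(desc)

    run_kwargs = {
        "args": command,
        "shell": True,
        "env": os.environ if custom_env is None else custom_env,
        "encoding": "utf8",
        "errors": "ignore",
    }

    if not live:
        # Capture both streams only when not live, so a failure can report them.
        run_kwargs["stdout"] = run_kwargs["stderr"] = subprocess.PIPE

    result = subprocess.run(**run_kwargs)

    if result.returncode != 0:
        raise RuntimeError(f"{errdesc or 'Error running command'}.\n"
                           f"Command: {command}\nError code: {result.returncode}")

    # result.stdout is None in live mode, hence the fallback to "".
    return result.stdout or ""


if __name__ == "__main__":
    # WEBUI_LAUNCH_LIVE_OUTPUT=1 python sketch.py  -> "hello" is printed live
    # python sketch.py                             -> "hello" is captured, then printed
    print(run("echo hello", desc="Running a test command"))

The same envvar-first convention covers the rest of prepare_environment(): TORCH_INDEX_URL, TORCH_COMMAND, XFORMERS_PACKAGE, and the *_REPO / *_COMMIT_HASH variables all resolve through os.environ.get(NAME, default), so any pinned dependency can be overridden at launch time without editing the file.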