| author | Aarni Koskela <akx@iki.fi> | 2023-05-11 15:28:15 +0000 |
|---|---|---|
| committer | Aarni Koskela <akx@iki.fi> | 2023-05-11 17:29:11 +0000 |
| commit | 49a55b410b66b7dd9be9335d8a2e3a71e4f8b15c (patch) | |
| tree | d79f004eae46bc1c49832f3c668a524107c30034 /modules/codeformer/codeformer_arch.py | |
| parent | 431bc5a297ff7c17231b92b6c8f8152b2fab8553 (diff) | |
| download | stable-diffusion-webui-gfx803-49a55b410b66b7dd9be9335d8a2e3a71e4f8b15c.tar.gz stable-diffusion-webui-gfx803-49a55b410b66b7dd9be9335d8a2e3a71e4f8b15c.tar.bz2 stable-diffusion-webui-gfx803-49a55b410b66b7dd9be9335d8a2e3a71e4f8b15c.zip | |
Autofix Ruff W (not W605) (mostly whitespace)
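This commit applies Ruff's auto-fixes for the pycodestyle warning rules (`W`), excluding `W605` (invalid escape sequence). Judging from the diff below, the fixes are trailing whitespace removed from code lines (`W291`), whitespace stripped from blank lines (`W293`), and a newline added at the end of the file (`W292`). The exact invocation is not recorded in the commit, but it would have been something along the lines of `ruff check . --select W --ignore W605 --fix`.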
Diffstat (limited to 'modules/codeformer/codeformer_arch.py')
| -rw-r--r-- | modules/codeformer/codeformer_arch.py | 14 |
|---|---|---|

1 file changed, 7 insertions, 7 deletions
diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py
index 45c70f84..12db6814 100644
--- a/modules/codeformer/codeformer_arch.py
+++ b/modules/codeformer/codeformer_arch.py
@@ -119,7 +119,7 @@ class TransformerSALayer(nn.Module):
                 tgt_mask: Optional[Tensor] = None,
                 tgt_key_padding_mask: Optional[Tensor] = None,
                 query_pos: Optional[Tensor] = None):
-        
+
         # self attention
         tgt2 = self.norm1(tgt)
         q = k = self.with_pos_embed(tgt2, query_pos)
@@ -159,7 +159,7 @@ class Fuse_sft_block(nn.Module):
 
 @ARCH_REGISTRY.register()
 class CodeFormer(VQAutoEncoder):
-    def __init__(self, dim_embd=512, n_head=8, n_layers=9, 
+    def __init__(self, dim_embd=512, n_head=8, n_layers=9,
                 codebook_size=1024, latent_size=256,
                 connect_list=('32', '64', '128', '256'),
                 fix_modules=('quantize', 'generator')):
@@ -179,14 +179,14 @@ class CodeFormer(VQAutoEncoder):
         self.feat_emb = nn.Linear(256, self.dim_embd)
 
         # transformer
-        self.ft_layers = nn.Sequential(*[TransformerSALayer(embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0) 
+        self.ft_layers = nn.Sequential(*[TransformerSALayer(embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0)
                                     for _ in range(self.n_layers)])
 
         # logits_predict head
         self.idx_pred_layer = nn.Sequential(
             nn.LayerNorm(dim_embd),
             nn.Linear(dim_embd, codebook_size, bias=False))
-        
+
         self.channels = {
             '16': 512,
             '32': 256,
@@ -221,7 +221,7 @@ class CodeFormer(VQAutoEncoder):
         enc_feat_dict = {}
         out_list = [self.fuse_encoder_block[f_size] for f_size in self.connect_list]
         for i, block in enumerate(self.encoder.blocks):
-            x = block(x) 
+            x = block(x)
             if i in out_list:
                 enc_feat_dict[str(x.shape[-1])] = x.clone()
 
@@ -266,11 +266,11 @@ class CodeFormer(VQAutoEncoder):
         fuse_list = [self.fuse_generator_block[f_size] for f_size in self.connect_list]
         for i, block in enumerate(self.generator.blocks):
-            x = block(x) 
+            x = block(x)
             if i in fuse_list: # fuse after i-th block
                 f_size = str(x.shape[-1])
                 if w>0:
                     x = self.fuse_convs_dict[f_size](enc_feat_dict[f_size].detach(), x, w)
 
         out = x
         # logits doesn't need softmax before cross_entropy loss
-        return out, logits, lq_feat
\ No newline at end of file
+        return out, logits, lq_feat
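None of the hunks above change behavior; for example, the touched `ft_layers` line only loses a trailing space. As an aside for readers unfamiliar with the construction that line uses, here is a minimal, self-contained sketch of stacking `n_layers` independent copies of a module with `nn.Sequential` (the `DummyLayer` below is a hypothetical stand-in for `TransformerSALayer`, which is defined earlier in codeformer_arch.py):

```python
import torch
from torch import nn


class DummyLayer(nn.Module):
    """Hypothetical stand-in for TransformerSALayer, only to illustrate the pattern."""

    def __init__(self, embed_dim=512):
        super().__init__()
        self.proj = nn.Linear(embed_dim, embed_dim)

    def forward(self, x):
        return self.proj(x)


# Same construction as CodeFormer's ft_layers: unpack a list comprehension
# of n_layers layer instances into nn.Sequential. Each instance gets its
# own weights, unlike e.g. nn.Sequential(*([layer] * n_layers)), which
# would reuse a single instance (and its weights) n_layers times.
n_layers = 9
ft_layers = nn.Sequential(*[DummyLayer(embed_dim=512) for _ in range(n_layers)])

x = torch.randn(256, 1, 512)  # (sequence, batch, embed_dim), as the model uses
print(ft_layers(x).shape)     # torch.Size([256, 1, 512])
```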