path: root/v2-inference-v.yaml
author    InvincibleDude <81354513+InvincibleDude@users.noreply.github.com>    2023-01-29 11:36:10 +0000
committer    GitHub <noreply@github.com>    2023-01-29 11:36:10 +0000
commit    ee3d63b6beb88e63542976a1095d4c8aa97388bd (patch)
tree    cf891130682c343107ae0a0f8cec309aea16807a /v2-inference-v.yaml
parent    44c0e6b993d00bb2f441f0fde409bcb79136f034 (diff)
parent    00dab8f10defbbda579a1bc89c8d4e972c58a20d (diff)
Merge branch 'master' into master
Diffstat (limited to 'v2-inference-v.yaml')
-rw-r--r--    v2-inference-v.yaml    68
1 file changed, 0 insertions(+), 68 deletions(-)
diff --git a/v2-inference-v.yaml b/v2-inference-v.yaml
deleted file mode 100644
index 513cd635..00000000
--- a/v2-inference-v.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-model:
- base_learning_rate: 1.0e-4
- target: ldm.models.diffusion.ddpm.LatentDiffusion
- params:
- parameterization: "v"
- linear_start: 0.00085
- linear_end: 0.0120
- num_timesteps_cond: 1
- log_every_t: 200
- timesteps: 1000
- first_stage_key: "jpg"
- cond_stage_key: "txt"
- image_size: 64
- channels: 4
- cond_stage_trainable: false
- conditioning_key: crossattn
- monitor: val/loss_simple_ema
- scale_factor: 0.18215
- use_ema: False # we set this to false because this is an inference only config
-
- unet_config:
- target: ldm.modules.diffusionmodules.openaimodel.UNetModel
- params:
- use_checkpoint: True
- use_fp16: True
- image_size: 32 # unused
- in_channels: 4
- out_channels: 4
- model_channels: 320
- attention_resolutions: [ 4, 2, 1 ]
- num_res_blocks: 2
- channel_mult: [ 1, 2, 4, 4 ]
- num_head_channels: 64 # need to fix for flash-attn
- use_spatial_transformer: True
- use_linear_in_transformer: True
- transformer_depth: 1
- context_dim: 1024
- legacy: False
-
- first_stage_config:
- target: ldm.models.autoencoder.AutoencoderKL
- params:
- embed_dim: 4
- monitor: val/rec_loss
- ddconfig:
- #attn_type: "vanilla-xformers"
- double_z: true
- z_channels: 4
- resolution: 256
- in_channels: 3
- out_ch: 3
- ch: 128
- ch_mult:
- - 1
- - 2
- - 4
- - 4
- num_res_blocks: 2
- attn_resolutions: []
- dropout: 0.0
- lossconfig:
- target: torch.nn.Identity
-
- cond_stage_config:
- target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
- params:
- freeze: True
- layer: "penultimate" \ No newline at end of file