-rw-r--r--  localizations/zh_CN.json          | 42
-rw-r--r--  localizations/zh_TW.json          |  2
-rw-r--r--  modules/api/models.py             |  1
-rw-r--r--  modules/scripts.py                | 34
-rw-r--r--  modules/sd_models.py              |  5
-rw-r--r--  modules/ui.py                     |  4
-rw-r--r--  scripts/custom_code.py            |  2
-rw-r--r--  scripts/outpainting_mk_2.py       |  2
-rw-r--r--  scripts/poor_mans_outpainting.py  |  4
-rw-r--r--  scripts/prompts_from_file.py      | 10
-rw-r--r--  scripts/sd_upscale.py             |  4
-rw-r--r--  scripts/xy_grid.py                |  8
12 files changed, 68 insertions, 50 deletions
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index 8a00c11c..56c8980e 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -7,7 +7,7 @@
"Loading...": "载入中...",
"view": "查看",
"api": "api",
- "•": "•",
+ "•": " • ",
"built with gradio": "基于 Gradio 构建",
"Stable Diffusion checkpoint": "Stable Diffusion 模型(ckpt)",
"txt2img": "文生图",
@@ -69,15 +69,15 @@
"Variation strength": "差异强度",
"Resize seed from width": "自宽度缩放随机种子",
"Resize seed from height": "自高度缩放随机种子",
- "Open for Clip Aesthetic!": "打开美术风格 Clip!",
+ "Open for Clip Aesthetic!": "打开以调整 Clip 的美术风格!",
"Aesthetic weight": "美术风格权重",
"Aesthetic steps": "美术风格迭代步数",
"Aesthetic learning rate": "美术风格学习率",
- "Slerp interpolation": "Slerp 插值",
+ "Slerp interpolation": "球面线性插值",
"Aesthetic imgs embedding": "美术风格图集 embedding",
"None": "无",
"Aesthetic text for imgs": "该图集的美术风格描述",
- "Slerp angle": "Slerp 角度",
+ "Slerp angle": "球面线性插值角度",
"Is negative text": "是反向提示词",
"Script": "脚本",
"Embedding to Shareable PNG": "将 Embedding 转换为可分享的 PNG 图片文件",
@@ -181,7 +181,9 @@
"Color variation": "色彩变化",
"Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "将图像放大到两倍尺寸; 使用宽度和高度滑块设置图块尺寸(tile size)",
"Tile overlap": "图块重叠的像素(Tile overlap)",
+ "Upscaler": "放大算法",
"Lanczos": "Lanczos",
+ "Nearest": "最邻近(整数缩放)",
"LDSR": "LDSR",
"BSRGAN 4x": "BSRGAN 4x",
"ESRGAN_4x": "ESRGAN_4x",
@@ -198,6 +200,7 @@
"Scale to": "指定尺寸缩放",
"Resize": "缩放",
"Crop to fit": "裁剪以适应",
+ "Upscaler 2 visibility": "放大算法 2 (Upscaler 2) 可见度",
"GFPGAN visibility": "GFPGAN 可见度",
"CodeFormer visibility": "CodeFormer 可见度",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "CodeFormer 权重 (0 = 最大效果, 1 = 最小效果)",
@@ -286,7 +289,7 @@
"Create debug image": "生成调试(debug)图片",
"Preprocess": "预处理",
"Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "训练 embedding 或者 hypernetwork; 必须指定一组具有 1:1 比例图像的目录",
- "[wiki]": "[帮助]",
+ "[wiki]": "[wiki文档]",
"Embedding": "Embedding",
"Embedding Learning rate": "Embedding 学习率",
"Hypernetwork Learning rate": "Hypernetwork 学习率",
@@ -456,7 +459,17 @@
"Extension": "扩展",
"URL": "网址",
"Update": "更新",
+ "a1111-sd-webui-tagcomplete": "Tag自动补全",
"unknown": "未知",
+ "deforum-for-automatic1111-webui": "Deforum",
+ "sd-dynamic-prompting": "动态提示词",
+ "stable-diffusion-webui-aesthetic-gradients": "美术风格梯度",
+ "stable-diffusion-webui-aesthetic-image-scorer": "美术风格评分",
+ "stable-diffusion-webui-artists-to-study": "艺术家图库",
+ "stable-diffusion-webui-dataset-tag-editor": "数据集标签编辑器",
+ "stable-diffusion-webui-images-browser": "图库浏览器",
+ "stable-diffusion-webui-inspiration": "灵感",
+ "stable-diffusion-webui-wildcards": "通配符",
"Load from:": "加载自",
"Extension index URL": "扩展列表链接",
"URL for extension's git repository": "扩展的 git 仓库链接",
@@ -486,8 +499,8 @@
"What to put inside the masked area before processing it with Stable Diffusion.": "在使用 Stable Diffusion 处理蒙版区域之前要在蒙版区域内放置什么",
"fill it with colors of the image": "用图像的颜色(高强度模糊)填充它",
"keep whatever was there originally": "保留原来的图像,不进行预处理",
- "fill it with latent space noise": "用潜空间的噪声填充它",
- "fill it with latent space zeroes": "用潜空间的零填充它",
+ "fill it with latent space noise": "于潜空间填充噪声",
+ "fill it with latent space zeroes": "于潜空间填零",
"Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域(包括预留像素长度的缓冲区域)放大到目标分辨率,进行局部重绘。\n然后缩小并粘贴回原始图像中",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "将图像大小调整为目标分辨率。除非高度和宽度匹配,否则你将获得不正确的纵横比",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "调整图像大小,使整个目标分辨率都被图像填充。裁剪多出来的部分",
@@ -516,6 +529,8 @@
"Autocomplete options": "自动补全选项",
"Enable Autocomplete": "开启Tag补全",
"Append commas": "附加逗号",
+ "latest": "最新",
+ "behind": "落后",
"Roll three": "抽三位出来",
"Generate forever": "无限生成",
"Cancel generate forever": "停止无限生成",
@@ -536,15 +551,13 @@
"Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.": "在生成图像之前从模型(ckpt)中加载权重。你可以使用哈希值或文件名的一部分(如设置中所示)作为模型(ckpt)名称。建议用在Y轴上以减少过程中模型的切换",
"Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).": "Torch active: 在生成过程中,Torch使用的显存(VRAM)峰值,不包括缓存的数据。\nTorch reserved: Torch 分配的显存(VRAM)的峰值量,包括所有活动和缓存数据。\nSys VRAM: 所有应用程序分配的显存(VRAM)的峰值量 / GPU 的总显存(VRAM)(峰值利用率%)",
"Uscale the image in latent space. Alternative is to produce the full image from latent representation, upscale that, and then move it back to latent space.": "放大潜空间中的图像。而另一种方法是,从潜变量表达中直接解码并生成完整的图像,接着放大它,然后再将其编码回潜空间",
- "Upscaler": "放大算法",
"Start drawing": "开始绘制",
"Description": "描述",
"Action": "行动",
- "Aesthetic Gradients": "美术风格",
- "aesthetic-gradients": "美术风格",
+ "Aesthetic Gradients": "美术风格梯度",
+ "aesthetic-gradients": "美术风格梯度",
"Wildcards": "通配符",
- "stable-diffusion-webui-wildcards": "通配符",
- "Dynamic Prompts": "动态提示",
+ "Dynamic Prompts": "动态提示词",
"Image browser": "图库浏览器",
"images-browser": "图库浏览器",
"Inspiration": "灵感",
@@ -554,7 +567,7 @@
"Dataset Tag Editor": "数据集标签编辑器",
- "----无效----": "----以下内容无法被翻译,Bug----",
+ "----not work----": "----以下内容无法被翻译,Bug----",
"Add a random artist to the prompt.": "随机添加一个艺术家到提示词中",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面",
"Save style": "储存为模版风格",
@@ -574,10 +587,9 @@
"Style to apply; styles have components for both positive and negative prompts and apply to both": "要使用的模版风格; 模版风格包含正向和反向提示词,并应用于两者\n\ud83c\udfa8 随机添加一个艺术家到提示词中\n \u2199\ufe0f 从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面\n\ud83d\udcbe 将当前的提示词保存为模版风格(保存在styles.csv)\n\ud83d\udccb 将所选模板风格,应用于当前提示词\n如果你在文本中添加{prompt}标记,并保存为模版风格\n那么将来你使用该模版风格时,你现有的提示词会替换模版风格中的{prompt}",
"A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "一个固定随机数生成器输出的值 - 以相同参数和随机种子生成的图像会得到相同的结果\n\ud83c\udfb2 将随机种子设置为-1,则每次都会使用一个新的随机数\n\u267b\ufe0f 重用上一次使用的随机种子,如果想要固定输出结果就会很有用",
- "Upscaler 2 visibility": "放大算法 2 (Upscaler 2) 可见度",
- "----已移除----": "----以下内容在webui新版本已移除----",
+ "----deprecated----": "----以下内容在webui新版本已移除----",
"▼": "▼",
"History": "历史记录",
"Show Textbox": "显示文本框",
diff --git a/localizations/zh_TW.json b/localizations/zh_TW.json
index 7467db88..4e6dac44 100644
--- a/localizations/zh_TW.json
+++ b/localizations/zh_TW.json
@@ -408,6 +408,7 @@
"Training": "訓練",
"Unload VAE and CLIP from VRAM when training": "訓練時從顯存(VRAM)中取消 VAE 和 CLIP 的載入",
"Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "訓練時將 VAE 和 CLIP 從顯存(VRAM)移放到內存(RAM),節省顯存(VRAM)",
+ "Move VAE and CLIP to RAM when training if possible. Saves VRAM.": "訓練時將 VAE 和 CLIP 從顯存(VRAM)移放到內存(RAM)如果可行的話,節省顯存(VRAM)",
"Filename word regex": "檔案名用詞的正則表達式",
"Filename join string": "檔案名連接用字串",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "每個 epoch 中單個輸入圖像的重複次數; 僅用於顯示 epoch 數",
@@ -590,6 +591,7 @@
"Artists to study": "藝術家圖庫",
"Aesthetic Image Scorer": "美術風格評分",
"Dataset Tag Editor": "數據集標記編輯器",
+ "Face restoration model": "面部修復模型",
"Install": "安裝",
"Installing...": "安裝中…",
"Installed": "已安裝"
diff --git a/modules/api/models.py b/modules/api/models.py
index 9ee42a17..68fb45c6 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -131,6 +131,7 @@ class ExtrasBaseRequest(BaseModel):
upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.")
+ upscale_first: bool = Field(default=False, title="Upscale first", description="Should the upscaler run before restoring faces?")

class ExtraBaseResponse(BaseModel):
html_info: str = Field(title="HTML info", description="A series of HTML tags containing the process info.")
diff --git a/modules/scripts.py b/modules/scripts.py
index 533db45c..28ce07f4 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -18,6 +18,9 @@ class Script:
args_to = None
alwayson = False
+ """A gr.Group component that has all script's UI inside it"""
+ group = None
+
infotext_fields = None
"""if set in ui(), this is a list of pairs of gradio component + text; the text will be used when
parsing infotext to set the value for the component; see ui.py's txt2img_paste_fields for an example
@@ -218,8 +221,6 @@ class ScriptRunner:
for control in controls:
control.custom_script_source = os.path.basename(script.filename)
- if not script.alwayson:
- control.visible = False
if script.infotext_fields is not None:
self.infotext_fields += script.infotext_fields
@@ -229,40 +230,41 @@ class ScriptRunner:
script.args_to = len(inputs)
for script in self.alwayson_scripts:
- with gr.Group():
+ with gr.Group() as group:
create_script_ui(script, inputs, inputs_alwayson)
+ script.group = group
+
dropdown = gr.Dropdown(label="Script", elem_id="script_list", choices=["None"] + self.titles, value="None", type="index")
dropdown.save_to_config = True
inputs[0] = dropdown
for script in self.selectable_scripts:
- create_script_ui(script, inputs, inputs_alwayson)
+ with gr.Group(visible=False) as group:
+ create_script_ui(script, inputs, inputs_alwayson)
+
+ script.group = group
def select_script(script_index):
- if 0 < script_index <= len(self.selectable_scripts):
- script = self.selectable_scripts[script_index-1]
- args_from = script.args_from
- args_to = script.args_to
- else:
- args_from = 0
- args_to = 0
+ selected_script = self.selectable_scripts[script_index - 1] if script_index>0 else None
- return [ui.gr_show(True if i == 0 else args_from <= i < args_to or is_alwayson) for i, is_alwayson in enumerate(inputs_alwayson)]
+ return [gr.update(visible=selected_script == s) for s in self.selectable_scripts]
def init_field(title):
+ """called when an initial value is set from ui-config.json to show script's UI components"""
+
if title == 'None':
return
+
script_index = self.titles.index(title)
- script = self.selectable_scripts[script_index]
- for i in range(script.args_from, script.args_to):
- inputs[i].visible = True
+ self.selectable_scripts[script_index].group.visible = True
dropdown.init_field = init_field
+
dropdown.change(
fn=select_script,
inputs=[dropdown],
- outputs=inputs
+ outputs=[script.group for script in self.selectable_scripts]
)
return inputs
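
Aside: the modules/scripts.py change above replaces per-control visibility toggling with one gr.Group per selectable script, shown or hidden from the Script dropdown's change callback. The snippet below is a minimal standalone sketch of that pattern, assuming Gradio 3.x Blocks; the script titles and slider control are made up for illustration and are not part of the webui code.

```python
# Hedged sketch of the "one hidden gr.Group per script" pattern (illustrative only).
import gradio as gr

script_titles = ["Scale it up", "Describe it"]  # hypothetical script names

with gr.Blocks() as demo:
    groups = []
    for title in script_titles:
        # each script's controls live inside their own hidden group
        with gr.Group(visible=False) as group:
            gr.Slider(minimum=1, maximum=8, value=2, label=f"{title}: factor")
        groups.append(group)

    dropdown = gr.Dropdown(label="Script", choices=["None"] + script_titles,
                           value="None", type="index")

    def select_script(script_index):
        # index 0 is "None"; show only the group that belongs to the chosen script
        selected = groups[script_index - 1] if script_index > 0 else None
        return [gr.update(visible=group is selected) for group in groups]

    dropdown.change(fn=select_script, inputs=[dropdown], outputs=groups)

if __name__ == "__main__":
    demo.launch()
```

Because the whole group is toggled, individual controls no longer need visible=False, which is why the per-script diffs further down simply drop that argument.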
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 5075fadb..ae427a5c 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -204,8 +204,9 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
checkpoints_loaded.popitem(last=False) # LRU
else:
- vae_name = sd_vae.get_filename(vae_file)
- print(f"Loading weights [{sd_model_hash}] with {vae_name} VAE from cache")
+ vae_name = sd_vae.get_filename(vae_file) if vae_file else None
+ vae_message = f" with {vae_name} VAE" if vae_name else ""
+ print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
checkpoints_loaded.move_to_end(checkpoint_key)
model.load_state_dict(checkpoints_loaded[checkpoint_key])
diff --git a/modules/ui.py b/modules/ui.py
index 2609857e..6461002a 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1052,6 +1052,8 @@ def create_ui(wrap_gradio_gpu_call):
extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.")
show_extras_results = gr.Checkbox(label='Show result images', value=True)
+ submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
+
with gr.Tabs(elem_id="extras_resize_mode"):
with gr.TabItem('Scale by'):
upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4)
@@ -1079,8 +1081,6 @@ def create_ui(wrap_gradio_gpu_call):
with gr.Group():
upscale_before_face_fix = gr.Checkbox(label='Upscale Before Restoring Faces', value=False)
- submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
-
result_images, html_info_x, html_info = create_output_panel("extras", opts.outdir_extras_samples)
submit.click(
diff --git a/scripts/custom_code.py b/scripts/custom_code.py
index a9b10c09..22e7b77a 100644
--- a/scripts/custom_code.py
+++ b/scripts/custom_code.py
@@ -14,7 +14,7 @@ class Script(scripts.Script):
return cmd_opts.allow_code
def ui(self, is_img2img):
- code = gr.Textbox(label="Python code", visible=False, lines=1)
+ code = gr.Textbox(label="Python code", lines=1)
return [code]
diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py
index 2afd4aa5..cf71cb92 100644
--- a/scripts/outpainting_mk_2.py
+++ b/scripts/outpainting_mk_2.py
@@ -132,7 +132,7 @@ class Script(scripts.Script):
info = gr.HTML("<p style=\"margin-bottom:0.75em\">Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8</p>")
pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128)
- mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, visible=False)
+ mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8)
direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'])
noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0)
color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05)
diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py
index b0469110..ea45beb0 100644
--- a/scripts/poor_mans_outpainting.py
+++ b/scripts/poor_mans_outpainting.py
@@ -22,8 +22,8 @@ class Script(scripts.Script):
return None
pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128)
- mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, visible=False)
- inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", visible=False)
+ mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
+ inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index")
direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'])
return [pixels, mask_blur, inpainting_fill, direction]
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index d187cd9c..3388bc77 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -83,13 +83,14 @@ def cmdargs(line):
def load_prompt_file(file):
- if (file is None):
+ if file is None:
lines = []
else:
lines = [x.strip() for x in file.decode('utf8', errors='ignore').split("\n")]
return None, "\n".join(lines), gr.update(lines=7)
+
class Script(scripts.Script):
def title(self):
return "Prompts from file or textbox"
@@ -107,9 +108,9 @@ class Script(scripts.Script):
# We don't shrink back to 1, because that causes the control to ignore [enter], and it may
# be unclear to the user that shift-enter is needed.
prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt])
- return [checkbox_iterate, checkbox_iterate_batch, file, prompt_txt]
+ return [checkbox_iterate, checkbox_iterate_batch, prompt_txt]
- def run(self, p, checkbox_iterate, checkbox_iterate_batch, file, prompt_txt: str):
+ def run(self, p, checkbox_iterate, checkbox_iterate_batch, prompt_txt: str):
lines = [x.strip() for x in prompt_txt.splitlines()]
lines = [x for x in lines if len(x) > 0]
@@ -157,5 +158,4 @@ class Script(scripts.Script):
if checkbox_iterate:
p.seed = p.seed + (p.batch_size * p.n_iter)
-
- return Processed(p, images, p.seed, "")
\ No newline at end of file
+ return Processed(p, images, p.seed, "")
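
Aside: after the prompts_from_file.py change above, run() no longer receives the uploaded file; the upload only fills the prompt textbox via load_prompt_file(). The sketch below wires that flow end to end, assuming Gradio 3.x; only load_prompt_file() itself comes from the diff, while the file.change() wiring and the demo parse step are illustrative assumptions.

```python
# Hedged sketch: the uploaded file only populates the textbox, and the run logic
# reads prompts from the textbox alone (mirroring the new run() signature).
import gradio as gr

def load_prompt_file(file):
    # as in the diff: decode the uploaded bytes and hand the lines to the textbox
    if file is None:
        lines = []
    else:
        lines = [x.strip() for x in file.decode('utf8', errors='ignore').split("\n")]

    return None, "\n".join(lines), gr.update(lines=7)

with gr.Blocks() as demo:
    prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1)
    file = gr.File(label="Upload prompt inputs", type="binary")

    # assumed wiring: clear the upload control, fill the textbox, grow it to 7 lines
    file.change(fn=load_prompt_file, inputs=[file], outputs=[file, prompt_txt, prompt_txt])

    def parse_prompts(text):
        # prompts come from the textbox only; drop blank lines
        lines = [x.strip() for x in text.splitlines()]
        return "\n".join(x for x in lines if x)

    preview = gr.Textbox(label="Parsed prompts (illustrative)")
    gr.Button("Parse").click(fn=parse_prompts, inputs=[prompt_txt], outputs=[preview])

if __name__ == "__main__":
    demo.launch()
```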
diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py
index cb37ff7e..01074291 100644
--- a/scripts/sd_upscale.py
+++ b/scripts/sd_upscale.py
@@ -18,8 +18,8 @@ class Script(scripts.Script):
def ui(self, is_img2img):
info = gr.HTML("<p style=\"margin-bottom:0.75em\">Will upscale the image to twice the dimensions; use width and height sliders to set tile size</p>")
- overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, visible=False)
- upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", visible=False)
+ overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64)
+ upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
return [info, overlap, upscaler_index]
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index f5255786..417ed0d4 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -263,12 +263,12 @@ class Script(scripts.Script):
current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img]
with gr.Row():
- x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, visible=False, type="index", elem_id="x_type")
- x_values = gr.Textbox(label="X values", visible=False, lines=1)
+ x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id="x_type")
+ x_values = gr.Textbox(label="X values", lines=1)
with gr.Row():
- y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, visible=False, type="index", elem_id="y_type")
- y_values = gr.Textbox(label="Y values", visible=False, lines=1)
+ y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, type="index", elem_id="y_type")
+ y_values = gr.Textbox(label="Y values", lines=1)
draw_legend = gr.Checkbox(label='Draw legend', value=True)
include_lone_images = gr.Checkbox(label='Include Separate Images', value=False)