Date: Fri, 3 Nov 2023 00:59:19 -0600
Subject: Fix parenthesis auto selection
Fixes #13813
---
javascript/edit-attention.js | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'javascript')
diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js
index 04464100..688c2f11 100644
--- a/javascript/edit-attention.js
+++ b/javascript/edit-attention.js
@@ -28,7 +28,7 @@ function keyupEditAttention(event) {
if (afterParen == -1) return false;
let afterOpeningParen = after.indexOf(OPEN);
- if (afterOpeningParen != -1 && afterOpeningParen < beforeParen) return false;
+ if (afterOpeningParen != -1 && afterOpeningParen < afterParen) return false;
// Set the selection to the text between the parenthesis
const parenContent = text.substring(beforeParen + 1, selectionStart + afterParen);
--
cgit v1.2.3
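For context on the fix above: `before` and `after` are the prompt text on each side of the cursor, so an index found in `after` must only be compared against other indices into `after`; the old guard compared it against an index into `before`. A simplified standalone sketch of the corrected logic (variable names follow edit-attention.js; the real function is also applied to other bracket pairs):

    // Decide whether the cursor sits inside a plain (...) pair.
    function cursorInsideParens(before, after, OPEN = '(', CLOSE = ')') {
        const beforeParen = before.lastIndexOf(OPEN);
        if (beforeParen == -1) return false;       // no opening paren to the left
        const afterParen = after.indexOf(CLOSE);
        if (afterParen == -1) return false;        // no closing paren to the right
        const afterOpeningParen = after.indexOf(OPEN);
        // fixed check: a '(' on the right that starts before the next ')'
        // means the cursor is between two pairs, not inside one
        if (afterOpeningParen != -1 && afterOpeningParen < afterParen) return false;
        return true;
    }

    // cursorInsideParens('(red ', 'hair)')              -> true
    // cursorInsideParens('(red hair) ', ' (blue eyes)') -> false (was true before the fix)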
From 6b8c661c49796bba093ca8a8301e81d28afb9832 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 5 Nov 2023 08:55:54 +0300
Subject: add a visible checkbox to input accordion
---
javascript/inputAccordion.js | 79 ++++++++++++++++++++++++++++++--------------
style.css | 5 +++
2 files changed, 59 insertions(+), 25 deletions(-)
(limited to 'javascript')
diff --git a/javascript/inputAccordion.js b/javascript/inputAccordion.js
index f2839852..8fc01230 100644
--- a/javascript/inputAccordion.js
+++ b/javascript/inputAccordion.js
@@ -1,37 +1,66 @@
-var observerAccordionOpen = new MutationObserver(function(mutations) {
- mutations.forEach(function(mutationRecord) {
- var elem = mutationRecord.target;
- var open = elem.classList.contains('open');
+function inputAccordionChecked(id, checked) {
+ var accordion = gradioApp().getElementById(id);
+ accordion.visibleCheckbox.checked = checked;
+ accordion.onVisibleCheckboxChange();
+}
- var accordion = elem.parentNode;
- accordion.classList.toggle('input-accordion-open', open);
+function setupAccordion(accordion){
+ var labelWrap = accordion.querySelector('.label-wrap');
+ var gradioCheckbox = gradioApp().querySelector('#' + accordion.id + "-checkbox input");
+ var extra = gradioApp().querySelector('#' + accordion.id + "-extra");
+ var span = labelWrap.querySelector('span');
+ var linked = true;
- var checkbox = gradioApp().querySelector('#' + accordion.id + "-checkbox input");
- checkbox.checked = open;
- updateInput(checkbox);
+ var isOpen = function(){ return labelWrap.classList.contains('open'); }
- var extra = gradioApp().querySelector('#' + accordion.id + "-extra");
- if (extra) {
- extra.style.display = open ? "" : "none";
- }
+ var observerAccordionOpen = new MutationObserver(function(mutations) {
+ mutations.forEach(function(mutationRecord) {
+ accordion.classList.toggle('input-accordion-open', isOpen());
+
+ if(linked){
+ accordion.visibleCheckbox.checked = isOpen();
+ accordion.onVisibleCheckboxChange();
+ }
+ });
});
-});
+ observerAccordionOpen.observe(labelWrap, {attributes: true, attributeFilter: ['class']});
-function inputAccordionChecked(id, checked) {
- var label = gradioApp().querySelector('#' + id + " .label-wrap");
- if (label.classList.contains('open') != checked) {
- label.click();
+ if (extra) {
+ labelWrap.insertBefore(extra, labelWrap.lastElementChild);
+ }
+
+ accordion.onChecked = function(checked){
+ if (isOpen() != checked) {
+ labelWrap.click();
+ }
}
+
+ var visibleCheckbox = document.createElement('INPUT');
+ visibleCheckbox.type = 'checkbox';
+ visibleCheckbox.checked = isOpen();
+ visibleCheckbox.id = accordion.id + "-visible-checkbox";
+ visibleCheckbox.className = gradioCheckbox.className + " input-accordion-checkbox";
+ span.insertBefore(visibleCheckbox, span.firstChild);
+
+ accordion.visibleCheckbox = visibleCheckbox;
+ accordion.onVisibleCheckboxChange = function(){
+ if(linked && isOpen() != visibleCheckbox.checked) {
+ labelWrap.click();
+ }
+
+ gradioCheckbox.checked = visibleCheckbox.checked;
+ updateInput(gradioCheckbox);
+ };
+
+ visibleCheckbox.addEventListener('click', function(event){
+ linked = false;
+ event.stopPropagation();
+ });
+ visibleCheckbox.addEventListener('input', accordion.onVisibleCheckboxChange);
}
onUiLoaded(function() {
for (var accordion of gradioApp().querySelectorAll('.input-accordion')) {
- var labelWrap = accordion.querySelector('.label-wrap');
- observerAccordionOpen.observe(labelWrap, {attributes: true, attributeFilter: ['class']});
-
- var extra = gradioApp().querySelector('#' + accordion.id + "-extra");
- if (extra) {
- labelWrap.insertBefore(extra, labelWrap.lastElementChild);
- }
+ setupAccordion(accordion);
}
});
diff --git a/style.css b/style.css
index 115626cd..9a1181e8 100644
--- a/style.css
+++ b/style.css
@@ -204,6 +204,11 @@ div.block.gradio-accordion {
padding: 8px 8px;
}
+input[type="checkbox"].input-accordion-checkbox{
+ vertical-align: sub;
+ margin-right: 0.5em;
+}
+
/* txt2img/img2img specific */
--
cgit v1.2.3
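For context, `inputAccordionChecked` at the top of the file is the hook other scripts and the Python side use to drive the new checkbox. A minimal usage sketch, assuming the hires-fix accordion keeps its `txt2img_hr` element id and that `setupAccordion` has already attached `visibleCheckbox` (both assumptions, for illustration only):

    onUiLoaded(function() {
        // Tick the visible checkbox; while it is still linked to the accordion
        // (the user has not clicked it directly), onVisibleCheckboxChange()
        // also expands the accordion and syncs the hidden gradio checkbox.
        inputAccordionChecked('txt2img_hr', true);
    });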
From 16ab17429016a1154b9aa83244cdbfc7ba463d72 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 5 Nov 2023 09:20:05 +0300
Subject: eslint
---
javascript/inputAccordion.js | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
(limited to 'javascript')
diff --git a/javascript/inputAccordion.js b/javascript/inputAccordion.js
index 8fc01230..7570309a 100644
--- a/javascript/inputAccordion.js
+++ b/javascript/inputAccordion.js
@@ -4,20 +4,22 @@ function inputAccordionChecked(id, checked) {
accordion.onVisibleCheckboxChange();
}
-function setupAccordion(accordion){
+function setupAccordion(accordion) {
var labelWrap = accordion.querySelector('.label-wrap');
var gradioCheckbox = gradioApp().querySelector('#' + accordion.id + "-checkbox input");
var extra = gradioApp().querySelector('#' + accordion.id + "-extra");
var span = labelWrap.querySelector('span');
var linked = true;
- var isOpen = function(){ return labelWrap.classList.contains('open'); }
+ var isOpen = function() {
+ return labelWrap.classList.contains('open');
+ };
var observerAccordionOpen = new MutationObserver(function(mutations) {
mutations.forEach(function(mutationRecord) {
accordion.classList.toggle('input-accordion-open', isOpen());
- if(linked){
+ if (linked) {
accordion.visibleCheckbox.checked = isOpen();
accordion.onVisibleCheckboxChange();
}
@@ -29,22 +31,22 @@ function setupAccordion(accordion){
labelWrap.insertBefore(extra, labelWrap.lastElementChild);
}
- accordion.onChecked = function(checked){
+ accordion.onChecked = function(checked) {
if (isOpen() != checked) {
labelWrap.click();
}
- }
+ };
var visibleCheckbox = document.createElement('INPUT');
visibleCheckbox.type = 'checkbox';
visibleCheckbox.checked = isOpen();
- visibleCheckbox.id = accordion.id + "-visible-checkbox";
+ visibleCheckbox.id = accordion.id + "-visible-checkbox";
visibleCheckbox.className = gradioCheckbox.className + " input-accordion-checkbox";
span.insertBefore(visibleCheckbox, span.firstChild);
accordion.visibleCheckbox = visibleCheckbox;
- accordion.onVisibleCheckboxChange = function(){
- if(linked && isOpen() != visibleCheckbox.checked) {
+ accordion.onVisibleCheckboxChange = function() {
+ if (linked && isOpen() != visibleCheckbox.checked) {
labelWrap.click();
}
@@ -52,7 +54,7 @@ function setupAccordion(accordion){
updateInput(gradioCheckbox);
};
- visibleCheckbox.addEventListener('click', function(event){
+ visibleCheckbox.addEventListener('click', function(event) {
linked = false;
event.stopPropagation();
});
--
cgit v1.2.3
From d9499f4301018ebd2977685d098381aa4111d2ae Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 5 Nov 2023 10:12:50 +0300
Subject: properly apply sort order for extra network cards when selected from
 dropdown; allow selection of default sort order in settings; remove 'Default'
 sort order, replace with 'Name'
---
javascript/extraNetworks.js | 27 ++++++++++++++++-----------
modules/shared_options.py | 2 ++
modules/ui_extra_networks.py | 6 ++++--
3 files changed, 22 insertions(+), 13 deletions(-)
(limited to 'javascript')
diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js
index ac26718f..a4d1d9d9 100644
--- a/javascript/extraNetworks.js
+++ b/javascript/extraNetworks.js
@@ -27,7 +27,6 @@ function setupExtraNetworksForTab(tabname) {
var showDirsDiv = gradioApp().getElementById(tabname + '_extra_show_dirs');
var showDirs = gradioApp().querySelector('#' + tabname + '_extra_show_dirs input');
- sort.dataset.sortkey = 'sortDefault';
tabs.appendChild(searchDiv);
tabs.appendChild(sort);
tabs.appendChild(sortOrder);
@@ -49,20 +48,23 @@ function setupExtraNetworksForTab(tabname) {
elem.style.display = visible ? "" : "none";
});
+
+ applySort();
};
var applySort = function() {
+ var cards = gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card');
+
var reverse = sortOrder.classList.contains("sortReverse");
- var sortKey = sort.querySelector("input").value.toLowerCase().replace("sort", "").replaceAll(" ", "_").replace(/_+$/, "").trim();
- sortKey = sortKey ? "sort" + sortKey.charAt(0).toUpperCase() + sortKey.slice(1) : "";
- var sortKeyStore = sortKey ? sortKey + (reverse ? "Reverse" : "") : "";
- if (!sortKey || sortKeyStore == sort.dataset.sortkey) {
+ var sortKey = sort.querySelector("input").value.toLowerCase().replace("sort", "").replaceAll(" ", "_").replace(/_+$/, "").trim() || "name";
+ sortKey = "sort" + sortKey.charAt(0).toUpperCase() + sortKey.slice(1);
+ var sortKeyStore = sortKey + "-" + (reverse ? "Descending" : "Ascending") + "-" + cards.length;
+
+ if (sortKeyStore == sort.dataset.sortkey) {
return;
}
-
sort.dataset.sortkey = sortKeyStore;
- var cards = gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card');
cards.forEach(function(card) {
card.originalParentElement = card.parentElement;
});
@@ -88,15 +90,13 @@ function setupExtraNetworksForTab(tabname) {
};
search.addEventListener("input", applyFilter);
- applyFilter();
- ["change", "blur", "click"].forEach(function(evt) {
- sort.querySelector("input").addEventListener(evt, applySort);
- });
sortOrder.addEventListener("click", function() {
sortOrder.classList.toggle("sortReverse");
applySort();
});
+ applyFilter();
+ extraNetworksApplySort[tabname] = applySort;
extraNetworksApplyFilter[tabname] = applyFilter;
var showDirsUpdate = function() {
@@ -113,7 +113,12 @@ function applyExtraNetworkFilter(tabname) {
setTimeout(extraNetworksApplyFilter[tabname], 1);
}
+function applyExtraNetworkSort(tabname) {
+ setTimeout(extraNetworksApplySort[tabname], 1);
+}
+
var extraNetworksApplyFilter = {};
+var extraNetworksApplySort = {};
var activePromptTextarea = {};
function setupExtraNetworks() {
diff --git a/modules/shared_options.py b/modules/shared_options.py
index 0a82216f..6543e440 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -234,6 +234,8 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), {
"extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"),
"extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"),
"extra_networks_card_show_desc": OptionInfo(True, "Show description on card"),
+ "extra_networks_card_order_field": OptionInfo("Name", "Default order field for Extra Networks cards", gr.Dropdown, {"choices": ['Name', 'Date Created', 'Date Modified']}).needs_reload_ui(),
+ "extra_networks_card_order": OptionInfo("Ascending", "Default order for Extra Networks cards", gr.Dropdown, {"choices": ['Ascending', 'Descending']}).needs_reload_ui(),
"extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"),
"ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(),
"textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"),
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index 59d6ecc6..fc729917 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -381,8 +381,8 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname):
related_tabs.append(tab)
edit_search = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", elem_classes="search", placeholder="Search...", visible=False, interactive=True)
- dropdown_sort = gr.Dropdown(choices=['Default Sort', 'Date Created', 'Date Modified', 'Name'], value='Default Sort', elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order")
- button_sortorder = ToolButton(switch_values_symbol, elem_id=tabname+"_extra_sortorder", elem_classes="sortorder", visible=False, tooltip="Invert sort order")
+ dropdown_sort = gr.Dropdown(choices=['Name', 'Date Created', 'Date Modified', ], value=shared.opts.extra_networks_card_order_field, elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order")
+ button_sortorder = ToolButton(switch_values_symbol, elem_id=tabname+"_extra_sortorder", elem_classes=["sortorder"] + ([] if shared.opts.extra_networks_card_order == "Ascending" else ["sortReverse"]), visible=False, tooltip="Invert sort order")
button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh", visible=False)
checkbox_show_dirs = gr.Checkbox(True, label='Show dirs', elem_id=tabname+"_extra_show_dirs", elem_classes="show-dirs", visible=False)
@@ -395,6 +395,8 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname):
for tab in related_tabs:
tab.select(fn=lambda: [gr.update(visible=True) for _ in range(5)], inputs=[], outputs=[edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs], show_progress=False)
+ dropdown_sort.change(fn=lambda: None, _js="function(){ applyExtraNetworkSort('" + tabname + "'); }")
+
def pages_html():
if not ui.pages_contents:
return refresh()
--
cgit v1.2.3
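The dedupe key that applySort stores in `sort.dataset.sortkey` now encodes field, direction, and card count, so sorting re-runs when a refresh changes the number of cards (and applyFilter invokes it on every pass). The key derivation, pulled out here as a standalone sketch of the same string logic:

    function sortKeyStoreFor(dropdownValue, reverse, cardCount) {
        var sortKey = dropdownValue.toLowerCase().replace("sort", "").replaceAll(" ", "_").replace(/_+$/, "").trim() || "name";
        sortKey = "sort" + sortKey.charAt(0).toUpperCase() + sortKey.slice(1);
        return sortKey + "-" + (reverse ? "Descending" : "Ascending") + "-" + cardCount;
    }

    // sortKeyStoreFor("Name", false, 42)        -> "sortName-Ascending-42"
    // sortKeyStoreFor("Date Created", true, 42) -> "sortDate_created-Descending-42"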
From 4d4a9e733219f8c065a4ab6c5ab42836db7330fe Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 5 Nov 2023 19:19:55 +0300
Subject: added compact prompt option
---
extensions-builtin/mobile/javascript/mobile.js | 2 +
javascript/extraNetworks.js | 33 ++++
modules/shared_items.py | 2 +
modules/shared_options.py | 1 +
modules/ui.py | 247 +++++++++----------------
modules/ui_common.py | 15 +-
modules/ui_extra_networks.py | 16 +-
modules/ui_extra_networks_checkpoints.py | 2 +
modules/ui_toprow.py | 141 ++++++++++++++
style.css | 23 ++-
10 files changed, 314 insertions(+), 168 deletions(-)
create mode 100644 modules/ui_toprow.py
(limited to 'javascript')
diff --git a/extensions-builtin/mobile/javascript/mobile.js b/extensions-builtin/mobile/javascript/mobile.js
index 652f07ac..bff1aced 100644
--- a/extensions-builtin/mobile/javascript/mobile.js
+++ b/extensions-builtin/mobile/javascript/mobile.js
@@ -12,6 +12,8 @@ function isMobile() {
}
function reportWindowSize() {
+ if (gradioApp().querySelector('.toprow-compact-tools')) return; // not applicable for compact prompt layout
+
var currentlyMobile = isMobile();
if (currentlyMobile == isSetupForMobile) return;
isSetupForMobile = currentlyMobile;
diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js
index a4d1d9d9..a1bf29a8 100644
--- a/javascript/extraNetworks.js
+++ b/javascript/extraNetworks.js
@@ -26,6 +26,8 @@ function setupExtraNetworksForTab(tabname) {
var refresh = gradioApp().getElementById(tabname + '_extra_refresh');
var showDirsDiv = gradioApp().getElementById(tabname + '_extra_show_dirs');
var showDirs = gradioApp().querySelector('#' + tabname + '_extra_show_dirs input');
+ var promptContainer = gradioApp().querySelector('.prompt-container-compact#' + tabname + '_prompt_container');
+ var negativePrompt = gradioApp().querySelector('#' + tabname + '_neg_prompt');
tabs.appendChild(searchDiv);
tabs.appendChild(sort);
@@ -109,6 +111,37 @@ function setupExtraNetworksForTab(tabname) {
showDirsUpdate();
}
+function extraNetworksMovePromptToTab(tabname, id, showPrompt, showNegativePrompt) {
+ if (!gradioApp().querySelector('.toprow-compact-tools')) return; // only applicable for compact prompt layout
+
+ var promptContainer = gradioApp().getElementById(tabname + '_prompt_container');
+ var prompt = gradioApp().getElementById(tabname + '_prompt_row');
+ var negPrompt = gradioApp().getElementById(tabname + '_neg_prompt_row');
+ var elem = id ? gradioApp().getElementById(id) : null;
+
+ if (showNegativePrompt && elem) {
+ elem.insertBefore(negPrompt, elem.firstChild);
+ } else {
+ promptContainer.insertBefore(negPrompt, promptContainer.firstChild);
+ }
+
+ if (showPrompt && elem) {
+ elem.insertBefore(prompt, elem.firstChild);
+ } else {
+ promptContainer.insertBefore(prompt, promptContainer.firstChild);
+ }
+}
+
+
+function extraNetworksUrelatedTabSelected(tabname) { // called from python when user selects an unrelated tab (generate)
+ extraNetworksMovePromptToTab(tabname, '', false, false);
+}
+
+function extraNetworksTabSelected(tabname, id, showPrompt, showNegativePrompt) { // called from python when user selects an extra networks tab
+ extraNetworksMovePromptToTab(tabname, id, showPrompt, showNegativePrompt);
+
+}
+
function applyExtraNetworkFilter(tabname) {
setTimeout(extraNetworksApplyFilter[tabname], 1);
}
diff --git a/modules/shared_items.py b/modules/shared_items.py
index b1459f8c..5024b426 100644
--- a/modules/shared_items.py
+++ b/modules/shared_items.py
@@ -67,6 +67,8 @@ def reload_hypernetworks():
ui_reorder_categories_builtin_items = [
+ "prompt",
+ "image",
"inpaint",
"sampler",
"accordions",
diff --git a/modules/shared_options.py b/modules/shared_options.py
index 6543e440..4e3d7541 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -272,6 +272,7 @@ options_templates.update(options_section(('ui', "User interface"), {
"hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(),
"hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(),
"disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(),
+ "compact_prompt_box": OptionInfo(True, "Compact prompt layout").info("puts prompt and negative prompt inside the Generate tab, leaving more vertical space for the image on the right").needs_reload_ui(),
}))
diff --git a/modules/ui.py b/modules/ui.py
index bcf39199..2454eb36 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -12,7 +12,7 @@ from PIL import Image, PngImagePlugin # noqa: F401
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
from modules import gradio_extensons # noqa: F401
-from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, ui_extra_networks
+from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, scripts, sd_samplers, processing, ui_extra_networks, ui_toprow
from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion, ResizeHandleRow
from modules.paths import script_path
from modules.ui_common import create_refresh_button
@@ -25,7 +25,6 @@ import modules.hypernetworks.ui as hypernetworks_ui
import modules.textual_inversion.ui as textual_inversion_ui
import modules.textual_inversion.textual_inversion as textual_inversion
import modules.shared as shared
-import modules.images
from modules import prompt_parser
from modules.sd_hijack import model_hijack
from modules.generation_parameters_copypaste import image_from_url_text
@@ -177,79 +176,6 @@ def update_negative_prompt_token_counter(text, steps):
return update_token_counter(text, steps, is_positive=False)
-class Toprow:
- """Creates a top row UI with prompts, generate button, styles, extra little buttons for things, and enables some functionality related to their operation"""
-
- def __init__(self, is_img2img):
- id_part = "img2img" if is_img2img else "txt2img"
- self.id_part = id_part
-
- with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"):
- with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6):
- with gr.Row():
- with gr.Column(scale=80):
- with gr.Row():
- self.prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"])
- self.prompt_img = gr.File(label="", elem_id=f"{id_part}_prompt_image", file_count="single", type="binary", visible=False)
-
- with gr.Row():
- with gr.Column(scale=80):
- with gr.Row():
- self.negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"])
-
- self.button_interrogate = None
- self.button_deepbooru = None
- if is_img2img:
- with gr.Column(scale=1, elem_classes="interrogate-col"):
- self.button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate")
- self.button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru")
-
- with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"):
- with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"):
- self.interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt")
- self.skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip")
- self.submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary')
-
- self.skip.click(
- fn=lambda: shared.state.skip(),
- inputs=[],
- outputs=[],
- )
-
- self.interrupt.click(
- fn=lambda: shared.state.interrupt(),
- inputs=[],
- outputs=[],
- )
-
- with gr.Row(elem_id=f"{id_part}_tools"):
- self.paste = ToolButton(value=paste_symbol, elem_id="paste", tooltip="Read generation parameters from prompt or last generation if prompt is empty into user interface.")
- self.clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt", tooltip="Clear prompt")
- self.apply_styles = ToolButton(value=ui_prompt_styles.styles_materialize_symbol, elem_id=f"{id_part}_style_apply", tooltip="Apply all selected styles to prompts.")
- self.restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False, tooltip="Restore progress")
-
- self.token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"])
- self.token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
- self.negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"])
- self.negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button")
-
- self.clear_prompt_button.click(
- fn=lambda *x: x,
- _js="confirm_clear_prompt",
- inputs=[self.prompt, self.negative_prompt],
- outputs=[self.prompt, self.negative_prompt],
- )
-
- self.ui_styles = ui_prompt_styles.UiPromptStyles(id_part, self.prompt, self.negative_prompt)
- self.ui_styles.setup_apply_button(self.apply_styles)
-
- self.prompt_img.change(
- fn=modules.images.image_data,
- inputs=[self.prompt_img],
- outputs=[self.prompt, self.prompt_img],
- show_progress=False,
- )
-
def setup_progressbar(*args, **kwargs):
pass
@@ -288,8 +214,8 @@ def apply_setting(key, value):
return getattr(opts, key)
-def create_output_panel(tabname, outdir):
- return ui_common.create_output_panel(tabname, outdir)
+def create_output_panel(tabname, outdir, toprow=None):
+ return ui_common.create_output_panel(tabname, outdir, toprow)
def create_sampler_and_steps_selection(choices, tabname):
@@ -336,7 +262,7 @@ def create_ui():
scripts.scripts_txt2img.initialize_scripts(is_img2img=False)
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
- toprow = Toprow(is_img2img=False)
+ toprow = ui_toprow.Toprow(is_img2img=False, is_compact=shared.opts.compact_prompt_box)
dummy_component = gr.Label(visible=False)
@@ -348,6 +274,9 @@ def create_ui():
scripts.scripts_txt2img.prepare_ui()
for category in ordered_ui_categories():
+ if category == "prompt":
+ toprow.create_inline_toprow_prompts()
+
if category == "sampler":
steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "txt2img")
@@ -442,7 +371,7 @@ def create_ui():
show_progress=False,
)
- txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples)
+ txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples, toprow)
txt2img_args = dict(
fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']),
@@ -554,7 +483,7 @@ def create_ui():
scripts.scripts_img2img.initialize_scripts(is_img2img=True)
with gr.Blocks(analytics_enabled=False) as img2img_interface:
- toprow = Toprow(is_img2img=True)
+ toprow = ui_toprow.Toprow(is_img2img=True, is_compact=shared.opts.compact_prompt_box)
extra_tabs = gr.Tabs(elem_id="img2img_extra_tabs")
extra_tabs.__enter__()
@@ -577,85 +506,89 @@ def create_ui():
button = gr.Button(title)
copy_image_buttons.append((button, name, elem))
- with gr.Tabs(elem_id="mode_img2img"):
- img2img_selected_tab = gr.State(0)
-
- with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img:
- init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA", height=opts.img2img_editor_height)
- add_copy_image_controls('img2img', init_img)
-
- with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch:
- sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color)
- add_copy_image_controls('sketch', sketch)
-
- with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint:
- init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_mask_brush_color)
- add_copy_image_controls('inpaint', init_img_with_mask)
-
- with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color:
- inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color)
- inpaint_color_sketch_orig = gr.State(None)
- add_copy_image_controls('inpaint_sketch', inpaint_color_sketch)
-
- def update_orig(image, state):
- if image is not None:
- same_size = state is not None and state.size == image.size
- has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1))
- edited = same_size and has_exact_match
- return image if not edited or state is None else state
-
- inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig)
-
- with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload:
- init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base")
- init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask")
-
- with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
- hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
- gr.HTML(
- "Process images in a directory on the same machine where the server is running." +
- "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
- f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
- f"{hidden}</p>"
- )
- img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir")
- img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir")
- img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir")
- with gr.Accordion("PNG info", open=False):
- img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info")
- img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir")
- img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps", "Model hash"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.")
-
- img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch]
-
- for i, tab in enumerate(img2img_tabs):
- tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab])
-
- def copy_image(img):
- if isinstance(img, dict) and 'image' in img:
- return img['image']
-
- return img
-
- for button, name, elem in copy_image_buttons:
- button.click(
- fn=copy_image,
- inputs=[elem],
- outputs=[copy_image_destinations[name]],
- )
- button.click(
- fn=lambda: None,
- _js=f"switch_to_{name.replace(' ', '_')}",
- inputs=[],
- outputs=[],
- )
-
- with FormRow():
- resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
-
scripts.scripts_img2img.prepare_ui()
for category in ordered_ui_categories():
+ if category == "prompt":
+ toprow.create_inline_toprow_prompts()
+
+ if category == "image":
+ with gr.Tabs(elem_id="mode_img2img"):
+ img2img_selected_tab = gr.State(0)
+
+ with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img:
+ init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA", height=opts.img2img_editor_height)
+ add_copy_image_controls('img2img', init_img)
+
+ with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch:
+ sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color)
+ add_copy_image_controls('sketch', sketch)
+
+ with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint:
+ init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_mask_brush_color)
+ add_copy_image_controls('inpaint', init_img_with_mask)
+
+ with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color:
+ inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color)
+ inpaint_color_sketch_orig = gr.State(None)
+ add_copy_image_controls('inpaint_sketch', inpaint_color_sketch)
+
+ def update_orig(image, state):
+ if image is not None:
+ same_size = state is not None and state.size == image.size
+ has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1))
+ edited = same_size and has_exact_match
+ return image if not edited or state is None else state
+
+ inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig)
+
+ with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload:
+ init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base")
+ init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask")
+
+ with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
+ hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
+ gr.HTML(
+ "Process images in a directory on the same machine where the server is running." +
+ "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
+ f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
+ f"{hidden}</p>"
+ )
+ img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir")
+ img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir")
+ img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir")
+ with gr.Accordion("PNG info", open=False):
+ img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info")
+ img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir")
+ img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps", "Model hash"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.")
+
+ img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch]
+
+ for i, tab in enumerate(img2img_tabs):
+ tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab])
+
+ def copy_image(img):
+ if isinstance(img, dict) and 'image' in img:
+ return img['image']
+
+ return img
+
+ for button, name, elem in copy_image_buttons:
+ button.click(
+ fn=copy_image,
+ inputs=[elem],
+ outputs=[copy_image_destinations[name]],
+ )
+ button.click(
+ fn=lambda: None,
+ _js=f"switch_to_{name.replace(' ', '_')}",
+ inputs=[],
+ outputs=[],
+ )
+
+ with FormRow():
+ resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
+
if category == "sampler":
steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img")
@@ -769,7 +702,7 @@ def create_ui():
if category not in {"accordions"}:
scripts.scripts_img2img.setup_ui_for_section(category)
- img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples)
+ img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples, toprow)
img2img_args = dict(
fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']),
diff --git a/modules/ui_common.py b/modules/ui_common.py
index 84a7d7f2..032ec4af 100644
--- a/modules/ui_common.py
+++ b/modules/ui_common.py
@@ -104,7 +104,7 @@ def save_files(js_data, images, do_make_zip, index):
return gr.File.update(value=fullfns, visible=True), plaintext_to_html(f"Saved: {filenames[0]}")
-def create_output_panel(tabname, outdir):
+def create_output_panel(tabname, outdir, toprow=None):
def open_folder(f):
if not os.path.exists(f):
@@ -130,12 +130,15 @@ Requested path was: {f}
else:
sp.Popen(["xdg-open", path])
- with gr.Column(variant='panel', elem_id=f"{tabname}_results"):
- with gr.Group(elem_id=f"{tabname}_gallery_container"):
- result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery", columns=4, preview=True, height=shared.opts.gallery_height or None)
+ with gr.Column(elem_id=f"{tabname}_results"):
+ if toprow:
+ toprow.create_inline_toprow_image()
- generation_info = None
- with gr.Column():
+ with gr.Column(variant='panel', elem_id=f"{tabname}_results_panel"):
+ with gr.Group(elem_id=f"{tabname}_gallery_container"):
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery", columns=4, preview=True, height=shared.opts.gallery_height or None)
+
+ generation_info = None
with gr.Row(elem_id=f"image_buttons_{tabname}", elem_classes="image-buttons"):
open_folder_button = ToolButton(folder_symbol, elem_id=f'{tabname}_open_folder', visible=not shared.cmd_opts.hide_ui_dir_config, tooltip="Open images output directory.")
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index fc729917..7907cd63 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -103,6 +103,7 @@ class ExtraNetworksPage:
self.name = title.lower()
self.id_page = self.name.replace(" ", "_")
self.card_page = shared.html("extra-networks-card.html")
+ self.allow_prompt = True
self.allow_negative_prompt = False
self.metadata = {}
self.items = {}
@@ -367,7 +368,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname):
related_tabs = []
for page in ui.stored_extra_pages:
- with gr.Tab(page.title, id=page.id_page) as tab:
+ with gr.Tab(page.title, elem_id=f"{tabname}_{page.id_page}", elem_classes=["extra-page"]) as tab:
elem_id = f"{tabname}_{page.id_page}_cards_html"
page_elem = gr.HTML('Loading...', elem_id=elem_id)
ui.pages.append(page_elem)
@@ -389,11 +390,18 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname):
ui.button_save_preview = gr.Button('Save preview', elem_id=tabname+"_save_preview", visible=False)
ui.preview_target_filename = gr.Textbox('Preview save filename', elem_id=tabname+"_preview_filename", visible=False)
+ tab_controls = [edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs]
+
for tab in unrelated_tabs:
- tab.select(fn=lambda: [gr.update(visible=False) for _ in range(5)], inputs=[], outputs=[edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs], show_progress=False)
+ tab.select(fn=lambda: [gr.update(visible=False) for _ in tab_controls], _js='function(){ extraNetworksUrelatedTabSelected("' + tabname + '"); }', inputs=[], outputs=tab_controls, show_progress=False)
+
+ for page, tab in zip(ui.stored_extra_pages, related_tabs):
+ allow_prompt = "true" if page.allow_prompt else "false"
+ allow_negative_prompt = "true" if page.allow_negative_prompt else "false"
+
+ jscode = 'extraNetworksTabSelected("' + tabname + '", "' + f"{tabname}_{page.id_page}" + '", ' + allow_prompt + ', ' + allow_negative_prompt + ');'
- for tab in related_tabs:
- tab.select(fn=lambda: [gr.update(visible=True) for _ in range(5)], inputs=[], outputs=[edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs], show_progress=False)
+ tab.select(fn=lambda: [gr.update(visible=True) for _ in tab_controls], _js='function(){ ' + jscode + ' }', inputs=[], outputs=tab_controls, show_progress=False)
dropdown_sort.change(fn=lambda: None, _js="function(){ applyExtraNetworkSort('" + tabname + "'); }")
diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py
index ca6c2607..2fc0ed43 100644
--- a/modules/ui_extra_networks_checkpoints.py
+++ b/modules/ui_extra_networks_checkpoints.py
@@ -10,6 +10,8 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):
def __init__(self):
super().__init__('Checkpoints')
+ self.allow_prompt = False
+
def refresh(self):
shared.refresh_checkpoints()
diff --git a/modules/ui_toprow.py b/modules/ui_toprow.py
new file mode 100644
index 00000000..985b5a2d
--- /dev/null
+++ b/modules/ui_toprow.py
@@ -0,0 +1,141 @@
+import gradio as gr
+
+from modules import shared, ui_prompt_styles
+import modules.images
+
+from modules.ui_components import ToolButton
+
+
+class Toprow:
+ """Creates a top row UI with prompts, generate button, styles, extra little buttons for things, and enables some functionality related to their operation"""
+
+ prompt = None
+ prompt_img = None
+ negative_prompt = None
+
+ button_interrogate = None
+ button_deepbooru = None
+
+ interrupt = None
+ skip = None
+ submit = None
+
+ paste = None
+ clear_prompt_button = None
+ apply_styles = None
+ restore_progress_button = None
+
+ token_counter = None
+ token_button = None
+ negative_token_counter = None
+ negative_token_button = None
+
+ ui_styles = None
+
+ submit_box = None
+
+ def __init__(self, is_img2img, is_compact=False):
+ id_part = "img2img" if is_img2img else "txt2img"
+ self.id_part = id_part
+ self.is_img2img = is_img2img
+ self.is_compact = is_compact
+
+ if not is_compact:
+ with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"):
+ self.create_classic_toprow()
+ else:
+ self.create_submit_box()
+
+ def create_classic_toprow(self):
+ self.create_prompts()
+
+ with gr.Column(scale=1, elem_id=f"{self.id_part}_actions_column"):
+ self.create_submit_box()
+
+ self.create_tools_row()
+
+ self.create_styles_ui()
+
+ def create_inline_toprow_prompts(self):
+ if not self.is_compact:
+ return
+
+ self.create_prompts()
+
+ with gr.Row(elem_classes=["toprow-compact-stylerow"]):
+ with gr.Column(elem_classes=["toprow-compact-tools"]):
+ self.create_tools_row()
+ with gr.Column():
+ self.create_styles_ui()
+
+ def create_inline_toprow_image(self):
+ if not self.is_compact:
+ return
+
+ self.submit_box.render()
+
+ def create_prompts(self):
+ with gr.Column(elem_id=f"{self.id_part}_prompt_container", elem_classes=["prompt-container-compact"] if self.is_compact else [], scale=6):
+ with gr.Row(elem_id=f"{self.id_part}_prompt_row", elem_classes=["prompt-row"]):
+ self.prompt = gr.Textbox(label="Prompt", elem_id=f"{self.id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"])
+ self.prompt_img = gr.File(label="", elem_id=f"{self.id_part}_prompt_image", file_count="single", type="binary", visible=False)
+
+ with gr.Row(elem_id=f"{self.id_part}_neg_prompt_row", elem_classes=["prompt-row"]):
+ self.negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{self.id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"])
+
+ self.prompt_img.change(
+ fn=modules.images.image_data,
+ inputs=[self.prompt_img],
+ outputs=[self.prompt, self.prompt_img],
+ show_progress=False,
+ )
+
+ def create_submit_box(self):
+ with gr.Row(elem_id=f"{self.id_part}_generate_box", elem_classes=["generate-box"] + (["generate-box-compact"] if self.is_compact else []), render=not self.is_compact) as submit_box:
+ self.submit_box = submit_box
+
+ self.interrupt = gr.Button('Interrupt', elem_id=f"{self.id_part}_interrupt", elem_classes="generate-box-interrupt")
+ self.skip = gr.Button('Skip', elem_id=f"{self.id_part}_skip", elem_classes="generate-box-skip")
+ self.submit = gr.Button('Generate', elem_id=f"{self.id_part}_generate", variant='primary')
+
+ self.skip.click(
+ fn=lambda: shared.state.skip(),
+ inputs=[],
+ outputs=[],
+ )
+
+ self.interrupt.click(
+ fn=lambda: shared.state.interrupt(),
+ inputs=[],
+ outputs=[],
+ )
+
+ def create_tools_row(self):
+ with gr.Row(elem_id=f"{self.id_part}_tools"):
+ from modules.ui import paste_symbol, clear_prompt_symbol, restore_progress_symbol
+
+ self.paste = ToolButton(value=paste_symbol, elem_id="paste", tooltip="Read generation parameters from prompt or last generation if prompt is empty into user interface.")
+ self.clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{self.id_part}_clear_prompt", tooltip="Clear prompt")
+ self.apply_styles = ToolButton(value=ui_prompt_styles.styles_materialize_symbol, elem_id=f"{self.id_part}_style_apply", tooltip="Apply all selected styles to prompts.")
+
+ if self.is_img2img:
+ self.button_interrogate = ToolButton('📎', tooltip='Interrogate CLIP - use CLIP neural network to create a text describing the image, and put it into the prompt field', elem_id="interrogate")
+ self.button_deepbooru = ToolButton('📦', tooltip='Interrogate DeepBooru - use DeepBooru neural network to create a text describing the image, and put it into the prompt field', elem_id="deepbooru")
+
+ self.restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{self.id_part}_restore_progress", visible=False, tooltip="Restore progress")
+
+ self.token_counter = gr.HTML(value="0/75", elem_id=f"{self.id_part}_token_counter", elem_classes=["token-counter"])
+ self.token_button = gr.Button(visible=False, elem_id=f"{self.id_part}_token_button")
+ self.negative_token_counter = gr.HTML(value="0/75", elem_id=f"{self.id_part}_negative_token_counter", elem_classes=["token-counter"])
+ self.negative_token_button = gr.Button(visible=False, elem_id=f"{self.id_part}_negative_token_button")
+
+ self.clear_prompt_button.click(
+ fn=lambda *x: x,
+ _js="confirm_clear_prompt",
+ inputs=[self.prompt, self.negative_prompt],
+ outputs=[self.prompt, self.negative_prompt],
+ )
+
+ def create_styles_ui(self):
+ self.ui_styles = ui_prompt_styles.UiPromptStyles(self.id_part, self.prompt, self.negative_prompt)
+ self.ui_styles.setup_apply_button(self.apply_styles)
diff --git a/style.css b/style.css
index 9a1181e8..73162022 100644
--- a/style.css
+++ b/style.css
@@ -296,6 +296,13 @@ input[type="checkbox"].input-accordion-checkbox{
min-height: 4.5em;
}
+#txt2img_generate, #img2img_generate {
+ min-height: 4.5em;
+}
+.generate-box-compact #txt2img_generate, .generate-box-compact #img2img_generate {
+ min-height: 3em;
+}
+
@media screen and (min-width: 2500px) {
#txt2img_gallery, #img2img_gallery {
min-height: 768px;
@@ -403,6 +410,15 @@ div#extras_scale_to_tab div.form{
min-width: 0.5em;
}
+div.toprow-compact-stylerow{
+ margin: 0.5em 0;
+}
+
+div.toprow-compact-tools{
+ min-width: fit-content !important;
+ max-width: fit-content;
+}
+
/* settings */
#quicksettings {
align-items: end;
@@ -525,7 +541,8 @@ table.popup-table .link{
height: 20px;
background: #b4c0cc;
border-radius: 3px !important;
- top: -20px;
+ top: -14px;
+ left: 0px;
width: 100%;
}
@@ -823,6 +840,10 @@ footer {
/* extra networks UI */
+.extra-page .prompt{
+ margin: 0 0 0.5em 0;
+}
+
.extra-network-cards{
height: calc(100vh - 24rem);
overflow: clip scroll;
--
cgit v1.2.3
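The two tab callbacks above can be exercised directly when `compact_prompt_box` is enabled; the `.toprow-compact-tools` check makes them no-ops in the classic layout. A hypothetical round trip (element ids are illustrative):

    // Selecting an extra networks tab moves the prompt rows into that tab:
    extraNetworksTabSelected("txt2img", "txt2img_lora", true, true);
    // Selecting the Generate tab moves them back into #txt2img_prompt_container:
    extraNetworksUrelatedTabSelected("txt2img");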
From 9ba991cad8a15a99f71f5b2ec5feff7dd9d270d7 Mon Sep 17 00:00:00 2001
From: GerryDE
Date: Tue, 7 Nov 2023 03:09:08 +0100
Subject: Add option to set notification sound volume
---
javascript/notification.js | 6 +++++-
modules/shared_options.py | 1 +
2 files changed, 6 insertions(+), 1 deletion(-)
(limited to 'javascript')
diff --git a/javascript/notification.js b/javascript/notification.js
index 6d799561..3ee972ae 100644
--- a/javascript/notification.js
+++ b/javascript/notification.js
@@ -26,7 +26,11 @@ onAfterUiUpdate(function() {
lastHeadImg = headImg;
// play notification sound if available
- gradioApp().querySelector('#audio_notification audio')?.play();
+ const notificationAudio = gradioApp().querySelector('#audio_notification audio');
+ if (notificationAudio) {
+ notificationAudio.volume = opts.notification_volume / 100.0 || 1.0;
+ notificationAudio.play();
+ }
if (document.hasFocus()) return;
diff --git a/modules/shared_options.py b/modules/shared_options.py
index a9964fcb..d40db530 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -64,6 +64,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids"
"save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that has been interrupted in mid-generation; even if not saved, they will still show up in webui output."),
"notification_audio": OptionInfo(True, "Play notification sound after image generation").info("notification.mp3 should be present in the root directory").needs_reload_ui(),
+ "notification_volume": OptionInfo(100, "Notification sound volume", gr.Slider, {"minimum": 0, "maximum": 100, "step": 1}).info("in %"),
}))
options_templates.update(options_section(('saving-paths', "Paths for saving"), {
--
cgit v1.2.3
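One behavioural note on the volume line above: the `|| 1.0` fallback is falsy-based, so a stored volume of 0 falls through to full volume rather than silence. A sketch contrasting the shipped expression with a nullish variant that would keep 0% silent (an observation about the patch, not part of it):

    function notificationVolume(optValue) {
        var asShipped = optValue / 100.0 || 1.0;     // 0 -> 1.0 (full volume)
        var muteSafe = (optValue ?? 100) / 100.0;    // 0 -> 0.0 (silent)
        return {asShipped: asShipped, muteSafe: muteSafe};
    }

    // notificationVolume(40) -> {asShipped: 0.4, muteSafe: 0.4}
    // notificationVolume(0)  -> {asShipped: 1.0, muteSafe: 0.0}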
From 2a40d3c603448d15e209814366f2d6ab25e52398 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 26 Nov 2023 14:58:47 +0300
Subject: compact prompt layout: preserve scroll when switching between lora
tabs
---
javascript/extraNetworks.js | 4 ++++
modules/ui_extra_networks.py | 5 ++++-
style.css | 12 ++++++++++--
3 files changed, 18 insertions(+), 3 deletions(-)
(limited to 'javascript')
diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js
index a1bf29a8..a787372c 100644
--- a/javascript/extraNetworks.js
+++ b/javascript/extraNetworks.js
@@ -130,6 +130,10 @@ function extraNetworksMovePromptToTab(tabname, id, showPrompt, showNegativePromp
} else {
promptContainer.insertBefore(prompt, promptContainer.firstChild);
}
+
+ if (elem) {
+ elem.classList.toggle('extra-page-prompts-active', showNegativePrompt || showPrompt);
+ }
}
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index f03e2033..f3b23cc9 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -370,6 +370,9 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname):
for page in ui.stored_extra_pages:
with gr.Tab(page.title, elem_id=f"{tabname}_{page.id_page}", elem_classes=["extra-page"]) as tab:
+ with gr.Column(elem_id=f"{tabname}_{page.id_page}_prompts", elem_classes=["extra-page-prompts"]):
+ pass
+
elem_id = f"{tabname}_{page.id_page}_cards_html"
page_elem = gr.HTML('Loading...', elem_id=elem_id)
ui.pages.append(page_elem)
@@ -400,7 +403,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname):
allow_prompt = "true" if page.allow_prompt else "false"
allow_negative_prompt = "true" if page.allow_negative_prompt else "false"
- jscode = 'extraNetworksTabSelected("' + tabname + '", "' + f"{tabname}_{page.id_page}" + '", ' + allow_prompt + ', ' + allow_negative_prompt + ');'
+ jscode = 'extraNetworksTabSelected("' + tabname + '", "' + f"{tabname}_{page.id_page}_prompts" + '", ' + allow_prompt + ', ' + allow_negative_prompt + ');'
tab.select(fn=lambda: [gr.update(visible=True) for _ in tab_controls], _js='function(){ ' + jscode + ' }', inputs=[], outputs=tab_controls, show_progress=False)
diff --git a/style.css b/style.css
index 73162022..f8b42636 100644
--- a/style.css
+++ b/style.css
@@ -840,8 +840,16 @@ footer {
/* extra networks UI */
-.extra-page .prompt{
- margin: 0 0 0.5em 0;
+.extra-page > div.gap{
+ gap: 0;
+}
+
+.extra-page-prompts{
+ margin-bottom: 0;
+}
+
+.extra-page-prompts.extra-page-prompts-active{
+ margin-bottom: 1em;
}
.extra-network-cards{
--
cgit v1.2.3
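After this change the Python side passes "<tabname>_<page>_prompts" instead of "<tabname>_<page>", so the prompt rows land in the dedicated, always-present `.extra-page-prompts` column rather than being inserted above the card grid, and the grid's scroll position survives tab switches. For example (ids illustrative):

    // was: extraNetworksTabSelected("txt2img", "txt2img_lora", true, true);
    extraNetworksTabSelected("txt2img", "txt2img_lora_prompts", true, true);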
From f0f100e67b78f686dc73cf3c8cad422e45cc9b8a Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 26 Nov 2023 17:56:16 +0300
Subject: add categories to settings
---
javascript/settings.js | 25 ++++++++++++++++
modules/options.py | 75 ++++++++++++++++++++++++++++++++++++++++++-----
modules/shared_options.py | 49 ++++++++++++++++++-------------
style.css | 9 ++++++
4 files changed, 130 insertions(+), 28 deletions(-)
(limited to 'javascript')
diff --git a/javascript/settings.js b/javascript/settings.js
index 4e79ec00..e6009290 100644
--- a/javascript/settings.js
+++ b/javascript/settings.js
@@ -44,3 +44,28 @@ onUiLoaded(function() {
buttonShowAllPages.addEventListener("click", settingsShowAllTabs);
});
+
+
+onOptionsChanged(function() {
+ if (gradioApp().querySelector('#settings .settings-category')) return;
+
+ var sectionMap = {};
+ gradioApp().querySelectorAll('#settings > div > button').forEach(function(x) {
+ sectionMap[x.textContent.trim()] = x;
+ });
+
+ opts._categories.forEach(function(x) {
+ var section = x[0];
+ var category = x[1];
+
+ var span = document.createElement('SPAN');
+ span.textContent = category;
+ span.className = 'settings-category';
+
+ var sectionElem = sectionMap[section];
+ if (!sectionElem) return;
+
+ sectionElem.parentElement.insertBefore(span, sectionElem);
+ });
+});
+
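For orientation, the `opts._categories` payload this loop consumes is produced by `Options.dumpjson()` (see the modules/options.py diff below): one [section label, category label] pair per category, anchored at the category's first section, plus a fixed ["Defaults", "Other"] entry. Sample values, for illustration only:

    var sampleCategories = [
        ["Saving images/grids", "Saving images"],  // first section of the "saving" category
        ["Stable Diffusion", "Stable Diffusion"],
        ["User interface", "User Interface"],
        ["Defaults", "Other"],                     // appended unconditionally
    ];
    // For each pair, the loop finds the settings tab button whose text equals
    // the section label and inserts a <span class="settings-category"> heading
    // carrying the category label before it.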
diff --git a/modules/options.py b/modules/options.py
index 40cb4799..4fead690 100644
--- a/modules/options.py
+++ b/modules/options.py
@@ -1,5 +1,6 @@
import json
import sys
+from dataclasses import dataclass
import gradio as gr
@@ -8,13 +9,14 @@ from modules.shared_cmd_options import cmd_opts
class OptionInfo:
- def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after='', infotext=None, restrict_api=False):
+ def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after='', infotext=None, restrict_api=False, category_id=None):
self.default = default
self.label = label
self.component = component
self.component_args = component_args
self.onchange = onchange
self.section = section
+ self.category_id = category_id
self.refresh = refresh
self.do_not_save = False
@@ -63,7 +65,11 @@ class OptionHTML(OptionInfo):
def options_section(section_identifier, options_dict):
for v in options_dict.values():
- v.section = section_identifier
+ if len(section_identifier) == 2:
+ v.section = section_identifier
+ elif len(section_identifier) == 3:
+ v.section = section_identifier[0:2]
+ v.category_id = section_identifier[2]
return options_dict
@@ -206,6 +212,17 @@ class Options:
d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()}
d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None}
d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None}
+
+ item_categories = {}
+ for item in self.data_labels.values():
+ category = categories.mapping.get(item.category_id)
+ category = "Uncategorized" if category is None else category.label
+ if category not in item_categories:
+ item_categories[category] = item.section[1]
+
+ # _categories is a list of pairs: [section, category]. Each section (a setting page) will get a special heading above it with the category as text.
+ d["_categories"] = [[v, k] for k, v in item_categories.items()] + [["Defaults", "Other"]]
+
return json.dumps(d)
def add_option(self, key, info):
@@ -214,15 +231,40 @@ class Options:
self.data[key] = info.default
def reorder(self):
- """reorder settings so that all items related to section always go together"""
+ """Reorder settings so that:
+ - all items related to section always go together
+ - all sections belonging to a category go together
+ - sections inside a category are ordered alphabetically
+ - categories are ordered by creation order
+
+ Category is a superset of sections: for category "postprocessing" there could be multiple sections: "face restoration", "upscaling".
+
+ This function also changes items' category_id so that all items belonging to a section have the same category_id.
+ """
+
+ category_ids = {}
+ section_categories = {}
- section_ids = {}
settings_items = self.data_labels.items()
for _, item in settings_items:
- if item.section not in section_ids:
- section_ids[item.section] = len(section_ids)
+ if item.section not in section_categories:
+ section_categories[item.section] = item.category_id
+
+ for _, item in settings_items:
+ item.category_id = section_categories.get(item.section)
+
+ for category_id in categories.mapping:
+ if category_id not in category_ids:
+ category_ids[category_id] = len(category_ids)
- self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section]))
+ def sort_key(x):
+ item: OptionInfo = x[1]
+ category_order = category_ids.get(item.category_id, len(category_ids))
+ section_order = item.section[1]
+
+ return category_order, section_order
+
+ self.data_labels = dict(sorted(settings_items, key=sort_key))
def cast_value(self, key, value):
"""casts an arbitrary to the same type as this setting's value with key
@@ -245,3 +287,22 @@ class Options:
value = expected_type(value)
return value
+
+
+@dataclass
+class OptionsCategory:
+ id: str
+ label: str
+
+class OptionsCategories:
+ def __init__(self):
+ self.mapping = {}
+
+ def register_category(self, category_id, label):
+ if category_id in self.mapping:
+ return category_id
+
+ self.mapping[category_id] = OptionsCategory(category_id, label)
+ return category_id
+
+
+categories = OptionsCategories()
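Note: for code that defines settings, the category mechanism added above amounts to two calls: register a category id once, then pass it as the optional third element of the options_section tuple. A short sketch, assuming options_templates is the usual dict from modules/shared_options.py (the "my-extension" ids are invented for illustration):

from modules.options import OptionInfo, categories, options_section

# re-registering an existing id is a no-op, so this is safe even if
# another module registered the category first
categories.register_category("my-extension", "My Extension")

options_templates.update(options_section(
    # (section id, section label, category id) -- the new 3-tuple form
    ('my-extension-general', "My Extension: General", "my-extension"),
    {
        "my_extension_enabled": OptionInfo(True, "Enable my extension"),
    },
))

reorder() then sorts this section next to the other sections of its category, and the settings JSON gains a _categories list that the frontend uses to draw the category headings styled by the .settings-category rule later in this commit.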
diff --git a/modules/shared_options.py b/modules/shared_options.py
index 9bcd7914..04e68a71 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -3,7 +3,7 @@ import gradio as gr
from modules import localization, ui_components, shared_items, shared, interrogate, shared_gradio_themes
from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401
from modules.shared_cmd_options import cmd_opts
-from modules.options import options_section, OptionInfo, OptionHTML
+from modules.options import options_section, OptionInfo, OptionHTML, categories
options_templates = {}
hide_dirs = shared.hide_dirs
@@ -21,7 +21,14 @@ restricted_opts = {
"outdir_init_images"
}
-options_templates.update(options_section(('saving-images', "Saving images/grids"), {
+categories.register_category("saving", "Saving images")
+categories.register_category("sd", "Stable Diffusion")
+categories.register_category("ui", "User Interface")
+categories.register_category("system", "System")
+categories.register_category("postprocessing", "Postprocessing")
+categories.register_category("training", "Training")
+
+options_templates.update(options_section(('saving-images', "Saving images/grids", "saving"), {
"samples_save": OptionInfo(True, "Always save all generated images"),
"samples_format": OptionInfo('png', 'File format for images'),
"samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
@@ -67,7 +74,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids"
"notification_volume": OptionInfo(100, "Notification sound volume", gr.Slider, {"minimum": 0, "maximum": 100, "step": 1}).info("in %"),
}))
-options_templates.update(options_section(('saving-paths', "Paths for saving"), {
+options_templates.update(options_section(('saving-paths', "Paths for saving", "saving"), {
"outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
"outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
"outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
@@ -79,7 +86,7 @@ options_templates.update(options_section(('saving-paths', "Paths for saving"), {
"outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs),
}))
-options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
+options_templates.update(options_section(('saving-to-dirs', "Saving to a directory", "saving"), {
"save_to_dirs": OptionInfo(True, "Save images to a subdirectory"),
"grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"),
"use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
@@ -87,21 +94,21 @@ options_templates.update(options_section(('saving-to-dirs', "Saving to a directo
"directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
}))
-options_templates.update(options_section(('upscaling', "Upscaling"), {
+options_templates.update(options_section(('upscaling', "Upscaling", "postprocessing"), {
"ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"),
"realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}),
"upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in shared.sd_upscalers]}),
}))
-options_templates.update(options_section(('face-restoration', "Face restoration"), {
+options_templates.update(options_section(('face-restoration', "Face restoration", "postprocessing"), {
"face_restoration": OptionInfo(False, "Restore faces", infotext='Face restoration').info("will use a third-party model on generation result to reconstruct faces"),
"face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in shared.face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"),
"face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
}))
-options_templates.update(options_section(('system', "System"), {
+options_templates.update(options_section(('system', "System", "system"), {
"auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}),
"enable_console_prompts": OptionInfo(shared.cmd_opts.enable_console_prompts, "Print prompts to console when generating with txt2img and img2img."),
"show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(),
@@ -116,13 +123,13 @@ options_templates.update(options_section(('system', "System"), {
"dump_stacks_on_signal": OptionInfo(False, "Print stack traces before exiting the program with ctrl+c."),
}))
-options_templates.update(options_section(('API', "API"), {
+options_templates.update(options_section(('API', "API", "system"), {
"api_enable_requests": OptionInfo(True, "Allow http:// and https:// URLs for input images in API", restrict_api=True),
"api_forbid_local_requests": OptionInfo(True, "Forbid URLs to local resources", restrict_api=True),
"api_useragent": OptionInfo("", "User agent for requests", restrict_api=True),
}))
-options_templates.update(options_section(('training', "Training"), {
+options_templates.update(options_section(('training', "Training", "training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
"save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
@@ -137,7 +144,7 @@ options_templates.update(options_section(('training', "Training"), {
"training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."),
}))
-options_templates.update(options_section(('sd', "Stable Diffusion"), {
+options_templates.update(options_section(('sd', "Stable Diffusion", "sd"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": shared_items.list_checkpoint_tiles(shared.opts.sd_checkpoint_dropdown_use_short)}, refresh=shared_items.refresh_checkpoints, infotext='Model hash'),
"sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}),
"sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"),
@@ -154,14 +161,14 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"hires_fix_refiner_pass": OptionInfo("second pass", "Hires fix: which pass to enable refiner for", gr.Radio, {"choices": ["first pass", "second pass", "both passes"]}, infotext="Hires refiner"),
}))
-options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), {
+options_templates.update(options_section(('sdxl', "Stable Diffusion XL", "sd"), {
"sdxl_crop_top": OptionInfo(0, "crop top coordinate"),
"sdxl_crop_left": OptionInfo(0, "crop left coordinate"),
"sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"),
"sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"),
}))
-options_templates.update(options_section(('vae', "VAE"), {
+options_templates.update(options_section(('vae', "VAE", "sd"), {
"sd_vae_explanation": OptionHTML("""
VAE is a neural network that transforms a standard RGB
image into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling
@@ -176,7 +183,7 @@ For img2img, VAE is used to process user's input image before the sampling, and
"sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}, infotext='VAE Decoder').info("method to decode latent to image"),
}))
-options_templates.update(options_section(('img2img', "img2img"), {
+options_templates.update(options_section(('img2img', "img2img", "sd"), {
"inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Conditional mask weight'),
"initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.0, "maximum": 1.5, "step": 0.001}, infotext='Noise multiplier'),
"img2img_extra_noise": OptionInfo(0.0, "Extra noise multiplier for img2img and hires fix", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Extra noise').info("0 = disabled (default); should be lower than denoising strength"),
@@ -192,7 +199,7 @@ options_templates.update(options_section(('img2img', "img2img"), {
"img2img_batch_show_results_limit": OptionInfo(32, "Show the first N batch img2img results in UI", gr.Slider, {"minimum": -1, "maximum": 1000, "step": 1}).info('0: disable, -1: show all images. Too many images can cause lag'),
}))
-options_templates.update(options_section(('optimizations', "Optimizations"), {
+options_templates.update(options_section(('optimizations', "Optimizations", "sd"), {
"cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}),
"s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
"token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"),
@@ -203,7 +210,7 @@ options_templates.update(options_section(('optimizations', "Optimizations"), {
"batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"),
}))
-options_templates.update(options_section(('compatibility', "Compatibility"), {
+options_templates.update(options_section(('compatibility', "Compatibility", "sd"), {
"use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
"use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."),
"no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."),
@@ -228,7 +235,7 @@ options_templates.update(options_section(('interrogate', "Interrogate"), {
"deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"),
}))
-options_templates.update(options_section(('extra_networks', "Extra Networks"), {
+options_templates.update(options_section(('extra_networks', "Extra Networks", "sd"), {
"extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."),
"extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'),
"extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}),
@@ -245,7 +252,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), {
"sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *shared.hypernetworks]}, refresh=shared_items.reload_hypernetworks),
}))
-options_templates.update(options_section(('ui', "User interface"), {
+options_templates.update(options_section(('ui', "User interface", "ui"), {
"localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(),
"gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + shared_gradio_themes.gradio_hf_hub_themes}).info("you can also manually enter any of themes from the gallery.").needs_reload_ui(),
"gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"),
@@ -280,7 +287,7 @@ options_templates.update(options_section(('ui', "User interface"), {
}))
-options_templates.update(options_section(('infotext', "Infotext"), {
+options_templates.update(options_section(('infotext', "Infotext", "ui"), {
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
"add_model_name_to_info": OptionInfo(True, "Add model name to generation information"),
"add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"),
@@ -295,7 +302,7 @@ options_templates.update(options_section(('infotext', "Infotext"), {
}))
-options_templates.update(options_section(('ui', "Live previews"), {
+options_templates.update(options_section(('ui', "Live previews", "ui"), {
"show_progressbar": OptionInfo(True, "Show progressbar"),
"live_previews_enable": OptionInfo(True, "Show live previews of the created image"),
"live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}),
@@ -308,7 +315,7 @@ options_templates.update(options_section(('ui', "Live previews"), {
"live_preview_fast_interrupt": OptionInfo(False, "Return image with chosen live preview method on interrupt").info("makes interrupts faster"),
}))
-options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
+options_templates.update(options_section(('sampler-params', "Sampler parameters", "sd"), {
"hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in shared_items.list_samplers()]}).needs_reload_ui(),
"eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta DDIM').info("noise multiplier; higher = more unpredictable results"),
"eta_ancestral": OptionInfo(1.0, "Eta for k-diffusion samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta').info("noise multiplier; currently only applies to ancestral samplers (i.e. Euler a) and SDE samplers"),
@@ -330,7 +337,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final", infotext='UniPC lower order final'),
}))
-options_templates.update(options_section(('postprocessing', "Postprocessing"), {
+options_templates.update(options_section(('postprocessing', "Postprocessing", "postprocessing"), {
'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
diff --git a/style.css b/style.css
index f8b42636..6e3ca841 100644
--- a/style.css
+++ b/style.css
@@ -462,6 +462,15 @@ div.toprow-compact-tools{
padding: 4px;
}
+#settings > div.tab-nav .settings-category{
+ display: block;
+ margin: 1em 0 0.25em 0;
+ font-weight: bold;
+ text-decoration: underline;
+ cursor: default;
+ user-select: none;
+}
+
#settings_result{
height: 1.4em;
margin: 0 1.2em;
--
cgit v1.2.3
From 01c8f1803a77c63b2ebfd3cbbd41659fb914f274 Mon Sep 17 00:00:00 2001
From: missionfloyd
Date: Thu, 30 Nov 2023 22:36:12 -0700
Subject: Close popups with escape key
---
javascript/extraNetworks.js | 6 ++++++
1 file changed, 6 insertions(+)
(limited to 'javascript')
diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js
index a787372c..98a7abb7 100644
--- a/javascript/extraNetworks.js
+++ b/javascript/extraNetworks.js
@@ -392,3 +392,9 @@ function extraNetworksRefreshSingleCard(page, tabname, name) {
}
});
}
+
+window.addEventListener("keydown", function(event) {
+ if (event.key == "Escape") {
+ closePopup();
+ }
+});
--
cgit v1.2.3
From 11d23e8ca55c097ecfa255a05b63f194e25f08be Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sat, 2 Dec 2023 18:01:11 +0300
Subject: remove Train/Preprocessing tab and put all its functionality into
extras batch images mode
---
javascript/ui.js | 17 ++
modules/api/api.py | 15 --
modules/api/models.py | 3 -
modules/postprocessing.py | 92 +++++++---
modules/scripts_postprocessing.py | 86 ++++++++-
modules/shared_options.py | 1 +
modules/textual_inversion/preprocess.py | 232 ------------------------
modules/textual_inversion/ui.py | 7 -
modules/ui.py | 107 -----------
modules/ui_postprocessing.py | 16 +-
modules/ui_toprow.py | 6 +-
scripts/postprocessing_caption.py | 30 +++
scripts/postprocessing_codeformer.py | 16 +-
scripts/postprocessing_create_flipped_copies.py | 32 ++++
scripts/postprocessing_focal_crop.py | 54 ++++++
scripts/postprocessing_gfpgan.py | 13 +-
scripts/postprocessing_split_oversized.py | 71 ++++++++
scripts/postprocessing_upscale.py | 12 ++
scripts/processing_autosized_crop.py | 64 +++++++
19 files changed, 460 insertions(+), 414 deletions(-)
delete mode 100644 modules/textual_inversion/preprocess.py
create mode 100644 scripts/postprocessing_caption.py
create mode 100644 scripts/postprocessing_create_flipped_copies.py
create mode 100644 scripts/postprocessing_focal_crop.py
create mode 100644 scripts/postprocessing_split_oversized.py
create mode 100644 scripts/processing_autosized_crop.py
(limited to 'javascript')
diff --git a/javascript/ui.js b/javascript/ui.js
index 2e262602..410fc44e 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -170,6 +170,23 @@ function submit_img2img() {
return res;
}
+function submit_extras() {
+ showSubmitButtons('extras', false);
+
+ var id = randomId();
+
+ requestProgress(id, gradioApp().getElementById('extras_gallery_container'), gradioApp().getElementById('extras_gallery'), function() {
+ showSubmitButtons('extras', true);
+ });
+
+ var res = create_submit_args(arguments);
+
+ res[0] = id;
+
+ return res;
+}
+
function restoreProgressTxt2img() {
showRestoreProgressButton("txt2img", false);
var id = localGet("txt2img_task_id");
diff --git a/modules/api/api.py b/modules/api/api.py
index 09083874..b3d74e51 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -22,7 +22,6 @@ from modules.api import models
from modules.shared import opts
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
-from modules.textual_inversion.preprocess import preprocess
from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
from PIL import PngImagePlugin, Image
from modules.sd_models_config import find_checkpoint_config_near_filename
@@ -235,7 +234,6 @@ class Api:
self.add_api_route("/sdapi/v1/refresh-vae", self.refresh_vae, methods=["POST"])
self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse)
self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse)
- self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=models.PreprocessResponse)
self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=models.TrainResponse)
self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=models.TrainResponse)
self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=models.MemoryResponse)
@@ -675,19 +673,6 @@ class Api:
finally:
shared.state.end()
- def preprocess(self, args: dict):
- try:
- shared.state.begin(job="preprocess")
- preprocess(**args) # quick operation unless blip/booru interrogation is enabled
- shared.state.end()
- return models.PreprocessResponse(info='preprocess complete')
- except KeyError as e:
- return models.PreprocessResponse(info=f"preprocess error: invalid token: {e}")
- except Exception as e:
- return models.PreprocessResponse(info=f"preprocess error: {e}")
- finally:
- shared.state.end()
-
def train_embedding(self, args: dict):
try:
shared.state.begin(job="train_embedding")
diff --git a/modules/api/models.py b/modules/api/models.py
index a0d80af8..33894b3e 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -202,9 +202,6 @@ class TrainResponse(BaseModel):
class CreateResponse(BaseModel):
info: str = Field(title="Create info", description="Response string from create embedding or hypernetwork task.")
-class PreprocessResponse(BaseModel):
- info: str = Field(title="Preprocess info", description="Response string from preprocessing task.")
-
fields = {}
for key, metadata in opts.data_labels.items():
value = opts.data.get(key)
diff --git a/modules/postprocessing.py b/modules/postprocessing.py
index 0a134ee4..3c85a74c 100644
--- a/modules/postprocessing.py
+++ b/modules/postprocessing.py
@@ -6,7 +6,7 @@ from modules import shared, images, devices, scripts, scripts_postprocessing, ui
from modules.shared import opts
-def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True):
+def run_postprocessing(id_task, extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True):
devices.torch_gc()
shared.state.begin(job="extras")
@@ -29,11 +29,7 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir,
image_list = shared.listfiles(input_dir)
for filename in image_list:
- try:
- image = Image.open(filename)
- except Exception:
- continue
- yield image, filename
+ yield filename, filename
else:
assert image, 'image not selected'
yield image, None
@@ -45,37 +41,85 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir,
infotext = ''
- for image_data, name in get_images(extras_mode, image, image_folder, input_dir):
+ data_to_process = list(get_images(extras_mode, image, image_folder, input_dir))
+ shared.state.job_count = len(data_to_process)
+
+ for image_placeholder, name in data_to_process:
+ image_data: Image.Image
+ shared.state.nextjob()
shared.state.textinfo = name
+ shared.state.skipped = False
+
+ if shared.state.interrupted:
+ break
+
+ if isinstance(image_placeholder, str):
+ try:
+ image_data = Image.open(image_placeholder)
+ except Exception:
+ continue
+ else:
+ image_data = image_placeholder
+
+ shared.state.assign_current_image(image_data)
parameters, existing_pnginfo = images.read_info_from_image(image_data)
if parameters:
existing_pnginfo["parameters"] = parameters
- pp = scripts_postprocessing.PostprocessedImage(image_data.convert("RGB"))
+ initial_pp = scripts_postprocessing.PostprocessedImage(image_data.convert("RGB"))
- scripts.scripts_postproc.run(pp, args)
+ scripts.scripts_postproc.run(initial_pp, args)
- if opts.use_original_name_batch and name is not None:
- basename = os.path.splitext(os.path.basename(name))[0]
- forced_filename = basename
- else:
- basename = ''
- forced_filename = None
+ if shared.state.skipped:
+ continue
+
+ used_suffixes = {}
+ for pp in [initial_pp, *initial_pp.extra_images]:
+ suffix = pp.get_suffix(used_suffixes)
+
+ if opts.use_original_name_batch and name is not None:
+ basename = os.path.splitext(os.path.basename(name))[0]
+ forced_filename = basename + suffix
+ else:
+ basename = ''
+ forced_filename = None
+
+ infotext = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in pp.info.items() if v is not None])
+
+ if opts.enable_pnginfo:
+ pp.image.info = existing_pnginfo
+ pp.image.info["postprocessing"] = infotext
+
+ if save_output:
+ fullfn, _ = images.save_image(pp.image, path=outpath, basename=basename, extension=opts.samples_format, info=infotext, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=forced_filename, suffix=suffix)
- infotext = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in pp.info.items() if v is not None])
+ if pp.caption:
+ caption_filename = os.path.splitext(fullfn)[0] + ".txt"
+ if os.path.isfile(caption_filename):
+ with open(caption_filename, encoding="utf8") as file:
+ existing_caption = file.read().strip()
+ else:
+ existing_caption = ""
- if opts.enable_pnginfo:
- pp.image.info = existing_pnginfo
- pp.image.info["postprocessing"] = infotext
+ action = shared.opts.postprocessing_existing_caption_action
+ if action == 'Prepend' and existing_caption:
+ caption = f"{existing_caption} {pp.caption}"
+ elif action == 'Append' and existing_caption:
+ caption = f"{pp.caption} {existing_caption}"
+ elif action == 'Keep' and existing_caption:
+ caption = existing_caption
+ else:
+ caption = pp.caption
- if save_output:
- images.save_image(pp.image, path=outpath, basename=basename, extension=opts.samples_format, info=infotext, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=forced_filename)
+ caption = caption.strip()
+ if caption:
+ with open(caption_filename, "w", encoding="utf8") as file:
+ file.write(caption)
- if extras_mode != 2 or show_extras_results:
- outputs.append(pp.image)
+ if extras_mode != 2 or show_extras_results:
+ outputs.append(pp.image)
image_data.close()
@@ -99,9 +143,11 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
"upscaler_2_visibility": extras_upscaler_2_visibility,
},
"GFPGAN": {
+ "enable": True,
"gfpgan_visibility": gfpgan_visibility,
},
"CodeFormer": {
+ "enable": True,
"codeformer_visibility": codeformer_visibility,
"codeformer_weight": codeformer_weight,
},
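Note: the caption-merge branch added above reduces to a small pure function. A minimal sketch of the same rules, for reference (the function name is invented; the action strings match the postprocessing_existing_caption_action option added to shared_options.py later in this commit):

def merge_captions(generated, existing, action):
    # 'Ignore' and the empty-existing-caption case both fall through to
    # the generated caption, mirroring the else branch above
    if action == 'Prepend' and existing:
        caption = f"{existing} {generated}"
    elif action == 'Append' and existing:
        caption = f"{generated} {existing}"
    elif action == 'Keep' and existing:
        caption = existing
    else:
        caption = generated
    return caption.strip()

assert merge_captions("a cat", "tabby", 'Prepend') == "tabby a cat"
assert merge_captions("a cat", "", 'Keep') == "a cat"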
diff --git a/modules/scripts_postprocessing.py b/modules/scripts_postprocessing.py
index bac1335d..901cad08 100644
--- a/modules/scripts_postprocessing.py
+++ b/modules/scripts_postprocessing.py
@@ -1,13 +1,56 @@
+import dataclasses
import os
import gradio as gr
from modules import errors, shared
+@dataclasses.dataclass
+class PostprocessedImageSharedInfo:
+ target_width: int = None
+ target_height: int = None
+
+
class PostprocessedImage:
def __init__(self, image):
self.image = image
self.info = {}
+ self.shared = PostprocessedImageSharedInfo()
+ self.extra_images = []
+ self.nametags = []
+ self.disable_processing = False
+ self.caption = None
+
+ def get_suffix(self, used_suffixes=None):
+ used_suffixes = {} if used_suffixes is None else used_suffixes
+ suffix = "-".join(self.nametags)
+ if suffix:
+ suffix = "-" + suffix
+
+ if suffix not in used_suffixes:
+ used_suffixes[suffix] = 1
+ return suffix
+
+ for i in range(1, 100):
+ proposed_suffix = suffix + "-" + str(i)
+
+ if proposed_suffix not in used_suffixes:
+ used_suffixes[proposed_suffix] = 1
+ return proposed_suffix
+
+ return suffix
+
+ def create_copy(self, new_image, *, nametags=None, disable_processing=False):
+ pp = PostprocessedImage(new_image)
+ pp.shared = self.shared
+ pp.nametags = self.nametags.copy()
+ pp.info = self.info.copy()
+ pp.disable_processing = disable_processing
+
+ if nametags is not None:
+ pp.nametags += nametags
+
+ return pp
class ScriptPostprocessing:
@@ -42,10 +85,17 @@ class ScriptPostprocessing:
pass
- def image_changed(self):
- pass
+ def process_firstpass(self, pp: PostprocessedImage, **args):
+ """
+ Called for all scripts before calling process(). Scripts can examine the image here and set fields
+ of the pp object to communicate things to other scripts.
+ args is a dictionary with all values returned by components from ui()
+ """
+ pass
+ def image_changed(self):
+ pass
def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
@@ -118,16 +168,42 @@ class ScriptPostprocessingRunner:
return inputs
def run(self, pp: PostprocessedImage, args):
- for script in self.scripts_in_preferred_order():
- shared.state.job = script.name
+ scripts = []
+ for script in self.scripts_in_preferred_order():
script_args = args[script.args_from:script.args_to]
process_args = {}
for (name, _component), value in zip(script.controls.items(), script_args):
process_args[name] = value
- script.process(pp, **process_args)
+ scripts.append((script, process_args))
+
+ for script, process_args in scripts:
+ script.process_firstpass(pp, **process_args)
+
+ all_images = [pp]
+
+ for script, process_args in scripts:
+ if shared.state.skipped:
+ break
+
+ shared.state.job = script.name
+
+ for single_image in all_images.copy():
+
+ if not single_image.disable_processing:
+ script.process(single_image, **process_args)
+
+ for extra_image in single_image.extra_images:
+ if not isinstance(extra_image, PostprocessedImage):
+ extra_image = single_image.create_copy(extra_image)
+
+ all_images.append(extra_image)
+
+ single_image.extra_images.clear()
+
+ pp.extra_images = all_images[1:]
def create_args_for_run(self, scripts_args):
if not self.ui_created:
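Note: run() above defines the contract for the new-style scripts added below: every script's process_firstpass() runs before any process(), process() receives the values from ui()'s controls dict as keyword arguments, and plain PIL images appended to pp.extra_images are wrapped via create_copy() and fed through the remaining scripts. A minimal conforming script, as a sketch (the class, label, and behavior are invented for illustration; the base class and InputAccordion come from this codebase):

from modules import scripts_postprocessing, ui_components


class ScriptPostprocessingGrayscaleCopy(scripts_postprocessing.ScriptPostprocessing):
    name = "Grayscale copy"  # hypothetical example, not part of this commit
    order = 4500

    def ui(self):
        with ui_components.InputAccordion(False, label="Grayscale copy") as enable:
            pass

        return {
            "enable": enable,
        }

    def process_firstpass(self, pp: scripts_postprocessing.PostprocessedImage, enable):
        # first pass: inspect the image or set pp.shared fields before any
        # script's process() runs; nothing to coordinate in this example
        pass

    def process(self, pp: scripts_postprocessing.PostprocessedImage, enable):
        if not enable:
            return

        # a raw PIL image appended here is wrapped by the runner via
        # pp.create_copy() and sent through the remaining scripts
        pp.extra_images.append(pp.image.convert("L").convert("RGB"))

Copies created with create_copy(..., nametags=[...]) pick up a "-tag" filename suffix through get_suffix(), with "-1", "-2", ... appended on collisions, which keeps the saved extra images distinguishable in batch output.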
diff --git a/modules/shared_options.py b/modules/shared_options.py
index d8a27180..859dee40 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -357,6 +357,7 @@ options_templates.update(options_section(('postprocessing', "Postprocessing", "p
'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
+ 'postprocessing_existing_caption_action': OptionInfo("Ignore", "Action for existing captions", gr.Radio, {"choices": ["Ignore", "Keep", "Prepend", "Append"]}).info("when generating captions using postprocessing; Ignore = use generated; Keep = use original; Prepend/Append = combine both"),
}))
options_templates.update(options_section((None, "Hidden options"), {
diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py
deleted file mode 100644
index 789fa083..00000000
--- a/modules/textual_inversion/preprocess.py
+++ /dev/null
@@ -1,232 +0,0 @@
-import os
-from PIL import Image, ImageOps
-import math
-import tqdm
-
-from modules import shared, images, deepbooru
-from modules.textual_inversion import autocrop
-
-
-def preprocess(id_task, process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.15, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None):
- try:
- if process_caption:
- shared.interrogator.load()
-
- if process_caption_deepbooru:
- deepbooru.model.start()
-
- preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug, process_multicrop, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold)
-
- finally:
-
- if process_caption:
- shared.interrogator.send_blip_to_ram()
-
- if process_caption_deepbooru:
- deepbooru.model.stop()
-
-
-def listfiles(dirname):
- return os.listdir(dirname)
-
-
-class PreprocessParams:
- src = None
- dstdir = None
- subindex = 0
- flip = False
- process_caption = False
- process_caption_deepbooru = False
- preprocess_txt_action = None
-
-
-def save_pic_with_caption(image, index, params: PreprocessParams, existing_caption=None):
- caption = ""
-
- if params.process_caption:
- caption += shared.interrogator.generate_caption(image)
-
- if params.process_caption_deepbooru:
- if caption:
- caption += ", "
- caption += deepbooru.model.tag_multi(image)
-
- filename_part = params.src
- filename_part = os.path.splitext(filename_part)[0]
- filename_part = os.path.basename(filename_part)
-
- basename = f"{index:05}-{params.subindex}-{filename_part}"
- image.save(os.path.join(params.dstdir, f"{basename}.png"))
-
- if params.preprocess_txt_action == 'prepend' and existing_caption:
- caption = f"{existing_caption} {caption}"
- elif params.preprocess_txt_action == 'append' and existing_caption:
- caption = f"{caption} {existing_caption}"
- elif params.preprocess_txt_action == 'copy' and existing_caption:
- caption = existing_caption
-
- caption = caption.strip()
-
- if caption:
- with open(os.path.join(params.dstdir, f"{basename}.txt"), "w", encoding="utf8") as file:
- file.write(caption)
-
- params.subindex += 1
-
-
-def save_pic(image, index, params, existing_caption=None):
- save_pic_with_caption(image, index, params, existing_caption=existing_caption)
-
- if params.flip:
- save_pic_with_caption(ImageOps.mirror(image), index, params, existing_caption=existing_caption)
-
-
-def split_pic(image, inverse_xy, width, height, overlap_ratio):
- if inverse_xy:
- from_w, from_h = image.height, image.width
- to_w, to_h = height, width
- else:
- from_w, from_h = image.width, image.height
- to_w, to_h = width, height
- h = from_h * to_w // from_w
- if inverse_xy:
- image = image.resize((h, to_w))
- else:
- image = image.resize((to_w, h))
-
- split_count = math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio)))
- y_step = (h - to_h) / (split_count - 1)
- for i in range(split_count):
- y = int(y_step * i)
- if inverse_xy:
- splitted = image.crop((y, 0, y + to_h, to_w))
- else:
- splitted = image.crop((0, y, to_w, y + to_h))
- yield splitted
-
-# not using torchvision.transforms.CenterCrop because it doesn't allow float regions
-def center_crop(image: Image, w: int, h: int):
- iw, ih = image.size
- if ih / h < iw / w:
- sw = w * ih / h
- box = (iw - sw) / 2, 0, iw - (iw - sw) / 2, ih
- else:
- sh = h * iw / w
- box = 0, (ih - sh) / 2, iw, ih - (ih - sh) / 2
- return image.resize((w, h), Image.Resampling.LANCZOS, box)
-
-
-def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, threshold):
- iw, ih = image.size
- err = lambda w, h: 1-(lambda x: x if x < 1 else 1/x)(iw/ih/(w/h))
- wh = max(((w, h) for w in range(mindim, maxdim+1, 64) for h in range(mindim, maxdim+1, 64)
- if minarea <= w * h <= maxarea and err(w, h) <= threshold),
- key= lambda wh: (wh[0]*wh[1], -err(*wh))[::1 if objective=='Maximize area' else -1],
- default=None
- )
- return wh and center_crop(image, *wh)
-
-
-def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None):
- width = process_width
- height = process_height
- src = os.path.abspath(process_src)
- dst = os.path.abspath(process_dst)
- split_threshold = max(0.0, min(1.0, split_threshold))
- overlap_ratio = max(0.0, min(0.9, overlap_ratio))
-
- assert src != dst, 'same directory specified as source and destination'
-
- os.makedirs(dst, exist_ok=True)
-
- files = listfiles(src)
-
- shared.state.job = "preprocess"
- shared.state.textinfo = "Preprocessing..."
- shared.state.job_count = len(files)
-
- params = PreprocessParams()
- params.dstdir = dst
- params.flip = process_flip
- params.process_caption = process_caption
- params.process_caption_deepbooru = process_caption_deepbooru
- params.preprocess_txt_action = preprocess_txt_action
-
- pbar = tqdm.tqdm(files)
- for index, imagefile in enumerate(pbar):
- params.subindex = 0
- filename = os.path.join(src, imagefile)
- try:
- img = Image.open(filename)
- img = ImageOps.exif_transpose(img)
- img = img.convert("RGB")
- except Exception:
- continue
-
- description = f"Preprocessing [Image {index}/{len(files)}]"
- pbar.set_description(description)
- shared.state.textinfo = description
-
- params.src = filename
-
- existing_caption = None
- existing_caption_filename = f"{os.path.splitext(filename)[0]}.txt"
- if os.path.exists(existing_caption_filename):
- with open(existing_caption_filename, 'r', encoding="utf8") as file:
- existing_caption = file.read()
-
- if shared.state.interrupted:
- break
-
- if img.height > img.width:
- ratio = (img.width * height) / (img.height * width)
- inverse_xy = False
- else:
- ratio = (img.height * width) / (img.width * height)
- inverse_xy = True
-
- process_default_resize = True
-
- if process_split and ratio < 1.0 and ratio <= split_threshold:
- for splitted in split_pic(img, inverse_xy, width, height, overlap_ratio):
- save_pic(splitted, index, params, existing_caption=existing_caption)
- process_default_resize = False
-
- if process_focal_crop and img.height != img.width:
-
- dnn_model_path = None
- try:
- dnn_model_path = autocrop.download_and_cache_models()
- except Exception as e:
- print("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", e)
-
- autocrop_settings = autocrop.Settings(
- crop_width = width,
- crop_height = height,
- face_points_weight = process_focal_crop_face_weight,
- entropy_points_weight = process_focal_crop_entropy_weight,
- corner_points_weight = process_focal_crop_edges_weight,
- annotate_image = process_focal_crop_debug,
- dnn_model_path = dnn_model_path,
- )
- for focal in autocrop.crop_image(img, autocrop_settings):
- save_pic(focal, index, params, existing_caption=existing_caption)
- process_default_resize = False
-
- if process_multicrop:
- cropped = multicrop_pic(img, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold)
- if cropped is not None:
- save_pic(cropped, index, params, existing_caption=existing_caption)
- else:
- print(f"skipped {img.width}x{img.height} image {filename} (can't find suitable size within error threshold)")
- process_default_resize = False
-
- if process_keep_original_size:
- save_pic(img, index, params, existing_caption=existing_caption)
- process_default_resize = False
-
- if process_default_resize:
- img = images.resize_image(1, img, width, height)
- save_pic(img, index, params, existing_caption=existing_caption)
-
- shared.state.nextjob()
diff --git a/modules/textual_inversion/ui.py b/modules/textual_inversion/ui.py
index 35c4feef..f149ad1f 100644
--- a/modules/textual_inversion/ui.py
+++ b/modules/textual_inversion/ui.py
@@ -3,7 +3,6 @@ import html
import gradio as gr
import modules.textual_inversion.textual_inversion
-import modules.textual_inversion.preprocess
from modules import sd_hijack, shared
@@ -15,12 +14,6 @@ def create_embedding(name, initialization_text, nvpt, overwrite_old):
return gr.Dropdown.update(choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())), f"Created: {filename}", ""
-def preprocess(*args):
- modules.textual_inversion.preprocess.preprocess(*args)
-
- return f"Preprocessing {'interrupted' if shared.state.interrupted else 'finished'}.", ""
-
-
def train_embedding(*args):
assert not shared.cmd_opts.lowvram, 'Training models with lowvram not possible'
diff --git a/modules/ui.py b/modules/ui.py
index 08e0ad77..d80486dd 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -912,71 +912,6 @@ def create_ui():
with gr.Column():
create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork")
- with gr.Tab(label="Preprocess images", id="preprocess_images"):
- process_src = gr.Textbox(label='Source directory', elem_id="train_process_src")
- process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst")
- process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width")
- process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height")
- preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action")
-
- with gr.Row():
- process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size")
- process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip")
- process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split")
- process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop")
- process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop")
- process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption")
- process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru")
-
- with gr.Row(visible=False) as process_split_extra_row:
- process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold")
- process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio")
-
- with gr.Row(visible=False) as process_focal_crop_row:
- process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight")
- process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight")
- process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight")
- process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")
-
- with gr.Column(visible=False) as process_multicrop_col:
- gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
- with gr.Row():
- process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim")
- process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim")
- with gr.Row():
- process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea")
- process_multicrop_maxarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea")
- with gr.Row():
- process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective")
- process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold")
-
- with gr.Row():
- with gr.Column(scale=3):
- gr.HTML(value="")
-
- with gr.Column():
- with gr.Row():
- interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing")
- run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess")
-
- process_split.change(
- fn=lambda show: gr_show(show),
- inputs=[process_split],
- outputs=[process_split_extra_row],
- )
-
- process_focal_crop.change(
- fn=lambda show: gr_show(show),
- inputs=[process_focal_crop],
- outputs=[process_focal_crop_row],
- )
-
- process_multicrop.change(
- fn=lambda show: gr_show(show),
- inputs=[process_multicrop],
- outputs=[process_multicrop_col],
- )
-
def get_textual_inversion_template_names():
return sorted(textual_inversion.textual_inversion_templates)
@@ -1077,42 +1012,6 @@ def create_ui():
]
)
- run_preprocess.click(
- fn=wrap_gradio_gpu_call(textual_inversion_ui.preprocess, extra_outputs=[gr.update()]),
- _js="start_training_textual_inversion",
- inputs=[
- dummy_component,
- process_src,
- process_dst,
- process_width,
- process_height,
- preprocess_txt_action,
- process_keep_original_size,
- process_flip,
- process_split,
- process_caption,
- process_caption_deepbooru,
- process_split_threshold,
- process_overlap_ratio,
- process_focal_crop,
- process_focal_crop_face_weight,
- process_focal_crop_entropy_weight,
- process_focal_crop_edges_weight,
- process_focal_crop_debug,
- process_multicrop,
- process_multicrop_mindim,
- process_multicrop_maxdim,
- process_multicrop_minarea,
- process_multicrop_maxarea,
- process_multicrop_objective,
- process_multicrop_threshold,
- ],
- outputs=[
- ti_output,
- ti_outcome,
- ],
- )
-
train_embedding.click(
fn=wrap_gradio_gpu_call(textual_inversion_ui.train_embedding, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
@@ -1186,12 +1085,6 @@ def create_ui():
outputs=[],
)
- interrupt_preprocessing.click(
- fn=lambda: shared.state.interrupt(),
- inputs=[],
- outputs=[],
- )
-
loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file)
settings = ui_settings.UiSettings()
diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py
index 802e1ce7..fbad0800 100644
--- a/modules/ui_postprocessing.py
+++ b/modules/ui_postprocessing.py
@@ -1,9 +1,10 @@
import gradio as gr
-from modules import scripts, shared, ui_common, postprocessing, call_queue
+from modules import scripts, shared, ui_common, postprocessing, call_queue, ui_toprow
import modules.generation_parameters_copypaste as parameters_copypaste
def create_ui():
+ dummy_component = gr.Label(visible=False)
tab_index = gr.State(value=0)
with gr.Row(equal_height=False, variant='compact'):
@@ -20,11 +21,13 @@ def create_ui():
extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.", elem_id="extras_batch_output_dir")
show_extras_results = gr.Checkbox(label='Show result images', value=True, elem_id="extras_show_extras_results")
- submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
-
script_inputs = scripts.scripts_postproc.setup_ui()
with gr.Column():
+ toprow = ui_toprow.Toprow(is_compact=True, is_img2img=False, id_part="extras")
+ toprow.create_inline_toprow_image()
+ submit = toprow.submit
+
result_images, html_info_x, html_info, html_log = ui_common.create_output_panel("extras", shared.opts.outdir_extras_samples)
tab_single.select(fn=lambda: 0, inputs=[], outputs=[tab_index])
@@ -33,7 +36,9 @@ def create_ui():
submit.click(
fn=call_queue.wrap_gradio_gpu_call(postprocessing.run_postprocessing, extra_outputs=[None, '']),
+ _js="submit_extras",
inputs=[
+ dummy_component,
tab_index,
extras_image,
image_batch,
@@ -45,8 +50,9 @@ def create_ui():
outputs=[
result_images,
html_info_x,
- html_info,
- ]
+ html_log,
+ ],
+ show_progress=False,
)
parameters_copypaste.add_paste_fields("extras", extras_image, None)
diff --git a/modules/ui_toprow.py b/modules/ui_toprow.py
index 985b5a2d..88838f97 100644
--- a/modules/ui_toprow.py
+++ b/modules/ui_toprow.py
@@ -34,8 +34,10 @@ class Toprow:
submit_box = None
- def __init__(self, is_img2img, is_compact=False):
- id_part = "img2img" if is_img2img else "txt2img"
+ def __init__(self, is_img2img, is_compact=False, id_part=None):
+ if id_part is None:
+ id_part = "img2img" if is_img2img else "txt2img"
+
self.id_part = id_part
self.is_img2img = is_img2img
self.is_compact = is_compact
diff --git a/scripts/postprocessing_caption.py b/scripts/postprocessing_caption.py
new file mode 100644
index 00000000..243e3ad9
--- /dev/null
+++ b/scripts/postprocessing_caption.py
@@ -0,0 +1,30 @@
+from modules import scripts_postprocessing, ui_components, deepbooru, shared
+import gradio as gr
+
+
+class ScriptPostprocessingCaption(scripts_postprocessing.ScriptPostprocessing):
+ name = "Caption"
+ order = 4000
+
+ def ui(self):
+ with ui_components.InputAccordion(False, label="Caption") as enable:
+ option = gr.CheckboxGroup(value=["Deepbooru"], choices=["Deepbooru", "BLIP"], show_label=False)
+
+ return {
+ "enable": enable,
+ "option": option,
+ }
+
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, option):
+ if not enable:
+ return
+
+ captions = [pp.caption]
+
+ if "Deepbooru" in option:
+ captions.append(deepbooru.model.tag(pp.image))
+
+ if "BLIP" in option:
+ captions.append(shared.interrogator.generate_caption(pp.image))
+
+ pp.caption = ", ".join([x for x in captions if x])
diff --git a/scripts/postprocessing_codeformer.py b/scripts/postprocessing_codeformer.py
index a7d80d40..e1e156dd 100644
--- a/scripts/postprocessing_codeformer.py
+++ b/scripts/postprocessing_codeformer.py
@@ -1,28 +1,28 @@
from PIL import Image
import numpy as np
-from modules import scripts_postprocessing, codeformer_model
+from modules import scripts_postprocessing, codeformer_model, ui_components
import gradio as gr
-from modules.ui_components import FormRow
-
class ScriptPostprocessingCodeFormer(scripts_postprocessing.ScriptPostprocessing):
name = "CodeFormer"
order = 3000
def ui(self):
- with FormRow():
- codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, elem_id="extras_codeformer_visibility")
- codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, elem_id="extras_codeformer_weight")
+ with ui_components.InputAccordion(False, label="CodeFormer") as enable:
+ with gr.Row():
+ codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Visibility", value=1.0, elem_id="extras_codeformer_visibility")
+ codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Weight (0 = maximum effect, 1 = minimum effect)", value=0, elem_id="extras_codeformer_weight")
return {
+ "enable": enable,
"codeformer_visibility": codeformer_visibility,
"codeformer_weight": codeformer_weight,
}
- def process(self, pp: scripts_postprocessing.PostprocessedImage, codeformer_visibility, codeformer_weight):
- if codeformer_visibility == 0:
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, codeformer_visibility, codeformer_weight):
+ if codeformer_visibility == 0 or not enable:
return
restored_img = codeformer_model.codeformer.restore(np.array(pp.image, dtype=np.uint8), w=codeformer_weight)
diff --git a/scripts/postprocessing_create_flipped_copies.py b/scripts/postprocessing_create_flipped_copies.py
new file mode 100644
index 00000000..3425571d
--- /dev/null
+++ b/scripts/postprocessing_create_flipped_copies.py
@@ -0,0 +1,32 @@
+from PIL import ImageOps, Image
+
+from modules import scripts_postprocessing, ui_components
+import gradio as gr
+
+
+class ScriptPostprocessingCreateFlippedCopies(scripts_postprocessing.ScriptPostprocessing):
+ name = "Create flipped copies"
+ order = 4000
+
+ def ui(self):
+ with ui_components.InputAccordion(False, label="Create flipped copies") as enable:
+ with gr.Row():
+ option = gr.CheckboxGroup(value=["Horizontal"], choices=["Horizontal", "Vertical", "Both"], show_label=False)
+
+ return {
+ "enable": enable,
+ "option": option,
+ }
+
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, option):
+ if not enable:
+ return
+
+ if "Horizontal" in option:
+ pp.extra_images.append(ImageOps.mirror(pp.image))
+
+ if "Vertical" in option:
+ pp.extra_images.append(pp.image.transpose(Image.Transpose.FLIP_TOP_BOTTOM))
+
+ if "Both" in option:
+ pp.extra_images.append(pp.image.transpose(Image.Transpose.FLIP_TOP_BOTTOM).transpose(Image.Transpose.FLIP_LEFT_RIGHT))
diff --git a/scripts/postprocessing_focal_crop.py b/scripts/postprocessing_focal_crop.py
new file mode 100644
index 00000000..d3baf298
--- /dev/null
+++ b/scripts/postprocessing_focal_crop.py
@@ -0,0 +1,54 @@
+
+from modules import scripts_postprocessing, ui_components, errors
+import gradio as gr
+
+from modules.textual_inversion import autocrop
+
+
+class ScriptPostprocessingFocalCrop(scripts_postprocessing.ScriptPostprocessing):
+ name = "Auto focal point crop"
+ order = 4000
+
+ def ui(self):
+ with ui_components.InputAccordion(False, label="Auto focal point crop") as enable:
+ face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_face_weight")
+ entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_entropy_weight")
+ edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_edges_weight")
+ debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")
+
+ return {
+ "enable": enable,
+ "face_weight": face_weight,
+ "entropy_weight": entropy_weight,
+ "edges_weight": edges_weight,
+ "debug": debug,
+ }
+
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, face_weight, entropy_weight, edges_weight, debug):
+ if not enable:
+ return
+
+ if not pp.shared.target_width or not pp.shared.target_height:
+ return
+
+ dnn_model_path = None
+ try:
+ dnn_model_path = autocrop.download_and_cache_models()
+ except Exception:
+ errors.report("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", exc_info=True)
+
+ autocrop_settings = autocrop.Settings(
+ crop_width=pp.shared.target_width,
+ crop_height=pp.shared.target_height,
+ face_points_weight=face_weight,
+ entropy_points_weight=entropy_weight,
+ corner_points_weight=edges_weight,
+ annotate_image=debug,
+ dnn_model_path=dnn_model_path,
+ )
+
+ result, *others = autocrop.crop_image(pp.image, autocrop_settings)
+
+ pp.image = result
+ pp.extra_images = [pp.create_copy(x, nametags=["focal-crop-debug"], disable_processing=True) for x in others]
+
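
The script touches the autocrop module through only the two calls visible above; a sketch of driving it directly with the same settings, using only names shown in this patch (the input path and the 512x512 target are placeholders):

    from PIL import Image

    from modules import errors
    from modules.textual_inversion import autocrop

    dnn_model_path = None
    try:
        dnn_model_path = autocrop.download_and_cache_models()
    except Exception:
        errors.report("Face detection model unavailable, using haar fallback.", exc_info=True)

    settings = autocrop.Settings(
        crop_width=512,
        crop_height=512,
        face_points_weight=0.9,        # defaults from the sliders above
        entropy_points_weight=0.15,
        corner_points_weight=0.5,
        annotate_image=False,          # True appends debug visualizations to the results
        dnn_model_path=dnn_model_path, # None falls back to the lower quality haar detector
    )

    result, *debug_images = autocrop.crop_image(Image.open("input.png"), settings)
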
diff --git a/scripts/postprocessing_gfpgan.py b/scripts/postprocessing_gfpgan.py
index d854f3f7..6e756605 100644
--- a/scripts/postprocessing_gfpgan.py
+++ b/scripts/postprocessing_gfpgan.py
@@ -1,26 +1,25 @@
from PIL import Image
import numpy as np
-from modules import scripts_postprocessing, gfpgan_model
+from modules import scripts_postprocessing, gfpgan_model, ui_components
import gradio as gr
-from modules.ui_components import FormRow
-
class ScriptPostprocessingGfpGan(scripts_postprocessing.ScriptPostprocessing):
name = "GFPGAN"
order = 2000
def ui(self):
- with FormRow():
- gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, elem_id="extras_gfpgan_visibility")
+ with ui_components.InputAccordion(False, label="GFPGAN") as enable:
+ gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Visibility", value=1.0, elem_id="extras_gfpgan_visibility")
return {
+ "enable": enable,
"gfpgan_visibility": gfpgan_visibility,
}
- def process(self, pp: scripts_postprocessing.PostprocessedImage, gfpgan_visibility):
- if gfpgan_visibility == 0:
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, gfpgan_visibility):
+ if gfpgan_visibility == 0 or not enable:
return
restored_img = gfpgan_model.gfpgan_fix_faces(np.array(pp.image, dtype=np.uint8))
diff --git a/scripts/postprocessing_split_oversized.py b/scripts/postprocessing_split_oversized.py
new file mode 100644
index 00000000..c4a03160
--- /dev/null
+++ b/scripts/postprocessing_split_oversized.py
@@ -0,0 +1,71 @@
+import math
+
+from modules import scripts_postprocessing, ui_components
+import gradio as gr
+
+
+def split_pic(image, inverse_xy, width, height, overlap_ratio):
+ if inverse_xy:
+ from_w, from_h = image.height, image.width
+ to_w, to_h = height, width
+ else:
+ from_w, from_h = image.width, image.height
+ to_w, to_h = width, height
+ h = from_h * to_w // from_w
+ if inverse_xy:
+ image = image.resize((h, to_w))
+ else:
+ image = image.resize((to_w, h))
+
+ split_count = math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio)))
+ y_step = (h - to_h) / (split_count - 1)
+ for i in range(split_count):
+ y = int(y_step * i)
+ if inverse_xy:
+ splitted = image.crop((y, 0, y + to_h, to_w))
+ else:
+ splitted = image.crop((0, y, to_w, y + to_h))
+ yield splitted
+
+
+class ScriptPostprocessingSplitOversized(scripts_postprocessing.ScriptPostprocessing):
+ name = "Split oversized images"
+ order = 4000
+
+ def ui(self):
+ with ui_components.InputAccordion(False, label="Split oversized images") as enable:
+ with gr.Row():
+ split_threshold = gr.Slider(label='Threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_split_threshold")
+ overlap_ratio = gr.Slider(label='Overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="postprocess_overlap_ratio")
+
+ return {
+ "enable": enable,
+ "split_threshold": split_threshold,
+ "overlap_ratio": overlap_ratio,
+ }
+
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, split_threshold, overlap_ratio):
+ if not enable:
+ return
+
+ width = pp.shared.target_width
+ height = pp.shared.target_height
+
+ if not width or not height:
+ return
+
+ if pp.image.height > pp.image.width:
+ ratio = (pp.image.width * height) / (pp.image.height * width)
+ inverse_xy = False
+ else:
+ ratio = (pp.image.height * width) / (pp.image.width * height)
+ inverse_xy = True
+
+ if ratio >= 1.0 and ratio > split_threshold:
+ return
+
+ result, *others = split_pic(pp.image, inverse_xy, width, height, overlap_ratio)
+
+ pp.image = result
+ pp.extra_images = [pp.create_copy(x) for x in others]
+
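
To make split_pic's tiling math concrete, a worked example with assumed numbers: a resized height of 1280, a tile height of 512, and the default overlap ratio of 0.2 (the computation assumes split_count > 1, as split_pic does):

    import math

    h, to_h, overlap_ratio = 1280, 512, 0.2

    # Fewest tiles such that consecutive tiles overlap by at least overlap_ratio.
    split_count = math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio)))  # 3

    # Spread the tiles evenly across the full height.
    y_step = (h - to_h) / (split_count - 1)  # 384.0
    tops = [int(y_step * i) for i in range(split_count)]  # [0, 384, 768]
    # Resulting crops: (0, 512), (384, 896), (768, 1280); consecutive tiles share
    # 128 px, i.e. 25% of a tile, which satisfies the 0.2 minimum overlap.
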
diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py
index eb42a29e..ed709688 100644
--- a/scripts/postprocessing_upscale.py
+++ b/scripts/postprocessing_upscale.py
@@ -81,6 +81,14 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
return image
+ def process_firstpass(self, pp: scripts_postprocessing.PostprocessedImage, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0):
+ if upscale_mode == 1:
+ pp.shared.target_width = upscale_to_width
+ pp.shared.target_height = upscale_to_height
+ else:
+ pp.shared.target_width = int(pp.image.width * upscale_by)
+ pp.shared.target_height = int(pp.image.height * upscale_by)
+
def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0):
if upscaler_1_name == "None":
upscaler_1_name = None
@@ -126,6 +134,10 @@ class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale):
"upscaler_name": upscaler_name,
}
+ def process_firstpass(self, pp: scripts_postprocessing.PostprocessedImage, upscale_by=2.0, upscaler_name=None):
+ pp.shared.target_width = int(pp.image.width * upscale_by)
+ pp.shared.target_height = int(pp.image.height * upscale_by)
+
def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_by=2.0, upscaler_name=None):
if upscaler_name is None or upscaler_name == "None":
return
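
The reason for a separate process_firstpass is ordering: every script's firstpass runs before any script's process, so downstream consumers of the target size (the focal crop and split scripts above) see pp.shared.target_width and target_height no matter where the upscaler sits in the chain. A hypothetical sketch of such a dispatch loop; run_postprocessing, scripts, and script_args are illustrative names, not this repository's API:

    def run_postprocessing(pp, scripts, script_args):
        # Pass 1: scripts publish shared state, e.g. the upscalers set
        # pp.shared.target_width and pp.shared.target_height.
        for script in scripts:
            script.process_firstpass(pp, **script_args[script.name])

        # Pass 2: transform the image; shared state is now fully populated.
        for script in scripts:
            script.process(pp, **script_args[script.name])
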
diff --git a/scripts/processing_autosized_crop.py b/scripts/processing_autosized_crop.py
new file mode 100644
index 00000000..c0980226
--- /dev/null
+++ b/scripts/processing_autosized_crop.py
@@ -0,0 +1,64 @@
+from PIL import Image
+
+from modules import scripts_postprocessing, ui_components
+import gradio as gr
+
+
+def center_crop(image: Image, w: int, h: int):
+ iw, ih = image.size
+ if ih / h < iw / w:
+ sw = w * ih / h
+ box = (iw - sw) / 2, 0, iw - (iw - sw) / 2, ih
+ else:
+ sh = h * iw / w
+ box = 0, (ih - sh) / 2, iw, ih - (ih - sh) / 2
+ return image.resize((w, h), Image.Resampling.LANCZOS, box)
+
+
+def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, threshold):
+ iw, ih = image.size
+ err = lambda w, h: 1 - (lambda x: x if x < 1 else 1 / x)(iw / ih / (w / h))
+ wh = max(((w, h) for w in range(mindim, maxdim + 1, 64) for h in range(mindim, maxdim + 1, 64)
+ if minarea <= w * h <= maxarea and err(w, h) <= threshold),
+ key=lambda wh: (wh[0] * wh[1], -err(*wh))[::1 if objective == 'Maximize area' else -1],
+ default=None
+ )
+ return wh and center_crop(image, *wh)
+
+
+class ScriptPostprocessingAutosizedCrop(scripts_postprocessing.ScriptPostprocessing):
+ name = "Auto-sized crop"
+ order = 4000
+
+ def ui(self):
+ with ui_components.InputAccordion(False, label="Auto-sized crop") as enable:
+ gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
+ with gr.Row():
+ mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="postprocess_multicrop_mindim")
+ maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="postprocess_multicrop_maxdim")
+ with gr.Row():
+ minarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area lower bound", value=64 * 64, elem_id="postprocess_multicrop_minarea")
+ maxarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area upper bound", value=640 * 640, elem_id="postprocess_multicrop_maxarea")
+ with gr.Row():
+ objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="postprocess_multicrop_objective")
+ threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="postprocess_multicrop_threshold")
+
+ return {
+ "enable": enable,
+ "mindim": mindim,
+ "maxdim": maxdim,
+ "minarea": minarea,
+ "maxarea": maxarea,
+ "objective": objective,
+ "threshold": threshold,
+ }
+
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, mindim, maxdim, minarea, maxarea, objective, threshold):
+ if not enable:
+ return
+
+ cropped = multicrop_pic(pp.image, mindim, maxdim, minarea, maxarea, objective, threshold)
+ if cropped is not None:
+ pp.image = cropped
+ else:
+ print(f"skipped {pp.image.width}x{pp.image.height} image (can't find suitable size within error threshold)")
--
cgit v1.2.3