Diffstat:
-rw-r--r--  .eslintrc.js                                |  9
-rw-r--r--  javascript/aspectRatioOverlay.js            |  2
-rw-r--r--  javascript/contextMenus.js                  |  4
-rw-r--r--  javascript/dragdrop.js                      | 53
-rw-r--r--  javascript/generationParams.js              |  2
-rw-r--r--  javascript/hints.js                         | 81
-rw-r--r--  javascript/imageMaskFix.js                  |  2
-rw-r--r--  javascript/imageParams.js                   | 18
-rw-r--r--  javascript/imageviewer.js                   |  2
-rw-r--r--  javascript/notification.js                  |  2
-rw-r--r--  javascript/token-counters.js                | 83
-rw-r--r--  javascript/ui.js                            | 73
-rw-r--r--  modules/api/api.py                          |  2
-rw-r--r--  modules/devices.py                          | 18
-rw-r--r--  modules/errors.py                           |  8
-rw-r--r--  modules/generation_parameters_copypaste.py  | 16
-rw-r--r--  modules/images.py                           | 31
-rw-r--r--  modules/img2img.py                          |  3
-rw-r--r--  modules/processing.py                       |  4
-rw-r--r--  modules/script_callbacks.py                 | 20
-rw-r--r--  modules/sd_hijack.py                        | 20
-rw-r--r--  modules/sd_models.py                        | 24
-rw-r--r--  modules/sd_samplers_kdiffusion.py           | 50
-rw-r--r--  modules/sd_unet.py                          | 92
-rw-r--r--  modules/shared.py                           |  8
-rw-r--r--  modules/shared_items.py                     | 11
-rw-r--r--  modules/ui.py                               |  6
-rw-r--r--  modules/ui_common.py                        |  9
-rw-r--r--  requirements.txt                            |  2
-rw-r--r--  requirements_versions.txt                   |  2
-rw-r--r--  script.js                                   | 63
-rw-r--r--  scripts/xyz_grid.py                         |  6
-rw-r--r--  webui.py                                    | 23
-rwxr-xr-x  webui.sh                                    |  9

34 files changed, 563 insertions(+), 195 deletions(-)

diff --git a/.eslintrc.js b/.eslintrc.js
index 944cc869..f33aca09 100644
--- a/.eslintrc.js
+++ b/.eslintrc.js
@@ -50,13 +50,14 @@ module.exports = {
globals: {
//script.js
gradioApp: "readonly",
+ executeCallbacks: "readonly",
+ onAfterUiUpdate: "readonly",
+ onOptionsChanged: "readonly",
onUiLoaded: "readonly",
onUiUpdate: "readonly",
- onOptionsChanged: "readonly",
uiCurrentTab: "writable",
- uiElementIsVisible: "readonly",
uiElementInSight: "readonly",
- executeCallbacks: "readonly",
+ uiElementIsVisible: "readonly",
//ui.js
opts: "writable",
all_gallery_buttons: "readonly",
@@ -84,5 +85,7 @@ module.exports = {
// imageviewer.js
modalPrevImage: "readonly",
modalNextImage: "readonly",
+ // token-counters.js
+ setupTokenCounters: "readonly",
}
};
diff --git a/javascript/aspectRatioOverlay.js b/javascript/aspectRatioOverlay.js
index 1c08a1a9..2cf2d571 100644
--- a/javascript/aspectRatioOverlay.js
+++ b/javascript/aspectRatioOverlay.js
@@ -81,7 +81,7 @@ function dimensionChange(e, is_width, is_height) {
}
-onUiUpdate(function() {
+onAfterUiUpdate(function() {
var arPreviewRect = gradioApp().querySelector('#imageARPreview');
if (arPreviewRect) {
arPreviewRect.style.display = 'none';
diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js
index f14af1d4..d60a10c4 100644
--- a/javascript/contextMenus.js
+++ b/javascript/contextMenus.js
@@ -167,6 +167,4 @@ var addContextMenuEventListener = initResponse[2];
})();
//End example Context Menu Items
-onUiUpdate(function() {
- addContextMenuEventListener();
-});
+onAfterUiUpdate(addContextMenuEventListener);
diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js
index 77a24a07..5803daea 100644
--- a/javascript/dragdrop.js
+++ b/javascript/dragdrop.js
@@ -48,12 +48,27 @@ function dropReplaceImage(imgWrap, files) {
}
}
+function eventHasFiles(e) {
+ if (!e.dataTransfer || !e.dataTransfer.files) return false;
+ if (e.dataTransfer.files.length > 0) return true;
+ if (e.dataTransfer.items.length > 0 && e.dataTransfer.items[0].kind == "file") return true;
+
+ return false;
+}
+
+function dragDropTargetIsPrompt(target) {
+ if (target?.placeholder && target?.placeholder.indexOf("Prompt") >= 0) return true;
+ if (target?.parentNode?.parentNode?.className?.indexOf("prompt") > 0) return true;
+ return false;
+}
+
window.document.addEventListener('dragover', e => {
const target = e.composedPath()[0];
- const imgWrap = target.closest('[data-testid="image"]');
- if (!imgWrap && target.placeholder && target.placeholder.indexOf("Prompt") == -1) {
- return;
- }
+ if (!eventHasFiles(e)) return;
+
+ var targetImage = target.closest('[data-testid="image"]');
+ if (!dragDropTargetIsPrompt(target) && !targetImage) return;
+
e.stopPropagation();
e.preventDefault();
e.dataTransfer.dropEffect = 'copy';
@@ -61,17 +76,31 @@ window.document.addEventListener('dragover', e => {
window.document.addEventListener('drop', e => {
const target = e.composedPath()[0];
- if (target.placeholder.indexOf("Prompt") == -1) {
- return;
+ if (!eventHasFiles(e)) return;
+
+ if (dragDropTargetIsPrompt(target)) {
+ e.stopPropagation();
+ e.preventDefault();
+
+ let prompt_target = get_tab_index('tabs') == 1 ? "img2img_prompt_image" : "txt2img_prompt_image";
+
+ const imgParent = gradioApp().getElementById(prompt_target);
+ const files = e.dataTransfer.files;
+ const fileInput = imgParent.querySelector('input[type="file"]');
+ if (fileInput) {
+ fileInput.files = files;
+ fileInput.dispatchEvent(new Event('change'));
+ }
}
- const imgWrap = target.closest('[data-testid="image"]');
- if (!imgWrap) {
+
+ var targetImage = target.closest('[data-testid="image"]');
+ if (targetImage) {
+ e.stopPropagation();
+ e.preventDefault();
+ const files = e.dataTransfer.files;
+ dropReplaceImage(targetImage, files);
return;
}
- e.stopPropagation();
- e.preventDefault();
- const files = e.dataTransfer.files;
- dropReplaceImage(imgWrap, files);
});
window.addEventListener('paste', e => {
diff --git a/javascript/generationParams.js b/javascript/generationParams.js
index a877f8a5..7c0fd221 100644
--- a/javascript/generationParams.js
+++ b/javascript/generationParams.js
@@ -1,7 +1,7 @@
// attaches listeners to the txt2img and img2img galleries to update displayed generation param text when the image changes
let txt2img_gallery, img2img_gallery, modal = undefined;
-onUiUpdate(function() {
+onAfterUiUpdate(function() {
if (!txt2img_gallery) {
txt2img_gallery = attachGalleryListeners("txt2img");
}
diff --git a/javascript/hints.js b/javascript/hints.js
index 46f342cb..05ae5f22 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -116,17 +116,25 @@ var titles = {
"Negative Guidance minimum sigma": "Skip negative prompt for steps where image is already mostly denoised; the higher this value, the more skips there will be; provides increased performance in exchange for minor quality reduction."
};
-function updateTooltipForSpan(span) {
- if (span.title) return; // already has a title
+function updateTooltip(element) {
+ if (element.title) return; // already has a title
- let tooltip = localization[titles[span.textContent]] || titles[span.textContent];
+ let text = element.textContent;
+ let tooltip = localization[titles[text]] || titles[text];
if (!tooltip) {
- tooltip = localization[titles[span.value]] || titles[span.value];
+ let value = element.value;
+ if (value) tooltip = localization[titles[value]] || titles[value];
}
if (!tooltip) {
- for (const c of span.classList) {
+ // Gradio dropdown options have `data-value`.
+ let dataValue = element.dataset.value;
+ if (dataValue) tooltip = localization[titles[dataValue]] || titles[dataValue];
+ }
+
+ if (!tooltip) {
+ for (const c of element.classList) {
if (c in titles) {
tooltip = localization[titles[c]] || titles[c];
break;
@@ -135,34 +143,53 @@ function updateTooltipForSpan(span) {
}
if (tooltip) {
- span.title = tooltip;
+ element.title = tooltip;
}
}
-function updateTooltipForSelect(select) {
- if (select.onchange != null) return;
+// Nodes to check for adding tooltips.
+const tooltipCheckNodes = new Set();
+// Timer for debouncing tooltip check.
+let tooltipCheckTimer = null;
- select.onchange = function() {
- select.title = localization[titles[select.value]] || titles[select.value] || "";
- };
+function processTooltipCheckNodes() {
+ for (const node of tooltipCheckNodes) {
+ updateTooltip(node);
+ }
+ tooltipCheckNodes.clear();
}
-var observedTooltipElements = {SPAN: 1, BUTTON: 1, SELECT: 1, P: 1};
-
-onUiUpdate(function(m) {
- m.forEach(function(record) {
- record.addedNodes.forEach(function(node) {
- if (observedTooltipElements[node.tagName]) {
- updateTooltipForSpan(node);
- }
- if (node.tagName == "SELECT") {
- updateTooltipForSelect(node);
+onUiUpdate(function(mutationRecords) {
+ for (const record of mutationRecords) {
+ if (record.type === "childList" && record.target.classList.contains("options")) {
+ // This smells like a Gradio dropdown menu having changed,
+ // so let's enqueue an update for the input element that shows the current value.
+ let wrap = record.target.parentNode;
+ let input = wrap?.querySelector("input");
+ if (input) {
+ input.title = ""; // So we'll even have a chance to update it.
+ tooltipCheckNodes.add(input);
}
-
- if (node.querySelectorAll) {
- node.querySelectorAll('span, button, select, p').forEach(updateTooltipForSpan);
- node.querySelectorAll('select').forEach(updateTooltipForSelect);
+ }
+ for (const node of record.addedNodes) {
+ if (node.nodeType === Node.ELEMENT_NODE && !node.classList.contains("hide")) {
+ if (!node.title) {
+ if (
+ node.tagName === "SPAN" ||
+ node.tagName === "BUTTON" ||
+ node.tagName === "P" ||
+ node.tagName === "INPUT" ||
+ (node.tagName === "LI" && node.classList.contains("item")) // Gradio dropdown item
+ ) {
+ tooltipCheckNodes.add(node);
+ }
+ }
+ node.querySelectorAll('span, button, p').forEach(n => tooltipCheckNodes.add(n));
}
- });
- });
+ }
+ }
+ if (tooltipCheckNodes.size) {
+ clearTimeout(tooltipCheckTimer);
+ tooltipCheckTimer = setTimeout(processTooltipCheckNodes, 1000);
+ }
});
diff --git a/javascript/imageMaskFix.js b/javascript/imageMaskFix.js
index 3c9b8a6f..900c56f3 100644
--- a/javascript/imageMaskFix.js
+++ b/javascript/imageMaskFix.js
@@ -39,5 +39,5 @@ function imageMaskResize() {
});
}
-onUiUpdate(imageMaskResize);
+onAfterUiUpdate(imageMaskResize);
window.addEventListener('resize', imageMaskResize);
diff --git a/javascript/imageParams.js b/javascript/imageParams.js
deleted file mode 100644
index 057e2d39..00000000
--- a/javascript/imageParams.js
+++ /dev/null
@@ -1,18 +0,0 @@
-window.onload = (function() {
- window.addEventListener('drop', e => {
- const target = e.composedPath()[0];
- if (target.placeholder.indexOf("Prompt") == -1) return;
-
- let prompt_target = get_tab_index('tabs') == 1 ? "img2img_prompt_image" : "txt2img_prompt_image";
-
- e.stopPropagation();
- e.preventDefault();
- const imgParent = gradioApp().getElementById(prompt_target);
- const files = e.dataTransfer.files;
- const fileInput = imgParent.querySelector('input[type="file"]');
- if (fileInput) {
- fileInput.files = files;
- fileInput.dispatchEvent(new Event('change'));
- }
- });
-});
diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js
index 78e24eb9..677e95c1 100644
--- a/javascript/imageviewer.js
+++ b/javascript/imageviewer.js
@@ -170,7 +170,7 @@ function modalTileImageToggle(event) {
event.stopPropagation();
}
-onUiUpdate(function() {
+onAfterUiUpdate(function() {
var fullImg_preview = gradioApp().querySelectorAll('.gradio-gallery > div > img');
if (fullImg_preview != null) {
fullImg_preview.forEach(setupImageForLightbox);
diff --git a/javascript/notification.js b/javascript/notification.js
index a68a76f2..76c5715d 100644
--- a/javascript/notification.js
+++ b/javascript/notification.js
@@ -4,7 +4,7 @@ let lastHeadImg = null;
let notificationButton = null;
-onUiUpdate(function() {
+onAfterUiUpdate(function() {
if (notificationButton == null) {
notificationButton = gradioApp().getElementById('request_notifications');
diff --git a/javascript/token-counters.js b/javascript/token-counters.js
new file mode 100644
index 00000000..9d81a723
--- /dev/null
+++ b/javascript/token-counters.js
@@ -0,0 +1,83 @@
+let promptTokenCountDebounceTime = 800;
+let promptTokenCountTimeouts = {};
+var promptTokenCountUpdateFunctions = {};
+
+function update_txt2img_tokens(...args) {
+ // Called from Gradio
+ update_token_counter("txt2img_token_button");
+ if (args.length == 2) {
+ return args[0];
+ }
+ return args;
+}
+
+function update_img2img_tokens(...args) {
+ // Called from Gradio
+ update_token_counter("img2img_token_button");
+ if (args.length == 2) {
+ return args[0];
+ }
+ return args;
+}
+
+function update_token_counter(button_id) {
+ if (opts.disable_token_counters) {
+ return;
+ }
+ if (promptTokenCountTimeouts[button_id]) {
+ clearTimeout(promptTokenCountTimeouts[button_id]);
+ }
+ promptTokenCountTimeouts[button_id] = setTimeout(
+ () => gradioApp().getElementById(button_id)?.click(),
+ promptTokenCountDebounceTime,
+ );
+}
+
+
+function recalculatePromptTokens(name) {
+ promptTokenCountUpdateFunctions[name]?.();
+}
+
+function recalculate_prompts_txt2img() {
+ // Called from Gradio
+ recalculatePromptTokens('txt2img_prompt');
+ recalculatePromptTokens('txt2img_neg_prompt');
+ return Array.from(arguments);
+}
+
+function recalculate_prompts_img2img() {
+ // Called from Gradio
+ recalculatePromptTokens('img2img_prompt');
+ recalculatePromptTokens('img2img_neg_prompt');
+ return Array.from(arguments);
+}
+
+function setupTokenCounting(id, id_counter, id_button) {
+ var prompt = gradioApp().getElementById(id);
+ var counter = gradioApp().getElementById(id_counter);
+ var textarea = gradioApp().querySelector(`#${id} > label > textarea`);
+
+ if (opts.disable_token_counters) {
+ counter.style.display = "none";
+ return;
+ }
+
+ if (counter.parentElement == prompt.parentElement) {
+ return;
+ }
+
+ prompt.parentElement.insertBefore(counter, prompt);
+ prompt.parentElement.style.position = "relative";
+
+ promptTokenCountUpdateFunctions[id] = function() {
+ update_token_counter(id_button);
+ };
+ textarea.addEventListener("input", promptTokenCountUpdateFunctions[id]);
+}
+
+function setupTokenCounters() {
+ setupTokenCounting('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button');
+ setupTokenCounting('txt2img_neg_prompt', 'txt2img_negative_token_counter', 'txt2img_negative_token_button');
+ setupTokenCounting('img2img_prompt', 'img2img_token_counter', 'img2img_token_button');
+ setupTokenCounting('img2img_neg_prompt', 'img2img_negative_token_counter', 'img2img_negative_token_button');
+}
diff --git a/javascript/ui.js b/javascript/ui.js
index 648a5290..d70a681b 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -248,29 +248,8 @@ function confirm_clear_prompt(prompt, negative_prompt) {
}
-var promptTokecountUpdateFuncs = {};
-
-function recalculatePromptTokens(name) {
- if (promptTokecountUpdateFuncs[name]) {
- promptTokecountUpdateFuncs[name]();
- }
-}
-
-function recalculate_prompts_txt2img() {
- recalculatePromptTokens('txt2img_prompt');
- recalculatePromptTokens('txt2img_neg_prompt');
- return Array.from(arguments);
-}
-
-function recalculate_prompts_img2img() {
- recalculatePromptTokens('img2img_prompt');
- recalculatePromptTokens('img2img_neg_prompt');
- return Array.from(arguments);
-}
-
-
var opts = {};
-onUiUpdate(function() {
+onAfterUiUpdate(function() {
if (Object.keys(opts).length != 0) return;
var json_elem = gradioApp().getElementById('settings_json');
@@ -302,28 +281,7 @@ onUiUpdate(function() {
json_elem.parentElement.style.display = "none";
- function registerTextarea(id, id_counter, id_button) {
- var prompt = gradioApp().getElementById(id);
- var counter = gradioApp().getElementById(id_counter);
- var textarea = gradioApp().querySelector("#" + id + " > label > textarea");
-
- if (counter.parentElement == prompt.parentElement) {
- return;
- }
-
- prompt.parentElement.insertBefore(counter, prompt);
- prompt.parentElement.style.position = "relative";
-
- promptTokecountUpdateFuncs[id] = function() {
- update_token_counter(id_button);
- };
- textarea.addEventListener("input", promptTokecountUpdateFuncs[id]);
- }
-
- registerTextarea('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button');
- registerTextarea('txt2img_neg_prompt', 'txt2img_negative_token_counter', 'txt2img_negative_token_button');
- registerTextarea('img2img_prompt', 'img2img_token_counter', 'img2img_token_button');
- registerTextarea('img2img_neg_prompt', 'img2img_negative_token_counter', 'img2img_negative_token_button');
+ setupTokenCounters();
var show_all_pages = gradioApp().getElementById('settings_show_all_pages');
var settings_tabs = gradioApp().querySelector('#settings div');
@@ -354,33 +312,6 @@ onOptionsChanged(function() {
});
let txt2img_textarea, img2img_textarea = undefined;
-let wait_time = 800;
-let token_timeouts = {};
-
-function update_txt2img_tokens(...args) {
- update_token_counter("txt2img_token_button");
- if (args.length == 2) {
- return args[0];
- }
- return args;
-}
-
-function update_img2img_tokens(...args) {
- update_token_counter(
- "img2img_token_button"
- );
- if (args.length == 2) {
- return args[0];
- }
- return args;
-}
-
-function update_token_counter(button_id) {
- if (token_timeouts[button_id]) {
- clearTimeout(token_timeouts[button_id]);
- }
- token_timeouts[button_id] = setTimeout(() => gradioApp().getElementById(button_id)?.click(), wait_time);
-}
function restart_reload() {
document.body.innerHTML = '<h1 style="font-family:monospace;margin-top:20%;color:lightgray;text-align:center;">Reloading...</h1>';
diff --git a/modules/api/api.py b/modules/api/api.py
index eee99bbb..6a456861 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -700,4 +700,4 @@ class Api:
def launch(self, server_name, port):
self.app.include_router(self.router)
- uvicorn.run(self.app, host=server_name, port=port)
+ uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=0)
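
For reference, a minimal self-contained sketch of the same uvicorn option (made-up app and port, not the webui API): timeout_keep_alive is forwarded to uvicorn.Config, and 0 stops idle keep-alive connections from being held open between requests.

import uvicorn
from fastapi import FastAPI

app = FastAPI()


@app.get("/ping")
def ping():
    return {"ok": True}


if __name__ == "__main__":
    # timeout_keep_alive is passed through to uvicorn.Config; the default is
    # 5 seconds, and 0 closes idle keep-alive connections immediately.
    uvicorn.run(app, host="127.0.0.1", port=7861, timeout_keep_alive=0)
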
diff --git a/modules/devices.py b/modules/devices.py
index d8a34a0f..1ed6ffdc 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -1,5 +1,7 @@
import sys
import contextlib
+from functools import lru_cache
+
import torch
from modules import errors
@@ -154,3 +156,19 @@ def test_for_nans(x, where):
message += " Use --disable-nan-check commandline argument to disable this check."
raise NansException(message)
+
+
+@lru_cache
+def first_time_calculation():
+ """
+    Just do any calculation with PyTorch layers - the first time this is done it allocates about 700MB of memory and
+    spends about 2.7 seconds doing that, at least with NVIDIA.
+ """
+
+ x = torch.zeros((1, 1)).to(device, dtype)
+ linear = torch.nn.Linear(1, 1).to(device, dtype)
+ linear(x)
+
+ x = torch.zeros((1, 1, 3, 3)).to(device, dtype)
+ conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype)
+ conv2d(x)
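
A standalone sketch of the warm-up idea above (illustrative, not the webui module): device and dtype are assumed here, whereas modules.devices supplies them in the real code; the throwaway Linear and Conv2d passes trigger the one-time allocation, and lru_cache makes repeat calls free.

from functools import lru_cache
import time

import torch

# Assumed for this sketch; the webui takes these from modules.devices.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.float16 if device.type == "cuda" else torch.float32


@lru_cache
def first_time_calculation():
    x = torch.zeros((1, 1)).to(device, dtype)
    torch.nn.Linear(1, 1).to(device, dtype)(x)

    x = torch.zeros((1, 1, 3, 3)).to(device, dtype)
    torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype)(x)


if __name__ == "__main__":
    for label in ("first call", "cached call"):
        start = time.time()
        first_time_calculation()
        print(f"{label}: {time.time() - start:.3f}s")
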
diff --git a/modules/errors.py b/modules/errors.py
index f6b80dbb..da4694f8 100644
--- a/modules/errors.py
+++ b/modules/errors.py
@@ -12,9 +12,13 @@ def print_error_explanation(message):
print('=' * max_len, file=sys.stderr)
-def display(e: Exception, task):
+def display(e: Exception, task, *, full_traceback=False):
print(f"{task or 'error'}: {type(e).__name__}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ te = traceback.TracebackException.from_exception(e)
+ if full_traceback:
+        # include frames leading up to the try/except block that caught the exception
+ te.stack = traceback.StackSummary(traceback.extract_stack()[:-2] + te.stack)
+ print(*te.format(), sep="", file=sys.stderr)
message = str(e)
if "copying a param with shape torch.Size([640, 1024]) from checkpoint, the shape in current model is torch.Size([640, 768])" in message:
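
An illustrative, self-contained sketch of what full_traceback adds (the helper names below are made up): TracebackException.from_exception only records frames from the try block downward, so the caller frames are spliced in from traceback.extract_stack().

import sys
import traceback


def display(e: Exception, task, *, full_traceback=False):
    print(f"{task or 'error'}: {type(e).__name__}", file=sys.stderr)
    te = traceback.TracebackException.from_exception(e)
    if full_traceback:
        # extract_stack() describes the current call chain; drop the last two frames
        # (this function and the except block that called it) before splicing.
        te.stack = traceback.StackSummary(traceback.extract_stack()[:-2] + list(te.stack))
    print(*te.format(), sep="", file=sys.stderr)


def worker():
    raise ValueError("boom")


def run_task():
    try:
        worker()
    except Exception as e:
        # With full_traceback=True the printed trace also shows main() -> run_task(),
        # not just run_task() -> worker().
        display(e, "demo task", full_traceback=True)


def main():
    run_task()


if __name__ == "__main__":
    main()
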
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index d5f0a49b..81aef502 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -306,6 +306,18 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
if "RNG" not in res:
res["RNG"] = "GPU"
+ if "Schedule type" not in res:
+ res["Schedule type"] = "Automatic"
+
+ if "Schedule max sigma" not in res:
+ res["Schedule max sigma"] = 0
+
+ if "Schedule min sigma" not in res:
+ res["Schedule min sigma"] = 0
+
+ if "Schedule rho" not in res:
+ res["Schedule rho"] = 0
+
return res
@@ -318,6 +330,10 @@ infotext_to_setting_name_mapping = [
('Conditional mask weight', 'inpainting_mask_weight'),
('Model hash', 'sd_model_checkpoint'),
('ENSD', 'eta_noise_seed_delta'),
+ ('Schedule type', 'k_sched_type'),
+ ('Schedule max sigma', 'sigma_max'),
+ ('Schedule min sigma', 'sigma_min'),
+ ('Schedule rho', 'rho'),
('Noise multiplier', 'initial_noise_multiplier'),
('Eta', 'eta_ancestral'),
('Eta DDIM', 'eta_ddim'),
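
A tiny standalone sketch of the backfill above, with a made-up parsed infotext dict: images saved before this change carry no scheduler fields, so explicit defaults are inserted and later routed to the k_sched_type/sigma_min/sigma_max/rho settings rather than leaving stale UI values.

# Parsed infotext from an older image; no "Schedule ..." entries present.
res = {"Steps": "20", "Sampler": "Euler a", "CFG scale": "7"}

defaults = {
    "Schedule type": "Automatic",
    "Schedule max sigma": 0,
    "Schedule min sigma": 0,
    "Schedule rho": 0,
}
for key, value in defaults.items():
    res.setdefault(key, value)

print(res["Schedule type"], res["Schedule rho"])  # Automatic 0
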
diff --git a/modules/images.py b/modules/images.py
index 4e8cd993..e21e554c 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -21,6 +21,8 @@ from modules import sd_samplers, shared, script_callbacks, errors
from modules.paths_internal import roboto_ttf_file
from modules.shared import opts
+import modules.sd_vae as sd_vae
+
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
@@ -336,8 +338,20 @@ def sanitize_filename_part(text, replace_spaces=True):
class FilenameGenerator:
+ def get_vae_filename(self): #get the name of the VAE file.
+ if sd_vae.loaded_vae_file is None:
+ return "NoneType"
+ file_name = os.path.basename(sd_vae.loaded_vae_file)
+ split_file_name = file_name.split('.')
+ if len(split_file_name) > 1 and split_file_name[0] == '':
+            return split_file_name[1]  # filename starts with "." (e.g. a hidden file): use the segment after the leading dot
+ else:
+ return split_file_name[0]
+
replacements = {
'seed': lambda self: self.seed if self.seed is not None else '',
+ 'seed_first': lambda self: self.seed if self.p.batch_size == 1 else self.p.all_seeds[0],
+ 'seed_last': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.batch_size == 1 else self.p.all_seeds[-1],
'steps': lambda self: self.p and self.p.steps,
'cfg': lambda self: self.p and self.p.cfg_scale,
'width': lambda self: self.image.width,
@@ -354,19 +368,23 @@ class FilenameGenerator:
'prompt_no_styles': lambda self: self.prompt_no_style(),
'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False),
'prompt_words': lambda self: self.prompt_words(),
- 'batch_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.batch_size == 1 else self.p.batch_index + 1,
- 'generation_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.n_iter == 1 and self.p.batch_size == 1 else self.p.iteration * self.p.batch_size + self.p.batch_index + 1,
+ 'batch_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.batch_size == 1 or self.zip else self.p.batch_index + 1,
+ 'batch_size': lambda self: self.p.batch_size,
+ 'generation_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if (self.p.n_iter == 1 and self.p.batch_size == 1) or self.zip else self.p.iteration * self.p.batch_size + self.p.batch_index + 1,
'hasprompt': lambda self, *args: self.hasprompt(*args), # accepts formats:[hasprompt<prompt1|default><prompt2>..]
'clip_skip': lambda self: opts.data["CLIP_stop_at_last_layers"],
'denoising': lambda self: self.p.denoising_strength if self.p and self.p.denoising_strength else NOTHING_AND_SKIP_PREVIOUS_TEXT,
+ 'vae_filename': lambda self: self.get_vae_filename(),
+
}
default_time_format = '%Y%m%d%H%M%S'
- def __init__(self, p, seed, prompt, image):
+ def __init__(self, p, seed, prompt, image, zip=False):
self.p = p
self.seed = seed
self.prompt = prompt
self.image = image
+ self.zip = zip
def hasprompt(self, *args):
lower = self.prompt.lower()
@@ -665,9 +683,10 @@ def read_info_from_image(image):
items['exif comment'] = exif_comment
geninfo = exif_comment
- for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif',
- 'loop', 'background', 'timestamp', 'duration']:
- items.pop(field, None)
+ for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif',
+ 'loop', 'background', 'timestamp', 'duration', 'progressive', 'progression',
+ 'icc_profile', 'chromaticity']:
+ items.pop(field, None)
if items.get("Software", None) == "NovelAI":
try:
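
A small sketch of the stem extraction performed by get_vae_filename above, using made-up paths: the name is split on '.', and a leading dot (empty first segment) falls through to the segment after it.

import os


def vae_stem(path):
    file_name = os.path.basename(path)
    parts = file_name.split('.')
    if len(parts) > 1 and parts[0] == '':
        return parts[1]  # dot-prefixed names such as ".hidden.vae.pt"
    return parts[0]


print(vae_stem("/models/VAE/vae-ft-mse-840000-ema.safetensors"))  # vae-ft-mse-840000-ema
print(vae_stem("/models/VAE/.hidden.vae.pt"))                     # hidden
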
diff --git a/modules/img2img.py b/modules/img2img.py
index d704bf90..4c12c2c5 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -92,7 +92,8 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
elif mode == 2: # inpaint
image, mask = init_img_with_mask["image"], init_img_with_mask["mask"]
alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
- mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')
+ mask = mask.convert('L').point(lambda x: 255 if x > 128 else 0, mode='1')
+ mask = ImageChops.lighter(alpha_mask, mask).convert('L')
image = image.convert("RGB")
elif mode == 3: # inpaint sketch
image = inpaint_color_sketch
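
A self-contained sketch of the inpaint mask change above, on a synthetic image and brush stroke: the drawn mask is now thresholded to pure black/white at 128 before being merged with the alpha-derived mask, so soft grey brush edges no longer leak into the final mask.

from PIL import Image, ImageChops, ImageOps

# Stand-ins for init_img_with_mask["image"] and init_img_with_mask["mask"].
image = Image.new("RGBA", (64, 64), (255, 0, 0, 255))
mask = Image.new("L", (64, 64), 0)
mask.paste(200, (16, 16, 48, 48))  # a partially opaque brush stroke

alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
mask = mask.convert('L').point(lambda x: 255 if x > 128 else 0, mode='1')
mask = ImageChops.lighter(alpha_mask, mask).convert('L')

print(mask.getextrema())  # (0, 255) - the merged mask is strictly binary
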
diff --git a/modules/processing.py b/modules/processing.py
index 29a3743f..b75f2515 100644
--- a/modules/processing.py
+++ b/