35 files changed, 550 insertions, 288 deletions
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8d2f96e5..1ae81232 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,34 @@
+## Upcoming 1.2.0
+
+### Features:
+ * do not wait for the stable diffusion model to load at startup
+ * add filename patterns: [denoising]
+ * directory hiding for extra networks: dirs starting with . will hide their cards on extra network tabs unless specifically searched for
+ * Lora: for the `<...>` text in prompt, use the name of the Lora from the file's metadata, if present, instead of the filename (both can be used to activate the lora)
+ * Lora: read infotext params from kohya-ss's extension parameters if they are present and if that extension is not active
+ * Lora: fix some Loras not working (ones that have a 3x3 convolution layer)
+ * Lora: add an option to use the old method of applying loras (producing the same results as with kohya-ss)
+
+### Minor:
+ * --subpath option for gradio for use with reverse proxy
+ * linux/OSX: use existing virtualenv if already active (the VIRTUAL_ENV environment variable)
+ * possible frontend optimization: do not apply localizations if there are none
+ * Add extra `None` option for VAE in XYZ plot
+ * print error to console when batch processing in img2img fails
+ * create HTML for extra network pages only on demand
+ * allow directories starting with . to still list their models for Lora, checkpoints, etc.
+
+### Extensions:
+ * Tooltip localization support
+
+### Bug Fixes:
+ * re-add /docs endpoint
+ * fix gamepad navigation
+ * make the lightbox fullscreen image function properly
+ * fix squished thumbnails in extras tab
+ * keep "search" filter for extra networks when user refreshes the tab (previously it showed everything after you refreshed)
+
+
## 1.1.1
### Bug Fixes:
* fix an error that prevents running webui on torch<2.0 without --disable-safe-unpickle
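To illustrate the Lora alias feature listed in the 1.2.0 changelog above, here is a minimal sketch (not the actual webui code) of registering a Lora under both its filename and the `ss_output_name` stored in its metadata, so either name resolves the `<lora:...>` prompt tag; the class and dict names mirror the diff below, everything else is simplified.

```python
# Minimal sketch, assuming the safetensors metadata has already been read into a dict.
class LoraOnDisk:
    def __init__(self, name, filename, metadata=None):
        self.name = name                  # filename without extension
        self.filename = filename
        self.metadata = metadata or {}
        # alias comes from the training metadata if present, otherwise the filename
        self.alias = self.metadata.get('ss_output_name', self.name)

available_loras = {}         # keyed by filename only
available_lora_aliases = {}  # keyed by filename and by alias

def register(entry):
    available_loras[entry.name] = entry
    available_lora_aliases[entry.name] = entry
    available_lora_aliases[entry.alias] = entry

register(LoraOnDisk("my_lora_v3", "/loras/my_lora_v3.safetensors", {"ss_output_name": "myLora"}))
assert available_lora_aliases["myLora"] is available_lora_aliases["my_lora_v3"]
```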
diff --git a/extensions-builtin/Lora/extra_networks_lora.py b/extensions-builtin/Lora/extra_networks_lora.py
index 45f899fc..ccb249ac 100644
--- a/extensions-builtin/Lora/extra_networks_lora.py
+++ b/extensions-builtin/Lora/extra_networks_lora.py
@@ -1,6 +1,7 @@
from modules import extra_networks, shared
import lora
+
class ExtraNetworkLora(extra_networks.ExtraNetwork):
def __init__(self):
super().__init__('lora')
diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index 6f246921..d488b5ae 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -4,7 +4,7 @@ import re
import torch
from typing import Union
-from modules import shared, devices, sd_models, errors
+from modules import shared, devices, sd_models, errors, scripts
metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}
@@ -93,6 +93,7 @@ class LoraOnDisk:
self.metadata = m
self.ssmd_cover_images = self.metadata.pop('ssmd_cover_images', None) # those are cover images and they are too big to display in UI as text
+ self.alias = self.metadata.get('ss_output_name', self.name)
class LoraModule:
@@ -165,8 +166,10 @@ def load_lora(name, filename):
module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
elif type(sd_module) == torch.nn.MultiheadAttention:
module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
- elif type(sd_module) == torch.nn.Conv2d:
+ elif type(sd_module) == torch.nn.Conv2d and weight.shape[2:] == (1, 1):
module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
+ elif type(sd_module) == torch.nn.Conv2d and weight.shape[2:] == (3, 3):
+ module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (3, 3), bias=False)
else:
print(f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}')
continue
@@ -199,11 +202,11 @@ def load_loras(names, multipliers=None):
loaded_loras.clear()
- loras_on_disk = [available_loras.get(name, None) for name in names]
+ loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
if any([x is None for x in loras_on_disk]):
list_available_loras()
- loras_on_disk = [available_loras.get(name, None) for name in names]
+ loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
for i, name in enumerate(names):
lora = already_loaded.get(name, None)
@@ -232,6 +235,8 @@ def lora_calc_updown(lora, module, target):
if up.shape[2:] == (1, 1) and down.shape[2:] == (1, 1):
updown = (up.squeeze(2).squeeze(2) @ down.squeeze(2).squeeze(2)).unsqueeze(2).unsqueeze(3)
+ elif up.shape[2:] == (3, 3) or down.shape[2:] == (3, 3):
+ updown = torch.nn.functional.conv2d(down.permute(1, 0, 2, 3), up).permute(1, 0, 2, 3)
else:
updown = up @ down
@@ -240,6 +245,19 @@ def lora_calc_updown(lora, module, target):
return updown
+def lora_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
+ weights_backup = getattr(self, "lora_weights_backup", None)
+
+ if weights_backup is None:
+ return
+
+ if isinstance(self, torch.nn.MultiheadAttention):
+ self.in_proj_weight.copy_(weights_backup[0])
+ self.out_proj.weight.copy_(weights_backup[1])
+ else:
+ self.weight.copy_(weights_backup)
+
+
def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
"""
Applies the currently selected set of Loras to the weights of torch layer self.
@@ -264,12 +282,7 @@ def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.Mu
self.lora_weights_backup = weights_backup
if current_names != wanted_names:
- if weights_backup is not None:
- if isinstance(self, torch.nn.MultiheadAttention):
- self.in_proj_weight.copy_(weights_backup[0])
- self.out_proj.weight.copy_(weights_backup[1])
- else:
- self.weight.copy_(weights_backup)
+ lora_restore_weights_from_backup(self)
for lora in loaded_loras:
module = lora.modules.get(lora_layer_name, None)
@@ -300,12 +313,45 @@ def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.Mu
setattr(self, "lora_current_names", wanted_names)
+def lora_forward(module, input, original_forward):
+ """
+ Old way of applying Lora by executing operations during layer's forward.
+ Stacking many loras this way results in big performance degradation.
+ """
+
+ if len(loaded_loras) == 0:
+ return original_forward(module, input)
+
+ input = devices.cond_cast_unet(input)
+
+ lora_restore_weights_from_backup(module)
+ lora_reset_cached_weight(module)
+
+ res = original_forward(module, input)
+
+ lora_layer_name = getattr(module, 'lora_layer_name', None)
+ for lora in loaded_loras:
+ module = lora.modules.get(lora_layer_name, None)
+ if module is None:
+ continue
+
+ module.up.to(device=devices.device)
+ module.down.to(device=devices.device)
+
+ res = res + module.up(module.down(input)) * lora.multiplier * (module.alpha / module.up.weight.shape[1] if module.alpha else 1.0)
+
+ return res
+
+
def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
setattr(self, "lora_current_names", ())
setattr(self, "lora_weights_backup", None)
def lora_Linear_forward(self, input):
+ if shared.opts.lora_functional:
+ return lora_forward(self, input, torch.nn.Linear_forward_before_lora)
+
lora_apply_weights(self)
return torch.nn.Linear_forward_before_lora(self, input)
@@ -318,6 +364,9 @@ def lora_Linear_load_state_dict(self, *args, **kwargs):
def lora_Conv2d_forward(self, input):
+ if shared.opts.lora_functional:
+ return lora_forward(self, input, torch.nn.Conv2d_forward_before_lora)
+
lora_apply_weights(self)
return torch.nn.Conv2d_forward_before_lora(self, input)
@@ -343,24 +392,60 @@ def lora_MultiheadAttention_load_state_dict(self, *args, **kwargs):
def list_available_loras():
available_loras.clear()
+ available_lora_aliases.clear()
os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
- candidates = \
- glob.glob(os.path.join(shared.cmd_opts.lora_dir, '**/*.pt'), recursive=True) + \
- glob.glob(os.path.join(shared.cmd_opts.lora_dir, '**/*.safetensors'), recursive=True) + \
- glob.glob(os.path.join(shared.cmd_opts.lora_dir, '**/*.ckpt'), recursive=True)
-
+ candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
for filename in sorted(candidates, key=str.lower):
if os.path.isdir(filename):
continue
name = os.path.splitext(os.path.basename(filename))[0]
+ entry = LoraOnDisk(name, filename)
+
+ available_loras[name] = entry
+
+ available_lora_aliases[name] = entry
+ available_lora_aliases[entry.alias] = entry
+
+
+re_lora_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")
+
+
+def infotext_pasted(infotext, params):
+ if "AddNet Module 1" in [x[1] for x in scripts.scripts_txt2img.infotext_fields]:
+ return # if the other extension is active, it will handle those fields, no need to do anything
+
+ added = []
+
+ for k, v in params.items():
+ if not k.startswith("AddNet Model "):
+ continue
+
+ num = k[13:]
+
+ if params.get("AddNet Module " + num) != "LoRA":
+ continue
+
+ name = params.get("AddNet Model " + num)
+ if name is None:
+ continue
+
+ m = re_lora_name.match(name)
+ if m:
+ name = m.group(1)
+
+ multiplier = params.get("AddNet Weight A " + num, "1.0")
+
+ added.append(f"<lora:{name}:{multiplier}>")
- available_loras[name] = LoraOnDisk(name, filename)
+ if added:
+ params["Prompt"] += "\n" + "".join(added)
available_loras = {}
+available_lora_aliases = {}
loaded_loras = []
list_available_loras()
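As a side note on the 3x3-convolution support added to `lora_calc_updown` above: the merged delta weight is produced by convolving the low-rank factors with each other. A small, self-contained check of why that composition matches applying the down conv followed by the up conv (illustrative only; shapes are chosen arbitrarily and this snippet is not part of the diff):

```python
# Illustrative check of the 3x3 Lora merge used above (arbitrary shapes).
import torch
import torch.nn.functional as F

in_ch, rank, out_ch = 8, 4, 16
down = torch.randn(rank, in_ch, 3, 3)   # low-rank "down" conv weight
up = torch.randn(out_ch, rank, 1, 1)    # "up" conv weight

# merged delta weight, computed as in lora_calc_updown
updown = F.conv2d(down.permute(1, 0, 2, 3), up).permute(1, 0, 2, 3)  # (out_ch, in_ch, 3, 3)

x = torch.randn(1, in_ch, 32, 32)
two_step = F.conv2d(F.conv2d(x, down, padding=1), up)    # down conv, then up conv
one_step = F.conv2d(x, updown, padding=1)                 # single merged conv
print((two_step - one_step).abs().max())                  # ~0 up to float rounding
```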
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 3fc38ab9..a67b8a69 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -49,8 +49,14 @@ torch.nn.MultiheadAttention._load_from_state_dict = lora.lora_MultiheadAttention
script_callbacks.on_model_loaded(lora.assign_lora_names_to_compvis_modules)
script_callbacks.on_script_unloaded(unload)
script_callbacks.on_before_ui(before_ui)
+script_callbacks.on_infotext_pasted(lora.infotext_pasted)
shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
"sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras),
}))
+
+
+shared.options_templates.update(shared.options_section(('compatibility', "Compatibility"), {
+ "lora_functional": shared.OptionInfo(False, "Lora: use old method that takes longer when you have multiple Loras active and produces same results as kohya-ss/sd-webui-additional-networks extension"),
+}))
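For context on the `on_infotext_pasted` callback registered above: when pasted generation parameters contain entries written by the kohya-ss additional-networks extension (and that extension is not active), `lora.infotext_pasted` rewrites them into `<lora:...>` prompt tags. A hedged example with made-up parameter values:

```python
# Hypothetical params dict as it might arrive in the infotext_pasted callback;
# the keys follow the "AddNet ..." naming handled by lora.infotext_pasted above.
params = {
    "Prompt": "a photo of a cat",
    "AddNet Module 1": "LoRA",
    "AddNet Model 1": "myLora(abcdef12)",   # trailing "(hash)" is stripped by re_lora_name
    "AddNet Weight A 1": "0.8",
}
# After the callback runs, the prompt gains the equivalent Lora tag:
# params["Prompt"] == "a photo of a cat\n<lora:myLora:0.8>"
```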
diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py
index 68b11332..a0edbc1e 100644
--- a/extensions-builtin/Lora/ui_extra_networks_lora.py
+++ b/extensions-builtin/Lora/ui_extra_networks_lora.py
@@ -21,7 +21,7 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
"preview": self.find_preview(path),
"description": self.find_description(path),
"search_term": self.search_terms_from_path(lora_on_disk.filename),
- "prompt": json.dumps(f"<lora:{name}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
+ "prompt": json.dumps(f"<lora:{lora_on_disk.alias}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
"local_preview": f"{path}.{shared.opts.samples_format}",
"metadata": json.dumps(lora_on_disk.metadata, indent=4) if lora_on_disk.metadata else None,
}
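To show what the changed `prompt` field above evaluates to: the stored string is a JavaScript expression executed client-side when a card is clicked, and it now inserts the metadata alias rather than the filename. A small illustration (the alias value is hypothetical):

```python
# Illustration only: reproduces the string built for the card's "prompt" field.
import json

alias = "myLora"  # hypothetical lora_on_disk.alias
prompt = json.dumps(f"<lora:{alias}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">")
print(prompt)
# "<lora:myLora:" + opts.extra_networks_default_multiplier + ">"
```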
diff --git a/html/extra-networks-card.html b/html/extra-networks-card.html
index ef4b613a..1d546217 100644
--- a/html/extra-networks-card.html
+++ b/html/extra-networks-card.html
@@ -6,7 +6,7 @@
<ul>
<a href="#" title="replace preview image with currently selected in gallery" onclick={save_card_preview}>replace preview</a>
</ul>
- <span style="display:none" class='search_term'>{search_term}</span>
+ <span style="display:none" class='search_term{serach_only}'>{search_term}</span>
</div>
<span class='name'>{name}</span>
<span class='description'>{description}</span>
diff --git a/javascript/aspectRatioOverlay.js b/javascript/aspectRatioOverlay.js
index a8278cca..5160081d 100644
--- a/javascript/aspectRatioOverlay.js
+++ b/javascript/aspectRatioOverlay.js
@@ -45,29 +45,24 @@ function dimensionChange(e, is_width, is_height){
var viewportOffset = targetElement.getBoundingClientRect();
- viewportscale = Math.min( targetElement.clientWidth/targetElement.naturalWidth, targetElement.clientHeight/targetElement.naturalHeight )
+ var viewportscale = Math.min( targetElement.clientWidth/targetElement.naturalWidth, targetElement.clientHeight/targetElement.naturalHeight )
- scaledx = targetElement.naturalWidth*viewportscale
- scaledy = targetElement.naturalHeight*viewportscale
+ var scaledx = targetElement.naturalWidth*viewportscale
+ var scaledy = targetElement.naturalHeight*viewportscale
- cleintRectTop = (viewportOffset.top+window.scrollY)
- cleintRectLeft = (viewportOffset.left+window.scrollX)
- cleintRectCentreY = cleintRectTop + (targetElement.clientHeight/2)
- cleintRectCentreX = cleintRectLeft + (targetElement.clientWidth/2)
+ var cleintRectTop = (viewportOffset.top+window.scrollY)
+ var cleintRectLeft = (viewportOffset.left+window.scrollX)
+ var cleintRectCentreY = cleintRectTop + (targetElement.clientHeight/2)
+ var cleintRectCentreX = cleintRectLeft + (targetElement.clientWidth/2)
- viewRectTop = cleintRectCentreY-(scaledy/2)
- viewRectLeft = cleintRectCentreX-(scaledx/2)
- arRectWidth = scaledx
- arRectHeight = scaledy
+ var arscale = Math.min( scaledx/currentWidth, scaledy/currentHeight )
+ var arscaledx = currentWidth*arscale
+ var arscaledy = currentHeight*arscale
- arscale = Math.min( arRectWidth/currentWidth, arRectHeight/currentHeight )
- arscaledx = currentWidth*arscale
- arscaledy = currentHeight*arscale
-
- arRectTop = cleintRectCentreY-(arscaledy/2)
- arRectLeft = cleintRectCentreX-(arscaledx/2)
- arRectWidth = arscaledx
- arRectHeight = arscaledy
+ var arRectTop = cleintRectCentreY-(arscaledy/2)
+ var arRectLeft = cleintRectCentreX-(arscaledx/2)
+ var arRectWidth = arscaledx
+ var arRectHeight = arscaledy
arPreviewRect.style.top = arRectTop+'px';
arPreviewRect.style.left = arRectLeft+'px';
diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js
index 9468c107..42f301ab 100644
--- a/javascript/contextMenus.js
+++ b/javascript/contextMenus.js
@@ -4,7 +4,7 @@ contextMenuInit = function(){
let menuSpecs = new Map();
const uid = function(){
- return Date.now().toString(36) + Math.random().toString(36).substr(2);
+ return Date.now().toString(36) + Math.random().toString(36).substring(2);
}
function showContextMenu(event,element,menuEntries){
@@ -16,8 +16,7 @@ contextMenuInit = function(){
oldMenu.remove()
}
- let tabButton = uiCurrentTab
- let baseStyle = window.getComputedStyle(tabButton)
+ let baseStyle = window.getComputedStyle(uiCurrentTab)
const contextMenu = document.createElement('nav')
contextMenu.id = "context-menu"
@@ -36,7 +35,7 @@ contextMenuInit = function(){
menuEntries.forEach(function(entry){
let contextMenuEntry = document.createElement('a')
contextMenuEntry.innerHTML = entry['name']
- contextMenuEntry.addEventListener("click", function(e) {
+ contextMenuEntry.addEventListener("click", function() {
entry['func']();
})
contextMenuList.append(contextMenuEntry);
@@ -63,7 +62,7 @@ contextMenuInit = function(){
function appendContextMenuOption(targetElementSelector,entryName,entryFunction){
- currentItems = menuSpecs.get(targetElementSelector)
+ var currentItems = menuSpecs.get(targetElementSelector)
if(!currentItems){
currentItems = []
@@ -79,7 +78,7 @@ contextMenuInit = function(){
}
function removeContextMenuOption(uid){
- menuSpecs.forEach(function(v,k) {
+ menuSpecs.forEach(function(v) {
let index = -1
v.forEach(function(e,ei){if(e['id']==uid){index=ei}})
if(index>=0){
@@ -112,7 +111,6 @@ contextMenuInit = function(){
if(e.composedPath()[0].matches(k)){
showContextMenu(e,e.composedPath()[0],v)
e.preventDefault()
- return
}
})
});
diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js
index 588c7b77..d2c2f190 100644
--- a/javascript/edit-attention.js
+++ b/javascript/edit-attention.js
@@ -69,8 +69,8 @@ function keyupEditAttention(event){
event.preventDefault();
- closeCharacter = ')'
- delta = opts.keyedit_precision_attention
+ var closeCharacter = ')'
+ var delta = opts.keyedit_precision_attention
if (selectionStart > 0 && text[selectionStart - 1] == '<'){
closeCharacter = '>'
@@ -91,8 +91,8 @@ function keyupEditAttention(event){
selectionEnd += 1;
}
- end = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1;
- weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + 1 + end));
+ var end = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1;
+ var weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + 1 + end));
if (isNaN(weight)) return;
weight += isPlus ? delta : -delta;
diff --git a/javascript/extensions.js b/javascript/extensions.js
index 3c2f995a..2a2d2f8e 100644
--- a/javascript/extensions.js
+++ b/javascript/extensions.js
@@ -1,14 +1,14 @@
-function extensions_apply(_, _, disable_all){
+function extensions_apply(_disabled_list, _update_list, disable_all){
var disable = []
var update = []
gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x){
if(x.name.startsWith("enable_") && ! x.checked)
- disable.push(x.name.substr(7))
+ disable.push(x.name.substring(7))
if(x.name.startsWith("update_") && x.checked)
- update.push(x.name.substr(7))
+ update.push(x.name.substring(7))
})
restart_reload()
@@ -16,12 +16,12 @@ function extensions_apply(_, _, disable_all){
return [JSON.stringify(disable), JSON.stringify(update), disable_all]
}
-function extensions_check(_, _){
+function extensions_check(){
var disable = []
gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x){
if(x.name.startsWith("enable_") && ! x.checked)
- disable.push(x.name.substr(7))
+ disable.push(x.name.substring(7))
})
gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x){
@@ -41,7 +41,7 @@ function install_extension_from_index(button, url){
button.disabled = "disabled"
button.value = "Installing..."
- textarea = gradioApp().querySelector('#extension_to_install textarea')
+ var textarea = gradioApp().querySelector('#extension_to_install textarea')
textarea.value = url
updateInput(textarea)
diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js
index 25322138..c85bc79a 100644
--- a/javascript/extraNetworks.js
+++ b/javascript/extraNetworks.js
@@ -1,4 +1,3 @@
-
function setupExtraNetworksForTab(tabname){
gradioApp().querySelector('#'+tabname+'_extra_tabs').classList.add('extra-networks')
@@ -10,16 +9,34 @@ function setupExtraNetworksForTab(tabname){
tabs.appendChild(search)
tabs.appendChild(refresh)
- search.addEventListener("input", function(evt){
- searchTerm = search.value.toLowerCase()
+ var applyFilter = function(){
+ var searchTerm = search.value.toLowerCase()
gradioApp().querySelectorAll('#'+tabname+'_extra_tabs div.card').forEach(function(elem){
- text = elem.querySelector('.name').textContent.toLowerCase() + " " + elem.querySelector('.search_term').textContent.toLowerCase()
- elem.style.display = text.indexOf(searchTerm) == -1 ? "none" : ""
+ var searchOnly = elem.querySelector('.search_only')
+ var text = elem.querySelector('.name').textContent.toLowerCase() + " " + elem.querySelector('.search_term').textContent.toLowerCase()
+
+ var visible = text.indexOf(searchTerm) != -1
+
+ if(searchOnly && searchTerm.length < 4){
+ visible = false
+ }
+
+ elem.style.display = visible ? "" : "none"
})
- });
+ }
+
+ search.addEventListener("input", applyFilter);
+ applyFilter();
+
+ extraNetworksApplyFilter[tabname] = applyFilter;
+}
+
+function applyExtraNetworkFilter(tabname){
+ setTimeout(extraNetworksApplyFilter[tabname], 1);
}
+var extraNetworksApplyFilter = {}
var activePromptTextarea = {};
function setupExtraNetworks(){
@@ -55,7 +72,7 @@ function tryToRemoveExtraNetworkFromPrompt(textarea, text){
var partToSearch = m[1]
var replaced = false
- var newTextareaText = textarea.value.replaceAll(re_extranet_g, function(found, index){
+ var newTextareaText = textarea.value.replaceAll(re_extranet_g, function(found){
m = found.match(re_extranet);
if(m[1] == partToSearch){
replaced = true;
@@ -96,9 +113,9 @@ function saveCardPreview(event, tabname, filename){
}
function extraNetworksSearchButton(tabs_id, event){
- searchTextarea = gradioApp().querySelector("#" + tabs_id + ' > div > textarea')
- button = event.target
- text = button.classList.contains("search-all") ? "" : button.textContent.trim()
+ var searchTextarea = gradioApp().querySelector("#" + tabs_id + ' > div > textarea')
+ var button = event.target
+ var text = button.classList.contains("search-all") ? "" : button.textContent.trim()
searchTextarea.value = text
updateInput(searchTextarea)
@@ -133,7 +150,7 @@ function popup(contents){
}
function extraNetworksShowMetadata(text){
- elem = document.createElement('pre')
+ var elem = document.createElement('pre')
elem.classList.add('popup-metadata');
elem.textContent = text;
@@ -165,7 +182,7 @@ function requestGet(url, data, handler, errorHandler){
}
function extraNetworksRequestMetadata(event, extraPage, cardName){
- showError = function(){ extraNetworksShowMetadata("there was an error getting metadata"); }
+ var showError = function(){ extraNetworksShowMetadata("there was an error getting metadata"); }
requestGet("./sd_extra_networks/metadata", {"page": extraPage, "item": cardName}, function(data){
if(data && data.metadata){
diff --git a/javascript/generationParams.js b/javascript/generationParams.js
index 1266a266..ef64ee2e 100644
--- a/javascript/generationParams.js
+++ b/javascript/generationParams.js
@@ -23,7 +23,7 @@ let modalObserver = new MutationObserver(function(mutations) {
});
function attachGalleryListeners(tab_name) {
- gallery = gradioApp().querySelector('#'+tab_name+'_gallery')
+ var gallery = gradioApp().querySelector('#'+tab_name+'_gallery')
gallery?.addEventListener('click', () => gradioApp().getElementById(tab_name+"_generation_info_button").click());
gallery?.addEventListener('keydown', (e) => {
if (e.keyCode == 37 || e.keyCode == 39) // left or right arrow
diff --git a/javascript/hints.js b/javascript/hints.js
index e7d17d36..3746df99 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -66,8 +66,8 @@ titles = {
"Interrogate": "Reconstruct prompt from existing image and put it into the prompt field.",
- "Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [clip_skip], [batch_number], [generation_number], [prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp], [hasprompt<prompt1|default><prompt2>..]; leave empty for default.",
- "Directory name pattern": "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [clip_skip], [batch_number], [generation_number], [prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp], [hasprompt<prompt1|default><prompt2>..]; leave empty for default.",
+ "Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [denoising], [clip_skip], [batch_number], [generation_number], [prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp], [hasprompt<prompt1|default><prompt2>..]; leave empty for default.",
+ "Directory name pattern": "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [denoising], [clip_skip], [batch_number], [generation_number], [prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp], [hasprompt<prompt1|default><prompt2>..]; leave empty for default.",
"Max prompt words": "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle",
"Loopback": "Performs img2img processing multiple times. Output images are used as input for the next loop.",
@@ -118,16 +118,18 @@ titles = {
onUiUpdate(function(){
gradioApp().querySelectorAll('span, button, select, p').forEach(function(span){
- tooltip = titles[span.textContent];