-rw-r--r--  .github/ISSUE_TEMPLATE/bug_report.yml           |   2
-rw-r--r--  .github/ISSUE_TEMPLATE/config.yml               |   5
-rw-r--r--  .gitignore                                      |   3
-rw-r--r--  README.md                                       |  39
-rw-r--r--  extensions/put extension here.txt               |   0
-rw-r--r--  javascript/aspectRatioOverlay.js                |  55
-rw-r--r--  javascript/dragdrop.js                          |   2
-rw-r--r--  javascript/images_history.js                    | 120
-rw-r--r--  modules/devices.py                              |  19
-rw-r--r--  modules/extras.py                               |  19
-rw-r--r--  modules/generation_parameters_copypaste.py      |  18
-rw-r--r--  modules/hypernetworks/hypernetwork.py           |  26
-rw-r--r--  modules/hypernetworks/ui.py                     |   8
-rw-r--r--  modules/images_history.py                       | 503
-rw-r--r--  modules/img2img.py                              |   3
-rw-r--r--  modules/interrogate.py                          |  12
-rw-r--r--  modules/lowvram.py                              |   9
-rw-r--r--  modules/processing.py                           |  99
-rw-r--r--  modules/script_callbacks.py                     |  42
-rw-r--r--  modules/scripts.py                              | 246
-rw-r--r--  modules/sd_hijack.py                            |  29
-rw-r--r--  modules/sd_hijack_inpainting.py                 | 331
-rw-r--r--  modules/sd_models.py                            |  33
-rw-r--r--  modules/sd_samplers.py                          |  64
-rw-r--r--  modules/shared.py                               |  15
-rw-r--r--  modules/textual_inversion/dataset.py            |   4
-rw-r--r--  modules/textual_inversion/image_embedding.py    |   5
-rw-r--r--  modules/textual_inversion/preprocess.py         |  85
-rw-r--r--  modules/textual_inversion/textual_inversion.py  |   6
-rw-r--r--  modules/textual_inversion/ui.py                 |   4
-rw-r--r--  modules/txt2img.py                              |   7
-rw-r--r--  modules/ui.py                                   | 127
-rw-r--r--  scripts/outpainting_mk_2.py                     | 139
-rw-r--r--  scripts/xy_grid.py                              |   1
-rw-r--r--  webui.py                                        |  12
35 files changed, 1568 insertions(+), 524 deletions(-)
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 35802a53..9c2ff313 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -45,6 +45,8 @@ body:
attributes:
label: Commit where the problem happens
description: Which commit are you running ? (copy the **Commit hash** shown in the cmd/terminal when you launch the UI)
+ validations:
+ required: true
- type: dropdown
id: platforms
attributes:
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 00000000..f58c94a9
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: false
+contact_links:
+ - name: WebUI Community Support
+ url: https://github.com/AUTOMATIC1111/stable-diffusion-webui/discussions
+ about: Please ask and answer questions here.
diff --git a/.gitignore b/.gitignore
index f9c3357c..8fa05852 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,4 +27,5 @@ __pycache__
notification.mp3
/SwinIR
/textual_inversion
-.vscode
\ No newline at end of file
+.vscode
+/extensions
diff --git a/README.md b/README.md
index 859a91b6..1a0e4f6a 100644
--- a/README.md
+++ b/README.md
@@ -11,6 +11,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- One click install and run script (but you still must install python and git)
- Outpainting
- Inpainting
+- Color Sketch
- Prompt Matrix
- Stable Diffusion Upscale
- Attention, specify parts of text that the model should pay more attention to
@@ -23,6 +24,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- have as many embeddings as you want and use any names you like for them
- use multiple embeddings with different numbers of vectors per token
- works with half precision floating point numbers
+ - train embeddings on 8GB (also reports of 6GB working)
- Extras tab with:
- GFPGAN, neural network that fixes faces
- CodeFormer, face restoration tool as an alternative to GFPGAN
@@ -37,14 +39,14 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- Interrupt processing at any time
- 4GB video card support (also reports of 2GB working)
- Correct seeds for batches
-- Prompt length validation
- - get length of prompt in tokens as you type
- - get a warning after generation if some text was truncated
+- Live prompt token length validation
- Generation parameters
- parameters you used to generate images are saved with that image
- in PNG chunks for PNG, in EXIF for JPEG
- can drag the image to PNG info tab to restore generation parameters and automatically copy them into UI
- can be disabled in settings
+ - drag and drop an image/text-parameters to promptbox
+- Read Generation Parameters Button, loads parameters in promptbox to UI
- Settings page
- Running arbitrary python code from UI (must run with --allow-code to enable)
- Mouseover hints for most UI elements
@@ -59,10 +61,10 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- CLIP interrogator, a button that tries to guess prompt from an image
- Prompt Editing, a way to change prompt mid-generation, say to start making a watermelon and switch to anime girl midway
- Batch Processing, process a group of files using img2img
-- Img2img Alternative
+- Img2img Alternative, reverse Euler method of cross attention control
- Highres Fix, a convenience option to produce high resolution pictures in one click without usual distortions
- Reloading checkpoints on the fly
-- Checkpoint Merger, a tab that allows you to merge two checkpoints into one
+- Checkpoint Merger, a tab that allows you to merge up to 3 checkpoints into one
- [Custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Scripts) with many extensions from community
- [Composable-Diffusion](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/), a way to use multiple prompts at once
- separate prompts using uppercase `AND`
@@ -70,14 +72,35 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- No token limit for prompts (original stable diffusion lets you use up to 75 tokens)
- DeepDanbooru integration, creates danbooru style tags for anime prompts (add --deepdanbooru to commandline args)
- [xformers](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers), major speed increase for select cards: (add --xformers to commandline args)
+- History tab: view, direct and delete images conveniently within the UI
+- Generate forever option
+- Training tab
+ - hypernetworks and embeddings options
+ - Preprocessing images: cropping, mirroring, autotagging using BLIP or deepdanbooru (for anime)
+- Clip skip
+- Use Hypernetworks
+- Use VAEs
+- Estimated completion time in progress bar
+- API
+- Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML.
+- via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using CLIP image embeds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients))
+
+## Where are Aesthetic Gradients?!?!
+Aesthetic Gradients are now an extension. You can install it using git:
+
+```commandline
+git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients extensions/aesthetic-gradients
+```
+
+After running this command, make sure that you have `aesthetic-gradients` dir in webui's `extensions` directory and restart
+the UI. The interface for Aesthetic Gradients should appear exactly the same as it was.
## Installation and Running
Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
-Alternatively, use Google Colab:
+Alternatively, use online services (like Google Colab):
-- [Colab, maintained by Akaibu](https://colab.research.google.com/drive/1kw3egmSn-KgWsikYvOMjJkVDsPLjEMzl)
-- [Colab, original by me, outdated](https://colab.research.google.com/drive/1Iy-xW9t1-OQWhb0hNxueGij8phCyluOh).
+- [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services)
### Automatic Installation on Windows
1. Install [Python 3.10.6](https://www.python.org/downloads/windows/), checking "Add Python to PATH"
diff --git a/extensions/put extension here.txt b/extensions/put extension here.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/extensions/put extension here.txt
diff --git a/javascript/aspectRatioOverlay.js b/javascript/aspectRatioOverlay.js
index 96f1c00d..66f26a22 100644
--- a/javascript/aspectRatioOverlay.js
+++ b/javascript/aspectRatioOverlay.js
@@ -3,12 +3,12 @@ let currentWidth = null;
let currentHeight = null;
let arFrameTimeout = setTimeout(function(){},0);
-function dimensionChange(e,dimname){
+function dimensionChange(e, is_width, is_height){
- if(dimname == 'Width'){
+ if(is_width){
currentWidth = e.target.value*1.0
}
- if(dimname == 'Height'){
+ if(is_height){
currentHeight = e.target.value*1.0
}
@@ -18,22 +18,13 @@ function dimensionChange(e,dimname){
return;
}
- var img2imgMode = gradioApp().querySelector('#mode_img2img.tabs > div > button.rounded-t-lg.border-gray-200')
- if(img2imgMode){
- img2imgMode=img2imgMode.innerText
- }else{
- return;
- }
-
- var redrawImage = gradioApp().querySelector('div[data-testid=image] img');
- var inpaintImage = gradioApp().querySelector('#img2maskimg div[data-testid=image] img')
-
var targetElement = null;
- if(img2imgMode=='img2img' && redrawImage){
- targetElement = redrawImage;
- }else if(img2imgMode=='Inpaint' && inpaintImage){
- targetElement = inpaintImage;
+ var tabIndex = get_tab_index('mode_img2img')
+ if(tabIndex == 0){
+ targetElement = gradioApp().querySelector('div[data-testid=image] img');
+ } else if(tabIndex == 1){
+ targetElement = gradioApp().querySelector('#img2maskimg div[data-testid=image] img');
}
if(targetElement){
@@ -98,22 +89,20 @@ onUiUpdate(function(){
var inImg2img = Boolean(gradioApp().querySelector("button.rounded-t-lg.border-gray-200"))
if(inImg2img){
let inputs = gradioApp().querySelectorAll('input');
- inputs.forEach(function(e){
- let parentLabel = e.parentElement.querySelector('label')
- if(parentLabel && parentLabel.innerText){
- if(!e.classList.contains('scrollwatch')){
- if(parentLabel.innerText == 'Width' || parentLabel.innerText == 'Height'){
- e.addEventListener('input', function(e){dimensionChange(e,parentLabel.innerText)} )
- e.classList.add('scrollwatch')
- }
- if(parentLabel.innerText == 'Width'){
- currentWidth = e.value*1.0
- }
- if(parentLabel.innerText == 'Height'){
- currentHeight = e.value*1.0
- }
- }
- }
+ inputs.forEach(function(e){
+ var is_width = e.parentElement.id == "img2img_width"
+ var is_height = e.parentElement.id == "img2img_height"
+
+ if((is_width || is_height) && !e.classList.contains('scrollwatch')){
+ e.addEventListener('input', function(e){dimensionChange(e, is_width, is_height)} )
+ e.classList.add('scrollwatch')
+ }
+ if(is_width){
+ currentWidth = e.value*1.0
+ }
+ if(is_height){
+ currentHeight = e.value*1.0
+ }
})
}
});
diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js
index 070cf255..3ed1cb3c 100644
--- a/javascript/dragdrop.js
+++ b/javascript/dragdrop.js
@@ -43,7 +43,7 @@ function dropReplaceImage( imgWrap, files ) {
window.document.addEventListener('dragover', e => {
const target = e.composedPath()[0];
const imgWrap = target.closest('[data-testid="image"]');
- if ( !imgWrap && target.placeholder.indexOf("Prompt") == -1) {
+ if ( !imgWrap && target.placeholder && target.placeholder.indexOf("Prompt") == -1) {
return;
}
e.stopPropagation();
diff --git a/javascript/images_history.js b/javascript/images_history.js
index f7d052c3..c9aa76f8 100644
--- a/javascript/images_history.js
+++ b/javascript/images_history.js
@@ -17,14 +17,6 @@ var images_history_click_image = function(){
images_history_set_image_info(this);
}
-var images_history_click_tab = function(){
- var tabs_box = gradioApp().getElementById("images_history_tab");
- if (!tabs_box.classList.contains(this.getAttribute("tabname"))) {
- gradioApp().getElementById(this.getAttribute("tabname") + "_images_history_renew_page").click();
- tabs_box.classList.add(this.getAttribute("tabname"))
- }
-}
-
function images_history_disabled_del(){
gradioApp().querySelectorAll(".images_history_del_button").forEach(function(btn){
btn.setAttribute('disabled','disabled');
@@ -43,7 +35,6 @@ function images_history_get_parent_by_tagname(item, tagname){
var parent = item.parentElement;
tagname = tagname.toUpperCase()
while(parent.tagName != tagname){
- console.log(parent.tagName, tagname)
parent = parent.parentElement;
}
return parent;
@@ -88,15 +79,15 @@ function images_history_set_image_info(button){
}
-function images_history_get_current_img(tabname, image_path, files){
+function images_history_get_current_img(tabname, img_index, files){
return [
- gradioApp().getElementById(tabname + '_images_history_set_index').getAttribute("img_index"),
- image_path,
+ tabname,
+ gradioApp().getElementById(tabname + '_images_history_set_index').getAttribute("img_index"),
files
];
}
-function images_history_delete(del_num, tabname, img_path, img_file_name, page_index, filenames, image_index){
+function images_history_delete(del_num, tabname, image_index){
image_index = parseInt(image_index);
var tab = gradioApp().getElementById(tabname + '_images_history');
var set_btn = tab.querySelector(".images_history_set_index");
@@ -107,6 +98,7 @@ function images_history_delete(del_num, tabname, img_path, img_file_name, page_i
}
});
var img_num = buttons.length / 2;
+ del_num = Math.min(img_num - image_index, del_num)
if (img_num <= del_num){
setTimeout(function(tabname){
gradioApp().getElementById(tabname + '_images_history_renew_page').click();
@@ -114,30 +106,28 @@ function images_history_delete(del_num, tabname, img_path, img_file_name, page_i
} else {
var next_img
for (var i = 0; i < del_num; i++){
- if (image_index + i < image_index + img_num){
- buttons[image_index + i].style.display = 'none';
- buttons[image_index + img_num + 1].style.display = 'none';
- next_img = image_index + i + 1
- }
+ buttons[image_index + i].style.display = 'none';
+ buttons[image_index + i + img_num].style.display = 'none';
+ next_img = image_index + i + 1
}
var bnt;
if (next_img >= img_num){
- btn = buttons[image_index - del_num];
+ btn = buttons[image_index - 1];
} else {
btn = buttons[next_img];
}
setTimeout(function(btn){btn.click()}, 30, btn);
}
images_history_disabled_del();
- return [del_num, tabname, img_path, img_file_name, page_index, filenames, image_index];
+
}
-function images_history_turnpage(img_path, page_index, image_index, tabname){
+function images_history_turnpage(tabname){
+ gradioApp().getElementById(tabname + '_images_history_del_button').setAttribute('disabled','disabled');
var buttons = gradioApp().getElementById(tabname + '_images_history').querySelectorAll(".gallery-item");
buttons.forEach(function(elem) {
elem.style.display = 'block';
- })
- return [img_path, page_index, image_index, tabname];
+ })
}
function images_history_enable_del_buttons(){
@@ -147,60 +137,64 @@ function images_history_enable_del_buttons(){
}
function images_history_init(){
- var load_txt2img_button = gradioApp().getElementById('txt2img_images_history_renew_page')
- if (load_txt2img_button){
+ var tabnames = gradioApp().getElementById("images_history_tabnames_list")
+ if (tabnames){
+ images_history_tab_list = tabnames.querySelector("textarea").value.split(",")
for (var i in images_history_tab_list ){
- tab = images_history_tab_list[i];
+ var tab = images_history_tab_list[i];
gradioApp().getElementById(tab + '_images_history').classList.add("images_history_cantainor");
gradioApp().getElementById(tab + '_images_history_set_index').classList.add("images_history_set_index");
gradioApp().getElementById(tab + '_images_history_del_button').classList.add("images_history_del_button");
- gradioApp().getElementById(tab + '_images_history_gallery').classList.add("images_history_gallery");
-
+ gradioApp().getElementById(tab + '_images_history_gallery').classList.add("images_history_gallery");
+ gradioApp().getElementById(tab + "_images_history_start").setAttribute("style","padding:20px;font-size:25px");
+ }
+
+ //preload
+ if (gradioApp().getElementById("images_history_preload").querySelector("input").checked ){
+ var tabs_box = gradioApp().getElementById("tab_images_history").querySelector("div").querySelector("div").querySelector("div");
+ tabs_box.setAttribute("id", "images_history_tab");
+ var tab_btns = tabs_box.querySelectorAll("button");
+ for (var i in images_history_tab_list){
+ var tabname = images_history_tab_list[i]
+ tab_btns[i].setAttribute("tabname", tabname);
+ tab_btns[i].addEventListener('click', function(){
+ var tabs_box = gradioApp().getElementById("images_history_tab");
+ if (!tabs_box.classList.contains(this.getAttribute("tabname"))) {
+ gradioApp().getElementById(this.getAttribute("tabname") + "_images_history_start").click();
+ tabs_box.classList.add(this.getAttribute("tabname"))
+ }
+ });
+ }
+ tab_btns[0].click()
}
- var tabs_box = gradioApp().getElementById("tab_images_history").querySelector("div").querySelector("div").querySelector("div");
- tabs_box.setAttribute("id", "images_history_tab");
- var tab_btns = tabs_box.querySelectorAll("button");
- for (var i in images_history_tab_list){
- var tabname = images_history_tab_list[i]
- tab_btns[i].setAttribute("tabname", tabname);
-
- // this refreshes history upon tab switch
- // until the history is known to work well, which is not the case now, we do not do this at startup
- //tab_btns[i].addEventListener('click', images_history_click_tab);
- }
- tabs_box.classList.add(images_history_tab_list[0]);
-
- // same as above, at page load
- //load_txt2img_button.click();
} else {
setTimeout(images_history_init, 500);
}
}
-var images_history_tab_list = ["txt2img", "img2img", "extras"];
+var images_history_tab_list = "";
setTimeout(images_history_init, 500);
document.addEventListener("DOMContentLoaded", function() {
var mutationObserver = new MutationObserver(function(m){
- for (var i in images_history_tab_list ){
- let tabname = images_history_tab_list[i]
- var buttons = gradioApp().querySelectorAll('#' + tabname + '_images_history .gallery-item');
- buttons.forEach(function(bnt){
- bnt.addEventListener('click', images_history_click_image, true);
- });
-
- // same as load_txt2img_button.click() above
- /*
- var cls_btn = gradioApp().getElementById(tabname + '_images_history_gallery').querySelector("svg");
- if (cls_btn){
- cls_btn.addEventListener('click', function(){
- gradioApp().getElementById(tabname + '_images_history_renew_page').click();
- }, false);
- }*/
-
- }
+ if (images_history_tab_list != ""){
+ for (var i in images_history_tab_list ){
+ let tabname = images_history_tab_list[i]
+ var buttons = gradioApp().querySelectorAll('#' + tabname + '_images_history .gallery-item');
+ buttons.forEach(function(bnt){
+ bnt.addEventListener('click', images_history_click_image, true);
+ });
+
+ var cls_btn = gradioApp().getElementById(tabname + '_images_history_gallery').querySelector("svg");
+ if (cls_btn){
+ cls_btn.addEventListener('click', function(){
+ gradioApp().getElementById(tabname + '_images_history_renew_page').click();
+ }, false);
+ }
+
+ }
+ }
});
- mutationObserver.observe( gradioApp(), { childList:true, subtree:true });
-
+ mutationObserver.observe(gradioApp(), { childList:true, subtree:true });
});
diff --git a/modules/devices.py b/modules/devices.py
index eb422583..dc1f3cdd 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -1,7 +1,6 @@
+import sys, os, shlex
import contextlib
-
import torch
-
from modules import errors
# has_mps is only available in nightly pytorch (for now), `getattr` for compatibility
@@ -9,10 +8,22 @@ has_mps = getattr(torch, 'has_mps', False)
cpu = torch.device("cpu")
+def extract_device_id(args, name):
+ for x in range(len(args)):
+ if name in args[x]: return args[x+1]
+ return None
def get_optimal_device():
if torch.cuda.is_available():
- return torch.device("cuda")
+ from modules import shared
+
+ device_id = shared.cmd_opts.device_id
+
+ if device_id is not None:
+ cuda_device = f"cuda:{device_id}"
+ return torch.device(cuda_device)
+ else:
+ return torch.device("cuda")
if has_mps:
return torch.device("mps")
@@ -34,7 +45,7 @@ def enable_tf32():
errors.run(enable_tf32, "Enabling TF32")
-device = device_interrogate = device_gfpgan = device_bsrgan = device_esrgan = device_scunet = device_codeformer = get_optimal_device()
+device = device_interrogate = device_gfpgan = device_bsrgan = device_esrgan = device_scunet = device_codeformer = None
dtype = torch.float16
dtype_vae = torch.float16
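
The devices.py change above makes `get_optimal_device()` honor a `--device-id` commandline flag, and defers the module-level `device*` globals to `None` so they can be assigned after commandline arguments are parsed. A minimal sketch of the selection logic, assuming `device_id` carries the parsed flag value (the helper name here is illustrative, not part of the codebase):

```python
# Minimal sketch of the device selection above; `device_id` is assumed to
# come from a parsed --device-id commandline flag (None when not given).
import torch

def pick_device(device_id=None):
    if torch.cuda.is_available():
        # "cuda:1" targets the second GPU; bare "cuda" uses the current default.
        return torch.device(f"cuda:{device_id}" if device_id is not None else "cuda")
    if getattr(torch, "has_mps", False):  # only in nightly builds, hence getattr
        return torch.device("mps")
    return torch.device("cpu")
```
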
diff --git a/modules/extras.py b/modules/extras.py
index b853fa5b..22c5a1c1 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -39,9 +39,12 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
if input_dir == '':
return outputs, "Please select an input directory.", ''
- image_list = [file for file in [os.path.join(input_dir, x) for x in os.listdir(input_dir)] if os.path.isfile(file)]
+ image_list = [file for file in [os.path.join(input_dir, x) for x in sorted(os.listdir(input_dir))] if os.path.isfile(file)]
for img in image_list:
- image = Image.open(img)
+ try:
+ image = Image.open(img)
+ except Exception:
+ continue
imageArr.append(image)
imageNameArr.append(img)
else:
@@ -118,10 +121,14 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
while len(cached_images) > 2:
del cached_images[next(iter(cached_images.keys()))]
-
- images.save_image(image, path=outpath, basename="", seed=None, prompt=None, extension=opts.samples_format, info=info, short_filename=True,
- no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo,
- forced_filename=image_name if opts.use_original_name_batch else None)
+
+ if opts.use_original_name_batch and image_name != None:
+ basename = os.path.splitext(os.path.basename(image_name))[0]
+ else:
+ basename = ''
+
+ images.save_image(image, path=outpath, basename=basename, seed=None, prompt=None, extension=opts.samples_format, info=info, short_filename=True,
+ no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=None)
if opts.enable_pnginfo:
image.info = existing_pnginfo
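
The extras.py hunks make batch processing more robust: directory listings are sorted for a deterministic order, unreadable files are skipped instead of raising, and when `use_original_name_batch` is set the output filename is derived from the input's basename. A sketch of the load-and-skip pattern, assuming Pillow is available (the helper name is illustrative):

```python
# Sketch of the batch-loading pattern above: sorted() gives a deterministic
# order, and files Pillow cannot open are skipped instead of aborting the run.
import os
from PIL import Image

def load_batch(input_dir):
    paths = [os.path.join(input_dir, name) for name in sorted(os.listdir(input_dir))]
    for path in filter(os.path.isfile, paths):
        try:
            yield path, Image.open(path)
        except Exception:
            continue  # unreadable or not an image; move on to the next file
```
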
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 0f041449..f73647da 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -4,13 +4,22 @@ import gradio as gr
from modules.shared import script_path
from modules import shared
-re_param_code = r"\s*([\w ]+):\s*([^,]+)(?:,|$)"
+re_param_code = r'\s*([\w ]+):\s*("(?:\\|\"|[^\"])+"|[^,]*)(?:,|$)'
re_param = re.compile(re_param_code)
re_params = re.compile(r"^(?:" + re_param_code + "){3,}$")
re_imagesize = re.compile(r"^(\d+)x(\d+)$")
type_of_gr_update = type(gr.update())
+def quote(text):
+ if ',' not in str(text):
+ return text
+
+ text = str(text)
+ text = text.replace('\\', '\\\\')
+ text = text.replace('"', '\\"')
+ return f'"{text}"'
+
def parse_generation_parameters(x: str):
"""parses generation parameters string, the one you see in text field under the picture in UI:
```
@@ -83,7 +92,12 @@ def connect_paste(button, paste_fields, input_comp, js=None):
else:
try:
valtype = type(output.value)
- val = valtype(v)
+
+ if valtype == bool and v == "False":
+ val = False
+ else:
+ val = valtype(v)
+
res.append(gr.update(value=val))
except Exception:
res.append(gr.update())
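
Two fixes land in generation_parameters_copypaste.py: `quote()` wraps parameter values that contain commas in escaped double quotes (with `re_param_code` extended to accept such quoted values), and pasting a boolean field no longer coerces the string "False" to True. A standalone sketch of both behaviors (`coerce` is an illustrative name, not a function in the module):

```python
# Standalone sketch of the two behaviors above.

def quote(text):
    # Values containing commas are wrapped in escaped double quotes so the
    # "key: value, key: value" infotext line stays parseable.
    if ',' not in str(text):
        return text
    text = str(text).replace('\\', '\\\\').replace('"', '\\"')
    return f'"{text}"'

# bool("False") is True in Python (any non-empty string is truthy), so the
# pasted string must be compared directly before falling back to valtype(v).
def coerce(valtype, v):
    return False if valtype is bool and v == "False" else valtype(v)

print(quote("512x512"))   # -> 512x512 (no comma, left untouched)
print(quote('a, "b"'))    # -> "a, \"b\""
```
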
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index e493f366..b7a04038 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -41,12 +41,12 @@ class HypernetworkModule(torch.nn.Module):
linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
# Add an activation func
- if activation_func == "linear":
+ if activation_func == "linear" or activation_func is None:
pass
elif activation_func in self.activation_dict:
linears.append(self.activation_dict[activation_func]())
else:
- raise NotImplementedError(
+ raise RuntimeError(
"Valid activation funcs: 'linear', 'relu', 'leakyrelu', 'elu', 'swish'"
)
@@ -65,7 +65,7 @@ class HypernetworkModule(torch.nn.Module):
self.load_state_dict(state_dict)
else:
for layer in self.linear:
- if isinstance(layer, torch.nn.Linear):
+ if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
layer.weight.data.normal_(mean=0.0, std=0.01)
layer.bias.data.zero_()
@@ -93,7 +93,7 @@ class HypernetworkModule(torch.nn.Module):
def trainables(self):
layer_structure = []
for layer in self.linear:
- if isinstance(layer, torch.nn.Linear):
+ if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
layer_structure += [layer.weight, layer.bias]
return layer_structure
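
In the HypernetworkModule hunks above, `None` is now accepted as an alias for the 'linear' activation, an unknown name raises RuntimeError instead of NotImplementedError, and both `torch.nn.Linear` and `torch.nn.LayerNorm` layers receive the normal(mean=0, std=0.01) initialization and count as trainables. A hedged sketch of the activation lookup follows; the contents of `self.activation_dict` are an assumption, since the diff does not show them:

```python
# Hedged sketch of the activation selection; the dict contents here are an
# assumption, as self.activation_dict itself is not shown in this diff.
import torch.nn as nn

activation_dict = {"relu": nn.ReLU, "leakyrelu": nn.LeakyReLU, "elu": nn.ELU,
                   "swish": nn.Hardswish}

def activation_layer(name):
    if name is None or name == "linear":
        return None  # identity: no extra layer is appended
    if name in activation_dict:
        return activation_dict[name]()
    raise RuntimeError("Valid activation funcs: 'linear', 'relu', 'leakyrelu', 'elu', 'swish'")
```
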
@@ -272,6 +272,9 @@ def stack_conds(conds):
def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+ # images allows training previews to have infotext. Importing it at the top causes a circular import problem.
+ from modules import images
+
assert hypernetwork_name, 'hypernetwork not selected'
path = shared.hypernetworks.get(hypernetwork_name, None)
@@ -314,6 +317,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
last_saved_file = "<none>"
last_saved_image = "<none>"
+ forced_filename = "<none>"
ititial_step = hypernetwork.step or 0
if ititial_step > steps:
@@ -353,7 +357,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
pbar.set_description(f"loss: {mean_loss:.7f}")
if hypernetwork.step > 0 and hypernetwork_dir is not None and hypernetwork.step % save_hypernetwork_every == 0: