-rw-r--r--  README.md                                                             |   2
-rw-r--r--  javascript/imageviewer.js                                             | 174
-rw-r--r--  launch.py                                                             | 141
-rw-r--r--  models/deepbooru/Put your deepbooru release project folder here.txt   |   0
-rw-r--r--  modules/bsrgan_model.py                                               |   2
-rw-r--r--  modules/deepbooru.py                                                  |  73
-rw-r--r--  modules/esrgan_model.py                                               |   4
-rw-r--r--  modules/esrgan_model_arch.py (renamed from modules/esrgam_model_arch.py) | 0
-rw-r--r--  modules/extras.py                                                     |  12
-rw-r--r--  modules/hypernetwork.py                                               |  24
-rw-r--r--  modules/images.py                                                     |  39
-rw-r--r--  modules/ldsr_model.py                                                 |   2
-rw-r--r--  modules/paths.py                                                      |   1
-rw-r--r--  modules/processing.py                                                 |  22
-rw-r--r--  modules/realesrgan_model.py                                           |   2
-rw-r--r--  modules/safe.py                                                       |  89
-rw-r--r--  modules/scunet_model.py                                               |   2
-rw-r--r--  modules/sd_hijack.py                                                  |  12
-rw-r--r--  modules/sd_hijack_optimizations.py                                    |   6
-rw-r--r--  modules/sd_models.py                                                  |  20
-rw-r--r--  modules/shared.py                                                     |  19
-rw-r--r--  modules/swinir_model.py                                               |   2
-rw-r--r--  modules/ui.py                                                         | 136
-rw-r--r--  modules/upscaler.py                                                   |   7
-rw-r--r--  scripts/prompts_from_file.py                                          |   4
-rw-r--r--  scripts/xy_grid.py                                                    |  59
-rw-r--r--  style.css                                                             |  17
-rw-r--r--  txt2img_Screenshot.png                                                | bin 539132 -> 337094 bytes
-rw-r--r--  webui.py                                                              |   3
29 files changed, 627 insertions(+), 247 deletions(-)
diff --git a/README.md b/README.md
index 63dd0c18..561eb03d 100644
--- a/README.md
+++ b/README.md
@@ -66,6 +66,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- separate prompts using uppercase `AND`
- also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2`
- No token limit for prompts (original stable diffusion lets you use up to 75 tokens)
+- DeepDanbooru integration, creates danbooru-style tags for anime prompts (add --deepdanbooru to commandline args)
## Installation and Running
Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
@@ -123,4 +124,5 @@ The documentation was moved from this README over to the project's [wiki](https:
- Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot
- CLIP interrogator idea and borrowing some code - https://github.com/pharmapsychotic/clip-interrogator
- Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
+- DeepDanbooru - interrogator for anime diffusers https://github.com/KichangKim/DeepDanbooru
- (You)
diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js
index 6a00c0da..65a33dd7 100644
--- a/javascript/imageviewer.js
+++ b/javascript/imageviewer.js
@@ -1,72 +1,97 @@
// A full size 'lightbox' preview modal shown when left clicking on gallery previews
-
function closeModal() {
- gradioApp().getElementById("lightboxModal").style.display = "none";
+ gradioApp().getElementById("lightboxModal").style.display = "none";
}
function showModal(event) {
- const source = event.target || event.srcElement;
- const modalImage = gradioApp().getElementById("modalImage")
- const lb = gradioApp().getElementById("lightboxModal")
- modalImage.src = source.src
- if (modalImage.style.display === 'none') {
- lb.style.setProperty('background-image', 'url(' + source.src + ')');
- }
- lb.style.display = "block";
- lb.focus()
- event.stopPropagation()
+ const source = event.target || event.srcElement;
+ const modalImage = gradioApp().getElementById("modalImage")
+ const lb = gradioApp().getElementById("lightboxModal")
+ modalImage.src = source.src
+ if (modalImage.style.display === 'none') {
+ lb.style.setProperty('background-image', 'url(' + source.src + ')');
+ }
+ lb.style.display = "block";
+ lb.focus()
+ event.stopPropagation()
}
function negmod(n, m) {
- return ((n % m) + m) % m;
+ return ((n % m) + m) % m;
}
-function modalImageSwitch(offset){
- var allgalleryButtons = gradioApp().querySelectorAll(".gallery-item.transition-all")
- var galleryButtons = []
- allgalleryButtons.forEach(function(elem){
- if(elem.parentElement.offsetParent){
- galleryButtons.push(elem);
+function updateOnBackgroundChange() {
+ const modalImage = gradioApp().getElementById("modalImage")
+ if (modalImage && modalImage.offsetParent) {
+ let allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2")
+ let currentButton = null
+ allcurrentButtons.forEach(function(elem) {
+ if (elem.parentElement.offsetParent) {
+ currentButton = elem;
+ }
+ })
+
+        if (currentButton && modalImage.src != currentButton.children[0].src) {
+            modalImage.src = currentButton.children[0].src;
+            if (modalImage.style.display === 'none') {
+                gradioApp().getElementById("lightboxModal").style.setProperty('background-image', `url(${modalImage.src})`)
+ }
+ }
}
- })
-
- if(galleryButtons.length>1){
- var allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2")
- var currentButton = null
- allcurrentButtons.forEach(function(elem){
- if(elem.parentElement.offsetParent){
- currentButton = elem;
+}
+
+function modalImageSwitch(offset) {
+ var allgalleryButtons = gradioApp().querySelectorAll(".gallery-item.transition-all")
+ var galleryButtons = []
+ allgalleryButtons.forEach(function(elem) {
+ if (elem.parentElement.offsetParent) {
+ galleryButtons.push(elem);
}
- })
-
- var result = -1
- galleryButtons.forEach(function(v, i){ if(v==currentButton) { result = i } })
-
- if(result != -1){
- nextButton = galleryButtons[negmod((result+offset),galleryButtons.length)]
- nextButton.click()
- const modalImage = gradioApp().getElementById("modalImage");
- const modal = gradioApp().getElementById("lightboxModal");
- modalImage.src = nextButton.children[0].src;
- if (modalImage.style.display === 'none') {
- modal.style.setProperty('background-image', `url(${modalImage.src})`)
+ })
+
+ if (galleryButtons.length > 1) {
+ var allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2")
+ var currentButton = null
+ allcurrentButtons.forEach(function(elem) {
+ if (elem.parentElement.offsetParent) {
+ currentButton = elem;
+ }
+ })
+
+ var result = -1
+ galleryButtons.forEach(function(v, i) {
+ if (v == currentButton) {
+ result = i
+ }
+ })
+
+ if (result != -1) {
+ nextButton = galleryButtons[negmod((result + offset), galleryButtons.length)]
+ nextButton.click()
+ const modalImage = gradioApp().getElementById("modalImage");
+ const modal = gradioApp().getElementById("lightboxModal");
+ modalImage.src = nextButton.children[0].src;
+ if (modalImage.style.display === 'none') {
+ modal.style.setProperty('background-image', `url(${modalImage.src})`)
+ }
+ setTimeout(function() {
+ modal.focus()
+ }, 10)
}
- setTimeout( function(){modal.focus()},10)
- }
- }
+ }
}
-function modalNextImage(event){
- modalImageSwitch(1)
- event.stopPropagation()
+function modalNextImage(event) {
+ modalImageSwitch(1)
+ event.stopPropagation()
}
-function modalPrevImage(event){
- modalImageSwitch(-1)
- event.stopPropagation()
+function modalPrevImage(event) {
+ modalImageSwitch(-1)
+ event.stopPropagation()
}
-function modalKeyHandler(event){
+function modalKeyHandler(event) {
switch (event.key) {
case "ArrowLeft":
modalPrevImage(event)
@@ -80,24 +105,22 @@ function modalKeyHandler(event){
}
}
-function showGalleryImage(){
+function showGalleryImage() {
setTimeout(function() {
fullImg_preview = gradioApp().querySelectorAll('img.w-full.object-contain')
-
- if(fullImg_preview != null){
+
+ if (fullImg_preview != null) {
fullImg_preview.forEach(function function_name(e) {
if (e.dataset.modded)
return;
e.dataset.modded = true;
if(e && e.parentElement.tagName == 'DIV'){
-
e.style.cursor='pointer'
-
e.addEventListener('click', function (evt) {
if(!opts.js_modal_lightbox) return;
modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed)
showModal(evt)
- },true);
+ }, true);
}
});
}
@@ -105,21 +128,21 @@ function showGalleryImage(){
}, 100);
}
-function modalZoomSet(modalImage, enable){
- if( enable ){
+function modalZoomSet(modalImage, enable) {
+ if (enable) {
modalImage.classList.add('modalImageFullscreen');
- } else{
+ } else {
modalImage.classList.remove('modalImageFullscreen');
}
}
-function modalZoomToggle(event){
+function modalZoomToggle(event) {
modalImage = gradioApp().getElementById("modalImage");
modalZoomSet(modalImage, !modalImage.classList.contains('modalImageFullscreen'))
event.stopPropagation()
}
-function modalTileImageToggle(event){
+function modalTileImageToggle(event) {
const modalImage = gradioApp().getElementById("modalImage");
const modal = gradioApp().getElementById("lightboxModal");
const isTiling = modalImage.style.display === 'none';
@@ -134,17 +157,18 @@ function modalTileImageToggle(event){
event.stopPropagation()
}
-function galleryImageHandler(e){
- if(e && e.parentElement.tagName == 'BUTTON'){
+function galleryImageHandler(e) {
+ if (e && e.parentElement.tagName == 'BUTTON') {
e.onclick = showGalleryImage;
}
}
-onUiUpdate(function(){
+onUiUpdate(function() {
fullImg_preview = gradioApp().querySelectorAll('img.w-full')
- if(fullImg_preview != null){
- fullImg_preview.forEach(galleryImageHandler);
+ if (fullImg_preview != null) {
+ fullImg_preview.forEach(galleryImageHandler);
}
+ updateOnBackgroundChange();
})
document.addEventListener("DOMContentLoaded", function() {
@@ -152,13 +176,13 @@ document.addEventListener("DOMContentLoaded", function() {
const modal = document.createElement('div')
modal.onclick = closeModal;
modal.id = "lightboxModal";
- modal.tabIndex=0
+ modal.tabIndex = 0
modal.addEventListener('keydown', modalKeyHandler, true)
const modalControls = document.createElement('div')
modalControls.className = 'modalControls gradio-container';
modal.append(modalControls);
-
+
const modalZoom = document.createElement('span')
modalZoom.className = 'modalZoom cursor';
modalZoom.innerHTML = '⤡'
@@ -183,30 +207,30 @@ document.addEventListener("DOMContentLoaded", function() {
const modalImage = document.createElement('img')
modalImage.id = 'modalImage';
modalImage.onclick = closeModal;
- modalImage.tabIndex=0
+ modalImage.tabIndex = 0
modalImage.addEventListener('keydown', modalKeyHandler, true)
modal.appendChild(modalImage)
const modalPrev = document.createElement('a')
modalPrev.className = 'modalPrev';
modalPrev.innerHTML = '❮'
- modalPrev.tabIndex=0
- modalPrev.addEventListener('click',modalPrevImage,true);
+ modalPrev.tabIndex = 0
+ modalPrev.addEventListener('click', modalPrevImage, true);
modalPrev.addEventListener('keydown', modalKeyHandler, true)
modal.appendChild(modalPrev)
const modalNext = document.createElement('a')
modalNext.className = 'modalNext';
modalNext.innerHTML = '❯'
- modalNext.tabIndex=0
- modalNext.addEventListener('click',modalNextImage,true);
+ modalNext.tabIndex = 0
+ modalNext.addEventListener('click', modalNextImage, true);
modalNext.addEventListener('keydown', modalKeyHandler, true)
modal.appendChild(modalNext)
gradioApp().getRootNode().appendChild(modal)
-
+
document.body.appendChild(modalFragment);
-
+
});
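The gallery navigation above computes the next index with negmod(), which wraps correctly in both directions even for negative offsets. A minimal Python rendering of the same arithmetic (the original helper is JavaScript, where % can return negative results):

def negmod(n, m):
    # Double-mod keeps the result in [0, m) even when n is negative.
    return ((n % m) + m) % m

# Stepping left from the first of five gallery items wraps to the last...
assert negmod(0 - 1, 5) == 4
# ...and stepping right from the last wraps back to the first.
assert negmod(4 + 1, 5) == 0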
diff --git a/launch.py b/launch.py
index 1d65a779..f42f557d 100644
--- a/launch.py
+++ b/launch.py
@@ -7,38 +7,14 @@ import shlex
import platform
dir_repos = "repositories"
-dir_tmp = "tmp"
-
python = sys.executable
git = os.environ.get('GIT', "git")
-torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
-requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
-commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
-
-gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
-clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
-
-stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
-taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
-k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "f4e99857772fc3a126ba886aadf795a332774878")
-codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
-blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
-
-args = shlex.split(commandline_args)
def extract_arg(args, name):
return [x for x in args if x != name], name in args
-args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test')
-xformers = '--xformers' in args
-
-
-def repo_dir(name):
- return os.path.join(dir_repos, name)
-
-
def run(command, desc=None, errdesc=None):
if desc is not None:
print(desc)
@@ -58,23 +34,11 @@ stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.st
return result.stdout.decode(encoding="utf8", errors="ignore")
-def run_python(code, desc=None, errdesc=None):
- return run(f'"{python}" -c "{code}"', desc, errdesc)
-
-
-def run_pip(args, desc=None):
- return run(f'"{python}" -m pip {args} --prefer-binary', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
-
-
def check_run(command):
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
return result.returncode == 0
-def check_run_python(code):
- return check_run(f'"{python}" -c "{code}"')
-
-
def is_installed(package):
try:
spec = importlib.util.find_spec(package)
@@ -84,6 +48,22 @@ def is_installed(package):
return spec is not None
+def repo_dir(name):
+ return os.path.join(dir_repos, name)
+
+
+def run_python(code, desc=None, errdesc=None):
+ return run(f'"{python}" -c "{code}"', desc, errdesc)
+
+
+def run_pip(args, desc=None):
+ return run(f'"{python}" -m pip {args} --prefer-binary', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
+
+
+def check_run_python(code):
+ return check_run(f'"{python}" -c "{code}"')
+
+
def git_clone(url, dir, name, commithash=None):
# TODO clone into temporary dir and move if successful
@@ -105,56 +85,81 @@ def git_clone(url, dir, name, commithash=None):
    run(f'"{git}" -C {dir} checkout {commithash}', None, f"Couldn't checkout {name}'s hash: {commithash}")
-try:
- commit = run(f"{git} rev-parse HEAD").strip()
-except Exception:
- commit = "<none>"
+def prepare_environment():
+ torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
+ requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
+ commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
-print(f"Python {sys.version}")
-print(f"Commit hash: {commit}")
+ gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
+ clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
+ stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
+ taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
+ k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "f4e99857772fc3a126ba886aadf795a332774878")
+ codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
+ blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
-if not is_installed("torch") or not is_installed("torchvision"):
- run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch")
+ args = shlex.split(commandline_args)
+
+ args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test')
+ xformers = '--xformers' in args
+ deepdanbooru = '--deepdanbooru' in args
+
+ try:
+ commit = run(f"{git} rev-parse HEAD").strip()
+ except Exception:
+ commit = "<none>"
-if not skip_torch_cuda_test:
- run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'")
+ print(f"Python {sys.version}")
+ print(f"Commit hash: {commit}")
-if not is_installed("gfpgan"):
- run_pip(f"install {gfpgan_package}", "gfpgan")
+ if not is_installed("torch") or not is_installed("torchvision"):
+ run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch")
-if not is_installed("clip"):
- run_pip(f"install {clip_package}", "clip")
+ if not skip_torch_cuda_test:
+ run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'")
-if not is_installed("xformers") and xformers and platform.python_version().startswith("3.10"):
- if platform.system() == "Windows":
- run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers")
- elif platform.system() == "Linux":
- run_pip("install xformers", "xformers")
+ if not is_installed("gfpgan"):
+ run_pip(f"install {gfpgan_package}", "gfpgan")
-os.makedirs(dir_repos, exist_ok=True)
+ if not is_installed("clip"):
+ run_pip(f"install {clip_package}", "clip")
-git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
-git_clone("https://github.com/CompVis/taming-transformers.git", repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
-git_clone("https://github.com/crowsonkb/k-diffusion.git", repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
-git_clone("https://github.com/sczhou/CodeFormer.git", repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
-git_clone("https://github.com/salesforce/BLIP.git", repo_dir('BLIP'), "BLIP", blip_commit_hash)
+ if not is_installed("xformers") and xformers and platform.python_version().startswith("3.10"):
+ if platform.system() == "Windows":
+ run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers")
+ elif platform.system() == "Linux":
+ run_pip("install xformers", "xformers")
-if not is_installed("lpips"):
- run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")
+ if not is_installed("deepdanbooru") and deepdanbooru:
+ run_pip("install git+https://github.com/KichangKim/DeepDanbooru.git@edf73df4cdaeea2cf00e9ac08bd8a9026b7a7b26#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru")
-run_pip(f"install -r {requirements_file}", "requirements for Web UI")
+ os.makedirs(dir_repos, exist_ok=True)
-sys.argv += args
+ git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
+ git_clone("https://github.com/CompVis/taming-transformers.git", repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
+ git_clone("https://github.com/crowsonkb/k-diffusion.git", repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
+ git_clone("https://github.com/sczhou/CodeFormer.git", repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
+ git_clone("https://github.com/salesforce/BLIP.git", repo_dir('BLIP'), "BLIP", blip_commit_hash)
+
+ if not is_installed("lpips"):
+ run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")
+
+ run_pip(f"install -r {requirements_file}", "requirements for Web UI")
+
+ sys.argv += args
+
+ if "--exit" in args:
+ print("Exiting because of --exit argument")
+ exit(0)
-if "--exit" in args:
- print("Exiting because of --exit argument")
- exit(0)
def start_webui():
print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}")
import webui
webui.webui()
+
if __name__ == "__main__":
+    prepare_environment()
start_webui()
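The refactor above moves all environment setup into prepare_environment(); flags that launch.py consumes itself are pulled out with extract_arg(), which both strips the flag from the list and reports whether it was present. A small self-contained check of that behavior, using the function exactly as defined in the diff:

def extract_arg(args, name):
    # Returns the argument list without `name`, plus whether `name` was present.
    return [x for x in args if x != name], name in args

args = ["--xformers", "--skip-torch-cuda-test", "--deepdanbooru"]
args, skip_torch_cuda_test = extract_arg(args, "--skip-torch-cuda-test")
assert args == ["--xformers", "--deepdanbooru"]
assert skip_torch_cuda_test is True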
diff --git a/models/deepbooru/Put your deepbooru release project folder here.txt b/models/deepbooru/Put your deepbooru release project folder here.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/models/deepbooru/Put your deepbooru release project folder here.txt
diff --git a/modules/bsrgan_model.py b/modules/bsrgan_model.py
index 3bd80791..737e1a76 100644
--- a/modules/bsrgan_model.py
+++ b/modules/bsrgan_model.py
@@ -10,13 +10,11 @@ from basicsr.utils.download_util import load_file_from_url
import modules.upscaler
from modules import devices, modelloader
from modules.bsrgan_model_arch import RRDBNet
-from modules.paths import models_path
class UpscalerBSRGAN(modules.upscaler.Upscaler):
def __init__(self, dirname):
self.name = "BSRGAN"
- self.model_path = os.path.join(models_path, self.name)
self.model_name = "BSRGAN 4x"
self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/BSRGAN.pth"
self.user_path = dirname
diff --git a/modules/deepbooru.py b/modules/deepbooru.py
new file mode 100644
index 00000000..7e3c0618
--- /dev/null
+++ b/modules/deepbooru.py
@@ -0,0 +1,73 @@
+import os.path
+from concurrent.futures import ProcessPoolExecutor
+from multiprocessing import get_context
+
+
+def _load_tf_and_return_tags(pil_image, threshold):
+ import deepdanbooru as dd
+ import tensorflow as tf
+ import numpy as np
+
+ this_folder = os.path.dirname(__file__)
+ model_path = os.path.abspath(os.path.join(this_folder, '..', 'models', 'deepbooru'))
+ if not os.path.exists(os.path.join(model_path, 'project.json')):
+ # there is no point importing these every time
+ import zipfile
+ from basicsr.utils.download_util import load_file_from_url
+ load_file_from_url(r"https://github.com/KichangKim/DeepDanbooru/releases/download/v3-20211112-sgd-e28/deepdanbooru-v3-20211112-sgd-e28.zip",
+ model_path)
+ with zipfile.ZipFile(os.path.join(model_path, "deepdanbooru-v3-20211112-sgd-e28.zip"), "r") as zip_ref:
+ zip_ref.extractall(model_path)
+ os.remove(os.path.join(model_path, "deepdanbooru-v3-20211112-sgd-e28.zip"))
+
+ tags = dd.project.load_tags_from_project(model_path)
+ model = dd.project.load_model_from_project(
+ model_path, compile_model=True
+ )
+
+ width = model.input_shape[2]
+ height = model.input_shape[1]
+ image = np.array(pil_image)
+ image = tf.image.resize(
+ image,
+ size=(height, width),
+ method=tf.image.ResizeMethod.AREA,
+ preserve_aspect_ratio=True,
+ )
+ image = image.numpy() # EagerTensor to np.array
+ image = dd.image.transform_and_pad_image(image, width, height)
+ image = image / 255.0
+ image_shape = image.shape
+ image = image.reshape((1, image_shape[0], image_shape[1], image_shape[2]))
+
+ y = model.predict(image)[0]
+
+ result_dict = {}
+
+ for i, tag in enumerate(tags):
+ result_dict[tag] = y[i]
+ result_tags_out = []
+ result_tags_print = []
+ for tag in tags:
+ if result_dict[tag] >= threshold:
+ if tag.startswith("rating:"):
+ continue
+ result_tags_out.append(tag)
+ result_tags_print.append(f'{result_dict[tag]} {tag}')
+
+ print('\n'.join(sorted(result_tags_print, reverse=True)))
+
+ return ', '.join(result_tags_out).replace('_', ' ').replace(':', ' ')
+
+
+def subprocess_init_no_cuda():
+ import os
+ os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+
+
+def get_deepbooru_tags(pil_image, threshold=0.5):
+ context = get_context('spawn')
+ with ProcessPoolExecutor(initializer=subprocess_init_no_cuda, mp_context=context) as executor:
+ f = executor.submit(_load_tf_and_return_tags, pil_image, threshold, )
+ ret = f.result() # will rethrow any exceptions
+    return ret
\ No newline at end of file
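A minimal usage sketch for the new module ("example.png" is a hypothetical input file). Note the design choice visible above: get_deepbooru_tags() runs the TensorFlow model in a spawned subprocess with CUDA_VISIBLE_DEVICES set to -1, so DeepDanbooru never competes with the torch pipeline for GPU memory, and all TensorFlow state is released when the worker exits:

from PIL import Image

from modules.deepbooru import get_deepbooru_tags

image = Image.open("example.png")  # hypothetical anime-style input image
tags = get_deepbooru_tags(image, threshold=0.5)
print(tags)  # comma-separated tags, with underscores and colons normalized to spaces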
diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index 28548124..46ad0da3 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -5,9 +5,8 @@ import torch
from PIL import Image
from basicsr.utils.download_util import load_file_from_url
-import modules.esrgam_model_arch as arch
+import modules.esrgan_model_arch as arch
from modules import shared, modelloader, images, devices
-from modules.paths import models_path
from modules.upscaler import Upscaler, UpscalerData
from modules.shared import opts
@@ -76,7 +75,6 @@ class UpscalerESRGAN(Upscaler):
self.model_name = "ESRGAN_4x"
self.scalers = []
self.user_path = dirname
- self.model_path = os.path.join(models_path, self.name)
super().__init__()
model_paths = self.find_models(ext_filter=[".pt", ".pth"])
scalers = []
diff --git a/modules/esrgam_model_arch.py b/modules/esrgan_model_arch.py
index e413d36e..e413d36e 100644
--- a/modules/esrgam_model_arch.py
+++ b/modules/esrgan_model_arch.py
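The extras.py hunks below switch the checkpoint merger to sd_models.get_state_dict_from_checkpoint() and keep the theta_funcs table that maps the "Weighted Sum" option to weighted_sum. That function itself is not shown in this diff, so the following is a hedged sketch of what a per-tensor weighted-sum interpolation typically looks like — an assumption, not the verbatim implementation:

import torch

def weighted_sum(theta0: torch.Tensor, theta1: torch.Tensor, alpha: float) -> torch.Tensor:
    # alpha = 0 keeps the primary model's tensor; alpha = 1 takes the secondary's.
    return (1 - alpha) * theta0 + alpha * theta1

merged = weighted_sum(torch.zeros(4), torch.ones(4), 0.3)
assert torch.allclose(merged, torch.full((4,), 0.3))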
diff --git a/modules/extras.py b/modules/extras.py
index 1d9e64e5..41e8612c 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -29,7 +29,7 @@ def run_extras(extras_mode, image, image_folder, gfpgan_visibility, codeformer_v
if extras_mode == 1:
#convert file to pillow image
for img in image_folder:
- image = Image.fromarray(np.array(Image.open(img)))
+ image = Image.open(img)
imageArr.append(image)
imageNameArr.append(os.path.splitext(img.orig_name)[0])
else:
@@ -98,6 +98,10 @@ def run_extras(extras_mode, image, image_folder, gfpgan_visibility, codeformer_v
no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo,
forced_filename=image_name if opts.use_original_name_batch else None)
+ if opts.enable_pnginfo:
+ image.info = existing_pnginfo
+ image.info["extras"] = info
+
outputs.append(image)
devices.torch_gc()
@@ -169,9 +173,9 @@ def run_modelmerger(primary_model_name, secondary_model_name, interp_method, int
print(f"Loading {secondary_model_info.filename}...")
secondary_model = torch.load(secondary_model_info.filename, map_location='cpu')
-
- theta_0 = primary_model['state_dict']
- theta_1 = secondary_model['state_dict']
+
+ theta_0 = sd_models.get_state_dict_from_checkpoint(primary_model)
+ theta_1 = sd_models.get_state_dict_from_checkpoint(secondary_model)
theta_funcs = {
"Weighted Sum": weighted_sum,
diff --git a/modules/hypernetwork.py b/modules/hypernetwork.py
index 7f062242..498bc9d8 100644
--- a/modules/hypernetwork.py
+++ b/modules/hypernetwork.py
@@ -40,18 +40,28 @@ class Hypernetwork:
self.layers[size] = (HypernetworkModule(size, sd[0]), HypernetworkModule(size, sd[1]))
-def load_hypernetworks(path):
+def list_hypernetworks(path):
res = {}
-
for filename in glob.iglob(os.path.join(path, '**/*.pt'), recursive=True):
+ name = os.path.splitext(os.path.basename(filename))[0]
+ res[name] = filename
+ return res
+
+
+def load_hypernetwork(filename):
+ path = shared.hypernetworks.get(filename, None)
+ if path is not None:
+ print(f"Loading hypernetwork {filename}")
try:
- hn = Hypernetwork(filename)
- res[hn.name] = hn
+ shared.loaded_hypernetwork = Hypernetwork(path)
except Exception:
- print(f"Error loading hypernetwork {filename}", file=sys.stderr)
+ print(f"Error loading hypernetwork {path}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
+ else:
+ if shared.loaded_hypernetwork is not None:
+            print("Unloading hypernetwork")
- return res
+ shared.loaded_hypernetwork = None
def attention_CrossAttention_forward(self, x, context=None, mask=None):
@@ -60,7 +70,7 @@ def attention_CrossAttention_forward(self, x, context=None, mask=None):
q = self.to_q(x)
context = default(context, x)
- hypernetwork = shared.selected_hypernetwork()
+ hypernetwork = shared.loaded_hypernetwork
hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
if hypernetwork_layers is not None:
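The hunk above only shows the lookup: hypernetwork_layers is a (k_module, v_module) pair keyed by the context width, fetched from shared.loaded_hypernetwork. The lines that consume it fall outside this hunk, so the following is a hedged sketch — an assumption, not the verbatim code — of how such a pair is typically applied before the key/value projections:

import torch
import torch.nn as nn

def apply_hypernetwork(hypernetwork_layers, context: torch.Tensor,
                       to_k: nn.Module, to_v: nn.Module):
    # When a hypernetwork pair is present, it transforms the context that feeds
    # the key/value projections; otherwise the context passes through unchanged.
    if hypernetwork_layers is not None:
        k = to_k(hypernetwork_layers[0](context))
        v = to_v(hypernetwork_layers[1](context))
    else:
        k = to_k(context)
        v = to_v(context)
    return k, v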
diff --git a/modules/images.py b/modules/images.py
index 4a4fc977..e62eec8e 100644
--- a/modules/images.py
+++ b/modules/images.py