-rw-r--r--  README.md                                        |  23
-rw-r--r--  javascript/hints.js                              |   4
-rw-r--r--  javascript/ui.js                                 |   4
-rw-r--r--  launch.py                                        |  21
-rw-r--r--  modules/api/api.py                               | 110
-rw-r--r--  modules/api/models.py                            |   6
-rw-r--r--  modules/codeformer_model.py                      |   3
-rw-r--r--  modules/deepbooru.py                             | 258
-rw-r--r--  modules/deepbooru_model.py                       | 676
-rw-r--r--  modules/devices.py                               |  55
-rw-r--r--  modules/esrgan_model.py                          |   2
-rw-r--r--  modules/extensions.py                            |   7
-rw-r--r--  modules/generation_parameters_copypaste.py       |   1
-rw-r--r--  modules/gfpgan_model.py                          |   4
-rw-r--r--  modules/hypernetworks/hypernetwork.py            | 296
-rw-r--r--  modules/images.py                                |   5
-rw-r--r--  modules/img2img.py                               |   4
-rw-r--r--  modules/lowvram.py                               |   8
-rw-r--r--  modules/paths.py                                 |   2
-rw-r--r--  modules/processing.py                            | 117
-rw-r--r--  modules/script_callbacks.py                      |  35
-rw-r--r--  modules/scripts.py                               |  69
-rw-r--r--  modules/scunet_model.py                          |   2
-rw-r--r--  modules/sd_hijack.py                             | 303
-rw-r--r--  modules/sd_hijack_checkpoint.py                  |  10
-rw-r--r--  modules/sd_hijack_clip.py                        | 301
-rw-r--r--  modules/sd_hijack_inpainting.py                  |  20
-rw-r--r--  modules/sd_hijack_open_clip.py                   |  37
-rw-r--r--  modules/sd_models.py                             |  13
-rw-r--r--  modules/sd_samplers.py                           |  69
-rw-r--r--  modules/sd_vae.py                                |  36
-rw-r--r--  modules/shared.py                                |  71
-rw-r--r--  modules/styles.py                                |  11
-rw-r--r--  modules/swinir_model.py                          |   2
-rw-r--r--  modules/textual_inversion/dataset.py             | 140
-rw-r--r--  modules/textual_inversion/preprocess.py          |  12
-rw-r--r--  modules/textual_inversion/textual_inversion.py   | 332
-rw-r--r--  modules/textual_inversion/ui.py                  |   2
-rw-r--r--  modules/txt2img.py                               |   3
-rw-r--r--  modules/ui.py                                    | 101
-rw-r--r--  modules/ui_extensions.py                         |   4
-rw-r--r--  modules/ui_tempdir.py                            |  62
-rw-r--r--  requirements.txt                                 |   2
-rw-r--r--  requirements_versions.txt                        |   2
-rw-r--r--  scripts/img2imgalt.py                            |   4
-rw-r--r--  scripts/xy_grid.py                               |  22
-rw-r--r--  v1-inference.yaml                                |  70
-rw-r--r--  webui-user.sh                                    |   3
-rw-r--r--  webui.bat                                        |  12
-rw-r--r--  webui.py                                         |  25
-rwxr-xr-x  webui.sh                                         |  30
51 files changed, 2300 insertions(+), 1111 deletions(-)
diff --git a/README.md b/README.md
index 33508f31..8a4ffade 100644
--- a/README.md
+++ b/README.md
@@ -70,7 +70,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- separate prompts using uppercase `AND`
- also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2`
- No token limit for prompts (original stable diffusion lets you use up to 75 tokens)
-- DeepDanbooru integration, creates danbooru style tags for anime prompts (add --deepdanbooru to commandline args)
+- DeepDanbooru integration, creates danbooru style tags for anime prompts
- [xformers](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers), major speed increase for select cards: (add --xformers to commandline args)
- via extension: [History tab](https://github.com/yfszzx/stable-diffusion-webui-images-browser): view, direct and delete images conveniently within the UI
- Generate forever option
@@ -84,26 +84,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- API
- Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML.
- via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip images embds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients))
-
-## Where are Aesthetic Gradients?!?!
-Aesthetic Gradients are now an extension. You can install it using git:
-
-```commandline
-git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients extensions/aesthetic-gradients
-```
-
-After running this command, make sure that you have `aesthetic-gradients` dir in webui's `extensions` directory and restart
-the UI. The interface for Aesthetic Gradients should appear exactly the same as it was.
-
-## Where is History/Image browser?!?!
-Image browser is now an extension. You can install it using git:
-
-```commandline
-git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser extensions/images-browser
-```
-
-After running this command, make sure that you have `images-browser` dir in webui's `extensions` directory and restart
-the UI. The interface for Image browser should appear exactly the same as it was.
+- [Stable Diffusion 2.0](https://github.com/Stability-AI/stablediffusion) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20) for instructions
## Installation and Running
Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
diff --git a/javascript/hints.js b/javascript/hints.js
index 623bc25c..ac417ff6 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -62,8 +62,8 @@ titles = {
"Interrogate": "Reconstruct prompt from existing image and put it into the prompt field.",
- "Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
- "Directory name pattern": "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
+ "Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
+ "Directory name pattern": "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
"Max prompt words": "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle",
"Loopback": "Process an image, use it as an input, repeat.",
diff --git a/javascript/ui.js b/javascript/ui.js
index 95cfd106..2ca66d79 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -8,8 +8,8 @@ function set_theme(theme){
}
function selected_gallery_index(){
- var buttons = gradioApp().querySelectorAll('[style="display: block;"].tabitem .gallery-item')
- var button = gradioApp().querySelector('[style="display: block;"].tabitem .gallery-item.\\!ring-2')
+ var buttons = gradioApp().querySelectorAll('[style="display: block;"].tabitem div[id$=_gallery] .gallery-item')
+ var button = gradioApp().querySelector('[style="display: block;"].tabitem div[id$=_gallery] .gallery-item.\\!ring-2')
var result = -1
buttons.forEach(function(v, i){ if(v==button) { result = i } })
diff --git a/launch.py b/launch.py
index 8e65676d..b1626cb5 100644
--- a/launch.py
+++ b/launch.py
@@ -134,19 +134,19 @@ def prepare_enviroment():
gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
- deepdanbooru_package = os.environ.get('DEEPDANBOORU_PACKAGE', "git+https://github.com/KichangKim/DeepDanbooru.git@d91a2963bf87c6a770d74894667e9ffa9f6de7ff")
+ openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b")
xformers_windows_package = os.environ.get('XFORMERS_WINDOWS_PACKAGE', 'https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl')
- stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/CompVis/stable-diffusion.git")
- taming_transformers_repo = os.environ.get('TAMING_REANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git")
+ stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git")
+ taming_transformers_repo = os.environ.get('TAMING_TRANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git")
k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
- codeformer_repo = os.environ.get('CODEFORMET_REPO', 'https://github.com/sczhou/CodeFormer.git')
+ codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git')
blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
- stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
+ stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "47b6b607fdd31875c9279cd2f4f16b92e4ea958e")
taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
- k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "60e5042ca0da89c14d1dd59d73883280f8fce991")
+ k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "5b3af030dd83e0297272d861c19477735d0317ec")
codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
@@ -158,7 +158,6 @@ def prepare_enviroment():
sys.argv, update_check = extract_arg(sys.argv, '--update-check')
sys.argv, run_tests = extract_arg(sys.argv, '--tests')
xformers = '--xformers' in sys.argv
- deepdanbooru = '--deepdanbooru' in sys.argv
ngrok = '--ngrok' in sys.argv
try:
@@ -181,6 +180,9 @@ def prepare_enviroment():
if not is_installed("clip"):
run_pip(f"install {clip_package}", "clip")
+ if not is_installed("open_clip"):
+ run_pip(f"install {openclip_package}", "open_clip")
+
if (not is_installed("xformers") or reinstall_xformers) and xformers:
if platform.system() == "Windows":
if platform.python_version().startswith("3.10"):
@@ -193,15 +195,12 @@ def prepare_enviroment():
elif platform.system() == "Linux":
run_pip("install xformers", "xformers")
- if not is_installed("deepdanbooru") and deepdanbooru:
- run_pip(f"install {deepdanbooru_package}#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru")
-
if not is_installed("pyngrok") and ngrok:
run_pip("install pyngrok", "ngrok")
os.makedirs(dir_repos, exist_ok=True)
- git_clone(stable_diffusion_repo, repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
+ git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash)
git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
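Every repository URL and commit hash in `prepare_enviroment()` is read through `os.environ.get`, so the sources above can be pinned or swapped without editing `launch.py`. A minimal sketch of that pattern, using the env var names from this diff (the override values shown are just the new defaults):

```python
# Sketch: pin the Stable Diffusion repo/commit before launch.py clones it.
# Variable names come from prepare_enviroment(); values here are illustrative.
import os
import subprocess

os.environ["STABLE_DIFFUSION_REPO"] = "https://github.com/Stability-AI/stablediffusion.git"
os.environ["STABLE_DIFFUSION_COMMIT_HASH"] = "47b6b607fdd31875c9279cd2f4f16b92e4ea958e"

subprocess.run(["python", "launch.py"], check=True)
```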
diff --git a/modules/api/api.py b/modules/api/api.py
index 596a6616..7a567be3 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -5,19 +5,19 @@ import uvicorn
from threading import Lock
from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image
from fastapi import APIRouter, Depends, FastAPI, HTTPException
+from fastapi.security import HTTPBasic, HTTPBasicCredentials
+from secrets import compare_digest
+
import modules.shared as shared
+from modules import sd_samplers, deepbooru
from modules.api.models import *
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
-from modules.sd_samplers import all_samplers
from modules.extras import run_extras, run_pnginfo
from PIL import PngImagePlugin
from modules.sd_models import checkpoints_list
from modules.realesrgan_model import get_realesrgan_models
from typing import List
-if shared.cmd_opts.deepdanbooru:
- from modules.deepbooru import get_deepbooru_tags
-
def upscaler_to_index(name: str):
try:
return [x.name.lower() for x in shared.sd_upscalers].index(name.lower())
@@ -25,8 +25,12 @@ def upscaler_to_index(name: str):
raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in sd_upscalers])}")
-sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None)
+def validate_sampler_name(name):
+ config = sd_samplers.all_samplers_map.get(name, None)
+ if config is None:
+ raise HTTPException(status_code=404, detail="Sampler not found")
+ return name
def setUpscalers(req: dict):
reqDict = vars(req)
@@ -57,39 +61,53 @@ def encode_pil_to_base64(image):
class Api:
def __init__(self, app: FastAPI, queue_lock: Lock):
+ if shared.cmd_opts.api_auth:
+ self.credentials = dict()
+ for auth in shared.cmd_opts.api_auth.split(","):
+ user, password = auth.split(":")
+ self.credentials[user] = password
+
self.router = APIRouter()
self.app = app
self.queue_lock = queue_lock
- self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse)
- self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse)
- self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse)
- self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse)
- self.app.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse)
- self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse)
- self.app.add_api_route("/sdapi/v1/interrogate", self.interrogateapi, methods=["POST"])
- self.app.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"])
- self.app.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=OptionsModel)
- self.app.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"])
- self.app.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=FlagsModel)
- self.app.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[SamplerItem])
- self.app.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[UpscalerItem])
- self.app.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[SDModelItem])
- self.app.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[HypernetworkItem])
- self.app.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[FaceRestorerItem])
- self.app.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[RealesrganItem])
- self.app.add_api_route("/sdapi/v1/prompt-styles", self.get_promp_styles, methods=["GET"], response_model=List[PromptStyleItem])
- self.app.add_api_route("/sdapi/v1/artist-categories", self.get_artists_categories, methods=["GET"], response_model=List[str])
- self.app.add_api_route("/sdapi/v1/artists", self.get_artists, methods=["GET"], response_model=List[ArtistItem])
+ self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse)
+ self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse)
+ self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse)
+ self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse)
+ self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse)
+ self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse)
+ self.add_api_route("/sdapi/v1/interrogate", self.interrogateapi, methods=["POST"])
+ self.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"])
+ self.add_api_route("/sdapi/v1/skip", self.skip, methods=["POST"])
+ self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=OptionsModel)
+ self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"])
+ self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=FlagsModel)
+ self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[SamplerItem])
+ self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[UpscalerItem])
+ self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[SDModelItem])
+ self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[HypernetworkItem])
+ self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[FaceRestorerItem])
+ self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[RealesrganItem])
+ self.add_api_route("/sdapi/v1/prompt-styles", self.get_promp_styles, methods=["GET"], response_model=List[PromptStyleItem])
+ self.add_api_route("/sdapi/v1/artist-categories", self.get_artists_categories, methods=["GET"], response_model=List[str])
+ self.add_api_route("/sdapi/v1/artists", self.get_artists, methods=["GET"], response_model=List[ArtistItem])
+
+ def add_api_route(self, path: str, endpoint, **kwargs):
+ if shared.cmd_opts.api_auth:
+ return self.app.add_api_route(path, endpoint, dependencies=[Depends(self.auth)], **kwargs)
+ return self.app.add_api_route(path, endpoint, **kwargs)
+
+ def auth(self, credentials: HTTPBasicCredentials = Depends(HTTPBasic())):
+ if credentials.username in self.credentials:
+ if compare_digest(credentials.password, self.credentials[credentials.username]):
+ return True
+
+ raise HTTPException(status_code=401, detail="Incorrect username or password", headers={"WWW-Authenticate": "Basic"})
def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
- sampler_index = sampler_to_index(txt2imgreq.sampler_index)
-
- if sampler_index is None:
- raise HTTPException(status_code=404, detail="Sampler not found")
-
populate = txt2imgreq.copy(update={ # Override __init__ params
"sd_model": shared.sd_model,
- "sampler_index": sampler_index[0],
+ "sampler_name": validate_sampler_name(txt2imgreq.sampler_index),
"do_not_save_samples": True,
"do_not_save_grid": True
}
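With registration going through the new `add_api_route` wrapper, every `/sdapi/v1/*` endpoint gains the HTTP Basic check whenever `--api-auth` is set. A minimal client sketch, assuming the server was launched with `--api --api-auth user:pass` (host, credentials, and payload are illustrative):

```python
# Sketch: calling the authenticated txt2img endpoint with HTTP Basic auth.
import requests

resp = requests.post(
    "http://127.0.0.1:7860/sdapi/v1/txt2img",
    json={"prompt": "a photo of a cat", "steps": 20},
    auth=("user", "pass"),  # must match an entry passed via --api-auth
)
resp.raise_for_status()  # a 401 means the compare_digest check rejected the credentials
print(len(resp.json()["images"]))
```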
@@ -109,12 +127,6 @@ class Api:
return TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js())
def img2imgapi(self, img2imgreq: StableDiffusionImg2ImgProcessingAPI):
- sampler_index = sampler_to_index(img2imgreq.sampler_index)
-
- if sampler_index is None:
- raise HTTPException(status_code=404, detail="Sampler not found")
-
-
init_images = img2imgreq.init_images
if init_images is None:
raise HTTPException(status_code=404, detail="Init image not found")
@@ -123,10 +135,9 @@ class Api:
if mask:
mask = decode_base64_to_image(mask)
-
populate = img2imgreq.copy(update={ # Override __init__ params
"sd_model": shared.sd_model,
- "sampler_index": sampler_index[0],
+ "sampler_name": validate_sampler_name(img2imgreq.sampler_index),
"do_not_save_samples": True,
"do_not_save_grid": True,
"mask": mask
@@ -231,10 +242,7 @@ class Api:
if interrogatereq.model == "clip":
processed = shared.interrogator.interrogate(img)
elif interrogatereq.model == "deepdanbooru":
- if shared.cmd_opts.deepdanbooru:
- processed = get_deepbooru_tags(img)
- else:
- raise HTTPException(status_code=404, detail="Model not found. Add --deepdanbooru when launching for using the model.")
+ processed = deepbooru.model.tag(img)
else:
raise HTTPException(status_code=404, detail="Model not found")
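With the new in-process torch implementation (see the modules/deepbooru.py diff below), `deepbooru.model.tag(img)` is always available to this route and the `--deepdanbooru` launch flag is gone. A hedged sketch of exercising it over the API (the request field names follow the `InterrogateRequest` model, which is not shown in this diff):

```python
# Sketch: request danbooru-style tags for an image via the interrogate endpoint.
import base64
import requests

with open("image.png", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode()

resp = requests.post(
    "http://127.0.0.1:7860/sdapi/v1/interrogate",
    json={"image": image_b64, "model": "deepdanbooru"},
)
print(resp.json()["caption"])  # comma-separated tag string
```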
@@ -245,6 +253,9 @@ class Api:
return {}
+ def skip(self):
+ shared.state.skip()
+
def get_config(self):
options = {}
for key in shared.opts.data.keys():
@@ -256,14 +267,9 @@ class Api:
return options
- def set_config(self, req: OptionsModel):
- # currently req has all options fields even if you send a dict like { "send_seed": false }, which means it will
- # overwrite all options with default values.
- raise RuntimeError('Setting options via API is not supported')
-
- reqDict = vars(req)
- for o in reqDict:
- setattr(shared.opts, o, reqDict[o])
+ def set_config(self, req: Dict[str, Any]):
+ for k, v in req.items():
+ shared.opts.set(k, v)
shared.opts.save(shared.config_filename)
return
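The reworked `set_config` takes a plain JSON object and updates only the keys it receives, instead of requiring a full `OptionsModel` (which previously risked resetting every unspecified option to its default). A sketch of a partial update (the option key is one example; any key registered in `shared.opts` should work):

```python
# Sketch: partial options update; options not listed keep their current values.
import requests

requests.post(
    "http://127.0.0.1:7860/sdapi/v1/options",
    json={"CLIP_stop_at_last_layers": 2},  # illustrative option key
)
```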
@@ -272,7 +278,7 @@ class Api:
return vars(shared.cmd_opts)
def get_samplers(self):
- return [{"name":sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in all_samplers]
+ return [{"name": sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in sd_samplers.all_samplers]
def get_upscalers(self):
upscalers = []
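`get_samplers` now reads `sd_samplers.all_samplers` through the module reference rather than a direct import, and its output is the natural source of valid `sampler_index` names for the generation endpoints, e.g.:

```python
# Sketch: list the sampler names the server accepts.
import requests

samplers = requests.get("http://127.0.0.1:7860/sdapi/v1/samplers").json()
print([s["name"] for s in samplers])
```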
diff --git a/modules/api/models.py b/modules/api/models.py
index f9cd929e..f77951fc 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -176,9 +176,9 @@ class InterrogateResponse(BaseModel):
caption: str = Field(default=None, title="Caption", description="The generated caption for the image.")
fields = {}
-for key, value in opts.data.items():
- metadata = opts.data_labels.get(key)
- optType = opts.typemap.get(type(value), type(value))
+for key, metadata in opts.data_labels.items():
+ value = opts.data.get(key)
+ optType = opts.typemap.get(type(metadata.default), type(value))
if (metadata is not None):
fields.update({key: (Optional[optType], Field(
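Iterating `opts.data_labels` (the registry of all known options) instead of `opts.data` (only options the user has saved) means every option gets a field, typed from its default even when no value is stored yet. A standalone sketch of the same pydantic pattern, with a hypothetical option registry:

```python
# Sketch: build an Optional field per registered option, typed from its default.
from typing import Optional
from pydantic import Field, create_model

defaults = {"eta_noise_seed_delta": 0}  # hypothetical option -> default mapping
fields = {
    key: (Optional[type(value)], Field(default=value, title=key))
    for key, value in defaults.items()
}
OptionsModel = create_model("OptionsModel", **fields)
print(OptionsModel().eta_noise_seed_delta)  # -> 0
```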
diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py
index e6d9fa4f..ab40d842 100644
--- a/modules/codeformer_model.py
+++ b/modules/codeformer_model.py
@@ -36,6 +36,7 @@ def setup_model(dirname):
from basicsr.utils.download_util import load_file_from_url
from basicsr.utils import imwrite, img2tensor, tensor2img
from facelib.utils.face_restoration_helper import FaceRestoreHelper
+ from facelib.detection.retinaface import retinaface
from modules.shared import cmd_opts
net_class = CodeFormer
@@ -65,6 +66,8 @@ def setup_model(dirname):
net.load_state_dict(checkpoint)
net.eval()
+ if hasattr(retinaface, 'device'):
+ retinaface.device = devices.device_codeformer
face_helper = FaceRestoreHelper(1, face_size=512, crop_ratio=(1, 1), det_model='retinaface_resnet50', save_ext='png', use_parse=True, device=devices.device_codeformer)
self.net = net
diff --git a/modules/deepbooru.py b/modules/deepbooru.py
index 8bbc90a4..b9066d81 100644
--- a/modules/deepbooru.py
+++ b/modules/deepbooru.py
@@ -1,173 +1,97 @@
-import os.path
-from concurrent.futures import ProcessPoolExecutor
-import multiprocessing
-import time
+import os
import re
+import torch
+from PIL import Image
+import numpy as np
+
+from modules import modelloader, paths, deepbooru_model, devices, images, shared
+
re_special = re.compile(r'([\\()])')
-def get_deepbooru_tags(pil_image):
- """
- This method is for running only one image at a time for simple use. Used to the img2img interrogate.
- """
- from modules import shared # prevents circular reference
-
- try:
- create_deepbooru_process(shared.opts.interrogate_deepbooru_score_threshold, create_deepbooru_opts())
- return get_tags_from_process(pil_image)
- finally:
- release_process()
-
-
-OPT_INCLUDE_RANKS = "include_ranks"
-def create_deepbooru_opts():
- from modules import shared
-
- return {
- "use_spaces": shared.opts.deepbooru_use_spaces,
- "use_escape": shared.opts.deepbooru_escape,
- "alpha_sort": shared.opts.deepbooru_sort_alpha,
- OPT_INCLUDE_RANKS: shared.opts.interrogate_return_ranks,
- }
-
-
-def deepbooru_process(queue, deepbooru_process_return, threshold, deepbooru_opts):
- model, tags = get_deepbooru_tags_model()
- while True: # while process is running, keep monitoring queue for new image
- pil_image = queue.get()
- if pil_image == "QUIT":
- break
- else:
- deepbooru_process_return["value"] = get_deepbooru_tags_from_model(model, tags, pil_image, threshold, deepbooru_opts)
-
-
-def create_deepbooru_process(threshold, deepbooru_opts):
- """
- Creates deepbooru process. A queue is created to send images into the process. This enables multiple images