From fe9740d2f5fa057e02529f8a81de21333adf4234 Mon Sep 17 00:00:00 2001 From: judgeou Date: Sun, 23 Oct 2022 20:40:23 +0800 Subject: update deepdanbooru version --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index 333f308a..8affd410 100644 --- a/launch.py +++ b/launch.py @@ -111,7 +111,7 @@ def prepare_enviroment(): gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") - deepdanbooru_package = os.environ.get('DEEPDANBOORU_PACKAGE', "git+https://github.com/KichangKim/DeepDanbooru.git@edf73df4cdaeea2cf00e9ac08bd8a9026b7a7b26") + deepdanbooru_package = os.environ.get('DEEPDANBOORU_PACKAGE', "git+https://github.com/KichangKim/DeepDanbooru.git@d91a2963bf87c6a770d74894667e9ffa9f6de7ff") xformers_windows_package = os.environ.get('XFORMERS_WINDOWS_PACKAGE', 'https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl') -- cgit v1.2.3 From 59dfe0845d964868e92572c78a420b6d68c46ea4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 30 Oct 2022 08:22:44 +0300 Subject: launch tests from launch.py with --tests commandline argument --- .gitignore | 2 ++ launch.py | 19 +++++++++++++++++++ run_tests.bat | 15 --------------- test/extras_test.py | 6 +++--- test/img2img_test.py | 12 ++++++------ test/server_poll.py | 28 +++++++++++++++------------- test/txt2img_test.py | 8 +++++--- 7 files changed, 50 insertions(+), 40 deletions(-) delete mode 100644 run_tests.bat (limited to 'launch.py') diff --git a/.gitignore b/.gitignore index 8fa05852..ee53044c 100644 --- a/.gitignore +++ b/.gitignore @@ -29,3 +29,5 @@ notification.mp3 /textual_inversion .vscode /extensions +/test/stdout.txt +/test/stderr.txt diff --git a/launch.py b/launch.py index 8affd410..33f98343 100644 --- a/launch.py +++ b/launch.py @@ -128,10 +128,12 @@ def prepare_enviroment(): blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9") sys.argv += shlex.split(commandline_args) + test_argv = [x for x in sys.argv if x != '--tests'] sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test') sys.argv, reinstall_xformers = extract_arg(sys.argv, '--reinstall-xformers') sys.argv, update_check = extract_arg(sys.argv, '--update-check') + sys.argv, run_tests = extract_arg(sys.argv, '--tests') xformers = '--xformers' in sys.argv deepdanbooru = '--deepdanbooru' in sys.argv ngrok = '--ngrok' in sys.argv @@ -194,6 +196,23 @@ def prepare_enviroment(): print("Exiting because of --exit argument") exit(0) + if run_tests: + tests(test_argv) + exit(0) + + +def tests(argv): + print(f"Launching Web UI in another process for testing with arguments: {' '.join(argv[1:])}") + + with open('test/stdout.txt', "w", encoding="utf8") as stdout, open('test/stderr.txt', "w", encoding="utf8") as stderr: + proc = subprocess.Popen([sys.executable, *argv], stdout=stdout, stderr=stderr) + + import test.server_poll + test.server_poll.run_tests() + + print(f"Stopping Web UI process with id {proc.pid}") + proc.kill() + def start_webui(): print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}") diff --git a/run_tests.bat b/run_tests.bat deleted file mode 100644 index 3a63f034..00000000 --- a/run_tests.bat +++ /dev/null @@ -1,15 +0,0 @@ -@echo 
off -set ERROR_REPORTING=FALSE -set COMMANDLINE_ARGS= --api -echo Launching SDWebUI... -start "SDWebUITest" webui.bat - -if not defined PYTHON (set PYTHON=python) -if not defined VENV_DIR (set VENV_DIR=venv) -set PYTHON="%~dp0%VENV_DIR%\Scripts\Python.exe" -%PYTHON% test/server_poll.py -for /f "tokens=2 delims=," %%a in ('tasklist /v /fo csv ^| findstr /i "SDWebUITest"') do set "$PID=%%a" - -taskkill /PID %$PID% >nul 2>&1 - -pause diff --git a/test/extras_test.py b/test/extras_test.py index 2e1764d9..9b8ce0f0 100644 --- a/test/extras_test.py +++ b/test/extras_test.py @@ -1,7 +1,5 @@ import unittest -import requests -from gradio.processing_utils import encode_pil_to_base64 -from PIL import Image + class TestExtrasWorking(unittest.TestCase): def setUp(self): @@ -22,8 +20,10 @@ class TestExtrasWorking(unittest.TestCase): "image": "" } + class TestExtrasCorrectness(unittest.TestCase): pass + if __name__ == "__main__": unittest.main() diff --git a/test/img2img_test.py b/test/img2img_test.py index 61e3e285..012a9580 100644 --- a/test/img2img_test.py +++ b/test/img2img_test.py @@ -3,13 +3,12 @@ import requests from gradio.processing_utils import encode_pil_to_base64 from PIL import Image + class TestImg2ImgWorking(unittest.TestCase): def setUp(self): self.url_img2img = "http://localhost:7860/sdapi/v1/img2img" self.simple_img2img = { - "init_images": [ - encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png")) - ], + "init_images": [encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png"))], "resize_mode": 0, "denoising_strength": 0.75, "mask": None, @@ -19,9 +18,7 @@ class TestImg2ImgWorking(unittest.TestCase): "inpaint_full_res_padding": 0, "inpainting_mask_invert": 0, "prompt": "example prompt", - "styles": [ - "" - ], + "styles": [], "seed": -1, "subseed": -1, "subseed_strength": 0, @@ -45,6 +42,7 @@ class TestImg2ImgWorking(unittest.TestCase): "sampler_index": "Euler a", "include_init_images": False } + def test_img2img_simple_performed(self): self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) @@ -52,8 +50,10 @@ class TestImg2ImgWorking(unittest.TestCase): self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(r"test/test_files/mask_basic.png")) self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) + class TestImg2ImgCorrectness(unittest.TestCase): pass + if __name__ == "__main__": unittest.main() diff --git a/test/server_poll.py b/test/server_poll.py index 8c0436f8..eeefb7eb 100644 --- a/test/server_poll.py +++ b/test/server_poll.py @@ -2,16 +2,18 @@ import unittest import requests import time -timeout_threshold = 240 -start_time = time.time() -while time.time()-start_time < timeout_threshold: - try: - requests.head("http://localhost:7860/") - break - except requests.exceptions.ConnectionError: - pass -if time.time()-start_time < timeout_threshold: - suite = unittest.TestLoader().discover('', pattern='*_test.py') - result = unittest.TextTestRunner(verbosity=2).run(suite) -else: - print("Launch unsuccessful") + +def run_tests(): + timeout_threshold = 240 + start_time = time.time() + while time.time()-start_time < timeout_threshold: + try: + requests.head("http://localhost:7860/") + break + except requests.exceptions.ConnectionError: + pass + if time.time()-start_time < timeout_threshold: + suite = unittest.TestLoader().discover('', pattern='*_test.py') + result = unittest.TextTestRunner(verbosity=2).run(suite) + else: + print("Launch unsuccessful") diff --git 
a/test/txt2img_test.py b/test/txt2img_test.py index ad27686a..1936e07e 100644 --- a/test/txt2img_test.py +++ b/test/txt2img_test.py @@ -1,6 +1,7 @@ import unittest import requests + class TestTxt2ImgWorking(unittest.TestCase): def setUp(self): self.url_txt2img = "http://localhost:7860/sdapi/v1/txt2img" @@ -10,9 +11,7 @@ class TestTxt2ImgWorking(unittest.TestCase): "firstphase_width": 0, "firstphase_height": 0, "prompt": "example prompt", - "styles": [ - "" - ], + "styles": [], "seed": -1, "subseed": -1, "subseed_strength": 0, @@ -34,6 +33,7 @@ class TestTxt2ImgWorking(unittest.TestCase): "s_noise": 1, "sampler_index": "Euler a" } + def test_txt2img_simple_performed(self): self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) @@ -65,8 +65,10 @@ class TestTxt2ImgWorking(unittest.TestCase): self.simple_txt2img["n_iter"] = 2 self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) + class TestTxt2ImgCorrectness(unittest.TestCase): pass + if __name__ == "__main__": unittest.main() -- cgit v1.2.3 From 5a6e0cfba675c0f11ade7124cbeec1356c77beb2 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 30 Oct 2022 08:28:36 +0300 Subject: always add --api when running tests --- launch.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'launch.py') diff --git a/launch.py b/launch.py index 33f98343..958336f2 100644 --- a/launch.py +++ b/launch.py @@ -202,6 +202,9 @@ def prepare_enviroment(): def tests(argv): + if "--api" not in argv: + argv.append("--api") + print(f"Launching Web UI in another process for testing with arguments: {' '.join(argv[1:])}") with open('test/stdout.txt', "w", encoding="utf8") as stdout, open('test/stderr.txt', "w", encoding="utf8") as stderr: -- cgit v1.2.3 From d35bf649456da2558cbb6f2ea16fa1606022b7e7 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 1 Nov 2022 14:19:24 +0300 Subject: make launch.py run installers for extensions that have ones add some more classes to safety module for an extension --- launch.py | 22 ++++++++++++++++++++-- modules/safe.py | 2 +- 2 files changed, 21 insertions(+), 3 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index 958336f2..0d90d553 100644 --- a/launch.py +++ b/launch.py @@ -7,6 +7,7 @@ import shlex import platform dir_repos = "repositories" +dir_extensions = "extensions" python = sys.executable git = os.environ.get('GIT', "git") index_url = os.environ.get('INDEX_URL', "") @@ -101,9 +102,24 @@ def version_check(commit): else: print("Not a git clone, can't perform version check.") except Exception as e: - print("versipm check failed",e) + print("version check failed", e) + + +def run_extensions_installers(): + if not os.path.isdir(dir_extensions): + return + + for dirname_extension in os.listdir(dir_extensions): + path_installer = os.path.join(dir_extensions, dirname_extension, "install.py") + if not os.path.isfile(path_installer): + continue + + try: + print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {dirname_extension}")) + except Exception as e: + print(e, file=sys.stderr) + - def prepare_enviroment(): torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") @@ -189,6 +205,8 @@ def prepare_enviroment(): run_pip(f"install -r {requirements_file}", "requirements for Web 
UI") + run_extensions_installers() + if update_check: version_check(commit) diff --git a/modules/safe.py b/modules/safe.py index 399165a1..348a24fc 100644 --- a/modules/safe.py +++ b/modules/safe.py @@ -32,7 +32,7 @@ class RestrictedUnpickler(pickle.Unpickler): return getattr(collections, name) if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter']: return getattr(torch._utils, name) - if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage']: + if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage', 'ByteStorage']: return getattr(torch, name) if module == 'torch.nn.modules.container' and name in ['ParameterDict']: return getattr(torch.nn.modules.container, name) -- cgit v1.2.3 From b85e83c3bd869c3f1ffacf8d3ff97bd9d406acff Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 1 Nov 2022 14:48:53 +0300 Subject: add PYTHONPATH for extension's install.py --- launch.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index 0d90d553..ff2f74ba 100644 --- a/launch.py +++ b/launch.py @@ -17,11 +17,11 @@ def extract_arg(args, name): return [x for x in args if x != name], name in args -def run(command, desc=None, errdesc=None): +def run(command, desc=None, errdesc=None, custom_env=None): if desc is not None: print(desc) - result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env) if result.returncode != 0: @@ -115,7 +115,10 @@ def run_extensions_installers(): continue try: - print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {dirname_extension}")) + env = os.environ.copy() + env['PYTHONPATH'] = os.path.abspath(".") + + print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {dirname_extension}", custom_env=env)) except Exception as e: print(e, file=sys.stderr) -- cgit v1.2.3 From fb1374791bf4b4c9b49de5378f29b12fdabcac97 Mon Sep 17 00:00:00 2001 From: Billy Cao Date: Thu, 3 Nov 2022 13:08:11 +0800 Subject: Fix --nowebui argument being ineffective --- launch.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index 958336f2..b061bce6 100644 --- a/launch.py +++ b/launch.py @@ -217,12 +217,15 @@ def tests(argv): proc.kill() -def start_webui(): - print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}") +def start(): + print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}") import webui - webui.webui() + if '--nowebui' in sys.argv: + webui.api_only() + else: + webui.webui() if __name__ == "__main__": prepare_enviroment() - start_webui() + start() -- cgit v1.2.3 From c0f7dbda3361daaa3e315e747fe5bebb75ea55d0 Mon Sep 17 00:00:00 2001 From: hentailord85ez <112723046+hentailord85ez@users.noreply.github.com> Date: Fri, 4 Nov 2022 23:01:58 +0000 Subject: Update k-diffusion to release 0.0.10 --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index 2a51f20e..5fa11560 100644 --- a/launch.py +++ b/launch.py @@ -142,7 +142,7 @@ def prepare_enviroment(): stable_diffusion_commit_hash = 
os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc") taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6") - k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "f4e99857772fc3a126ba886aadf795a332774878") + k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "60e5042ca0da89c14d1dd59d73883280f8fce991") codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af") blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9") -- cgit v1.2.3 From 98947d173e3f1667eba29c904f681047dea9de90 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 12 Nov 2022 11:11:47 +0300 Subject: run installers for newly installed extensions --- launch.py | 26 +++++++++++++++----------- modules/ui_extensions.py | 3 +++ 2 files changed, 18 insertions(+), 11 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index 5fa11560..8e65676d 100644 --- a/launch.py +++ b/launch.py @@ -105,22 +105,26 @@ def version_check(commit): print("version check failed", e) +def run_extension_installer(extension_dir): + path_installer = os.path.join(extension_dir, "install.py") + if not os.path.isfile(path_installer): + return + + try: + env = os.environ.copy() + env['PYTHONPATH'] = os.path.abspath(".") + + print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env)) + except Exception as e: + print(e, file=sys.stderr) + + def run_extensions_installers(): if not os.path.isdir(dir_extensions): return for dirname_extension in os.listdir(dir_extensions): - path_installer = os.path.join(dir_extensions, dirname_extension, "install.py") - if not os.path.isfile(path_installer): - continue - - try: - env = os.environ.copy() - env['PYTHONPATH'] = os.path.abspath(".") - - print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {dirname_extension}", custom_env=env)) - except Exception as e: - print(e, file=sys.stderr) + run_extension_installer(os.path.join(dir_extensions, dirname_extension)) def prepare_enviroment(): diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 02ab9643..6671cb60 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -134,6 +134,9 @@ def install_extension_from_url(dirname, url): os.rename(tmpdir, target_dir) + import launch + launch.run_extension_installer(target_dir) + extensions.list_extensions() return [extension_table(), html.escape(f"Installed into {target_dir}. 
Use Installed tab to restart.")] finally: -- cgit v1.2.3 From 007f4f7314eabd9cc3a2b0d11889de49ad3c682a Mon Sep 17 00:00:00 2001 From: Vladimir Repin <32306715+mezotaken@users.noreply.github.com> Date: Sat, 12 Nov 2022 15:12:15 +0300 Subject: Tests cleaned up --- launch.py | 5 ++++- test/server_poll.py | 7 ++++--- test/test_files/empty.pt | Bin 0 -> 431 bytes test/txt2img_test.py | 4 +++- test/utils_test.py | 18 +++++++++--------- 5 files changed, 20 insertions(+), 14 deletions(-) create mode 100644 test/test_files/empty.pt (limited to 'launch.py') diff --git a/launch.py b/launch.py index 8e65676d..6822a01d 100644 --- a/launch.py +++ b/launch.py @@ -229,6 +229,9 @@ def prepare_enviroment(): def tests(argv): if "--api" not in argv: argv.append("--api") + if "--ckpt" not in argv: + argv.append("--ckpt") + argv.append("./test/test_files/empty.pt") print(f"Launching Web UI in another process for testing with arguments: {' '.join(argv[1:])}") @@ -236,7 +239,7 @@ def tests(argv): proc = subprocess.Popen([sys.executable, *argv], stdout=stdout, stderr=stderr) import test.server_poll - test.server_poll.run_tests() + test.server_poll.run_tests(proc) print(f"Stopping Web UI process with id {proc.pid}") proc.kill() diff --git a/test/server_poll.py b/test/server_poll.py index eeefb7eb..8e63b450 100644 --- a/test/server_poll.py +++ b/test/server_poll.py @@ -3,7 +3,7 @@ import requests import time -def run_tests(): +def run_tests(proc): timeout_threshold = 240 start_time = time.time() while time.time()-start_time < timeout_threshold: @@ -11,8 +11,9 @@ def run_tests(): requests.head("http://localhost:7860/") break except requests.exceptions.ConnectionError: - pass - if time.time()-start_time < timeout_threshold: + if proc.poll() is not None: + break + if proc.poll() is None: suite = unittest.TestLoader().discover('', pattern='*_test.py') result = unittest.TextTestRunner(verbosity=2).run(suite) else: diff --git a/test/test_files/empty.pt b/test/test_files/empty.pt new file mode 100644 index 00000000..c6ac59eb Binary files /dev/null and b/test/test_files/empty.pt differ diff --git a/test/txt2img_test.py b/test/txt2img_test.py index 1936e07e..ce752085 100644 --- a/test/txt2img_test.py +++ b/test/txt2img_test.py @@ -53,13 +53,15 @@ class TestTxt2ImgWorking(unittest.TestCase): self.simple_txt2img["restore_faces"] = True self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - def test_txt2img_with_tiling_faces_performed(self): + def test_txt2img_with_tiling_performed(self): self.simple_txt2img["tiling"] = True self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) def test_txt2img_with_vanilla_sampler_performed(self): self.simple_txt2img["sampler_index"] = "PLMS" self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) + self.simple_txt2img["sampler_index"] = "DDIM" + self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) def test_txt2img_multiple_batches_performed(self): self.simple_txt2img["n_iter"] = 2 diff --git a/test/utils_test.py b/test/utils_test.py index 65d3d177..be9e6bf8 100644 --- a/test/utils_test.py +++ b/test/utils_test.py @@ -18,19 +18,19 @@ class UtilsTests(unittest.TestCase): def test_options_get(self): self.assertEqual(requests.get(self.url_options).status_code, 200) - def test_options_write(self): - response = requests.get(self.url_options) - self.assertEqual(response.status_code, 200) + # def test_options_write(self): + # response = 
requests.get(self.url_options) + # self.assertEqual(response.status_code, 200) - pre_value = response.json()["send_seed"] + # pre_value = response.json()["send_seed"] - self.assertEqual(requests.post(self.url_options, json={"send_seed":not pre_value}).status_code, 200) + # self.assertEqual(requests.post(self.url_options, json={"send_seed":not pre_value}).status_code, 200) - response = requests.get(self.url_options) - self.assertEqual(response.status_code, 200) - self.assertEqual(response.json()["send_seed"], not pre_value) + # response = requests.get(self.url_options) + # self.assertEqual(response.status_code, 200) + # self.assertEqual(response.json()["send_seed"], not pre_value) - requests.post(self.url_options, json={"send_seed": pre_value}) + # requests.post(self.url_options, json={"send_seed": pre_value}) def test_cmd_flags(self): self.assertEqual(requests.get(self.url_cmd_flags).status_code, 200) -- cgit v1.2.3 From 084cf043900bb2c5503155ee2a24a52cc1db3eca Mon Sep 17 00:00:00 2001 From: Anas Abou Allaban Date: Sat, 12 Nov 2022 22:41:22 -0500 Subject: Fix env var names --- launch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index 8e65676d..0f84b5d1 100644 --- a/launch.py +++ b/launch.py @@ -139,9 +139,9 @@ def prepare_enviroment(): xformers_windows_package = os.environ.get('XFORMERS_WINDOWS_PACKAGE', 'https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl') stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/CompVis/stable-diffusion.git") - taming_transformers_repo = os.environ.get('TAMING_REANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git") + taming_transformers_repo = os.environ.get('TAMING_TRANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git") k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git') - codeformer_repo = os.environ.get('CODEFORMET_REPO', 'https://github.com/sczhou/CodeFormer.git') + codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git') blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git') stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc") -- cgit v1.2.3 From 93d6c0209ae55632b72751cf82740e32a0cd81bc Mon Sep 17 00:00:00 2001 From: Vladimir Repin <32306715+mezotaken@users.noreply.github.com> Date: Mon, 14 Nov 2022 13:39:22 +0300 Subject: Tests separated for github-actions CI --- .github/workflows/run_tests.yaml | 26 ++++++++++++ launch.py | 36 ++++++++++------ test/advanced_features/__init__.py | 0 test/advanced_features/extras_test.py | 29 +++++++++++++ test/advanced_features/txt2img_test.py | 47 +++++++++++++++++++++ test/basic_features/__init__.py | 0 test/basic_features/img2img_test.py | 55 ++++++++++++++++++++++++ test/basic_features/txt2img_test.py | 72 ++++++++++++++++++++++++++++++++ test/basic_features/utils_test.py | 67 ++++++++++++++++++++++++++++++ test/extras_test.py | 29 ------------- test/img2img_test.py | 59 -------------------------- test/server_poll.py | 6 ++- test/txt2img_test.py | 76 ---------------------------------- test/utils_test.py | 63 ---------------------------- 14 files changed, 324 insertions(+), 241 deletions(-) create mode 100644 .github/workflows/run_tests.yaml create mode 100644 test/advanced_features/__init__.py create mode 100644 
test/advanced_features/extras_test.py create mode 100644 test/advanced_features/txt2img_test.py create mode 100644 test/basic_features/__init__.py create mode 100644 test/basic_features/img2img_test.py create mode 100644 test/basic_features/txt2img_test.py create mode 100644 test/basic_features/utils_test.py delete mode 100644 test/extras_test.py delete mode 100644 test/img2img_test.py delete mode 100644 test/txt2img_test.py delete mode 100644 test/utils_test.py (limited to 'launch.py') diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml new file mode 100644 index 00000000..a56a8110 --- /dev/null +++ b/.github/workflows/run_tests.yaml @@ -0,0 +1,26 @@ +name: Run tests on CPU with empty model + +on: + - push + - pull_request + +jobs: + test: + runs-on: ubuntu-latest + steps: + - name: Checkout Code + uses: actions/checkout@v3 + - name: Set up Python 3.10 + uses: actions/setup-python@v3 + with: + python-version: 3.10.6 + - uses: actions/cache@v2 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }} + restore-keys: | + ${{ runner.os }}-pip- + - name: Run tests + run: | + export COMMANDLINE_ARGS="--tests basic_features --no-half --disable-opt-split-attention --use-cpu all" + python launch.py diff --git a/launch.py b/launch.py index 6822a01d..d0f502c2 100644 --- a/launch.py +++ b/launch.py @@ -17,6 +17,19 @@ def extract_arg(args, name): return [x for x in args if x != name], name in args +def extract_opt(args, name): + opt = None + is_present = False + if name in args: + is_present = True + idx = args.index(name) + del args[idx] + if idx < len(args) and args[idx][0] != "-": + opt = args[idx] + del args[idx] + return args, is_present, opt + + def run(command, desc=None, errdesc=None, custom_env=None): if desc is not None: print(desc) @@ -151,12 +164,11 @@ def prepare_enviroment(): blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9") sys.argv += shlex.split(commandline_args) - test_argv = [x for x in sys.argv if x != '--tests'] sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test') sys.argv, reinstall_xformers = extract_arg(sys.argv, '--reinstall-xformers') sys.argv, update_check = extract_arg(sys.argv, '--update-check') - sys.argv, run_tests = extract_arg(sys.argv, '--tests') + sys.argv, run_tests, test_dir = extract_opt(sys.argv, '--tests') xformers = '--xformers' in sys.argv deepdanbooru = '--deepdanbooru' in sys.argv ngrok = '--ngrok' in sys.argv @@ -222,24 +234,24 @@ def prepare_enviroment(): exit(0) if run_tests: - tests(test_argv) + tests(test_dir) exit(0) -def tests(argv): - if "--api" not in argv: - argv.append("--api") - if "--ckpt" not in argv: - argv.append("--ckpt") - argv.append("./test/test_files/empty.pt") +def tests(test_dir): + if "--api" not in sys.argv: + sys.argv.append("--api") + if "--ckpt" not in sys.argv: + sys.argv.append("--ckpt") + sys.argv.append("./test/test_files/empty.pt") - print(f"Launching Web UI in another process for testing with arguments: {' '.join(argv[1:])}") + print(f"Launching Web UI in another process for testing with arguments: {' '.join(sys.argv[1:])}") with open('test/stdout.txt', "w", encoding="utf8") as stdout, open('test/stderr.txt', "w", encoding="utf8") as stderr: - proc = subprocess.Popen([sys.executable, *argv], stdout=stdout, stderr=stderr) + proc = subprocess.Popen([sys.executable, *sys.argv], stdout=stdout, stderr=stderr) import test.server_poll - test.server_poll.run_tests(proc) + 
test.server_poll.run_tests(proc, test_dir) print(f"Stopping Web UI process with id {proc.pid}") proc.kill() diff --git a/test/advanced_features/__init__.py b/test/advanced_features/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/advanced_features/extras_test.py b/test/advanced_features/extras_test.py new file mode 100644 index 00000000..8763f8ed --- /dev/null +++ b/test/advanced_features/extras_test.py @@ -0,0 +1,29 @@ +import unittest + + +class TestExtrasWorking(unittest.TestCase): + def setUp(self): + self.url_img2img = "http://localhost:7860/sdapi/v1/extra-single-image" + self.simple_extras = { + "resize_mode": 0, + "show_extras_results": True, + "gfpgan_visibility": 0, + "codeformer_visibility": 0, + "codeformer_weight": 0, + "upscaling_resize": 2, + "upscaling_resize_w": 128, + "upscaling_resize_h": 128, + "upscaling_crop": True, + "upscaler_1": "None", + "upscaler_2": "None", + "extras_upscaler_2_visibility": 0, + "image": "" + } + + +class TestExtrasCorrectness(unittest.TestCase): + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/test/advanced_features/txt2img_test.py b/test/advanced_features/txt2img_test.py new file mode 100644 index 00000000..36ed7b9a --- /dev/null +++ b/test/advanced_features/txt2img_test.py @@ -0,0 +1,47 @@ +import unittest +import requests + + +class TestTxt2ImgWorking(unittest.TestCase): + def setUp(self): + self.url_txt2img = "http://localhost:7860/sdapi/v1/txt2img" + self.simple_txt2img = { + "enable_hr": False, + "denoising_strength": 0, + "firstphase_width": 0, + "firstphase_height": 0, + "prompt": "example prompt", + "styles": [], + "seed": -1, + "subseed": -1, + "subseed_strength": 0, + "seed_resize_from_h": -1, + "seed_resize_from_w": -1, + "batch_size": 1, + "n_iter": 1, + "steps": 3, + "cfg_scale": 7, + "width": 64, + "height": 64, + "restore_faces": False, + "tiling": False, + "negative_prompt": "", + "eta": 0, + "s_churn": 0, + "s_tmax": 0, + "s_tmin": 0, + "s_noise": 1, + "sampler_index": "Euler a" + } + + def test_txt2img_with_restore_faces_performed(self): + self.simple_txt2img["restore_faces"] = True + self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) + + +class TestTxt2ImgCorrectness(unittest.TestCase): + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/test/basic_features/__init__.py b/test/basic_features/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/basic_features/img2img_test.py b/test/basic_features/img2img_test.py new file mode 100644 index 00000000..0a9c1e8a --- /dev/null +++ b/test/basic_features/img2img_test.py @@ -0,0 +1,55 @@ +import unittest +import requests +from gradio.processing_utils import encode_pil_to_base64 +from PIL import Image + + +class TestImg2ImgWorking(unittest.TestCase): + def setUp(self): + self.url_img2img = "http://localhost:7860/sdapi/v1/img2img" + self.simple_img2img = { + "init_images": [encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png"))], + "resize_mode": 0, + "denoising_strength": 0.75, + "mask": None, + "mask_blur": 4, + "inpainting_fill": 0, + "inpaint_full_res": False, + "inpaint_full_res_padding": 0, + "inpainting_mask_invert": 0, + "prompt": "example prompt", + "styles": [], + "seed": -1, + "subseed": -1, + "subseed_strength": 0, + "seed_resize_from_h": -1, + "seed_resize_from_w": -1, + "batch_size": 1, + "n_iter": 1, + "steps": 3, + "cfg_scale": 7, + "width": 64, + "height": 64, + "restore_faces": False, + "tiling": False, + "negative_prompt": 
"", + "eta": 0, + "s_churn": 0, + "s_tmax": 0, + "s_tmin": 0, + "s_noise": 1, + "override_settings": {}, + "sampler_index": "Euler a", + "include_init_images": False + } + + def test_img2img_simple_performed(self): + self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) + + def test_inpainting_masked_performed(self): + self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(r"test/test_files/mask_basic.png")) + self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/basic_features/txt2img_test.py b/test/basic_features/txt2img_test.py new file mode 100644 index 00000000..fe4af999 --- /dev/null +++ b/test/basic_features/txt2img_test.py @@ -0,0 +1,72 @@ +import unittest +import requests + + +class TestTxt2ImgWorking(unittest.TestCase): + def setUp(self): + self.url_txt2img = "http://localhost:7860/sdapi/v1/txt2img" + self.simple_txt2img = { + "enable_hr": False, + "denoising_strength": 0, + "firstphase_width": 0, + "firstphase_height": 0, + "prompt": "example prompt", + "styles": [], + "seed": -1, + "subseed": -1, + "subseed_strength": 0, + "seed_resize_from_h": -1, + "seed_resize_from_w": -1, + "batch_size": 1, + "n_iter": 1, + "steps": 3, + "cfg_scale": 7, + "width": 64, + "height": 64, + "restore_faces": False, + "tiling": False, + "negative_prompt": "", + "eta": 0, + "s_churn": 0, + "s_tmax": 0, + "s_tmin": 0, + "s_noise": 1, + "sampler_index": "Euler a" + } + + def test_txt2img_simple_performed(self): + self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) + + def test_txt2img_with_negative_prompt_performed(self): + self.simple_txt2img["negative_prompt"] = "example negative prompt" + self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) + + def test_txt2img_not_square_image_performed(self): + self.simple_txt2img["height"] = 128 + self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) + + def test_txt2img_with_hrfix_performed(self): + self.simple_txt2img["enable_hr"] = True + self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) + + def test_txt2img_with_restore_faces_performed(self): + self.simple_txt2img["restore_faces"] = True + self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) + + def test_txt2img_with_tiling_performed(self): + self.simple_txt2img["tiling"] = True + self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) + + def test_txt2img_with_vanilla_sampler_performed(self): + self.simple_txt2img["sampler_index"] = "PLMS" + self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) + self.simple_txt2img["sampler_index"] = "DDIM" + self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) + + def test_txt2img_multiple_batches_performed(self): + self.simple_txt2img["n_iter"] = 2 + self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/basic_features/utils_test.py b/test/basic_features/utils_test.py new file mode 100644 index 00000000..9706db8b --- /dev/null +++ b/test/basic_features/utils_test.py @@ -0,0 +1,67 @@ +import unittest +import requests + +class UtilsTests(unittest.TestCase): + def setUp(self): + self.url_options = 
"http://localhost:7860/sdapi/v1/options" + self.url_cmd_flags = "http://localhost:7860/sdapi/v1/cmd-flags" + self.url_samplers = "http://localhost:7860/sdapi/v1/samplers" + self.url_upscalers = "http://localhost:7860/sdapi/v1/upscalers" + self.url_sd_models = "http://localhost:7860/sdapi/v1/sd-models" + self.url_hypernetworks = "http://localhost:7860/sdapi/v1/hypernetworks" + self.url_face_restorers = "http://localhost:7860/sdapi/v1/face-restorers" + self.url_realesrgan_models = "http://localhost:7860/sdapi/v1/realesrgan-models" + self.url_prompt_styles = "http://localhost:7860/sdapi/v1/prompt-styles" + self.url_artist_categories = "http://localhost:7860/sdapi/v1/artist-categories" + self.url_artists = "http://localhost:7860/sdapi/v1/artists" + + def test_options_get(self): + self.assertEqual(requests.get(self.url_options).status_code, 200) + + # def test_options_write(self): + # response = requests.get(self.url_options) + # self.assertEqual(response.status_code, 200) + + # pre_value = response.json()["send_seed"] + + # self.assertEqual(requests.post(self.url_options, json={"send_seed":not pre_value}).status_code, 200) + + # response = requests.get(self.url_options) + # self.assertEqual(response.status_code, 200) + # self.assertEqual(response.json()["send_seed"], not pre_value) + + # requests.post(self.url_options, json={"send_seed": pre_value}) + + def test_cmd_flags(self): + self.assertEqual(requests.get(self.url_cmd_flags).status_code, 200) + + def test_samplers(self): + self.assertEqual(requests.get(self.url_samplers).status_code, 200) + + def test_upscalers(self): + self.assertEqual(requests.get(self.url_upscalers).status_code, 200) + + def test_sd_models(self): + self.assertEqual(requests.get(self.url_sd_models).status_code, 200) + + def test_hypernetworks(self): + self.assertEqual(requests.get(self.url_hypernetworks).status_code, 200) + + def test_face_restorers(self): + self.assertEqual(requests.get(self.url_face_restorers).status_code, 200) + + def test_realesrgan_models(self): + self.assertEqual(requests.get(self.url_realesrgan_models).status_code, 200) + + def test_prompt_styles(self): + self.assertEqual(requests.get(self.url_prompt_styles).status_code, 200) + + def test_artist_categories(self): + self.assertEqual(requests.get(self.url_artist_categories).status_code, 200) + + def test_artists(self): + self.assertEqual(requests.get(self.url_artists).status_code, 200) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/extras_test.py b/test/extras_test.py deleted file mode 100644 index 9b8ce0f0..00000000 --- a/test/extras_test.py +++ /dev/null @@ -1,29 +0,0 @@ -import unittest - - -class TestExtrasWorking(unittest.TestCase): - def setUp(self): - self.url_img2img = "http://localhost:7860/sdapi/v1/extra-single-image" - self.simple_extras = { - "resize_mode": 0, - "show_extras_results": True, - "gfpgan_visibility": 0, - "codeformer_visibility": 0, - "codeformer_weight": 0, - "upscaling_resize": 2, - "upscaling_resize_w": 512, - "upscaling_resize_h": 512, - "upscaling_crop": True, - "upscaler_1": "None", - "upscaler_2": "None", - "extras_upscaler_2_visibility": 0, - "image": "" - } - - -class TestExtrasCorrectness(unittest.TestCase): - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/test/img2img_test.py b/test/img2img_test.py deleted file mode 100644 index 012a9580..00000000 --- a/test/img2img_test.py +++ /dev/null @@ -1,59 +0,0 @@ -import unittest -import requests -from gradio.processing_utils import encode_pil_to_base64 -from PIL import Image - - 
-class TestImg2ImgWorking(unittest.TestCase): - def setUp(self): - self.url_img2img = "http://localhost:7860/sdapi/v1/img2img" - self.simple_img2img = { - "init_images": [encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png"))], - "resize_mode": 0, - "denoising_strength": 0.75, - "mask": None, - "mask_blur": 4, - "inpainting_fill": 0, - "inpaint_full_res": False, - "inpaint_full_res_padding": 0, - "inpainting_mask_invert": 0, - "prompt": "example prompt", - "styles": [], - "seed": -1, - "subseed": -1, - "subseed_strength": 0, - "seed_resize_from_h": -1, - "seed_resize_from_w": -1, - "batch_size": 1, - "n_iter": 1, - "steps": 3, - "cfg_scale": 7, - "width": 64, - "height": 64, - "restore_faces": False, - "tiling": False, - "negative_prompt": "", - "eta": 0, - "s_churn": 0, - "s_tmax": 0, - "s_tmin": 0, - "s_noise": 1, - "override_settings": {}, - "sampler_index": "Euler a", - "include_init_images": False - } - - def test_img2img_simple_performed(self): - self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) - - def test_inpainting_masked_performed(self): - self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(r"test/test_files/mask_basic.png")) - self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) - - -class TestImg2ImgCorrectness(unittest.TestCase): - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/test/server_poll.py b/test/server_poll.py index 8e63b450..c71e906a 100644 --- a/test/server_poll.py +++ b/test/server_poll.py @@ -3,7 +3,7 @@ import requests import time -def run_tests(proc): +def run_tests(proc, test_dir): timeout_threshold = 240 start_time = time.time() while time.time()-start_time < timeout_threshold: @@ -14,7 +14,9 @@ def run_tests(proc): if proc.poll() is not None: break if proc.poll() is None: - suite = unittest.TestLoader().discover('', pattern='*_test.py') + if test_dir is None: + test_dir = "" + suite = unittest.TestLoader().discover(test_dir, pattern="*_test.py", top_level_dir="test") result = unittest.TextTestRunner(verbosity=2).run(suite) else: print("Launch unsuccessful") diff --git a/test/txt2img_test.py b/test/txt2img_test.py deleted file mode 100644 index ce752085..00000000 --- a/test/txt2img_test.py +++ /dev/null @@ -1,76 +0,0 @@ -import unittest -import requests - - -class TestTxt2ImgWorking(unittest.TestCase): - def setUp(self): - self.url_txt2img = "http://localhost:7860/sdapi/v1/txt2img" - self.simple_txt2img = { - "enable_hr": False, - "denoising_strength": 0, - "firstphase_width": 0, - "firstphase_height": 0, - "prompt": "example prompt", - "styles": [], - "seed": -1, - "subseed": -1, - "subseed_strength": 0, - "seed_resize_from_h": -1, - "seed_resize_from_w": -1, - "batch_size": 1, - "n_iter": 1, - "steps": 3, - "cfg_scale": 7, - "width": 64, - "height": 64, - "restore_faces": False, - "tiling": False, - "negative_prompt": "", - "eta": 0, - "s_churn": 0, - "s_tmax": 0, - "s_tmin": 0, - "s_noise": 1, - "sampler_index": "Euler a" - } - - def test_txt2img_simple_performed(self): - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - - def test_txt2img_with_negative_prompt_performed(self): - self.simple_txt2img["negative_prompt"] = "example negative prompt" - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - - def test_txt2img_not_square_image_performed(self): - self.simple_txt2img["height"] = 128 - self.assertEqual(requests.post(self.url_txt2img, 
json=self.simple_txt2img).status_code, 200) - - def test_txt2img_with_hrfix_performed(self): - self.simple_txt2img["enable_hr"] = True - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - - def test_txt2img_with_restore_faces_performed(self): - self.simple_txt2img["restore_faces"] = True - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - - def test_txt2img_with_tiling_performed(self): - self.simple_txt2img["tiling"] = True - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - - def test_txt2img_with_vanilla_sampler_performed(self): - self.simple_txt2img["sampler_index"] = "PLMS" - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - self.simple_txt2img["sampler_index"] = "DDIM" - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - - def test_txt2img_multiple_batches_performed(self): - self.simple_txt2img["n_iter"] = 2 - self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) - - -class TestTxt2ImgCorrectness(unittest.TestCase): - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/test/utils_test.py b/test/utils_test.py deleted file mode 100644 index be9e6bf8..00000000 --- a/test/utils_test.py +++ /dev/null @@ -1,63 +0,0 @@ -import unittest -import requests - -class UtilsTests(unittest.TestCase): - def setUp(self): - self.url_options = "http://localhost:7860/sdapi/v1/options" - self.url_cmd_flags = "http://localhost:7860/sdapi/v1/cmd-flags" - self.url_samplers = "http://localhost:7860/sdapi/v1/samplers" - self.url_upscalers = "http://localhost:7860/sdapi/v1/upscalers" - self.url_sd_models = "http://localhost:7860/sdapi/v1/sd-models" - self.url_hypernetworks = "http://localhost:7860/sdapi/v1/hypernetworks" - self.url_face_restorers = "http://localhost:7860/sdapi/v1/face-restorers" - self.url_realesrgan_models = "http://localhost:7860/sdapi/v1/realesrgan-models" - self.url_prompt_styles = "http://localhost:7860/sdapi/v1/prompt-styles" - self.url_artist_categories = "http://localhost:7860/sdapi/v1/artist-categories" - self.url_artists = "http://localhost:7860/sdapi/v1/artists" - - def test_options_get(self): - self.assertEqual(requests.get(self.url_options).status_code, 200) - - # def test_options_write(self): - # response = requests.get(self.url_options) - # self.assertEqual(response.status_code, 200) - - # pre_value = response.json()["send_seed"] - - # self.assertEqual(requests.post(self.url_options, json={"send_seed":not pre_value}).status_code, 200) - - # response = requests.get(self.url_options) - # self.assertEqual(response.status_code, 200) - # self.assertEqual(response.json()["send_seed"], not pre_value) - - # requests.post(self.url_options, json={"send_seed": pre_value}) - - def test_cmd_flags(self): - self.assertEqual(requests.get(self.url_cmd_flags).status_code, 200) - - def test_samplers(self): - self.assertEqual(requests.get(self.url_samplers).status_code, 200) - - def test_upscalers(self): - self.assertEqual(requests.get(self.url_upscalers).status_code, 200) - - def test_sd_models(self): - self.assertEqual(requests.get(self.url_sd_models).status_code, 200) - - def test_hypernetworks(self): - self.assertEqual(requests.get(self.url_hypernetworks).status_code, 200) - - def test_face_restorers(self): - self.assertEqual(requests.get(self.url_face_restorers).status_code, 200) - - def test_realesrgan_models(self): - 
self.assertEqual(requests.get(self.url_realesrgan_models).status_code, 200) - - def test_prompt_styles(self): - self.assertEqual(requests.get(self.url_prompt_styles).status_code, 200) - - def test_artist_categories(self): - self.assertEqual(requests.get(self.url_artist_categories).status_code, 200) - - def test_artists(self): - self.assertEqual(requests.get(self.url_artists).status_code, 200) \ No newline at end of file -- cgit v1.2.3 From 0646040667b59526ac8346d53efd14dc0e75b01e Mon Sep 17 00:00:00 2001 From: Vladimir Repin <32306715+mezotaken@users.noreply.github.com> Date: Mon, 14 Nov 2022 14:36:07 +0300 Subject: Propagate test error and try it without localhost --- launch.py | 7 ++++--- test/advanced_features/extras_test.py | 2 +- test/advanced_features/txt2img_test.py | 2 +- test/basic_features/img2img_test.py | 2 +- test/basic_features/txt2img_test.py | 2 +- test/basic_features/utils_test.py | 22 +++++++++++----------- test/server_poll.py | 4 +++- 7 files changed, 22 insertions(+), 19 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index d0f502c2..8ed1dffc 100644 --- a/launch.py +++ b/launch.py @@ -234,8 +234,8 @@ def prepare_enviroment(): exit(0) if run_tests: - tests(test_dir) - exit(0) + exitcode = tests(test_dir) + exit(exitcode) def tests(test_dir): @@ -251,10 +251,11 @@ def tests(test_dir): proc = subprocess.Popen([sys.executable, *sys.argv], stdout=stdout, stderr=stderr) import test.server_poll - test.server_poll.run_tests(proc, test_dir) + exitcode = test.server_poll.run_tests(proc, test_dir) print(f"Stopping Web UI process with id {proc.pid}") proc.kill() + return exitcode def start(): diff --git a/test/advanced_features/extras_test.py b/test/advanced_features/extras_test.py index 8763f8ed..abdd5aa2 100644 --- a/test/advanced_features/extras_test.py +++ b/test/advanced_features/extras_test.py @@ -3,7 +3,7 @@ import unittest class TestExtrasWorking(unittest.TestCase): def setUp(self): - self.url_img2img = "http://localhost:7860/sdapi/v1/extra-single-image" + self.url_img2img = "http://127.0.0.1:7860/sdapi/v1/extra-single-image" self.simple_extras = { "resize_mode": 0, "show_extras_results": True, diff --git a/test/advanced_features/txt2img_test.py b/test/advanced_features/txt2img_test.py index 36ed7b9a..6ab5a242 100644 --- a/test/advanced_features/txt2img_test.py +++ b/test/advanced_features/txt2img_test.py @@ -4,7 +4,7 @@ import requests class TestTxt2ImgWorking(unittest.TestCase): def setUp(self): - self.url_txt2img = "http://localhost:7860/sdapi/v1/txt2img" + self.url_txt2img = "http://127.0.0.1:7860/sdapi/v1/txt2img" self.simple_txt2img = { "enable_hr": False, "denoising_strength": 0, diff --git a/test/basic_features/img2img_test.py b/test/basic_features/img2img_test.py index 0a9c1e8a..b9a34113 100644 --- a/test/basic_features/img2img_test.py +++ b/test/basic_features/img2img_test.py @@ -6,7 +6,7 @@ from PIL import Image class TestImg2ImgWorking(unittest.TestCase): def setUp(self): - self.url_img2img = "http://localhost:7860/sdapi/v1/img2img" + self.url_img2img = "http://127.0.0.1:7860/sdapi/v1/img2img" self.simple_img2img = { "init_images": [encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png"))], "resize_mode": 0, diff --git a/test/basic_features/txt2img_test.py b/test/basic_features/txt2img_test.py index fe4af999..0be675d9 100644 --- a/test/basic_features/txt2img_test.py +++ b/test/basic_features/txt2img_test.py @@ -4,7 +4,7 @@ import requests class TestTxt2ImgWorking(unittest.TestCase): def setUp(self): - self.url_txt2img = 
"http://localhost:7860/sdapi/v1/txt2img" + self.url_txt2img = "http://127.0.0.1:7860/sdapi/v1/txt2img" self.simple_txt2img = { "enable_hr": False, "denoising_strength": 0, diff --git a/test/basic_features/utils_test.py b/test/basic_features/utils_test.py index 9706db8b..592f7619 100644 --- a/test/basic_features/utils_test.py +++ b/test/basic_features/utils_test.py @@ -3,17 +3,17 @@ import requests class UtilsTests(unittest.TestCase): def setUp(self): - self.url_options = "http://localhost:7860/sdapi/v1/options" - self.url_cmd_flags = "http://localhost:7860/sdapi/v1/cmd-flags" - self.url_samplers = "http://localhost:7860/sdapi/v1/samplers" - self.url_upscalers = "http://localhost:7860/sdapi/v1/upscalers" - self.url_sd_models = "http://localhost:7860/sdapi/v1/sd-models" - self.url_hypernetworks = "http://localhost:7860/sdapi/v1/hypernetworks" - self.url_face_restorers = "http://localhost:7860/sdapi/v1/face-restorers" - self.url_realesrgan_models = "http://localhost:7860/sdapi/v1/realesrgan-models" - self.url_prompt_styles = "http://localhost:7860/sdapi/v1/prompt-styles" - self.url_artist_categories = "http://localhost:7860/sdapi/v1/artist-categories" - self.url_artists = "http://localhost:7860/sdapi/v1/artists" + self.url_options = "http://127.0.0.1:7860/sdapi/v1/options" + self.url_cmd_flags = "http://127.0.0.1:7860/sdapi/v1/cmd-flags" + self.url_samplers = "http://127.0.0.1:7860/sdapi/v1/samplers" + self.url_upscalers = "http://127.0.0.1:7860/sdapi/v1/upscalers" + self.url_sd_models = "http://127.0.0.1:7860/sdapi/v1/sd-models" + self.url_hypernetworks = "http://127.0.0.1:7860/sdapi/v1/hypernetworks" + self.url_face_restorers = "http://127.0.0.1:7860/sdapi/v1/face-restorers" + self.url_realesrgan_models = "http://127.0.0.1:7860/sdapi/v1/realesrgan-models" + self.url_prompt_styles = "http://127.0.0.1:7860/sdapi/v1/prompt-styles" + self.url_artist_categories = "http://127.0.0.1:7860/sdapi/v1/artist-categories" + self.url_artists = "http://127.0.0.1:7860/sdapi/v1/artists" def test_options_get(self): self.assertEqual(requests.get(self.url_options).status_code, 200) diff --git a/test/server_poll.py b/test/server_poll.py index c71e906a..028fd476 100644 --- a/test/server_poll.py +++ b/test/server_poll.py @@ -8,7 +8,7 @@ def run_tests(proc, test_dir): start_time = time.time() while time.time()-start_time < timeout_threshold: try: - requests.head("http://localhost:7860/") + requests.head("http://127.0.0.1:7860/") break except requests.exceptions.ConnectionError: if proc.poll() is not None: @@ -18,5 +18,7 @@ def run_tests(proc, test_dir): test_dir = "" suite = unittest.TestLoader().discover(test_dir, pattern="*_test.py", top_level_dir="test") result = unittest.TextTestRunner(verbosity=2).run(suite) + return len(result.failures) else: print("Launch unsuccessful") + return 1 -- cgit v1.2.3 From 9e4f68acad4697fdf10004eb85d3f6f769c2c70b Mon Sep 17 00:00:00 2001 From: Vladimir Repin <32306715+mezotaken@users.noreply.github.com> Date: Mon, 14 Nov 2022 18:40:15 +0300 Subject: Stop exporting cl args and upload stdout and stderr as artifacts --- .github/workflows/run_tests.yaml | 11 +++++++++-- launch.py | 2 ++ test/advanced_features/extras_test.py | 2 +- test/advanced_features/txt2img_test.py | 2 +- test/basic_features/img2img_test.py | 2 +- test/basic_features/txt2img_test.py | 2 +- test/basic_features/utils_test.py | 22 +++++++++++----------- test/server_poll.py | 2 +- 8 files changed, 27 insertions(+), 18 deletions(-) (limited to 'launch.py') diff --git a/.github/workflows/run_tests.yaml 
b/.github/workflows/run_tests.yaml index 223a31b9..558b0c61 100644 --- a/.github/workflows/run_tests.yaml +++ b/.github/workflows/run_tests.yaml @@ -22,5 +22,12 @@ jobs: ${{ runner.os }}-pip- - name: Run tests run: | - export COMMANDLINE_ARGS="--tests basic_features --no-half --disable-opt-split-attention --use-cpu all --skip-torch-cuda-test --port 80" - python launch.py + python launch.py --tests basic_features --no-half --disable-opt-split-attention --use-cpu all --skip-torch-cuda-test + - name: Upload main app stdout-stderr + uses: actions/upload-artifact@v3 + if: always() + with: + name: stdout-stderr + path: | + ./test/stdout.txt + ./test/stderr.txt diff --git a/launch.py b/launch.py index 8ed1dffc..ed21d0e6 100644 --- a/launch.py +++ b/launch.py @@ -244,6 +244,8 @@ def tests(test_dir): if "--ckpt" not in sys.argv: sys.argv.append("--ckpt") sys.argv.append("./test/test_files/empty.pt") + if "--skip-torch-cuda-test" not in sys.argv: + sys.argv.append("--skip-torch-cuda-test") print(f"Launching Web UI in another process for testing with arguments: {' '.join(sys.argv[1:])}") diff --git a/test/advanced_features/extras_test.py b/test/advanced_features/extras_test.py index 4b8ae25a..8763f8ed 100644 --- a/test/advanced_features/extras_test.py +++ b/test/advanced_features/extras_test.py @@ -3,7 +3,7 @@ import unittest class TestExtrasWorking(unittest.TestCase): def setUp(self): - self.url_img2img = "http://localhost:80/sdapi/v1/extra-single-image" + self.url_img2img = "http://localhost:7860/sdapi/v1/extra-single-image" self.simple_extras = { "resize_mode": 0, "show_extras_results": True, diff --git a/test/advanced_features/txt2img_test.py b/test/advanced_features/txt2img_test.py index e6c3531a..36ed7b9a 100644 --- a/test/advanced_features/txt2img_test.py +++ b/test/advanced_features/txt2img_test.py @@ -4,7 +4,7 @@ import requests class TestTxt2ImgWorking(unittest.TestCase): def setUp(self): - self.url_txt2img = "http://localhost:80/sdapi/v1/txt2img" + self.url_txt2img = "http://localhost:7860/sdapi/v1/txt2img" self.simple_txt2img = { "enable_hr": False, "denoising_strength": 0, diff --git a/test/basic_features/img2img_test.py b/test/basic_features/img2img_test.py index c4c9a90f..0a9c1e8a 100644 --- a/test/basic_features/img2img_test.py +++ b/test/basic_features/img2img_test.py @@ -6,7 +6,7 @@ from PIL import Image class TestImg2ImgWorking(unittest.TestCase): def setUp(self): - self.url_img2img = "http://localhost:80/sdapi/v1/img2img" + self.url_img2img = "http://localhost:7860/sdapi/v1/img2img" self.simple_img2img = { "init_images": [encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png"))], "resize_mode": 0, diff --git a/test/basic_features/txt2img_test.py b/test/basic_features/txt2img_test.py index 4f1fc77d..fe4af999 100644 --- a/test/basic_features/txt2img_test.py +++ b/test/basic_features/txt2img_test.py @@ -4,7 +4,7 @@ import requests class TestTxt2ImgWorking(unittest.TestCase): def setUp(self): - self.url_txt2img = "http://localhost:80/sdapi/v1/txt2img" + self.url_txt2img = "http://localhost:7860/sdapi/v1/txt2img" self.simple_txt2img = { "enable_hr": False, "denoising_strength": 0, diff --git a/test/basic_features/utils_test.py b/test/basic_features/utils_test.py index fdb72b9a..9706db8b 100644 --- a/test/basic_features/utils_test.py +++ b/test/basic_features/utils_test.py @@ -3,17 +3,17 @@ import requests class UtilsTests(unittest.TestCase): def setUp(self): - self.url_options = "http://localhost:80/sdapi/v1/options" - self.url_cmd_flags = 
"http://localhost:80/sdapi/v1/cmd-flags" - self.url_samplers = "http://localhost:80/sdapi/v1/samplers" - self.url_upscalers = "http://localhost:80/sdapi/v1/upscalers" - self.url_sd_models = "http://localhost:80/sdapi/v1/sd-models" - self.url_hypernetworks = "http://localhost:80/sdapi/v1/hypernetworks" - self.url_face_restorers = "http://localhost:80/sdapi/v1/face-restorers" - self.url_realesrgan_models = "http://localhost:80/sdapi/v1/realesrgan-models" - self.url_prompt_styles = "http://localhost:80/sdapi/v1/prompt-styles" - self.url_artist_categories = "http://localhost:80/sdapi/v1/artist-categories" - self.url_artists = "http://localhost:80/sdapi/v1/artists" + self.url_options = "http://localhost:7860/sdapi/v1/options" + self.url_cmd_flags = "http://localhost:7860/sdapi/v1/cmd-flags" + self.url_samplers = "http://localhost:7860/sdapi/v1/samplers" + self.url_upscalers = "http://localhost:7860/sdapi/v1/upscalers" + self.url_sd_models = "http://localhost:7860/sdapi/v1/sd-models" + self.url_hypernetworks = "http://localhost:7860/sdapi/v1/hypernetworks" + self.url_face_restorers = "http://localhost:7860/sdapi/v1/face-restorers" + self.url_realesrgan_models = "http://localhost:7860/sdapi/v1/realesrgan-models" + self.url_prompt_styles = "http://localhost:7860/sdapi/v1/prompt-styles" + self.url_artist_categories = "http://localhost:7860/sdapi/v1/artist-categories" + self.url_artists = "http://localhost:7860/sdapi/v1/artists" def test_options_get(self): self.assertEqual(requests.get(self.url_options).status_code, 200) diff --git a/test/server_poll.py b/test/server_poll.py index e4462b6c..d4df697b 100644 --- a/test/server_poll.py +++ b/test/server_poll.py @@ -8,7 +8,7 @@ def run_tests(proc, test_dir): start_time = time.time() while time.time()-start_time < timeout_threshold: try: - requests.head("http://localhost:80/") + requests.head("http://localhost:7860/") break except requests.exceptions.ConnectionError: if proc.poll() is not None: -- cgit v1.2.3 From c81d440d876dfd2ab3560410f37442ef56fc6632 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 20 Nov 2022 16:39:20 +0300 Subject: moved deepdanbooru to pure pytorch implementation --- README.md | 2 +- launch.py | 5 - modules/api/api.py | 10 +- modules/deepbooru.py | 258 +++++------- modules/deepbooru_model.py | 676 ++++++++++++++++++++++++++++++++ modules/shared.py | 2 +- modules/textual_inversion/preprocess.py | 12 +- modules/ui.py | 7 +- 8 files changed, 777 insertions(+), 195 deletions(-) create mode 100644 modules/deepbooru_model.py (limited to 'launch.py') diff --git a/README.md b/README.md index 33508f31..5f5ab3aa 100644 --- a/README.md +++ b/README.md @@ -70,7 +70,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web - separate prompts using uppercase `AND` - also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2` - No token limit for prompts (original stable diffusion lets you use up to 75 tokens) -- DeepDanbooru integration, creates danbooru style tags for anime prompts (add --deepdanbooru to commandline args) +- DeepDanbooru integration, creates danbooru style tags for anime prompts - [xformers](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers), major speed increase for select cards: (add --xformers to commandline args) - via extension: [History tab](https://github.com/yfszzx/stable-diffusion-webui-images-browser): view, direct and delete images conveniently within the UI - Generate forever option diff --git a/launch.py b/launch.py index 
0f84b5d1..d2f1055c 100644 --- a/launch.py +++ b/launch.py @@ -134,7 +134,6 @@ def prepare_enviroment(): gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") - deepdanbooru_package = os.environ.get('DEEPDANBOORU_PACKAGE', "git+https://github.com/KichangKim/DeepDanbooru.git@d91a2963bf87c6a770d74894667e9ffa9f6de7ff") xformers_windows_package = os.environ.get('XFORMERS_WINDOWS_PACKAGE', 'https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl') @@ -158,7 +157,6 @@ def prepare_enviroment(): sys.argv, update_check = extract_arg(sys.argv, '--update-check') sys.argv, run_tests = extract_arg(sys.argv, '--tests') xformers = '--xformers' in sys.argv - deepdanbooru = '--deepdanbooru' in sys.argv ngrok = '--ngrok' in sys.argv try: @@ -193,9 +191,6 @@ def prepare_enviroment(): elif platform.system() == "Linux": run_pip("install xformers", "xformers") - if not is_installed("deepdanbooru") and deepdanbooru: - run_pip(f"install {deepdanbooru_package}#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru") - if not is_installed("pyngrok") and ngrok: run_pip("install pyngrok", "ngrok") diff --git a/modules/api/api.py b/modules/api/api.py index 79b2c818..7a567be3 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -9,7 +9,7 @@ from fastapi.security import HTTPBasic, HTTPBasicCredentials from secrets import compare_digest import modules.shared as shared -from modules import sd_samplers +from modules import sd_samplers, deepbooru from modules.api.models import * from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.extras import run_extras, run_pnginfo @@ -18,9 +18,6 @@ from modules.sd_models import checkpoints_list from modules.realesrgan_model import get_realesrgan_models from typing import List -if shared.cmd_opts.deepdanbooru: - from modules.deepbooru import get_deepbooru_tags - def upscaler_to_index(name: str): try: return [x.name.lower() for x in shared.sd_upscalers].index(name.lower()) @@ -245,10 +242,7 @@ class Api: if interrogatereq.model == "clip": processed = shared.interrogator.interrogate(img) elif interrogatereq.model == "deepdanbooru": - if shared.cmd_opts.deepdanbooru: - processed = get_deepbooru_tags(img) - else: - raise HTTPException(status_code=404, detail="Model not found. Add --deepdanbooru when launching for using the model.") + processed = deepbooru.model.tag(img) else: raise HTTPException(status_code=404, detail="Model not found") diff --git a/modules/deepbooru.py b/modules/deepbooru.py index 8bbc90a4..b9066d81 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -1,173 +1,97 @@ -import os.path -from concurrent.futures import ProcessPoolExecutor -import multiprocessing -import time +import os import re +import torch +from PIL import Image +import numpy as np + +from modules import modelloader, paths, deepbooru_model, devices, images, shared + re_special = re.compile(r'([\\()])') -def get_deepbooru_tags(pil_image): - """ - This method is for running only one image at a time for simple use. Used to the img2img interrogate. 
- """ - from modules import shared # prevents circular reference - - try: - create_deepbooru_process(shared.opts.interrogate_deepbooru_score_threshold, create_deepbooru_opts()) - return get_tags_from_process(pil_image) - finally: - release_process() - - -OPT_INCLUDE_RANKS = "include_ranks" -def create_deepbooru_opts(): - from modules import shared - - return { - "use_spaces": shared.opts.deepbooru_use_spaces, - "use_escape": shared.opts.deepbooru_escape, - "alpha_sort": shared.opts.deepbooru_sort_alpha, - OPT_INCLUDE_RANKS: shared.opts.interrogate_return_ranks, - } - - -def deepbooru_process(queue, deepbooru_process_return, threshold, deepbooru_opts): - model, tags = get_deepbooru_tags_model() - while True: # while process is running, keep monitoring queue for new image - pil_image = queue.get() - if pil_image == "QUIT": - break - else: - deepbooru_process_return["value"] = get_deepbooru_tags_from_model(model, tags, pil_image, threshold, deepbooru_opts) - - -def create_deepbooru_process(threshold, deepbooru_opts): - """ - Creates deepbooru process. A queue is created to send images into the process. This enables multiple images - to be processed in a row without reloading the model or creating a new process. To return the data, a shared - dictionary is created to hold the tags created. To wait for tags to be returned, a value of -1 is assigned - to the dictionary and the method adding the image to the queue should wait for this value to be updated with - the tags. - """ - from modules import shared # prevents circular reference - context = multiprocessing.get_context("spawn") - shared.deepbooru_process_manager = context.Manager() - shared.deepbooru_process_queue = shared.deepbooru_process_manager.Queue() - shared.deepbooru_process_return = shared.deepbooru_process_manager.dict() - shared.deepbooru_process_return["value"] = -1 - shared.deepbooru_process = context.Process(target=deepbooru_process, args=(shared.deepbooru_process_queue, shared.deepbooru_process_return, threshold, deepbooru_opts)) - shared.deepbooru_process.start() - - -def get_tags_from_process(image): - from modules import shared - - shared.deepbooru_process_return["value"] = -1 - shared.deepbooru_process_queue.put(image) - while shared.deepbooru_process_return["value"] == -1: - time.sleep(0.2) - caption = shared.deepbooru_process_return["value"] - shared.deepbooru_process_return["value"] = -1 - - return caption - - -def release_process(): - """ - Stops the deepbooru process to return used memory - """ - from modules import shared # prevents circular reference - shared.deepbooru_process_queue.put("QUIT") - shared.deepbooru_process.join() - shared.deepbooru_process_queue = None - shared.deepbooru_process = None - shared.deepbooru_process_return = None - shared.deepbooru_process_manager = None - -def get_deepbooru_tags_model(): - import deepdanbooru as dd - import tensorflow as tf - import numpy as np - this_folder = os.path.dirname(__file__) - model_path = os.path.abspath(os.path.join(this_folder, '..', 'models', 'deepbooru')) - if not os.path.exists(os.path.join(model_path, 'project.json')): - # there is no point importing these every time - import zipfile - from basicsr.utils.download_util import load_file_from_url - load_file_from_url( - r"https://github.com/KichangKim/DeepDanbooru/releases/download/v3-20211112-sgd-e28/deepdanbooru-v3-20211112-sgd-e28.zip", - model_path) - with zipfile.ZipFile(os.path.join(model_path, "deepdanbooru-v3-20211112-sgd-e28.zip"), "r") as zip_ref: - zip_ref.extractall(model_path) - 
os.remove(os.path.join(model_path, "deepdanbooru-v3-20211112-sgd-e28.zip")) - - tags = dd.project.load_tags_from_project(model_path) - model = dd.project.load_model_from_project( - model_path, compile_model=False - ) - return model, tags - - -def get_deepbooru_tags_from_model(model, tags, pil_image, threshold, deepbooru_opts): - import deepdanbooru as dd - import tensorflow as tf - import numpy as np - - alpha_sort = deepbooru_opts['alpha_sort'] - use_spaces = deepbooru_opts['use_spaces'] - use_escape = deepbooru_opts['use_escape'] - include_ranks = deepbooru_opts['include_ranks'] - - width = model.input_shape[2] - height = model.input_shape[1] - image = np.array(pil_image) - image = tf.image.resize( - image, - size=(height, width), - method=tf.image.ResizeMethod.AREA, - preserve_aspect_ratio=True, - ) - image = image.numpy() # EagerTensor to np.array - image = dd.image.transform_and_pad_image(image, width, height) - image = image / 255.0 - image_shape = image.shape - image = image.reshape((1, image_shape[0], image_shape[1], image_shape[2])) - - y = model.predict(image)[0] - - result_dict = {} - - for i, tag in enumerate(tags): - result_dict[tag] = y[i] - - unsorted_tags_in_theshold = [] - result_tags_print = [] - for tag in tags: - if result_dict[tag] >= threshold: + +class DeepDanbooru: + def __init__(self): + self.model = None + + def load(self): + if self.model is not None: + return + + files = modelloader.load_models( + model_path=os.path.join(paths.models_path, "torch_deepdanbooru"), + model_url='https://github.com/AUTOMATIC1111/TorchDeepDanbooru/releases/download/v1/model-resnet_custom_v3.pt', + ext_filter=".pt", + download_name='model-resnet_custom_v3.pt', + ) + + self.model = deepbooru_model.DeepDanbooruModel() + self.model.load_state_dict(torch.load(files[0], map_location="cpu")) + + self.model.eval() + self.model.to(devices.cpu, devices.dtype) + + def start(self): + self.load() + self.model.to(devices.device) + + def stop(self): + if not shared.opts.interrogate_keep_models_in_memory: + self.model.to(devices.cpu) + devices.torch_gc() + + def tag(self, pil_image): + self.start() + res = self.tag_multi(pil_image) + self.stop() + + return res + + def tag_multi(self, pil_image, force_disable_ranks=False): + threshold = shared.opts.interrogate_deepbooru_score_threshold + use_spaces = shared.opts.deepbooru_use_spaces + use_escape = shared.opts.deepbooru_escape + alpha_sort = shared.opts.deepbooru_sort_alpha + include_ranks = shared.opts.interrogate_return_ranks and not force_disable_ranks + + pic = images.resize_image(2, pil_image.convert("RGB"), 512, 512) + a = np.expand_dims(np.array(pic, dtype=np.float32), 0) / 255 + + with torch.no_grad(), devices.autocast(): + x = torch.from_numpy(a).cuda() + y = self.model(x)[0].detach().cpu().numpy() + + probability_dict = {} + + for tag, probability in zip(self.model.tags, y): + if probability < threshold: + continue + if tag.startswith("rating:"): continue - unsorted_tags_in_theshold.append((result_dict[tag], tag)) - result_tags_print.append(f'{result_dict[tag]} {tag}') - - # sort tags - result_tags_out = [] - sort_ndx = 0 - if alpha_sort: - sort_ndx = 1 - - # sort by reverse by likelihood and normal for alpha, and format tag text as requested - unsorted_tags_in_theshold.sort(key=lambda y: y[sort_ndx], reverse=(not alpha_sort)) - for weight, tag in unsorted_tags_in_theshold: - tag_outformat = tag - if use_spaces: - tag_outformat = tag_outformat.replace('_', ' ') - if use_escape: - tag_outformat = re.sub(re_special, r'\\\1', tag_outformat) - if 
include_ranks: - tag_outformat = f"({tag_outformat}:{weight:.3f})" - - result_tags_out.append(tag_outformat) - - print('\n'.join(sorted(result_tags_print, reverse=True))) - - return ', '.join(result_tags_out) + + probability_dict[tag] = probability + + if alpha_sort: + tags = sorted(probability_dict) + else: + tags = [tag for tag, _ in sorted(probability_dict.items(), key=lambda x: -x[1])] + + res = [] + + for tag in tags: + probability = probability_dict[tag] + tag_outformat = tag + if use_spaces: + tag_outformat = tag_outformat.replace('_', ' ') + if use_escape: + tag_outformat = re.sub(re_special, r'\\\1', tag_outformat) + if include_ranks: + tag_outformat = f"({tag_outformat}:{probability:.3f})" + + res.append(tag_outformat) + + return ", ".join(res) + + +model = DeepDanbooru() diff --git a/modules/deepbooru_model.py b/modules/deepbooru_model.py new file mode 100644 index 00000000..edd40c81 --- /dev/null +++ b/modules/deepbooru_model.py @@ -0,0 +1,676 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +# see https://github.com/AUTOMATIC1111/TorchDeepDanbooru for more + + +class DeepDanbooruModel(nn.Module): + def __init__(self): + super(DeepDanbooruModel, self).__init__() + + self.tags = [] + + self.n_Conv_0 = nn.Conv2d(kernel_size=(7, 7), in_channels=3, out_channels=64, stride=(2, 2)) + self.n_MaxPool_0 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2)) + self.n_Conv_1 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256) + self.n_Conv_2 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=64) + self.n_Conv_3 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64) + self.n_Conv_4 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256) + self.n_Conv_5 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=64) + self.n_Conv_6 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64) + self.n_Conv_7 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256) + self.n_Conv_8 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=64) + self.n_Conv_9 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64) + self.n_Conv_10 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256) + self.n_Conv_11 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=512, stride=(2, 2)) + self.n_Conv_12 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=128) + self.n_Conv_13 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128, stride=(2, 2)) + self.n_Conv_14 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_15 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_16 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_17 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_18 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_19 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_20 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_21 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_22 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_23 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_24 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_25 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_26 = 
nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_27 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_28 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_29 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_30 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_31 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_32 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_33 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_34 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_35 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_36 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=1024, stride=(2, 2)) + self.n_Conv_37 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=256) + self.n_Conv_38 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256, stride=(2, 2)) + self.n_Conv_39 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_40 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_41 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_42 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_43 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_44 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_45 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_46 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_47 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_48 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_49 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_50 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_51 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_52 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_53 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_54 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_55 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_56 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_57 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_58 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_59 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_60 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_61 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_62 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_63 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_64 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_65 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_66 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_67 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, 
out_channels=256) + self.n_Conv_68 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_69 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_70 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_71 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_72 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_73 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_74 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_75 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_76 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_77 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_78 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_79 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_80 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_81 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_82 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_83 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_84 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_85 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_86 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_87 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_88 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_89 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_90 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_91 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_92 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_93 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_94 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_95 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_96 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_97 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_98 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256, stride=(2, 2)) + self.n_Conv_99 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_100 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=1024, stride=(2, 2)) + self.n_Conv_101 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_102 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_103 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_104 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_105 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_106 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_107 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_108 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_109 = 
nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_110 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_111 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_112 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_113 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_114 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_115 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_116 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_117 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_118 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_119 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_120 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_121 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_122 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_123 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_124 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_125 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_126 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_127 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_128 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_129 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_130 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_131 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_132 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_133 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_134 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_135 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_136 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_137 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_138 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_139 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_140 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_141 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_142 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_143 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_144 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_145 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_146 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_147 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_148 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_149 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_150 = nn.Conv2d(kernel_size=(3, 3), 
in_channels=256, out_channels=256) + self.n_Conv_151 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_152 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_153 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_154 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_155 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_156 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_157 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_158 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=2048, stride=(2, 2)) + self.n_Conv_159 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=512) + self.n_Conv_160 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, out_channels=512, stride=(2, 2)) + self.n_Conv_161 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048) + self.n_Conv_162 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=512) + self.n_Conv_163 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, out_channels=512) + self.n_Conv_164 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048) + self.n_Conv_165 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=512) + self.n_Conv_166 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, out_channels=512) + self.n_Conv_167 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048) + self.n_Conv_168 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=4096, stride=(2, 2)) + self.n_Conv_169 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=1024) + self.n_Conv_170 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024, stride=(2, 2)) + self.n_Conv_171 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096) + self.n_Conv_172 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=1024) + self.n_Conv_173 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024) + self.n_Conv_174 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096) + self.n_Conv_175 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=1024) + self.n_Conv_176 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024) + self.n_Conv_177 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096) + self.n_Conv_178 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=9176, bias=False) + + def forward(self, *inputs): + t_358, = inputs + t_359 = t_358.permute(*[0, 3, 1, 2]) + t_359_padded = F.pad(t_359, [2, 3, 2, 3], value=0) + t_360 = self.n_Conv_0(t_359_padded) + t_361 = F.relu(t_360) + t_361 = F.pad(t_361, [0, 1, 0, 1], value=float('-inf')) + t_362 = self.n_MaxPool_0(t_361) + t_363 = self.n_Conv_1(t_362) + t_364 = self.n_Conv_2(t_362) + t_365 = F.relu(t_364) + t_365_padded = F.pad(t_365, [1, 1, 1, 1], value=0) + t_366 = self.n_Conv_3(t_365_padded) + t_367 = F.relu(t_366) + t_368 = self.n_Conv_4(t_367) + t_369 = torch.add(t_368, t_363) + t_370 = F.relu(t_369) + t_371 = self.n_Conv_5(t_370) + t_372 = F.relu(t_371) + t_372_padded = F.pad(t_372, [1, 1, 1, 1], value=0) + t_373 = self.n_Conv_6(t_372_padded) + t_374 = F.relu(t_373) + t_375 = self.n_Conv_7(t_374) + t_376 = torch.add(t_375, t_370) + t_377 = F.relu(t_376) + t_378 = self.n_Conv_8(t_377) + t_379 = F.relu(t_378) + t_379_padded = F.pad(t_379, [1, 1, 1, 1], value=0) + t_380 = self.n_Conv_9(t_379_padded) + t_381 = F.relu(t_380) + t_382 = 
self.n_Conv_10(t_381) + t_383 = torch.add(t_382, t_377) + t_384 = F.relu(t_383) + t_385 = self.n_Conv_11(t_384) + t_386 = self.n_Conv_12(t_384) + t_387 = F.relu(t_386) + t_387_padded = F.pad(t_387, [0, 1, 0, 1], value=0) + t_388 = self.n_Conv_13(t_387_padded) + t_389 = F.relu(t_388) + t_390 = self.n_Conv_14(t_389) + t_391 = torch.add(t_390, t_385) + t_392 = F.relu(t_391) + t_393 = self.n_Conv_15(t_392) + t_394 = F.relu(t_393) + t_394_padded = F.pad(t_394, [1, 1, 1, 1], value=0) + t_395 = self.n_Conv_16(t_394_padded) + t_396 = F.relu(t_395) + t_397 = self.n_Conv_17(t_396) + t_398 = torch.add(t_397, t_392) + t_399 = F.relu(t_398) + t_400 = self.n_Conv_18(t_399) + t_401 = F.relu(t_400) + t_401_padded = F.pad(t_401, [1, 1, 1, 1], value=0) + t_402 = self.n_Conv_19(t_401_padded) + t_403 = F.relu(t_402) + t_404 = self.n_Conv_20(t_403) + t_405 = torch.add(t_404, t_399) + t_406 = F.relu(t_405) + t_407 = self.n_Conv_21(t_406) + t_408 = F.relu(t_407) + t_408_padded = F.pad(t_408, [1, 1, 1, 1], value=0) + t_409 = self.n_Conv_22(t_408_padded) + t_410 = F.relu(t_409) + t_411 = self.n_Conv_23(t_410) + t_412 = torch.add(t_411, t_406) + t_413 = F.relu(t_412) + t_414 = self.n_Conv_24(t_413) + t_415 = F.relu(t_414) + t_415_padded = F.pad(t_415, [1, 1, 1, 1], value=0) + t_416 = self.n_Conv_25(t_415_padded) + t_417 = F.relu(t_416) + t_418 = self.n_Conv_26(t_417) + t_419 = torch.add(t_418, t_413) + t_420 = F.relu(t_419) + t_421 = self.n_Conv_27(t_420) + t_422 = F.relu(t_421) + t_422_padded = F.pad(t_422, [1, 1, 1, 1], value=0) + t_423 = self.n_Conv_28(t_422_padded) + t_424 = F.relu(t_423) + t_425 = self.n_Conv_29(t_424) + t_426 = torch.add(t_425, t_420) + t_427 = F.relu(t_426) + t_428 = self.n_Conv_30(t_427) + t_429 = F.relu(t_428) + t_429_padded = F.pad(t_429, [1, 1, 1, 1], value=0) + t_430 = self.n_Conv_31(t_429_padded) + t_431 = F.relu(t_430) + t_432 = self.n_Conv_32(t_431) + t_433 = torch.add(t_432, t_427) + t_434 = F.relu(t_433) + t_435 = self.n_Conv_33(t_434) + t_436 = F.relu(t_435) + t_436_padded = F.pad(t_436, [1, 1, 1, 1], value=0) + t_437 = self.n_Conv_34(t_436_padded) + t_438 = F.relu(t_437) + t_439 = self.n_Conv_35(t_438) + t_440 = torch.add(t_439, t_434) + t_441 = F.relu(t_440) + t_442 = self.n_Conv_36(t_441) + t_443 = self.n_Conv_37(t_441) + t_444 = F.relu(t_443) + t_444_padded = F.pad(t_444, [0, 1, 0, 1], value=0) + t_445 = self.n_Conv_38(t_444_padded) + t_446 = F.relu(t_445) + t_447 = self.n_Conv_39(t_446) + t_448 = torch.add(t_447, t_442) + t_449 = F.relu(t_448) + t_450 = self.n_Conv_40(t_449) + t_451 = F.relu(t_450) + t_451_padded = F.pad(t_451, [1, 1, 1, 1], value=0) + t_452 = self.n_Conv_41(t_451_padded) + t_453 = F.relu(t_452) + t_454 = self.n_Conv_42(t_453) + t_455 = torch.add(t_454, t_449) + t_456 = F.relu(t_455) + t_457 = self.n_Conv_43(t_456) + t_458 = F.relu(t_457) + t_458_padded = F.pad(t_458, [1, 1, 1, 1], value=0) + t_459 = self.n_Conv_44(t_458_padded) + t_460 = F.relu(t_459) + t_461 = self.n_Conv_45(t_460) + t_462 = torch.add(t_461, t_456) + t_463 = F.relu(t_462) + t_464 = self.n_Conv_46(t_463) + t_465 = F.relu(t_464) + t_465_padded = F.pad(t_465, [1, 1, 1, 1], value=0) + t_466 = self.n_Conv_47(t_465_padded) + t_467 = F.relu(t_466) + t_468 = self.n_Conv_48(t_467) + t_469 = torch.add(t_468, t_463) + t_470 = F.relu(t_469) + t_471 = self.n_Conv_49(t_470) + t_472 = F.relu(t_471) + t_472_padded = F.pad(t_472, [1, 1, 1, 1], value=0) + t_473 = self.n_Conv_50(t_472_padded) + t_474 = F.relu(t_473) + t_475 = self.n_Conv_51(t_474) + t_476 = torch.add(t_475, t_470) + t_477 = F.relu(t_476) + 
t_478 = self.n_Conv_52(t_477) + t_479 = F.relu(t_478) + t_479_padded = F.pad(t_479, [1, 1, 1, 1], value=0) + t_480 = self.n_Conv_53(t_479_padded) + t_481 = F.relu(t_480) + t_482 = self.n_Conv_54(t_481) + t_483 = torch.add(t_482, t_477) + t_484 = F.relu(t_483) + t_485 = self.n_Conv_55(t_484) + t_486 = F.relu(t_485) + t_486_padded = F.pad(t_486, [1, 1, 1, 1], value=0) + t_487 = self.n_Conv_56(t_486_padded) + t_488 = F.relu(t_487) + t_489 = self.n_Conv_57(t_488) + t_490 = torch.add(t_489, t_484) + t_491 = F.relu(t_490) + t_492 = self.n_Conv_58(t_491) + t_493 = F.relu(t_492) + t_493_padded = F.pad(t_493, [1, 1, 1, 1], value=0) + t_494 = self.n_Conv_59(t_493_padded) + t_495 = F.relu(t_494) + t_496 = self.n_Conv_60(t_495) + t_497 = torch.add(t_496, t_491) + t_498 = F.relu(t_497) + t_499 = self.n_Conv_61(t_498) + t_500 = F.relu(t_499) + t_500_padded = F.pad(t_500, [1, 1, 1, 1], value=0) + t_501 = self.n_Conv_62(t_500_padded) + t_502 = F.relu(t_501) + t_503 = self.n_Conv_63(t_502) + t_504 = torch.add(t_503, t_498) + t_505 = F.relu(t_504) + t_506 = self.n_Conv_64(t_505) + t_507 = F.relu(t_506) + t_507_padded = F.pad(t_507, [1, 1, 1, 1], value=0) + t_508 = self.n_Conv_65(t_507_padded) + t_509 = F.relu(t_508) + t_510 = self.n_Conv_66(t_509) + t_511 = torch.add(t_510, t_505) + t_512 = F.relu(t_511) + t_513 = self.n_Conv_67(t_512) + t_514 = F.relu(t_513) + t_514_padded = F.pad(t_514, [1, 1, 1, 1], value=0) + t_515 = self.n_Conv_68(t_514_padded) + t_516 = F.relu(t_515) + t_517 = self.n_Conv_69(t_516) + t_518 = torch.add(t_517, t_512) + t_519 = F.relu(t_518) + t_520 = self.n_Conv_70(t_519) + t_521 = F.relu(t_520) + t_521_padded = F.pad(t_521, [1, 1, 1, 1], value=0) + t_522 = self.n_Conv_71(t_521_padded) + t_523 = F.relu(t_522) + t_524 = self.n_Conv_72(t_523) + t_525 = torch.add(t_524, t_519) + t_526 = F.relu(t_525) + t_527 = self.n_Conv_73(t_526) + t_528 = F.relu(t_527) + t_528_padded = F.pad(t_528, [1, 1, 1, 1], value=0) + t_529 = self.n_Conv_74(t_528_padded) + t_530 = F.relu(t_529) + t_531 = self.n_Conv_75(t_530) + t_532 = torch.add(t_531, t_526) + t_533 = F.relu(t_532) + t_534 = self.n_Conv_76(t_533) + t_535 = F.relu(t_534) + t_535_padded = F.pad(t_535, [1, 1, 1, 1], value=0) + t_536 = self.n_Conv_77(t_535_padded) + t_537 = F.relu(t_536) + t_538 = self.n_Conv_78(t_537) + t_539 = torch.add(t_538, t_533) + t_540 = F.relu(t_539) + t_541 = self.n_Conv_79(t_540) + t_542 = F.relu(t_541) + t_542_padded = F.pad(t_542, [1, 1, 1, 1], value=0) + t_543 = self.n_Conv_80(t_542_padded) + t_544 = F.relu(t_543) + t_545 = self.n_Conv_81(t_544) + t_546 = torch.add(t_545, t_540) + t_547 = F.relu(t_546) + t_548 = self.n_Conv_82(t_547) + t_549 = F.relu(t_548) + t_549_padded = F.pad(t_549, [1, 1, 1, 1], value=0) + t_550 = self.n_Conv_83(t_549_padded) + t_551 = F.relu(t_550) + t_552 = self.n_Conv_84(t_551) + t_553 = torch.add(t_552, t_547) + t_554 = F.relu(t_553) + t_555 = self.n_Conv_85(t_554) + t_556 = F.relu(t_555) + t_556_padded = F.pad(t_556, [1, 1, 1, 1], value=0) + t_557 = self.n_Conv_86(t_556_padded) + t_558 = F.relu(t_557) + t_559 = self.n_Conv_87(t_558) + t_560 = torch.add(t_559, t_554) + t_561 = F.relu(t_560) + t_562 = self.n_Conv_88(t_561) + t_563 = F.relu(t_562) + t_563_padded = F.pad(t_563, [1, 1, 1, 1], value=0) + t_564 = self.n_Conv_89(t_563_padded) + t_565 = F.relu(t_564) + t_566 = self.n_Conv_90(t_565) + t_567 = torch.add(t_566, t_561) + t_568 = F.relu(t_567) + t_569 = self.n_Conv_91(t_568) + t_570 = F.relu(t_569) + t_570_padded = F.pad(t_570, [1, 1, 1, 1], value=0) + t_571 = self.n_Conv_92(t_570_padded) + 
t_572 = F.relu(t_571) + t_573 = self.n_Conv_93(t_572) + t_574 = torch.add(t_573, t_568) + t_575 = F.relu(t_574) + t_576 = self.n_Conv_94(t_575) + t_577 = F.relu(t_576) + t_577_padded = F.pad(t_577, [1, 1, 1, 1], value=0) + t_578 = self.n_Conv_95(t_577_padded) + t_579 = F.relu(t_578) + t_580 = self.n_Conv_96(t_579) + t_581 = torch.add(t_580, t_575) + t_582 = F.relu(t_581) + t_583 = self.n_Conv_97(t_582) + t_584 = F.relu(t_583) + t_584_padded = F.pad(t_584, [0, 1, 0, 1], value=0) + t_585 = self.n_Conv_98(t_584_padded) + t_586 = F.relu(t_585) + t_587 = self.n_Conv_99(t_586) + t_588 = self.n_Conv_100(t_582) + t_589 = torch.add(t_587, t_588) + t_590 = F.relu(t_589) + t_591 = self.n_Conv_101(t_590) + t_592 = F.relu(t_591) + t_592_padded = F.pad(t_592, [1, 1, 1, 1], value=0) + t_593 = self.n_Conv_102(t_592_padded) + t_594 = F.relu(t_593) + t_595 = self.n_Conv_103(t_594) + t_596 = torch.add(t_595, t_590) + t_597 = F.relu(t_596) + t_598 = self.n_Conv_104(t_597) + t_599 = F.relu(t_598) + t_599_padded = F.pad(t_599, [1, 1, 1, 1], value=0) + t_600 = self.n_Conv_105(t_599_padded) + t_601 = F.relu(t_600) + t_602 = self.n_Conv_106(t_601) + t_603 = torch.add(t_602, t_597) + t_604 = F.relu(t_603) + t_605 = self.n_Conv_107(t_604) + t_606 = F.relu(t_605) + t_606_padded = F.pad(t_606, [1, 1, 1, 1], value=0) + t_607 = self.n_Conv_108(t_606_padded) + t_608 = F.relu(t_607) + t_609 = self.n_Conv_109(t_608) + t_610 = torch.add(t_609, t_604) + t_611 = F.relu(t_610) + t_612 = self.n_Conv_110(t_611) + t_613 = F.relu(t_612) + t_613_padded = F.pad(t_613, [1, 1, 1, 1], value=0) + t_614 = self.n_Conv_111(t_613_padded) + t_615 = F.relu(t_614) + t_616 = self.n_Conv_112(t_615) + t_617 = torch.add(t_616, t_611) + t_618 = F.relu(t_617) + t_619 = self.n_Conv_113(t_618) + t_620 = F.relu(t_619) + t_620_padded = F.pad(t_620, [1, 1, 1, 1], value=0) + t_621 = self.n_Conv_114(t_620_padded) + t_622 = F.relu(t_621) + t_623 = self.n_Conv_115(t_622) + t_624 = torch.add(t_623, t_618) + t_625 = F.relu(t_624) + t_626 = self.n_Conv_116(t_625) + t_627 = F.relu(t_626) + t_627_padded = F.pad(t_627, [1, 1, 1, 1], value=0) + t_628 = self.n_Conv_117(t_627_padded) + t_629 = F.relu(t_628) + t_630 = self.n_Conv_118(t_629) + t_631 = torch.add(t_630, t_625) + t_632 = F.relu(t_631) + t_633 = self.n_Conv_119(t_632) + t_634 = F.relu(t_633) + t_634_padded = F.pad(t_634, [1, 1, 1, 1], value=0) + t_635 = self.n_Conv_120(t_634_padded) + t_636 = F.relu(t_635) + t_637 = self.n_Conv_121(t_636) + t_638 = torch.add(t_637, t_632) + t_639 = F.relu(t_638) + t_640 = self.n_Conv_122(t_639) + t_641 = F.relu(t_640) + t_641_padded = F.pad(t_641, [1, 1, 1, 1], value=0) + t_642 = self.n_Conv_123(t_641_padded) + t_643 = F.relu(t_642) + t_644 = self.n_Conv_124(t_643) + t_645 = torch.add(t_644, t_639) + t_646 = F.relu(t_645) + t_647 = self.n_Conv_125(t_646) + t_648 = F.relu(t_647) + t_648_padded = F.pad(t_648, [1, 1, 1, 1], value=0) + t_649 = self.n_Conv_126(t_648_padded) + t_650 = F.relu(t_649) + t_651 = self.n_Conv_127(t_650) + t_652 = torch.add(t_651, t_646) + t_653 = F.relu(t_652) + t_654 = self.n_Conv_128(t_653) + t_655 = F.relu(t_654) + t_655_padded = F.pad(t_655, [1, 1, 1, 1], value=0) + t_656 = self.n_Conv_129(t_655_padded) + t_657 = F.relu(t_656) + t_658 = self.n_Conv_130(t_657) + t_659 = torch.add(t_658, t_653) + t_660 = F.relu(t_659) + t_661 = self.n_Conv_131(t_660) + t_662 = F.relu(t_661) + t_662_padded = F.pad(t_662, [1, 1, 1, 1], value=0) + t_663 = self.n_Conv_132(t_662_padded) + t_664 = F.relu(t_663) + t_665 = self.n_Conv_133(t_664) + t_666 = torch.add(t_665, 
t_660) + t_667 = F.relu(t_666) + t_668 = self.n_Conv_134(t_667) + t_669 = F.relu(t_668) + t_669_padded = F.pad(t_669, [1, 1, 1, 1], value=0) + t_670 = self.n_Conv_135(t_669_padded) + t_671 = F.relu(t_670) + t_672 = self.n_Conv_136(t_671) + t_673 = torch.add(t_672, t_667) + t_674 = F.relu(t_673) + t_675 = self.n_Conv_137(t_674) + t_676 = F.relu(t_675) + t_676_padded = F.pad(t_676, [1, 1, 1, 1], value=0) + t_677 = self.n_Conv_138(t_676_padded) + t_678 = F.relu(t_677) + t_679 = self.n_Conv_139(t_678) + t_680 = torch.add(t_679, t_674) + t_681 = F.relu(t_680) + t_682 = self.n_Conv_140(t_681) + t_683 = F.relu(t_682) + t_683_padded = F.pad(t_683, [1, 1, 1, 1], value=0) + t_684 = self.n_Conv_141(t_683_padded) + t_685 = F.relu(t_684) + t_686 = self.n_Conv_142(t_685) + t_687 = torch.add(t_686, t_681) + t_688 = F.relu(t_687) + t_689 = self.n_Conv_143(t_688) + t_690 = F.relu(t_689) + t_690_padded = F.pad(t_690, [1, 1, 1, 1], value=0) + t_691 = self.n_Conv_144(t_690_padded) + t_692 = F.relu(t_691) + t_693 = self.n_Conv_145(t_692) + t_694 = torch.add(t_693, t_688) + t_695 = F.relu(t_694) + t_696 = self.n_Conv_146(t_695) + t_697 = F.relu(t_696) + t_697_padded = F.pad(t_697, [1, 1, 1, 1], value=0) + t_698 = self.n_Conv_147(t_697_padded) + t_699 = F.relu(t_698) + t_700 = self.n_Conv_148(t_699) + t_701 = torch.add(t_700, t_695) + t_702 = F.relu(t_701) + t_703 = self.n_Conv_149(t_702) + t_704 = F.relu(t_703) + t_704_padded = F.pad(t_704, [1, 1, 1, 1], value=0) + t_705 = self.n_Conv_150(t_704_padded) + t_706 = F.relu(t_705) + t_707 = self.n_Conv_151(t_706) + t_708 = torch.add(t_707, t_702) + t_709 = F.relu(t_708) + t_710 = self.n_Conv_152(t_709) + t_711 = F.relu(t_710) + t_711_padded = F.pad(t_711, [1, 1, 1, 1], value=0) + t_712 = self.n_Conv_153(t_711_padded) + t_713 = F.relu(t_712) + t_714 = self.n_Conv_154(t_713) + t_715 = torch.add(t_714, t_709) + t_716 = F.relu(t_715) + t_717 = self.n_Conv_155(t_716) + t_718 = F.relu(t_717) + t_718_padded = F.pad(t_718, [1, 1, 1, 1], value=0) + t_719 = self.n_Conv_156(t_718_padded) + t_720 = F.relu(t_719) + t_721 = self.n_Conv_157(t_720) + t_722 = torch.add(t_721, t_716) + t_723 = F.relu(t_722) + t_724 = self.n_Conv_158(t_723) + t_725 = self.n_Conv_159(t_723) + t_726 = F.relu(t_725) + t_726_padded = F.pad(t_726, [0, 1, 0, 1], value=0) + t_727 = self.n_Conv_160(t_726_padded) + t_728 = F.relu(t_727) + t_729 = self.n_Conv_161(t_728) + t_730 = torch.add(t_729, t_724) + t_731 = F.relu(t_730) + t_732 = self.n_Conv_162(t_731) + t_733 = F.relu(t_732) + t_733_padded = F.pad(t_733, [1, 1, 1, 1], value=0) + t_734 = self.n_Conv_163(t_733_padded) + t_735 = F.relu(t_734) + t_736 = self.n_Conv_164(t_735) + t_737 = torch.add(t_736, t_731) + t_738 = F.relu(t_737) + t_739 = self.n_Conv_165(t_738) + t_740 = F.relu(t_739) + t_740_padded = F.pad(t_740, [1, 1, 1, 1], value=0) + t_741 = self.n_Conv_166(t_740_padded) + t_742 = F.relu(t_741) + t_743 = self.n_Conv_167(t_742) + t_744 = torch.add(t_743, t_738) + t_745 = F.relu(t_744) + t_746 = self.n_Conv_168(t_745) + t_747 = self.n_Conv_169(t_745) + t_748 = F.relu(t_747) + t_748_padded = F.pad(t_748, [0, 1, 0, 1], value=0) + t_749 = self.n_Conv_170(t_748_padded) + t_750 = F.relu(t_749) + t_751 = self.n_Conv_171(t_750) + t_752 = torch.add(t_751, t_746) + t_753 = F.relu(t_752) + t_754 = self.n_Conv_172(t_753) + t_755 = F.relu(t_754) + t_755_padded = F.pad(t_755, [1, 1, 1, 1], value=0) + t_756 = self.n_Conv_173(t_755_padded) + t_757 = F.relu(t_756) + t_758 = self.n_Conv_174(t_757) + t_759 = torch.add(t_758, t_753) + t_760 = F.relu(t_759) + t_761 = 
self.n_Conv_175(t_760) + t_762 = F.relu(t_761) + t_762_padded = F.pad(t_762, [1, 1, 1, 1], value=0) + t_763 = self.n_Conv_176(t_762_padded) + t_764 = F.relu(t_763) + t_765 = self.n_Conv_177(t_764) + t_766 = torch.add(t_765, t_760) + t_767 = F.relu(t_766) + t_768 = self.n_Conv_178(t_767) + t_769 = F.avg_pool2d(t_768, kernel_size=t_768.shape[-2:]) + t_770 = torch.squeeze(t_769, 3) + t_770 = torch.squeeze(t_770, 2) + t_771 = torch.sigmoid(t_770) + return t_771 + + def load_state_dict(self, state_dict, **kwargs): + self.tags = state_dict.get('tags', []) + + super(DeepDanbooruModel, self).load_state_dict({k: v for k, v in state_dict.items() if k != 'tags'}) + diff --git a/modules/shared.py b/modules/shared.py index a4457305..c93ae2a3 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -55,7 +55,7 @@ parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None) parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers") parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work") -parser.add_argument("--deepdanbooru", action='store_true', help="enable deepdanbooru interrogator") +parser.add_argument("--deepdanbooru", action='store_true', help="does not do anything") parser.add_argument("--opt-split-attention", action='store_true', help="force-enables Doggettx's cross-attention layer optimization. By default, it's on for torch cuda.") parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. 
By default, it's on when cuda is unavailable.") parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find") diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 488aa5b5..56b9b2eb 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -6,12 +6,10 @@ import sys import tqdm import time -from modules import shared, images +from modules import shared, images, deepbooru from modules.paths import models_path from modules.shared import opts, cmd_opts from modules.textual_inversion import autocrop -if cmd_opts.deepdanbooru: - import modules.deepbooru as deepbooru def preprocess(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False): @@ -20,9 +18,7 @@ def preprocess(process_src, process_dst, process_width, process_height, preproce shared.interrogator.load() if process_caption_deepbooru: - db_opts = deepbooru.create_deepbooru_opts() - db_opts[deepbooru.OPT_INCLUDE_RANKS] = False - deepbooru.create_deepbooru_process(opts.interrogate_deepbooru_score_threshold, db_opts) + deepbooru.model.start() preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug) @@ -32,7 +28,7 @@ def preprocess(process_src, process_dst, process_width, process_height, preproce shared.interrogator.send_blip_to_ram() if process_caption_deepbooru: - deepbooru.release_process() + deepbooru.model.stop() def listfiles(dirname): @@ -58,7 +54,7 @@ def save_pic_with_caption(image, index, params: PreprocessParams, existing_capti if params.process_caption_deepbooru: if len(caption) > 0: caption += ", " - caption += deepbooru.get_tags_from_process(image) + caption += deepbooru.model.tag_multi(image) filename_part = params.src filename_part = os.path.splitext(filename_part)[0] diff --git a/modules/ui.py b/modules/ui.py index a5953fce..e6da1b2a 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -19,14 +19,11 @@ import numpy as np from PIL import Image, PngImagePlugin -from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions +from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru from modules.paths import script_path from modules.shared import opts, cmd_opts, restricted_opts -if cmd_opts.deepdanbooru: - from modules.deepbooru import get_deepbooru_tags - import modules.codeformer_model import modules.generation_parameters_copypaste as parameters_copypaste import modules.gfpgan_model @@ -352,7 +349,7 @@ def interrogate(image): def interrogate_deepbooru(image): - prompt = get_deepbooru_tags(image) + prompt = deepbooru.model.tag(image) return gr_show(True) if prompt is None else prompt -- cgit v1.2.3 From ce6911158b5b2f9cf79b405a1f368f875492044d Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 26 Nov 2022 16:10:46 +0300 Subject: Add support Stable 
Diffusion 2.0 --- README.md | 21 +- launch.py | 12 +- modules/paths.py | 2 +- modules/sd_hijack.py | 297 +++--------------------- modules/sd_hijack_clip.py | 301 +++++++++++++++++++++++++ modules/sd_hijack_inpainting.py | 20 +- modules/sd_hijack_open_clip.py | 37 +++ modules/sd_samplers.py | 14 +- modules/shared.py | 34 ++- modules/textual_inversion/textual_inversion.py | 7 +- modules/ui.py | 13 +- requirements.txt | 1 + requirements_versions.txt | 1 + v1-inference.yaml | 70 ++++++ webui.py | 5 +- 15 files changed, 504 insertions(+), 331 deletions(-) create mode 100644 modules/sd_hijack_clip.py create mode 100644 modules/sd_hijack_open_clip.py create mode 100644 v1-inference.yaml (limited to 'launch.py') diff --git a/README.md b/README.md index 5f5ab3aa..8a4ffade 100644 --- a/README.md +++ b/README.md @@ -84,26 +84,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web - API - Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML. - via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip images embds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients)) - -## Where are Aesthetic Gradients?!?! -Aesthetic Gradients are now an extension. You can install it using git: - -```commandline -git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients extensions/aesthetic-gradients -``` - -After running this command, make sure that you have `aesthetic-gradients` dir in webui's `extensions` directory and restart -the UI. The interface for Aesthetic Gradients should appear exactly the same as it was. - -## Where is History/Image browser?!?! -Image browser is now an extension. You can install it using git: - -```commandline -git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser extensions/images-browser -``` - -After running this command, make sure that you have `images-browser` dir in webui's `extensions` directory and restart -the UI. The interface for Image browser should appear exactly the same as it was. +- [Stable Diffusion 2.0](https://github.com/Stability-AI/stablediffusion) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20) for instructions ## Installation and Running Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs. 
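
The launch.py hunk below switches the pinned Stable Diffusion repository to Stability-AI/stablediffusion and adds an open_clip dependency for the SD 2.0 text encoder. As elsewhere in prepare_enviroment(), both values are read from environment variables before falling back to the hard-coded defaults, so they can be overridden at launch time without editing the file. A minimal sketch of that override path, assuming it is run from the webui root; the variable names come from the diff below and the values shown are simply the same defaults the commit pins, given only to illustrate the mechanism:

```python
# Sketch: override the pinned SD 2.0 dependencies via environment variables
# instead of editing launch.py. OPENCLIP_PACKAGE and STABLE_DIFFUSION_COMMIT_HASH
# are the names prepare_enviroment() reads; the values are the commit's own defaults.
import os
import subprocess
import sys

env = os.environ.copy()
env["OPENCLIP_PACKAGE"] = "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b"
env["STABLE_DIFFUSION_COMMIT_HASH"] = "47b6b607fdd31875c9279cd2f4f16b92e4ea958e"

# launch.py consults these before falling back to its hard-coded defaults.
subprocess.run([sys.executable, "launch.py", "--skip-torch-cuda-test"], env=env, check=True)
```

The same pattern applies to the other *_PACKAGE, *_REPO and *_COMMIT_HASH variables visible in prepare_enviroment(), which is how the workflow and test changes earlier in this log adjust behaviour without patching the launcher.
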
diff --git a/launch.py b/launch.py index d2f1055c..b1626cb5 100644 --- a/launch.py +++ b/launch.py @@ -134,18 +134,19 @@ def prepare_enviroment(): gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") + openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b") xformers_windows_package = os.environ.get('XFORMERS_WINDOWS_PACKAGE', 'https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl') - stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/CompVis/stable-diffusion.git") + stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git") taming_transformers_repo = os.environ.get('TAMING_TRANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git") k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git') codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git') blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git') - stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc") + stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "47b6b607fdd31875c9279cd2f4f16b92e4ea958e") taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6") - k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "60e5042ca0da89c14d1dd59d73883280f8fce991") + k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "5b3af030dd83e0297272d861c19477735d0317ec") codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af") blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9") @@ -179,6 +180,9 @@ def prepare_enviroment(): if not is_installed("clip"): run_pip(f"install {clip_package}", "clip") + if not is_installed("open_clip"): + run_pip(f"install {openclip_package}", "open_clip") + if (not is_installed("xformers") or reinstall_xformers) and xformers: if platform.system() == "Windows": if platform.python_version().startswith("3.10"): @@ -196,7 +200,7 @@ def prepare_enviroment(): os.makedirs(dir_repos, exist_ok=True) - git_clone(stable_diffusion_repo, repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash) + git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash) git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash) git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash) git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash) diff --git a/modules/paths.py b/modules/paths.py index 1e7a2fbc..4dd03a35 100644 --- a/modules/paths.py +++ b/modules/paths.py @@ -9,7 +9,7 @@ sys.path.insert(0, script_path) # search for directory of stable diffusion in following places sd_path = None -possible_sd_paths = [os.path.join(script_path, 'repositories/stable-diffusion'), '.', 
os.path.dirname(script_path)] +possible_sd_paths = [os.path.join(script_path, 'repositories/stable-diffusion-stability-ai'), '.', os.path.dirname(script_path)] for possible_sd_path in possible_sd_paths: if os.path.exists(os.path.join(possible_sd_path, 'ldm/models/diffusion/ddpm.py')): sd_path = os.path.abspath(possible_sd_path) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index eaedac13..d5243fd3 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -9,18 +9,29 @@ from torch.nn.functional import silu import modules.textual_inversion.textual_inversion from modules import prompt_parser, devices, sd_hijack_optimizations, shared -from modules.shared import opts, device, cmd_opts +from modules.shared import cmd_opts +from modules import sd_hijack_clip, sd_hijack_open_clip + from modules.sd_hijack_optimizations import invokeAI_mps_available import ldm.modules.attention import ldm.modules.diffusionmodules.model import ldm.models.diffusion.ddim import ldm.models.diffusion.plms +import ldm.modules.encoders.modules attention_CrossAttention_forward = ldm.modules.attention.CrossAttention.forward diffusionmodules_model_nonlinearity = ldm.modules.diffusionmodules.model.nonlinearity diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.AttnBlock.forward +# new memory efficient cross attention blocks do not support hypernets and we already +# have memory efficient cross attention anyway, so this disables SD2.0's memory efficient cross attention +ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.CrossAttention +ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention + +# silence new console spam from SD2 +ldm.modules.attention.print = lambda *args: None +ldm.modules.diffusionmodules.model.print = lambda *args: None def apply_optimizations(): undo_optimizations() @@ -49,16 +60,11 @@ def apply_optimizations(): def undo_optimizations(): - from modules.hypernetworks import hypernetwork - - ldm.modules.attention.CrossAttention.forward = hypernetwork.attention_CrossAttention_forward + ldm.modules.attention.CrossAttention.forward = attention_CrossAttention_forward # this stops hypernets from working ldm.modules.diffusionmodules.model.nonlinearity = diffusionmodules_model_nonlinearity ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward -def get_target_prompt_token_count(token_count): - return math.ceil(max(token_count, 1) / 75) * 75 - class StableDiffusionModelHijack: fixes = None @@ -70,10 +76,13 @@ class StableDiffusionModelHijack: embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase(cmd_opts.embeddings_dir) def hijack(self, m): - model_embeddings = m.cond_stage_model.transformer.text_model.embeddings - - model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self) - m.cond_stage_model = FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) + if type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenCLIPEmbedder: + model_embeddings = m.cond_stage_model.transformer.text_model.embeddings + model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self) + m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) + elif type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder: + m.cond_stage_model.model.token_embedding = 
EmbeddingsWithFixes(m.cond_stage_model.model.token_embedding, self) + m.cond_stage_model = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) self.clip = m.cond_stage_model @@ -89,12 +98,15 @@ class StableDiffusionModelHijack: self.layers = flatten(m) def undo_hijack(self, m): - if type(m.cond_stage_model) == FrozenCLIPEmbedderWithCustomWords: + if type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords: m.cond_stage_model = m.cond_stage_model.wrapped - model_embeddings = m.cond_stage_model.transformer.text_model.embeddings - if type(model_embeddings.token_embedding) == EmbeddingsWithFixes: - model_embeddings.token_embedding = model_embeddings.token_embedding.wrapped + model_embeddings = m.cond_stage_model.transformer.text_model.embeddings + if type(model_embeddings.token_embedding) == EmbeddingsWithFixes: + model_embeddings.token_embedding = model_embeddings.token_embedding.wrapped + elif type(m.cond_stage_model) == sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords: + m.cond_stage_model.wrapped.model.token_embedding = m.cond_stage_model.wrapped.model.token_embedding.wrapped + m.cond_stage_model = m.cond_stage_model.wrapped self.apply_circular(False) self.layers = None @@ -114,261 +126,8 @@ class StableDiffusionModelHijack: def tokenize(self, text): _, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text]) - return remade_batch_tokens[0], token_count, get_target_prompt_token_count(token_count) - - -class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): - def __init__(self, wrapped, hijack): - super().__init__() - self.wrapped = wrapped - self.hijack: StableDiffusionModelHijack = hijack - self.tokenizer = wrapped.tokenizer - self.token_mults = {} - - self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ','][0] - - tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k] - for text, ident in tokens_with_parens: - mult = 1.0 - for c in text: - if c == '[': - mult /= 1.1 - if c == ']': - mult *= 1.1 - if c == '(': - mult *= 1.1 - if c == ')': - mult /= 1.1 - - if mult != 1.0: - self.token_mults[ident] = mult - - def tokenize_line(self, line, used_custom_terms, hijack_comments): - id_end = self.wrapped.tokenizer.eos_token_id - - if opts.enable_emphasis: - parsed = prompt_parser.parse_prompt_attention(line) - else: - parsed = [[line, 1.0]] - - tokenized = self.wrapped.tokenizer([text for text, _ in parsed], truncation=False, add_special_tokens=False)["input_ids"] - - fixes = [] - remade_tokens = [] - multipliers = [] - last_comma = -1 - - for tokens, (text, weight) in zip(tokenized, parsed): - i = 0 - while i < len(tokens): - token = tokens[i] - - embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i) - - if token == self.comma_token: - last_comma = len(remade_tokens) - elif opts.comma_padding_backtrack != 0 and max(len(remade_tokens), 1) % 75 == 0 and last_comma != -1 and len(remade_tokens) - last_comma <= opts.comma_padding_backtrack: - last_comma += 1 - reloc_tokens = remade_tokens[last_comma:] - reloc_mults = multipliers[last_comma:] - - remade_tokens = remade_tokens[:last_comma] - length = len(remade_tokens) - - rem = int(math.ceil(length / 75)) * 75 - length - remade_tokens += [id_end] * rem + reloc_tokens - multipliers = multipliers[:last_comma] + [1.0] * rem + reloc_mults - - if embedding is None: - remade_tokens.append(token) - multipliers.append(weight) - i += 1 - 
else: - emb_len = int(embedding.vec.shape[0]) - iteration = len(remade_tokens) // 75 - if (len(remade_tokens) + emb_len) // 75 != iteration: - rem = (75 * (iteration + 1) - len(remade_tokens)) - remade_tokens += [id_end] * rem - multipliers += [1.0] * rem - iteration += 1 - fixes.append((iteration, (len(remade_tokens) % 75, embedding))) - remade_tokens += [0] * emb_len - multipliers += [weight] * emb_len - used_custom_terms.append((embedding.name, embedding.checksum())) - i += embedding_length_in_tokens - - token_count = len(remade_tokens) - prompt_target_length = get_target_prompt_token_count(token_count) - tokens_to_add = prompt_target_length - len(remade_tokens) - - remade_tokens = remade_tokens + [id_end] * tokens_to_add - multipliers = multipliers + [1.0] * tokens_to_add - - return remade_tokens, fixes, multipliers, token_count - - def process_text(self, texts): - used_custom_terms = [] - remade_batch_tokens = [] - hijack_comments = [] - hijack_fixes = [] - token_count = 0 - - cache = {} - batch_multipliers = [] - for line in texts: - if line in cache: - remade_tokens, fixes, multipliers = cache[line] - else: - remade_tokens, fixes, multipliers, current_token_count = self.tokenize_line(line, used_custom_terms, hijack_comments) - token_count = max(current_token_count, token_count) - - cache[line] = (remade_tokens, fixes, multipliers) - - remade_batch_tokens.append(remade_tokens) - hijack_fixes.append(fixes) - batch_multipliers.append(multipliers) - - return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count - - def process_text_old(self, text): - id_start = self.wrapped.tokenizer.bos_token_id - id_end = self.wrapped.tokenizer.eos_token_id - maxlen = self.wrapped.max_length # you get to stay at 77 - used_custom_terms = [] - remade_batch_tokens = [] - overflowing_words = [] - hijack_comments = [] - hijack_fixes = [] - token_count = 0 - - cache = {} - batch_tokens = self.wrapped.tokenizer(text, truncation=False, add_special_tokens=False)["input_ids"] - batch_multipliers = [] - for tokens in batch_tokens: - tuple_tokens = tuple(tokens) - - if tuple_tokens in cache: - remade_tokens, fixes, multipliers = cache[tuple_tokens] - else: - fixes = [] - remade_tokens = [] - multipliers = [] - mult = 1.0 - - i = 0 - while i < len(tokens): - token = tokens[i] - - embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i) - - mult_change = self.token_mults.get(token) if opts.enable_emphasis else None - if mult_change is not None: - mult *= mult_change - i += 1 - elif embedding is None: - remade_tokens.append(token) - multipliers.append(mult) - i += 1 - else: - emb_len = int(embedding.vec.shape[0]) - fixes.append((len(remade_tokens), embedding)) - remade_tokens += [0] * emb_len - multipliers += [mult] * emb_len - used_custom_terms.append((embedding.name, embedding.checksum())) - i += embedding_length_in_tokens - - if len(remade_tokens) > maxlen - 2: - vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()} - ovf = remade_tokens[maxlen - 2:] - overflowing_words = [vocab.get(int(x), "") for x in ovf] - overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words)) - hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n") - - token_count = len(remade_tokens) - remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens)) - remade_tokens = [id_start] + remade_tokens[0:maxlen - 
2] + [id_end] - cache[tuple_tokens] = (remade_tokens, fixes, multipliers) - - multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers)) - multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0] - - remade_batch_tokens.append(remade_tokens) - hijack_fixes.append(fixes) - batch_multipliers.append(multipliers) - return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count - - def forward(self, text): - use_old = opts.use_old_emphasis_implementation - if use_old: - batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text_old(text) - else: - batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text(text) - - self.hijack.comments += hijack_comments - - if len(used_custom_terms) > 0: - self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms])) - - if use_old: - self.hijack.fixes = hijack_fixes - return self.process_tokens(remade_batch_tokens, batch_multipliers) - - z = None - i = 0 - while max(map(len, remade_batch_tokens)) != 0: - rem_tokens = [x[75:] for x in remade_batch_tokens] - rem_multipliers = [x[75:] for x in batch_multipliers] - - self.hijack.fixes = [] - for unfiltered in hijack_fixes: - fixes = [] - for fix in unfiltered: - if fix[0] == i: - fixes.append(fix[1]) - self.hijack.fixes.append(fixes) - - tokens = [] - multipliers = [] - for j in range(len(remade_batch_tokens)): - if len(remade_batch_tokens[j]) > 0: - tokens.append(remade_batch_tokens[j][:75]) - multipliers.append(batch_multipliers[j][:75]) - else: - tokens.append([self.wrapped.tokenizer.eos_token_id] * 75) - multipliers.append([1.0] * 75) - - z1 = self.process_tokens(tokens, multipliers) - z = z1 if z is None else torch.cat((z, z1), axis=-2) - - remade_batch_tokens = rem_tokens - batch_multipliers = rem_multipliers - i += 1 - - return z - - def process_tokens(self, remade_batch_tokens, batch_multipliers): - if not opts.use_old_emphasis_implementation: - remade_batch_tokens = [[self.wrapped.tokenizer.bos_token_id] + x[:75] + [self.wrapped.tokenizer.eos_token_id] for x in remade_batch_tokens] - batch_multipliers = [[1.0] + x[:75] + [1.0] for x in batch_multipliers] - - tokens = torch.asarray(remade_batch_tokens).to(device) - outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers) - - if opts.CLIP_stop_at_last_layers > 1: - z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers] - z = self.wrapped.transformer.text_model.final_layer_norm(z) - else: - z = outputs.last_hidden_state - - # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise - batch_multipliers_of_same_length = [x + [1.0] * (75 - len(x)) for x in batch_multipliers] - batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(device) - original_mean = z.mean() - z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape) - new_mean = z.mean() - z *= original_mean / new_mean + return remade_batch_tokens[0], token_count, sd_hijack_clip.get_target_prompt_token_count(token_count) - return z class EmbeddingsWithFixes(torch.nn.Module): diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py new file mode 100644 index 00000000..b451d1cf --- /dev/null +++ b/modules/sd_hijack_clip.py @@ -0,0 +1,301 @@ +import math + +import torch + +from modules import prompt_parser, 
devices +from modules.shared import opts + + +def get_target_prompt_token_count(token_count): + return math.ceil(max(token_count, 1) / 75) * 75 + + +class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): + def __init__(self, wrapped, hijack): + super().__init__() + self.wrapped = wrapped + self.hijack = hijack + + def tokenize(self, texts): + raise NotImplementedError + + def encode_with_transformers(self, tokens): + raise NotImplementedError + + def encode_embedding_init_text(self, init_text, nvpt): + raise NotImplementedError + + def tokenize_line(self, line, used_custom_terms, hijack_comments): + if opts.enable_emphasis: + parsed = prompt_parser.parse_prompt_attention(line) + else: + parsed = [[line, 1.0]] + + tokenized = self.tokenize([text for text, _ in parsed]) + + fixes = [] + remade_tokens = [] + multipliers = [] + last_comma = -1 + + for tokens, (text, weight) in zip(tokenized, parsed): + i = 0 + while i < len(tokens): + token = tokens[i] + + embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i) + + if token == self.comma_token: + last_comma = len(remade_tokens) + elif opts.comma_padding_backtrack != 0 and max(len(remade_tokens), 1) % 75 == 0 and last_comma != -1 and len(remade_tokens) - last_comma <= opts.comma_padding_backtrack: + last_comma += 1 + reloc_tokens = remade_tokens[last_comma:] + reloc_mults = multipliers[last_comma:] + + remade_tokens = remade_tokens[:last_comma] + length = len(remade_tokens) + + rem = int(math.ceil(length / 75)) * 75 - length + remade_tokens += [self.id_end] * rem + reloc_tokens + multipliers = multipliers[:last_comma] + [1.0] * rem + reloc_mults + + if embedding is None: + remade_tokens.append(token) + multipliers.append(weight) + i += 1 + else: + emb_len = int(embedding.vec.shape[0]) + iteration = len(remade_tokens) // 75 + if (len(remade_tokens) + emb_len) // 75 != iteration: + rem = (75 * (iteration + 1) - len(remade_tokens)) + remade_tokens += [self.id_end] * rem + multipliers += [1.0] * rem + iteration += 1 + fixes.append((iteration, (len(remade_tokens) % 75, embedding))) + remade_tokens += [0] * emb_len + multipliers += [weight] * emb_len + used_custom_terms.append((embedding.name, embedding.checksum())) + i += embedding_length_in_tokens + + token_count = len(remade_tokens) + prompt_target_length = get_target_prompt_token_count(token_count) + tokens_to_add = prompt_target_length - len(remade_tokens) + + remade_tokens = remade_tokens + [self.id_end] * tokens_to_add + multipliers = multipliers + [1.0] * tokens_to_add + + return remade_tokens, fixes, multipliers, token_count + + def process_text(self, texts): + used_custom_terms = [] + remade_batch_tokens = [] + hijack_comments = [] + hijack_fixes = [] + token_count = 0 + + cache = {} + batch_multipliers = [] + for line in texts: + if line in cache: + remade_tokens, fixes, multipliers = cache[line] + else: + remade_tokens, fixes, multipliers, current_token_count = self.tokenize_line(line, used_custom_terms, hijack_comments) + token_count = max(current_token_count, token_count) + + cache[line] = (remade_tokens, fixes, multipliers) + + remade_batch_tokens.append(remade_tokens) + hijack_fixes.append(fixes) + batch_multipliers.append(multipliers) + + return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count + + def process_text_old(self, texts): + id_start = self.id_start + id_end = self.id_end + maxlen = self.wrapped.max_length # you get to stay at 77 + used_custom_terms = [] + 
remade_batch_tokens = [] + hijack_comments = [] + hijack_fixes = [] + token_count = 0 + + cache = {} + batch_tokens = self.tokenize(texts) + batch_multipliers = [] + for tokens in batch_tokens: + tuple_tokens = tuple(tokens) + + if tuple_tokens in cache: + remade_tokens, fixes, multipliers = cache[tuple_tokens] + else: + fixes = [] + remade_tokens = [] + multipliers = [] + mult = 1.0 + + i = 0 + while i < len(tokens): + token = tokens[i] + + embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i) + + mult_change = self.token_mults.get(token) if opts.enable_emphasis else None + if mult_change is not None: + mult *= mult_change + i += 1 + elif embedding is None: + remade_tokens.append(token) + multipliers.append(mult) + i += 1 + else: + emb_len = int(embedding.vec.shape[0]) + fixes.append((len(remade_tokens), embedding)) + remade_tokens += [0] * emb_len + multipliers += [mult] * emb_len + used_custom_terms.append((embedding.name, embedding.checksum())) + i += embedding_length_in_tokens + + if len(remade_tokens) > maxlen - 2: + vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()} + ovf = remade_tokens[maxlen - 2:] + overflowing_words = [vocab.get(int(x), "") for x in ovf] + overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words)) + hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n") + + token_count = len(remade_tokens) + remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens)) + remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end] + cache[tuple_tokens] = (remade_tokens, fixes, multipliers) + + multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers)) + multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0] + + remade_batch_tokens.append(remade_tokens) + hijack_fixes.append(fixes) + batch_multipliers.append(multipliers) + return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count + + def forward(self, text): + use_old = opts.use_old_emphasis_implementation + if use_old: + batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text_old(text) + else: + batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text(text) + + self.hijack.comments += hijack_comments + + if len(used_custom_terms) > 0: + self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms])) + + if use_old: + self.hijack.fixes = hijack_fixes + return self.process_tokens(remade_batch_tokens, batch_multipliers) + + z = None + i = 0 + while max(map(len, remade_batch_tokens)) != 0: + rem_tokens = [x[75:] for x in remade_batch_tokens] + rem_multipliers = [x[75:] for x in batch_multipliers] + + self.hijack.fixes = [] + for unfiltered in hijack_fixes: + fixes = [] + for fix in unfiltered: + if fix[0] == i: + fixes.append(fix[1]) + self.hijack.fixes.append(fixes) + + tokens = [] + multipliers = [] + for j in range(len(remade_batch_tokens)): + if len(remade_batch_tokens[j]) > 0: + tokens.append(remade_batch_tokens[j][:75]) + multipliers.append(batch_multipliers[j][:75]) + else: + tokens.append([self.id_end] * 75) + multipliers.append([1.0] * 75) + + z1 = self.process_tokens(tokens, multipliers) + z = z1 if z is None else torch.cat((z, z1), axis=-2) + + remade_batch_tokens = 
rem_tokens + batch_multipliers = rem_multipliers + i += 1 + + return z + + def process_tokens(self, remade_batch_tokens, batch_multipliers): + if not opts.use_old_emphasis_implementation: + remade_batch_tokens = [[self.id_start] + x[:75] + [self.id_end] for x in remade_batch_tokens] + batch_multipliers = [[1.0] + x[:75] + [1.0] for x in batch_multipliers] + + tokens = torch.asarray(remade_batch_tokens).to(devices.device) + + if self.id_end != self.id_pad: + for batch_pos in range(len(remade_batch_tokens)): + index = remade_batch_tokens[batch_pos].index(self.id_end) + tokens[batch_pos, index+1:tokens.shape[1]] = self.id_pad + + z = self.encode_with_transformers(tokens) + + # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise + batch_multipliers_of_same_length = [x + [1.0] * (75 - len(x)) for x in batch_multipliers] + batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(devices.device) + original_mean = z.mean() + z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape) + new_mean = z.mean() + z *= original_mean / new_mean + + return z + + +class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase): + def __init__(self, wrapped, hijack): + super().__init__(wrapped, hijack) + self.tokenizer = wrapped.tokenizer + self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ','][0] + + self.token_mults = {} + tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k] + for text, ident in tokens_with_parens: + mult = 1.0 + for c in text: + if c == '[': + mult /= 1.1 + if c == ']': + mult *= 1.1 + if c == '(': + mult *= 1.1 + if c == ')': + mult /= 1.1 + + if mult != 1.0: + self.token_mults[ident] = mult + + self.id_start = self.wrapped.tokenizer.bos_token_id + self.id_end = self.wrapped.tokenizer.eos_token_id + self.id_pad = self.id_end + + def tokenize(self, texts): + tokenized = self.wrapped.tokenizer(texts, truncation=False, add_special_tokens=False)["input_ids"] + + return tokenized + + def encode_with_transformers(self, tokens): + outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers) + + if opts.CLIP_stop_at_last_layers > 1: + z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers] + z = self.wrapped.transformer.text_model.final_layer_norm(z) + else: + z = outputs.last_hidden_state + + return z + + def encode_embedding_init_text(self, init_text, nvpt): + embedding_layer = self.wrapped.transformer.text_model.embeddings + ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"] + embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0) + + return embedded diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 46714a4f..938f9a58 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -199,8 +199,8 @@ def sample_plms(self, @torch.no_grad() def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None): + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, 
t_next=None, dynamic_threshold=None): b, *_, device = *x.shape, x.device def get_model_output(x, t): @@ -249,6 +249,8 @@ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=F pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() if quantize_denoised: pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + if dynamic_threshold is not None: + pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) # direction pointing to x_t dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature @@ -321,12 +323,16 @@ def should_hijack_inpainting(checkpoint_info): def do_inpainting_hijack(): - ldm.models.diffusion.ddpm.get_unconditional_conditioning = get_unconditional_conditioning + # most of this stuff seems to no longer be needed because it is already included into SD2.0 + # LatentInpaintDiffusion remains because SD2.0's LatentInpaintDiffusion can't be loaded without specifying a checkpoint + # p_sample_plms is needed because PLMS can't work with dicts as conditionings + # this file should be cleaned up later if weverything tuens out to work fine + + # ldm.models.diffusion.ddpm.get_unconditional_conditioning = get_unconditional_conditioning ldm.models.diffusion.ddpm.LatentInpaintDiffusion = LatentInpaintDiffusion - ldm.models.diffusion.ddim.DDIMSampler.p_sample_ddim = p_sample_ddim - ldm.models.diffusion.ddim.DDIMSampler.sample = sample_ddim + # ldm.models.diffusion.ddim.DDIMSampler.p_sample_ddim = p_sample_ddim + # ldm.models.diffusion.ddim.DDIMSampler.sample = sample_ddim ldm.models.diffusion.plms.PLMSSampler.p_sample_plms = p_sample_plms - ldm.models.diffusion.plms.PLMSSampler.sample = sample_plms - + # ldm.models.diffusion.plms.PLMSSampler.sample = sample_plms diff --git a/modules/sd_hijack_open_clip.py b/modules/sd_hijack_open_clip.py new file mode 100644 index 00000000..f733e852 --- /dev/null +++ b/modules/sd_hijack_open_clip.py @@ -0,0 +1,37 @@ +import open_clip.tokenizer +import torch + +from modules import sd_hijack_clip, devices +from modules.shared import opts + +tokenizer = open_clip.tokenizer._tokenizer + + +class FrozenOpenCLIPEmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase): + def __init__(self, wrapped, hijack): + super().__init__(wrapped, hijack) + + self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ','][0] + self.id_start = tokenizer.encoder[""] + self.id_end = tokenizer.encoder[""] + self.id_pad = 0 + + def tokenize(self, texts): + assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip' + + tokenized = [tokenizer.encode(text) for text in texts] + + return tokenized + + def encode_with_transformers(self, tokens): + # set self.wrapped.layer_idx here according to opts.CLIP_stop_at_last_layers + z = self.wrapped.encode_with_transformer(tokens) + + return z + + def encode_embedding_init_text(self, init_text, nvpt): + ids = tokenizer.encode(init_text) + ids = torch.asarray([ids], device=devices.device, dtype=torch.int) + embedded = self.wrapped.model.token_embedding.wrapped(ids).squeeze(0) + + return embedded diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 4fe67854..4edd8c60 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -127,7 +127,8 @@ class InterruptedException(BaseException): class VanillaStableDiffusionSampler: def __init__(self, constructor, sd_model): self.sampler = constructor(sd_model) - self.orig_p_sample_ddim = self.sampler.p_sample_ddim if 
hasattr(self.sampler, 'p_sample_ddim') else self.sampler.p_sample_plms + self.is_plms = hasattr(self.sampler, 'p_sample_plms') + self.orig_p_sample_ddim = self.sampler.p_sample_plms if self.is_plms else self.sampler.p_sample_ddim self.mask = None self.nmask = None self.init_latent = None @@ -218,7 +219,6 @@ class VanillaStableDiffusionSampler: self.mask = p.mask if hasattr(p, 'mask') else None self.nmask = p.nmask if hasattr(p, 'nmask') else None - def adjust_steps_if_invalid(self, p, num_steps): if (self.config.name == 'DDIM' and p.ddim_discretize == 'uniform') or (self.config.name == 'PLMS'): valid_step = 999 / (1000 // num_steps) @@ -227,7 +227,6 @@ class VanillaStableDiffusionSampler: return num_steps - def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): steps, t_enc = setup_img2img_steps(p, steps) steps = self.adjust_steps_if_invalid(p, steps) @@ -260,9 +259,10 @@ class VanillaStableDiffusionSampler: steps = self.adjust_steps_if_invalid(p, steps or p.steps) # Wrap the conditioning models with additional image conditioning for inpainting model + # dummy_for_plms is needed because PLMS code checks the first item in the dict to have the right shape if image_conditioning is not None: - conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]} - unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]} + conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_concat": [image_conditioning]} + unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_concat": [image_conditioning]} samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0]) @@ -350,7 +350,9 @@ class TorchHijack: class KDiffusionSampler: def __init__(self, funcname, sd_model): - self.model_wrap = k_diffusion.external.CompVisDenoiser(sd_model, quantize=shared.opts.enable_quantization) + denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser + + self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) self.funcname = funcname self.func = getattr(k_diffusion.sampling, self.funcname) self.extra_params = sampler_extra_params.get(funcname, []) diff --git a/modules/shared.py b/modules/shared.py index c93ae2a3..8fb1387a 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -11,17 +11,15 @@ import tqdm import modules.artists import modules.interrogate import modules.memmon -import modules.sd_models import modules.styles import modules.devices as devices -from modules import sd_samplers, sd_models, localization, sd_vae, extensions, script_loading -from modules.hypernetworks import hypernetwork +from modules import localization, sd_vae, extensions, script_loading from modules.paths import models_path, script_path, sd_path sd_model_file = os.path.join(script_path, 'model.ckpt') default_sd_model_file = sd_model_file parser = argparse.ArgumentParser() -parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",) +parser.add_argument("--config", type=str, default=os.path.join(script_path, "v1-inference.yaml"), 
help="path to config which constructs model",) parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",) parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints") parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN')) @@ -121,10 +119,12 @@ xformers_available = False config_filename = cmd_opts.ui_settings_file os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True) -hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir) +hypernetworks = {} loaded_hypernetwork = None + def reload_hypernetworks(): + from modules.hypernetworks import hypernetwork global hypernetworks hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir) @@ -206,10 +206,11 @@ class State: if self.current_latent is None: return + import modules.sd_samplers if opts.show_progress_grid: - self.current_image = sd_samplers.samples_to_image_grid(self.current_latent) + self.current_image = modules.sd_samplers.samples_to_image_grid(self.current_latent) else: - self.current_image = sd_samplers.sample_to_image(self.current_latent) + self.current_image = modules.sd_samplers.sample_to_image(self.current_latent) self.current_image_sampling_step = self.sampling_step @@ -248,6 +249,21 @@ def options_section(section_identifier, options_dict): return options_dict +def list_checkpoint_tiles(): + import modules.sd_models + return modules.sd_models.checkpoint_tiles() + + +def refresh_checkpoints(): + import modules.sd_models + return modules.sd_models.list_models() + + +def list_samplers(): + import modules.sd_samplers + return modules.sd_samplers.all_samplers + + hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config} options_templates = {} @@ -333,7 +349,7 @@ options_templates.update(options_section(('training', "Training"), { })) options_templates.update(options_section(('sd', "Stable Diffusion"), { - "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, refresh=sd_models.list_models), + "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints), "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), "sd_vae": OptionInfo("auto", "SD VAE", gr.Dropdown, lambda: {"choices": sd_vae.vae_list}, refresh=sd_vae.refresh_vae_list), "sd_vae_as_default": OptionInfo(False, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"), @@ -385,7 +401,7 @@ options_templates.update(options_section(('ui', "User interface"), { })) options_templates.update(options_section(('sampler-params', "Sampler parameters"), { - "hide_samplers": OptionInfo([], "Hide samplers in user interface (requires restart)", gr.CheckboxGroup, lambda: {"choices": [x.name for x in sd_samplers.all_samplers]}), + "hide_samplers": OptionInfo([], "Hide samplers in user interface (requires restart)", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}), "eta_ddim": OptionInfo(0.0, "eta (noise multiplier) for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), "eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", 
gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 5e4d8688..a273e663 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -64,7 +64,8 @@ class EmbeddingDatabase: self.word_embeddings[embedding.name] = embedding - ids = model.cond_stage_model.tokenizer([embedding.name], add_special_tokens=False)['input_ids'][0] + # TODO changing between clip and open clip changes tokenization, which will cause embeddings to stop working + ids = model.cond_stage_model.tokenize([embedding.name])[0] first_id = ids[0] if first_id not in self.ids_lookup: @@ -155,13 +156,11 @@ class EmbeddingDatabase: def create_embedding(name, num_vectors_per_token, overwrite_old, init_text='*'): cond_model = shared.sd_model.cond_stage_model - embedding_layer = cond_model.wrapped.transformer.text_model.embeddings with devices.autocast(): cond_model([""]) # will send cond model to GPU if lowvram/medvram is active - ids = cond_model.tokenizer(init_text, max_length=num_vectors_per_token, return_tensors="pt", add_special_tokens=False)["input_ids"] - embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0) + embedded = cond_model.encode_embedding_init_text(init_text, num_vectors_per_token) vec = torch.zeros((num_vectors_per_token, embedded.shape[1]), device=devices.device) for i in range(num_vectors_per_token): diff --git a/modules/ui.py b/modules/ui.py index e6da1b2a..e5cb69d0 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -478,9 +478,7 @@ def create_toprow(is_img2img): if is_img2img: with gr.Column(scale=1, elem_id="interrogate_col"): button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") - - if cmd_opts.deepdanbooru: - button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") + button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") with gr.Column(scale=1): with gr.Row(): @@ -1004,11 +1002,10 @@ def create_ui(wrap_gradio_gpu_call): outputs=[img2img_prompt], ) - if cmd_opts.deepdanbooru: - img2img_deepbooru.click( - fn=interrogate_deepbooru, - inputs=[init_img], - outputs=[img2img_prompt], + img2img_deepbooru.click( + fn=interrogate_deepbooru, + inputs=[init_img], + outputs=[img2img_prompt], ) diff --git a/requirements.txt b/requirements.txt index 762db4f3..e4e5ec64 100644 --- a/requirements.txt +++ b/requirements.txt @@ -28,3 +28,4 @@ kornia lark inflection GitPython +torchsde diff --git a/requirements_versions.txt b/requirements_versions.txt index 662ca684..8d557fe3 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -25,3 +25,4 @@ kornia==0.6.7 lark==1.1.2 inflection==0.5.1 GitPython==3.1.27 +torchsde==0.2.5 diff --git a/v1-inference.yaml b/v1-inference.yaml new file mode 100644 index 00000000..d4effe56 --- /dev/null +++ b/v1-inference.yaml @@ -0,0 +1,70 @@ +model: + base_learning_rate: 1.0e-04 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false # Note: different from the one we trained before + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + + 
scheduler_config: # 10000 warmup steps + target: ldm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [ 10000 ] + cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases + f_start: [ 1.e-6 ] + f_max: [ 1. ] + f_min: [ 1. ] + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.FrozenCLIPEmbedder diff --git a/webui.py b/webui.py index c5e5fe75..23215d1e 100644 --- a/webui.py +++ b/webui.py @@ -10,7 +10,7 @@ from fastapi.middleware.gzip import GZipMiddleware from modules.paths import script_path -from modules import devices, sd_samplers, upscaler, extensions, localization +from modules import shared, devices, sd_samplers, upscaler, extensions, localization import modules.codeformer_model as codeformer import modules.extras import modules.face_restoration @@ -23,7 +23,6 @@ import modules.scripts import modules.sd_hijack import modules.sd_models import modules.sd_vae -import modules.shared as shared import modules.txt2img import modules.script_callbacks @@ -86,7 +85,7 @@ def initialize(): shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights())) shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False) shared.opts.onchange("sd_vae_as_default", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False) - shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetworks.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork))) + shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: shared.reload_hypernetworks())) shared.opts.onchange("sd_hypernetwork_strength", modules.hypernetworks.hypernetwork.apply_strength) if cmd_opts.tls_keyfile is not None and cmd_opts.tls_keyfile is not None: -- cgit v1.2.3 From 89d8804768d4479d72d24ebbbd2bd00650ef5dc7 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 27 Nov 2022 18:48:08 +0300 Subject: only run install.py for enabled extensions --- launch.py | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index c426e673..ad9ddd5a 100644 --- a/launch.py +++ b/launch.py @@ -5,6 +5,8 @@ import sys import importlib.util import shlex import platform +import argparse +import json dir_repos = "repositories" dir_extensions = "extensions" @@ -132,11 +134,26 @@ def run_extension_installer(extension_dir): print(e, file=sys.stderr) -def run_extensions_installers(): +def list_extensions(settings_file): + settings = {} + + try: + if os.path.isfile(settings_file): + with open(settings_file, "r", encoding="utf8") as file: + settings = json.load(file) + except Exception as e: + print(e, file=sys.stderr) + + disabled_extensions = 
set(settings.get('disabled_extensions', [])) + + return [x for x in os.listdir(dir_extensions) if x not in disabled_extensions] + + +def run_extensions_installers(settings_file): if not os.path.isdir(dir_extensions): return - for dirname_extension in os.listdir(dir_extensions): + for dirname_extension in list_extensions(settings_file): run_extension_installer(os.path.join(dir_extensions, dirname_extension)) @@ -165,6 +182,10 @@ def prepare_enviroment(): sys.argv += shlex.split(commandline_args) + parser = argparse.ArgumentParser() + parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default='config.json') + args, _ = parser.parse_known_args(sys.argv) + sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test') sys.argv, reinstall_xformers = extract_arg(sys.argv, '--reinstall-xformers') sys.argv, update_check = extract_arg(sys.argv, '--update-check') @@ -223,7 +244,7 @@ def prepare_enviroment(): run_pip(f"install -r {requirements_file}", "requirements for Web UI") - run_extensions_installers() + run_extensions_installers(settings_file=args.ui_settings_file) if update_check: version_check(commit) -- cgit v1.2.3 From 79953e9b8b0d04868b719452a5b250c450f757b6 Mon Sep 17 00:00:00 2001 From: brkirch Date: Thu, 1 Dec 2022 03:38:13 -0500 Subject: Add support for macOS (Darwin) in launch.py --- launch.py | 19 ++++++++++++++++++- webui-user.sh | 2 +- 2 files changed, 19 insertions(+), 2 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index ad9ddd5a..48314264 100644 --- a/launch.py +++ b/launch.py @@ -193,6 +193,20 @@ def prepare_enviroment(): xformers = '--xformers' in sys.argv ngrok = '--ngrok' in sys.argv + if platform.system() == 'Darwin': + os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" + torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1 torchvision==0.13.1") + k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/brkirch/k-diffusion.git') + k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "51c9778f269cedb55a4d88c79c0246d35bdadb71") + if os.environ.get('COMMANDLINE_ARGS') == None: + if '--use-cpu' in sys.argv: + idx = sys.argv.index('--use-cpu') + if idx < len(sys.argv) and sys.argv[idx+1][0] != '-': + sys.argv.insert(idx+1, 'interrogate') + else: + sys.argv += ['--use-cpu', 'interrogate'] + sys.argv.append('--no-half') + try: commit = run(f"{git} rev-parse HEAD").strip() except Exception: @@ -204,7 +218,7 @@ def prepare_enviroment(): if not is_installed("torch") or not is_installed("torchvision"): run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch") - if not skip_torch_cuda_test: + if not skip_torch_cuda_test and platform.system() != 'Darwin': run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'") if not is_installed("gfpgan"): @@ -231,6 +245,9 @@ def prepare_enviroment(): if not is_installed("pyngrok") and ngrok: run_pip("install pyngrok", "ngrok") + if platform.system() == 'Darwin' and not is_installed("psutil"): + run_pip("install psutil", "psutil") + os.makedirs(dir_repos, exist_ok=True) git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash) diff --git a/webui-user.sh b/webui-user.sh index 16e42759..bfa53cb7 100644 --- a/webui-user.sh +++ b/webui-user.sh @@ -10,7 +10,7 @@ #clone_dir="stable-diffusion-webui" # 
Commandline arguments for webui.py, for example: export COMMANDLINE_ARGS="--medvram --opt-split-attention" -export COMMANDLINE_ARGS="" +#export COMMANDLINE_ARGS="" # python3 executable #python_cmd="python3" -- cgit v1.2.3 From bef36597cc46671eeb2041b0343bd5e183883eb7 Mon Sep 17 00:00:00 2001 From: brkirch Date: Thu, 1 Dec 2022 04:04:14 -0500 Subject: Fix run as root flag Even though -f enables running webui.sh as root, the -f flag will also be passed to launch.py, causing it to exit with a usage message. This adds a line to launch.py to remove the -f flag if present. In addition to the above, all the letters in the command line arguments after each '-' were being processed for 'f' and "illegal option" was displayed for each letter that didn't match. Instead, this commit silences those errors and stops processing if the first flag doesn't start with '-f'. --- launch.py | 1 + webui.sh | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index 48314264..1661b569 100644 --- a/launch.py +++ b/launch.py @@ -186,6 +186,7 @@ def prepare_enviroment(): parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default='config.json') args, _ = parser.parse_known_args(sys.argv) + sys.argv, _ = extract_arg(sys.argv, '-f') sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test') sys.argv, reinstall_xformers = extract_arg(sys.argv, '--reinstall-xformers') sys.argv, update_check = extract_arg(sys.argv, '--update-check') diff --git a/webui.sh b/webui.sh index 6d4f0992..5f48741f 100755 --- a/webui.sh +++ b/webui.sh @@ -51,10 +51,11 @@ fi can_run_as_root=0 # read any command line flags to the webui.sh script -while getopts "f" flag +while getopts "f" flag > /dev/null 2>&1 do case ${flag} in f) can_run_as_root=1;; + *) break;; esac done -- cgit v1.2.3 From b7ef99634c3f862e835527f8d11c4996eb4f4eec Mon Sep 17 00:00:00 2001 From: Hsiang-Cheng Yang Date: Sat, 3 Dec 2022 17:35:17 +0800 Subject: Update launch.py fix a typo --- launch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'launch.py') diff --git a/launch.py b/launch.py index ad9ddd5a..90b5d99a 100644 --- a/launch.py +++ b/launch.py @@ -157,7 +157,7 @@ def run_extensions_installers(settings_file): run_extension_installer(os.path.join(dir_extensions, dirname_extension)) -def prepare_enviroment(): +def prepare_environment(): torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") commandline_args = os.environ.get('COMMANDLINE_ARGS', "") @@ -290,5 +290,5 @@ def start(): if __name__ == "__main__": - prepare_enviroment() + prepare_environment() start() -- cgit v1.2.3 From 5ec8981df46ff6e678c09dd2c1bf4d873ac22a46 Mon Sep 17 00:00:00 2001 From: brkirch Date: Sat, 3 Dec 2022 02:28:53 -0500 Subject: Revert most launch.py changes, add mac user script Adds an addition file to read environment variables from when the webui.sh is run from macOS. 
--- launch.py | 19 +------------------ webui-macos-env.sh | 13 +++++++++++++ webui.sh | 8 ++++++++ 3 files changed, 22 insertions(+), 18 deletions(-) create mode 100644 webui-macos-env.sh (limited to 'launch.py') diff --git a/launch.py b/launch.py index 1661b569..0e1bbaf2 100644 --- a/launch.py +++ b/launch.py @@ -194,20 +194,6 @@ def prepare_enviroment(): xformers = '--xformers' in sys.argv ngrok = '--ngrok' in sys.argv - if platform.system() == 'Darwin': - os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" - torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1 torchvision==0.13.1") - k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/brkirch/k-diffusion.git') - k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "51c9778f269cedb55a4d88c79c0246d35bdadb71") - if os.environ.get('COMMANDLINE_ARGS') == None: - if '--use-cpu' in sys.argv: - idx = sys.argv.index('--use-cpu') - if idx < len(sys.argv) and sys.argv[idx+1][0] != '-': - sys.argv.insert(idx+1, 'interrogate') - else: - sys.argv += ['--use-cpu', 'interrogate'] - sys.argv.append('--no-half') - try: commit = run(f"{git} rev-parse HEAD").strip() except Exception: @@ -219,7 +205,7 @@ def prepare_enviroment(): if not is_installed("torch") or not is_installed("torchvision"): run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch") - if not skip_torch_cuda_test and platform.system() != 'Darwin': + if not skip_torch_cuda_test: run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'") if not is_installed("gfpgan"): @@ -246,9 +232,6 @@ def prepare_enviroment(): if not is_installed("pyngrok") and ngrok: run_pip("install pyngrok", "ngrok") - if platform.system() == 'Darwin' and not is_installed("psutil"): - run_pip("install psutil", "psutil") - os.makedirs(dir_repos, exist_ok=True) git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash) diff --git a/webui-macos-env.sh b/webui-macos-env.sh new file mode 100644 index 00000000..68d1f754 --- /dev/null +++ b/webui-macos-env.sh @@ -0,0 +1,13 @@ +#!/bin/bash +#################################################################### +# macOS defaults # +# Please modify webui-user.sh to change these instead of this file # +#################################################################### + +export COMMANDLINE_ARGS="--skip-torch-cuda-test --no-half --use-cpu interrogate" +export TORCH_COMMAND="pip install torch==1.12.1 torchvision==0.13.1" +export K_DIFFUSION_REPO="https://github.com/brkirch/k-diffusion.git" +export K_DIFFUSION_COMMIT_HASH="51c9778f269cedb55a4d88c79c0246d35bdadb71" +export PYTORCH_ENABLE_MPS_FALLBACK=1 + +#################################################################### diff --git a/webui.sh b/webui.sh index 5f48741f..683c97d3 100755 --- a/webui.sh +++ b/webui.sh @@ -4,6 +4,14 @@ # change the variables in webui-user.sh instead # ################################################# +# If run from macOS, load defaults from webui-macos-env.sh +if [[ "$OSTYPE" == "darwin"* ]]; then + if [[ -f webui-macos-env.sh ]] + then + source ./webui-macos-env.sh + fi +fi + # Read variables from webui-user.sh # shellcheck source=/dev/null if [[ -f webui-user.sh ]] -- cgit v1.2.3 From 997461d3dd86f51c06ea0c2eff17ce8b8b48c0af Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 5 Jan 2023 11:57:01 +0300 
Subject: add footer with versions --- html/footer.html | 4 ++++ launch.py | 20 ++++++++++++++++---- modules/ui.py | 31 ++++++++++++++++++++++++++++++- style.css | 5 +++++ 4 files changed, 55 insertions(+), 5 deletions(-) (limited to 'launch.py') diff --git a/html/footer.html b/html/footer.html index a8f2adf7..bad87ff6 100644 --- a/html/footer.html +++ b/html/footer.html @@ -7,3 +7,7 @@  •  Reload UI +
+<div class="versions">
+{versions} +
diff --git a/launch.py b/launch.py index af0d418b..49b91b1f 100644 --- a/launch.py +++ b/launch.py @@ -13,6 +13,21 @@ dir_extensions = "extensions" python = sys.executable git = os.environ.get('GIT', "git") index_url = os.environ.get('INDEX_URL', "") +stored_commit_hash = None + + +def commit_hash(): + global stored_commit_hash + + if stored_commit_hash is not None: + return stored_commit_hash + + try: + stored_commit_hash = run(f"{git} rev-parse HEAD").strip() + except Exception: + stored_commit_hash = "" + + return stored_commit_hash def extract_arg(args, name): @@ -194,10 +209,7 @@ def prepare_environment(): xformers = '--xformers' in sys.argv ngrok = '--ngrok' in sys.argv - try: - commit = run(f"{git} rev-parse HEAD").strip() - except Exception: - commit = "" + commit = commit_hash() print(f"Python {sys.version}") print(f"Commit hash: {commit}") diff --git a/modules/ui.py b/modules/ui.py index bb64fe20..81d96c5b 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1696,7 +1696,9 @@ def create_ui(): if os.path.exists("html/footer.html"): with open("html/footer.html", encoding="utf8") as file: - gr.HTML(file.read(), elem_id="footer") + footer = file.read() + footer = footer.format(versions=versions_html()) + gr.HTML(footer, elem_id="footer") text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False) settings_submit.click( @@ -1857,3 +1859,30 @@ def reload_javascript(): if not hasattr(shared, 'GradioTemplateResponseOriginal'): shared.GradioTemplateResponseOriginal = gradio.routes.templates.TemplateResponse + + +def versions_html(): + import torch + import launch + + python_version = ".".join([str(x) for x in sys.version_info[0:3]]) + commit = launch.commit_hash() + short_commit = commit[0:8] + + if shared.xformers_available: + import xformers + xformers_version = xformers.__version__ + else: + xformers_version = "N/A" + + return f""" +python: {python_version} + •  +torch: {torch.__version__} + •  +xformers: {xformers_version} + •  +gradio: {gr.__version__} + •  +commit: {short_commit} +""" diff --git a/style.css b/style.css index 09ee540b..ee74d79e 100644 --- a/style.css +++ b/style.css @@ -628,6 +628,11 @@ footer { display: inline-block; } +#footer .versions{ + font-size: 85%; + opacity: 0.85; +} + /* The following handles localization for right-to-left (RTL) languages like Arabic. The rtl media type will only be activated by the logic in javascript/localization.js. 
If you change anything above, you need to make sure it is RTL compliant by just running -- cgit v1.2.3 From 76a21b9626b7556638db188c157e3e8036803326 Mon Sep 17 00:00:00 2001 From: Vladimir Repin <32306715+mezotaken@users.noreply.github.com> Date: Tue, 10 Jan 2023 12:46:35 +0300 Subject: clear envvar, add assertion --- launch.py | 1 + test/basic_features/txt2img_test.py | 1 + 2 files changed, 2 insertions(+) (limited to 'launch.py') diff --git a/launch.py b/launch.py index 49b91b1f..bcbb792c 100644 --- a/launch.py +++ b/launch.py @@ -282,6 +282,7 @@ def tests(test_dir): print(f"Launching Web UI in another process for testing with arguments: {' '.join(sys.argv[1:])}") + os.environ['COMMANDLINE_ARGS'] = "" with open('test/stdout.txt', "w", encoding="utf8") as stdout, open('test/stderr.txt', "w", encoding="utf8") as stderr: proc = subprocess.Popen([sys.executable, *sys.argv], stdout=stdout, stderr=stderr) diff --git a/test/basic_features/txt2img_test.py b/test/basic_features/txt2img_test.py index 5b27a7ec..5aa43a44 100644 --- a/test/basic_features/txt2img_test.py +++ b/test/basic_features/txt2img_test.py @@ -43,6 +43,7 @@ class TestTxt2ImgWorking(unittest.TestCase): def test_txt2img_with_complex_prompt_performed(self): self.simple_txt2img["prompt"] = "((emphasis)), (emphasis1:1.1), [to:1], [from::2], [from:to:0.3], [alt|alt1]" + self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) def test_txt2img_not_square_image_performed(self): self.simple_txt2img["height"] = 128 -- cgit v1.2.3
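Note on the extract_arg pattern used throughout the launch.py patches above: calls such as sys.argv, _ = extract_arg(sys.argv, '-f') and sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test') rely on a small helper whose definition is not included in this excerpt. A minimal sketch consistent with those call sites (an assumption, not text quoted from the patches) is:

import sys

def extract_arg(args, name):
    # Return args with every occurrence of `name` removed, plus whether `name` was present.
    return [x for x in args if x != name], name in args

# Usage mirroring the call sites in the patches above:
sys.argv, _ = extract_arg(sys.argv, '-f')  # drop the flag webui.sh may pass through
sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test')

In this sketch, launcher-only flags are consumed inside prepare_environment() so they are no longer present in sys.argv when the web UI itself starts.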