From a46c23b10f972ee235e282e7d79de2e9e7a91d68 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Fri, 5 May 2023 22:48:27 -0600 Subject: Make gamepad navigation optional --- modules/shared.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules/shared.py') diff --git a/modules/shared.py b/modules/shared.py index 6a2b3c2b..977ff16b 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -399,6 +399,7 @@ options_templates.update(options_section(('ui', "User interface"), { "font": OptionInfo("", "Font for image grids that have text"), "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), + "js_modal_lightbox_gamepad": OptionInfo(True, "Navagete image viewer with gamepad"), "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group"), "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row"), -- cgit v1.2.3 From 5cbc1c5d438e9ca7384a26eab28a09f44c5162f2 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Fri, 5 May 2023 23:03:32 -0600 Subject: Fix spelling --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/shared.py') diff --git a/modules/shared.py b/modules/shared.py index 977ff16b..b3ca8401 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -399,7 +399,7 @@ options_templates.update(options_section(('ui', "User interface"), { "font": OptionInfo("", "Font for image grids that have text"), "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), - "js_modal_lightbox_gamepad": OptionInfo(True, "Navagete image viewer with gamepad"), + "js_modal_lightbox_gamepad": OptionInfo(True, "Navigate image viewer with gamepad"), "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group"), "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row"), -- cgit v1.2.3 From 99f3bf07d2976211eed81a9293a447c7ead2d893 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sat, 6 May 2023 22:16:51 -0600 Subject: gamepad repeat option --- javascript/imageviewerGamepad.js | 5 ++--- modules/shared.py | 1 + 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'modules/shared.py') diff --git a/javascript/imageviewerGamepad.js b/javascript/imageviewerGamepad.js index f25f0857..d24b7b97 100644 --- a/javascript/imageviewerGamepad.js +++ b/javascript/imageviewerGamepad.js @@ -1,4 +1,3 @@ -const delay = 350//ms let isWaiting = false; window.addEventListener('gamepadconnected', (e) => { setInterval(async () => { @@ -18,7 +17,7 @@ window.addEventListener('gamepadconnected', (e) => { if (xValue < 0.3 && xValue > -0.3) { return true; } - }, delay); + }, opts.js_modal_lightbox_gamepad_repeat); isWaiting = false; } }, 10); @@ -41,7 +40,7 @@ window.addEventListener('wheel', (e) => { setTimeout(() => { isScrolling = false; - }, delay); + }, opts.js_modal_lightbox_gamepad_repeat); }); function sleepUntil(f, timeout) { diff --git a/modules/shared.py b/modules/shared.py index b3ca8401..d8d2bc78 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -400,6 +400,7 @@ 
options_templates.update(options_section(('ui', "User interface"), { "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), "js_modal_lightbox_gamepad": OptionInfo(True, "Navigate image viewer with gamepad"), + "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"), "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group"), "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row"), -- cgit v1.2.3 From 083dc3c76ab7dbc7b2b04f3396d4f5280b002906 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 8 May 2023 11:33:45 +0300 Subject: directory hiding for extra networks: dirs starting with . will hide their cards on extra network tabs unless specifically searched for create HTML for extra network pages only on demand allow directories starting with . to still list their models for lora, checkpoints, etc keep "search" filter for extra networks when user refreshes the page --- extensions-builtin/Lora/lora.py | 6 +--- html/extra-networks-card.html | 2 +- javascript/extraNetworks.js | 25 ++++++++++++--- modules/modelloader.py | 27 +++++----------- modules/shared.py | 17 ++++++++++ modules/ui_extra_networks.py | 69 +++++++++++++++++++++++++++++------------ 6 files changed, 97 insertions(+), 49 deletions(-) (limited to 'modules/shared.py') diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py index 83c1c6fd..83933639 100644 --- a/extensions-builtin/Lora/lora.py +++ b/extensions-builtin/Lora/lora.py @@ -352,11 +352,7 @@ def list_available_loras(): os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True) - candidates = \ - glob.glob(os.path.join(shared.cmd_opts.lora_dir, '**/*.pt'), recursive=True) + \ - glob.glob(os.path.join(shared.cmd_opts.lora_dir, '**/*.safetensors'), recursive=True) + \ - glob.glob(os.path.join(shared.cmd_opts.lora_dir, '**/*.ckpt'), recursive=True) - + candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"])) for filename in sorted(candidates, key=str.lower): if os.path.isdir(filename): continue diff --git a/html/extra-networks-card.html b/html/extra-networks-card.html index ef4b613a..1d546217 100644 --- a/html/extra-networks-card.html +++ b/html/extra-networks-card.html @@ -6,7 +6,7 @@ - + {name} {description} diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index c8f6b386..c85bc79a 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -1,4 +1,3 @@ - function setupExtraNetworksForTab(tabname){ gradioApp().querySelector('#'+tabname+'_extra_tabs').classList.add('extra-networks') @@ -10,16 +9,34 @@ function setupExtraNetworksForTab(tabname){ tabs.appendChild(search) tabs.appendChild(refresh) - search.addEventListener("input", function(){ + var applyFilter = function(){ var searchTerm = search.value.toLowerCase() gradioApp().querySelectorAll('#'+tabname+'_extra_tabs div.card').forEach(function(elem){ + var searchOnly = elem.querySelector('.search_only') var text = elem.querySelector('.name').textContent.toLowerCase() + " " + elem.querySelector('.search_term').textContent.toLowerCase() - elem.style.display = text.indexOf(searchTerm) == -1 ? 
"none" : "" + + var visible = text.indexOf(searchTerm) != -1 + + if(searchOnly && searchTerm.length < 4){ + visible = false + } + + elem.style.display = visible ? "" : "none" }) - }); + } + + search.addEventListener("input", applyFilter); + applyFilter(); + + extraNetworksApplyFilter[tabname] = applyFilter; +} + +function applyExtraNetworkFilter(tabname){ + setTimeout(extraNetworksApplyFilter[tabname], 1); } +var extraNetworksApplyFilter = {} var activePromptTextarea = {}; function setupExtraNetworks(){ diff --git a/modules/modelloader.py b/modules/modelloader.py index 522affc6..f2274488 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -22,9 +22,6 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None """ output = [] - if ext_filter is None: - ext_filter = [] - try: places = [] @@ -39,22 +36,14 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None places.append(model_path) for place in places: - if os.path.exists(place): - for file in glob.iglob(place + '**/**', recursive=True): - full_path = file - if os.path.isdir(full_path): - continue - if os.path.islink(full_path) and not os.path.exists(full_path): - print(f"Skipping broken symlink: {full_path}") - continue - if ext_blacklist is not None and any([full_path.endswith(x) for x in ext_blacklist]): - continue - if len(ext_filter) != 0: - model_name, extension = os.path.splitext(file) - if extension not in ext_filter: - continue - if file not in output: - output.append(full_path) + for full_path in shared.walk_files(place, allowed_extensions=ext_filter): + if os.path.islink(full_path) and not os.path.exists(full_path): + print(f"Skipping broken symlink: {full_path}") + continue + if ext_blacklist is not None and any([full_path.endswith(x) for x in ext_blacklist]): + continue + if full_path not in output: + output.append(full_path) if model_url is not None and len(output) == 0: if download_name is not None: diff --git a/modules/shared.py b/modules/shared.py index 91aac1a3..dd374713 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -726,3 +726,20 @@ def html(filename): return file.read() return "" + + +def walk_files(path, allowed_extensions=None): + if not os.path.exists(path): + return + + if allowed_extensions is not None: + allowed_extensions = set(allowed_extensions) + + for root, dirs, files in os.walk(path): + for filename in files: + if allowed_extensions is not None: + _, ext = os.path.splitext(filename) + if ext not in allowed_extensions: + continue + + yield os.path.join(root, filename) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index aa2f5d1b..86c05a55 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -89,19 +89,22 @@ class ExtraNetworksPage: subdirs = {} for parentdir in [os.path.abspath(x) for x in self.allowed_directories_for_previews()]: - for x in glob.glob(os.path.join(parentdir, '**/*'), recursive=True): - if not os.path.isdir(x): - continue + for root, dirs, files in os.walk(parentdir): + for dirname in dirs: + x = os.path.join(root, dirname) - subdir = os.path.abspath(x)[len(parentdir):].replace("\\", "/") - while subdir.startswith("/"): - subdir = subdir[1:] + if not os.path.isdir(x): + continue - is_empty = len(os.listdir(x)) == 0 - if not is_empty and not subdir.endswith("/"): - subdir = subdir + "/" + subdir = os.path.abspath(x)[len(parentdir):].replace("\\", "/") + while subdir.startswith("/"): + subdir = subdir[1:] - subdirs[subdir] = 1 + is_empty = len(os.listdir(x)) == 0 
+ if not is_empty and not subdir.endswith("/"): + subdir = subdir + "/" + + subdirs[subdir] = 1 if subdirs: subdirs = {"": 1, **subdirs} @@ -157,8 +160,20 @@ class ExtraNetworksPage: if metadata: metadata_button = f"" + local_path = "" + filename = item.get("filename", "") + for reldir in self.allowed_directories_for_previews(): + absdir = os.path.abspath(reldir) + + if filename.startswith(absdir): + local_path = filename[len(absdir):] + + # if this is true, the item must not be show in the default view, and must instead only be + # shown when searching for it + serach_only = "/." in local_path or "\\." in local_path + args = { - "style": f"'{height}{width}{background_image}'", + "style": f"'display: none; {height}{width}{background_image}'", "prompt": item.get("prompt", None), "tabname": json.dumps(tabname), "local_preview": json.dumps(item["local_preview"]), @@ -168,6 +183,7 @@ class ExtraNetworksPage: "save_card_preview": '"' + html.escape(f"""return saveCardPreview(event, {json.dumps(tabname)}, {json.dumps(item["local_preview"])})""") + '"', "search_term": item.get("search_term", ""), "metadata_button": metadata_button, + "serach_only": " search_only" if serach_only else "", } return self.card_page.format(**args) @@ -209,6 +225,11 @@ def intialize(): class ExtraNetworksUi: def __init__(self): self.pages = None + """gradio HTML components related to extra networks' pages""" + + self.page_contents = None + """HTML content of the above; empty initially, filled when extra pages have to be shown""" + self.stored_extra_pages = None self.button_save_preview = None @@ -236,17 +257,22 @@ def pages_in_preferred_order(pages): def create_ui(container, button, tabname): ui = ExtraNetworksUi() ui.pages = [] + ui.pages_contents = [] ui.stored_extra_pages = pages_in_preferred_order(extra_pages.copy()) ui.tabname = tabname with gr.Tabs(elem_id=tabname+"_extra_tabs") as tabs: for page in ui.stored_extra_pages: - with gr.Tab(page.title, id=page.title.lower().replace(" ", "_")): + page_id = page.title.lower().replace(" ", "_") - page_elem = gr.HTML(page.create_html(ui.tabname)) + with gr.Tab(page.title, id=page_id): + elem_id = f"{tabname}_{page_id}_cards_html" + page_elem = gr.HTML('', elem_id=elem_id) ui.pages.append(page_elem) - filter = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", placeholder="Search...", visible=False) + page_elem.change(fn=lambda: None, _js='function(){applyExtraNetworkFilter(' + json.dumps(tabname) + '); return []}', inputs=[], outputs=[]) + + gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", placeholder="Search...", visible=False) button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh") ui.button_save_preview = gr.Button('Save preview', elem_id=tabname+"_save_preview", visible=False) @@ -254,19 +280,22 @@ def create_ui(container, button, tabname): def toggle_visibility(is_visible): is_visible = not is_visible - return is_visible, gr.update(visible=is_visible), gr.update(variant=("secondary-down" if is_visible else "secondary")) + + if is_visible and not ui.pages_contents: + refresh() + + return is_visible, gr.update(visible=is_visible), gr.update(variant=("secondary-down" if is_visible else "secondary")), *ui.pages_contents state_visible = gr.State(value=False) - button.click(fn=toggle_visibility, inputs=[state_visible], outputs=[state_visible, container, button]) + button.click(fn=toggle_visibility, inputs=[state_visible], outputs=[state_visible, container, button, *ui.pages]) def refresh(): - res = [] - for pg in 
ui.stored_extra_pages: pg.refresh() - res.append(pg.create_html(ui.tabname)) - return res + ui.pages_contents = [pg.create_html(ui.tabname) for pg in ui.stored_extra_pages] + + return ui.pages_contents button_refresh.click(fn=refresh, inputs=[], outputs=ui.pages) -- cgit v1.2.3 From ab4ab4e595e89d1a9a39db70539d5944fdbe47fa Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 8 May 2023 15:23:49 +0300 Subject: add version to infotext, footer and console output when starting --- launch.py | 17 +++++++++++++++++ modules/processing.py | 11 +++++++++++ modules/shared.py | 1 + modules/ui.py | 6 +++--- 4 files changed, 32 insertions(+), 3 deletions(-) (limited to 'modules/shared.py') diff --git a/launch.py b/launch.py index 1dc12dae..2a33adc8 100644 --- a/launch.py +++ b/launch.py @@ -19,6 +19,7 @@ python = sys.executable git = os.environ.get('GIT', "git") index_url = os.environ.get('INDEX_URL', "") stored_commit_hash = None +stored_git_tag = None dir_repos = "repositories" if 'GRADIO_ANALYTICS_ENABLED' not in os.environ: @@ -70,6 +71,20 @@ def commit_hash(): return stored_commit_hash +def git_tag(): + global stored_git_tag + + if stored_git_tag is not None: + return stored_git_tag + + try: + stored_git_tag = run(f"{git} describe --tags").strip() + except Exception: + stored_git_tag = "" + + return stored_git_tag + + def run(command, desc=None, errdesc=None, custom_env=None, live=False): if desc is not None: print(desc) @@ -246,8 +261,10 @@ def prepare_environment(): check_python_version() commit = commit_hash() + tag = git_tag() print(f"Python {sys.version}") + print(f"Version: {tag}") print(f"Commit hash: {commit}") if args.reinstall_torch or not is_installed("torch") or not is_installed("torchvision"): diff --git a/modules/processing.py b/modules/processing.py index e8808beb..e786791a 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -458,6 +458,16 @@ def fix_seed(p): p.subseed = get_fixed_seed(p.subseed) +def program_version(): + import launch + + res = launch.git_tag() + if res == "": + res = None + + return res + + def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0): index = position_in_batch + iteration * p.batch_size @@ -483,6 +493,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, + "Version": program_version() if opts.add_version_to_infotext else None, } generation_params.update(p.extra_generation_params) diff --git a/modules/shared.py b/modules/shared.py index dd374713..f40faa79 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -394,6 +394,7 @@ options_templates.update(options_section(('ui', "User interface"), { "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), + "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"), "disable_weights_auto_swap": OptionInfo(True, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."), "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), "send_size": OptionInfo(True, "Send size when 
sending prompt or image to another interface"), diff --git a/modules/ui.py b/modules/ui.py index 16c46515..b2916e9c 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1923,7 +1923,7 @@ def versions_html(): python_version = ".".join([str(x) for x in sys.version_info[0:3]]) commit = launch.commit_hash() - short_commit = commit[0:8] + tag = launch.git_tag() if shared.xformers_available: import xformers @@ -1932,6 +1932,8 @@ def versions_html(): xformers_version = "N/A" return f""" +version: {tag} + •  python: {python_version}  •  torch: {getattr(torch, '__long_version__',torch.__version__)} @@ -1940,7 +1942,5 @@ xformers: {xformers_version}  •  gradio: {gr.__version__}  •  -commit: {short_commit} - •  checkpoint: N/A """ -- cgit v1.2.3 From eabea24eb8ba5068c97ff5655bbe01dc032af4e9 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 8 May 2023 15:26:23 +0300 Subject: put infotext options into their own category in settings tab --- modules/shared.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'modules/shared.py') diff --git a/modules/shared.py b/modules/shared.py index f40faa79..e1c3e5c4 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -392,10 +392,6 @@ options_templates.update(options_section(('ui', "User interface"), { "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), - "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), - "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), - "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"), - "disable_weights_auto_swap": OptionInfo(True, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."), "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), "font": OptionInfo("", "Font for image grids that have text"), @@ -417,6 +413,13 @@ options_templates.update(options_section(('ui', "User interface"), { "gradio_theme": OptionInfo("Default", "Gradio theme (requires restart)", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}) })) +options_templates.update(options_section(('infotext', "Infotext"), { + "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), + "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), + "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"), + "disable_weights_auto_swap": OptionInfo(True, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."), +})) + options_templates.update(options_section(('ui', "Live previews"), { "show_progressbar": OptionInfo(True, "Show progressbar"), "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), -- cgit v1.2.3 From 5edb0acfeb424f71954b111910d2e08c410b0c43 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 8 May 2023 15:38:25 +0300 Subject: use multiselect for quicksettings (this also resets the existing setting) --- 
modules/shared.py | 4 ++-- modules/ui.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'modules/shared.py') diff --git a/modules/shared.py b/modules/shared.py index e1c3e5c4..a8154580 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -404,8 +404,8 @@ options_templates.update(options_section(('ui', "User interface"), { "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row"), "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_delimiters": OptionInfo(".,\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), - "quicksettings": OptionInfo("sd_model_checkpoint", "Quicksettings list"), + "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), + "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}), "hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": [x for x in tab_names]}), "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), diff --git a/modules/ui.py b/modules/ui.py index 39efd576..883d37e7 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1525,7 +1525,7 @@ def create_ui(): result = gr.HTML(elem_id="settings_result") - quicksettings_names = [x.strip() for x in opts.quicksettings.split(",")] + quicksettings_names = opts.quicksettings_list quicksettings_names = {x: i for i, x in enumerate(quicksettings_names) if x != 'quicksettings'} quicksettings_list = [] -- cgit v1.2.3 From d1ff57e1cb602a4ebac80a25b8e3ce2424278f94 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 9 May 2023 18:14:12 +0900 Subject: 1.1.1 quicksettings list migration --- modules/shared.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'modules/shared.py') diff --git a/modules/shared.py b/modules/shared.py index a8154580..090707ca 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -549,6 +549,10 @@ class Options: with open(filename, "r", encoding="utf8") as file: self.data = json.load(file) + # 1.1.1 quicksettings list migration + if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None: + self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')] + bad_settings = 0 for k, v in self.data.items(): info = self.data_labels.get(k, None) -- cgit v1.2.3 From d50b95b5a32b917ded123891dce3c8a018fca064 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 07:14:13 +0300 Subject: fix an issue preventing the program from starting if the user specifies a bad gradio theme --- modules/shared.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules/shared.py') diff --git a/modules/shared.py b/modules/shared.py index 090707ca..4631965b 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -672,8 +672,8 @@ def reload_gradio_theme(theme_name=None): else: try: gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) - except requests.exceptions.ConnectionError: - print("Can't access HuggingFace Hub, falling back 
to default Gradio theme") + except Exception as e: + errors.display(e, "changing gradio theme") gradio_theme = gr.themes.Default() -- cgit v1.2.3 From f741a98baccae100fcfb40c017b5c35c5cba1b0c Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 08:43:42 +0300 Subject: imports cleanup for ruff --- extensions-builtin/Lora/lora.py | 1 - extensions-builtin/ScuNET/scripts/scunet_model.py | 1 - extensions-builtin/SwinIR/scripts/swinir_model.py | 3 +-- modules/codeformer/codeformer_arch.py | 4 +--- modules/codeformer/vqgan_arch.py | 2 -- modules/codeformer_model.py | 4 +--- modules/config_states.py | 2 +- modules/esrgan_model.py | 2 +- modules/esrgan_model_arch.py | 1 - modules/extensions.py | 1 - modules/generation_parameters_copypaste.py | 4 ---- modules/hypernetworks/hypernetwork.py | 3 +-- modules/hypernetworks/ui.py | 2 -- modules/images.py | 2 +- modules/img2img.py | 5 +---- modules/mac_specific.py | 1 - modules/modelloader.py | 1 - modules/models/diffusion/uni_pc/uni_pc.py | 1 - modules/processing.py | 5 ++--- modules/sd_hijack.py | 2 +- modules/sd_hijack_inpainting.py | 6 ------ modules/sd_hijack_ip2p.py | 5 +---- modules/sd_hijack_xlmr.py | 2 -- modules/sd_models.py | 2 +- modules/sd_models_config.py | 1 - modules/sd_samplers_kdiffusion.py | 1 - modules/sd_vae.py | 3 --- modules/shared.py | 3 --- modules/styles.py | 9 --------- modules/textual_inversion/autocrop.py | 4 +--- modules/textual_inversion/image_embedding.py | 2 +- modules/textual_inversion/preprocess.py | 4 ---- modules/textual_inversion/textual_inversion.py | 1 - modules/txt2img.py | 9 +++------ modules/ui.py | 5 ++--- modules/ui_extra_networks.py | 1 - modules/ui_postprocessing.py | 2 +- modules/upscaler.py | 2 -- modules/xlmr.py | 2 +- pyproject.toml | 11 +++++++---- scripts/custom_code.py | 2 +- scripts/outpainting_mk_2.py | 4 ++-- scripts/poor_mans_outpainting.py | 4 ++-- scripts/prompt_matrix.py | 7 ++----- scripts/prompts_from_file.py | 5 +---- scripts/sd_upscale.py | 4 ++-- scripts/xyz_grid.py | 6 ++---- webui.py | 2 +- 48 files changed, 42 insertions(+), 114 deletions(-) (limited to 'modules/shared.py') diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py index ba1293df..0ab43229 100644 --- a/extensions-builtin/Lora/lora.py +++ b/extensions-builtin/Lora/lora.py @@ -1,4 +1,3 @@ -import glob import os import re import torch diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py index c7fd5739..aa2fdb3a 100644 --- a/extensions-builtin/ScuNET/scripts/scunet_model.py +++ b/extensions-builtin/ScuNET/scripts/scunet_model.py @@ -13,7 +13,6 @@ import modules.upscaler from modules import devices, modelloader from scunet_model_arch import SCUNet as net from modules.shared import opts -from modules import images class UpscalerScuNET(modules.upscaler.Upscaler): diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py index d77c3a92..55dd94ab 100644 --- a/extensions-builtin/SwinIR/scripts/swinir_model.py +++ b/extensions-builtin/SwinIR/scripts/swinir_model.py @@ -1,4 +1,3 @@ -import contextlib import os import numpy as np @@ -8,7 +7,7 @@ from basicsr.utils.download_util import load_file_from_url from tqdm import tqdm from modules import modelloader, devices, script_callbacks, shared -from modules.shared import cmd_opts, opts, state +from modules.shared import opts, state from swinir_model_arch import SwinIR as net from swinir_model_arch_v2 import Swin2SR as 
net2 from modules.upscaler import Upscaler, UpscalerData diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py index f1a7cf09..00c407de 100644 --- a/modules/codeformer/codeformer_arch.py +++ b/modules/codeformer/codeformer_arch.py @@ -1,14 +1,12 @@ # this file is copied from CodeFormer repository. Please see comment in modules/codeformer_model.py import math -import numpy as np import torch from torch import nn, Tensor import torch.nn.functional as F -from typing import Optional, List +from typing import Optional from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock -from basicsr.utils import get_root_logger from basicsr.utils.registry import ARCH_REGISTRY def calc_mean_std(feat, eps=1e-5): diff --git a/modules/codeformer/vqgan_arch.py b/modules/codeformer/vqgan_arch.py index e7293683..820e6b12 100644 --- a/modules/codeformer/vqgan_arch.py +++ b/modules/codeformer/vqgan_arch.py @@ -5,11 +5,9 @@ VQGAN code, adapted from the original created by the Unleashing Transformers aut https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py ''' -import numpy as np import torch import torch.nn as nn import torch.nn.functional as F -import copy from basicsr.utils import get_root_logger from basicsr.utils.registry import ARCH_REGISTRY diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py index 8d84bbc9..8e56cb89 100644 --- a/modules/codeformer_model.py +++ b/modules/codeformer_model.py @@ -33,11 +33,9 @@ def setup_model(dirname): try: from torchvision.transforms.functional import normalize from modules.codeformer.codeformer_arch import CodeFormer - from basicsr.utils.download_util import load_file_from_url - from basicsr.utils import imwrite, img2tensor, tensor2img + from basicsr.utils import img2tensor, tensor2img from facelib.utils.face_restoration_helper import FaceRestoreHelper from facelib.detection.retinaface import retinaface - from modules.shared import cmd_opts net_class = CodeFormer diff --git a/modules/config_states.py b/modules/config_states.py index 2ea00929..8f1ff428 100644 --- a/modules/config_states.py +++ b/modules/config_states.py @@ -14,7 +14,7 @@ from collections import OrderedDict import git from modules import shared, extensions -from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path, config_states_dir +from modules.paths_internal import script_path, config_states_dir all_config_states = OrderedDict() diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index f4369257..85aa6934 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -6,7 +6,7 @@ from PIL import Image from basicsr.utils.download_util import load_file_from_url import modules.esrgan_model_arch as arch -from modules import shared, modelloader, images, devices +from modules import modelloader, images, devices from modules.upscaler import Upscaler, UpscalerData from modules.shared import opts diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py index 7f8bc7c0..4de9dd8d 100644 --- a/modules/esrgan_model_arch.py +++ b/modules/esrgan_model_arch.py @@ -2,7 +2,6 @@ from collections import OrderedDict import math -import functools import torch import torch.nn as nn import torch.nn.functional as F diff --git a/modules/extensions.py b/modules/extensions.py index 34d9d654..829f8cd9 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -3,7 +3,6 @@ import sys import traceback import time -from datetime import datetime import git from modules import shared 
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index fe8b18b2..f1c59c46 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -1,15 +1,11 @@ import base64 -import html import io -import math import os import re -from pathlib import Path import gradio as gr from modules.paths import data_path from modules import shared, ui_tempdir, script_callbacks -import tempfile from PIL import Image re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)' diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 1fc49537..9fe749b7 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -1,4 +1,3 @@ -import csv import datetime import glob import html @@ -18,7 +17,7 @@ from modules.textual_inversion.learn_schedule import LearnRateScheduler from torch import einsum from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_ -from collections import defaultdict, deque +from collections import deque from statistics import stdev, mean diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py index 76599f5a..be168736 100644 --- a/modules/hypernetworks/ui.py +++ b/modules/hypernetworks/ui.py @@ -1,6 +1,4 @@ import html -import os -import re import gradio as gr import modules.hypernetworks.hypernetwork diff --git a/modules/images.py b/modules/images.py index 5eb6d855..7392cb8b 100644 --- a/modules/images.py +++ b/modules/images.py @@ -19,7 +19,7 @@ import json import hashlib from modules import sd_samplers, shared, script_callbacks, errors -from modules.shared import opts, cmd_opts +from modules.shared import opts LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) diff --git a/modules/img2img.py b/modules/img2img.py index 32b1ecd6..d704bf90 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -1,12 +1,9 @@ -import math import os -import sys -import traceback import numpy as np from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError -from modules import devices, sd_samplers +from modules import sd_samplers from modules.generation_parameters_copypaste import create_override_settings_dict from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images from modules.shared import opts, state diff --git a/modules/mac_specific.py b/modules/mac_specific.py index 40ce2101..5c2f92a1 100644 --- a/modules/mac_specific.py +++ b/modules/mac_specific.py @@ -1,6 +1,5 @@ import torch import platform -from modules import paths from modules.sd_hijack_utils import CondFunc from packaging import version diff --git a/modules/modelloader.py b/modules/modelloader.py index cf685000..92ada694 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -1,4 +1,3 @@ -import glob import os import shutil import importlib diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py index 11b330bc..a4c4ef4e 100644 --- a/modules/models/diffusion/uni_pc/uni_pc.py +++ b/modules/models/diffusion/uni_pc/uni_pc.py @@ -1,5 +1,4 @@ import torch -import torch.nn.functional as F import math from tqdm.auto import trange diff --git a/modules/processing.py b/modules/processing.py index 6f5233c1..c3932d6b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -2,7 +2,6 @@ import json import math import os import sys -import 
warnings import hashlib import torch @@ -11,10 +10,10 @@ from PIL import Image, ImageFilter, ImageOps import random import cv2 from skimage import exposure -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List import modules.sd_hijack -from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts +from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts from modules.sd_hijack import model_hijack from modules.shared import opts, cmd_opts, state import modules.shared as shared diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index d8135211..81573b78 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -3,7 +3,7 @@ from torch.nn.functional import silu from types import MethodType import modules.textual_inversion.textual_inversion -from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint +from modules import devices, sd_hijack_optimizations, shared from modules.hypernetworks import hypernetwork from modules.shared import cmd_opts from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 55a2ce4d..344d75c8 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -1,15 +1,9 @@ -import os import torch -from einops import repeat -from omegaconf import ListConfig - import ldm.models.diffusion.ddpm import ldm.models.diffusion.ddim import ldm.models.diffusion.plms -from ldm.models.diffusion.ddpm import LatentDiffusion -from ldm.models.diffusion.plms import PLMSSampler from ldm.models.diffusion.ddim import DDIMSampler, noise_like from ldm.models.diffusion.sampling_util import norm_thresholding diff --git a/modules/sd_hijack_ip2p.py b/modules/sd_hijack_ip2p.py index 41ed54a2..6fe6b6ff 100644 --- a/modules/sd_hijack_ip2p.py +++ b/modules/sd_hijack_ip2p.py @@ -1,8 +1,5 @@ -import collections import os.path -import sys -import gc -import time + def should_hijack_ip2p(checkpoint_info): from modules import sd_models_config diff --git a/modules/sd_hijack_xlmr.py b/modules/sd_hijack_xlmr.py index 4ac51c38..28528329 100644 --- a/modules/sd_hijack_xlmr.py +++ b/modules/sd_hijack_xlmr.py @@ -1,8 +1,6 @@ -import open_clip.tokenizer import torch from modules import sd_hijack_clip, devices -from modules.shared import opts class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords): diff --git a/modules/sd_models.py b/modules/sd_models.py index 11c1a344..1c09c709 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -565,7 +565,7 @@ def reload_model_weights(sd_model=None, info=None): def unload_model_weights(sd_model=None, info=None): - from modules import lowvram, devices, sd_hijack + from modules import devices, sd_hijack timer = Timer() if model_data.sd_model: diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py index 7a79925a..9bfe1237 100644 --- a/modules/sd_models_config.py +++ b/modules/sd_models_config.py @@ -1,4 +1,3 @@ -import re import os import torch diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 0fc9f456..3b8e9622 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -1,7 +1,6 @@ from collections import deque import torch import inspect -import einops import 
k_diffusion.sampling from modules import prompt_parser, devices, sd_samplers_common diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 521e485a..b7176125 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -1,8 +1,5 @@ -import torch -import safetensors.torch import os import collections -from collections import namedtuple from modules import paths, shared, devices, script_callbacks, sd_models import glob from copy import deepcopy diff --git a/modules/shared.py b/modules/shared.py index 4631965b..44cd2c0c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -1,12 +1,9 @@ -import argparse import datetime import json import os import sys import time -import requests -from PIL import Image import gradio as gr import tqdm diff --git a/modules/styles.py b/modules/styles.py index 11642075..c22769cf 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -1,18 +1,9 @@ -# We need this so Python doesn't complain about the unknown StableDiffusionProcessing-typehint at runtime -from __future__ import annotations - import csv import os import os.path import typing -import collections.abc as abc -import tempfile import shutil -if typing.TYPE_CHECKING: - # Only import this when code is being type-checked, it doesn't have any effect at runtime - from .processing import StableDiffusionProcessing - class PromptStyle(typing.NamedTuple): name: str diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index d7d8d2e3..7770d22f 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -1,10 +1,8 @@ import cv2 import requests import os -from collections import defaultdict -from math import log, sqrt import numpy as np -from PIL import Image, ImageDraw +from PIL import ImageDraw GREEN = "#0F0" BLUE = "#00F" diff --git a/modules/textual_inversion/image_embedding.py b/modules/textual_inversion/image_embedding.py index 5593f88c..ee0e850a 100644 --- a/modules/textual_inversion/image_embedding.py +++ b/modules/textual_inversion/image_embedding.py @@ -2,7 +2,7 @@ import base64 import json import numpy as np import zlib -from PIL import Image, PngImagePlugin, ImageDraw, ImageFont +from PIL import Image, ImageDraw, ImageFont from fonts.ttf import Roboto import torch from modules.shared import opts diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index da0bcb26..d0cad09e 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -1,13 +1,9 @@ import os from PIL import Image, ImageOps import math -import platform -import sys import tqdm -import time from modules import paths, shared, images, deepbooru -from modules.shared import opts, cmd_opts from modules.textual_inversion import autocrop diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index f753b75f..9ed9ba45 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -1,7 +1,6 @@ import os import sys import traceback -import inspect from collections import namedtuple import torch diff --git a/modules/txt2img.py b/modules/txt2img.py index 16841d0f..f022381c 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -1,18 +1,15 @@ import modules.scripts -from modules import sd_samplers +from modules import sd_samplers, processing from modules.generation_parameters_copypaste import create_override_settings_dict -from modules.processing import StableDiffusionProcessing, 
Processed, StableDiffusionProcessingTxt2Img, \ - StableDiffusionProcessingImg2Img, process_images from modules.shared import opts, cmd_opts import modules.shared as shared -import modules.processing as processing from modules.ui import plaintext_to_html def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, override_settings_texts, *args): override_settings = create_override_settings_dict(override_settings_texts) - p = StableDiffusionProcessingTxt2Img( + p = processing.StableDiffusionProcessingTxt2Img( sd_model=shared.sd_model, outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples, outpath_grids=opts.outdir_grids or opts.outdir_txt2img_grids, @@ -53,7 +50,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step processed = modules.scripts.scripts_txt2img.run(p, *args) if processed is None: - processed = process_images(p) + processed = processing.process_images(p) p.close() diff --git a/modules/ui.py b/modules/ui.py index 6beda76f..f7e57593 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -14,10 +14,10 @@ from PIL import Image, PngImagePlugin from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing, progress -from modules.ui_components import FormRow, FormColumn, FormGroup, ToolButton, FormHTML +from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path, data_path -from modules.shared import opts, cmd_opts, restricted_opts +from modules.shared import opts, cmd_opts import modules.codeformer_model import modules.generation_parameters_copypaste as parameters_copypaste @@ -28,7 +28,6 @@ import modules.shared as shared import modules.styles import modules.textual_inversion.ui from modules import prompt_parser -from modules.images import save_image from modules.sd_hijack import model_hijack from modules.sd_samplers import samplers, samplers_for_img2img from modules.textual_inversion import textual_inversion diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 49e06289..800e467a 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -1,4 +1,3 @@ -import glob import os.path import urllib.parse from pathlib import Path diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index f25639e5..c7dc1154 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -1,5 +1,5 @@ import gradio as gr -from modules import scripts_postprocessing, scripts, shared, gfpgan_model, codeformer_model, ui_common, postprocessing, call_queue +from modules import scripts, shared, ui_common, postprocessing, call_queue import modules.generation_parameters_copypaste as parameters_copypaste diff --git a/modules/upscaler.py b/modules/upscaler.py index 0ad4fe99..777593b0 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -2,8 +2,6 @@ import os from abc import abstractmethod import PIL -import numpy as np 
-import torch from PIL import Image import modules.shared diff --git a/modules/xlmr.py b/modules/xlmr.py index beab3fdf..e056c3f6 100644 --- a/modules/xlmr.py +++ b/modules/xlmr.py @@ -1,4 +1,4 @@ -from transformers import BertPreTrainedModel,BertModel,BertConfig +from transformers import BertPreTrainedModel, BertConfig import torch.nn as nn import torch from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaConfig diff --git a/pyproject.toml b/pyproject.toml index 1e164abc..9caa9ba2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,13 @@ [tool.ruff] +exclude = ["extensions"] + ignore = [ "E501", - "E731", - "E402", # Module level import not at top of file - "F401" # Module imported but unused + + "F401", # Module imported but unused ] -exclude = ["extensions"] + +[tool.ruff.per-file-ignores] +"webui.py" = ["E402"] # Module level import not at top of file \ No newline at end of file diff --git a/scripts/custom_code.py b/scripts/custom_code.py index f36a3675..cc6f0d49 100644 --- a/scripts/custom_code.py +++ b/scripts/custom_code.py @@ -4,7 +4,7 @@ import ast import copy from modules.processing import Processed -from modules.shared import opts, cmd_opts, state +from modules.shared import cmd_opts def convertExpr2Expression(expr): diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index b10fed6c..665dbe89 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -7,9 +7,9 @@ import modules.scripts as scripts import gradio as gr from PIL import Image, ImageDraw -from modules import images, processing, devices +from modules import images from modules.processing import Processed, process_images -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state # this function is taken from https://github.com/parlance-zz/g-diffuser-bot diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py index ddcbd2d3..c0bbecc1 100644 --- a/scripts/poor_mans_outpainting.py +++ b/scripts/poor_mans_outpainting.py @@ -4,9 +4,9 @@ import modules.scripts as scripts import gradio as gr from PIL import Image, ImageDraw -from modules import images, processing, devices +from modules import images, devices from modules.processing import Processed, process_images -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state class Script(scripts.Script): diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index e9b11517..fb06beab 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -1,14 +1,11 @@ import math -from collections import namedtuple -from copy import copy -import random import modules.scripts as scripts import gradio as gr from modules import images -from modules.processing import process_images, Processed -from modules.shared import opts, cmd_opts, state +from modules.processing import process_images +from modules.shared import opts, state import modules.sd_samplers diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 76dc5778..149bc85f 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -1,6 +1,4 @@ import copy -import math -import os import random import sys import traceback @@ -11,8 +9,7 @@ import gradio as gr from modules import sd_samplers from modules.processing import Processed, process_images -from PIL import Image -from modules.shared import opts, cmd_opts, state +from modules.shared import state def process_string_tag(tag): diff --git a/scripts/sd_upscale.py 
b/scripts/sd_upscale.py index 332d76d9..d873a09c 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -4,9 +4,9 @@ import modules.scripts as scripts import gradio as gr from PIL import Image -from modules import processing, shared, sd_samplers, images, devices +from modules import processing, shared, images, devices from modules.processing import Processed -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state class Script(scripts.Script): diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 2ff42ef8..332e0ecd 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -10,15 +10,13 @@ import numpy as np import modules.scripts as scripts import gradio as gr -from modules import images, paths, sd_samplers, processing, sd_models, sd_vae +from modules import images, sd_samplers, processing, sd_models, sd_vae from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img -from modules.shared import opts, cmd_opts, state +from modules.shared import opts, state import modules.shared as shared import modules.sd_samplers import modules.sd_models import modules.sd_vae -import glob -import os import re from modules.ui_components import ToolButton diff --git a/webui.py b/webui.py index ec3d2aba..48277075 100644 --- a/webui.py +++ b/webui.py @@ -43,7 +43,7 @@ if ".dev" in torch.__version__ or "+git" in torch.__version__: torch.__long_version__ = torch.__version__ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) -from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states +from modules import shared, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states import modules.codeformer_model as codeformer import modules.face_restoration import modules.gfpgan_model as gfpgan -- cgit v1.2.3 From 4b854806d98cf5ccd48e5cd99c172613da7937f0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 09:02:23 +0300 Subject: F401 fixes for ruff --- extensions-builtin/LDSR/scripts/ldsr_model.py | 4 ++-- modules/cmd_args.py | 2 +- modules/deepbooru.py | 1 - modules/extensions.py | 2 +- modules/gfpgan_model.py | 2 +- modules/models/diffusion/uni_pc/__init__.py | 2 +- modules/paths.py | 4 ++-- modules/realesrgan_model.py | 6 +++--- modules/script_loading.py | 1 - modules/sd_hijack_inpainting.py | 2 +- modules/sd_models.py | 4 +--- modules/sd_samplers.py | 2 +- modules/shared.py | 2 +- modules/ui.py | 4 ++-- modules/upscaler.py | 2 +- pyproject.toml | 9 +++++---- webui.py | 8 ++++---- 17 files changed, 27 insertions(+), 30 deletions(-) (limited to 'modules/shared.py') diff --git a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py index e8dc083c..fbbe9005 100644 --- a/extensions-builtin/LDSR/scripts/ldsr_model.py +++ b/extensions-builtin/LDSR/scripts/ldsr_model.py @@ -7,8 +7,8 @@ from basicsr.utils.download_util import load_file_from_url from modules.upscaler import Upscaler, UpscalerData from ldsr_model_arch import LDSR from modules import shared, script_callbacks -import sd_hijack_autoencoder -import sd_hijack_ddpm_v1 +import sd_hijack_autoencoder # noqa: F401 +import sd_hijack_ddpm_v1 # noqa: F401 class UpscalerLDSR(Upscaler): diff --git a/modules/cmd_args.py b/modules/cmd_args.py index d906a571..e01ca655 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -1,6 +1,6 @@ import argparse import os -from modules.paths_internal 
import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file +from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file # noqa: F401 parser = argparse.ArgumentParser() diff --git a/modules/deepbooru.py b/modules/deepbooru.py index 122fce7f..1c4554a2 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -2,7 +2,6 @@ import os import re import torch -from PIL import Image import numpy as np from modules import modelloader, paths, deepbooru_model, devices, images, shared diff --git a/modules/extensions.py b/modules/extensions.py index 829f8cd9..bc2c0450 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -6,7 +6,7 @@ import time import git from modules import shared -from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path +from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401 extensions = [] diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py index fbe6215a..0131dea4 100644 --- a/modules/gfpgan_model.py +++ b/modules/gfpgan_model.py @@ -78,7 +78,7 @@ def setup_model(dirname): try: from gfpgan import GFPGANer - from facexlib import detection, parsing + from facexlib import detection, parsing # noqa: F401 global user_path global have_gfpgan global gfpgan_constructor diff --git a/modules/models/diffusion/uni_pc/__init__.py b/modules/models/diffusion/uni_pc/__init__.py index e1265e3f..dbb35964 100644 --- a/modules/models/diffusion/uni_pc/__init__.py +++ b/modules/models/diffusion/uni_pc/__init__.py @@ -1 +1 @@ -from .sampler import UniPCSampler +from .sampler import UniPCSampler # noqa: F401 diff --git a/modules/paths.py b/modules/paths.py index acf1894b..5f6474c0 100644 --- a/modules/paths.py +++ b/modules/paths.py @@ -1,8 +1,8 @@ import os import sys -from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir +from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir # noqa: F401 -import modules.safe +import modules.safe # noqa: F401 # data_path = cmd_opts_pre.data diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py index 9ec1adf2..c24d8dbb 100644 --- a/modules/realesrgan_model.py +++ b/modules/realesrgan_model.py @@ -17,9 +17,9 @@ class UpscalerRealESRGAN(Upscaler): self.user_path = path super().__init__() try: - from basicsr.archs.rrdbnet_arch import RRDBNet - from realesrgan import RealESRGANer - from realesrgan.archs.srvgg_arch import SRVGGNetCompact + from basicsr.archs.rrdbnet_arch import RRDBNet # noqa: F401 + from realesrgan import RealESRGANer # noqa: F401 + from realesrgan.archs.srvgg_arch import SRVGGNetCompact # noqa: F401 self.enable = True self.scalers = [] scalers = self.load_models(path) diff --git a/modules/script_loading.py b/modules/script_loading.py index a7d2203f..57b15862 100644 --- a/modules/script_loading.py +++ b/modules/script_loading.py @@ -2,7 +2,6 @@ import os import sys import traceback import importlib.util -from types import ModuleType def load_module(path): diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 344d75c8..058575b7 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -4,7 +4,7 @@ import ldm.models.diffusion.ddpm import ldm.models.diffusion.ddim import ldm.models.diffusion.plms -from ldm.models.diffusion.ddim 
import DDIMSampler, noise_like +from ldm.models.diffusion.ddim import noise_like from ldm.models.diffusion.sampling_util import norm_thresholding diff --git a/modules/sd_models.py b/modules/sd_models.py index 1c09c709..d1e946a5 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -15,7 +15,6 @@ import ldm.modules.midas as midas from ldm.util import instantiate_from_config from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config -from modules.paths import models_path from modules.sd_hijack_inpainting import do_inpainting_hijack from modules.timer import Timer @@ -87,8 +86,7 @@ class CheckpointInfo: try: # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start. - - from transformers import logging, CLIPModel + from transformers import logging, CLIPModel # noqa: F401 logging.set_verbosity_error() except Exception: diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index ff361f22..4f1bf21d 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -1,7 +1,7 @@ from modules import sd_samplers_compvis, sd_samplers_kdiffusion, shared # imports for functions that previously were here and are used by other modules -from modules.sd_samplers_common import samples_to_image_grid, sample_to_image +from modules.sd_samplers_common import samples_to_image_grid, sample_to_image # noqa: F401 all_samplers = [ *sd_samplers_kdiffusion.samplers_data_k_diffusion, diff --git a/modules/shared.py b/modules/shared.py index 44cd2c0c..7d70f041 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -12,7 +12,7 @@ import modules.memmon import modules.styles import modules.devices as devices from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args -from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir +from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 from ldm.models.diffusion.ddpm import LatentDiffusion demo = None diff --git a/modules/ui.py b/modules/ui.py index f7e57593..782b569d 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -10,10 +10,10 @@ import gradio as gr import gradio.routes import gradio.utils import numpy as np -from PIL import Image, PngImagePlugin +from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing, progress +from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path, data_path diff --git a/modules/upscaler.py b/modules/upscaler.py index 777593b0..e145be30 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -41,7 +41,7 @@ class Upscaler: os.makedirs(self.model_path, exist_ok=True) try: - import cv2 + import cv2 # noqa: F401 self.can_tile = True except Exception: pass diff --git a/pyproject.toml b/pyproject.toml index 
9caa9ba2..0883c127 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,13 +1,14 @@ [tool.ruff] +target-version = "py310" + exclude = ["extensions"] ignore = [ - "E501", - - "F401", # Module imported but unused + "E501", # Line too long + "E731", # Do not assign a `lambda` expression, use a `def` ] [tool.ruff.per-file-ignores] -"webui.py" = ["E402"] # Module level import not at top of file \ No newline at end of file +"webui.py" = ["E402"] # Module level import not at top of file diff --git a/webui.py b/webui.py index 48277075..5d5e80b5 100644 --- a/webui.py +++ b/webui.py @@ -16,12 +16,12 @@ from packaging import version import logging logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) -from modules import paths, timer, import_hook, errors +from modules import paths, timer, import_hook, errors # noqa: F401 startup_timer = timer.Timer() import torch -import pytorch_lightning # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them +import pytorch_lightning # noqa: F401 # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning") warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision") @@ -31,12 +31,12 @@ startup_timer.record("import torch") import gradio startup_timer.record("import gradio") -import ldm.modules.encoders.modules +import ldm.modules.encoders.modules # noqa: F401 startup_timer.record("import ldm") from modules import extra_networks, ui_extra_networks_checkpoints from modules import extra_networks_hypernet, ui_extra_networks_hypernets, ui_extra_networks_textual_inversion -from modules.call_queue import wrap_queued_call, queue_lock, wrap_gradio_gpu_call +from modules.call_queue import wrap_queued_call, queue_lock # Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors if ".dev" in torch.__version__ or "+git" in torch.__version__: -- cgit v1.2.3 From 028d3f6425d85f122027c127fba8bcbf4f66ee75 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 11:05:02 +0300 Subject: ruff auto fixes --- extensions-builtin/LDSR/sd_hijack_autoencoder.py | 4 ++-- extensions-builtin/LDSR/sd_hijack_ddpm_v1.py | 12 ++++++------ extensions-builtin/Lora/lora.py | 12 ++++++------ extensions-builtin/Lora/scripts/lora_script.py | 2 +- modules/config_states.py | 2 +- modules/deepbooru.py | 2 +- modules/devices.py | 2 +- modules/hypernetworks/hypernetwork.py | 2 +- modules/hypernetworks/ui.py | 4 ++-- modules/interrogate.py | 2 +- modules/modelloader.py | 2 +- modules/models/diffusion/ddpm_edit.py | 4 ++-- modules/scripts_auto_postprocessing.py | 2 +- modules/sd_hijack.py | 2 +- modules/sd_hijack_optimizations.py | 14 +++++++------- modules/sd_samplers_compvis.py | 2 +- modules/sd_samplers_kdiffusion.py | 2 +- modules/shared.py | 6 +++--- modules/textual_inversion/textual_inversion.py | 2 +- modules/ui.py | 8 ++++---- modules/ui_extra_networks.py | 4 ++-- modules/ui_tempdir.py | 2 +- 22 files changed, 47 insertions(+), 47 deletions(-) (limited to 'modules/shared.py') diff --git a/extensions-builtin/LDSR/sd_hijack_autoencoder.py b/extensions-builtin/LDSR/sd_hijack_autoencoder.py index 6303fed5..f457ca93 100644 --- a/extensions-builtin/LDSR/sd_hijack_autoencoder.py +++ 
b/extensions-builtin/LDSR/sd_hijack_autoencoder.py @@ -288,5 +288,5 @@ class VQModelInterface(VQModel): dec = self.decoder(quant) return dec -setattr(ldm.models.autoencoder, "VQModel", VQModel) -setattr(ldm.models.autoencoder, "VQModelInterface", VQModelInterface) +ldm.models.autoencoder.VQModel = VQModel +ldm.models.autoencoder.VQModelInterface = VQModelInterface diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py index 4d3f6c56..d8fc30e3 100644 --- a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py @@ -1116,7 +1116,7 @@ class LatentDiffusionV1(DDPMV1): if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + [x[:batch_size] for x in cond[key]] for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] @@ -1215,7 +1215,7 @@ class LatentDiffusionV1(DDPMV1): if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + [x[:batch_size] for x in cond[key]] for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, @@ -1437,7 +1437,7 @@ class Layout2ImgDiffusionV1(LatentDiffusionV1): logs['bbox_image'] = cond_img return logs -setattr(ldm.models.diffusion.ddpm, "DDPMV1", DDPMV1) -setattr(ldm.models.diffusion.ddpm, "LatentDiffusionV1", LatentDiffusionV1) -setattr(ldm.models.diffusion.ddpm, "DiffusionWrapperV1", DiffusionWrapperV1) -setattr(ldm.models.diffusion.ddpm, "Layout2ImgDiffusionV1", Layout2ImgDiffusionV1) +ldm.models.diffusion.ddpm.DDPMV1 = DDPMV1 +ldm.models.diffusion.ddpm.LatentDiffusionV1 = LatentDiffusionV1 +ldm.models.diffusion.ddpm.DiffusionWrapperV1 = DiffusionWrapperV1 +ldm.models.diffusion.ddpm.Layout2ImgDiffusionV1 = Layout2ImgDiffusionV1 diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py index 0ab43229..9795540f 100644 --- a/extensions-builtin/Lora/lora.py +++ b/extensions-builtin/Lora/lora.py @@ -172,7 +172,7 @@ def load_lora(name, filename): else: print(f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}') continue - assert False, f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}' + raise AssertionError(f"Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}") with torch.no_grad(): module.weight.copy_(weight) @@ -184,7 +184,7 @@ def load_lora(name, filename): elif lora_key == "lora_down.weight": lora_module.down = module else: - assert False, f'Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha' + raise AssertionError(f"Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha") if len(keys_failed_to_match) > 0: print(f"Failed to match keys when loading Lora {filename}: {keys_failed_to_match}") @@ -202,7 +202,7 @@ def load_loras(names, multipliers=None): loaded_loras.clear() loras_on_disk = [available_lora_aliases.get(name, None) for name in names] - if any([x is None for x in loras_on_disk]): + if any(x is None for x in loras_on_disk): list_available_loras() loras_on_disk = [available_lora_aliases.get(name, None) for name in names] @@ -309,7 +309,7 @@ def 
lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.Mu print(f'failed to calculate lora weights for layer {lora_layer_name}') - setattr(self, "lora_current_names", wanted_names) + self.lora_current_names = wanted_names def lora_forward(module, input, original_forward): @@ -343,8 +343,8 @@ def lora_forward(module, input, original_forward): def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]): - setattr(self, "lora_current_names", ()) - setattr(self, "lora_weights_backup", None) + self.lora_current_names = () + self.lora_weights_backup = None def lora_Linear_forward(self, input): diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py index 7db971fd..b70e2de7 100644 --- a/extensions-builtin/Lora/scripts/lora_script.py +++ b/extensions-builtin/Lora/scripts/lora_script.py @@ -53,7 +53,7 @@ script_callbacks.on_infotext_pasted(lora.infotext_pasted) shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), { - "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras), + "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(lora.available_loras)}, refresh=lora.list_available_loras), })) diff --git a/modules/config_states.py b/modules/config_states.py index 8f1ff428..75da862a 100644 --- a/modules/config_states.py +++ b/modules/config_states.py @@ -35,7 +35,7 @@ def list_config_states(): j["filepath"] = path config_states.append(j) - config_states = list(sorted(config_states, key=lambda cs: cs["created_at"], reverse=True)) + config_states = sorted(config_states, key=lambda cs: cs["created_at"], reverse=True) for cs in config_states: timestamp = time.asctime(time.gmtime(cs["created_at"])) diff --git a/modules/deepbooru.py b/modules/deepbooru.py index 1c4554a2..547e1b4c 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -78,7 +78,7 @@ class DeepDanbooru: res = [] - filtertags = set([x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")]) + filtertags = {x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")} for tag in [x for x in tags if x not in filtertags]: probability = probability_dict[tag] diff --git a/modules/devices.py b/modules/devices.py index c705a3cb..d8a34a0f 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -65,7 +65,7 @@ def enable_tf32(): # enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't # see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407 - if any([torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())]): + if any(torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())): torch.backends.cudnn.benchmark = True torch.backends.cuda.matmul.allow_tf32 = True diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 9fe749b7..6ef0bfdf 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -403,7 +403,7 @@ def attention_CrossAttention_forward(self, x, context=None, mask=None): k = self.to_k(context_k) v = self.to_v(context_v) - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v)) sim = 
einsum('b i d, b j d -> b i j', q, k) * self.scale diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py index be168736..e3f9eb13 100644 --- a/modules/hypernetworks/ui.py +++ b/modules/hypernetworks/ui.py @@ -5,13 +5,13 @@ import modules.hypernetworks.hypernetwork from modules import devices, sd_hijack, shared not_available = ["hardswish", "multiheadattention"] -keys = list(x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available) +keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available] def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None): filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure) - return gr.Dropdown.update(choices=sorted([x for x in shared.hypernetworks.keys()])), f"Created: {filename}", "" + return gr.Dropdown.update(choices=sorted(shared.hypernetworks.keys())), f"Created: {filename}", "" def train_hypernetwork(*args): diff --git a/modules/interrogate.py b/modules/interrogate.py index 22df9216..a1c8e537 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -159,7 +159,7 @@ class InterrogateModels: text_array = text_array[0:int(shared.opts.interrogate_clip_dict_limit)] top_count = min(top_count, len(text_array)) - text_tokens = clip.tokenize([text for text in text_array], truncate=True).to(devices.device_interrogate) + text_tokens = clip.tokenize(list(text_array), truncate=True).to(devices.device_interrogate) text_features = self.clip_model.encode_text(text_tokens).type(self.dtype) text_features /= text_features.norm(dim=-1, keepdim=True) diff --git a/modules/modelloader.py b/modules/modelloader.py index 92ada694..25612bf8 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -39,7 +39,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None if os.path.islink(full_path) and not os.path.exists(full_path): print(f"Skipping broken symlink: {full_path}") continue - if ext_blacklist is not None and any([full_path.endswith(x) for x in ext_blacklist]): + if ext_blacklist is not None and any(full_path.endswith(x) for x in ext_blacklist): continue if full_path not in output: output.append(full_path) diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py index 611c2b69..09432117 100644 --- a/modules/models/diffusion/ddpm_edit.py +++ b/modules/models/diffusion/ddpm_edit.py @@ -1130,7 +1130,7 @@ class LatentDiffusion(DDPM): if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + [x[:batch_size] for x in cond[key]] for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] @@ -1229,7 +1229,7 @@ class LatentDiffusion(DDPM): if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + [x[:batch_size] for x in cond[key]] for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, diff --git 
a/modules/scripts_auto_postprocessing.py b/modules/scripts_auto_postprocessing.py index 30d6d658..d63078de 100644 --- a/modules/scripts_auto_postprocessing.py +++ b/modules/scripts_auto_postprocessing.py @@ -17,7 +17,7 @@ class ScriptPostprocessingForMainUI(scripts.Script): return self.postprocessing_controls.values() def postprocess_image(self, p, script_pp, *args): - args_dict = {k: v for k, v in zip(self.postprocessing_controls, args)} + args_dict = dict(zip(self.postprocessing_controls, args)) pp = scripts_postprocessing.PostprocessedImage(script_pp.image) pp.info = {} diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 81573b78..e374aeb8 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -37,7 +37,7 @@ def apply_optimizations(): optimization_method = None - can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(getattr(torch.nn.functional, "scaled_dot_product_attention")) # not everyone has torch 2.x to use sdp + can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention) # not everyone has torch 2.x to use sdp if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)): print("Applying xformers cross attention optimization.") diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index b623d53d..a174bbe1 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -49,7 +49,7 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None): v_in = self.to_v(context_v) del context, context_k, context_v, x - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in)) + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in)) del q_in, k_in, v_in dtype = q.dtype @@ -98,7 +98,7 @@ def split_cross_attention_forward(self, x, context=None, mask=None): del context, x - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in)) + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in)) del q_in, k_in, v_in r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) @@ -229,7 +229,7 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None): with devices.without_autocast(disable=not shared.opts.upcast_attn): k = k * self.scale - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v)) r = einsum_op(q, k, v) r = r.to(dtype) return self.to_out(rearrange(r, '(b h) n d -> b n (h d)', h=h)) @@ -334,7 +334,7 @@ def xformers_attention_forward(self, x, context=None, mask=None): k_in = self.to_k(context_k) v_in = self.to_v(context_v) - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in)) + q, k, v = (rearrange(t, 'b n (h d) -> b n h d', h=h) for t in (q_in, k_in, v_in)) del q_in, k_in, v_in dtype = q.dtype @@ -460,7 +460,7 @@ def xformers_attnblock_forward(self, x): k = self.k(h_) v = self.v(h_) b, c, h, w = q.shape - q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v)) + q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v)) dtype = q.dtype if shared.opts.upcast_attn: q, k = q.float(), k.float() @@ -482,7 +482,7 @@ def sdp_attnblock_forward(self, x): k = 
self.k(h_) v = self.v(h_) b, c, h, w = q.shape - q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v)) + q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v)) dtype = q.dtype if shared.opts.upcast_attn: q, k = q.float(), k.float() @@ -506,7 +506,7 @@ def sub_quad_attnblock_forward(self, x): k = self.k(h_) v = self.v(h_) b, c, h, w = q.shape - q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v)) + q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v)) q = q.contiguous() k = k.contiguous() v = v.contiguous() diff --git a/modules/sd_samplers_compvis.py b/modules/sd_samplers_compvis.py index bfcc5574..7427648f 100644 --- a/modules/sd_samplers_compvis.py +++ b/modules/sd_samplers_compvis.py @@ -83,7 +83,7 @@ class VanillaStableDiffusionSampler: conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step) - assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers' + assert all(len(conds) == 1 for conds in conds_list), 'composition via AND is not supported for DDIM/PLMS samplers' cond = tensor # for DDIM, shapes must match, we can't just process cond and uncond independently; diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 3b8e9622..2f733cf5 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -86,7 +86,7 @@ class CFGDenoiser(torch.nn.Module): conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) - assert not is_edit_model or all([len(conds) == 1 for conds in conds_list]), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" + assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" batch_size = len(conds_list) repeats = [len(conds_list[i]) for i in range(batch_size)] diff --git a/modules/shared.py b/modules/shared.py index 7d70f041..e2691585 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -381,7 +381,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), { "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks (px)"), "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks (px)"), "extra_networks_add_text_separator": OptionInfo(" ", "Extra text to add before <...> when adding extra network to prompt"), - "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks), + "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(hypernetworks.keys())}, refresh=reload_hypernetworks), })) options_templates.update(options_section(('ui', "User interface"), { @@ -403,7 +403,7 @@ options_templates.update(options_section(('ui', "User interface"), { "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": 
list(opts.data_labels.keys())}), - "hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": [x for x in tab_names]}), + "hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}), "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), @@ -583,7 +583,7 @@ class Options: if item.section not in section_ids: section_ids[item.section] = len(section_ids) - self.data_labels = {k: v for k, v in sorted(settings_items, key=lambda x: section_ids[x[1].section])} + self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section])) def cast_value(self, key, value): """casts an arbitrary to the same type as this setting's value with key diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 9ed9ba45..c37bb2ad 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -167,7 +167,7 @@ class EmbeddingDatabase: if 'string_to_param' in data: param_dict = data['string_to_param'] if hasattr(param_dict, '_parameters'): - param_dict = getattr(param_dict, '_parameters') # fix for torch 1.12.1 loading saved file from torch 1.11 + param_dict = param_dict._parameters # fix for torch 1.12.1 loading saved file from torch 1.11 assert len(param_dict) == 1, 'embedding file has multiple terms in it' emb = next(iter(param_dict.items()))[1] # diffuser concepts diff --git a/modules/ui.py b/modules/ui.py index 782b569d..84d661b2 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1222,7 +1222,7 @@ def create_ui(): ) def get_textual_inversion_template_names(): - return sorted([x for x in textual_inversion.textual_inversion_templates]) + return sorted(textual_inversion.textual_inversion_templates) with gr.Tab(label="Train", id="train"): gr.HTML(value="
Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]
") @@ -1230,8 +1230,8 @@ def create_ui(): train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name") - train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=[x for x in shared.hypernetworks.keys()]) - create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted([x for x in shared.hypernetworks.keys()])}, "refresh_train_hypernetwork_name") + train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=list(shared.hypernetworks.keys())) + create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks.keys())}, "refresh_train_hypernetwork_name") with FormRow(): embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") @@ -1808,7 +1808,7 @@ def create_ui(): if type(x) == gr.Dropdown: def check_dropdown(val): if getattr(x, 'multiselect', False): - return all([value in x.choices for value in val]) + return all(value in x.choices for value in val) else: return val in x.choices diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 800e467a..ab585917 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -26,7 +26,7 @@ def register_page(page): def fetch_file(filename: str = ""): from starlette.responses import FileResponse - if not any([Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs]): + if not any(Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs): raise ValueError(f"File cannot be fetched: {filename}. 
Must be in one of directories registered by extra pages.") ext = os.path.splitext(filename)[1].lower() @@ -326,7 +326,7 @@ def setup_ui(ui, gallery): is_allowed = False for extra_page in ui.stored_extra_pages: - if any([path_is_parent(x, filename) for x in extra_page.allowed_directories_for_previews()]): + if any(path_is_parent(x, filename) for x in extra_page.allowed_directories_for_previews()): is_allowed = True break diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 46fa9cb0..cac73c51 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -23,7 +23,7 @@ def register_tmp_file(gradio, filename): def check_tmp_file(gradio, filename): if hasattr(gradio, 'temp_file_sets'): - return any([filename in fileset for fileset in gradio.temp_file_sets]) + return any(filename in fileset for fileset in gradio.temp_file_sets) if hasattr(gradio, 'temp_dirs'): return any(Path(temp_dir).resolve() in Path(filename).resolve().parents for temp_dir in gradio.temp_dirs) -- cgit v1.2.3 From a5121e7a0623db328a9462d340d389ed6737374a Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 11:37:18 +0300 Subject: fixes for B007 --- extensions-builtin/LDSR/ldsr_model_arch.py | 2 +- extensions-builtin/Lora/lora.py | 2 +- extensions-builtin/ScuNET/scripts/scunet_model.py | 2 +- extensions-builtin/SwinIR/swinir_model_arch.py | 2 +- extensions-builtin/SwinIR/swinir_model_arch_v2.py | 2 +- modules/codeformer_model.py | 2 +- modules/esrgan_model.py | 8 ++------ modules/extra_networks.py | 2 +- modules/generation_parameters_copypaste.py | 2 +- modules/hypernetworks/hypernetwork.py | 12 ++++++------ modules/images.py | 2 +- modules/interrogate.py | 4 ++-- modules/prompt_parser.py | 14 +++++++------- modules/safe.py | 4 ++-- modules/scripts.py | 10 +++++----- modules/scripts_postprocessing.py | 8 ++++---- modules/sd_hijack_clip.py | 2 +- modules/shared.py | 6 +++--- modules/textual_inversion/learn_schedule.py | 2 +- modules/textual_inversion/textual_inversion.py | 10 +++++----- modules/ui.py | 6 +++--- modules/ui_extra_networks.py | 2 +- modules/ui_tempdir.py | 2 +- modules/upscaler.py | 2 +- pyproject.toml | 1 - scripts/prompts_from_file.py | 2 +- scripts/sd_upscale.py | 4 ++-- scripts/xyz_grid.py | 2 +- 28 files changed, 57 insertions(+), 62 deletions(-) (limited to 'modules/shared.py') diff --git a/extensions-builtin/LDSR/ldsr_model_arch.py b/extensions-builtin/LDSR/ldsr_model_arch.py index a5fb8907..27e38549 100644 --- a/extensions-builtin/LDSR/ldsr_model_arch.py +++ b/extensions-builtin/LDSR/ldsr_model_arch.py @@ -88,7 +88,7 @@ class LDSR: x_t = None logs = None - for n in range(n_runs): + for _ in range(n_runs): if custom_shape is not None: x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device) x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0]) diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py index 9795540f..7b56136f 100644 --- a/extensions-builtin/Lora/lora.py +++ b/extensions-builtin/Lora/lora.py @@ -418,7 +418,7 @@ def infotext_pasted(infotext, params): added = [] - for k, v in params.items(): + for k in params: if not k.startswith("AddNet Model "): continue diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py index aa2fdb3a..1f5ea0d3 100644 --- a/extensions-builtin/ScuNET/scripts/scunet_model.py +++ b/extensions-builtin/ScuNET/scripts/scunet_model.py @@ -132,7 +132,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler): model = 
net(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64) model.load_state_dict(torch.load(filename), strict=True) model.eval() - for k, v in model.named_parameters(): + for _, v in model.named_parameters(): v.requires_grad = False model = model.to(device) diff --git a/extensions-builtin/SwinIR/swinir_model_arch.py b/extensions-builtin/SwinIR/swinir_model_arch.py index 75f7bedc..de195d9b 100644 --- a/extensions-builtin/SwinIR/swinir_model_arch.py +++ b/extensions-builtin/SwinIR/swinir_model_arch.py @@ -848,7 +848,7 @@ class SwinIR(nn.Module): H, W = self.patches_resolution flops += H * W * 3 * self.embed_dim * 9 flops += self.patch_embed.flops() - for i, layer in enumerate(self.layers): + for layer in self.layers: flops += layer.flops() flops += H * W * 3 * self.embed_dim * self.embed_dim flops += self.upsample.flops() diff --git a/extensions-builtin/SwinIR/swinir_model_arch_v2.py b/extensions-builtin/SwinIR/swinir_model_arch_v2.py index d4c0b0da..15777af9 100644 --- a/extensions-builtin/SwinIR/swinir_model_arch_v2.py +++ b/extensions-builtin/SwinIR/swinir_model_arch_v2.py @@ -1001,7 +1001,7 @@ class Swin2SR(nn.Module): H, W = self.patches_resolution flops += H * W * 3 * self.embed_dim * 9 flops += self.patch_embed.flops() - for i, layer in enumerate(self.layers): + for layer in self.layers: flops += layer.flops() flops += H * W * 3 * self.embed_dim * self.embed_dim flops += self.upsample.flops() diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py index 8e56cb89..ececdbae 100644 --- a/modules/codeformer_model.py +++ b/modules/codeformer_model.py @@ -94,7 +94,7 @@ def setup_model(dirname): self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5) self.face_helper.align_warp_face() - for idx, cropped_face in enumerate(self.face_helper.cropped_faces): + for cropped_face in self.face_helper.cropped_faces: cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True) normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True) cropped_face_t = cropped_face_t.unsqueeze(0).to(devices.device_codeformer) diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index 85aa6934..a009eb42 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -16,9 +16,7 @@ def mod2normal(state_dict): # this code is copied from https://github.com/victorca25/iNNfer if 'conv_first.weight' in state_dict: crt_net = {} - items = [] - for k, v in state_dict.items(): - items.append(k) + items = list(state_dict) crt_net['model.0.weight'] = state_dict['conv_first.weight'] crt_net['model.0.bias'] = state_dict['conv_first.bias'] @@ -52,9 +50,7 @@ def resrgan2normal(state_dict, nb=23): if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict: re8x = 0 crt_net = {} - items = [] - for k, v in state_dict.items(): - items.append(k) + items = list(state_dict) crt_net['model.0.weight'] = state_dict['conv_first.weight'] crt_net['model.0.bias'] = state_dict['conv_first.bias'] diff --git a/modules/extra_networks.py b/modules/extra_networks.py index 1978673d..f9db41bc 100644 --- a/modules/extra_networks.py +++ b/modules/extra_networks.py @@ -91,7 +91,7 @@ def deactivate(p, extra_network_data): """call deactivate for extra networks in extra_network_data in specified order, then call deactivate for all remaining registered networks""" - for extra_network_name, extra_network_args in extra_network_data.items(): + for extra_network_name in extra_network_data: extra_network = extra_network_registry.get(extra_network_name, 
None) if extra_network is None: continue diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 7fbbe707..b0e945a1 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -247,7 +247,7 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model lines.append(lastline) lastline = '' - for i, line in enumerate(lines): + for line in lines: line = line.strip() if line.startswith("Negative prompt:"): done_with_prompt = True diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 6ef0bfdf..38ef074f 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -177,34 +177,34 @@ class Hypernetwork: def weights(self): res = [] - for k, layers in self.layers.items(): + for layers in self.layers.values(): for layer in layers: res += layer.parameters() return res def train(self, mode=True): - for k, layers in self.layers.items(): + for layers in self.layers.values(): for layer in layers: layer.train(mode=mode) for param in layer.parameters(): param.requires_grad = mode def to(self, device): - for k, layers in self.layers.items(): + for layers in self.layers.values(): for layer in layers: layer.to(device) return self def set_multiplier(self, multiplier): - for k, layers in self.layers.items(): + for layers in self.layers.values(): for layer in layers: layer.multiplier = multiplier return self def eval(self): - for k, layers in self.layers.items(): + for layers in self.layers.values(): for layer in layers: layer.eval() for param in layer.parameters(): @@ -619,7 +619,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi try: sd_hijack_checkpoint.add() - for i in range((steps-initial_step) * gradient_step): + for _ in range((steps-initial_step) * gradient_step): if scheduler.finished: break if shared.state.interrupted: diff --git a/modules/images.py b/modules/images.py index 7392cb8b..c4e98c75 100644 --- a/modules/images.py +++ b/modules/images.py @@ -149,7 +149,7 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0): return ImageFont.truetype(Roboto, fontsize) def draw_texts(drawing, draw_x, draw_y, lines, initial_fnt, initial_fontsize): - for i, line in enumerate(lines): + for line in lines: fnt = initial_fnt fontsize = initial_fontsize while drawing.multiline_textsize(line.text, font=fnt)[0] > line.allowed_width and fontsize > 0: diff --git a/modules/interrogate.py b/modules/interrogate.py index a1c8e537..111b1322 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -207,8 +207,8 @@ class InterrogateModels: image_features /= image_features.norm(dim=-1, keepdim=True) - for name, topn, items in self.categories(): - matches = self.rank(image_features, items, top_count=topn) + for cat in self.categories(): + matches = self.rank(image_features, cat.items, top_count=cat.topn) for match, score in matches: if shared.opts.interrogate_return_ranks: res += f", ({match}:{score/100:.3f})" diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 3a720721..b4aff704 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -143,7 +143,7 @@ def get_learned_conditioning(model, prompts, steps): conds = model.get_learned_conditioning(texts) cond_schedule = [] - for i, (end_at_step, text) in enumerate(prompt_schedule): + for i, (end_at_step, _) in enumerate(prompt_schedule): 
cond_schedule.append(ScheduledPromptConditioning(end_at_step, conds[i])) cache[prompt] = cond_schedule @@ -219,8 +219,8 @@ def reconstruct_cond_batch(c: List[List[ScheduledPromptConditioning]], current_s res = torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype) for i, cond_schedule in enumerate(c): target_index = 0 - for current, (end_at, cond) in enumerate(cond_schedule): - if current_step <= end_at: + for current, entry in enumerate(cond_schedule): + if current_step <= entry.end_at_step: target_index = current break res[i] = cond_schedule[target_index].cond @@ -234,13 +234,13 @@ def reconstruct_multicond_batch(c: MulticondLearnedConditioning, current_step): tensors = [] conds_list = [] - for batch_no, composable_prompts in enumerate(c.batch): + for composable_prompts in c.batch: conds_for_batch = [] - for cond_index, composable_prompt in enumerate(composable_prompts): + for composable_prompt in composable_prompts: target_index = 0 - for current, (end_at, cond) in enumerate(composable_prompt.schedules): - if current_step <= end_at: + for current, entry in enumerate(composable_prompt.schedules): + if current_step <= entry.end_at_step: target_index = current break diff --git a/modules/safe.py b/modules/safe.py index 2d5b972f..1e791c5b 100644 --- a/modules/safe.py +++ b/modules/safe.py @@ -95,11 +95,11 @@ def check_pt(filename, extra_handler): except zipfile.BadZipfile: - # if it's not a zip file, it's an olf pytorch format, with five objects written to pickle + # if it's not a zip file, it's an old pytorch format, with five objects written to pickle with open(filename, "rb") as file: unpickler = RestrictedUnpickler(file) unpickler.extra_handler = extra_handler - for i in range(5): + for _ in range(5): unpickler.load() diff --git a/modules/scripts.py b/modules/scripts.py index d945b89f..0c12ebd5 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -231,7 +231,7 @@ def load_scripts(): syspath = sys.path def register_scripts_from_module(module): - for key, script_class in module.__dict__.items(): + for script_class in module.__dict__.values(): if type(script_class) != type: continue @@ -295,9 +295,9 @@ class ScriptRunner: auto_processing_scripts = scripts_auto_postprocessing.create_auto_preprocessing_script_data() - for script_class, path, basedir, script_module in auto_processing_scripts + scripts_data: - script = script_class() - script.filename = path + for script_data in auto_processing_scripts + scripts_data: + script = script_data.script_class() + script.filename = script_data.path script.is_txt2img = not is_img2img script.is_img2img = is_img2img @@ -492,7 +492,7 @@ class ScriptRunner: module = script_loading.load_module(script.filename) cache[filename] = module - for key, script_class in module.__dict__.items(): + for script_class in module.__dict__.values(): if type(script_class) == type and issubclass(script_class, Script): self.scripts[si] = script_class() self.scripts[si].filename = filename diff --git a/modules/scripts_postprocessing.py b/modules/scripts_postprocessing.py index b11568c0..6751406c 100644 --- a/modules/scripts_postprocessing.py +++ b/modules/scripts_postprocessing.py @@ -66,9 +66,9 @@ class ScriptPostprocessingRunner: def initialize_scripts(self, scripts_data): self.scripts = [] - for script_class, path, basedir, script_module in scripts_data: - script: ScriptPostprocessing = script_class() - script.filename = path + for script_data in scripts_data: + script: ScriptPostprocessing = script_data.script_class() + script.filename = 
script_data.path if script.name == "Simple Upscale": continue @@ -124,7 +124,7 @@ class ScriptPostprocessingRunner: script_args = args[script.args_from:script.args_to] process_args = {} - for (name, component), value in zip(script.controls.items(), script_args): + for (name, component), value in zip(script.controls.items(), script_args): # noqa B007 process_args[name] = value script.process(pp, **process_args) diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index 9fa5c5c5..c0c350f6 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -223,7 +223,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): self.hijack.fixes = [x.fixes for x in batch_chunk] for fixes in self.hijack.fixes: - for position, embedding in fixes: + for position, embedding in fixes: # noqa: B007 used_embeddings[embedding.name] = embedding z = self.process_tokens(tokens, multipliers) diff --git a/modules/shared.py b/modules/shared.py index e2691585..913c9e63 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -211,7 +211,7 @@ class OptionInfo: def options_section(section_identifier, options_dict): - for k, v in options_dict.items(): + for v in options_dict.values(): v.section = section_identifier return options_dict @@ -579,7 +579,7 @@ class Options: section_ids = {} settings_items = self.data_labels.items() - for k, item in settings_items: + for _, item in settings_items: if item.section not in section_ids: section_ids[item.section] = len(section_ids) @@ -740,7 +740,7 @@ def walk_files(path, allowed_extensions=None): if allowed_extensions is not None: allowed_extensions = set(allowed_extensions) - for root, dirs, files in os.walk(path): + for root, _, files in os.walk(path): for filename in files: if allowed_extensions is not None: _, ext = os.path.splitext(filename) diff --git a/modules/textual_inversion/learn_schedule.py b/modules/textual_inversion/learn_schedule.py index fda58898..c56bea45 100644 --- a/modules/textual_inversion/learn_schedule.py +++ b/modules/textual_inversion/learn_schedule.py @@ -12,7 +12,7 @@ class LearnScheduleIterator: self.it = 0 self.maxit = 0 try: - for i, pair in enumerate(pairs): + for pair in pairs: if not pair.strip(): continue tmp = pair.split(':') diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index c37bb2ad..47035332 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -29,7 +29,7 @@ textual_inversion_templates = {} def list_textual_inversion_templates(): textual_inversion_templates.clear() - for root, dirs, fns in os.walk(shared.cmd_opts.textual_inversion_templates_dir): + for root, _, fns in os.walk(shared.cmd_opts.textual_inversion_templates_dir): for fn in fns: path = os.path.join(root, fn) @@ -198,7 +198,7 @@ class EmbeddingDatabase: if not os.path.isdir(embdir.path): return - for root, dirs, fns in os.walk(embdir.path, followlinks=True): + for root, _, fns in os.walk(embdir.path, followlinks=True): for fn in fns: try: fullfn = os.path.join(root, fn) @@ -215,7 +215,7 @@ class EmbeddingDatabase: def load_textual_inversion_embeddings(self, force_reload=False): if not force_reload: need_reload = False - for path, embdir in self.embedding_dirs.items(): + for embdir in self.embedding_dirs.values(): if embdir.has_changed(): need_reload = True break @@ -228,7 +228,7 @@ class EmbeddingDatabase: self.skipped_embeddings.clear() self.expected_shape = self.get_expected_shape() - for path, embdir in 
self.embedding_dirs.items(): + for embdir in self.embedding_dirs.values(): self.load_from_dir(embdir) embdir.update() @@ -469,7 +469,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st try: sd_hijack_checkpoint.add() - for i in range((steps-initial_step) * gradient_step): + for _ in range((steps-initial_step) * gradient_step): if scheduler.finished: break if shared.state.interrupted: diff --git a/modules/ui.py b/modules/ui.py index 84d661b2..83bfb7d8 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -416,7 +416,7 @@ def create_sampler_and_steps_selection(choices, tabname): def ordered_ui_categories(): user_order = {x.strip(): i * 2 + 1 for i, x in enumerate(shared.opts.ui_reorder.split(","))} - for i, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): + for _, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): yield category @@ -1646,7 +1646,7 @@ def create_ui(): with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: with gr.Row(elem_id="quicksettings", variant="compact"): - for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])): + for _i, k, _item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])): component = create_setting_component(k, is_quicksettings=True) component_dict[k] = component @@ -1673,7 +1673,7 @@ def create_ui(): outputs=[text_settings, result], ) - for i, k, item in quicksettings_list: + for _i, k, _item in quicksettings_list: component = component_dict[k] info = opts.data_labels[k] diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index ab585917..2fd82e8e 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -90,7 +90,7 @@ class ExtraNetworksPage: subdirs = {} for parentdir in [os.path.abspath(x) for x in self.allowed_directories_for_previews()]: - for root, dirs, files in os.walk(parentdir): + for root, dirs, _ in os.walk(parentdir): for dirname in dirs: x = os.path.join(root, dirname) diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index cac73c51..f05049e1 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -72,7 +72,7 @@ def cleanup_tmpdr(): if temp_dir == "" or not os.path.isdir(temp_dir): return - for root, dirs, files in os.walk(temp_dir, topdown=False): + for root, _, files in os.walk(temp_dir, topdown=False): for name in files: _, extension = os.path.splitext(name) if extension != ".png": diff --git a/modules/upscaler.py b/modules/upscaler.py index e145be30..8acb6e96 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -55,7 +55,7 @@ class Upscaler: dest_w = int(img.width * scale) dest_h = int(img.height * scale) - for i in range(3): + for _ in range(3): shape = (img.width, img.height) img = self.do_upscale(img, selected_model) diff --git a/pyproject.toml b/pyproject.toml index 346a0cde..c88907be 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,7 +20,6 @@ ignore = [ "I001", # Import block is un-sorted or un-formatted "C901", # Function is too complex "C408", # Rewrite as a literal - "B007", # Loop control variable not used within loop body ] diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 149bc85f..27af5ff6 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -156,7 +156,7 @@ class Script(scripts.Script): images = [] all_prompts = [] infotexts = [] - 
for n, args in enumerate(jobs): + for args in jobs: state.job = f"{state.job_no + 1} out of {state.job_count}" copy_p = copy.copy(p) diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index d873a09c..0b1d3096 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -56,7 +56,7 @@ class Script(scripts.Script): work = [] - for y, h, row in grid.tiles: + for _y, _h, row in grid.tiles: for tiledata in row: work.append(tiledata[2]) @@ -85,7 +85,7 @@ class Script(scripts.Script): work_results += processed.images image_index = 0 - for y, h, row in grid.tiles: + for _y, _h, row in grid.tiles: for tiledata in row: tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (p.width, p.height)) image_index += 1 diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 332e0ecd..38a20381 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -704,7 +704,7 @@ class Script(scripts.Script): if not include_sub_grids: # Done with sub-grids, drop all related information: - for sg in range(z_count): + for _ in range(z_count): del processed.images[1] del processed.all_prompts[1] del processed.all_seeds[1] -- cgit v1.2.3 From 3ec7b705c78b7aca9569c92a419837352c7a4ec6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 21:21:32 +0300 Subject: suggestions and fixes from the PR --- extensions-builtin/Lora/scripts/lora_script.py | 2 +- extensions-builtin/SwinIR/swinir_model_arch.py | 6 +----- extensions-builtin/SwinIR/swinir_model_arch_v2.py | 11 ++--------- modules/codeformer/codeformer_arch.py | 7 ++----- modules/hypernetworks/ui.py | 4 ++-- modules/models/diffusion/uni_pc/uni_pc.py | 4 ++-- modules/scripts_postprocessing.py | 2 +- modules/sd_hijack_clip.py | 2 +- modules/shared.py | 2 +- modules/textual_inversion/textual_inversion.py | 3 +-- modules/ui.py | 4 ++-- 11 files changed, 16 insertions(+), 31 deletions(-) (limited to 'modules/shared.py') diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py index b70e2de7..13d297d7 100644 --- a/extensions-builtin/Lora/scripts/lora_script.py +++ b/extensions-builtin/Lora/scripts/lora_script.py @@ -53,7 +53,7 @@ script_callbacks.on_infotext_pasted(lora.infotext_pasted) shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), { - "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(lora.available_loras)}, refresh=lora.list_available_loras), + "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None", *lora.available_loras]}, refresh=lora.list_available_loras), })) diff --git a/extensions-builtin/SwinIR/swinir_model_arch.py b/extensions-builtin/SwinIR/swinir_model_arch.py index de195d9b..73e37cfa 100644 --- a/extensions-builtin/SwinIR/swinir_model_arch.py +++ b/extensions-builtin/SwinIR/swinir_model_arch.py @@ -644,17 +644,13 @@ class SwinIR(nn.Module): """ def __init__(self, img_size=64, patch_size=1, in_chans=3, - embed_dim=96, depths=None, num_heads=None, + embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6), window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, ape=False, patch_norm=True, use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv', **kwargs): super(SwinIR, self).__init__() - - depths = depths or [6, 6, 6, 6] - num_heads = num_heads or [6, 6, 6, 6] - num_in_ch 
= in_chans num_out_ch = in_chans num_feat = 64 diff --git a/extensions-builtin/SwinIR/swinir_model_arch_v2.py b/extensions-builtin/SwinIR/swinir_model_arch_v2.py index 15777af9..3ca9be78 100644 --- a/extensions-builtin/SwinIR/swinir_model_arch_v2.py +++ b/extensions-builtin/SwinIR/swinir_model_arch_v2.py @@ -74,12 +74,9 @@ class WindowAttention(nn.Module): """ def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0., - pretrained_window_size=None): + pretrained_window_size=(0, 0)): super().__init__() - - pretrained_window_size = pretrained_window_size or [0, 0] - self.dim = dim self.window_size = window_size # Wh, Ww self.pretrained_window_size = pretrained_window_size @@ -701,17 +698,13 @@ class Swin2SR(nn.Module): """ def __init__(self, img_size=64, patch_size=1, in_chans=3, - embed_dim=96, depths=None, num_heads=None, + embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6), window_size=7, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, ape=False, patch_norm=True, use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv', **kwargs): super(Swin2SR, self).__init__() - - depths = depths or [6, 6, 6, 6] - num_heads = num_heads or [6, 6, 6, 6] - num_in_ch = in_chans num_out_ch = in_chans num_feat = 64 diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py index ff1c0b4b..45c70f84 100644 --- a/modules/codeformer/codeformer_arch.py +++ b/modules/codeformer/codeformer_arch.py @@ -161,13 +161,10 @@ class Fuse_sft_block(nn.Module): class CodeFormer(VQAutoEncoder): def __init__(self, dim_embd=512, n_head=8, n_layers=9, codebook_size=1024, latent_size=256, - connect_list=None, - fix_modules=None): + connect_list=('32', '64', '128', '256'), + fix_modules=('quantize', 'generator')): super(CodeFormer, self).__init__(512, 64, [1, 2, 2, 4, 4, 8], 'nearest',2, [16], codebook_size) - connect_list = connect_list or ['32', '64', '128', '256'] - fix_modules = fix_modules or ['quantize', 'generator'] - if fix_modules is not None: for module in fix_modules: for param in getattr(self, module).parameters(): diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py index e3f9eb13..8b6255e2 100644 --- a/modules/hypernetworks/ui.py +++ b/modules/hypernetworks/ui.py @@ -5,13 +5,13 @@ import modules.hypernetworks.hypernetwork from modules import devices, sd_hijack, shared not_available = ["hardswish", "multiheadattention"] -keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available] +keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict if x not in not_available] def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None): filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure) - return gr.Dropdown.update(choices=sorted(shared.hypernetworks.keys())), f"Created: {filename}", "" + return gr.Dropdown.update(choices=sorted(shared.hypernetworks)), f"Created: {filename}", "" def train_hypernetwork(*args): diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py index f6c49f87..a227b947 100644 --- a/modules/models/diffusion/uni_pc/uni_pc.py +++ 
b/modules/models/diffusion/uni_pc/uni_pc.py @@ -275,8 +275,8 @@ def model_wrapper( A noise prediction model that accepts the noised data and the continuous time as the inputs. """ - model_kwargs = model_kwargs or [] - classifier_kwargs = classifier_kwargs or [] + model_kwargs = model_kwargs or {} + classifier_kwargs = classifier_kwargs or {} def get_model_input_time(t_continuous): """ diff --git a/modules/scripts_postprocessing.py b/modules/scripts_postprocessing.py index 6751406c..bac1335d 100644 --- a/modules/scripts_postprocessing.py +++ b/modules/scripts_postprocessing.py @@ -124,7 +124,7 @@ class ScriptPostprocessingRunner: script_args = args[script.args_from:script.args_to] process_args = {} - for (name, component), value in zip(script.controls.items(), script_args): # noqa B007 + for (name, _component), value in zip(script.controls.items(), script_args): process_args[name] = value script.process(pp, **process_args) diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index c0c350f6..cc6e8c21 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -223,7 +223,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): self.hijack.fixes = [x.fixes for x in batch_chunk] for fixes in self.hijack.fixes: - for position, embedding in fixes: # noqa: B007 + for _position, embedding in fixes: used_embeddings[embedding.name] = embedding z = self.process_tokens(tokens, multipliers) diff --git a/modules/shared.py b/modules/shared.py index 913c9e63..ac67adc0 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -381,7 +381,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), { "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks (px)"), "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks (px)"), "extra_networks_add_text_separator": OptionInfo(" ", "Extra text to add before <...> when adding extra network to prompt"), - "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(hypernetworks.keys())}, refresh=reload_hypernetworks), + "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", hypernetworks]}, refresh=reload_hypernetworks), })) options_templates.update(options_section(('ui', "User interface"), { diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 47035332..9e1b2b9a 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -166,8 +166,7 @@ class EmbeddingDatabase: # textual inversion embeddings if 'string_to_param' in data: param_dict = data['string_to_param'] - if hasattr(param_dict, '_parameters'): - param_dict = param_dict._parameters # fix for torch 1.12.1 loading saved file from torch 1.11 + param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11 assert len(param_dict) == 1, 'embedding file has multiple terms in it' emb = next(iter(param_dict.items()))[1] # diffuser concepts diff --git a/modules/ui.py b/modules/ui.py index 83bfb7d8..7ee99473 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1230,8 +1230,8 @@ def create_ui(): train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) create_refresh_button(train_embedding_name, 
diff --git a/modules/ui.py b/modules/ui.py
index 83bfb7d8..7ee99473 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1230,8 +1230,8 @@ def create_ui():
                     train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
                     create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name")
 
-                    train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=list(shared.hypernetworks.keys()))
-                    create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks.keys())}, "refresh_train_hypernetwork_name")
+                    train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks))
+                    create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name")
 
                     with FormRow():
                         embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate")
-- cgit v1.2.3
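Note the sd_hypernetwork hunk in the commit above: replacing `["None"] + list(hypernetworks.keys())` with `["None", hypernetworks]` puts the whole dict into the choices list as a single element instead of spreading its keys. The "put the star where it belongs" commit further down repairs this with `*` unpacking. A quick illustration with a made-up hypernetworks dict (not data from the codebase):

    hypernetworks = {"anime": object(), "sketch": object()}

    nested = ["None", hypernetworks]          # ['None', {...}] -- one dict element, wrong
    spread = ["None", *hypernetworks]         # ['None', 'anime', 'sketch']
    concat = ["None"] + list(hypernetworks)   # same result as the starred form

    assert spread == concat == ["None", "anime", "sketch"]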
From b7e160a87d07b2fd1c12812c43786e141cc86bd5 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 11 May 2023 08:14:45 +0300
Subject: change live preview format to jpeg to prevent unreasonably slow previews for large images, and add an option to let user select the format

---
 modules/progress.py | 4 ++--
 modules/shared.py   | 1 +
 2 files changed, 3 insertions(+), 2 deletions(-)

(limited to 'modules/shared.py')

diff --git a/modules/progress.py b/modules/progress.py
index 948e6f00..289dd311 100644
--- a/modules/progress.py
+++ b/modules/progress.py
@@ -95,9 +95,9 @@ def progressapi(req: ProgressRequest):
         image = shared.state.current_image
         if image is not None:
             buffered = io.BytesIO()
-            image.save(buffered, format="png")
+            image.save(buffered, format=opts.live_previews_format)
             base64_image = base64.b64encode(buffered.getvalue()).decode('ascii')
-            live_preview = f"data:image/png;base64,{base64_image}"
+            live_preview = f"data:image/{opts.live_previews_format};base64,{base64_image}"
             id_live_preview = shared.state.id_live_preview
         else:
             live_preview = None
diff --git a/modules/shared.py b/modules/shared.py
index ac67adc0..fc39161e 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -420,6 +420,7 @@ options_templates.update(options_section(('infotext', "Infotext"), {
 options_templates.update(options_section(('ui', "Live previews"), {
     "show_progressbar": OptionInfo(True, "Show progressbar"),
     "live_previews_enable": OptionInfo(True, "Show live previews of the created image"),
+    "live_previews_format": OptionInfo("jpeg", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}),
     "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
     "show_progress_every_n_steps": OptionInfo(10, "Show new live preview image every N sampling steps. Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}),
     "show_progress_type": OptionInfo("Approx NN", "Image creation progress preview mode", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap"]}),
-- cgit v1.2.3
From 0bfaf613a84613f41946da02571e0e467e88d273 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 11 May 2023 13:30:33 +0300
Subject: put the star where it belongs

---
 modules/shared.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules/shared.py')

diff --git a/modules/shared.py b/modules/shared.py
index fc39161e..f387b5ae 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -381,7 +381,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), {
     "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks (px)"),
     "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks (px)"),
     "extra_networks_add_text_separator": OptionInfo(" ", "Extra text to add before <...> when adding extra network to prompt"),
-    "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", hypernetworks]}, refresh=reload_hypernetworks),
+    "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks),
 }))
 
 options_templates.update(options_section(('ui', "User interface"), {
-- cgit v1.2.3
From cb3f8ff59fe8f142c3ca074b8cbaaf83357f9dc1 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Thu, 11 May 2023 15:55:43 +0000
Subject: Fix symlink scanning

---
 modules/shared.py            | 2 +-
 modules/ui_extra_networks.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'modules/shared.py')

diff --git a/modules/shared.py b/modules/shared.py
index f387b5ae..210424ac 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -741,7 +741,7 @@ def walk_files(path, allowed_extensions=None):
     if allowed_extensions is not None:
         allowed_extensions = set(allowed_extensions)
 
-    for root, _, files in os.walk(path):
+    for root, _, files in os.walk(path, followlinks=True):
         for filename in files:
             if allowed_extensions is not None:
                 _, ext = os.path.splitext(filename)
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index 2fd82e8e..e35d0bfe 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -90,7 +90,7 @@ class ExtraNetworksPage:
 
         subdirs = {}
         for parentdir in [os.path.abspath(x) for x in self.allowed_directories_for_previews()]:
-            for root, dirs, _ in os.walk(parentdir):
+            for root, dirs, _ in os.walk(parentdir, followlinks=True):
                 for dirname in dirs:
                     x = os.path.join(root, dirname)
-- cgit v1.2.3
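The "Fix symlink scanning" commit above adds `followlinks=True` because `os.walk` skips directories that are symbolic links by default, so models linked into the model folders were never listed. A rough, self-contained sketch of the behaviour difference (the directory layout in the comments is hypothetical):

    import os

    def list_files(path, follow=False):
        found = []
        # With followlinks=False (the default), os.walk does not descend into
        # directories that are symlinks; with True it treats them like normal dirs.
        # Caution: following links can loop forever if the links form a cycle.
        for root, _dirs, files in os.walk(path, followlinks=follow):
            for filename in files:
                found.append(os.path.join(root, filename))
        return found

    # Hypothetical layout: models/Lora/extra -> /mnt/storage/loras (a symlink)
    # list_files("models/Lora")               -> misses everything under extra/
    # list_files("models/Lora", follow=True)  -> also returns the linked files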
From da10de022f69e7847bcc64a7914d56246d852e20 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Thu, 11 May 2023 20:52:30 +0300
Subject: Make live previews use JPEG only when the image is lorge enough

---
 modules/progress.py | 12 ++++++++++--
 modules/shared.py   |  2 +-
 2 files changed, 11 insertions(+), 3 deletions(-)

(limited to 'modules/shared.py')

diff --git a/modules/progress.py b/modules/progress.py
index 289dd311..c2e37834 100644
--- a/modules/progress.py
+++ b/modules/progress.py
@@ -95,9 +95,17 @@ def progressapi(req: ProgressRequest):
         image = shared.state.current_image
         if image is not None:
             buffered = io.BytesIO()
-            image.save(buffered, format=opts.live_previews_format)
+            format = opts.live_previews_format
+            save_kwargs = {}
+            if format == "auto":
+                if max(*image.size) > 256:
+                    format = "jpeg"
+                else:
+                    format = "png"
+                    save_kwargs = {"optimize": True}
+            image.save(buffered, format=format, **save_kwargs)
             base64_image = base64.b64encode(buffered.getvalue()).decode('ascii')
-            live_preview = f"data:image/{opts.live_previews_format};base64,{base64_image}"
+            live_preview = f"data:image/{format};base64,{base64_image}"
             id_live_preview = shared.state.id_live_preview
         else:
             live_preview = None
diff --git a/modules/shared.py b/modules/shared.py
index f387b5ae..22b45618 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -420,7 +420,7 @@ options_templates.update(options_section(('infotext', "Infotext"), {
 options_templates.update(options_section(('ui', "Live previews"), {
     "show_progressbar": OptionInfo(True, "Show progressbar"),
     "live_previews_enable": OptionInfo(True, "Show live previews of the created image"),
-    "live_previews_format": OptionInfo("jpeg", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}),
+    "live_previews_format": OptionInfo("auto", "Live preview file format", gr.Radio, {"choices": ["auto", "jpeg", "png", "webp"]}),
     "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
     "show_progress_every_n_steps": OptionInfo(10, "Show new live preview image every N sampling steps. Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}),
     "show_progress_type": OptionInfo("Approx NN", "Image creation progress preview mode", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap"]}),
-- cgit v1.2.3
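Taken together, the two live-preview commits above first make the preview encoding configurable and then add an "auto" mode that only switches to JPEG when the image is larger than 256px, keeping small previews as optimized PNG. A standalone sketch of the same idea using Pillow, outside the webui (the wrapper function and its name are hypothetical; the threshold and format choice mirror the patch):

    import base64
    import io

    from PIL import Image

    def encode_live_preview(image: Image.Image, preferred: str = "auto") -> str:
        """Encode a PIL image as a data: URI, preferring JPEG for large previews."""
        fmt = preferred
        save_kwargs = {}
        if fmt == "auto":
            if max(*image.size) > 256:
                fmt = "jpeg"  # much cheaper to encode and transfer for big images
            else:
                fmt = "png"
                save_kwargs = {"optimize": True}
        buffered = io.BytesIO()
        image.save(buffered, format=fmt, **save_kwargs)
        payload = base64.b64encode(buffered.getvalue()).decode("ascii")
        return f"data:image/{fmt};base64,{payload}"

    # A 512x512 preview comes back as a JPEG data URI:
    # encode_live_preview(Image.new("RGB", (512, 512))).startswith("data:image/jpeg")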