From 1f0fdede176989f151da6b97bd9a140b7f0af6e5 Mon Sep 17 00:00:00 2001 From: linkoid <36754150+linkoid@users.noreply.github.com> Date: Fri, 26 May 2023 15:15:59 -0400 Subject: Show full traceback in get_sd_model() to reveal if an error is caused by an extension --- modules/errors.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'modules/errors.py') diff --git a/modules/errors.py b/modules/errors.py index f6b80dbb..da4694f8 100644 --- a/modules/errors.py +++ b/modules/errors.py @@ -12,9 +12,13 @@ def print_error_explanation(message): print('=' * max_len, file=sys.stderr) -def display(e: Exception, task): +def display(e: Exception, task, *, full_traceback=False): print(f"{task or 'error'}: {type(e).__name__}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + te = traceback.TracebackException.from_exception(e) + if full_traceback: + # include frames leading up to the try-catch block + te.stack = traceback.StackSummary(traceback.extract_stack()[:-2] + te.stack) + print(*te.format(), sep="", file=sys.stderr) message = str(e) if "copying a param with shape torch.Size([640, 1024]) from checkpoint, the shape in current model is torch.Size([640, 768])" in message: -- cgit v1.2.3 From 00dfe27f59727407c5b408a80ff2a262934df495 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 29 May 2023 08:54:13 +0300 Subject: Add & use modules.errors.print_error where currently printing exception info by hand --- extensions-builtin/LDSR/scripts/ldsr_model.py | 7 ++--- extensions-builtin/ScuNET/scripts/scunet_model.py | 6 ++-- modules/api/api.py | 7 +++-- modules/call_queue.py | 22 ++++++-------- modules/codeformer_model.py | 10 +++---- modules/config_states.py | 12 +++----- modules/errors.py | 16 +++++++++++ modules/extensions.py | 10 +++---- modules/gfpgan_model.py | 6 ++-- modules/hypernetworks/hypernetwork.py | 14 ++++----- modules/images.py | 9 ++---- modules/interrogate.py | 5 ++-- modules/launch_utils.py | 7 +++-- modules/localization.py | 6 ++-- modules/processing.py | 2 +- modules/realesrgan_model.py | 14 ++++----- modules/safe.py | 26 +++++++++-------- modules/script_callbacks.py | 9 +++--- modules/script_loading.py | 7 ++--- modules/scripts.py | 35 ++++++++--------------- modules/sd_hijack_optimizations.py | 6 ++-- modules/textual_inversion/textual_inversion.py | 9 ++---- modules/ui.py | 10 +++---- modules/ui_extensions.py | 9 ++---- scripts/prompts_from_file.py | 6 ++-- 25 files changed, 117 insertions(+), 153 deletions(-) (limited to 'modules/errors.py') diff --git a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py index c4da79f3..95f1669d 100644 --- a/extensions-builtin/LDSR/scripts/ldsr_model.py +++ b/extensions-builtin/LDSR/scripts/ldsr_model.py @@ -1,9 +1,8 @@ import os -import sys -import traceback from basicsr.utils.download_util import load_file_from_url +from modules.errors import print_error from modules.upscaler import Upscaler, UpscalerData from ldsr_model_arch import LDSR from modules import shared, script_callbacks @@ -51,10 +50,8 @@ class UpscalerLDSR(Upscaler): try: return LDSR(model, yaml) - except Exception: - print("Error importing LDSR:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error("Error importing LDSR", exc_info=True) return None def do_upscale(self, img, path): diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py index 45d9297b..dd1b822e 100644 --- 
a/extensions-builtin/ScuNET/scripts/scunet_model.py +++ b/extensions-builtin/ScuNET/scripts/scunet_model.py @@ -1,6 +1,5 @@ import os.path import sys -import traceback import PIL.Image import numpy as np @@ -12,6 +11,8 @@ from basicsr.utils.download_util import load_file_from_url import modules.upscaler from modules import devices, modelloader, script_callbacks from scunet_model_arch import SCUNet as net + +from modules.errors import print_error from modules.shared import opts @@ -38,8 +39,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler): scaler_data = modules.upscaler.UpscalerData(name, file, self, 4) scalers.append(scaler_data) except Exception: - print(f"Error loading ScuNET model: {file}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error loading ScuNET model: {file}", exc_info=True) if add_model2: scaler_data2 = modules.upscaler.UpscalerData(self.model_name2, self.model_url2, self) scalers.append(scaler_data2) diff --git a/modules/api/api.py b/modules/api/api.py index 6a456861..79ce9228 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -16,6 +16,7 @@ from secrets import compare_digest import modules.shared as shared from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing from modules.api import models +from modules.errors import print_error from modules.shared import opts from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.textual_inversion.textual_inversion import create_embedding, train_embedding @@ -108,7 +109,6 @@ def api_middleware(app: FastAPI): from rich.console import Console console = Console() except Exception: - import traceback rich_available = False @app.middleware("http") @@ -139,11 +139,12 @@ def api_middleware(app: FastAPI): "errors": str(e), } if not isinstance(e, HTTPException): # do not print backtrace on known httpexceptions - print(f"API error: {request.method}: {request.url} {err}") + message = f"API error: {request.method}: {request.url} {err}" if rich_available: + print(message) console.print_exception(show_locals=True, max_frames=2, extra_lines=1, suppress=[anyio, starlette], word_wrap=False, width=min([console.width, 200])) else: - traceback.print_exc() + print_error(message, exc_info=True) return JSONResponse(status_code=vars(e).get('status_code', 500), content=jsonable_encoder(err)) @app.middleware("http") diff --git a/modules/call_queue.py b/modules/call_queue.py index 447bb764..dba2a9b4 100644 --- a/modules/call_queue.py +++ b/modules/call_queue.py @@ -1,10 +1,9 @@ import html -import sys import threading -import traceback import time from modules import shared, progress +from modules.errors import print_error queue_lock = threading.Lock() @@ -56,16 +55,14 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False): try: res = list(func(*args, **kwargs)) except Exception as e: - # When printing out our debug argument list, do not print out more than a MB of text - max_debug_str_len = 131072 # (1024*1024)/8 - - print("Error completing request", file=sys.stderr) - argStr = f"Arguments: {args} {kwargs}" - print(argStr[:max_debug_str_len], file=sys.stderr) - if len(argStr) > max_debug_str_len: - print(f"(Argument list truncated at {max_debug_str_len}/{len(argStr)} characters)", file=sys.stderr) - - print(traceback.format_exc(), file=sys.stderr) + # When printing out our debug argument list, + # do not print out more than a 100 KB of text + max_debug_str_len = 131072 + message = "Error 
completing request" + arg_str = f"Arguments: {args} {kwargs}"[:max_debug_str_len] + if len(arg_str) > max_debug_str_len: + arg_str += f" (Argument list truncated at {max_debug_str_len}/{len(arg_str)} characters)" + print_error(f"{message}\n{arg_str}", exc_info=True) shared.state.job = "" shared.state.job_count = 0 @@ -108,4 +105,3 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False): return tuple(res) return f - diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py index ececdbae..76143e9f 100644 --- a/modules/codeformer_model.py +++ b/modules/codeformer_model.py @@ -1,6 +1,4 @@ import os -import sys -import traceback import cv2 import torch @@ -8,6 +6,7 @@ import torch import modules.face_restoration import modules.shared from modules import shared, devices, modelloader +from modules.errors import print_error from modules.paths import models_path # codeformer people made a choice to include modified basicsr library to their project which makes @@ -105,8 +104,8 @@ def setup_model(dirname): restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1)) del output torch.cuda.empty_cache() - except Exception as error: - print(f'\tFailed inference for CodeFormer: {error}', file=sys.stderr) + except Exception: + print_error('Failed inference for CodeFormer', exc_info=True) restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1)) restored_face = restored_face.astype('uint8') @@ -135,7 +134,6 @@ def setup_model(dirname): shared.face_restorers.append(codeformer) except Exception: - print("Error setting up CodeFormer:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error("Error setting up CodeFormer", exc_info=True) # sys.path = stored_sys_path diff --git a/modules/config_states.py b/modules/config_states.py index db65bcdb..faeaf28b 100644 --- a/modules/config_states.py +++ b/modules/config_states.py @@ -3,8 +3,6 @@ Supports saving and restoring webui and extensions from a known working set of c """ import os -import sys -import traceback import json import time import tqdm @@ -14,6 +12,7 @@ from collections import OrderedDict import git from modules import shared, extensions +from modules.errors import print_error from modules.paths_internal import script_path, config_states_dir @@ -53,8 +52,7 @@ def get_webui_config(): if os.path.exists(os.path.join(script_path, ".git")): webui_repo = git.Repo(script_path) except Exception: - print(f"Error reading webui git info from {script_path}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error reading webui git info from {script_path}", exc_info=True) webui_remote = None webui_commit_hash = None @@ -134,8 +132,7 @@ def restore_webui_config(config): if os.path.exists(os.path.join(script_path, ".git")): webui_repo = git.Repo(script_path) except Exception: - print(f"Error reading webui git info from {script_path}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error reading webui git info from {script_path}", exc_info=True) return try: @@ -143,8 +140,7 @@ def restore_webui_config(config): webui_repo.git.reset(webui_commit_hash, hard=True) print(f"* Restored webui to commit {webui_commit_hash}.") except Exception: - print(f"Error restoring webui to commit {webui_commit_hash}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error restoring webui to commit{webui_commit_hash}") def restore_extension_config(config): diff --git a/modules/errors.py b/modules/errors.py index 
da4694f8..41d8dc93 100644 --- a/modules/errors.py +++ b/modules/errors.py @@ -1,7 +1,23 @@ import sys +import textwrap import traceback +def print_error( + message: str, + *, + exc_info: bool = False, +) -> None: + """ + Print an error message to stderr, with optional traceback. + """ + for line in message.splitlines(): + print("***", line, file=sys.stderr) + if exc_info: + print(textwrap.indent(traceback.format_exc(), " "), file=sys.stderr) + print("---") + + def print_error_explanation(message): lines = message.strip().split("\n") max_len = max([len(x) for x in lines]) diff --git a/modules/extensions.py b/modules/extensions.py index 624832a0..369d2584 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -1,11 +1,10 @@ import os -import sys import threading -import traceback import git from modules import shared +from modules.errors import print_error from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401 extensions = [] @@ -56,8 +55,7 @@ class Extension: if os.path.exists(os.path.join(self.path, ".git")): repo = git.Repo(self.path) except Exception: - print(f"Error reading github repository info from {self.path}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error reading github repository info from {self.path}", exc_info=True) if repo is None or repo.bare: self.remote = None @@ -72,8 +70,8 @@ class Extension: self.commit_hash = commit.hexsha self.version = self.commit_hash[:8] - except Exception as ex: - print(f"Failed reading extension data from Git repository ({self.name}): {ex}", file=sys.stderr) + except Exception: + print_error(f"Failed reading extension data from Git repository ({self.name})", exc_info=True) self.remote = None self.have_info_from_repo = True diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py index 0131dea4..d2f647fe 100644 --- a/modules/gfpgan_model.py +++ b/modules/gfpgan_model.py @@ -1,12 +1,11 @@ import os -import sys -import traceback import facexlib import gfpgan import modules.face_restoration from modules import paths, shared, devices, modelloader +from modules.errors import print_error model_dir = "GFPGAN" user_path = None @@ -112,5 +111,4 @@ def setup_model(dirname): shared.face_restorers.append(FaceRestorerGFPGAN()) except Exception: - print("Error setting up GFPGAN:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error("Error setting up GFPGAN", exc_info=True) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 570b5603..fcc1ef20 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -2,8 +2,6 @@ import datetime import glob import html import os -import sys -import traceback import inspect import modules.textual_inversion.dataset @@ -12,6 +10,7 @@ import tqdm from einops import rearrange, repeat from ldm.util import default from modules import devices, processing, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint +from modules.errors import print_error from modules.textual_inversion import textual_inversion, logging from modules.textual_inversion.learn_schedule import LearnRateScheduler from torch import einsum @@ -325,17 +324,14 @@ def load_hypernetwork(name): if path is None: return None - hypernetwork = Hypernetwork() - try: + hypernetwork = Hypernetwork() hypernetwork.load(path) + return hypernetwork except Exception: - print(f"Error loading hypernetwork {path}", file=sys.stderr) - print(traceback.format_exc(), 
file=sys.stderr)
+        print_error(f"Error loading hypernetwork {path}", exc_info=True)
         return None
 
-    return hypernetwork
-
 
 def load_hypernetworks(names, multipliers=None):
     already_loaded = {}
@@ -770,7 +766,7 @@ Last saved image: {html.escape(last_saved_image)}<br/>
 </p>
""" except Exception: - print(traceback.format_exc(), file=sys.stderr) + print_error("Exception in training hypernetwork", exc_info=True) finally: pbar.leave = False pbar.close() diff --git a/modules/images.py b/modules/images.py index e21e554c..69151bec 100644 --- a/modules/images.py +++ b/modules/images.py @@ -1,6 +1,4 @@ import datetime -import sys -import traceback import pytz import io @@ -18,6 +16,7 @@ import json import hashlib from modules import sd_samplers, shared, script_callbacks, errors +from modules.errors import print_error from modules.paths_internal import roboto_ttf_file from modules.shared import opts @@ -464,8 +463,7 @@ class FilenameGenerator: replacement = fun(self, *pattern_args) except Exception: replacement = None - print(f"Error adding [{pattern}] to filename", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error adding [{pattern}] to filename", exc_info=True) if replacement == NOTHING_AND_SKIP_PREVIOUS_TEXT: continue @@ -697,8 +695,7 @@ def read_info_from_image(image): Negative prompt: {json_info["uc"]} Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337""" except Exception: - print("Error parsing NovelAI image generation parameters:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error("Error parsing NovelAI image generation parameters", exc_info=True) return geninfo, items diff --git a/modules/interrogate.py b/modules/interrogate.py index 111b1322..d36e1a5a 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -1,6 +1,5 @@ import os import sys -import traceback from collections import namedtuple from pathlib import Path import re @@ -12,6 +11,7 @@ from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from modules import devices, paths, shared, lowvram, modelloader, errors +from modules.errors import print_error blip_image_eval_size = 384 clip_model_name = 'ViT-L/14' @@ -216,8 +216,7 @@ class InterrogateModels: res += f", {match}" except Exception: - print("Error interrogating", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error("Error interrogating", exc_info=True) res += "" self.unload() diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 35a52310..22edc106 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -8,6 +8,7 @@ import json from functools import lru_cache from modules import cmd_args +from modules.errors import print_error from modules.paths_internal import script_path, extensions_dir args, _ = cmd_args.parser.parse_known_args() @@ -188,7 +189,7 @@ def run_extension_installer(extension_dir): print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env)) except Exception as e: - print(e, file=sys.stderr) + print_error(str(e)) def list_extensions(settings_file): @@ -198,8 +199,8 @@ def list_extensions(settings_file): if os.path.isfile(settings_file): with open(settings_file, "r", encoding="utf8") as file: settings = json.load(file) - except Exception as e: - print(e, file=sys.stderr) + except Exception: + print_error("Could not load settings", exc_info=True) disabled_extensions = set(settings.get('disabled_extensions', [])) disable_all_extensions = settings.get('disable_all_extensions', 'none') diff --git a/modules/localization.py b/modules/localization.py index ee9c65e7..9a1df343 100644 
--- a/modules/localization.py +++ b/modules/localization.py @@ -1,8 +1,7 @@ import json import os -import sys -import traceback +from modules.errors import print_error localizations = {} @@ -31,7 +30,6 @@ def localization_js(current_localization_name: str) -> str: with open(fn, "r", encoding="utf8") as file: data = json.load(file) except Exception: - print(f"Error loading localization from {fn}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error loading localization from {fn}", exc_info=True) return f"window.localization = {json.dumps(data)}" diff --git a/modules/processing.py b/modules/processing.py index b75f2515..5c9bcce8 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -1,4 +1,5 @@ import json +import logging import math import os import sys @@ -23,7 +24,6 @@ import modules.images as images import modules.styles import modules.sd_models as sd_models import modules.sd_vae as sd_vae -import logging from ldm.data.util import AddMiDaS from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py index 99983678..c8d0c64f 100644 --- a/modules/realesrgan_model.py +++ b/modules/realesrgan_model.py @@ -1,12 +1,11 @@ import os -import sys -import traceback import numpy as np from PIL import Image from basicsr.utils.download_util import load_file_from_url from realesrgan import RealESRGANer +from modules.errors import print_error from modules.upscaler import Upscaler, UpscalerData from modules.shared import cmd_opts, opts from modules import modelloader @@ -36,8 +35,7 @@ class UpscalerRealESRGAN(Upscaler): self.scalers.append(scaler) except Exception: - print("Error importing Real-ESRGAN:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error("Error importing Real-ESRGAN", exc_info=True) self.enable = False self.scalers = [] @@ -76,9 +74,8 @@ class UpscalerRealESRGAN(Upscaler): info.local_data_path = load_file_from_url(url=info.data_path, model_dir=self.model_download_path, progress=True) return info - except Exception as e: - print(f"Error making Real-ESRGAN models list: {e}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + except Exception: + print_error("Error making Real-ESRGAN models list", exc_info=True) return None def load_models(self, _): @@ -135,5 +132,4 @@ def get_realesrgan_models(scaler): ] return models except Exception: - print("Error making Real-ESRGAN models list:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error("Error making Real-ESRGAN models list", exc_info=True) diff --git a/modules/safe.py b/modules/safe.py index e8f50774..b596f565 100644 --- a/modules/safe.py +++ b/modules/safe.py @@ -2,8 +2,6 @@ import pickle import collections -import sys -import traceback import torch import numpy @@ -11,6 +9,8 @@ import _codecs import zipfile import re +from modules.errors import print_error + # PyTorch 1.13 and later have _TypedStorage renamed to TypedStorage TypedStorage = torch.storage.TypedStorage if hasattr(torch.storage, 'TypedStorage') else torch.storage._TypedStorage @@ -136,17 +136,20 @@ def load_with_extra(filename, extra_handler=None, *args, **kwargs): check_pt(filename, extra_handler) except pickle.UnpicklingError: - print(f"Error verifying pickled file from {filename}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - print("-----> !!!! The file is most likely corrupted !!!! 
<-----", file=sys.stderr) - print("You can skip this check with --disable-safe-unpickle commandline argument, but that is not going to help you.\n\n", file=sys.stderr) + print_error( + f"Error verifying pickled file from {filename}\n" + "-----> !!!! The file is most likely corrupted !!!! <-----\n" + "You can skip this check with --disable-safe-unpickle commandline argument, but that is not going to help you.\n\n", + exc_info=True, + ) return None - except Exception: - print(f"Error verifying pickled file from {filename}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - print("\nThe file may be malicious, so the program is not going to read it.", file=sys.stderr) - print("You can skip this check with --disable-safe-unpickle commandline argument.\n\n", file=sys.stderr) + print_error( + f"Error verifying pickled file from {filename}\n" + f"The file may be malicious, so the program is not going to read it.\n" + f"You can skip this check with --disable-safe-unpickle commandline argument.\n\n", + exc_info=True, + ) return None return unsafe_torch_load(filename, *args, **kwargs) @@ -190,4 +193,3 @@ with safe.Extra(handler): unsafe_torch_load = torch.load torch.load = load global_extra_handler = None - diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index d2728e12..6aa9c3b6 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -1,16 +1,15 @@ -import sys -import traceback -from collections import namedtuple import inspect +from collections import namedtuple from typing import Optional, Dict, Any from fastapi import FastAPI from gradio import Blocks +from modules.errors import print_error + def report_exception(c, job): - print(f"Error executing callback {job} for {c.script}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error executing callback {job} for {c.script}", exc_info=True) class ImageSaveParams: diff --git a/modules/script_loading.py b/modules/script_loading.py index 57b15862..26efffcb 100644 --- a/modules/script_loading.py +++ b/modules/script_loading.py @@ -1,8 +1,8 @@ import os -import sys -import traceback import importlib.util +from modules.errors import print_error + def load_module(path): module_spec = importlib.util.spec_from_file_location(os.path.basename(path), path) @@ -27,5 +27,4 @@ def preload_extensions(extensions_dir, parser): module.preload(parser) except Exception: - print(f"Error running preload() for {preload_script}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error running preload() for {preload_script}", exc_info=True) diff --git a/modules/scripts.py b/modules/scripts.py index c902804b..a7168fd1 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -1,12 +1,12 @@ import os import re import sys -import traceback from collections import namedtuple import gradio as gr from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing +from modules.errors import print_error AlwaysVisible = object() @@ -264,8 +264,7 @@ def load_scripts(): register_scripts_from_module(script_module) except Exception: - print(f"Error loading script: {scriptfile.filename}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error loading script: {scriptfile.filename}", exc_info=True) finally: sys.path = syspath @@ -280,11 +279,9 @@ def load_scripts(): def wrap_call(func, filename, funcname, *args, default=None, **kwargs): try: - res = func(*args, **kwargs) - return res + return 
func(*args, **kwargs) except Exception: - print(f"Error calling: {filename}/{funcname}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error calling: {filename}/{funcname}", exc_info=True) return default @@ -450,8 +447,7 @@ class ScriptRunner: script_args = p.script_args[script.args_from:script.args_to] script.process(p, *script_args) except Exception: - print(f"Error running process: {script.filename}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error running process: {script.filename}", exc_info=True) def before_process_batch(self, p, **kwargs): for script in self.alwayson_scripts: @@ -459,8 +455,7 @@ class ScriptRunner: script_args = p.script_args[script.args_from:script.args_to] script.before_process_batch(p, *script_args, **kwargs) except Exception: - print(f"Error running before_process_batch: {script.filename}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error running before_process_batch: {script.filename}", exc_info=True) def process_batch(self, p, **kwargs): for script in self.alwayson_scripts: @@ -468,8 +463,7 @@ class ScriptRunner: script_args = p.script_args[script.args_from:script.args_to] script.process_batch(p, *script_args, **kwargs) except Exception: - print(f"Error running process_batch: {script.filename}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error running process_batch: {script.filename}", exc_info=True) def postprocess(self, p, processed): for script in self.alwayson_scripts: @@ -477,8 +471,7 @@ class ScriptRunner: script_args = p.script_args[script.args_from:script.args_to] script.postprocess(p, processed, *script_args) except Exception: - print(f"Error running postprocess: {script.filename}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error running postprocess: {script.filename}", exc_info=True) def postprocess_batch(self, p, images, **kwargs): for script in self.alwayson_scripts: @@ -486,8 +479,7 @@ class ScriptRunner: script_args = p.script_args[script.args_from:script.args_to] script.postprocess_batch(p, *script_args, images=images, **kwargs) except Exception: - print(f"Error running postprocess_batch: {script.filename}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error running postprocess_batch: {script.filename}", exc_info=True) def postprocess_image(self, p, pp: PostprocessImageArgs): for script in self.alwayson_scripts: @@ -495,24 +487,21 @@ class ScriptRunner: script_args = p.script_args[script.args_from:script.args_to] script.postprocess_image(p, pp, *script_args) except Exception: - print(f"Error running postprocess_batch: {script.filename}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error running postprocess_image: {script.filename}", exc_info=True) def before_component(self, component, **kwargs): for script in self.scripts: try: script.before_component(component, **kwargs) except Exception: - print(f"Error running before_component: {script.filename}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error running before_component: {script.filename}", exc_info=True) def after_component(self, component, **kwargs): for script in self.scripts: try: script.after_component(component, **kwargs) except Exception: - print(f"Error running after_component: {script.filename}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + 
print_error(f"Error running after_component: {script.filename}", exc_info=True) def reload_sources(self, cache): for si, script in list(enumerate(self.scripts)): diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index 2ec0b049..fd186fa2 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -1,7 +1,5 @@ from __future__ import annotations import math -import sys -import traceback import psutil import torch @@ -11,6 +9,7 @@ from ldm.util import default from einops import rearrange from modules import shared, errors, devices, sub_quadratic_attention +from modules.errors import print_error from modules.hypernetworks import hypernetwork import ldm.modules.attention @@ -140,8 +139,7 @@ if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers: import xformers.ops shared.xformers_available = True except Exception: - print("Cannot import xformers", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error("Cannot import xformers", exc_info=True) def get_available_vram(): diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index d489ed1e..a040a988 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -1,6 +1,4 @@ import os -import sys -import traceback from collections import namedtuple import torch @@ -16,6 +14,7 @@ from torch.utils.tensorboard import SummaryWriter from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint import modules.textual_inversion.dataset +from modules.errors import print_error from modules.textual_inversion.learn_schedule import LearnRateScheduler from modules.textual_inversion.image_embedding import embedding_to_b64, embedding_from_b64, insert_image_data_embed, extract_image_data_embed, caption_image_overlay @@ -207,8 +206,7 @@ class EmbeddingDatabase: self.load_from_file(fullfn, fn) except Exception: - print(f"Error loading embedding {fn}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error loading embedding {fn}", exc_info=True) continue def load_textual_inversion_embeddings(self, force_reload=False): @@ -632,8 +630,7 @@ Last saved image: {html.escape(last_saved_image)}
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt') save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True) except Exception: - print(traceback.format_exc(), file=sys.stderr) - pass + print_error("Error training embedding", exc_info=True) finally: pbar.leave = False pbar.close() diff --git a/modules/ui.py b/modules/ui.py index 001b9792..1ad94f02 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -2,7 +2,6 @@ import json import mimetypes import os import sys -import traceback from functools import reduce import warnings @@ -14,6 +13,7 @@ from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave +from modules.errors import print_error from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path, data_path @@ -231,9 +231,8 @@ def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: res = all_seeds[index if 0 <= index < len(all_seeds) else 0] except json.decoder.JSONDecodeError: - if gen_info_string != '': - print("Error parsing JSON generation info:", file=sys.stderr) - print(gen_info_string, file=sys.stderr) + if gen_info_string: + print_error(f"Error parsing JSON generation info: {gen_info_string}") return [res, gr_show(False)] @@ -1753,8 +1752,7 @@ def create_ui(): try: results = modules.extras.run_modelmerger(*args) except Exception as e: - print("Error loading/saving model file:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error("Error loading/saving model file", exc_info=True) modules.sd_models.list_models() # to remove the potentially missing models from the list return [*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"] return results diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 515ec262..cadf56be 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -1,10 +1,8 @@ import json import os.path -import sys import threading import time from datetime import datetime -import traceback import git @@ -14,6 +12,7 @@ import shutil import errno from modules import extensions, shared, paths, config_states +from modules.errors import print_error from modules.paths_internal import config_states_dir from modules.call_queue import wrap_gradio_gpu_call @@ -46,8 +45,7 @@ def apply_and_restart(disable_list, update_list, disable_all): try: ext.fetch_and_reset_hard() except Exception: - print(f"Error getting updates for {ext.name}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error getting updates for {ext.name}", exc_info=True) shared.opts.disabled_extensions = disabled shared.opts.disable_all_extensions = disable_all @@ -113,8 +111,7 @@ def check_updates(id_task, disable_list): if 'FETCH_HEAD' not in str(e): raise except Exception: - print(f"Error checking updates for {ext.name}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error checking updates for {ext.name}", exc_info=True) shared.state.nextjob() diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index b918a764..4dc24615 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -1,13 
+1,12 @@ import copy import random -import sys -import traceback import shlex import modules.scripts as scripts import gradio as gr from modules import sd_samplers +from modules.errors import print_error from modules.processing import Processed, process_images from modules.shared import state @@ -136,8 +135,7 @@ class Script(scripts.Script): try: args = cmdargs(line) except Exception: - print(f"Error parsing line {line} as commandline:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + print_error(f"Error parsing line {line} as commandline", exc_info=True) args = {"prompt": line} else: args = {"prompt": line} -- cgit v1.2.3 From 05933840f0676dd1a90a7e2ad3f2a0672624b2cd Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 31 May 2023 19:56:37 +0300 Subject: rename print_error to report, use it with together with package name --- extensions-builtin/LDSR/scripts/ldsr_model.py | 5 ++--- extensions-builtin/ScuNET/scripts/scunet_model.py | 5 ++--- modules/api/api.py | 5 ++--- modules/call_queue.py | 5 ++--- modules/codeformer_model.py | 7 +++---- modules/config_states.py | 9 ++++----- modules/errors.py | 8 ++------ modules/extensions.py | 7 +++---- modules/gfpgan_model.py | 5 ++--- modules/hypernetworks/hypernetwork.py | 7 +++---- modules/images.py | 5 ++--- modules/interrogate.py | 3 +-- modules/launch_utils.py | 7 +++---- modules/localization.py | 4 ++-- modules/realesrgan_model.py | 10 +++++----- modules/safe.py | 7 ++++--- modules/script_callbacks.py | 4 ++-- modules/script_loading.py | 4 ++-- modules/scripts.py | 23 +++++++++++------------ modules/sd_hijack_optimizations.py | 3 +-- modules/textual_inversion/textual_inversion.py | 7 +++---- modules/ui.py | 7 +++---- modules/ui_extensions.py | 7 +++---- scripts/prompts_from_file.py | 5 ++--- 24 files changed, 69 insertions(+), 90 deletions(-) (limited to 'modules/errors.py') diff --git a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py index 95f1669d..dbd6d331 100644 --- a/extensions-builtin/LDSR/scripts/ldsr_model.py +++ b/extensions-builtin/LDSR/scripts/ldsr_model.py @@ -2,10 +2,9 @@ import os from basicsr.utils.download_util import load_file_from_url -from modules.errors import print_error from modules.upscaler import Upscaler, UpscalerData from ldsr_model_arch import LDSR -from modules import shared, script_callbacks +from modules import shared, script_callbacks, errors import sd_hijack_autoencoder # noqa: F401 import sd_hijack_ddpm_v1 # noqa: F401 @@ -51,7 +50,7 @@ class UpscalerLDSR(Upscaler): try: return LDSR(model, yaml) except Exception: - print_error("Error importing LDSR", exc_info=True) + errors.report("Error importing LDSR", exc_info=True) return None def do_upscale(self, img, path): diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py index dd1b822e..85b4505f 100644 --- a/extensions-builtin/ScuNET/scripts/scunet_model.py +++ b/extensions-builtin/ScuNET/scripts/scunet_model.py @@ -9,10 +9,9 @@ from tqdm import tqdm from basicsr.utils.download_util import load_file_from_url import modules.upscaler -from modules import devices, modelloader, script_callbacks +from modules import devices, modelloader, script_callbacks, errors from scunet_model_arch import SCUNet as net -from modules.errors import print_error from modules.shared import opts @@ -39,7 +38,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler): scaler_data = modules.upscaler.UpscalerData(name, file, self, 4) 
scalers.append(scaler_data) except Exception: - print_error(f"Error loading ScuNET model: {file}", exc_info=True) + errors.report(f"Error loading ScuNET model: {file}", exc_info=True) if add_model2: scaler_data2 = modules.upscaler.UpscalerData(self.model_name2, self.model_url2, self) scalers.append(scaler_data2) diff --git a/modules/api/api.py b/modules/api/api.py index fbd616a3..d34ab422 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -14,9 +14,8 @@ from fastapi.encoders import jsonable_encoder from secrets import compare_digest import modules.shared as shared -from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing +from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors from modules.api import models -from modules.errors import print_error from modules.shared import opts from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.textual_inversion.textual_inversion import create_embedding, train_embedding @@ -145,7 +144,7 @@ def api_middleware(app: FastAPI): print(message) console.print_exception(show_locals=True, max_frames=2, extra_lines=1, suppress=[anyio, starlette], word_wrap=False, width=min([console.width, 200])) else: - print_error(message, exc_info=True) + errors.report(message, exc_info=True) return JSONResponse(status_code=vars(e).get('status_code', 500), content=jsonable_encoder(err)) @app.middleware("http") diff --git a/modules/call_queue.py b/modules/call_queue.py index dba2a9b4..53af6d70 100644 --- a/modules/call_queue.py +++ b/modules/call_queue.py @@ -2,8 +2,7 @@ import html import threading import time -from modules import shared, progress -from modules.errors import print_error +from modules import shared, progress, errors queue_lock = threading.Lock() @@ -62,7 +61,7 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False): arg_str = f"Arguments: {args} {kwargs}"[:max_debug_str_len] if len(arg_str) > max_debug_str_len: arg_str += f" (Argument list truncated at {max_debug_str_len}/{len(arg_str)} characters)" - print_error(f"{message}\n{arg_str}", exc_info=True) + errors.report(f"{message}\n{arg_str}", exc_info=True) shared.state.job = "" shared.state.job_count = 0 diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py index 76143e9f..4260b016 100644 --- a/modules/codeformer_model.py +++ b/modules/codeformer_model.py @@ -5,8 +5,7 @@ import torch import modules.face_restoration import modules.shared -from modules import shared, devices, modelloader -from modules.errors import print_error +from modules import shared, devices, modelloader, errors from modules.paths import models_path # codeformer people made a choice to include modified basicsr library to their project which makes @@ -105,7 +104,7 @@ def setup_model(dirname): del output torch.cuda.empty_cache() except Exception: - print_error('Failed inference for CodeFormer', exc_info=True) + errors.report('Failed inference for CodeFormer', exc_info=True) restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1)) restored_face = restored_face.astype('uint8') @@ -134,6 +133,6 @@ def setup_model(dirname): shared.face_restorers.append(codeformer) except Exception: - print_error("Error setting up CodeFormer", exc_info=True) + errors.report("Error setting up CodeFormer", exc_info=True) # sys.path = stored_sys_path diff --git a/modules/config_states.py b/modules/config_states.py index faeaf28b..6f1ab53f 100644 --- 
a/modules/config_states.py +++ b/modules/config_states.py @@ -11,8 +11,7 @@ from datetime import datetime from collections import OrderedDict import git -from modules import shared, extensions -from modules.errors import print_error +from modules import shared, extensions, errors from modules.paths_internal import script_path, config_states_dir @@ -52,7 +51,7 @@ def get_webui_config(): if os.path.exists(os.path.join(script_path, ".git")): webui_repo = git.Repo(script_path) except Exception: - print_error(f"Error reading webui git info from {script_path}", exc_info=True) + errors.report(f"Error reading webui git info from {script_path}", exc_info=True) webui_remote = None webui_commit_hash = None @@ -132,7 +131,7 @@ def restore_webui_config(config): if os.path.exists(os.path.join(script_path, ".git")): webui_repo = git.Repo(script_path) except Exception: - print_error(f"Error reading webui git info from {script_path}", exc_info=True) + errors.report(f"Error reading webui git info from {script_path}", exc_info=True) return try: @@ -140,7 +139,7 @@ def restore_webui_config(config): webui_repo.git.reset(webui_commit_hash, hard=True) print(f"* Restored webui to commit {webui_commit_hash}.") except Exception: - print_error(f"Error restoring webui to commit{webui_commit_hash}") + errors.report(f"Error restoring webui to commit{webui_commit_hash}") def restore_extension_config(config): diff --git a/modules/errors.py b/modules/errors.py index 41d8dc93..e408f500 100644 --- a/modules/errors.py +++ b/modules/errors.py @@ -3,11 +3,7 @@ import textwrap import traceback -def print_error( - message: str, - *, - exc_info: bool = False, -) -> None: +def report(message: str, *, exc_info: bool = False) -> None: """ Print an error message to stderr, with optional traceback. 
""" @@ -15,7 +11,7 @@ def print_error( print("***", line, file=sys.stderr) if exc_info: print(textwrap.indent(traceback.format_exc(), " "), file=sys.stderr) - print("---") + print("---", file=sys.stderr) def print_error_explanation(message): diff --git a/modules/extensions.py b/modules/extensions.py index 92f93ad9..8608584b 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -1,8 +1,7 @@ import os import threading -from modules import shared -from modules.errors import print_error +from modules import shared, errors from modules.gitpython_hack import Repo from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401 @@ -54,7 +53,7 @@ class Extension: if os.path.exists(os.path.join(self.path, ".git")): repo = Repo(self.path) except Exception: - print_error(f"Error reading github repository info from {self.path}", exc_info=True) + errors.report(f"Error reading github repository info from {self.path}", exc_info=True) if repo is None or repo.bare: self.remote = None @@ -70,7 +69,7 @@ class Extension: self.version = self.commit_hash[:8] except Exception: - print_error(f"Failed reading extension data from Git repository ({self.name})", exc_info=True) + errors.report(f"Failed reading extension data from Git repository ({self.name})", exc_info=True) self.remote = None self.have_info_from_repo = True diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py index d2f647fe..e239a09d 100644 --- a/modules/gfpgan_model.py +++ b/modules/gfpgan_model.py @@ -4,8 +4,7 @@ import facexlib import gfpgan import modules.face_restoration -from modules import paths, shared, devices, modelloader -from modules.errors import print_error +from modules import paths, shared, devices, modelloader, errors model_dir = "GFPGAN" user_path = None @@ -111,4 +110,4 @@ def setup_model(dirname): shared.face_restorers.append(FaceRestorerGFPGAN()) except Exception: - print_error("Error setting up GFPGAN", exc_info=True) + errors.report("Error setting up GFPGAN", exc_info=True) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index fcc1ef20..5d12b449 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -9,8 +9,7 @@ import torch import tqdm from einops import rearrange, repeat from ldm.util import default -from modules import devices, processing, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint -from modules.errors import print_error +from modules import devices, processing, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint, errors from modules.textual_inversion import textual_inversion, logging from modules.textual_inversion.learn_schedule import LearnRateScheduler from torch import einsum @@ -329,7 +328,7 @@ def load_hypernetwork(name): hypernetwork.load(path) return hypernetwork except Exception: - print_error(f"Error loading hypernetwork {path}", exc_info=True) + errors.report(f"Error loading hypernetwork {path}", exc_info=True) return None @@ -766,7 +765,7 @@ Last saved image: {html.escape(last_saved_image)}
 </p>
""" except Exception: - print_error("Exception in training hypernetwork", exc_info=True) + errors.report("Exception in training hypernetwork", exc_info=True) finally: pbar.leave = False pbar.close() diff --git a/modules/images.py b/modules/images.py index 09f728df..30e9ffc5 100644 --- a/modules/images.py +++ b/modules/images.py @@ -16,7 +16,6 @@ import json import hashlib from modules import sd_samplers, shared, script_callbacks, errors -from modules.errors import print_error from modules.paths_internal import roboto_ttf_file from modules.shared import opts @@ -463,7 +462,7 @@ class FilenameGenerator: replacement = fun(self, *pattern_args) except Exception: replacement = None - print_error(f"Error adding [{pattern}] to filename", exc_info=True) + errors.report(f"Error adding [{pattern}] to filename", exc_info=True) if replacement == NOTHING_AND_SKIP_PREVIOUS_TEXT: continue @@ -698,7 +697,7 @@ def read_info_from_image(image): Negative prompt: {json_info["uc"]} Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337""" except Exception: - print_error("Error parsing NovelAI image generation parameters", exc_info=True) + errors.report("Error parsing NovelAI image generation parameters", exc_info=True) return geninfo, items diff --git a/modules/interrogate.py b/modules/interrogate.py index d36e1a5a..9b2c5b60 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -11,7 +11,6 @@ from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from modules import devices, paths, shared, lowvram, modelloader, errors -from modules.errors import print_error blip_image_eval_size = 384 clip_model_name = 'ViT-L/14' @@ -216,7 +215,7 @@ class InterrogateModels: res += f", {match}" except Exception: - print_error("Error interrogating", exc_info=True) + errors.report("Error interrogating", exc_info=True) res += "" self.unload() diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 0bf4cb7e..6e9bb770 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -7,8 +7,7 @@ import platform import json from functools import lru_cache -from modules import cmd_args -from modules.errors import print_error +from modules import cmd_args, errors from modules.paths_internal import script_path, extensions_dir args, _ = cmd_args.parser.parse_known_args() @@ -189,7 +188,7 @@ def run_extension_installer(extension_dir): print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env)) except Exception as e: - print_error(str(e)) + errors.report(str(e)) def list_extensions(settings_file): @@ -200,7 +199,7 @@ def list_extensions(settings_file): with open(settings_file, "r", encoding="utf8") as file: settings = json.load(file) except Exception: - print_error("Could not load settings", exc_info=True) + errors.report("Could not load settings", exc_info=True) disabled_extensions = set(settings.get('disabled_extensions', [])) disable_all_extensions = settings.get('disable_all_extensions', 'none') diff --git a/modules/localization.py b/modules/localization.py index 9a1df343..e8f585da 100644 --- a/modules/localization.py +++ b/modules/localization.py @@ -1,7 +1,7 @@ import json import os -from modules.errors import print_error +from modules import errors localizations = {} @@ -30,6 +30,6 @@ def localization_js(current_localization_name: str) -> str: with open(fn, "r", encoding="utf8") as 
file: data = json.load(file) except Exception: - print_error(f"Error loading localization from {fn}", exc_info=True) + errors.report(f"Error loading localization from {fn}", exc_info=True) return f"window.localization = {json.dumps(data)}" diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py index c8d0c64f..2d27b321 100644 --- a/modules/realesrgan_model.py +++ b/modules/realesrgan_model.py @@ -5,10 +5,10 @@ from PIL import Image from basicsr.utils.download_util import load_file_from_url from realesrgan import RealESRGANer -from modules.errors import print_error from modules.upscaler import Upscaler, UpscalerData from modules.shared import cmd_opts, opts -from modules import modelloader +from modules import modelloader, errors + class UpscalerRealESRGAN(Upscaler): def __init__(self, path): @@ -35,7 +35,7 @@ class UpscalerRealESRGAN(Upscaler): self.scalers.append(scaler) except Exception: - print_error("Error importing Real-ESRGAN", exc_info=True) + errors.report("Error importing Real-ESRGAN", exc_info=True) self.enable = False self.scalers = [] @@ -75,7 +75,7 @@ class UpscalerRealESRGAN(Upscaler): return info except Exception: - print_error("Error making Real-ESRGAN models list", exc_info=True) + errors.report("Error making Real-ESRGAN models list", exc_info=True) return None def load_models(self, _): @@ -132,4 +132,4 @@ def get_realesrgan_models(scaler): ] return models except Exception: - print_error("Error making Real-ESRGAN models list", exc_info=True) + errors.report("Error making Real-ESRGAN models list", exc_info=True) diff --git a/modules/safe.py b/modules/safe.py index b596f565..b1d08a79 100644 --- a/modules/safe.py +++ b/modules/safe.py @@ -9,9 +9,10 @@ import _codecs import zipfile import re -from modules.errors import print_error # PyTorch 1.13 and later have _TypedStorage renamed to TypedStorage +from modules import errors + TypedStorage = torch.storage.TypedStorage if hasattr(torch.storage, 'TypedStorage') else torch.storage._TypedStorage def encode(*args): @@ -136,7 +137,7 @@ def load_with_extra(filename, extra_handler=None, *args, **kwargs): check_pt(filename, extra_handler) except pickle.UnpicklingError: - print_error( + errors.report( f"Error verifying pickled file from {filename}\n" "-----> !!!! The file is most likely corrupted !!!! 
<-----\n" "You can skip this check with --disable-safe-unpickle commandline argument, but that is not going to help you.\n\n", @@ -144,7 +145,7 @@ def load_with_extra(filename, extra_handler=None, *args, **kwargs): ) return None except Exception: - print_error( + errors.report( f"Error verifying pickled file from {filename}\n" f"The file may be malicious, so the program is not going to read it.\n" f"You can skip this check with --disable-safe-unpickle commandline argument.\n\n", diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 6aa9c3b6..ec1469d0 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -5,11 +5,11 @@ from typing import Optional, Dict, Any from fastapi import FastAPI from gradio import Blocks -from modules.errors import print_error +from modules import errors def report_exception(c, job): - print_error(f"Error executing callback {job} for {c.script}", exc_info=True) + errors.report(f"Error executing callback {job} for {c.script}", exc_info=True) class ImageSaveParams: diff --git a/modules/script_loading.py b/modules/script_loading.py index 26efffcb..306a1f35 100644 --- a/modules/script_loading.py +++ b/modules/script_loading.py @@ -1,7 +1,7 @@ import os import importlib.util -from modules.errors import print_error +from modules import errors def load_module(path): @@ -27,4 +27,4 @@ def preload_extensions(extensions_dir, parser): module.preload(parser) except Exception: - print_error(f"Error running preload() for {preload_script}", exc_info=True) + errors.report(f"Error running preload() for {preload_script}", exc_info=True) diff --git a/modules/scripts.py b/modules/scripts.py index a7168fd1..0970f38e 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -5,8 +5,7 @@ from collections import namedtuple import gradio as gr -from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing -from modules.errors import print_error +from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing, errors AlwaysVisible = object() @@ -264,7 +263,7 @@ def load_scripts(): register_scripts_from_module(script_module) except Exception: - print_error(f"Error loading script: {scriptfile.filename}", exc_info=True) + errors.report(f"Error loading script: {scriptfile.filename}", exc_info=True) finally: sys.path = syspath @@ -281,7 +280,7 @@ def wrap_call(func, filename, funcname, *args, default=None, **kwargs): try: return func(*args, **kwargs) except Exception: - print_error(f"Error calling: {filename}/{funcname}", exc_info=True) + errors.report(f"Error calling: {filename}/{funcname}", exc_info=True) return default @@ -447,7 +446,7 @@ class ScriptRunner: script_args = p.script_args[script.args_from:script.args_to] script.process(p, *script_args) except Exception: - print_error(f"Error running process: {script.filename}", exc_info=True) + errors.report(f"Error running process: {script.filename}", exc_info=True) def before_process_batch(self, p, **kwargs): for script in self.alwayson_scripts: @@ -455,7 +454,7 @@ class ScriptRunner: script_args = p.script_args[script.args_from:script.args_to] script.before_process_batch(p, *script_args, **kwargs) except Exception: - print_error(f"Error running before_process_batch: {script.filename}", exc_info=True) + errors.report(f"Error running before_process_batch: {script.filename}", exc_info=True) def process_batch(self, p, **kwargs): for script in self.alwayson_scripts: @@ -463,7 +462,7 @@ class ScriptRunner: script_args = 
p.script_args[script.args_from:script.args_to] script.process_batch(p, *script_args, **kwargs) except Exception: - print_error(f"Error running process_batch: {script.filename}", exc_info=True) + errors.report(f"Error running process_batch: {script.filename}", exc_info=True) def postprocess(self, p, processed): for script in self.alwayson_scripts: @@ -471,7 +470,7 @@ class ScriptRunner: script_args = p.script_args[script.args_from:script.args_to] script.postprocess(p, processed, *script_args) except Exception: - print_error(f"Error running postprocess: {script.filename}", exc_info=True) + errors.report(f"Error running postprocess: {script.filename}", exc_info=True) def postprocess_batch(self, p, images, **kwargs): for script in self.alwayson_scripts: @@ -479,7 +478,7 @@ class ScriptRunner: script_args = p.script_args[script.args_from:script.args_to] script.postprocess_batch(p, *script_args, images=images, **kwargs) except Exception: - print_error(f"Error running postprocess_batch: {script.filename}", exc_info=True) + errors.report(f"Error running postprocess_batch: {script.filename}", exc_info=True) def postprocess_image(self, p, pp: PostprocessImageArgs): for script in self.alwayson_scripts: @@ -487,21 +486,21 @@ class ScriptRunner: script_args = p.script_args[script.args_from:script.args_to] script.postprocess_image(p, pp, *script_args) except Exception: - print_error(f"Error running postprocess_image: {script.filename}", exc_info=True) + errors.report(f"Error running postprocess_image: {script.filename}", exc_info=True) def before_component(self, component, **kwargs): for script in self.scripts: try: script.before_component(component, **kwargs) except Exception: - print_error(f"Error running before_component: {script.filename}", exc_info=True) + errors.report(f"Error running before_component: {script.filename}", exc_info=True) def after_component(self, component, **kwargs): for script in self.scripts: try: script.after_component(component, **kwargs) except Exception: - print_error(f"Error running after_component: {script.filename}", exc_info=True) + errors.report(f"Error running after_component: {script.filename}", exc_info=True) def reload_sources(self, cache): for si, script in list(enumerate(self.scripts)): diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index fd186fa2..5f0ff513 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -9,7 +9,6 @@ from ldm.util import default from einops import rearrange from modules import shared, errors, devices, sub_quadratic_attention -from modules.errors import print_error from modules.hypernetworks import hypernetwork import ldm.modules.attention @@ -139,7 +138,7 @@ if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers: import xformers.ops shared.xformers_available = True except Exception: - print_error("Cannot import xformers", exc_info=True) + errors.report("Cannot import xformers", exc_info=True) def get_available_vram(): diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index b3dcb140..8da050ca 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -12,9 +12,8 @@ import numpy as np from PIL import Image, PngImagePlugin from torch.utils.tensorboard import SummaryWriter -from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint +from modules import shared, devices, sd_hijack, 
processing, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors import modules.textual_inversion.dataset -from modules.errors import print_error from modules.textual_inversion.learn_schedule import LearnRateScheduler from modules.textual_inversion.image_embedding import embedding_to_b64, embedding_from_b64, insert_image_data_embed, extract_image_data_embed, caption_image_overlay @@ -219,7 +218,7 @@ class EmbeddingDatabase: self.load_from_file(fullfn, fn) except Exception: - print_error(f"Error loading embedding {fn}", exc_info=True) + errors.report(f"Error loading embedding {fn}", exc_info=True) continue def load_textual_inversion_embeddings(self, force_reload=False): @@ -643,7 +642,7 @@ Last saved image: {html.escape(last_saved_image)}
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt') save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True) except Exception: - print_error("Error training embedding", exc_info=True) + errors.report("Error training embedding", exc_info=True) finally: pbar.leave = False pbar.close() diff --git a/modules/ui.py b/modules/ui.py index fb6b2498..f361264c 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -12,8 +12,7 @@ import numpy as np from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave -from modules.errors import print_error +from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path, data_path @@ -232,7 +231,7 @@ def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: except json.decoder.JSONDecodeError: if gen_info_string: - print_error(f"Error parsing JSON generation info: {gen_info_string}") + errors.report(f"Error parsing JSON generation info: {gen_info_string}") return [res, gr_show(False)] @@ -1752,7 +1751,7 @@ def create_ui(): try: results = modules.extras.run_modelmerger(*args) except Exception as e: - print_error("Error loading/saving model file", exc_info=True) + errors.report("Error loading/saving model file", exc_info=True) modules.sd_models.list_models() # to remove the potentially missing models from the list return [*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"] return results diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index e2ee9d72..3140ed64 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -11,8 +11,7 @@ import html import shutil import errno -from modules import extensions, shared, paths, config_states -from modules.errors import print_error +from modules import extensions, shared, paths, config_states, errors from modules.paths_internal import config_states_dir from modules.call_queue import wrap_gradio_gpu_call @@ -45,7 +44,7 @@ def apply_and_restart(disable_list, update_list, disable_all): try: ext.fetch_and_reset_hard() except Exception: - print_error(f"Error getting updates for {ext.name}", exc_info=True) + errors.report(f"Error getting updates for {ext.name}", exc_info=True) shared.opts.disabled_extensions = disabled shared.opts.disable_all_extensions = disable_all @@ -111,7 +110,7 @@ def check_updates(id_task, disable_list): if 'FETCH_HEAD' not in str(e): raise except Exception: - print_error(f"Error checking updates for {ext.name}", exc_info=True) + errors.report(f"Error checking updates for {ext.name}", exc_info=True) shared.state.nextjob() diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 4dc24615..83a2f220 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -5,8 +5,7 @@ import shlex import modules.scripts as scripts import gradio as gr -from modules import sd_samplers -from modules.errors import print_error +from modules import sd_samplers, errors from modules.processing import Processed, 
process_images from modules.shared import state @@ -135,7 +134,7 @@ class Script(scripts.Script): try: args = cmdargs(line) except Exception: - print_error(f"Error parsing line {line} as commandline", exc_info=True) + errors.report(f"Error parsing line {line} as commandline", exc_info=True) args = {"prompt": line} else: args = {"prompt": line} -- cgit v1.2.3 From 7393c1f99c9e33871e8b4aaad45f2675e7b410af Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 3 Jun 2023 13:55:35 +0300 Subject: Added sysinfo tab to settings --- modules/errors.py | 26 ++++++++++ modules/sysinfo.py | 135 +++++++++++++++++++++++++++++++++++++++++++++++++ modules/ui.py | 15 +++++- modules/ui_settings.py | 28 +++++++++- style.css | 10 ++++ 5 files changed, 212 insertions(+), 2 deletions(-) create mode 100644 modules/sysinfo.py (limited to 'modules/errors.py') diff --git a/modules/errors.py b/modules/errors.py index e408f500..5271a9fe 100644 --- a/modules/errors.py +++ b/modules/errors.py @@ -3,10 +3,30 @@ import textwrap import traceback +exception_records = [] + + +def record_exception(): + _, e, tb = sys.exc_info() + if e is None: + return + + if exception_records and exception_records[-1] == e: + return + + exception_records.append((e, tb)) + + if len(exception_records) > 5: + exception_records.pop(0) + + def report(message: str, *, exc_info: bool = False) -> None: """ Print an error message to stderr, with optional traceback. """ + + record_exception() + for line in message.splitlines(): print("***", line, file=sys.stderr) if exc_info: @@ -15,6 +35,8 @@ def report(message: str, *, exc_info: bool = False) -> None: def print_error_explanation(message): + record_exception() + lines = message.strip().split("\n") max_len = max([len(x) for x in lines]) @@ -25,6 +47,8 @@ def print_error_explanation(message): def display(e: Exception, task, *, full_traceback=False): + record_exception() + print(f"{task or 'error'}: {type(e).__name__}", file=sys.stderr) te = traceback.TracebackException.from_exception(e) if full_traceback: @@ -44,6 +68,8 @@ already_displayed = {} def display_once(e: Exception, task): + record_exception() + if task in already_displayed: return diff --git a/modules/sysinfo.py b/modules/sysinfo.py new file mode 100644 index 00000000..00f684f6 --- /dev/null +++ b/modules/sysinfo.py @@ -0,0 +1,135 @@ +import json +import os +import sys +import traceback + +import platform +import hashlib +import pkg_resources +import psutil +import re + +import launch +from modules import paths_internal, timer + +checksum_token = "DontStealMyGamePlz__WINNERS_DONT_USE_DRUGS__DONT_COPY_THAT_FLOPPY" + + +def pretty_bytes(num, suffix="B"): + for unit in ["", "K", "M", "G", "T", "P", "E", "Z", "Y"]: + if abs(num) < 1024 or unit == 'Y': + return f"{num:.0f}{unit}{suffix}" + num /= 1024 + + +def get(): + res = get_dict() + + text = json.dumps(res, ensure_ascii=False, indent=4) + + h = hashlib.sha256(text.encode("utf8")) + text = text.replace(checksum_token, h.hexdigest()) + + return text + + +re_checksum = re.compile(r'"Checksum": "([0-9a-fA-F]{64})"') + + +def check(x): + m = re.search(re_checksum, x) + if not m: + return False + + replaced = re.sub(re_checksum, f'"Checksum": "{checksum_token}"', x) + + h = hashlib.sha256(replaced.encode("utf8")) + return h.hexdigest() == m.group(1) + + +def get_dict(): + ram = psutil.virtual_memory() + + res = { + "Platform": platform.platform(), + "Python": platform.python_version(), + "Version": launch.git_tag(), + "Commit": launch.commit_hash(), + "Script path": 
paths_internal.script_path, + "Data path": paths_internal.data_path, + "Extensions dir": paths_internal.extensions_dir, + "Checksum": checksum_token, + "Commandline": sys.argv, + "Torch env info": get_torch_sysinfo(), + "Exceptions": get_exceptions(), + "CPU": { + "model": platform.processor(), + "count logical": psutil.cpu_count(logical=True), + "count physical": psutil.cpu_count(logical=False), + }, + "RAM": { + x: pretty_bytes(getattr(ram, x, 0)) for x in ["total", "used", "free", "active", "inactive", "buffers", "cached", "shared"] if getattr(ram, x, 0) != 0 + }, + "Extensions": get_extensions(enabled=True), + "Inactive extensions": get_extensions(enabled=False), + "Environment": {k: os.environ[k] for k in sorted(os.environ)}, + "Config": get_config(), + "Startup": timer.startup_record, + "Packages": sorted([f"{pkg.key}=={pkg.version}" for pkg in pkg_resources.working_set]), + } + + return res + + +def format_traceback(tb): + return [[f"{x.filename}, line {x.lineno}, {x.name}", x.line] for x in traceback.extract_tb(tb)] + + +def get_exceptions(): + try: + from modules import errors + items = [x for x in reversed(errors.exception_records)] + + return [{"exception": str(e), "traceback": format_traceback(tb)} for e, tb in items] + except Exception as e: + return str(e) + + +re_newline = re.compile(r"\r*\n") + + +def get_torch_sysinfo(): + try: + import torch.utils.collect_env + info = torch.utils.collect_env.get_env_info()._asdict() + + return {k: re.split(re_newline, str(v)) if "\n" in str(v) else v for k, v in info.items()} + except Exception as e: + return str(e) + + +def get_extensions(*, enabled): + + try: + from modules import extensions + + def to_json(x: extensions.Extension): + return { + "name": x.name, + "path": x.path, + "version": x.version, + "branch": x.branch, + "remote": x.remote, + } + + return [to_json(x) for x in extensions.extensions if not x.is_builtin and x.enabled == enabled] + except Exception as e: + return str(e) + + +def get_config(): + try: + from modules import shared + return shared.opts.data + except Exception as e: + return str(e) diff --git a/modules/ui.py b/modules/ui.py index 988b2003..38e585ca 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1,3 +1,4 @@ +import datetime import json import mimetypes import os @@ -11,7 +12,7 @@ import numpy as np from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path from modules.ui_common import create_refresh_button @@ -1598,3 +1599,15 @@ def setup_ui_api(app): app.add_api_route("/internal/ping", lambda: {}, methods=["GET"]) app.add_api_route("/internal/profile-startup", lambda: timer.startup_record, methods=["GET"]) + + def download_sysinfo(attachment=False): + from fastapi.responses import PlainTextResponse + + text = sysinfo.get() + filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt" + + return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment 
else "inline"}; filename="{filename}"'}) + + app.add_api_route("/internal/sysinfo", download_sysinfo, methods=["GET"]) + app.add_api_route("/internal/sysinfo-download", lambda: download_sysinfo(attachment=True), methods=["GET"]) + diff --git a/modules/ui_settings.py b/modules/ui_settings.py index 7874298e..49445bd8 100644 --- a/modules/ui_settings.py +++ b/modules/ui_settings.py @@ -1,6 +1,6 @@ import gradio as gr -from modules import ui_common, shared, script_callbacks, scripts, sd_models +from modules import ui_common, shared, script_callbacks, scripts, sd_models, sysinfo from modules.call_queue import wrap_gradio_call from modules.shared import opts from modules.ui_components import FormRow @@ -157,6 +157,17 @@ class UiSettings: with gr.TabItem("Defaults", id="defaults", elem_id="settings_tab_defaults"): loadsave.create_ui() + with gr.TabItem("Sysinfo", id="sysinfo", elem_id="settings_tab_sysinfo"): + gr.HTML('Download system info', elem_id="sysinfo_download") + + with gr.Row(): + with gr.Column(scale=1): + sysinfo_check_file = gr.File(label="Check system info for validity", type='binary') + with gr.Column(scale=1): + sysinfo_check_output = gr.HTML("", elem_id="sysinfo_validity") + with gr.Column(scale=100): + pass + with gr.TabItem("Actions", id="actions", elem_id="settings_tab_actions"): request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications") download_localization = gr.Button(value='Download localization template', elem_id="download_localization") @@ -215,6 +226,21 @@ class UiSettings: outputs=[], ) + def check_file(x): + if x is None: + return '' + + if sysinfo.check(x.decode('utf8', errors='ignore')): + return 'Valid' + + return 'Invalid' + + sysinfo_check_file.change( + fn=check_file, + inputs=[sysinfo_check_file], + outputs=[sysinfo_check_output], + ) + self.interface = settings_interface def add_quicksettings(self): diff --git a/style.css b/style.css index ba081b56..d8bdb73b 100644 --- a/style.css +++ b/style.css @@ -450,6 +450,16 @@ table.popup-table .link{ opacity: 0.75; } +#sysinfo_download a{ + font-size: 24pt; + text-decoration: underline; +} + +#sysinfo_validity{ + font-size: 18pt; +} + + /* live preview */ .progressDiv{ position: relative; -- cgit v1.2.3