From d8b427f8aa787e2ee21a63c1bea5e0eabaaf4979 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 16 Sep 2022 10:21:59 +0300 Subject: remove the warning at startup related to previous PR with batch processing --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index b6d5dcd8..738ac945 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -649,7 +649,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo): image = gr.Image(label="Source", source="upload", interactive=True, type="pil") with gr.TabItem('Batch Process'): - image_batch = gr.File(label="Batch Process", file_count="multiple", source="upload", interactive=True, type="file") + image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file") upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2) -- cgit v1.2.3 From ed6787ca2fe950f633a925ccb0467eafd4ec0f43 Mon Sep 17 00:00:00 2001 From: EyeDeck Date: Sat, 17 Sep 2022 00:49:31 -0400 Subject: Add VRAM monitoring --- modules/memmon.py | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ modules/shared.py | 5 ++++ modules/ui.py | 14 +++++++++- style.css | 18 ++++++++++++- 4 files changed, 112 insertions(+), 2 deletions(-) create mode 100644 modules/memmon.py (limited to 'modules/ui.py') diff --git a/modules/memmon.py b/modules/memmon.py new file mode 100644 index 00000000..f2cac841 --- /dev/null +++ b/modules/memmon.py @@ -0,0 +1,77 @@ +import threading +import time +from collections import defaultdict + +import torch + + +class MemUsageMonitor(threading.Thread): + run_flag = None + device = None + disabled = False + opts = None + data = None + + def __init__(self, name, device, opts): + threading.Thread.__init__(self) + self.name = name + self.device = device + self.opts = opts + + self.daemon = True + self.run_flag = threading.Event() + self.data = defaultdict(int) + + def run(self): + if self.disabled: + return + + while True: + self.run_flag.wait() + + torch.cuda.reset_peak_memory_stats() + self.data.clear() + + if self.opts.memmon_poll_rate <= 0: + self.run_flag.clear() + continue + + self.data["min_free"] = torch.cuda.mem_get_info()[0] + + while self.run_flag.is_set(): + free, total = torch.cuda.mem_get_info() # calling with self.device errors, torch bug? 
+ self.data["min_free"] = min(self.data["min_free"], free) + + time.sleep(1 / self.opts.memmon_poll_rate) + + def dump_debug(self): + print(self, 'recorded data:') + for k, v in self.read().items(): + print(k, -(v // -(1024 ** 2))) + + print(self, 'raw torch memory stats:') + tm = torch.cuda.memory_stats(self.device) + for k, v in tm.items(): + if 'bytes' not in k: + continue + print('\t' if 'peak' in k else '', k, -(v // -(1024 ** 2))) + + print(torch.cuda.memory_summary()) + + def monitor(self): + self.run_flag.set() + + def read(self): + free, total = torch.cuda.mem_get_info() + self.data["total"] = total + + torch_stats = torch.cuda.memory_stats(self.device) + self.data["active_peak"] = torch_stats["active_bytes.all.peak"] + self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"] + self.data["system_peak"] = total - self.data["min_free"] + + return self.data + + def stop(self): + self.run_flag.clear() + return self.read() diff --git a/modules/shared.py b/modules/shared.py index da56b6ae..4f877036 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -12,6 +12,7 @@ from modules.paths import script_path, sd_path from modules.devices import get_optimal_device import modules.styles import modules.interrogate +import modules.memmon sd_model_file = os.path.join(script_path, 'model.ckpt') if not os.path.exists(sd_model_file): @@ -138,6 +139,7 @@ class Options: "show_progressbar": OptionInfo(True, "Show progressbar"), "show_progress_every_n_steps": OptionInfo(0, "Show show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}), "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. Broken in PyCharm console."), + "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step":1}), "face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), "code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."), @@ -217,3 +219,6 @@ class TotalTQDM: total_tqdm = TotalTQDM() + +mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts) +mem_mon.start() diff --git a/modules/ui.py b/modules/ui.py index 738ac945..01b2ba85 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -119,6 +119,7 @@ def save_files(js_data, images, index): def wrap_gradio_call(func): def f(*args, **kwargs): + shared.mem_mon.monitor() t = time.perf_counter() try: @@ -135,8 +136,19 @@ def wrap_gradio_call(func): elapsed = time.perf_counter() - t + mem_stats = {k:-(v//-(1024*1024)) for k,v in shared.mem_mon.stop().items()} + active_peak = mem_stats['active_peak'] + reserved_peak = mem_stats['reserved_peak'] + sys_peak = '?' if opts.memmon_poll_rate <= 0 else mem_stats['system_peak'] + sys_total = mem_stats['total'] + sys_pct = '?' if opts.memmon_poll_rate <= 0 else round(sys_peak/sys_total * 100, 2) + vram_tooltip = "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data. " \ + "Torch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data. " \ + "Sys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%)." 
+
         # last item is always HTML
-        res[-1] = res[-1] + f"<p class='performance'>Time taken: {elapsed:.2f}s</p>"
+        res[-1] += f"<div class='performance'><p class='time'>Time taken: {elapsed:.2f}s</p>" \
+                   f"<p class='vram' title='{vram_tooltip}'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p></div>
" shared.state.interrupted = False diff --git a/style.css b/style.css index d41c098c..67ce8550 100644 --- a/style.css +++ b/style.css @@ -1,5 +1,21 @@ .output-html p {margin: 0 0.5em;} -.performance { font-size: 0.85em; color: #444; } + +.performance { + font-size: 0.85em; + color: #444; + display: flex; + justify-content: space-between; + white-space: nowrap; +} + +.performance .time { + margin-right: 0; +} + +.performance .vram { + margin-left: 0; + text-align: right; +} #generate{ min-height: 4.5em; -- cgit v1.2.3 From b8be33dad13d4937c6ef8fbb49715d843c3dd586 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 17 Sep 2022 09:23:31 +0300 Subject: hide VRAM text if polling is disabled --- modules/ui.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index 01b2ba85..437bce66 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -136,7 +136,7 @@ def wrap_gradio_call(func): elapsed = time.perf_counter() - t - mem_stats = {k:-(v//-(1024*1024)) for k,v in shared.mem_mon.stop().items()} + mem_stats = {k: -(v//-(1024*1024)) for k,v in shared.mem_mon.stop().items()} active_peak = mem_stats['active_peak'] reserved_peak = mem_stats['reserved_peak'] sys_peak = '?' if opts.memmon_poll_rate <= 0 else mem_stats['system_peak'] @@ -146,9 +146,10 @@ def wrap_gradio_call(func): "Torch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data. " \ "Sys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%)." + vram_html = '' if opts.memmon_poll_rate == 0 else f"
<p class='vram' title='{vram_tooltip}'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
+
         # last item is always HTML
-        res[-1] += f"<div class='performance'><p class='time'>Time taken: {elapsed:.2f}s</p>" \
-                   f"<p class='vram' title='{vram_tooltip}'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p></div>"
+        res[-1] += f"<div class='performance'><p class='time'>Time taken: {elapsed:.2f}s</p>{vram_html}</div>
" shared.state.interrupted = False -- cgit v1.2.3 From 247f58a5e740a7bd3980815961425b778d77ec28 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 17 Sep 2022 12:05:04 +0300 Subject: add support for switching model checkpoints at runtime --- modules/images.py | 2 +- modules/processing.py | 2 +- modules/sd_models.py | 148 ++++++++++++++++++++++++++++++++++++++++++++++++++ modules/shared.py | 19 +++++-- modules/ui.py | 5 ++ webui.py | 61 ++++----------------- 6 files changed, 179 insertions(+), 58 deletions(-) create mode 100644 modules/sd_models.py (limited to 'modules/ui.py') diff --git a/modules/images.py b/modules/images.py index b62c48f8..a3064333 100644 --- a/modules/images.py +++ b/modules/images.py @@ -274,7 +274,7 @@ def apply_filename_pattern(x, p, seed, prompt): x = x.replace("[height]", str(p.height)) x = x.replace("[sampler]", sd_samplers.samplers[p.sampler_index].name) - x = x.replace("[model_hash]", shared.sd_model_hash) + x = x.replace("[model_hash]", shared.sd_model.sd_model_hash) x = x.replace("[date]", datetime.date.today().isoformat()) if cmd_opts.hide_ui_dir_config: diff --git a/modules/processing.py b/modules/processing.py index 81c83f06..3a4ff224 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -227,7 +227,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: "Seed": all_seeds[index], "Face restoration": (opts.face_restoration_model if p.restore_faces else None), "Size": f"{p.width}x{p.height}", - "Model hash": (None if not opts.add_model_hash_to_info or not shared.sd_model_hash else shared.sd_model_hash), + "Model hash": (None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), "Batch size": (None if p.batch_size < 2 else p.batch_size), "Batch pos": (None if p.batch_size < 2 else position_in_batch), "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]), diff --git a/modules/sd_models.py b/modules/sd_models.py new file mode 100644 index 00000000..036af0e4 --- /dev/null +++ b/modules/sd_models.py @@ -0,0 +1,148 @@ +import glob +import os.path +import sys +from collections import namedtuple +import torch +from omegaconf import OmegaConf + + +from ldm.util import instantiate_from_config + +from modules import shared + +CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash']) +checkpoints_list = {} + +try: + # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start. 
+ + from transformers import logging + + logging.set_verbosity_error() +except Exception: + pass + + +def list_models(): + checkpoints_list.clear() + + model_dir = os.path.abspath(shared.cmd_opts.ckpt_dir) + + def modeltitle(path, h): + abspath = os.path.abspath(path) + + if abspath.startswith(model_dir): + name = abspath.replace(model_dir, '') + else: + name = os.path.basename(path) + + if name.startswith("\\") or name.startswith("/"): + name = name[1:] + + return f'{name} [{h}]' + + cmd_ckpt = shared.cmd_opts.ckpt + if os.path.exists(cmd_ckpt): + h = model_hash(cmd_ckpt) + title = modeltitle(cmd_ckpt, h) + checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h) + elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file: + print(f"Checkpoint in --ckpt argument not found: {cmd_ckpt}", file=sys.stderr) + + if os.path.exists(model_dir): + for filename in glob.glob(model_dir + '/**/*.ckpt', recursive=True): + h = model_hash(filename) + title = modeltitle(filename, h) + checkpoints_list[title] = CheckpointInfo(filename, title, h) + + +def model_hash(filename): + try: + with open(filename, "rb") as file: + import hashlib + m = hashlib.sha256() + + file.seek(0x100000) + m.update(file.read(0x10000)) + return m.hexdigest()[0:8] + except FileNotFoundError: + return 'NOFILE' + + +def select_checkpoint(): + model_checkpoint = shared.opts.sd_model_checkpoint + checkpoint_info = checkpoints_list.get(model_checkpoint, None) + if checkpoint_info is not None: + return checkpoint_info + + if len(checkpoints_list) == 0: + print(f"Checkpoint {model_checkpoint} not found and no other checkpoints found", file=sys.stderr) + return None + + checkpoint_info = next(iter(checkpoints_list.values())) + if model_checkpoint is not None: + print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr) + + return checkpoint_info + + +def load_model_weights(model, checkpoint_file, sd_model_hash): + print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}") + + pl_sd = torch.load(checkpoint_file, map_location="cpu") + if "global_step" in pl_sd: + print(f"Global Step: {pl_sd['global_step']}") + sd = pl_sd["state_dict"] + + model.load_state_dict(sd, strict=False) + + if shared.cmd_opts.opt_channelslast: + model.to(memory_format=torch.channels_last) + + if not shared.cmd_opts.no_half: + model.half() + + model.sd_model_hash = sd_model_hash + model.sd_model_checkpint = checkpoint_file + + +def load_model(): + from modules import lowvram, sd_hijack + checkpoint_info = select_checkpoint() + + sd_config = OmegaConf.load(shared.cmd_opts.config) + sd_model = instantiate_from_config(sd_config.model) + load_model_weights(sd_model, checkpoint_info.filename, checkpoint_info.hash) + + if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: + lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram) + else: + sd_model.to(shared.device) + + sd_hijack.model_hijack.hijack(sd_model) + + sd_model.eval() + + print(f"Model loaded.") + return sd_model + + +def reload_model_weights(sd_model): + from modules import lowvram, devices + checkpoint_info = select_checkpoint() + + if sd_model.sd_model_checkpint == checkpoint_info.filename: + return + + if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: + lowvram.send_everything_to_cpu() + else: + sd_model.to(devices.cpu) + + load_model_weights(sd_model, checkpoint_info.filename, checkpoint_info.hash) + + if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram: + sd_model.to(devices.device) + + print(f"Weights loaded.") 
+ return sd_model diff --git a/modules/shared.py b/modules/shared.py index 4f877036..3c3aa9b6 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -13,14 +13,15 @@ from modules.devices import get_optimal_device import modules.styles import modules.interrogate import modules.memmon +import modules.sd_models sd_model_file = os.path.join(script_path, 'model.ckpt') -if not os.path.exists(sd_model_file): - sd_model_file = "models/ldm/stable-diffusion-v1/model.ckpt" +default_sd_model_file = sd_model_file parser = argparse.ArgumentParser() parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",) -parser.add_argument("--ckpt", type=str, default=os.path.join(sd_path, sd_model_file), help="path to checkpoint of model",) +parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; this checkpoint will be added to the list of checkpoints and loaded by default if you don't have a checkpoint selected in settings",) +parser.add_argument("--ckpt-dir", type=str, default=os.path.join(script_path, 'models'), help="path to directory with stable diffusion checkpoints",) parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN')) parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default='GFPGANv1.3.pth') parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats") @@ -88,13 +89,17 @@ interrogator = modules.interrogate.InterrogateModels("interrogate") face_restorers = [] +modules.sd_models.list_models() + + class Options: class OptionInfo: - def __init__(self, default=None, label="", component=None, component_args=None): + def __init__(self, default=None, label="", component=None, component_args=None, onchange=None): self.default = default self.label = label self.component = component self.component_args = component_args + self.onchange = onchange data = None hide_dirs = {"visible": False} if cmd_opts.hide_ui_dir_config else None @@ -150,6 +155,7 @@ class Options: "interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}), "interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}), "interrogate_clip_dict_limit": OptionInfo(1500, "Interrogate: maximum number of lines in text file (0 = No limit)"), + "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Radio, lambda: {"choices": [x.title for x in modules.sd_models.checkpoints_list.values()]}), } def __init__(self): @@ -180,6 +186,10 @@ class Options: with open(filename, "r", encoding="utf8") as file: self.data = json.load(file) + def onchange(self, key, func): + item = self.data_labels.get(key) + item.onchange = func + opts = Options() if os.path.exists(config_filename): @@ -188,7 +198,6 @@ if os.path.exists(config_filename): sd_upscalers = [] sd_model = None -sd_model_hash = '' progress_print_out = sys.stdout diff --git a/modules/ui.py b/modules/ui.py index 437bce66..36e3c664 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -758,7 +758,12 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo): if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False: continue + oldval = opts.data.get(key, None) 
opts.data[key] = value + + if oldval != value and opts.data_labels[key].onchange is not None: + opts.data_labels[key].onchange() + up.append(comp.update(value=value)) opts.save(shared.config_filename) diff --git a/webui.py b/webui.py index add72123..ff8997db 100644 --- a/webui.py +++ b/webui.py @@ -3,13 +3,8 @@ import threading from modules.paths import script_path -import torch -from omegaconf import OmegaConf - import signal -from ldm.util import instantiate_from_config - from modules.shared import opts, cmd_opts, state import modules.shared as shared import modules.ui @@ -24,6 +19,7 @@ import modules.extras import modules.lowvram import modules.txt2img import modules.img2img +import modules.sd_models modules.codeformer_model.setup_codeformer() @@ -33,29 +29,17 @@ shared.face_restorers.append(modules.face_restoration.FaceRestoration()) esrgan.load_models(cmd_opts.esrgan_models_path) realesrgan.setup_realesrgan() +queue_lock = threading.Lock() -def load_model_from_config(config, ckpt, verbose=False): - print(f"Loading model [{shared.sd_model_hash}] from {ckpt}") - pl_sd = torch.load(ckpt, map_location="cpu") - if "global_step" in pl_sd: - print(f"Global Step: {pl_sd['global_step']}") - sd = pl_sd["state_dict"] - model = instantiate_from_config(config.model) - m, u = model.load_state_dict(sd, strict=False) - if len(m) > 0 and verbose: - print("missing keys:") - print(m) - if len(u) > 0 and verbose: - print("unexpected keys:") - print(u) - if cmd_opts.opt_channelslast: - model = model.to(memory_format=torch.channels_last) - model.eval() - return model +def wrap_queued_call(func): + def f(*args, **kwargs): + with queue_lock: + res = func(*args, **kwargs) + return res -queue_lock = threading.Lock() + return f def wrap_gradio_gpu_call(func): @@ -80,33 +64,8 @@ def wrap_gradio_gpu_call(func): modules.scripts.load_scripts(os.path.join(script_path, "scripts")) -try: - # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start. 
- - from transformers import logging - - logging.set_verbosity_error() -except Exception: - pass - -with open(cmd_opts.ckpt, "rb") as file: - import hashlib - m = hashlib.sha256() - - file.seek(0x100000) - m.update(file.read(0x10000)) - shared.sd_model_hash = m.hexdigest()[0:8] - -sd_config = OmegaConf.load(cmd_opts.config) -shared.sd_model = load_model_from_config(sd_config, cmd_opts.ckpt) -shared.sd_model = (shared.sd_model if cmd_opts.no_half else shared.sd_model.half()) - -if cmd_opts.lowvram or cmd_opts.medvram: - modules.lowvram.setup_for_low_vram(shared.sd_model, cmd_opts.medvram) -else: - shared.sd_model = shared.sd_model.to(shared.device) - -modules.sd_hijack.model_hijack.hijack(shared.sd_model) +shared.sd_model = modules.sd_models.load_model() +shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model))) def webui(): -- cgit v1.2.3 From 99585b3514e2d7e987651d5c6a0806f933af012b Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 17 Sep 2022 12:38:15 +0300 Subject: moved progressbar to top by request --- modules/ui.py | 7 ++++--- style.css | 6 ++++++ 2 files changed, 10 insertions(+), 3 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index 36e3c664..960f1e36 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -337,6 +337,8 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo): custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False) with gr.Column(variant='panel'): + progressbar = gr.HTML(elem_id="progressbar") + with gr.Group(): txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False) txt2img_gallery = gr.Gallery(label='Output', elem_id='txt2img_gallery').style(grid=4) @@ -349,8 +351,6 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo): send_to_extras = gr.Button('Send to extras') interrupt = gr.Button('Interrupt') - progressbar = gr.HTML(elem_id="progressbar") - with gr.Group(): html_info = gr.HTML() generation_info = gr.Textbox(visible=False) @@ -474,6 +474,8 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo): custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True) with gr.Column(variant='panel'): + progressbar = gr.HTML(elem_id="progressbar") + with gr.Group(): img2img_preview = gr.Image(elem_id='img2img_preview', visible=False) img2img_gallery = gr.Gallery(label='Output', elem_id='img2img_gallery').style(grid=4) @@ -487,7 +489,6 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo): interrupt = gr.Button('Interrupt') img2img_save_style = gr.Button('Save prompt as style') - progressbar = gr.HTML(elem_id="progressbar") with gr.Group(): html_info = gr.HTML() diff --git a/style.css b/style.css index 67ce8550..752d2cf4 100644 --- a/style.css +++ b/style.css @@ -167,6 +167,12 @@ input[type="range"]{ #txt2img_negative_prompt, #img2img_negative_prompt{ } +#progressbar{ + position: absolute; + z-index: 1000; + right: 0; +} + .progressDiv{ width: 100%; height: 30px; -- cgit v1.2.3 From 8d197b6a92fbcea8e3394159247c19cea080c975 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 17 Sep 2022 16:28:19 +0300 Subject: added user.css support --- .gitignore | 1 + modules/ui.py | 5 +++++ style.css | 3 --- 3 files changed, 6 insertions(+), 3 deletions(-) (limited to 'modules/ui.py') diff --git a/.gitignore b/.gitignore index 1dffb108..4f830e61 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,4 @@ __pycache__ /webui-user.bat /webui-user.sh /interrogate +/user.css diff --git 
a/modules/ui.py b/modules/ui.py index 960f1e36..b97ffd07 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -801,6 +801,11 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo): with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file: css = file.read() + if os.path.exists(os.path.join(script_path, "style.css")): + with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file: + usercss = file.read() + css += usercss + if not cmd_opts.no_progressbar_hiding: css += css_hide_progressbar diff --git a/style.css b/style.css index 2bdd1e0e..36d073fa 100644 --- a/style.css +++ b/style.css @@ -21,9 +21,6 @@ min-height: 4.5em; } -#txt2img_gallery, #img2img_gallery{ - min-height: 768px; -} #txt2img_gallery img, #img2img_gallery img{ object-fit: scale-down; } -- cgit v1.2.3 From 56ff118845748d1302968039e13703b6ad8107c4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 17 Sep 2022 16:35:58 +0300 Subject: typo --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index b97ffd07..2f6eb307 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -801,7 +801,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo): with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file: css = file.read() - if os.path.exists(os.path.join(script_path, "style.css")): + if os.path.exists(os.path.join(script_path, "user.css")): with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file: usercss = file.read() css += usercss -- cgit v1.2.3
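
As a reading aid, here is a minimal, hypothetical sketch of how the MemUsageMonitor introduced in the VRAM-monitoring patch above is driven; it mirrors the monitor()/stop() calls that wrap_gradio_call makes in modules/ui.py. The FakeOpts stub and the explicit CUDA device are illustration-only assumptions; the webui itself passes shared.device and shared.opts when constructing shared.mem_mon.

# Hypothetical usage sketch, not part of the patch series above.
# Assumes a CUDA-capable torch install; FakeOpts stands in for shared.opts.
import torch

from modules import memmon


class FakeOpts:
    memmon_poll_rate = 8  # VRAM polls per second; 0 disables polling


mem_mon = memmon.MemUsageMonitor("MemMon", torch.device("cuda"), FakeOpts())
mem_mon.start()    # daemon thread; idles until monitor() sets its run flag

mem_mon.monitor()  # begin polling torch.cuda.mem_get_info() for free VRAM
# ... run a generation job here ...
stats = {k: -(v // -(1024 * 1024)) for k, v in mem_mon.stop().items()}  # bytes -> MiB, rounded up
print(f"Torch active/reserved: {stats['active_peak']}/{stats['reserved_peak']} MiB, "
      f"Sys VRAM: {stats['system_peak']}/{stats['total']} MiB")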