From 5d7d1823afab0a051a3fbbdb3213bae8051350b7 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 1 Jan 2024 17:25:30 +0300 Subject: rename infotext.py again, this time to infotext_utils.py; I didn't realize infotext would be used for variable names in multiple places, which makes it awkward to import the module; also fix the bug I caused by this rename that breaks tests --- modules/infotext_utils.py | 502 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 502 insertions(+) create mode 100644 modules/infotext_utils.py (limited to 'modules/infotext_utils.py') diff --git a/modules/infotext_utils.py b/modules/infotext_utils.py new file mode 100644 index 00000000..26e9b949 --- /dev/null +++ b/modules/infotext_utils.py @@ -0,0 +1,502 @@ +from __future__ import annotations +import base64 +import io +import json +import os +import re +import sys + +import gradio as gr +from modules.paths import data_path +from modules import shared, ui_tempdir, script_callbacks, processing, infotext_versions +from PIL import Image + +sys.modules['modules.generation_parameters_copypaste'] = sys.modules[__name__] # alias for old name + +re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)' +re_param = re.compile(re_param_code) +re_imagesize = re.compile(r"^(\d+)x(\d+)$") +re_hypernet_hash = re.compile("\(([0-9a-f]+)\)$") +type_of_gr_update = type(gr.update()) + + +class ParamBinding: + def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=None): + self.paste_button = paste_button + self.tabname = tabname + self.source_text_component = source_text_component + self.source_image_component = source_image_component + self.source_tabname = source_tabname + self.override_settings_component = override_settings_component + self.paste_field_names = paste_field_names or [] + + +class PasteField(tuple): + def __new__(cls, component, target, *, api=None): + return super().__new__(cls, (component, target)) + + def __init__(self, component, target, *, api=None): + super().__init__() + + self.api = api + self.component = component + self.label = target if isinstance(target, str) else None + self.function = target if callable(target) else None + + +paste_fields: dict[str, dict] = {} +registered_param_bindings: list[ParamBinding] = [] + + +def reset(): + paste_fields.clear() + registered_param_bindings.clear() + + +def quote(text): + if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text): + return text + + return json.dumps(text, ensure_ascii=False) + + +def unquote(text): + if len(text) == 0 or text[0] != '"' or text[-1] != '"': + return text + + try: + return json.loads(text) + except Exception: + return text + + +def image_from_url_text(filedata): + if filedata is None: + return None + + if type(filedata) == list and filedata and type(filedata[0]) == dict and filedata[0].get("is_file", False): + filedata = filedata[0] + + if type(filedata) == dict and filedata.get("is_file", False): + filename = filedata["name"] + is_in_right_dir = ui_tempdir.check_tmp_file(shared.demo, filename) + assert is_in_right_dir, 'trying to open image file outside of allowed directories' + + filename = filename.rsplit('?', 1)[0] + return Image.open(filename) + + if type(filedata) == list: + if len(filedata) == 0: + return None + + filedata = filedata[0] + + if filedata.startswith("data:image/png;base64,"): + filedata = filedata[len("data:image/png;base64,"):] + + 
filedata = base64.decodebytes(filedata.encode('utf-8')) + image = Image.open(io.BytesIO(filedata)) + return image + + +def add_paste_fields(tabname, init_img, fields, override_settings_component=None): + + if fields: + for i in range(len(fields)): + if not isinstance(fields[i], PasteField): + fields[i] = PasteField(*fields[i]) + + paste_fields[tabname] = {"init_img": init_img, "fields": fields, "override_settings_component": override_settings_component} + + # backwards compatibility for existing extensions + import modules.ui + if tabname == 'txt2img': + modules.ui.txt2img_paste_fields = fields + elif tabname == 'img2img': + modules.ui.img2img_paste_fields = fields + + +def create_buttons(tabs_list): + buttons = {} + for tab in tabs_list: + buttons[tab] = gr.Button(f"Send to {tab}", elem_id=f"{tab}_tab") + return buttons + + +def bind_buttons(buttons, send_image, send_generate_info): + """old function for backwards compatibility; do not use this, use register_paste_params_button""" + for tabname, button in buttons.items(): + source_text_component = send_generate_info if isinstance(send_generate_info, gr.components.Component) else None + source_tabname = send_generate_info if isinstance(send_generate_info, str) else None + + register_paste_params_button(ParamBinding(paste_button=button, tabname=tabname, source_text_component=source_text_component, source_image_component=send_image, source_tabname=source_tabname)) + + +def register_paste_params_button(binding: ParamBinding): + registered_param_bindings.append(binding) + + +def connect_paste_params_buttons(): + for binding in registered_param_bindings: + destination_image_component = paste_fields[binding.tabname]["init_img"] + fields = paste_fields[binding.tabname]["fields"] + override_settings_component = binding.override_settings_component or paste_fields[binding.tabname]["override_settings_component"] + + destination_width_component = next(iter([field for field, name in fields if name == "Size-1"] if fields else []), None) + destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None) + + if binding.source_image_component and destination_image_component: + if isinstance(binding.source_image_component, gr.Gallery): + func = send_image_and_dimensions if destination_width_component else image_from_url_text + jsfunc = "extract_image_from_gallery" + else: + func = send_image_and_dimensions if destination_width_component else lambda x: x + jsfunc = None + + binding.paste_button.click( + fn=func, + _js=jsfunc, + inputs=[binding.source_image_component], + outputs=[destination_image_component, destination_width_component, destination_height_component] if destination_width_component else [destination_image_component], + show_progress=False, + ) + + if binding.source_text_component is not None and fields is not None: + connect_paste(binding.paste_button, fields, binding.source_text_component, override_settings_component, binding.tabname) + + if binding.source_tabname is not None and fields is not None: + paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (["Seed"] if shared.opts.send_seed else []) + binding.paste_field_names + binding.paste_button.click( + fn=lambda *x: x, + inputs=[field for field, name in paste_fields[binding.source_tabname]["fields"] if name in paste_field_names], + outputs=[field for field, name in fields if name in paste_field_names], + show_progress=False, + ) + + binding.paste_button.click( + fn=None, + _js=f"switch_to_{binding.tabname}", 
+ inputs=None, + outputs=None, + show_progress=False, + ) + + +def send_image_and_dimensions(x): + if isinstance(x, Image.Image): + img = x + else: + img = image_from_url_text(x) + + if shared.opts.send_size and isinstance(img, Image.Image): + w = img.width + h = img.height + else: + w = gr.update() + h = gr.update() + + return img, w, h + + +def restore_old_hires_fix_params(res): + """for infotexts that specify old First pass size parameter, convert it into + width, height, and hr scale""" + + firstpass_width = res.get('First pass size-1', None) + firstpass_height = res.get('First pass size-2', None) + + if shared.opts.use_old_hires_fix_width_height: + hires_width = int(res.get("Hires resize-1", 0)) + hires_height = int(res.get("Hires resize-2", 0)) + + if hires_width and hires_height: + res['Size-1'] = hires_width + res['Size-2'] = hires_height + return + + if firstpass_width is None or firstpass_height is None: + return + + firstpass_width, firstpass_height = int(firstpass_width), int(firstpass_height) + width = int(res.get("Size-1", 512)) + height = int(res.get("Size-2", 512)) + + if firstpass_width == 0 or firstpass_height == 0: + firstpass_width, firstpass_height = processing.old_hires_fix_first_pass_dimensions(width, height) + + res['Size-1'] = firstpass_width + res['Size-2'] = firstpass_height + res['Hires resize-1'] = width + res['Hires resize-2'] = height + + +def parse_generation_parameters(x: str): + """parses generation parameters string, the one you see in text field under the picture in UI: +``` +girl with an artist's beret, determined, blue eyes, desert scene, computer monitors, heavy makeup, by Alphonse Mucha and Charlie Bowater, ((eyeshadow)), (coquettish), detailed, intricate +Negative prompt: ugly, fat, obese, chubby, (((deformed))), [blurry], bad anatomy, disfigured, poorly drawn face, mutation, mutated, (extra_limb), (ugly), (poorly drawn hands), messy drawing +Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model hash: 45dee52b +``` + + returns a dict with field values + """ + + res = {} + + prompt = "" + negative_prompt = "" + + done_with_prompt = False + + *lines, lastline = x.strip().split("\n") + if len(re_param.findall(lastline)) < 3: + lines.append(lastline) + lastline = '' + + for line in lines: + line = line.strip() + if line.startswith("Negative prompt:"): + done_with_prompt = True + line = line[16:].strip() + if done_with_prompt: + negative_prompt += ("" if negative_prompt == "" else "\n") + line + else: + prompt += ("" if prompt == "" else "\n") + line + + if shared.opts.infotext_styles != "Ignore": + found_styles, prompt, negative_prompt = shared.prompt_styles.extract_styles_from_prompt(prompt, negative_prompt) + + if shared.opts.infotext_styles == "Apply": + res["Styles array"] = found_styles + elif shared.opts.infotext_styles == "Apply if any" and found_styles: + res["Styles array"] = found_styles + + res["Prompt"] = prompt + res["Negative prompt"] = negative_prompt + + for k, v in re_param.findall(lastline): + try: + if v[0] == '"' and v[-1] == '"': + v = unquote(v) + + m = re_imagesize.match(v) + if m is not None: + res[f"{k}-1"] = m.group(1) + res[f"{k}-2"] = m.group(2) + else: + res[k] = v + except Exception: + print(f"Error parsing \"{k}: {v}\"") + + # Missing CLIP skip means it was set to 1 (the default) + if "Clip skip" not in res: + res["Clip skip"] = "1" + + hypernet = res.get("Hypernet", None) + if hypernet is not None: + res["Prompt"] += f"""""" + + if "Hires resize-1" not in res: + res["Hires resize-1"] = 0 + 
res["Hires resize-2"] = 0 + + if "Hires sampler" not in res: + res["Hires sampler"] = "Use same sampler" + + if "Hires checkpoint" not in res: + res["Hires checkpoint"] = "Use same checkpoint" + + if "Hires prompt" not in res: + res["Hires prompt"] = "" + + if "Hires negative prompt" not in res: + res["Hires negative prompt"] = "" + + restore_old_hires_fix_params(res) + + # Missing RNG means the default was set, which is GPU RNG + if "RNG" not in res: + res["RNG"] = "GPU" + + if "Schedule type" not in res: + res["Schedule type"] = "Automatic" + + if "Schedule max sigma" not in res: + res["Schedule max sigma"] = 0 + + if "Schedule min sigma" not in res: + res["Schedule min sigma"] = 0 + + if "Schedule rho" not in res: + res["Schedule rho"] = 0 + + if "VAE Encoder" not in res: + res["VAE Encoder"] = "Full" + + if "VAE Decoder" not in res: + res["VAE Decoder"] = "Full" + + if "FP8 weight" not in res: + res["FP8 weight"] = "Disable" + + if "Cache FP16 weight for LoRA" not in res and res["FP8 weight"] != "Disable": + res["Cache FP16 weight for LoRA"] = False + + infotext_versions.backcompat(res) + + skip = set(shared.opts.infotext_skip_pasting) + res = {k: v for k, v in res.items() if k not in skip} + + return res + + +infotext_to_setting_name_mapping = [ + +] +"""Mapping of infotext labels to setting names. Only left for backwards compatibility - use OptionInfo(..., infotext='...') instead. +Example content: + +infotext_to_setting_name_mapping = [ + ('Conditional mask weight', 'inpainting_mask_weight'), + ('Model hash', 'sd_model_checkpoint'), + ('ENSD', 'eta_noise_seed_delta'), + ('Schedule type', 'k_sched_type'), +] +""" + + +def create_override_settings_dict(text_pairs): + """creates processing's override_settings parameters from gradio's multiselect + + Example input: + ['Clip skip: 2', 'Model hash: e6e99610c4', 'ENSD: 31337'] + + Example output: + {'CLIP_stop_at_last_layers': 2, 'sd_model_checkpoint': 'e6e99610c4', 'eta_noise_seed_delta': 31337} + """ + + res = {} + + params = {} + for pair in text_pairs: + k, v = pair.split(":", maxsplit=1) + + params[k] = v.strip() + + mapping = [(info.infotext, k) for k, info in shared.opts.data_labels.items() if info.infotext] + for param_name, setting_name in mapping + infotext_to_setting_name_mapping: + value = params.get(param_name, None) + + if value is None: + continue + + res[setting_name] = shared.opts.cast_value(setting_name, value) + + return res + + +def get_override_settings(params, *, skip_fields=None): + """Returns a list of settings overrides from the infotext parameters dictionary. + + This function checks the `params` dictionary for any keys that correspond to settings in `shared.opts` and returns + a list of tuples containing the parameter name, setting name, and new value cast to correct type. + + It checks for conditions before adding an override: + - ignores settings that match the current value + - ignores parameter keys present in skip_fields argument. 
+ + Example input: + {"Clip skip": "2"} + + Example output: + [("Clip skip", "CLIP_stop_at_last_layers", 2)] + """ + + res = [] + + mapping = [(info.infotext, k) for k, info in shared.opts.data_labels.items() if info.infotext] + for param_name, setting_name in mapping + infotext_to_setting_name_mapping: + if param_name in (skip_fields or {}): + continue + + v = params.get(param_name, None) + if v is None: + continue + + if setting_name == "sd_model_checkpoint" and shared.opts.disable_weights_auto_swap: + continue + + v = shared.opts.cast_value(setting_name, v) + current_value = getattr(shared.opts, setting_name, None) + + if v == current_value: + continue + + res.append((param_name, setting_name, v)) + + return res + + +def connect_paste(button, paste_fields, input_comp, override_settings_component, tabname): + def paste_func(prompt): + if not prompt and not shared.cmd_opts.hide_ui_dir_config: + filename = os.path.join(data_path, "params.txt") + if os.path.exists(filename): + with open(filename, "r", encoding="utf8") as file: + prompt = file.read() + + params = parse_generation_parameters(prompt) + script_callbacks.infotext_pasted_callback(prompt, params) + res = [] + + for output, key in paste_fields: + if callable(key): + v = key(params) + else: + v = params.get(key, None) + + if v is None: + res.append(gr.update()) + elif isinstance(v, type_of_gr_update): + res.append(v) + else: + try: + valtype = type(output.value) + + if valtype == bool and v == "False": + val = False + else: + val = valtype(v) + + res.append(gr.update(value=val)) + except Exception: + res.append(gr.update()) + + return res + + if override_settings_component is not None: + already_handled_fields = {key: 1 for _, key in paste_fields} + + def paste_settings(params): + vals = get_override_settings(params, skip_fields=already_handled_fields) + + vals_pairs = [f"{infotext_text}: {value}" for infotext_text, setting_name, value in vals] + + return gr.Dropdown.update(value=vals_pairs, choices=vals_pairs, visible=bool(vals_pairs)) + + paste_fields = paste_fields + [(override_settings_component, paste_settings)] + + button.click( + fn=paste_func, + inputs=[input_comp], + outputs=[x[0] for x in paste_fields], + show_progress=False, + ) + button.click( + fn=None, + _js=f"recalculate_prompts_{tabname}", + inputs=[], + outputs=[], + show_progress=False, + ) + -- cgit v1.2.3 From 80873b1538e6ca0c7ebe558f8ce4213b06fd8307 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 2 Jan 2024 07:05:05 +0300 Subject: fix #14497 --- modules/img2img.py | 15 --------------- modules/infotext_utils.py | 12 ++++++++++++ modules/processing.py | 13 +++++++++++++ 3 files changed, 25 insertions(+), 15 deletions(-) (limited to 'modules/infotext_utils.py') diff --git a/modules/img2img.py b/modules/img2img.py index 9e09c0a0..f81405df 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -222,21 +222,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s if shared.opts.enable_console_prompts: print(f"\nimg2img: {prompt}", file=shared.progress_print_out) - if mask: - p.extra_generation_params["Mask blur"] = mask_blur - - if inpainting_mask_invert is not None: - p.extra_generation_params["Mask mode"] = inpainting_mask_invert - - if inpainting_fill is not None: - p.extra_generation_params["Masked content"] = inpainting_fill - - if inpaint_full_res is not None: - p.extra_generation_params["Inpaint area"] = inpaint_full_res - - if inpaint_full_res_padding is not None: - p.extra_generation_params["Only 
masked padding, pixels"] = inpaint_full_res_padding - with closing(p): if is_batch: assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" diff --git a/modules/infotext_utils.py b/modules/infotext_utils.py index 26e9b949..e582ee47 100644 --- a/modules/infotext_utils.py +++ b/modules/infotext_utils.py @@ -312,6 +312,18 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model if "Hires negative prompt" not in res: res["Hires negative prompt"] = "" + if "Mask mode" not in res: + res["Mask mode"] = "Inpaint masked" + + if "Masked content" not in res: + res["Masked content"] = 'original' + + if "Inpaint area" not in res: + res["Inpaint area"] = "Whole picture" + + if "Masked area padding" not in res: + res["Masked area padding"] = 32 + restore_old_hires_fix_params(res) # Missing RNG means the default was set, which is GPU RNG diff --git a/modules/processing.py b/modules/processing.py index 045c7d79..84e7b1b4 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -1530,6 +1530,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): if self.inpainting_mask_invert: image_mask = ImageOps.invert(image_mask) + self.extra_generation_params["Mask mode"] = "Inpaint not masked" if self.mask_blur_x > 0: np_mask = np.array(image_mask) @@ -1543,6 +1544,9 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): np_mask = cv2.GaussianBlur(np_mask, (1, kernel_size), self.mask_blur_y) image_mask = Image.fromarray(np_mask) + if self.mask_blur_x > 0 or self.mask_blur_y > 0: + self.extra_generation_params["Mask blur"] = self.mask_blur + if self.inpaint_full_res: self.mask_for_overlay = image_mask mask = image_mask.convert('L') @@ -1553,6 +1557,9 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): mask = mask.crop(crop_region) image_mask = images.resize_image(2, mask, self.width, self.height) self.paste_to = (x1, y1, x2-x1, y2-y1) + + self.extra_generation_params["Inpaint area"] = "Only masked" + self.extra_generation_params["Masked area padding"] = self.inpaint_full_res_padding else: image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height) np_mask = np.array(image_mask) @@ -1594,6 +1601,9 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): if self.inpainting_fill != 1: image = masking.fill(image, latent_mask) + if self.inpainting_fill == 0: + self.extra_generation_params["Masked content"] = 'fill' + if add_color_corrections: self.color_corrections.append(setup_color_correction(image)) @@ -1643,8 +1653,11 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): # this needs to be fixed to be done in sample() using actual seeds for batches if self.inpainting_fill == 2: self.init_latent = self.init_latent * self.mask + create_random_tensors(self.init_latent.shape[1:], all_seeds[0:self.init_latent.shape[0]]) * self.nmask + self.extra_generation_params["Masked content"] = 'latent noise' + elif self.inpainting_fill == 3: self.init_latent = self.init_latent * self.mask + self.extra_generation_params["Masked content"] = 'latent nothing' self.image_conditioning = self.img2img_image_conditioning(image * 2 - 1, self.init_latent, image_mask, self.mask_round) -- cgit v1.2.3 From bfc48fbc244130770991fab284f6fedcef2054e7 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 4 Jan 2024 03:46:05 +0900 Subject: paste infotext cast int as float --- modules/infotext_utils.py | 2 ++ 1 file changed, 2 
insertions(+) (limited to 'modules/infotext_utils.py') diff --git a/modules/infotext_utils.py b/modules/infotext_utils.py index e582ee47..a21329e6 100644 --- a/modules/infotext_utils.py +++ b/modules/infotext_utils.py @@ -477,6 +477,8 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component, if valtype == bool and v == "False": val = False + elif valtype == int: + val = float(v) else: val = valtype(v) -- cgit v1.2.3 From d9034b48a526f0a0c3e8f0dbf7c171bf4f0597fd Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 4 Jan 2024 00:16:58 +0200 Subject: Avoid unnecessary `isfile`/`exists` calls --- modules/cache.py | 17 ++++++++--------- modules/extensions.py | 11 ++++++----- modules/extra_networks.py | 7 ++++--- modules/infotext_utils.py | 4 +++- modules/launch_utils.py | 7 ++++--- modules/postprocessing.py | 7 ++++--- modules/shared_init.py | 4 +++- modules/ui_gradio_extensions.py | 8 +++----- modules/ui_loadsave.py | 5 +++-- modules/util.py | 6 +++--- 10 files changed, 41 insertions(+), 35 deletions(-) (limited to 'modules/infotext_utils.py') diff --git a/modules/cache.py b/modules/cache.py index 2d37e7b9..a9822a0e 100644 --- a/modules/cache.py +++ b/modules/cache.py @@ -62,16 +62,15 @@ def cache(subsection): if cache_data is None: with cache_lock: if cache_data is None: - if not os.path.isfile(cache_filename): + try: + with open(cache_filename, "r", encoding="utf8") as file: + cache_data = json.load(file) + except FileNotFoundError: + cache_data = {} + except Exception: + os.replace(cache_filename, os.path.join(script_path, "tmp", "cache.json")) + print('[ERROR] issue occurred while trying to read cache.json, move current cache to tmp/cache.json and create new cache') cache_data = {} - else: - try: - with open(cache_filename, "r", encoding="utf8") as file: - cache_data = json.load(file) - except Exception: - os.replace(cache_filename, os.path.join(script_path, "tmp", "cache.json")) - print('[ERROR] issue occurred while trying to read cache.json, move current cache to tmp/cache.json and create new cache') - cache_data = {} s = cache_data.get(subsection, {}) cache_data[subsection] = s diff --git a/modules/extensions.py b/modules/extensions.py index 1899cd52..99e7ee60 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -32,11 +32,12 @@ class ExtensionMetadata: self.config = configparser.ConfigParser() filepath = os.path.join(path, self.filename) - if os.path.isfile(filepath): - try: - self.config.read(filepath) - except Exception: - errors.report(f"Error reading {self.filename} for extension {canonical_name}.", exc_info=True) + # `self.config.read()` will quietly swallow OSErrors (which FileNotFoundError is), + # so no need to check whether the file exists beforehand. 
+ try: + self.config.read(filepath) + except Exception: + errors.report(f"Error reading {self.filename} for extension {canonical_name}.", exc_info=True) self.canonical_name = self.config.get("Extension", "Name", fallback=canonical_name) self.canonical_name = canonical_name.lower().strip() diff --git a/modules/extra_networks.py b/modules/extra_networks.py index b9533677..cd030fa3 100644 --- a/modules/extra_networks.py +++ b/modules/extra_networks.py @@ -215,9 +215,10 @@ def get_user_metadata(filename): metadata = {} try: - if os.path.isfile(metadata_filename): - with open(metadata_filename, "r", encoding="utf8") as file: - metadata = json.load(file) + with open(metadata_filename, "r", encoding="utf8") as file: + metadata = json.load(file) + except FileNotFoundError: + pass except Exception as e: errors.display(e, f"reading extra network user metadata from {metadata_filename}") diff --git a/modules/infotext_utils.py b/modules/infotext_utils.py index e582ee47..6978a0bf 100644 --- a/modules/infotext_utils.py +++ b/modules/infotext_utils.py @@ -453,9 +453,11 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component, def paste_func(prompt): if not prompt and not shared.cmd_opts.hide_ui_dir_config: filename = os.path.join(data_path, "params.txt") - if os.path.exists(filename): + try: with open(filename, "r", encoding="utf8") as file: prompt = file.read() + except OSError: + pass params = parse_generation_parameters(prompt) script_callbacks.infotext_pasted_callback(prompt, params) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index c2cbd8ce..febd8c24 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -245,9 +245,10 @@ def list_extensions(settings_file): settings = {} try: - if os.path.isfile(settings_file): - with open(settings_file, "r", encoding="utf8") as file: - settings = json.load(file) + with open(settings_file, "r", encoding="utf8") as file: + settings = json.load(file) + except FileNotFoundError: + pass except Exception: errors.report("Could not load settings", exc_info=True) diff --git a/modules/postprocessing.py b/modules/postprocessing.py index 7850328f..7449b0dc 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -97,11 +97,12 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, if pp.caption: caption_filename = os.path.splitext(fullfn)[0] + ".txt" - if os.path.isfile(caption_filename): + existing_caption = "" + try: with open(caption_filename, encoding="utf8") as file: existing_caption = file.read().strip() - else: - existing_caption = "" + except FileNotFoundError: + pass action = shared.opts.postprocessing_existing_caption_action if action == 'Prepend' and existing_caption: diff --git a/modules/shared_init.py b/modules/shared_init.py index d3fb687e..586be342 100644 --- a/modules/shared_init.py +++ b/modules/shared_init.py @@ -18,8 +18,10 @@ def initialize(): shared.options_templates = shared_options.options_templates shared.opts = options.Options(shared_options.options_templates, shared_options.restricted_opts) shared.restricted_opts = shared_options.restricted_opts - if os.path.exists(shared.config_filename): + try: shared.opts.load(shared.config_filename) + except FileNotFoundError: + pass from modules import devices devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ diff --git a/modules/ui_gradio_extensions.py b/modules/ui_gradio_extensions.py index a86c368e..f5278d22 100644 --- 
a/modules/ui_gradio_extensions.py +++ b/modules/ui_gradio_extensions.py @@ -35,13 +35,11 @@ def css_html(): return f'' for cssfile in scripts.list_files_with_name("style.css"): - if not os.path.isfile(cssfile): - continue - head += stylesheet(cssfile) - if os.path.exists(os.path.join(data_path, "user.css")): - head += stylesheet(os.path.join(data_path, "user.css")) + user_css = os.path.join(data_path, "user.css") + if os.path.exists(user_css): + head += stylesheet(user_css) return head diff --git a/modules/ui_loadsave.py b/modules/ui_loadsave.py index 693ff75c..2555cdb6 100644 --- a/modules/ui_loadsave.py +++ b/modules/ui_loadsave.py @@ -26,8 +26,9 @@ class UiLoadsave: self.ui_defaults_review = None try: - if os.path.exists(self.filename): - self.ui_settings = self.read_from_file() + self.ui_settings = self.read_from_file() + except FileNotFoundError: + pass except Exception as e: self.error_loading = True errors.display(e, "loading settings") diff --git a/modules/util.py b/modules/util.py index 4861bcb0..d503f267 100644 --- a/modules/util.py +++ b/modules/util.py @@ -21,11 +21,11 @@ def html_path(filename): def html(filename): path = html_path(filename) - if os.path.exists(path): + try: with open(path, encoding="utf8") as file: return file.read() - - return "" + except OSError: + return "" def walk_files(path, allowed_extensions=None): -- cgit v1.2.3 From 6916de5c0bd8df3835d450caa3327d1924db081c Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 16 Jan 2024 20:16:07 +0900 Subject: parse_generation_parameters skip_fields --- modules/infotext_utils.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'modules/infotext_utils.py') diff --git a/modules/infotext_utils.py b/modules/infotext_utils.py index 9a02cdf2..1049c6c3 100644 --- a/modules/infotext_utils.py +++ b/modules/infotext_utils.py @@ -230,7 +230,7 @@ def restore_old_hires_fix_params(res): res['Hires resize-2'] = height -def parse_generation_parameters(x: str): +def parse_generation_parameters(x: str, skip_fields: list[str] | None = None): """parses generation parameters string, the one you see in text field under the picture in UI: ``` girl with an artist's beret, determined, blue eyes, desert scene, computer monitors, heavy makeup, by Alphonse Mucha and Charlie Bowater, ((eyeshadow)), (coquettish), detailed, intricate @@ -240,6 +240,8 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model returns a dict with field values """ + if skip_fields is None: + skip_fields = shared.opts.infotext_skip_pasting res = {} @@ -356,8 +358,8 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model infotext_versions.backcompat(res) - skip = set(shared.opts.infotext_skip_pasting) - res = {k: v for k, v in res.items() if k not in skip} + for key in skip_fields: + res.pop(key, None) return res -- cgit v1.2.3 From e2b19900ec37ef517d8175a7d86c1925ca9f9e91 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 11 Feb 2024 09:39:51 +0300 Subject: add infotext entry for emphasis; put emphasis into a separate file, add an option to parse but still ignore emphasis --- modules/infotext_utils.py | 3 ++ modules/processing.py | 1 + modules/sd_emphasis.py | 70 +++++++++++++++++++++++++++++++++++++++++++ modules/sd_hijack_clip.py | 21 +++++++------ modules/sd_hijack_clip_old.py | 2 +- modules/shared_options.py | 5 ++-- 6 files changed, 89 insertions(+), 13 deletions(-) create mode 100644 modules/sd_emphasis.py (limited to 
'modules/infotext_utils.py') diff --git a/modules/infotext_utils.py b/modules/infotext_utils.py index 1049c6c3..a938aa2a 100644 --- a/modules/infotext_utils.py +++ b/modules/infotext_utils.py @@ -356,6 +356,9 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model if "Cache FP16 weight for LoRA" not in res and res["FP8 weight"] != "Disable": res["Cache FP16 weight for LoRA"] = False + if "Emphasis" not in res: + res["Emphasis"] = "Original" + infotext_versions.backcompat(res) for key in skip_fields: diff --git a/modules/processing.py b/modules/processing.py index 52f00bfb..f4aa165d 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -455,6 +455,7 @@ class StableDiffusionProcessing: self.height, opts.fp8_storage, opts.cache_fp16_weight, + opts.emphasis, ) def get_conds_with_caching(self, function, required_prompts, steps, caches, extra_network_data, hires_steps=None): diff --git a/modules/sd_emphasis.py b/modules/sd_emphasis.py new file mode 100644 index 00000000..654817b6 --- /dev/null +++ b/modules/sd_emphasis.py @@ -0,0 +1,70 @@ +from __future__ import annotations +import torch + + +class Emphasis: + """Emphasis class decides how to death with (emphasized:1.1) text in prompts""" + + name: str = "Base" + description: str = "" + + tokens: list[list[int]] + """tokens from the chunk of the prompt""" + + multipliers: torch.Tensor + """tensor with multipliers, once for each token""" + + z: torch.Tensor + """output of cond transformers network (CLIP)""" + + def after_transformers(self): + """Called after cond transformers network has processed the chunk of the prompt; this function should modify self.z to apply the emphasis""" + + pass + + +class EmphasisNone(Emphasis): + name = "None" + description = "disable the mechanism entirely and treat (:.1.1) as literal characters" + + +class EmphasisIgnore(Emphasis): + name = "Ignore" + description = "treat all empasised words as if they have no emphasis" + + +class EmphasisOriginal(Emphasis): + name = "Original" + description = "the orginal emphasis implementation" + + def after_transformers(self): + original_mean = self.z.mean() + self.z = self.z * self.multipliers.reshape(self.multipliers.shape + (1,)).expand(self.z.shape) + + # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise + new_mean = self.z.mean() + self.z = self.z * (original_mean / new_mean) + + +class EmphasisOriginalNoNorm(EmphasisOriginal): + name = "No norm" + description = "same as orginal, but without normalization (seems to work better for SDXL)" + + def after_transformers(self): + self.z = self.z * self.multipliers.reshape(self.multipliers.shape + (1,)).expand(self.z.shape) + + +def get_current_option(emphasis_option_name): + return next(iter([x for x in options if x.name == emphasis_option_name]), EmphasisOriginal) + + +def get_options_descriptions(): + return ", ".join(f"{x.name}: {x.description}" for x in options) + + +options = [ + EmphasisNone, + EmphasisIgnore, + EmphasisOriginal, + EmphasisOriginalNoNorm, +] diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index 89634fbf..98350ac4 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -3,7 +3,7 @@ from collections import namedtuple import torch -from modules import prompt_parser, devices, sd_hijack +from modules import prompt_parser, devices, sd_hijack, sd_emphasis from modules.shared import opts @@ -88,7 +88,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): Returns 
the list and the total number of tokens in the prompt. """ - if opts.enable_emphasis: + if opts.emphasis != "None": parsed = prompt_parser.parse_prompt_attention(line) else: parsed = [[line, 1.0]] @@ -249,6 +249,9 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): hashes.append(self.hijack.extra_generation_params.get("TI hashes")) self.hijack.extra_generation_params["TI hashes"] = ", ".join(hashes) + if any(x for x in texts if "(" in x or "[" in x) and opts.emphasis != "Original": + self.hijack.extra_generation_params["Emphasis"] = opts.emphasis + if getattr(self.wrapped, 'return_pooled', False): return torch.hstack(zs), zs[0].pooled else: @@ -274,14 +277,14 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): pooled = getattr(z, 'pooled', None) - # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise - batch_multipliers = torch.asarray(batch_multipliers).to(devices.device) - original_mean = z.mean() - z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape) - new_mean = z.mean() + emphasis = sd_emphasis.get_current_option(opts.emphasis)() + emphasis.tokens = remade_batch_tokens + emphasis.multipliers = torch.asarray(batch_multipliers).to(devices.device) + emphasis.z = z + + emphasis.after_transformers() - if not getattr(opts, "disable_normalize_embeddings", False): - z = z * (original_mean / new_mean) + z = emphasis.z if pooled is not None: z.pooled = pooled diff --git a/modules/sd_hijack_clip_old.py b/modules/sd_hijack_clip_old.py index c5c6270b..43e9b952 100644 --- a/modules/sd_hijack_clip_old.py +++ b/modules/sd_hijack_clip_old.py @@ -32,7 +32,7 @@ def process_text_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i) - mult_change = self.token_mults.get(token) if shared.opts.enable_emphasis else None + mult_change = self.token_mults.get(token) if shared.opts.emphasis != "None" else None if mult_change is not None: mult *= mult_change i += 1 diff --git a/modules/shared_options.py b/modules/shared_options.py index 417a42b2..ba6d731d 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -1,7 +1,7 @@ import os import gradio as gr -from modules import localization, ui_components, shared_items, shared, interrogate, shared_gradio_themes, util +from modules import localization, ui_components, shared_items, shared, interrogate, shared_gradio_themes, util, sd_emphasis from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir, default_output_dir # noqa: F401 from modules.shared_cmd_options import cmd_opts from modules.options import options_section, OptionInfo, OptionHTML, categories @@ -154,8 +154,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion", "sd"), { "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"), "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"), "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. 
This may change existing seeds").needs_reload_ui(), - "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), - "disable_normalize_embeddings": OptionInfo(False, "Disable normalize embeddings").info("Do not normalize embeddings after calculating emphasis. It can be expected to be effective in preventing artifacts in SDXL."), + "emphasis": OptionInfo("Original", "Emphasis mode", gr.Radio, lambda: {"choices": [x.name for x in sd_emphasis.options]}, infotext="Emphasis").info("makes it possible to make model to pay (more:1.1) or (less:0.9) attention to text when you use the syntax in prompt; " + sd_emphasis.get_options_descriptions()), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}, infotext="Clip skip").link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), -- cgit v1.2.3
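For reference, here is a minimal, self-contained sketch (standard library only, not part of the patch series above) of how the re_param / re_imagesize regexes and the unquote helper introduced in modules/infotext_utils.py split the last line of an infotext block into the key/value dictionary that parse_generation_parameters builds:

```python
# Illustrative sketch of the infotext key/value scan from modules/infotext_utils.py.
# The regexes and unquote() are copied from the module; everything else is a stripped-down
# stand-in for parse_generation_parameters so the example runs without gradio or the webui.
import json
import re

re_param = re.compile(r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)')
re_imagesize = re.compile(r"^(\d+)x(\d+)$")


def unquote(text):
    # values containing commas/newlines/colons are stored JSON-quoted by quote()
    if len(text) == 0 or text[0] != '"' or text[-1] != '"':
        return text
    try:
        return json.loads(text)
    except Exception:
        return text


lastline = "Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model hash: 45dee52b"

res = {}
for k, v in re_param.findall(lastline):
    v = unquote(v)
    m = re_imagesize.match(v)
    if m is not None:
        # "Size: 512x512" becomes Size-1 / Size-2, as in parse_generation_parameters
        res[f"{k}-1"], res[f"{k}-2"] = m.group(1), m.group(2)
    else:
        res[k] = v

print(res)
# {'Steps': '20', 'Sampler': 'Euler a', 'CFG scale': '7', 'Seed': '965400086',
#  'Size-1': '512', 'Size-2': '512', 'Model hash': '45dee52b'}
```

In the webui itself, connect_paste then writes these parsed values back into the UI components; the later commit "paste infotext cast int as float" routes integer-typed fields through float(), presumably so that numeric strings such as "7.0" do not fail a direct int() conversion.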