From 5ef7590324891ec7263c767d178a51827a6f9b33 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 16 Jul 2023 11:38:59 +0300 Subject: always show extra networks tabs in the UI --- modules/ui.py | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index 07ecee7b..085561c1 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -310,7 +310,6 @@ def create_toprow(is_img2img): with gr.Row(elem_id=f"{id_part}_tools"): paste = ToolButton(value=paste_symbol, elem_id="paste") clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") - extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) @@ -331,7 +330,7 @@ def create_toprow(is_img2img): prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") - return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button + return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, None, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button def setup_progressbar(*args, **kwargs): @@ -419,16 +418,15 @@ def create_ui(): modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) + txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, _, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) dummy_component = gr.Label(visible=False) txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) - with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img') + extra_tabs = gr.Tabs(elem_id="txt2img_extra_tabs") + extra_tabs.__enter__() - with gr.Row().style(equal_height=False): + with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, gr.Row().style(equal_height=False): with gr.Column(variant='compact', elem_id="txt2img_settings"): modules.scripts.scripts_txt2img.prepare_ui() @@ -656,21 +654,24 @@ def create_ui(): 
token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter]) negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter]) - ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) + from modules import ui_extra_networks + extra_networks_ui = ui_extra_networks.create_ui(txt2img_interface, [txt2img_generation_tab], 'txt2img') + ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) + + extra_tabs.__exit__() modules.scripts.scripts_current = modules.scripts.scripts_img2img modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) + img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, _, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) - with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img') + extra_tabs = gr.Tabs(elem_id="img2img_extra_tabs") + extra_tabs.__enter__() - with FormRow().style(equal_height=False): + with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, FormRow().style(equal_height=False): with gr.Column(variant='compact', elem_id="img2img_settings"): copy_image_buttons = [] copy_image_destinations = {} @@ -1026,8 +1027,6 @@ def create_ui(): token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter]) - ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) - img2img_paste_fields = [ (img2img_prompt, "Prompt"), (img2img_negative_prompt, "Negative prompt"), @@ -1055,6 +1054,12 @@ def create_ui(): paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, )) + from modules import ui_extra_networks + extra_networks_ui_img2img = ui_extra_networks.create_ui(img2img_interface, [img2img_generation_tab], 'img2img') + ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) + + extra_tabs.__exit__() + modules.scripts.scripts_current = None with gr.Blocks(analytics_enabled=False) as extras_interface: -- cgit v1.2.3 From 3bca90b249d749ed5429f76e380d2ffa52fc0d41 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 30 Jul 2023 13:48:27 +0300 Subject: hires fix checkpoint selection --- modules/generation_parameters_copypaste.py | 3 ++ modules/processing.py | 47 +++++++++++++++++++----------- modules/sd_models.py | 22 ++++++++------ modules/shared.py | 19 ++++++++---- modules/txt2img.py | 3 +- modules/ui.py | 8 ++++- 6 
files changed, 68 insertions(+), 34 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index a3448be9..4e286558 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -280,6 +280,9 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model if "Hires sampler" not in res: res["Hires sampler"] = "Use same sampler" + if "Hires checkpoint" not in res: + res["Hires checkpoint"] = "Use same checkpoint" + if "Hires prompt" not in res: res["Hires prompt"] = "" diff --git a/modules/processing.py b/modules/processing.py index b0992ee1..7026487a 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -935,7 +935,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): cached_hr_uc = [None, None] cached_hr_c = [None, None] - def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_sampler_name: str = None, hr_prompt: str = '', hr_negative_prompt: str = '', **kwargs): + def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_checkpoint_name: str = None, hr_sampler_name: str = None, hr_prompt: str = '', hr_negative_prompt: str = '', **kwargs): super().__init__(**kwargs) self.enable_hr = enable_hr self.denoising_strength = denoising_strength @@ -946,11 +946,14 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.hr_resize_y = hr_resize_y self.hr_upscale_to_x = hr_resize_x self.hr_upscale_to_y = hr_resize_y + self.hr_checkpoint_name = hr_checkpoint_name + self.hr_checkpoint_info = None self.hr_sampler_name = hr_sampler_name self.hr_prompt = hr_prompt self.hr_negative_prompt = hr_negative_prompt self.all_hr_prompts = None self.all_hr_negative_prompts = None + self.latent_scale_mode = None if firstphase_width != 0 or firstphase_height != 0: self.hr_upscale_to_x = self.width @@ -973,6 +976,14 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): def init(self, all_prompts, all_seeds, all_subseeds): if self.enable_hr: + if self.hr_checkpoint_name: + self.hr_checkpoint_info = sd_models.get_closet_checkpoint_match(self.hr_checkpoint_name) + + if self.hr_checkpoint_info is None: + raise Exception(f'Could not find checkpoint with name {self.hr_checkpoint_name}') + + self.extra_generation_params["Hires checkpoint"] = self.hr_checkpoint_info.short_title + if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name: self.extra_generation_params["Hires sampler"] = self.hr_sampler_name @@ -982,6 +993,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if tuple(self.hr_negative_prompt) != tuple(self.negative_prompt): self.extra_generation_params["Hires negative prompt"] = self.hr_negative_prompt + self.latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest") + if self.enable_hr and self.latent_scale_mode is None: + if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers): + raise Exception(f"could not find 
upscaler named {self.hr_upscaler}") + if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height): self.hr_resize_x = self.width self.hr_resize_y = self.height @@ -1020,14 +1036,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f - # special case: the user has chosen to do nothing - if self.hr_upscale_to_x == self.width and self.hr_upscale_to_y == self.height: - self.enable_hr = False - self.denoising_strength = None - self.extra_generation_params.pop("Hires upscale", None) - self.extra_generation_params.pop("Hires resize", None) - return - if not state.processing_has_refined_job_count: if state.job_count == -1: state.job_count = self.n_iter @@ -1045,17 +1053,22 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) - latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest") - if self.enable_hr and latent_scale_mode is None: - if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers): - raise Exception(f"could not find upscaler named {self.hr_upscaler}") - x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) if not self.enable_hr: return samples + current = shared.sd_model.sd_checkpoint_info + try: + if self.hr_checkpoint_info is not None: + sd_models.reload_model_weights(info=self.hr_checkpoint_info) + + return self.sample_hr_pass(samples, seeds, subseeds, subseed_strength, prompts) + finally: + sd_models.reload_model_weights(info=current) + + def sample_hr_pass(self, samples, seeds, subseeds, subseed_strength, prompts): self.is_hr_pass = True target_width = self.hr_upscale_to_x @@ -1073,11 +1086,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index) images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, p=self, suffix="-before-highres-fix") - if latent_scale_mode is not None: + if self.latent_scale_mode is not None: for i in range(samples.shape[0]): save_intermediate(samples, i) - samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"]) + samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=self.latent_scale_mode["mode"], antialias=self.latent_scale_mode["antialias"]) # Avoid making the inpainting conditioning unless necessary as # this does need some extra compute to decode / encode the image again. 
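A note on the sample() change above: it saves the currently loaded checkpoint, reloads the hires checkpoint if one was requested, and always restores the original in a finally block. Below is a minimal standalone sketch of that same save/switch/restore pattern, written as a context manager purely for illustration — temporary_checkpoint is a hypothetical name, and shared.sd_model.sd_checkpoint_info / sd_models.reload_model_weights are the calls used in the patch, assumed importable from the webui's modules package.

from contextlib import contextmanager

from modules import sd_models, shared


@contextmanager
def temporary_checkpoint(info):
    # Remember which checkpoint is loaded before the hires pass starts.
    previous = shared.sd_model.sd_checkpoint_info
    try:
        if info is not None:
            # Switch to the checkpoint requested for the hires pass.
            sd_models.reload_model_weights(info=info)
        yield
    finally:
        # Restore the original checkpoint even if the hires pass raised.
        sd_models.reload_model_weights(info=previous)

# Hypothetical usage, mirroring the try/finally added to sample():
#     with temporary_checkpoint(self.hr_checkpoint_info):
#         samples = self.sample_hr_pass(samples, seeds, subseeds, subseed_strength, prompts)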
@@ -1193,7 +1206,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.hr_uc = None self.hr_c = None - if self.enable_hr: + if self.enable_hr and self.hr_checkpoint_info is None: if shared.opts.hires_fix_use_firstpass_conds: self.calculate_hr_conds() diff --git a/modules/sd_models.py b/modules/sd_models.py index acb1e817..cb67e425 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -52,6 +52,7 @@ class CheckpointInfo: self.shorthash = self.sha256[0:10] if self.sha256 else None self.title = name if self.shorthash is None else f'{name} [{self.shorthash}]' + self.short_title = self.name_for_extra if self.shorthash is None else f'{self.name_for_extra} [{self.shorthash}]' self.ids = [self.hash, self.model_name, self.title, name, f'{name} [{self.hash}]'] + ([self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] if self.shorthash else []) @@ -81,6 +82,7 @@ class CheckpointInfo: checkpoints_list.pop(self.title) self.title = f'{self.name} [{self.shorthash}]' + self.short_title = f'{self.name_for_extra} [{self.shorthash}]' self.register() return self.shorthash @@ -101,14 +103,8 @@ def setup_model(): enable_midas_autodownload() -def checkpoint_tiles(): - def convert(name): - return int(name) if name.isdigit() else name.lower() - - def alphanumeric_key(key): - return [convert(c) for c in re.split('([0-9]+)', key)] - - return sorted([x.title for x in checkpoints_list.values()], key=alphanumeric_key) +def checkpoint_tiles(use_short=False): + return [x.short_title if use_short else x.title for x in checkpoints_list.values()] def list_models(): @@ -131,11 +127,14 @@ def list_models(): elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file: print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr) - for filename in sorted(model_list, key=str.lower): + for filename in model_list: checkpoint_info = CheckpointInfo(filename) checkpoint_info.register() +re_strip_checksum = re.compile(r"\s*\[[^]]+]\s*$") + + def get_closet_checkpoint_match(search_string): checkpoint_info = checkpoint_aliases.get(search_string, None) if checkpoint_info is not None: @@ -145,6 +144,11 @@ def get_closet_checkpoint_match(search_string): if found: return found[0] + search_string_without_checksum = re.sub(re_strip_checksum, '', search_string) + found = sorted([info for info in checkpoints_list.values() if search_string_without_checksum in info.title], key=lambda x: len(x.title)) + if found: + return found[0] + return None diff --git a/modules/shared.py b/modules/shared.py index aa72c9c8..807fb9e3 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -220,12 +220,19 @@ class State: return import modules.sd_samplers - if opts.show_progress_grid: - self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent)) - else: - self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent)) - self.current_image_sampling_step = self.sampling_step + try: + if opts.show_progress_grid: + self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent)) + else: + self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent)) + + self.current_image_sampling_step = self.sampling_step + + except Exception: + # when switching models during genration, VAE would be on CPU, so creating an image will fail. 
+ # we silently ignore this error + errors.record_exception() def assign_current_image(self, image): self.current_image = image @@ -512,7 +519,7 @@ options_templates.update(options_section(('ui', "User interface"), { "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_restart(), "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_restart(), "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_restart(), - "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires sampler selection").needs_restart(), + "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_restart(), "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_restart(), "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_restart(), })) diff --git a/modules/txt2img.py b/modules/txt2img.py index 29d94e8c..935ed418 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -9,7 +9,7 @@ from modules.ui import plaintext_to_html import gradio as gr -def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): +def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) p = processing.StableDiffusionProcessingTxt2Img( @@ -41,6 +41,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step hr_second_pass_steps=hr_second_pass_steps, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y, + hr_checkpoint_name=None if hr_checkpoint_name == 'Use same checkpoint' else hr_checkpoint_name, hr_sampler_name=sd_samplers.samplers_for_img2img[hr_sampler_index - 1].name if hr_sampler_index != 0 else None, hr_prompt=hr_prompt, hr_negative_prompt=hr_negative_prompt, diff --git a/modules/ui.py b/modules/ui.py index 07ecee7b..6d8265f2 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -476,6 +476,10 @@ def create_ui(): hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: 
+ checkpoint_choices = lambda: ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True) + hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=checkpoint_choices(), value="Use same checkpoint") + create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": checkpoint_choices()}, "hr_checkpoint_refresh") + hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index") with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: @@ -553,6 +557,7 @@ def create_ui(): hr_second_pass_steps, hr_resize_x, hr_resize_y, + hr_checkpoint_name, hr_sampler_index, hr_prompt, hr_negative_prompt, @@ -630,8 +635,9 @@ def create_ui(): (hr_second_pass_steps, "Hires steps"), (hr_resize_x, "Hires resize-1"), (hr_resize_y, "Hires resize-2"), + (hr_checkpoint_name, "Hires checkpoint"), (hr_sampler_index, "Hires sampler"), - (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" else gr.update()), + (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" or d.get("Hires checkpoint", "Use same checkpoint") != "Use same checkpoint" else gr.update()), (hr_prompt, "Hires prompt"), (hr_negative_prompt, "Hires negative prompt"), (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), -- cgit v1.2.3 From 77761e7bad8a7cbffc9028dc0b2f63169aaf25f9 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 30 Jul 2023 14:10:33 +0300 Subject: linter --- modules/processing.py | 2 +- modules/ui.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/processing.py b/modules/processing.py index b8af1301..21dbef16 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -1055,6 +1055,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) + del x if not self.enable_hr: return samples @@ -1137,7 +1138,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self) # GC now before running the next img2img to prevent running out of memory - x = None devices.torch_gc() if not self.disable_extra_networks: diff --git a/modules/ui.py b/modules/ui.py index 6d8265f2..6fc9de83 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -476,9 +476,9 @@ def create_ui(): hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: - checkpoint_choices = lambda: ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True) - 
hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=checkpoint_choices(), value="Use same checkpoint") - create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": checkpoint_choices()}, "hr_checkpoint_refresh") + + hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint") + create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)}, "hr_checkpoint_refresh") hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index") -- cgit v1.2.3 From b98fa1c397a677c99f375982817d44bdecc3f813 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 1 Aug 2023 07:15:15 +0300 Subject: Split history: mv modules/ui.py temp --- modules/ui.py | 1621 --------------------------------------------------------- temp | 1621 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 1621 insertions(+), 1621 deletions(-) delete mode 100644 modules/ui.py create mode 100644 temp (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py deleted file mode 100644 index 07ecee7b..00000000 --- a/modules/ui.py +++ /dev/null @@ -1,1621 +0,0 @@ -import datetime -import json -import mimetypes -import os -import sys -from functools import reduce -import warnings - -import gradio as gr -import gradio.utils -import numpy as np -from PIL import Image, PngImagePlugin # noqa: F401 -from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call - -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo -from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML -from modules.paths import script_path -from modules.ui_common import create_refresh_button -from modules.ui_gradio_extensions import reload_javascript - - -from modules.shared import opts, cmd_opts - -import modules.codeformer_model -import modules.generation_parameters_copypaste as parameters_copypaste -import modules.gfpgan_model -import modules.hypernetworks.ui -import modules.scripts -import modules.shared as shared -import modules.styles -import modules.textual_inversion.ui -from modules import prompt_parser -from modules.sd_hijack import model_hijack -from modules.sd_samplers import samplers, samplers_for_img2img -from modules.textual_inversion import textual_inversion -import modules.hypernetworks.ui -from modules.generation_parameters_copypaste import image_from_url_text -import modules.extras - -create_setting_component = ui_settings.create_setting_component - -warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning) - -# this is a fix for Windows users. 
Without it, javascript files will be served with text/html content-type and the browser will not show any UI -mimetypes.init() -mimetypes.add_type('application/javascript', '.js') - -if not cmd_opts.share and not cmd_opts.listen: - # fix gradio phoning home - gradio.utils.version_check = lambda: None - gradio.utils.get_local_ip_address = lambda: '127.0.0.1' - -if cmd_opts.ngrok is not None: - import modules.ngrok as ngrok - print('ngrok authtoken detected, trying to connect...') - ngrok.connect( - cmd_opts.ngrok, - cmd_opts.port if cmd_opts.port is not None else 7860, - cmd_opts.ngrok_options - ) - - -def gr_show(visible=True): - return {"visible": visible, "__type__": "update"} - - -sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg" -sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None - -# Using constants for these since the variation selector isn't visible. -# Important that they exactly match script.js for tooltip to work. -random_symbol = '\U0001f3b2\ufe0f' # 🎲️ -reuse_symbol = '\u267b\ufe0f' # ♻️ -paste_symbol = '\u2199\ufe0f' # ↙ -refresh_symbol = '\U0001f504' # 🔄 -save_style_symbol = '\U0001f4be' # 💾 -apply_style_symbol = '\U0001f4cb' # 📋 -clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️ -extra_networks_symbol = '\U0001F3B4' # 🎴 -switch_values_symbol = '\U000021C5' # ⇅ -restore_progress_symbol = '\U0001F300' # 🌀 -detect_image_size_symbol = '\U0001F4D0' # 📐 -up_down_symbol = '\u2195\ufe0f' # ↕️ - - -plaintext_to_html = ui_common.plaintext_to_html - - -def send_gradio_gallery_to_image(x): - if len(x) == 0: - return None - return image_from_url_text(x[0]) - - -def add_style(name: str, prompt: str, negative_prompt: str): - if name is None: - return [gr_show() for x in range(4)] - - style = modules.styles.PromptStyle(name, prompt, negative_prompt) - shared.prompt_styles.styles[style.name] = style - # Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we - # reserialize all styles every time we save them - shared.prompt_styles.save_styles(shared.styles_filename) - - return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)] - - -def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y): - from modules import processing, devices - - if not enable: - return "" - - p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y) - - with devices.autocast(): - p.init([""], [0], [0]) - - return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" - - -def resize_from_to_html(width, height, scale_by): - target_width = int(width * scale_by) - target_height = int(height * scale_by) - - if not target_width or not target_height: - return "no image selected" - - return f"resize: from {width}x{height} to {target_width}x{target_height}" - - -def apply_styles(prompt, prompt_neg, styles): - prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) - prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) - - return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])] - - -def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles): - if mode in {0, 1, 3, 4}: - return [interrogation_function(ii_singles[mode]), None] - elif mode == 2: - return 
[interrogation_function(ii_singles[mode]["image"]), None] - elif mode == 5: - assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" - images = shared.listfiles(ii_input_dir) - print(f"Will process {len(images)} images.") - if ii_output_dir != "": - os.makedirs(ii_output_dir, exist_ok=True) - else: - ii_output_dir = ii_input_dir - - for image in images: - img = Image.open(image) - filename = os.path.basename(image) - left, _ = os.path.splitext(filename) - print(interrogation_function(img), file=open(os.path.join(ii_output_dir, f"{left}.txt"), 'a', encoding='utf-8')) - - return [gr.update(), None] - - -def interrogate(image): - prompt = shared.interrogator.interrogate(image.convert("RGB")) - return gr.update() if prompt is None else prompt - - -def interrogate_deepbooru(image): - prompt = deepbooru.model.tag(image) - return gr.update() if prompt is None else prompt - - -def create_seed_inputs(target_interface): - with FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): - seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") - seed.style(container=False) - random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') - reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') - - seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False) - - # Components to show/hide based on the 'Extra' checkbox - seed_extras = [] - - with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: - seed_extras.append(seed_extra_row_1) - subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed") - subseed.style(container=False) - random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") - reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") - subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") - - with FormRow(visible=False) as seed_extra_row_2: - seed_extras.append(seed_extra_row_2) - seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w") - seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h") - - random_seed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_seed')}", show_progress=False, inputs=[], outputs=[]) - random_subseed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_subseed')}", show_progress=False, inputs=[], outputs=[]) - - def change_visibility(show): - return {comp: gr_show(show) for comp in seed_extras} - - seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras) - - return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox - - - -def connect_clear_prompt(button): - """Given clear button, prompt, and token_counter objects, setup clear prompt button click event""" - button.click( - _js="clear_prompt", - fn=None, - inputs=[], - outputs=[], - ) - - -def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, 
is_subseed): - """ Connects a 'reuse (sub)seed' button's click event so that it copies last used - (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength - was 0, i.e. no variation seed was used, it copies the normal seed value instead.""" - def copy_seed(gen_info_string: str, index): - res = -1 - - try: - gen_info = json.loads(gen_info_string) - index -= gen_info.get('index_of_first_image', 0) - - if is_subseed and gen_info.get('subseed_strength', 0) > 0: - all_subseeds = gen_info.get('all_subseeds', [-1]) - res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0] - else: - all_seeds = gen_info.get('all_seeds', [-1]) - res = all_seeds[index if 0 <= index < len(all_seeds) else 0] - - except json.decoder.JSONDecodeError: - if gen_info_string: - errors.report(f"Error parsing JSON generation info: {gen_info_string}") - - return [res, gr_show(False)] - - reuse_seed.click( - fn=copy_seed, - _js="(x, y) => [x, selected_gallery_index()]", - show_progress=False, - inputs=[generation_info, dummy_component], - outputs=[seed, dummy_component] - ) - - -def update_token_counter(text, steps): - try: - text, _ = extra_networks.parse_prompt(text) - - _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text]) - prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps) - - except Exception: - # a parsing error can happen here during typing, and we don't want to bother the user with - # messages related to it in console - prompt_schedules = [[[steps, text]]] - - flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules) - prompts = [prompt_text for step, prompt_text in flat_prompts] - token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0]) - return f"{token_count}/{max_length}" - - -def create_toprow(is_img2img): - id_part = "img2img" if is_img2img else "txt2img" - - with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): - with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - button_interrogate = None - button_deepbooru = None - if is_img2img: - with gr.Column(scale=1, elem_classes="interrogate-col"): - button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") - button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") - - with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): - with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): - interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") - skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") - submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') - - skip.click( - fn=lambda: shared.state.skip(), - inputs=[], - outputs=[], - ) - - interrupt.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - with gr.Row(elem_id=f"{id_part}_tools"): - 
paste = ToolButton(value=paste_symbol, elem_id="paste") - clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") - extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") - prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") - save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") - restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) - - token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) - token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") - negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) - negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") - - clear_prompt_button.click( - fn=lambda *x: x, - _js="confirm_clear_prompt", - inputs=[prompt, negative_prompt], - outputs=[prompt, negative_prompt], - ) - - with gr.Row(elem_id=f"{id_part}_styles_row"): - prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) - create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") - - return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button - - -def setup_progressbar(*args, **kwargs): - pass - - -def apply_setting(key, value): - if value is None: - return gr.update() - - if shared.cmd_opts.freeze_settings: - return gr.update() - - # dont allow model to be swapped when model hash exists in prompt - if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap: - return gr.update() - - if key == "sd_model_checkpoint": - ckpt_info = sd_models.get_closet_checkpoint_match(value) - - if ckpt_info is not None: - value = ckpt_info.title - else: - return gr.update() - - comp_args = opts.data_labels[key].component_args - if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False: - return - - valtype = type(opts.data_labels[key].default) - oldval = opts.data.get(key, None) - opts.data[key] = valtype(value) if valtype != type(None) else value - if oldval != value and opts.data_labels[key].onchange is not None: - opts.data_labels[key].onchange() - - opts.save(shared.config_filename) - return getattr(opts, key) - - -def create_output_panel(tabname, outdir): - return ui_common.create_output_panel(tabname, outdir) - - -def create_sampler_and_steps_selection(choices, tabname): - if opts.samplers_in_dropdown: - with FormRow(elem_id=f"sampler_selection_{tabname}"): - sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - else: - with FormGroup(elem_id=f"sampler_selection_{tabname}"): - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - sampler_index = gr.Radio(label='Sampling method', 
elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") - - return steps, sampler_index - - -def ordered_ui_categories(): - user_order = {x.strip(): i * 2 + 1 for i, x in enumerate(shared.opts.ui_reorder_list)} - - for _, category in sorted(enumerate(shared_items.ui_reorder_categories()), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): - yield category - - -def create_override_settings_dropdown(tabname, row): - dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True) - - dropdown.change( - fn=lambda x: gr.Dropdown.update(visible=bool(x)), - inputs=[dropdown], - outputs=[dropdown], - ) - - return dropdown - - -def create_ui(): - import modules.img2img - import modules.txt2img - - reload_javascript() - - parameters_copypaste.reset() - - modules.scripts.scripts_current = modules.scripts.scripts_txt2img - modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) - - with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) - - dummy_component = gr.Label(visible=False) - txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) - - with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img') - - with gr.Row().style(equal_height=False): - with gr.Column(variant='compact', elem_id="txt2img_settings"): - modules.scripts.scripts_txt2img.prepare_ui() - - for category in ordered_ui_categories(): - if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") - - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="txt2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - - with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", label="Switch dims") - - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - - elif category == "cfg": - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") - - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') - - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, 
elem_id="txt2img_tiling") - enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr") - hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) - - elif category == "hires_fix": - with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: - with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): - hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") - - with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): - hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") - hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") - hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") - - with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: - hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index") - - with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: - with gr.Column(scale=80): - with gr.Row(): - hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) - with gr.Column(scale=80): - with gr.Row(): - hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) - - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - - elif category == "override_settings": - with FormRow(elem_id="txt2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('txt2img', row) - - elif category == "scripts": - with FormGroup(elem_id="txt2img_script_container"): - custom_inputs = modules.scripts.scripts_txt2img.setup_ui() - - else: - modules.scripts.scripts_txt2img.setup_ui_for_section(category) - - hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] - - for component in hr_resolution_preview_inputs: - event = component.release if isinstance(component, gr.Slider) else component.change - - event( - fn=calc_resolution_hires, - inputs=hr_resolution_preview_inputs, - outputs=[hr_final_resolution], - show_progress=False, - ) - event( - None, - _js="onCalcResolutionHires", - 
inputs=hr_resolution_preview_inputs, - outputs=[], - show_progress=False, - ) - - txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) - - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - - txt2img_args = dict( - fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), - _js="submit", - inputs=[ - dummy_component, - txt2img_prompt, - txt2img_negative_prompt, - txt2img_prompt_styles, - steps, - sampler_index, - restore_faces, - tiling, - batch_count, - batch_size, - cfg_scale, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, - height, - width, - enable_hr, - denoising_strength, - hr_scale, - hr_upscaler, - hr_second_pass_steps, - hr_resize_x, - hr_resize_y, - hr_sampler_index, - hr_prompt, - hr_negative_prompt, - override_settings, - - ] + custom_inputs, - - outputs=[ - txt2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - txt2img_prompt.submit(**txt2img_args) - submit.click(**txt2img_args) - - res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('txt2img')}", inputs=None, outputs=None, show_progress=False) - - restore_progress_button.click( - fn=progress.restore_progress, - _js="restoreProgressTxt2img", - inputs=[dummy_component], - outputs=[ - txt2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - txt_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - txt_prompt_img - ], - outputs=[ - txt2img_prompt, - txt_prompt_img - ], - show_progress=False, - ) - - enable_hr.change( - fn=lambda x: gr_show(x), - inputs=[enable_hr], - outputs=[hr_options], - show_progress = False, - ) - - txt2img_paste_fields = [ - (txt2img_prompt, "Prompt"), - (txt2img_negative_prompt, "Negative prompt"), - (steps, "Steps"), - (sampler_index, "Sampler"), - (restore_faces, "Face restoration"), - (cfg_scale, "CFG scale"), - (seed, "Seed"), - (width, "Size-1"), - (height, "Size-2"), - (batch_size, "Batch size"), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), - (txt2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), - (denoising_strength, "Denoising strength"), - (enable_hr, lambda d: "Denoising strength" in d), - (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), - (hr_scale, "Hires upscale"), - (hr_upscaler, "Hires upscaler"), - (hr_second_pass_steps, "Hires steps"), - (hr_resize_x, "Hires resize-1"), - (hr_resize_y, "Hires resize-2"), - (hr_sampler_index, "Hires sampler"), - (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" else gr.update()), - (hr_prompt, "Hires prompt"), - (hr_negative_prompt, "Hires negative prompt"), - (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), - *modules.scripts.scripts_txt2img.infotext_fields - ] - parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=txt2img_paste, tabname="txt2img", 
source_text_component=txt2img_prompt, source_image_component=None, - )) - - txt2img_preview_params = [ - txt2img_prompt, - txt2img_negative_prompt, - steps, - sampler_index, - cfg_scale, - seed, - width, - height, - ] - - token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter]) - - ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) - - modules.scripts.scripts_current = modules.scripts.scripts_img2img - modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) - - with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) - - img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) - - with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img') - - with FormRow().style(equal_height=False): - with gr.Column(variant='compact', elem_id="img2img_settings"): - copy_image_buttons = [] - copy_image_destinations = {} - - def add_copy_image_controls(tab_name, elem): - with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"): - gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}") - - for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']): - if name == tab_name: - gr.Button(title, interactive=False) - copy_image_destinations[name] = elem - continue - - button = gr.Button(title) - copy_image_buttons.append((button, name, elem)) - - with gr.Tabs(elem_id="mode_img2img"): - img2img_selected_tab = gr.State(0) - - with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: - init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('img2img', init_img) - - with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: - sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('sketch', sketch) - - with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: - init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('inpaint', init_img_with_mask) - - with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, 
elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) - inpaint_color_sketch_orig = gr.State(None) - add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) - - def update_orig(image, state): - if image is not None: - same_size = state is not None and state.size == image.size - has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) - edited = same_size and has_exact_match - return image if not edited or state is None else state - - inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) - - with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: - init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") - init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask") - - with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: - hidden = '
Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' - gr.HTML( - "<p>Process images in a directory on the same machine where the server is running." + - "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." + - f"<br>Add inpaint batch mask directory to enable inpaint batch processing." - f"{hidden}</p>
" - ) - img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") - img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") - img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") - with gr.Accordion("PNG info", open=False): - img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") - img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") - img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") - - img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] - - for i, tab in enumerate(img2img_tabs): - tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) - - def copy_image(img): - if isinstance(img, dict) and 'image' in img: - return img['image'] - - return img - - for button, name, elem in copy_image_buttons: - button.click( - fn=copy_image, - inputs=[elem], - outputs=[copy_image_destinations[name]], - ) - button.click( - fn=lambda: None, - _js=f"switch_to_{name.replace(' ', '_')}", - inputs=[], - outputs=[], - ) - - with FormRow(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") - - modules.scripts.scripts_img2img.prepare_ui() - - for category in ordered_ui_categories(): - if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") - - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - selected_scale_tab = gr.State(value=0) - - with gr.Tabs(): - with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") - detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn") - - with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: - scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") - - with FormRow(): - scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") - gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") - button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") - - on_change_args = dict( - fn=resize_from_to_html, - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, scale_by], - 
outputs=scale_by_html, - show_progress=False, - ) - - scale_by.release(**on_change_args) - button_update_resize_to.click(**on_change_args) - - # the code below is meant to update the resolution label after the image in the image selection UI has changed. - # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. - # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. - for component in [init_img, sketch]: - component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) - - tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) - tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) - - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - - elif category == "cfg": - with FormGroup(): - with FormRow(): - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") - image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") - - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') - - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") - - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - - elif category == "override_settings": - with FormRow(elem_id="img2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('img2img', row) - - elif category == "scripts": - with FormGroup(elem_id="img2img_script_container"): - custom_inputs = modules.scripts.scripts_img2img.setup_ui() - - elif category == "inpaint": - with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: - with FormRow(): - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") - mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha") - - with FormRow(): - inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") - - with FormRow(): - inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") - - with FormRow(): - with gr.Column(): - inpaint_full_res = 
gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") - - with gr.Column(scale=4): - inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") - - def select_img2img_tab(tab): - return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), - - for i, elem in enumerate(img2img_tabs): - elem.select( - fn=lambda tab=i: select_img2img_tab(tab), - inputs=[], - outputs=[inpaint_controls, mask_alpha], - ) - else: - modules.scripts.scripts_img2img.setup_ui_for_section(category) - - img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) - - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - - img2img_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - img2img_prompt_img - ], - outputs=[ - img2img_prompt, - img2img_prompt_img - ], - show_progress=False, - ) - - img2img_args = dict( - fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), - _js="submit_img2img", - inputs=[ - dummy_component, - dummy_component, - img2img_prompt, - img2img_negative_prompt, - img2img_prompt_styles, - init_img, - sketch, - init_img_with_mask, - inpaint_color_sketch, - inpaint_color_sketch_orig, - init_img_inpaint, - init_mask_inpaint, - steps, - sampler_index, - mask_blur, - mask_alpha, - inpainting_fill, - restore_faces, - tiling, - batch_count, - batch_size, - cfg_scale, - image_cfg_scale, - denoising_strength, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, - selected_scale_tab, - height, - width, - scale_by, - resize_mode, - inpaint_full_res, - inpaint_full_res_padding, - inpainting_mask_invert, - img2img_batch_input_dir, - img2img_batch_output_dir, - img2img_batch_inpaint_mask_dir, - override_settings, - img2img_batch_use_png_info, - img2img_batch_png_info_props, - img2img_batch_png_info_dir, - ] + custom_inputs, - outputs=[ - img2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - interrogate_args = dict( - _js="get_img2img_tab_index", - inputs=[ - dummy_component, - img2img_batch_input_dir, - img2img_batch_output_dir, - init_img, - sketch, - init_img_with_mask, - inpaint_color_sketch, - init_img_inpaint, - ], - outputs=[img2img_prompt, dummy_component], - ) - - img2img_prompt.submit(**img2img_args) - submit.click(**img2img_args) - - res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False) - - detect_image_size_btn.click( - fn=lambda w, h, _: (w or gr.update(), h or gr.update()), - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, dummy_component], - outputs=[width, height], - show_progress=False, - ) - - restore_progress_button.click( - fn=progress.restore_progress, - _js="restoreProgressImg2img", - inputs=[dummy_component], - outputs=[ - img2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - img2img_interrogate.click( - fn=lambda *args: process_interrogate(interrogate, *args), - **interrogate_args, - ) - - img2img_deepbooru.click( - fn=lambda *args: process_interrogate(interrogate_deepbooru, *args), - **interrogate_args, - ) - - prompts = [(txt2img_prompt, 
txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] - style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles] - style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"] - - for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts): - button.click( - fn=add_style, - _js="ask_for_style_name", - # Have to pass empty dummy component here, because the JavaScript and Python function have to accept - # the same number of parameters, but we only know the style-name after the JavaScript prompt - inputs=[dummy_component, prompt, negative_prompt], - outputs=[txt2img_prompt_styles, img2img_prompt_styles], - ) - - for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs): - button.click( - fn=apply_styles, - _js=js_func, - inputs=[prompt, negative_prompt, styles], - outputs=[prompt, negative_prompt, styles], - ) - - token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter]) - - ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) - - img2img_paste_fields = [ - (img2img_prompt, "Prompt"), - (img2img_negative_prompt, "Negative prompt"), - (steps, "Steps"), - (sampler_index, "Sampler"), - (restore_faces, "Face restoration"), - (cfg_scale, "CFG scale"), - (image_cfg_scale, "Image CFG scale"), - (seed, "Seed"), - (width, "Size-1"), - (height, "Size-2"), - (batch_size, "Batch size"), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), - (img2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), - (denoising_strength, "Denoising strength"), - (mask_blur, "Mask blur"), - *modules.scripts.scripts_img2img.infotext_fields - ] - parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) - parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, - )) - - modules.scripts.scripts_current = None - - with gr.Blocks(analytics_enabled=False) as extras_interface: - ui_postprocessing.create_ui() - - with gr.Blocks(analytics_enabled=False) as pnginfo_interface: - with gr.Row().style(equal_height=False): - with gr.Column(variant='panel'): - image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil") - - with gr.Column(variant='panel'): - html = gr.HTML() - generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info") - html2 = gr.HTML() - with gr.Row(): - buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"]) - - for tabname, button in buttons.items(): - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image, - )) - - image.change( - fn=wrap_gradio_call(modules.extras.run_pnginfo), - inputs=[image], - outputs=[html, generation_info, html2], - ) - - 
def update_interp_description(value): - interp_description_css = "<p>{}</p>
" - interp_descriptions = { - "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."), - "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"), - "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M") - } - return interp_descriptions[value] - - with gr.Blocks(analytics_enabled=False) as modelmerger_interface: - with gr.Row().style(equal_height=False): - with gr.Column(variant='compact'): - interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description") - - with FormRow(elem_id="modelmerger_models"): - primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)") - create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A") - - secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") - create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B") - - tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") - create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") - - custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name") - interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount") - interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method") - interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description]) - - with FormRow(): - checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") - save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") - save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") - - with FormRow(): - with gr.Column(): - config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method") - - with gr.Column(): - with FormRow(): - bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae") - create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae") - - with FormRow(): - discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights") - - with gr.Row(): - modelmerger_merge = gr.Button(elem_id="modelmerger_merge", 
value="Merge", variant='primary') - - with gr.Column(variant='compact', elem_id="modelmerger_results_container"): - with gr.Group(elem_id="modelmerger_results_panel"): - modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False) - - with gr.Blocks(analytics_enabled=False) as train_interface: - with gr.Row().style(equal_height=False): - gr.HTML(value="
See wiki for detailed explanation.
") - - with gr.Row(variant="compact").style(equal_height=False): - with gr.Tabs(elem_id="train_tabs"): - - with gr.Tab(label="Create embedding", id="create_embedding"): - new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name") - initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text") - nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt") - overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding") - - with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"): - new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name") - new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes") - new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure") - new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func") - new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option") - new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm") - new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout") - new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. 
ex:'0, 0.01, 0'") - overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork") - - with gr.Tab(label="Preprocess images", id="preprocess_images"): - process_src = gr.Textbox(label='Source directory', elem_id="train_process_src") - process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst") - process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width") - process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height") - preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action") - - with gr.Row(): - process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size") - process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip") - process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split") - process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop") - process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop") - process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption") - process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru") - - with gr.Row(visible=False) as process_split_extra_row: - process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold") - process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio") - - with gr.Row(visible=False) as process_focal_crop_row: - process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight") - process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") - process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") - process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") - - with gr.Column(visible=False) as process_multicrop_col: - gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') - with gr.Row(): - process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim") - process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim") - with gr.Row(): - process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea") - process_multicrop_maxarea = 
gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea") - with gr.Row(): - process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective") - process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - with gr.Row(): - interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing") - run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess") - - process_split.change( - fn=lambda show: gr_show(show), - inputs=[process_split], - outputs=[process_split_extra_row], - ) - - process_focal_crop.change( - fn=lambda show: gr_show(show), - inputs=[process_focal_crop], - outputs=[process_focal_crop_row], - ) - - process_multicrop.change( - fn=lambda show: gr_show(show), - inputs=[process_multicrop], - outputs=[process_multicrop_col], - ) - - def get_textual_inversion_template_names(): - return sorted(textual_inversion.textual_inversion_templates) - - with gr.Tab(label="Train", id="train"): - gr.HTML(value="
Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]
") - with FormRow(): - train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) - create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name") - - train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks)) - create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name") - - with FormRow(): - embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") - hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate") - - with FormRow(): - clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) - clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False) - - with FormRow(): - batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size") - gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step") - - dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory") - log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory") - - with FormRow(): - template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names()) - create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refrsh_train_template_file") - - training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width") - training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height") - varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize") - steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps") - - with FormRow(): - create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every") - save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every") - - use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight") - - save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding") - preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) 
from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img") - - shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags") - tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out") - - latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method") - - with gr.Row(): - train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding") - interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training") - train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork") - - params = script_callbacks.UiTrainTabParams(txt2img_preview_params) - - script_callbacks.ui_train_tabs_callback(params) - - with gr.Column(elem_id='ti_gallery_container'): - ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) - gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) - gr.HTML(elem_id="ti_progress", value="") - ti_outcome = gr.HTML(elem_id="ti_error", value="") - - create_embedding.click( - fn=modules.textual_inversion.ui.create_embedding, - inputs=[ - new_embedding_name, - initialization_text, - nvpt, - overwrite_old_embedding, - ], - outputs=[ - train_embedding_name, - ti_output, - ti_outcome, - ] - ) - - create_hypernetwork.click( - fn=modules.hypernetworks.ui.create_hypernetwork, - inputs=[ - new_hypernetwork_name, - new_hypernetwork_sizes, - overwrite_old_hypernetwork, - new_hypernetwork_layer_structure, - new_hypernetwork_activation_func, - new_hypernetwork_initialization_option, - new_hypernetwork_add_layer_norm, - new_hypernetwork_use_dropout, - new_hypernetwork_dropout_structure - ], - outputs=[ - train_hypernetwork_name, - ti_output, - ti_outcome, - ] - ) - - run_preprocess.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - process_src, - process_dst, - process_width, - process_height, - preprocess_txt_action, - process_keep_original_size, - process_flip, - process_split, - process_caption, - process_caption_deepbooru, - process_split_threshold, - process_overlap_ratio, - process_focal_crop, - process_focal_crop_face_weight, - process_focal_crop_entropy_weight, - process_focal_crop_edges_weight, - process_focal_crop_debug, - process_multicrop, - process_multicrop_mindim, - process_multicrop_maxdim, - process_multicrop_minarea, - process_multicrop_maxarea, - process_multicrop_objective, - process_multicrop_threshold, - ], - outputs=[ - ti_output, - ti_outcome, - ], - ) - - train_embedding.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - train_embedding_name, - embedding_learn_rate, - batch_size, - gradient_step, - dataset_directory, - log_directory, - training_width, - training_height, - varsize, - steps, - clip_grad_mode, - clip_grad_value, - shuffle_tags, - tag_drop_out, - latent_sampling_method, - use_weight, - create_image_every, - save_embedding_every, - template_file, - save_image_with_stored_embedding, - preview_from_txt2img, - *txt2img_preview_params, - ], - outputs=[ - ti_output, - ti_outcome, - ] - ) - 
- train_hypernetwork.click( - fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - train_hypernetwork_name, - hypernetwork_learn_rate, - batch_size, - gradient_step, - dataset_directory, - log_directory, - training_width, - training_height, - varsize, - steps, - clip_grad_mode, - clip_grad_value, - shuffle_tags, - tag_drop_out, - latent_sampling_method, - use_weight, - create_image_every, - save_embedding_every, - template_file, - preview_from_txt2img, - *txt2img_preview_params, - ], - outputs=[ - ti_output, - ti_outcome, - ] - ) - - interrupt_training.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - interrupt_preprocessing.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file) - - settings = ui_settings.UiSettings() - settings.create_ui(loadsave, dummy_component) - - interfaces = [ - (txt2img_interface, "txt2img", "txt2img"), - (img2img_interface, "img2img", "img2img"), - (extras_interface, "Extras", "extras"), - (pnginfo_interface, "PNG Info", "pnginfo"), - (modelmerger_interface, "Checkpoint Merger", "modelmerger"), - (train_interface, "Train", "train"), - ] - - interfaces += script_callbacks.ui_tabs_callback() - interfaces += [(settings.interface, "Settings", "settings")] - - extensions_interface = ui_extensions.create_ui() - interfaces += [(extensions_interface, "Extensions", "extensions")] - - shared.tab_names = [] - for _interface, label, _ifid in interfaces: - shared.tab_names.append(label) - - with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: - settings.add_quicksettings() - - parameters_copypaste.connect_paste_params_buttons() - - with gr.Tabs(elem_id="tabs") as tabs: - tab_order = {k: i for i, k in enumerate(opts.ui_tab_order)} - sorted_interfaces = sorted(interfaces, key=lambda x: tab_order.get(x[1], 9999)) - - for interface, label, ifid in sorted_interfaces: - if label in shared.opts.hidden_tabs: - continue - with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): - interface.render() - - for interface, _label, ifid in interfaces: - if ifid in ["extensions", "settings"]: - continue - - loadsave.add_block(interface, ifid) - - loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs) - - loadsave.setup_ui() - - if os.path.exists(os.path.join(script_path, "notification.mp3")): - gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) - - footer = shared.html("footer.html") - footer = footer.format(versions=versions_html(), api_docs="/docs" if shared.cmd_opts.api else "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API") - gr.HTML(footer, elem_id="footer") - - settings.add_functionality(demo) - - update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") - settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - - def modelmerger(*args): - try: - results = modules.extras.run_modelmerger(*args) - except Exception as e: - errors.report("Error loading/saving model file", exc_info=True) - modules.sd_models.list_models() # to remove the potentially missing models from the list - return 
[*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"] - return results - - modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result]) - modelmerger_merge.click( - fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]), - _js='modelmerger', - inputs=[ - dummy_component, - primary_model_name, - secondary_model_name, - tertiary_model_name, - interp_method, - interp_amount, - save_as_half, - custom_name, - checkpoint_format, - config_source, - bake_in_vae, - discard_weights, - save_metadata, - ], - outputs=[ - primary_model_name, - secondary_model_name, - tertiary_model_name, - settings.component_dict['sd_model_checkpoint'], - modelmerger_result, - ] - ) - - loadsave.dump_defaults() - demo.ui_loadsave = loadsave - - # Required as a workaround for change() event not triggering when loading values from ui-config.json - interp_description.value = update_interp_description(interp_method.value) - - return demo - - -def versions_html(): - import torch - import launch - - python_version = ".".join([str(x) for x in sys.version_info[0:3]]) - commit = launch.commit_hash() - tag = launch.git_tag() - - if shared.xformers_available: - import xformers - xformers_version = xformers.__version__ - else: - xformers_version = "N/A" - - return f""" -version: {tag} - •  -python: {python_version} - •  -torch: {getattr(torch, '__long_version__',torch.__version__)} - •  -xformers: {xformers_version} - •  -gradio: {gr.__version__} - •  -checkpoint: N/A -""" - - -def setup_ui_api(app): - from pydantic import BaseModel, Field - from typing import List - - class QuicksettingsHint(BaseModel): - name: str = Field(title="Name of the quicksettings field") - label: str = Field(title="Label of the quicksettings field") - - def quicksettings_hint(): - return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()] - - app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint]) - - app.add_api_route("/internal/ping", lambda: {}, methods=["GET"]) - - app.add_api_route("/internal/profile-startup", lambda: timer.startup_record, methods=["GET"]) - - def download_sysinfo(attachment=False): - from fastapi.responses import PlainTextResponse - - text = sysinfo.get() - filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt" - - return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'}) - - app.add_api_route("/internal/sysinfo", download_sysinfo, methods=["GET"]) - app.add_api_route("/internal/sysinfo-download", lambda: download_sysinfo(attachment=True), methods=["GET"]) - diff --git a/temp b/temp new file mode 100644 index 00000000..07ecee7b --- /dev/null +++ b/temp @@ -0,0 +1,1621 @@ +import datetime +import json +import mimetypes +import os +import sys +from functools import reduce +import warnings + +import gradio as gr +import gradio.utils +import numpy as np +from PIL import Image, PngImagePlugin # noqa: F401 +from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call + +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo +from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML +from modules.paths import script_path 
+from modules.ui_common import create_refresh_button +from modules.ui_gradio_extensions import reload_javascript + + +from modules.shared import opts, cmd_opts + +import modules.codeformer_model +import modules.generation_parameters_copypaste as parameters_copypaste +import modules.gfpgan_model +import modules.hypernetworks.ui +import modules.scripts +import modules.shared as shared +import modules.styles +import modules.textual_inversion.ui +from modules import prompt_parser +from modules.sd_hijack import model_hijack +from modules.sd_samplers import samplers, samplers_for_img2img +from modules.textual_inversion import textual_inversion +import modules.hypernetworks.ui +from modules.generation_parameters_copypaste import image_from_url_text +import modules.extras + +create_setting_component = ui_settings.create_setting_component + +warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning) + +# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI +mimetypes.init() +mimetypes.add_type('application/javascript', '.js') + +if not cmd_opts.share and not cmd_opts.listen: + # fix gradio phoning home + gradio.utils.version_check = lambda: None + gradio.utils.get_local_ip_address = lambda: '127.0.0.1' + +if cmd_opts.ngrok is not None: + import modules.ngrok as ngrok + print('ngrok authtoken detected, trying to connect...') + ngrok.connect( + cmd_opts.ngrok, + cmd_opts.port if cmd_opts.port is not None else 7860, + cmd_opts.ngrok_options + ) + + +def gr_show(visible=True): + return {"visible": visible, "__type__": "update"} + + +sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg" +sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None + +# Using constants for these since the variation selector isn't visible. +# Important that they exactly match script.js for tooltip to work. 
+random_symbol = '\U0001f3b2\ufe0f' # 🎲️ +reuse_symbol = '\u267b\ufe0f' # ♻️ +paste_symbol = '\u2199\ufe0f' # ↙ +refresh_symbol = '\U0001f504' # 🔄 +save_style_symbol = '\U0001f4be' # 💾 +apply_style_symbol = '\U0001f4cb' # 📋 +clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️ +extra_networks_symbol = '\U0001F3B4' # 🎴 +switch_values_symbol = '\U000021C5' # ⇅ +restore_progress_symbol = '\U0001F300' # 🌀 +detect_image_size_symbol = '\U0001F4D0' # 📐 +up_down_symbol = '\u2195\ufe0f' # ↕️ + + +plaintext_to_html = ui_common.plaintext_to_html + + +def send_gradio_gallery_to_image(x): + if len(x) == 0: + return None + return image_from_url_text(x[0]) + + +def add_style(name: str, prompt: str, negative_prompt: str): + if name is None: + return [gr_show() for x in range(4)] + + style = modules.styles.PromptStyle(name, prompt, negative_prompt) + shared.prompt_styles.styles[style.name] = style + # Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we + # reserialize all styles every time we save them + shared.prompt_styles.save_styles(shared.styles_filename) + + return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)] + + +def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y): + from modules import processing, devices + + if not enable: + return "" + + p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y) + + with devices.autocast(): + p.init([""], [0], [0]) + + return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" + + +def resize_from_to_html(width, height, scale_by): + target_width = int(width * scale_by) + target_height = int(height * scale_by) + + if not target_width or not target_height: + return "no image selected" + + return f"resize: from {width}x{height} to {target_width}x{target_height}" + + +def apply_styles(prompt, prompt_neg, styles): + prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) + prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) + + return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])] + + +def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles): + if mode in {0, 1, 3, 4}: + return [interrogation_function(ii_singles[mode]), None] + elif mode == 2: + return [interrogation_function(ii_singles[mode]["image"]), None] + elif mode == 5: + assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" + images = shared.listfiles(ii_input_dir) + print(f"Will process {len(images)} images.") + if ii_output_dir != "": + os.makedirs(ii_output_dir, exist_ok=True) + else: + ii_output_dir = ii_input_dir + + for image in images: + img = Image.open(image) + filename = os.path.basename(image) + left, _ = os.path.splitext(filename) + print(interrogation_function(img), file=open(os.path.join(ii_output_dir, f"{left}.txt"), 'a', encoding='utf-8')) + + return [gr.update(), None] + + +def interrogate(image): + prompt = shared.interrogator.interrogate(image.convert("RGB")) + return gr.update() if prompt is None else prompt + + +def interrogate_deepbooru(image): + prompt = deepbooru.model.tag(image) + return gr.update() if prompt is None else prompt + + +def create_seed_inputs(target_interface): + with 
FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): + seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") + seed.style(container=False) + random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') + reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') + + seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False) + + # Components to show/hide based on the 'Extra' checkbox + seed_extras = [] + + with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: + seed_extras.append(seed_extra_row_1) + subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed") + subseed.style(container=False) + random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") + reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") + subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") + + with FormRow(visible=False) as seed_extra_row_2: + seed_extras.append(seed_extra_row_2) + seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w") + seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h") + + random_seed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_seed')}", show_progress=False, inputs=[], outputs=[]) + random_subseed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_subseed')}", show_progress=False, inputs=[], outputs=[]) + + def change_visibility(show): + return {comp: gr_show(show) for comp in seed_extras} + + seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras) + + return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox + + + +def connect_clear_prompt(button): + """Given clear button, prompt, and token_counter objects, setup clear prompt button click event""" + button.click( + _js="clear_prompt", + fn=None, + inputs=[], + outputs=[], + ) + + +def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed): + """ Connects a 'reuse (sub)seed' button's click event so that it copies last used + (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength + was 0, i.e. 
no variation seed was used, it copies the normal seed value instead.""" + def copy_seed(gen_info_string: str, index): + res = -1 + + try: + gen_info = json.loads(gen_info_string) + index -= gen_info.get('index_of_first_image', 0) + + if is_subseed and gen_info.get('subseed_strength', 0) > 0: + all_subseeds = gen_info.get('all_subseeds', [-1]) + res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0] + else: + all_seeds = gen_info.get('all_seeds', [-1]) + res = all_seeds[index if 0 <= index < len(all_seeds) else 0] + + except json.decoder.JSONDecodeError: + if gen_info_string: + errors.report(f"Error parsing JSON generation info: {gen_info_string}") + + return [res, gr_show(False)] + + reuse_seed.click( + fn=copy_seed, + _js="(x, y) => [x, selected_gallery_index()]", + show_progress=False, + inputs=[generation_info, dummy_component], + outputs=[seed, dummy_component] + ) + + +def update_token_counter(text, steps): + try: + text, _ = extra_networks.parse_prompt(text) + + _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text]) + prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps) + + except Exception: + # a parsing error can happen here during typing, and we don't want to bother the user with + # messages related to it in console + prompt_schedules = [[[steps, text]]] + + flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules) + prompts = [prompt_text for step, prompt_text in flat_prompts] + token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0]) + return f"{token_count}/{max_length}" + + +def create_toprow(is_img2img): + id_part = "img2img" if is_img2img else "txt2img" + + with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): + with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): + with gr.Row(): + with gr.Column(scale=80): + with gr.Row(): + prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + + with gr.Row(): + with gr.Column(scale=80): + with gr.Row(): + negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + + button_interrogate = None + button_deepbooru = None + if is_img2img: + with gr.Column(scale=1, elem_classes="interrogate-col"): + button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") + button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") + + with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): + with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): + interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") + skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") + submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') + + skip.click( + fn=lambda: shared.state.skip(), + inputs=[], + outputs=[], + ) + + interrupt.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + + with gr.Row(elem_id=f"{id_part}_tools"): + paste = ToolButton(value=paste_symbol, elem_id="paste") + clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") + extra_networks_button = 
ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") + prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") + save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") + restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) + + token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) + token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") + negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) + negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") + + clear_prompt_button.click( + fn=lambda *x: x, + _js="confirm_clear_prompt", + inputs=[prompt, negative_prompt], + outputs=[prompt, negative_prompt], + ) + + with gr.Row(elem_id=f"{id_part}_styles_row"): + prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) + create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") + + return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button + + +def setup_progressbar(*args, **kwargs): + pass + + +def apply_setting(key, value): + if value is None: + return gr.update() + + if shared.cmd_opts.freeze_settings: + return gr.update() + + # dont allow model to be swapped when model hash exists in prompt + if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap: + return gr.update() + + if key == "sd_model_checkpoint": + ckpt_info = sd_models.get_closet_checkpoint_match(value) + + if ckpt_info is not None: + value = ckpt_info.title + else: + return gr.update() + + comp_args = opts.data_labels[key].component_args + if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False: + return + + valtype = type(opts.data_labels[key].default) + oldval = opts.data.get(key, None) + opts.data[key] = valtype(value) if valtype != type(None) else value + if oldval != value and opts.data_labels[key].onchange is not None: + opts.data_labels[key].onchange() + + opts.save(shared.config_filename) + return getattr(opts, key) + + +def create_output_panel(tabname, outdir): + return ui_common.create_output_panel(tabname, outdir) + + +def create_sampler_and_steps_selection(choices, tabname): + if opts.samplers_in_dropdown: + with FormRow(elem_id=f"sampler_selection_{tabname}"): + sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) + else: + with FormGroup(elem_id=f"sampler_selection_{tabname}"): + steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) + sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + + return steps, sampler_index + + +def ordered_ui_categories(): + user_order = {x.strip(): i * 
2 + 1 for i, x in enumerate(shared.opts.ui_reorder_list)} + + for _, category in sorted(enumerate(shared_items.ui_reorder_categories()), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): + yield category + + +def create_override_settings_dropdown(tabname, row): + dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True) + + dropdown.change( + fn=lambda x: gr.Dropdown.update(visible=bool(x)), + inputs=[dropdown], + outputs=[dropdown], + ) + + return dropdown + + +def create_ui(): + import modules.img2img + import modules.txt2img + + reload_javascript() + + parameters_copypaste.reset() + + modules.scripts.scripts_current = modules.scripts.scripts_txt2img + modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) + + with gr.Blocks(analytics_enabled=False) as txt2img_interface: + txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) + + dummy_component = gr.Label(visible=False) + txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) + + with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: + from modules import ui_extra_networks + extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img') + + with gr.Row().style(equal_height=False): + with gr.Column(variant='compact', elem_id="txt2img_settings"): + modules.scripts.scripts_txt2img.prepare_ui() + + for category in ordered_ui_categories(): + if category == "sampler": + steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") + + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="txt2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") + + with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", label="Switch dims") + + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + + elif category == "cfg": + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") + + elif category == "seed": + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') + + elif category == "checkboxes": + with FormRow(elem_classes="checkboxes-row", variant="compact"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") + enable_hr = gr.Checkbox(label='Hires. 
fix', value=False, elem_id="txt2img_enable_hr") + hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) + + elif category == "hires_fix": + with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: + with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): + hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + + with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): + hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") + hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") + hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") + + with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: + hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index") + + with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: + with gr.Column(scale=80): + with gr.Row(): + hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) + with gr.Column(scale=80): + with gr.Row(): + hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) + + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + + elif category == "override_settings": + with FormRow(elem_id="txt2img_override_settings_row") as row: + override_settings = create_override_settings_dropdown('txt2img', row) + + elif category == "scripts": + with FormGroup(elem_id="txt2img_script_container"): + custom_inputs = modules.scripts.scripts_txt2img.setup_ui() + + else: + modules.scripts.scripts_txt2img.setup_ui_for_section(category) + + hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] + + for component in hr_resolution_preview_inputs: + event = component.release if isinstance(component, gr.Slider) else component.change + + event( + fn=calc_resolution_hires, + inputs=hr_resolution_preview_inputs, + outputs=[hr_final_resolution], + show_progress=False, + ) + event( + None, + _js="onCalcResolutionHires", + inputs=hr_resolution_preview_inputs, + outputs=[], + show_progress=False, + ) + + txt2img_gallery, 
generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) + + connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) + connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) + + txt2img_args = dict( + fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), + _js="submit", + inputs=[ + dummy_component, + txt2img_prompt, + txt2img_negative_prompt, + txt2img_prompt_styles, + steps, + sampler_index, + restore_faces, + tiling, + batch_count, + batch_size, + cfg_scale, + seed, + subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, + height, + width, + enable_hr, + denoising_strength, + hr_scale, + hr_upscaler, + hr_second_pass_steps, + hr_resize_x, + hr_resize_y, + hr_sampler_index, + hr_prompt, + hr_negative_prompt, + override_settings, + + ] + custom_inputs, + + outputs=[ + txt2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + txt2img_prompt.submit(**txt2img_args) + submit.click(**txt2img_args) + + res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('txt2img')}", inputs=None, outputs=None, show_progress=False) + + restore_progress_button.click( + fn=progress.restore_progress, + _js="restoreProgressTxt2img", + inputs=[dummy_component], + outputs=[ + txt2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + txt_prompt_img.change( + fn=modules.images.image_data, + inputs=[ + txt_prompt_img + ], + outputs=[ + txt2img_prompt, + txt_prompt_img + ], + show_progress=False, + ) + + enable_hr.change( + fn=lambda x: gr_show(x), + inputs=[enable_hr], + outputs=[hr_options], + show_progress = False, + ) + + txt2img_paste_fields = [ + (txt2img_prompt, "Prompt"), + (txt2img_negative_prompt, "Negative prompt"), + (steps, "Steps"), + (sampler_index, "Sampler"), + (restore_faces, "Face restoration"), + (cfg_scale, "CFG scale"), + (seed, "Seed"), + (width, "Size-1"), + (height, "Size-2"), + (batch_size, "Batch size"), + (subseed, "Variation seed"), + (subseed_strength, "Variation seed strength"), + (seed_resize_from_w, "Seed resize from-1"), + (seed_resize_from_h, "Seed resize from-2"), + (txt2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), + (denoising_strength, "Denoising strength"), + (enable_hr, lambda d: "Denoising strength" in d), + (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), + (hr_scale, "Hires upscale"), + (hr_upscaler, "Hires upscaler"), + (hr_second_pass_steps, "Hires steps"), + (hr_resize_x, "Hires resize-1"), + (hr_resize_y, "Hires resize-2"), + (hr_sampler_index, "Hires sampler"), + (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" else gr.update()), + (hr_prompt, "Hires prompt"), + (hr_negative_prompt, "Hires negative prompt"), + (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), + *modules.scripts.scripts_txt2img.infotext_fields + ] + parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) + parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( + paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, + )) + + txt2img_preview_params = [ + 
txt2img_prompt, + txt2img_negative_prompt, + steps, + sampler_index, + cfg_scale, + seed, + width, + height, + ] + + token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter]) + negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter]) + + ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) + + modules.scripts.scripts_current = modules.scripts.scripts_img2img + modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) + + with gr.Blocks(analytics_enabled=False) as img2img_interface: + img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) + + img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) + + with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: + from modules import ui_extra_networks + extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img') + + with FormRow().style(equal_height=False): + with gr.Column(variant='compact', elem_id="img2img_settings"): + copy_image_buttons = [] + copy_image_destinations = {} + + def add_copy_image_controls(tab_name, elem): + with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"): + gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}") + + for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']): + if name == tab_name: + gr.Button(title, interactive=False) + copy_image_destinations[name] = elem + continue + + button = gr.Button(title) + copy_image_buttons.append((button, name, elem)) + + with gr.Tabs(elem_id="mode_img2img"): + img2img_selected_tab = gr.State(0) + + with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: + init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=opts.img2img_editor_height) + add_copy_image_controls('img2img', init_img) + + with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: + sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) + add_copy_image_controls('sketch', sketch) + + with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: + init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) + add_copy_image_controls('inpaint', init_img_with_mask) + + with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: + inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", 
image_mode="RGBA").style(height=opts.img2img_editor_height) + inpaint_color_sketch_orig = gr.State(None) + add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) + + def update_orig(image, state): + if image is not None: + same_size = state is not None and state.size == image.size + has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) + edited = same_size and has_exact_match + return image if not edited or state is None else state + + inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) + + with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: + init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") + init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask") + + with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: + hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' + gr.HTML( + "<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." + + "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." + + f"<br>Add inpaint batch mask directory to enable inpaint batch processing." + f"{hidden}</p>
" + ) + img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") + img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") + img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") + with gr.Accordion("PNG info", open=False): + img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") + img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") + img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") + + img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] + + for i, tab in enumerate(img2img_tabs): + tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) + + def copy_image(img): + if isinstance(img, dict) and 'image' in img: + return img['image'] + + return img + + for button, name, elem in copy_image_buttons: + button.click( + fn=copy_image, + inputs=[elem], + outputs=[copy_image_destinations[name]], + ) + button.click( + fn=lambda: None, + _js=f"switch_to_{name.replace(' ', '_')}", + inputs=[], + outputs=[], + ) + + with FormRow(): + resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") + + modules.scripts.scripts_img2img.prepare_ui() + + for category in ordered_ui_categories(): + if category == "sampler": + steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") + + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + selected_scale_tab = gr.State(value=0) + + with gr.Tabs(): + with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") + detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn") + + with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: + scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") + + with FormRow(): + scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") + gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") + button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") + + on_change_args = dict( + fn=resize_from_to_html, + _js="currentImg2imgSourceResolution", + inputs=[dummy_component, dummy_component, scale_by], + 
outputs=scale_by_html, + show_progress=False, + ) + + scale_by.release(**on_change_args) + button_update_resize_to.click(**on_change_args) + + # the code below is meant to update the resolution label after the image in the image selection UI has changed. + # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. + # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. + for component in [init_img, sketch]: + component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) + + tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) + tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) + + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + + elif category == "cfg": + with FormGroup(): + with FormRow(): + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") + image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") + + elif category == "seed": + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') + + elif category == "checkboxes": + with FormRow(elem_classes="checkboxes-row", variant="compact"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") + + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + + elif category == "override_settings": + with FormRow(elem_id="img2img_override_settings_row") as row: + override_settings = create_override_settings_dropdown('img2img', row) + + elif category == "scripts": + with FormGroup(elem_id="img2img_script_container"): + custom_inputs = modules.scripts.scripts_img2img.setup_ui() + + elif category == "inpaint": + with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: + with FormRow(): + mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") + mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha") + + with FormRow(): + inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") + + with FormRow(): + inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") + + with FormRow(): + with gr.Column(): + inpaint_full_res = 
gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") + + with gr.Column(scale=4): + inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") + + def select_img2img_tab(tab): + return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), + + for i, elem in enumerate(img2img_tabs): + elem.select( + fn=lambda tab=i: select_img2img_tab(tab), + inputs=[], + outputs=[inpaint_controls, mask_alpha], + ) + else: + modules.scripts.scripts_img2img.setup_ui_for_section(category) + + img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) + + connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) + connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) + + img2img_prompt_img.change( + fn=modules.images.image_data, + inputs=[ + img2img_prompt_img + ], + outputs=[ + img2img_prompt, + img2img_prompt_img + ], + show_progress=False, + ) + + img2img_args = dict( + fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), + _js="submit_img2img", + inputs=[ + dummy_component, + dummy_component, + img2img_prompt, + img2img_negative_prompt, + img2img_prompt_styles, + init_img, + sketch, + init_img_with_mask, + inpaint_color_sketch, + inpaint_color_sketch_orig, + init_img_inpaint, + init_mask_inpaint, + steps, + sampler_index, + mask_blur, + mask_alpha, + inpainting_fill, + restore_faces, + tiling, + batch_count, + batch_size, + cfg_scale, + image_cfg_scale, + denoising_strength, + seed, + subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, + selected_scale_tab, + height, + width, + scale_by, + resize_mode, + inpaint_full_res, + inpaint_full_res_padding, + inpainting_mask_invert, + img2img_batch_input_dir, + img2img_batch_output_dir, + img2img_batch_inpaint_mask_dir, + override_settings, + img2img_batch_use_png_info, + img2img_batch_png_info_props, + img2img_batch_png_info_dir, + ] + custom_inputs, + outputs=[ + img2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + interrogate_args = dict( + _js="get_img2img_tab_index", + inputs=[ + dummy_component, + img2img_batch_input_dir, + img2img_batch_output_dir, + init_img, + sketch, + init_img_with_mask, + inpaint_color_sketch, + init_img_inpaint, + ], + outputs=[img2img_prompt, dummy_component], + ) + + img2img_prompt.submit(**img2img_args) + submit.click(**img2img_args) + + res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False) + + detect_image_size_btn.click( + fn=lambda w, h, _: (w or gr.update(), h or gr.update()), + _js="currentImg2imgSourceResolution", + inputs=[dummy_component, dummy_component, dummy_component], + outputs=[width, height], + show_progress=False, + ) + + restore_progress_button.click( + fn=progress.restore_progress, + _js="restoreProgressImg2img", + inputs=[dummy_component], + outputs=[ + img2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + img2img_interrogate.click( + fn=lambda *args: process_interrogate(interrogate, *args), + **interrogate_args, + ) + + img2img_deepbooru.click( + fn=lambda *args: process_interrogate(interrogate_deepbooru, *args), + **interrogate_args, + ) + + prompts = [(txt2img_prompt, 
txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] + style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles] + style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"] + + for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts): + button.click( + fn=add_style, + _js="ask_for_style_name", + # Have to pass empty dummy component here, because the JavaScript and Python function have to accept + # the same number of parameters, but we only know the style-name after the JavaScript prompt + inputs=[dummy_component, prompt, negative_prompt], + outputs=[txt2img_prompt_styles, img2img_prompt_styles], + ) + + for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs): + button.click( + fn=apply_styles, + _js=js_func, + inputs=[prompt, negative_prompt, styles], + outputs=[prompt, negative_prompt, styles], + ) + + token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) + negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter]) + + ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) + + img2img_paste_fields = [ + (img2img_prompt, "Prompt"), + (img2img_negative_prompt, "Negative prompt"), + (steps, "Steps"), + (sampler_index, "Sampler"), + (restore_faces, "Face restoration"), + (cfg_scale, "CFG scale"), + (image_cfg_scale, "Image CFG scale"), + (seed, "Seed"), + (width, "Size-1"), + (height, "Size-2"), + (batch_size, "Batch size"), + (subseed, "Variation seed"), + (subseed_strength, "Variation seed strength"), + (seed_resize_from_w, "Seed resize from-1"), + (seed_resize_from_h, "Seed resize from-2"), + (img2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), + (denoising_strength, "Denoising strength"), + (mask_blur, "Mask blur"), + *modules.scripts.scripts_img2img.infotext_fields + ] + parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) + parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) + parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( + paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, + )) + + modules.scripts.scripts_current = None + + with gr.Blocks(analytics_enabled=False) as extras_interface: + ui_postprocessing.create_ui() + + with gr.Blocks(analytics_enabled=False) as pnginfo_interface: + with gr.Row().style(equal_height=False): + with gr.Column(variant='panel'): + image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil") + + with gr.Column(variant='panel'): + html = gr.HTML() + generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info") + html2 = gr.HTML() + with gr.Row(): + buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"]) + + for tabname, button in buttons.items(): + parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( + paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image, + )) + + image.change( + fn=wrap_gradio_call(modules.extras.run_pnginfo), + inputs=[image], + outputs=[html, generation_info, html2], + ) + + 
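# Editor's note: an illustrative sketch, not part of the patch. The *_paste_fields lists
# registered above pair a component with either an infotext key or a callable that derives
# a gr.update() from the parsed infotext dict, and register_paste_params_button() binds a
# paste/"send to" button to its source components. The names example_width, example_enable_hr
# and example_fields below are placeholders, not repo code.
import gradio as gr

example_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512)
example_enable_hr = gr.Checkbox(label="Hires. fix", value=False)

example_fields = [
    (example_width, "Size-1"),                                 # value copied verbatim from "Size-1: 512"
    (example_enable_hr, lambda d: "Denoising strength" in d),  # derived: hires fix is implied by the key's presence
]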
def update_interp_description(value): + interp_description_css = "<p style='margin-bottom: 2.5em'>{}</p>
" + interp_descriptions = { + "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."), + "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"), + "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M") + } + return interp_descriptions[value] + + with gr.Blocks(analytics_enabled=False) as modelmerger_interface: + with gr.Row().style(equal_height=False): + with gr.Column(variant='compact'): + interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description") + + with FormRow(elem_id="modelmerger_models"): + primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)") + create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A") + + secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") + create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B") + + tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") + create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") + + custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name") + interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount") + interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method") + interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description]) + + with FormRow(): + checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") + save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") + save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") + + with FormRow(): + with gr.Column(): + config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method") + + with gr.Column(): + with FormRow(): + bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae") + create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae") + + with FormRow(): + discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights") + + with gr.Row(): + modelmerger_merge = gr.Button(elem_id="modelmerger_merge", 
value="Merge", variant='primary') + + with gr.Column(variant='compact', elem_id="modelmerger_results_container"): + with gr.Group(elem_id="modelmerger_results_panel"): + modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False) + + with gr.Blocks(analytics_enabled=False) as train_interface: + with gr.Row().style(equal_height=False): + gr.HTML(value="
<p style='margin-bottom: 0.7em'>See <b><a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">wiki</a></b> for detailed explanation.</p>
") + + with gr.Row(variant="compact").style(equal_height=False): + with gr.Tabs(elem_id="train_tabs"): + + with gr.Tab(label="Create embedding", id="create_embedding"): + new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name") + initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text") + nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt") + overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding") + + with gr.Row(): + with gr.Column(scale=3): + gr.HTML(value="") + + with gr.Column(): + create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding") + + with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"): + new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name") + new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes") + new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure") + new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func") + new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option") + new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm") + new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout") + new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. 
ex:'0, 0.01, 0'") + overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork") + + with gr.Row(): + with gr.Column(scale=3): + gr.HTML(value="") + + with gr.Column(): + create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork") + + with gr.Tab(label="Preprocess images", id="preprocess_images"): + process_src = gr.Textbox(label='Source directory', elem_id="train_process_src") + process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst") + process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width") + process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height") + preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action") + + with gr.Row(): + process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size") + process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip") + process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split") + process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop") + process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop") + process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption") + process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru") + + with gr.Row(visible=False) as process_split_extra_row: + process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold") + process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio") + + with gr.Row(visible=False) as process_focal_crop_row: + process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight") + process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") + process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") + process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") + + with gr.Column(visible=False) as process_multicrop_col: + gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') + with gr.Row(): + process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim") + process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim") + with gr.Row(): + process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea") + process_multicrop_maxarea = 
gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea") + with gr.Row(): + process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective") + process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold") + + with gr.Row(): + with gr.Column(scale=3): + gr.HTML(value="") + + with gr.Column(): + with gr.Row(): + interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing") + run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess") + + process_split.change( + fn=lambda show: gr_show(show), + inputs=[process_split], + outputs=[process_split_extra_row], + ) + + process_focal_crop.change( + fn=lambda show: gr_show(show), + inputs=[process_focal_crop], + outputs=[process_focal_crop_row], + ) + + process_multicrop.change( + fn=lambda show: gr_show(show), + inputs=[process_multicrop], + outputs=[process_multicrop_col], + ) + + def get_textual_inversion_template_names(): + return sorted(textual_inversion.textual_inversion_templates) + + with gr.Tab(label="Train", id="train"): + gr.HTML(value="

<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images <a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">[wiki]</a></p>
") + with FormRow(): + train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) + create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name") + + train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks)) + create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name") + + with FormRow(): + embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") + hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate") + + with FormRow(): + clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) + clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False) + + with FormRow(): + batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size") + gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step") + + dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory") + log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory") + + with FormRow(): + template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names()) + create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refrsh_train_template_file") + + training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width") + training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height") + varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize") + steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps") + + with FormRow(): + create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every") + save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every") + + use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight") + + save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding") + preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) 
from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img") + + shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags") + tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out") + + latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method") + + with gr.Row(): + train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding") + interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training") + train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork") + + params = script_callbacks.UiTrainTabParams(txt2img_preview_params) + + script_callbacks.ui_train_tabs_callback(params) + + with gr.Column(elem_id='ti_gallery_container'): + ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) + gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) + gr.HTML(elem_id="ti_progress", value="") + ti_outcome = gr.HTML(elem_id="ti_error", value="") + + create_embedding.click( + fn=modules.textual_inversion.ui.create_embedding, + inputs=[ + new_embedding_name, + initialization_text, + nvpt, + overwrite_old_embedding, + ], + outputs=[ + train_embedding_name, + ti_output, + ti_outcome, + ] + ) + + create_hypernetwork.click( + fn=modules.hypernetworks.ui.create_hypernetwork, + inputs=[ + new_hypernetwork_name, + new_hypernetwork_sizes, + overwrite_old_hypernetwork, + new_hypernetwork_layer_structure, + new_hypernetwork_activation_func, + new_hypernetwork_initialization_option, + new_hypernetwork_add_layer_norm, + new_hypernetwork_use_dropout, + new_hypernetwork_dropout_structure + ], + outputs=[ + train_hypernetwork_name, + ti_output, + ti_outcome, + ] + ) + + run_preprocess.click( + fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + dummy_component, + process_src, + process_dst, + process_width, + process_height, + preprocess_txt_action, + process_keep_original_size, + process_flip, + process_split, + process_caption, + process_caption_deepbooru, + process_split_threshold, + process_overlap_ratio, + process_focal_crop, + process_focal_crop_face_weight, + process_focal_crop_entropy_weight, + process_focal_crop_edges_weight, + process_focal_crop_debug, + process_multicrop, + process_multicrop_mindim, + process_multicrop_maxdim, + process_multicrop_minarea, + process_multicrop_maxarea, + process_multicrop_objective, + process_multicrop_threshold, + ], + outputs=[ + ti_output, + ti_outcome, + ], + ) + + train_embedding.click( + fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + dummy_component, + train_embedding_name, + embedding_learn_rate, + batch_size, + gradient_step, + dataset_directory, + log_directory, + training_width, + training_height, + varsize, + steps, + clip_grad_mode, + clip_grad_value, + shuffle_tags, + tag_drop_out, + latent_sampling_method, + use_weight, + create_image_every, + save_embedding_every, + template_file, + save_image_with_stored_embedding, + preview_from_txt2img, + *txt2img_preview_params, + ], + outputs=[ + ti_output, + ti_outcome, + ] + ) + 
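# Editor's note: an illustrative sketch, not part of the patch, of the dummy_component
# pattern used in the event wiring above. When both fn and _js are supplied, Gradio 3 runs
# the _js function in the browser first and hands its return values to the Python fn, so
# both sides must accept the same number of arguments; the JS side fills the placeholder
# slot (for example with a task id). The names demo_handler, run_button, result and
# "placeholder_filling_js" are hypothetical; the JS helper stands in for functions like
# start_training_textual_inversion used above.
import gradio as gr

def demo_handler(task_id, prompt_text):
    # task_id arrives from the JS side; the Python function only sees the final value
    return f"started {task_id}: {prompt_text}"

with gr.Blocks() as demo_sketch:
    dummy_component = gr.Label(visible=False)
    prompt = gr.Textbox(label="Prompt")
    result = gr.Textbox(label="Result")
    run_button = gr.Button("Run")

    run_button.click(
        fn=demo_handler,
        _js="placeholder_filling_js",   # hypothetical JS helper that replaces the dummy slot
        inputs=[dummy_component, prompt],
        outputs=[result],
    )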
+ train_hypernetwork.click( + fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + dummy_component, + train_hypernetwork_name, + hypernetwork_learn_rate, + batch_size, + gradient_step, + dataset_directory, + log_directory, + training_width, + training_height, + varsize, + steps, + clip_grad_mode, + clip_grad_value, + shuffle_tags, + tag_drop_out, + latent_sampling_method, + use_weight, + create_image_every, + save_embedding_every, + template_file, + preview_from_txt2img, + *txt2img_preview_params, + ], + outputs=[ + ti_output, + ti_outcome, + ] + ) + + interrupt_training.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + + interrupt_preprocessing.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + + loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file) + + settings = ui_settings.UiSettings() + settings.create_ui(loadsave, dummy_component) + + interfaces = [ + (txt2img_interface, "txt2img", "txt2img"), + (img2img_interface, "img2img", "img2img"), + (extras_interface, "Extras", "extras"), + (pnginfo_interface, "PNG Info", "pnginfo"), + (modelmerger_interface, "Checkpoint Merger", "modelmerger"), + (train_interface, "Train", "train"), + ] + + interfaces += script_callbacks.ui_tabs_callback() + interfaces += [(settings.interface, "Settings", "settings")] + + extensions_interface = ui_extensions.create_ui() + interfaces += [(extensions_interface, "Extensions", "extensions")] + + shared.tab_names = [] + for _interface, label, _ifid in interfaces: + shared.tab_names.append(label) + + with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: + settings.add_quicksettings() + + parameters_copypaste.connect_paste_params_buttons() + + with gr.Tabs(elem_id="tabs") as tabs: + tab_order = {k: i for i, k in enumerate(opts.ui_tab_order)} + sorted_interfaces = sorted(interfaces, key=lambda x: tab_order.get(x[1], 9999)) + + for interface, label, ifid in sorted_interfaces: + if label in shared.opts.hidden_tabs: + continue + with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): + interface.render() + + for interface, _label, ifid in interfaces: + if ifid in ["extensions", "settings"]: + continue + + loadsave.add_block(interface, ifid) + + loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs) + + loadsave.setup_ui() + + if os.path.exists(os.path.join(script_path, "notification.mp3")): + gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) + + footer = shared.html("footer.html") + footer = footer.format(versions=versions_html(), api_docs="/docs" if shared.cmd_opts.api else "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API") + gr.HTML(footer, elem_id="footer") + + settings.add_functionality(demo) + + update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") + settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) + demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) + + def modelmerger(*args): + try: + results = modules.extras.run_modelmerger(*args) + except Exception as e: + errors.report("Error loading/saving model file", exc_info=True) + modules.sd_models.list_models() # to remove the potentially missing models from the list + return 
[*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"] + return results + + modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result]) + modelmerger_merge.click( + fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]), + _js='modelmerger', + inputs=[ + dummy_component, + primary_model_name, + secondary_model_name, + tertiary_model_name, + interp_method, + interp_amount, + save_as_half, + custom_name, + checkpoint_format, + config_source, + bake_in_vae, + discard_weights, + save_metadata, + ], + outputs=[ + primary_model_name, + secondary_model_name, + tertiary_model_name, + settings.component_dict['sd_model_checkpoint'], + modelmerger_result, + ] + ) + + loadsave.dump_defaults() + demo.ui_loadsave = loadsave + + # Required as a workaround for change() event not triggering when loading values from ui-config.json + interp_description.value = update_interp_description(interp_method.value) + + return demo + + +def versions_html(): + import torch + import launch + + python_version = ".".join([str(x) for x in sys.version_info[0:3]]) + commit = launch.commit_hash() + tag = launch.git_tag() + + if shared.xformers_available: + import xformers + xformers_version = xformers.__version__ + else: + xformers_version = "N/A" + + return f""" +version: {tag} + •  +python: {python_version} + •  +torch: {getattr(torch, '__long_version__',torch.__version__)} + •  +xformers: {xformers_version} + •  +gradio: {gr.__version__} + •  +checkpoint: N/A +""" + + +def setup_ui_api(app): + from pydantic import BaseModel, Field + from typing import List + + class QuicksettingsHint(BaseModel): + name: str = Field(title="Name of the quicksettings field") + label: str = Field(title="Label of the quicksettings field") + + def quicksettings_hint(): + return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()] + + app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint]) + + app.add_api_route("/internal/ping", lambda: {}, methods=["GET"]) + + app.add_api_route("/internal/profile-startup", lambda: timer.startup_record, methods=["GET"]) + + def download_sysinfo(attachment=False): + from fastapi.responses import PlainTextResponse + + text = sysinfo.get() + filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt" + + return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'}) + + app.add_api_route("/internal/sysinfo", download_sysinfo, methods=["GET"]) + app.add_api_route("/internal/sysinfo-download", lambda: download_sysinfo(attachment=True), methods=["GET"]) + -- cgit v1.2.3 From 00429544905aaea124fee29b87e3a3b9469c64b0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 1 Aug 2023 07:15:16 +0300 Subject: Split history: mv temp modules/ui.py --- modules/ui.py | 1621 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ temp | 1621 --------------------------------------------------------- 2 files changed, 1621 insertions(+), 1621 deletions(-) create mode 100644 modules/ui.py delete mode 100644 temp (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py new file mode 100644 index 00000000..07ecee7b --- /dev/null +++ b/modules/ui.py @@ -0,0 +1,1621 @@ +import datetime +import json +import mimetypes +import os +import sys +from functools import reduce +import warnings + 
+import gradio as gr +import gradio.utils +import numpy as np +from PIL import Image, PngImagePlugin # noqa: F401 +from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call + +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo +from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML +from modules.paths import script_path +from modules.ui_common import create_refresh_button +from modules.ui_gradio_extensions import reload_javascript + + +from modules.shared import opts, cmd_opts + +import modules.codeformer_model +import modules.generation_parameters_copypaste as parameters_copypaste +import modules.gfpgan_model +import modules.hypernetworks.ui +import modules.scripts +import modules.shared as shared +import modules.styles +import modules.textual_inversion.ui +from modules import prompt_parser +from modules.sd_hijack import model_hijack +from modules.sd_samplers import samplers, samplers_for_img2img +from modules.textual_inversion import textual_inversion +import modules.hypernetworks.ui +from modules.generation_parameters_copypaste import image_from_url_text +import modules.extras + +create_setting_component = ui_settings.create_setting_component + +warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning) + +# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI +mimetypes.init() +mimetypes.add_type('application/javascript', '.js') + +if not cmd_opts.share and not cmd_opts.listen: + # fix gradio phoning home + gradio.utils.version_check = lambda: None + gradio.utils.get_local_ip_address = lambda: '127.0.0.1' + +if cmd_opts.ngrok is not None: + import modules.ngrok as ngrok + print('ngrok authtoken detected, trying to connect...') + ngrok.connect( + cmd_opts.ngrok, + cmd_opts.port if cmd_opts.port is not None else 7860, + cmd_opts.ngrok_options + ) + + +def gr_show(visible=True): + return {"visible": visible, "__type__": "update"} + + +sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg" +sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None + +# Using constants for these since the variation selector isn't visible. +# Important that they exactly match script.js for tooltip to work. 
+random_symbol = '\U0001f3b2\ufe0f' # 🎲️ +reuse_symbol = '\u267b\ufe0f' # ♻️ +paste_symbol = '\u2199\ufe0f' # ↙ +refresh_symbol = '\U0001f504' # 🔄 +save_style_symbol = '\U0001f4be' # 💾 +apply_style_symbol = '\U0001f4cb' # 📋 +clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️ +extra_networks_symbol = '\U0001F3B4' # 🎴 +switch_values_symbol = '\U000021C5' # ⇅ +restore_progress_symbol = '\U0001F300' # 🌀 +detect_image_size_symbol = '\U0001F4D0' # 📐 +up_down_symbol = '\u2195\ufe0f' # ↕️ + + +plaintext_to_html = ui_common.plaintext_to_html + + +def send_gradio_gallery_to_image(x): + if len(x) == 0: + return None + return image_from_url_text(x[0]) + + +def add_style(name: str, prompt: str, negative_prompt: str): + if name is None: + return [gr_show() for x in range(4)] + + style = modules.styles.PromptStyle(name, prompt, negative_prompt) + shared.prompt_styles.styles[style.name] = style + # Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we + # reserialize all styles every time we save them + shared.prompt_styles.save_styles(shared.styles_filename) + + return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)] + + +def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y): + from modules import processing, devices + + if not enable: + return "" + + p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y) + + with devices.autocast(): + p.init([""], [0], [0]) + + return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" + + +def resize_from_to_html(width, height, scale_by): + target_width = int(width * scale_by) + target_height = int(height * scale_by) + + if not target_width or not target_height: + return "no image selected" + + return f"resize: from {width}x{height} to {target_width}x{target_height}" + + +def apply_styles(prompt, prompt_neg, styles): + prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) + prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) + + return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])] + + +def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles): + if mode in {0, 1, 3, 4}: + return [interrogation_function(ii_singles[mode]), None] + elif mode == 2: + return [interrogation_function(ii_singles[mode]["image"]), None] + elif mode == 5: + assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" + images = shared.listfiles(ii_input_dir) + print(f"Will process {len(images)} images.") + if ii_output_dir != "": + os.makedirs(ii_output_dir, exist_ok=True) + else: + ii_output_dir = ii_input_dir + + for image in images: + img = Image.open(image) + filename = os.path.basename(image) + left, _ = os.path.splitext(filename) + print(interrogation_function(img), file=open(os.path.join(ii_output_dir, f"{left}.txt"), 'a', encoding='utf-8')) + + return [gr.update(), None] + + +def interrogate(image): + prompt = shared.interrogator.interrogate(image.convert("RGB")) + return gr.update() if prompt is None else prompt + + +def interrogate_deepbooru(image): + prompt = deepbooru.model.tag(image) + return gr.update() if prompt is None else prompt + + +def create_seed_inputs(target_interface): + with 
FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): + seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") + seed.style(container=False) + random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') + reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') + + seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False) + + # Components to show/hide based on the 'Extra' checkbox + seed_extras = [] + + with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: + seed_extras.append(seed_extra_row_1) + subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed") + subseed.style(container=False) + random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") + reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") + subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") + + with FormRow(visible=False) as seed_extra_row_2: + seed_extras.append(seed_extra_row_2) + seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w") + seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h") + + random_seed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_seed')}", show_progress=False, inputs=[], outputs=[]) + random_subseed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_subseed')}", show_progress=False, inputs=[], outputs=[]) + + def change_visibility(show): + return {comp: gr_show(show) for comp in seed_extras} + + seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras) + + return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox + + + +def connect_clear_prompt(button): + """Given clear button, prompt, and token_counter objects, setup clear prompt button click event""" + button.click( + _js="clear_prompt", + fn=None, + inputs=[], + outputs=[], + ) + + +def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed): + """ Connects a 'reuse (sub)seed' button's click event so that it copies last used + (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength + was 0, i.e. 
no variation seed was used, it copies the normal seed value instead.""" + def copy_seed(gen_info_string: str, index): + res = -1 + + try: + gen_info = json.loads(gen_info_string) + index -= gen_info.get('index_of_first_image', 0) + + if is_subseed and gen_info.get('subseed_strength', 0) > 0: + all_subseeds = gen_info.get('all_subseeds', [-1]) + res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0] + else: + all_seeds = gen_info.get('all_seeds', [-1]) + res = all_seeds[index if 0 <= index < len(all_seeds) else 0] + + except json.decoder.JSONDecodeError: + if gen_info_string: + errors.report(f"Error parsing JSON generation info: {gen_info_string}") + + return [res, gr_show(False)] + + reuse_seed.click( + fn=copy_seed, + _js="(x, y) => [x, selected_gallery_index()]", + show_progress=False, + inputs=[generation_info, dummy_component], + outputs=[seed, dummy_component] + ) + + +def update_token_counter(text, steps): + try: + text, _ = extra_networks.parse_prompt(text) + + _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text]) + prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps) + + except Exception: + # a parsing error can happen here during typing, and we don't want to bother the user with + # messages related to it in console + prompt_schedules = [[[steps, text]]] + + flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules) + prompts = [prompt_text for step, prompt_text in flat_prompts] + token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0]) + return f"{token_count}/{max_length}" + + +def create_toprow(is_img2img): + id_part = "img2img" if is_img2img else "txt2img" + + with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): + with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): + with gr.Row(): + with gr.Column(scale=80): + with gr.Row(): + prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + + with gr.Row(): + with gr.Column(scale=80): + with gr.Row(): + negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + + button_interrogate = None + button_deepbooru = None + if is_img2img: + with gr.Column(scale=1, elem_classes="interrogate-col"): + button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") + button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") + + with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): + with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): + interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") + skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") + submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') + + skip.click( + fn=lambda: shared.state.skip(), + inputs=[], + outputs=[], + ) + + interrupt.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + + with gr.Row(elem_id=f"{id_part}_tools"): + paste = ToolButton(value=paste_symbol, elem_id="paste") + clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") + extra_networks_button = 
ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") + prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") + save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") + restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) + + token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) + token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") + negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) + negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") + + clear_prompt_button.click( + fn=lambda *x: x, + _js="confirm_clear_prompt", + inputs=[prompt, negative_prompt], + outputs=[prompt, negative_prompt], + ) + + with gr.Row(elem_id=f"{id_part}_styles_row"): + prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) + create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") + + return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button + + +def setup_progressbar(*args, **kwargs): + pass + + +def apply_setting(key, value): + if value is None: + return gr.update() + + if shared.cmd_opts.freeze_settings: + return gr.update() + + # dont allow model to be swapped when model hash exists in prompt + if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap: + return gr.update() + + if key == "sd_model_checkpoint": + ckpt_info = sd_models.get_closet_checkpoint_match(value) + + if ckpt_info is not None: + value = ckpt_info.title + else: + return gr.update() + + comp_args = opts.data_labels[key].component_args + if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False: + return + + valtype = type(opts.data_labels[key].default) + oldval = opts.data.get(key, None) + opts.data[key] = valtype(value) if valtype != type(None) else value + if oldval != value and opts.data_labels[key].onchange is not None: + opts.data_labels[key].onchange() + + opts.save(shared.config_filename) + return getattr(opts, key) + + +def create_output_panel(tabname, outdir): + return ui_common.create_output_panel(tabname, outdir) + + +def create_sampler_and_steps_selection(choices, tabname): + if opts.samplers_in_dropdown: + with FormRow(elem_id=f"sampler_selection_{tabname}"): + sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) + else: + with FormGroup(elem_id=f"sampler_selection_{tabname}"): + steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) + sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + + return steps, sampler_index + + +def ordered_ui_categories(): + user_order = {x.strip(): i * 
2 + 1 for i, x in enumerate(shared.opts.ui_reorder_list)} + + for _, category in sorted(enumerate(shared_items.ui_reorder_categories()), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): + yield category + + +def create_override_settings_dropdown(tabname, row): + dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True) + + dropdown.change( + fn=lambda x: gr.Dropdown.update(visible=bool(x)), + inputs=[dropdown], + outputs=[dropdown], + ) + + return dropdown + + +def create_ui(): + import modules.img2img + import modules.txt2img + + reload_javascript() + + parameters_copypaste.reset() + + modules.scripts.scripts_current = modules.scripts.scripts_txt2img + modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) + + with gr.Blocks(analytics_enabled=False) as txt2img_interface: + txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) + + dummy_component = gr.Label(visible=False) + txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) + + with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: + from modules import ui_extra_networks + extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img') + + with gr.Row().style(equal_height=False): + with gr.Column(variant='compact', elem_id="txt2img_settings"): + modules.scripts.scripts_txt2img.prepare_ui() + + for category in ordered_ui_categories(): + if category == "sampler": + steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") + + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="txt2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") + + with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", label="Switch dims") + + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + + elif category == "cfg": + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") + + elif category == "seed": + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') + + elif category == "checkboxes": + with FormRow(elem_classes="checkboxes-row", variant="compact"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") + enable_hr = gr.Checkbox(label='Hires. 
fix', value=False, elem_id="txt2img_enable_hr") + hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) + + elif category == "hires_fix": + with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: + with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): + hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + + with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): + hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") + hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") + hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") + + with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: + hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index") + + with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: + with gr.Column(scale=80): + with gr.Row(): + hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) + with gr.Column(scale=80): + with gr.Row(): + hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) + + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + + elif category == "override_settings": + with FormRow(elem_id="txt2img_override_settings_row") as row: + override_settings = create_override_settings_dropdown('txt2img', row) + + elif category == "scripts": + with FormGroup(elem_id="txt2img_script_container"): + custom_inputs = modules.scripts.scripts_txt2img.setup_ui() + + else: + modules.scripts.scripts_txt2img.setup_ui_for_section(category) + + hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] + + for component in hr_resolution_preview_inputs: + event = component.release if isinstance(component, gr.Slider) else component.change + + event( + fn=calc_resolution_hires, + inputs=hr_resolution_preview_inputs, + outputs=[hr_final_resolution], + show_progress=False, + ) + event( + None, + _js="onCalcResolutionHires", + inputs=hr_resolution_preview_inputs, + outputs=[], + show_progress=False, + ) + + txt2img_gallery, 
generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) + + connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) + connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) + + txt2img_args = dict( + fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), + _js="submit", + inputs=[ + dummy_component, + txt2img_prompt, + txt2img_negative_prompt, + txt2img_prompt_styles, + steps, + sampler_index, + restore_faces, + tiling, + batch_count, + batch_size, + cfg_scale, + seed, + subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, + height, + width, + enable_hr, + denoising_strength, + hr_scale, + hr_upscaler, + hr_second_pass_steps, + hr_resize_x, + hr_resize_y, + hr_sampler_index, + hr_prompt, + hr_negative_prompt, + override_settings, + + ] + custom_inputs, + + outputs=[ + txt2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + txt2img_prompt.submit(**txt2img_args) + submit.click(**txt2img_args) + + res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('txt2img')}", inputs=None, outputs=None, show_progress=False) + + restore_progress_button.click( + fn=progress.restore_progress, + _js="restoreProgressTxt2img", + inputs=[dummy_component], + outputs=[ + txt2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + txt_prompt_img.change( + fn=modules.images.image_data, + inputs=[ + txt_prompt_img + ], + outputs=[ + txt2img_prompt, + txt_prompt_img + ], + show_progress=False, + ) + + enable_hr.change( + fn=lambda x: gr_show(x), + inputs=[enable_hr], + outputs=[hr_options], + show_progress = False, + ) + + txt2img_paste_fields = [ + (txt2img_prompt, "Prompt"), + (txt2img_negative_prompt, "Negative prompt"), + (steps, "Steps"), + (sampler_index, "Sampler"), + (restore_faces, "Face restoration"), + (cfg_scale, "CFG scale"), + (seed, "Seed"), + (width, "Size-1"), + (height, "Size-2"), + (batch_size, "Batch size"), + (subseed, "Variation seed"), + (subseed_strength, "Variation seed strength"), + (seed_resize_from_w, "Seed resize from-1"), + (seed_resize_from_h, "Seed resize from-2"), + (txt2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), + (denoising_strength, "Denoising strength"), + (enable_hr, lambda d: "Denoising strength" in d), + (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), + (hr_scale, "Hires upscale"), + (hr_upscaler, "Hires upscaler"), + (hr_second_pass_steps, "Hires steps"), + (hr_resize_x, "Hires resize-1"), + (hr_resize_y, "Hires resize-2"), + (hr_sampler_index, "Hires sampler"), + (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" else gr.update()), + (hr_prompt, "Hires prompt"), + (hr_negative_prompt, "Hires negative prompt"), + (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), + *modules.scripts.scripts_txt2img.infotext_fields + ] + parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) + parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( + paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, + )) + + txt2img_preview_params = [ + 
txt2img_prompt, + txt2img_negative_prompt, + steps, + sampler_index, + cfg_scale, + seed, + width, + height, + ] + + token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter]) + negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter]) + + ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) + + modules.scripts.scripts_current = modules.scripts.scripts_img2img + modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) + + with gr.Blocks(analytics_enabled=False) as img2img_interface: + img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) + + img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) + + with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: + from modules import ui_extra_networks + extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img') + + with FormRow().style(equal_height=False): + with gr.Column(variant='compact', elem_id="img2img_settings"): + copy_image_buttons = [] + copy_image_destinations = {} + + def add_copy_image_controls(tab_name, elem): + with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"): + gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}") + + for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']): + if name == tab_name: + gr.Button(title, interactive=False) + copy_image_destinations[name] = elem + continue + + button = gr.Button(title) + copy_image_buttons.append((button, name, elem)) + + with gr.Tabs(elem_id="mode_img2img"): + img2img_selected_tab = gr.State(0) + + with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: + init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=opts.img2img_editor_height) + add_copy_image_controls('img2img', init_img) + + with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: + sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) + add_copy_image_controls('sketch', sketch) + + with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: + init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) + add_copy_image_controls('inpaint', init_img_with_mask) + + with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: + inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", 
image_mode="RGBA").style(height=opts.img2img_editor_height) + inpaint_color_sketch_orig = gr.State(None) + add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) + + def update_orig(image, state): + if image is not None: + same_size = state is not None and state.size == image.size + has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) + edited = same_size and has_exact_match + return image if not edited or state is None else state + + inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) + + with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: + init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") + init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask") + + with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: + hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' + gr.HTML( + "<p>Process images in a directory on the same machine where the server is running." + + "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." + + f"<br>Add inpaint batch mask directory to enable inpaint batch processing." + f"{hidden}</p>
" + ) + img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") + img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") + img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") + with gr.Accordion("PNG info", open=False): + img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") + img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") + img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") + + img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] + + for i, tab in enumerate(img2img_tabs): + tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) + + def copy_image(img): + if isinstance(img, dict) and 'image' in img: + return img['image'] + + return img + + for button, name, elem in copy_image_buttons: + button.click( + fn=copy_image, + inputs=[elem], + outputs=[copy_image_destinations[name]], + ) + button.click( + fn=lambda: None, + _js=f"switch_to_{name.replace(' ', '_')}", + inputs=[], + outputs=[], + ) + + with FormRow(): + resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") + + modules.scripts.scripts_img2img.prepare_ui() + + for category in ordered_ui_categories(): + if category == "sampler": + steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") + + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + selected_scale_tab = gr.State(value=0) + + with gr.Tabs(): + with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") + detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn") + + with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: + scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") + + with FormRow(): + scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") + gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") + button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") + + on_change_args = dict( + fn=resize_from_to_html, + _js="currentImg2imgSourceResolution", + inputs=[dummy_component, dummy_component, scale_by], + 
outputs=scale_by_html, + show_progress=False, + ) + + scale_by.release(**on_change_args) + button_update_resize_to.click(**on_change_args) + + # the code below is meant to update the resolution label after the image in the image selection UI has changed. + # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. + # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. + for component in [init_img, sketch]: + component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) + + tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) + tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) + + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + + elif category == "cfg": + with FormGroup(): + with FormRow(): + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") + image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") + + elif category == "seed": + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') + + elif category == "checkboxes": + with FormRow(elem_classes="checkboxes-row", variant="compact"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") + + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + + elif category == "override_settings": + with FormRow(elem_id="img2img_override_settings_row") as row: + override_settings = create_override_settings_dropdown('img2img', row) + + elif category == "scripts": + with FormGroup(elem_id="img2img_script_container"): + custom_inputs = modules.scripts.scripts_img2img.setup_ui() + + elif category == "inpaint": + with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: + with FormRow(): + mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") + mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha") + + with FormRow(): + inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") + + with FormRow(): + inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") + + with FormRow(): + with gr.Column(): + inpaint_full_res = 
gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") + + with gr.Column(scale=4): + inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") + + def select_img2img_tab(tab): + return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), + + for i, elem in enumerate(img2img_tabs): + elem.select( + fn=lambda tab=i: select_img2img_tab(tab), + inputs=[], + outputs=[inpaint_controls, mask_alpha], + ) + else: + modules.scripts.scripts_img2img.setup_ui_for_section(category) + + img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) + + connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) + connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) + + img2img_prompt_img.change( + fn=modules.images.image_data, + inputs=[ + img2img_prompt_img + ], + outputs=[ + img2img_prompt, + img2img_prompt_img + ], + show_progress=False, + ) + + img2img_args = dict( + fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), + _js="submit_img2img", + inputs=[ + dummy_component, + dummy_component, + img2img_prompt, + img2img_negative_prompt, + img2img_prompt_styles, + init_img, + sketch, + init_img_with_mask, + inpaint_color_sketch, + inpaint_color_sketch_orig, + init_img_inpaint, + init_mask_inpaint, + steps, + sampler_index, + mask_blur, + mask_alpha, + inpainting_fill, + restore_faces, + tiling, + batch_count, + batch_size, + cfg_scale, + image_cfg_scale, + denoising_strength, + seed, + subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, + selected_scale_tab, + height, + width, + scale_by, + resize_mode, + inpaint_full_res, + inpaint_full_res_padding, + inpainting_mask_invert, + img2img_batch_input_dir, + img2img_batch_output_dir, + img2img_batch_inpaint_mask_dir, + override_settings, + img2img_batch_use_png_info, + img2img_batch_png_info_props, + img2img_batch_png_info_dir, + ] + custom_inputs, + outputs=[ + img2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + interrogate_args = dict( + _js="get_img2img_tab_index", + inputs=[ + dummy_component, + img2img_batch_input_dir, + img2img_batch_output_dir, + init_img, + sketch, + init_img_with_mask, + inpaint_color_sketch, + init_img_inpaint, + ], + outputs=[img2img_prompt, dummy_component], + ) + + img2img_prompt.submit(**img2img_args) + submit.click(**img2img_args) + + res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False) + + detect_image_size_btn.click( + fn=lambda w, h, _: (w or gr.update(), h or gr.update()), + _js="currentImg2imgSourceResolution", + inputs=[dummy_component, dummy_component, dummy_component], + outputs=[width, height], + show_progress=False, + ) + + restore_progress_button.click( + fn=progress.restore_progress, + _js="restoreProgressImg2img", + inputs=[dummy_component], + outputs=[ + img2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + img2img_interrogate.click( + fn=lambda *args: process_interrogate(interrogate, *args), + **interrogate_args, + ) + + img2img_deepbooru.click( + fn=lambda *args: process_interrogate(interrogate_deepbooru, *args), + **interrogate_args, + ) + + prompts = [(txt2img_prompt, 
txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] + style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles] + style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"] + + for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts): + button.click( + fn=add_style, + _js="ask_for_style_name", + # Have to pass empty dummy component here, because the JavaScript and Python function have to accept + # the same number of parameters, but we only know the style-name after the JavaScript prompt + inputs=[dummy_component, prompt, negative_prompt], + outputs=[txt2img_prompt_styles, img2img_prompt_styles], + ) + + for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs): + button.click( + fn=apply_styles, + _js=js_func, + inputs=[prompt, negative_prompt, styles], + outputs=[prompt, negative_prompt, styles], + ) + + token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) + negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter]) + + ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) + + img2img_paste_fields = [ + (img2img_prompt, "Prompt"), + (img2img_negative_prompt, "Negative prompt"), + (steps, "Steps"), + (sampler_index, "Sampler"), + (restore_faces, "Face restoration"), + (cfg_scale, "CFG scale"), + (image_cfg_scale, "Image CFG scale"), + (seed, "Seed"), + (width, "Size-1"), + (height, "Size-2"), + (batch_size, "Batch size"), + (subseed, "Variation seed"), + (subseed_strength, "Variation seed strength"), + (seed_resize_from_w, "Seed resize from-1"), + (seed_resize_from_h, "Seed resize from-2"), + (img2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), + (denoising_strength, "Denoising strength"), + (mask_blur, "Mask blur"), + *modules.scripts.scripts_img2img.infotext_fields + ] + parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) + parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) + parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( + paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, + )) + + modules.scripts.scripts_current = None + + with gr.Blocks(analytics_enabled=False) as extras_interface: + ui_postprocessing.create_ui() + + with gr.Blocks(analytics_enabled=False) as pnginfo_interface: + with gr.Row().style(equal_height=False): + with gr.Column(variant='panel'): + image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil") + + with gr.Column(variant='panel'): + html = gr.HTML() + generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info") + html2 = gr.HTML() + with gr.Row(): + buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"]) + + for tabname, button in buttons.items(): + parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( + paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image, + )) + + image.change( + fn=wrap_gradio_call(modules.extras.run_pnginfo), + inputs=[image], + outputs=[html, generation_info, html2], + ) + + 
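The PNG Info tab above hands the uploaded image to modules.extras.run_pnginfo, which recovers the generation parameters embedded in the file and routes them to the copy-paste buttons. As background only (this is not the module's code), such parameters travel as a PNG text chunk, conventionally under the key "parameters", which Pillow can write and read back:

    from PIL import Image, PngImagePlugin

    # write: attach generation parameters as a tEXt chunk when saving
    info = PngImagePlugin.PngInfo()
    info.add_text("parameters", "a prompt\nSteps: 20, Sampler: Euler a, CFG scale: 7, Seed: 1")
    Image.new("RGB", (64, 64)).save("sample.png", pnginfo=info)

    # read: the chunk is exposed through the image metadata
    print(Image.open("sample.png").info.get("parameters"))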
def update_interp_description(value): + interp_description_css = "<p>{}</p>
" + interp_descriptions = { + "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."), + "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"), + "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M") + } + return interp_descriptions[value] + + with gr.Blocks(analytics_enabled=False) as modelmerger_interface: + with gr.Row().style(equal_height=False): + with gr.Column(variant='compact'): + interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description") + + with FormRow(elem_id="modelmerger_models"): + primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)") + create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A") + + secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") + create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B") + + tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") + create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") + + custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name") + interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount") + interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method") + interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description]) + + with FormRow(): + checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") + save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") + save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") + + with FormRow(): + with gr.Column(): + config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method") + + with gr.Column(): + with FormRow(): + bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae") + create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae") + + with FormRow(): + discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights") + + with gr.Row(): + modelmerger_merge = gr.Button(elem_id="modelmerger_merge", 
value="Merge", variant='primary') + + with gr.Column(variant='compact', elem_id="modelmerger_results_container"): + with gr.Group(elem_id="modelmerger_results_panel"): + modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False) + + with gr.Blocks(analytics_enabled=False) as train_interface: + with gr.Row().style(equal_height=False): + gr.HTML(value="

<p>See wiki for detailed explanation.</p>
") + + with gr.Row(variant="compact").style(equal_height=False): + with gr.Tabs(elem_id="train_tabs"): + + with gr.Tab(label="Create embedding", id="create_embedding"): + new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name") + initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text") + nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt") + overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding") + + with gr.Row(): + with gr.Column(scale=3): + gr.HTML(value="") + + with gr.Column(): + create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding") + + with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"): + new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name") + new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes") + new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure") + new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func") + new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option") + new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm") + new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout") + new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. 
ex:'0, 0.01, 0'") + overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork") + + with gr.Row(): + with gr.Column(scale=3): + gr.HTML(value="") + + with gr.Column(): + create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork") + + with gr.Tab(label="Preprocess images", id="preprocess_images"): + process_src = gr.Textbox(label='Source directory', elem_id="train_process_src") + process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst") + process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width") + process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height") + preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action") + + with gr.Row(): + process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size") + process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip") + process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split") + process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop") + process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop") + process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption") + process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru") + + with gr.Row(visible=False) as process_split_extra_row: + process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold") + process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio") + + with gr.Row(visible=False) as process_focal_crop_row: + process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight") + process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") + process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") + process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") + + with gr.Column(visible=False) as process_multicrop_col: + gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') + with gr.Row(): + process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim") + process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim") + with gr.Row(): + process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea") + process_multicrop_maxarea = 
gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea") + with gr.Row(): + process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective") + process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold") + + with gr.Row(): + with gr.Column(scale=3): + gr.HTML(value="") + + with gr.Column(): + with gr.Row(): + interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing") + run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess") + + process_split.change( + fn=lambda show: gr_show(show), + inputs=[process_split], + outputs=[process_split_extra_row], + ) + + process_focal_crop.change( + fn=lambda show: gr_show(show), + inputs=[process_focal_crop], + outputs=[process_focal_crop_row], + ) + + process_multicrop.change( + fn=lambda show: gr_show(show), + inputs=[process_multicrop], + outputs=[process_multicrop_col], + ) + + def get_textual_inversion_template_names(): + return sorted(textual_inversion.textual_inversion_templates) + + with gr.Tab(label="Train", id="train"): + gr.HTML(value="

<p>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]</p>
") + with FormRow(): + train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) + create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name") + + train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks)) + create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name") + + with FormRow(): + embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") + hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate") + + with FormRow(): + clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) + clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False) + + with FormRow(): + batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size") + gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step") + + dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory") + log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory") + + with FormRow(): + template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names()) + create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refrsh_train_template_file") + + training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width") + training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height") + varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize") + steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps") + + with FormRow(): + create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every") + save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every") + + use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight") + + save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding") + preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) 
from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img") + + shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags") + tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out") + + latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method") + + with gr.Row(): + train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding") + interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training") + train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork") + + params = script_callbacks.UiTrainTabParams(txt2img_preview_params) + + script_callbacks.ui_train_tabs_callback(params) + + with gr.Column(elem_id='ti_gallery_container'): + ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) + gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) + gr.HTML(elem_id="ti_progress", value="") + ti_outcome = gr.HTML(elem_id="ti_error", value="") + + create_embedding.click( + fn=modules.textual_inversion.ui.create_embedding, + inputs=[ + new_embedding_name, + initialization_text, + nvpt, + overwrite_old_embedding, + ], + outputs=[ + train_embedding_name, + ti_output, + ti_outcome, + ] + ) + + create_hypernetwork.click( + fn=modules.hypernetworks.ui.create_hypernetwork, + inputs=[ + new_hypernetwork_name, + new_hypernetwork_sizes, + overwrite_old_hypernetwork, + new_hypernetwork_layer_structure, + new_hypernetwork_activation_func, + new_hypernetwork_initialization_option, + new_hypernetwork_add_layer_norm, + new_hypernetwork_use_dropout, + new_hypernetwork_dropout_structure + ], + outputs=[ + train_hypernetwork_name, + ti_output, + ti_outcome, + ] + ) + + run_preprocess.click( + fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + dummy_component, + process_src, + process_dst, + process_width, + process_height, + preprocess_txt_action, + process_keep_original_size, + process_flip, + process_split, + process_caption, + process_caption_deepbooru, + process_split_threshold, + process_overlap_ratio, + process_focal_crop, + process_focal_crop_face_weight, + process_focal_crop_entropy_weight, + process_focal_crop_edges_weight, + process_focal_crop_debug, + process_multicrop, + process_multicrop_mindim, + process_multicrop_maxdim, + process_multicrop_minarea, + process_multicrop_maxarea, + process_multicrop_objective, + process_multicrop_threshold, + ], + outputs=[ + ti_output, + ti_outcome, + ], + ) + + train_embedding.click( + fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + dummy_component, + train_embedding_name, + embedding_learn_rate, + batch_size, + gradient_step, + dataset_directory, + log_directory, + training_width, + training_height, + varsize, + steps, + clip_grad_mode, + clip_grad_value, + shuffle_tags, + tag_drop_out, + latent_sampling_method, + use_weight, + create_image_every, + save_embedding_every, + template_file, + save_image_with_stored_embedding, + preview_from_txt2img, + *txt2img_preview_params, + ], + outputs=[ + ti_output, + ti_outcome, + ] + ) + 
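Both training buttons use the same wiring pattern found throughout this file: the handler is wrapped with wrap_gradio_gpu_call, and a _js function runs in the browser first, so its return values become the arguments the Python function receives (which is why a hidden dummy_component is passed, keeping the JS and Python argument counts equal). A stripped-down sketch of that pattern with invented names, assuming Gradio 3.x:

    import gradio as gr

    def start_task(task_id, name):
        # task_id is injected by the _js side through the hidden dummy component
        return f"started {task_id} for {name}"

    with gr.Blocks() as demo:
        dummy = gr.Label(visible=False)   # placeholder the browser-side JS fills in
        name = gr.Textbox(label="Name")
        result = gr.Text()
        btn = gr.Button("Train")
        # the JS callback receives the same inputs and returns the values actually
        # passed to fn; here it replaces the dummy value with a generated id
        btn.click(fn=start_task, _js="(id, n) => ['task-' + Date.now(), n]", inputs=[dummy, name], outputs=[result])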
+ train_hypernetwork.click( + fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + dummy_component, + train_hypernetwork_name, + hypernetwork_learn_rate, + batch_size, + gradient_step, + dataset_directory, + log_directory, + training_width, + training_height, + varsize, + steps, + clip_grad_mode, + clip_grad_value, + shuffle_tags, + tag_drop_out, + latent_sampling_method, + use_weight, + create_image_every, + save_embedding_every, + template_file, + preview_from_txt2img, + *txt2img_preview_params, + ], + outputs=[ + ti_output, + ti_outcome, + ] + ) + + interrupt_training.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + + interrupt_preprocessing.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + + loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file) + + settings = ui_settings.UiSettings() + settings.create_ui(loadsave, dummy_component) + + interfaces = [ + (txt2img_interface, "txt2img", "txt2img"), + (img2img_interface, "img2img", "img2img"), + (extras_interface, "Extras", "extras"), + (pnginfo_interface, "PNG Info", "pnginfo"), + (modelmerger_interface, "Checkpoint Merger", "modelmerger"), + (train_interface, "Train", "train"), + ] + + interfaces += script_callbacks.ui_tabs_callback() + interfaces += [(settings.interface, "Settings", "settings")] + + extensions_interface = ui_extensions.create_ui() + interfaces += [(extensions_interface, "Extensions", "extensions")] + + shared.tab_names = [] + for _interface, label, _ifid in interfaces: + shared.tab_names.append(label) + + with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: + settings.add_quicksettings() + + parameters_copypaste.connect_paste_params_buttons() + + with gr.Tabs(elem_id="tabs") as tabs: + tab_order = {k: i for i, k in enumerate(opts.ui_tab_order)} + sorted_interfaces = sorted(interfaces, key=lambda x: tab_order.get(x[1], 9999)) + + for interface, label, ifid in sorted_interfaces: + if label in shared.opts.hidden_tabs: + continue + with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): + interface.render() + + for interface, _label, ifid in interfaces: + if ifid in ["extensions", "settings"]: + continue + + loadsave.add_block(interface, ifid) + + loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs) + + loadsave.setup_ui() + + if os.path.exists(os.path.join(script_path, "notification.mp3")): + gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) + + footer = shared.html("footer.html") + footer = footer.format(versions=versions_html(), api_docs="/docs" if shared.cmd_opts.api else "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API") + gr.HTML(footer, elem_id="footer") + + settings.add_functionality(demo) + + update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") + settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) + demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) + + def modelmerger(*args): + try: + results = modules.extras.run_modelmerger(*args) + except Exception as e: + errors.report("Error loading/saving model file", exc_info=True) + modules.sd_models.list_models() # to remove the potentially missing models from the list + return 
[*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"] + return results + + modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result]) + modelmerger_merge.click( + fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]), + _js='modelmerger', + inputs=[ + dummy_component, + primary_model_name, + secondary_model_name, + tertiary_model_name, + interp_method, + interp_amount, + save_as_half, + custom_name, + checkpoint_format, + config_source, + bake_in_vae, + discard_weights, + save_metadata, + ], + outputs=[ + primary_model_name, + secondary_model_name, + tertiary_model_name, + settings.component_dict['sd_model_checkpoint'], + modelmerger_result, + ] + ) + + loadsave.dump_defaults() + demo.ui_loadsave = loadsave + + # Required as a workaround for change() event not triggering when loading values from ui-config.json + interp_description.value = update_interp_description(interp_method.value) + + return demo + + +def versions_html(): + import torch + import launch + + python_version = ".".join([str(x) for x in sys.version_info[0:3]]) + commit = launch.commit_hash() + tag = launch.git_tag() + + if shared.xformers_available: + import xformers + xformers_version = xformers.__version__ + else: + xformers_version = "N/A" + + return f""" +version: {tag} + •  +python: {python_version} + •  +torch: {getattr(torch, '__long_version__',torch.__version__)} + •  +xformers: {xformers_version} + •  +gradio: {gr.__version__} + •  +checkpoint: N/A +""" + + +def setup_ui_api(app): + from pydantic import BaseModel, Field + from typing import List + + class QuicksettingsHint(BaseModel): + name: str = Field(title="Name of the quicksettings field") + label: str = Field(title="Label of the quicksettings field") + + def quicksettings_hint(): + return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()] + + app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint]) + + app.add_api_route("/internal/ping", lambda: {}, methods=["GET"]) + + app.add_api_route("/internal/profile-startup", lambda: timer.startup_record, methods=["GET"]) + + def download_sysinfo(attachment=False): + from fastapi.responses import PlainTextResponse + + text = sysinfo.get() + filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt" + + return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'}) + + app.add_api_route("/internal/sysinfo", download_sysinfo, methods=["GET"]) + app.add_api_route("/internal/sysinfo-download", lambda: download_sysinfo(attachment=True), methods=["GET"]) + diff --git a/temp b/temp deleted file mode 100644 index 07ecee7b..00000000 --- a/temp +++ /dev/null @@ -1,1621 +0,0 @@ -import datetime -import json -import mimetypes -import os -import sys -from functools import reduce -import warnings - -import gradio as gr -import gradio.utils -import numpy as np -from PIL import Image, PngImagePlugin # noqa: F401 -from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call - -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo -from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML -from modules.paths import 
script_path -from modules.ui_common import create_refresh_button -from modules.ui_gradio_extensions import reload_javascript - - -from modules.shared import opts, cmd_opts - -import modules.codeformer_model -import modules.generation_parameters_copypaste as parameters_copypaste -import modules.gfpgan_model -import modules.hypernetworks.ui -import modules.scripts -import modules.shared as shared -import modules.styles -import modules.textual_inversion.ui -from modules import prompt_parser -from modules.sd_hijack import model_hijack -from modules.sd_samplers import samplers, samplers_for_img2img -from modules.textual_inversion import textual_inversion -import modules.hypernetworks.ui -from modules.generation_parameters_copypaste import image_from_url_text -import modules.extras - -create_setting_component = ui_settings.create_setting_component - -warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning) - -# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI -mimetypes.init() -mimetypes.add_type('application/javascript', '.js') - -if not cmd_opts.share and not cmd_opts.listen: - # fix gradio phoning home - gradio.utils.version_check = lambda: None - gradio.utils.get_local_ip_address = lambda: '127.0.0.1' - -if cmd_opts.ngrok is not None: - import modules.ngrok as ngrok - print('ngrok authtoken detected, trying to connect...') - ngrok.connect( - cmd_opts.ngrok, - cmd_opts.port if cmd_opts.port is not None else 7860, - cmd_opts.ngrok_options - ) - - -def gr_show(visible=True): - return {"visible": visible, "__type__": "update"} - - -sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg" -sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None - -# Using constants for these since the variation selector isn't visible. -# Important that they exactly match script.js for tooltip to work. 
-random_symbol = '\U0001f3b2\ufe0f' # 🎲️ -reuse_symbol = '\u267b\ufe0f' # ♻️ -paste_symbol = '\u2199\ufe0f' # ↙ -refresh_symbol = '\U0001f504' # 🔄 -save_style_symbol = '\U0001f4be' # 💾 -apply_style_symbol = '\U0001f4cb' # 📋 -clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️ -extra_networks_symbol = '\U0001F3B4' # 🎴 -switch_values_symbol = '\U000021C5' # ⇅ -restore_progress_symbol = '\U0001F300' # 🌀 -detect_image_size_symbol = '\U0001F4D0' # 📐 -up_down_symbol = '\u2195\ufe0f' # ↕️ - - -plaintext_to_html = ui_common.plaintext_to_html - - -def send_gradio_gallery_to_image(x): - if len(x) == 0: - return None - return image_from_url_text(x[0]) - - -def add_style(name: str, prompt: str, negative_prompt: str): - if name is None: - return [gr_show() for x in range(4)] - - style = modules.styles.PromptStyle(name, prompt, negative_prompt) - shared.prompt_styles.styles[style.name] = style - # Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we - # reserialize all styles every time we save them - shared.prompt_styles.save_styles(shared.styles_filename) - - return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)] - - -def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y): - from modules import processing, devices - - if not enable: - return "" - - p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y) - - with devices.autocast(): - p.init([""], [0], [0]) - - return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" - - -def resize_from_to_html(width, height, scale_by): - target_width = int(width * scale_by) - target_height = int(height * scale_by) - - if not target_width or not target_height: - return "no image selected" - - return f"resize: from {width}x{height} to {target_width}x{target_height}" - - -def apply_styles(prompt, prompt_neg, styles): - prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) - prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) - - return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])] - - -def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles): - if mode in {0, 1, 3, 4}: - return [interrogation_function(ii_singles[mode]), None] - elif mode == 2: - return [interrogation_function(ii_singles[mode]["image"]), None] - elif mode == 5: - assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" - images = shared.listfiles(ii_input_dir) - print(f"Will process {len(images)} images.") - if ii_output_dir != "": - os.makedirs(ii_output_dir, exist_ok=True) - else: - ii_output_dir = ii_input_dir - - for image in images: - img = Image.open(image) - filename = os.path.basename(image) - left, _ = os.path.splitext(filename) - print(interrogation_function(img), file=open(os.path.join(ii_output_dir, f"{left}.txt"), 'a', encoding='utf-8')) - - return [gr.update(), None] - - -def interrogate(image): - prompt = shared.interrogator.interrogate(image.convert("RGB")) - return gr.update() if prompt is None else prompt - - -def interrogate_deepbooru(image): - prompt = deepbooru.model.tag(image) - return gr.update() if prompt is None else prompt - - -def create_seed_inputs(target_interface): - with 
FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): - seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") - seed.style(container=False) - random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') - reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') - - seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False) - - # Components to show/hide based on the 'Extra' checkbox - seed_extras = [] - - with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: - seed_extras.append(seed_extra_row_1) - subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed") - subseed.style(container=False) - random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") - reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") - subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") - - with FormRow(visible=False) as seed_extra_row_2: - seed_extras.append(seed_extra_row_2) - seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w") - seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h") - - random_seed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_seed')}", show_progress=False, inputs=[], outputs=[]) - random_subseed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_subseed')}", show_progress=False, inputs=[], outputs=[]) - - def change_visibility(show): - return {comp: gr_show(show) for comp in seed_extras} - - seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras) - - return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox - - - -def connect_clear_prompt(button): - """Given clear button, prompt, and token_counter objects, setup clear prompt button click event""" - button.click( - _js="clear_prompt", - fn=None, - inputs=[], - outputs=[], - ) - - -def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed): - """ Connects a 'reuse (sub)seed' button's click event so that it copies last used - (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength - was 0, i.e. 
no variation seed was used, it copies the normal seed value instead.""" - def copy_seed(gen_info_string: str, index): - res = -1 - - try: - gen_info = json.loads(gen_info_string) - index -= gen_info.get('index_of_first_image', 0) - - if is_subseed and gen_info.get('subseed_strength', 0) > 0: - all_subseeds = gen_info.get('all_subseeds', [-1]) - res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0] - else: - all_seeds = gen_info.get('all_seeds', [-1]) - res = all_seeds[index if 0 <= index < len(all_seeds) else 0] - - except json.decoder.JSONDecodeError: - if gen_info_string: - errors.report(f"Error parsing JSON generation info: {gen_info_string}") - - return [res, gr_show(False)] - - reuse_seed.click( - fn=copy_seed, - _js="(x, y) => [x, selected_gallery_index()]", - show_progress=False, - inputs=[generation_info, dummy_component], - outputs=[seed, dummy_component] - ) - - -def update_token_counter(text, steps): - try: - text, _ = extra_networks.parse_prompt(text) - - _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text]) - prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps) - - except Exception: - # a parsing error can happen here during typing, and we don't want to bother the user with - # messages related to it in console - prompt_schedules = [[[steps, text]]] - - flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules) - prompts = [prompt_text for step, prompt_text in flat_prompts] - token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0]) - return f"{token_count}/{max_length}" - - -def create_toprow(is_img2img): - id_part = "img2img" if is_img2img else "txt2img" - - with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): - with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - button_interrogate = None - button_deepbooru = None - if is_img2img: - with gr.Column(scale=1, elem_classes="interrogate-col"): - button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") - button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") - - with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): - with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): - interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") - skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") - submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') - - skip.click( - fn=lambda: shared.state.skip(), - inputs=[], - outputs=[], - ) - - interrupt.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - with gr.Row(elem_id=f"{id_part}_tools"): - paste = ToolButton(value=paste_symbol, elem_id="paste") - clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") - extra_networks_button = 
ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") - prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") - save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") - restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) - - token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) - token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") - negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) - negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") - - clear_prompt_button.click( - fn=lambda *x: x, - _js="confirm_clear_prompt", - inputs=[prompt, negative_prompt], - outputs=[prompt, negative_prompt], - ) - - with gr.Row(elem_id=f"{id_part}_styles_row"): - prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) - create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") - - return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button - - -def setup_progressbar(*args, **kwargs): - pass - - -def apply_setting(key, value): - if value is None: - return gr.update() - - if shared.cmd_opts.freeze_settings: - return gr.update() - - # dont allow model to be swapped when model hash exists in prompt - if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap: - return gr.update() - - if key == "sd_model_checkpoint": - ckpt_info = sd_models.get_closet_checkpoint_match(value) - - if ckpt_info is not None: - value = ckpt_info.title - else: - return gr.update() - - comp_args = opts.data_labels[key].component_args - if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False: - return - - valtype = type(opts.data_labels[key].default) - oldval = opts.data.get(key, None) - opts.data[key] = valtype(value) if valtype != type(None) else value - if oldval != value and opts.data_labels[key].onchange is not None: - opts.data_labels[key].onchange() - - opts.save(shared.config_filename) - return getattr(opts, key) - - -def create_output_panel(tabname, outdir): - return ui_common.create_output_panel(tabname, outdir) - - -def create_sampler_and_steps_selection(choices, tabname): - if opts.samplers_in_dropdown: - with FormRow(elem_id=f"sampler_selection_{tabname}"): - sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - else: - with FormGroup(elem_id=f"sampler_selection_{tabname}"): - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") - - return steps, sampler_index - - -def ordered_ui_categories(): - user_order = {x.strip(): i * 
2 + 1 for i, x in enumerate(shared.opts.ui_reorder_list)} - - for _, category in sorted(enumerate(shared_items.ui_reorder_categories()), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): - yield category - - -def create_override_settings_dropdown(tabname, row): - dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True) - - dropdown.change( - fn=lambda x: gr.Dropdown.update(visible=bool(x)), - inputs=[dropdown], - outputs=[dropdown], - ) - - return dropdown - - -def create_ui(): - import modules.img2img - import modules.txt2img - - reload_javascript() - - parameters_copypaste.reset() - - modules.scripts.scripts_current = modules.scripts.scripts_txt2img - modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) - - with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) - - dummy_component = gr.Label(visible=False) - txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) - - with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img') - - with gr.Row().style(equal_height=False): - with gr.Column(variant='compact', elem_id="txt2img_settings"): - modules.scripts.scripts_txt2img.prepare_ui() - - for category in ordered_ui_categories(): - if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") - - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="txt2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - - with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", label="Switch dims") - - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - - elif category == "cfg": - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") - - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') - - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") - enable_hr = gr.Checkbox(label='Hires. 
fix', value=False, elem_id="txt2img_enable_hr") - hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) - - elif category == "hires_fix": - with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: - with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): - hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") - - with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): - hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") - hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") - hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") - - with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: - hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index") - - with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: - with gr.Column(scale=80): - with gr.Row(): - hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) - with gr.Column(scale=80): - with gr.Row(): - hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) - - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - - elif category == "override_settings": - with FormRow(elem_id="txt2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('txt2img', row) - - elif category == "scripts": - with FormGroup(elem_id="txt2img_script_container"): - custom_inputs = modules.scripts.scripts_txt2img.setup_ui() - - else: - modules.scripts.scripts_txt2img.setup_ui_for_section(category) - - hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] - - for component in hr_resolution_preview_inputs: - event = component.release if isinstance(component, gr.Slider) else component.change - - event( - fn=calc_resolution_hires, - inputs=hr_resolution_preview_inputs, - outputs=[hr_final_resolution], - show_progress=False, - ) - event( - None, - _js="onCalcResolutionHires", - inputs=hr_resolution_preview_inputs, - outputs=[], - show_progress=False, - ) - - txt2img_gallery, 
generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) - - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - - txt2img_args = dict( - fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), - _js="submit", - inputs=[ - dummy_component, - txt2img_prompt, - txt2img_negative_prompt, - txt2img_prompt_styles, - steps, - sampler_index, - restore_faces, - tiling, - batch_count, - batch_size, - cfg_scale, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, - height, - width, - enable_hr, - denoising_strength, - hr_scale, - hr_upscaler, - hr_second_pass_steps, - hr_resize_x, - hr_resize_y, - hr_sampler_index, - hr_prompt, - hr_negative_prompt, - override_settings, - - ] + custom_inputs, - - outputs=[ - txt2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - txt2img_prompt.submit(**txt2img_args) - submit.click(**txt2img_args) - - res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('txt2img')}", inputs=None, outputs=None, show_progress=False) - - restore_progress_button.click( - fn=progress.restore_progress, - _js="restoreProgressTxt2img", - inputs=[dummy_component], - outputs=[ - txt2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - txt_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - txt_prompt_img - ], - outputs=[ - txt2img_prompt, - txt_prompt_img - ], - show_progress=False, - ) - - enable_hr.change( - fn=lambda x: gr_show(x), - inputs=[enable_hr], - outputs=[hr_options], - show_progress = False, - ) - - txt2img_paste_fields = [ - (txt2img_prompt, "Prompt"), - (txt2img_negative_prompt, "Negative prompt"), - (steps, "Steps"), - (sampler_index, "Sampler"), - (restore_faces, "Face restoration"), - (cfg_scale, "CFG scale"), - (seed, "Seed"), - (width, "Size-1"), - (height, "Size-2"), - (batch_size, "Batch size"), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), - (txt2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), - (denoising_strength, "Denoising strength"), - (enable_hr, lambda d: "Denoising strength" in d), - (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), - (hr_scale, "Hires upscale"), - (hr_upscaler, "Hires upscaler"), - (hr_second_pass_steps, "Hires steps"), - (hr_resize_x, "Hires resize-1"), - (hr_resize_y, "Hires resize-2"), - (hr_sampler_index, "Hires sampler"), - (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" else gr.update()), - (hr_prompt, "Hires prompt"), - (hr_negative_prompt, "Hires negative prompt"), - (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), - *modules.scripts.scripts_txt2img.infotext_fields - ] - parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, - )) - - txt2img_preview_params = [ - 
txt2img_prompt, - txt2img_negative_prompt, - steps, - sampler_index, - cfg_scale, - seed, - width, - height, - ] - - token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter]) - - ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) - - modules.scripts.scripts_current = modules.scripts.scripts_img2img - modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) - - with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) - - img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) - - with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img') - - with FormRow().style(equal_height=False): - with gr.Column(variant='compact', elem_id="img2img_settings"): - copy_image_buttons = [] - copy_image_destinations = {} - - def add_copy_image_controls(tab_name, elem): - with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"): - gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}") - - for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']): - if name == tab_name: - gr.Button(title, interactive=False) - copy_image_destinations[name] = elem - continue - - button = gr.Button(title) - copy_image_buttons.append((button, name, elem)) - - with gr.Tabs(elem_id="mode_img2img"): - img2img_selected_tab = gr.State(0) - - with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: - init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('img2img', init_img) - - with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: - sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('sketch', sketch) - - with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: - init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('inpaint', init_img_with_mask) - - with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", 
image_mode="RGBA").style(height=opts.img2img_editor_height) - inpaint_color_sketch_orig = gr.State(None) - add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) - - def update_orig(image, state): - if image is not None: - same_size = state is not None and state.size == image.size - has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) - edited = same_size and has_exact_match - return image if not edited or state is None else state - - inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) - - with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: - init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") - init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask") - - with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: - hidden = '
Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' - gr.HTML( - "Process images in a directory on the same machine where the server is running." + - "Use an empty output directory to save pictures normally instead of writing to the output directory." + - f"Add inpaint batch mask directory to enable inpaint batch processing." - f"{hidden}
" - ) - img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") - img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") - img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") - with gr.Accordion("PNG info", open=False): - img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") - img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") - img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") - - img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] - - for i, tab in enumerate(img2img_tabs): - tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) - - def copy_image(img): - if isinstance(img, dict) and 'image' in img: - return img['image'] - - return img - - for button, name, elem in copy_image_buttons: - button.click( - fn=copy_image, - inputs=[elem], - outputs=[copy_image_destinations[name]], - ) - button.click( - fn=lambda: None, - _js=f"switch_to_{name.replace(' ', '_')}", - inputs=[], - outputs=[], - ) - - with FormRow(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") - - modules.scripts.scripts_img2img.prepare_ui() - - for category in ordered_ui_categories(): - if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") - - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - selected_scale_tab = gr.State(value=0) - - with gr.Tabs(): - with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") - detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn") - - with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: - scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") - - with FormRow(): - scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") - gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") - button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") - - on_change_args = dict( - fn=resize_from_to_html, - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, scale_by], - 
outputs=scale_by_html, - show_progress=False, - ) - - scale_by.release(**on_change_args) - button_update_resize_to.click(**on_change_args) - - # the code below is meant to update the resolution label after the image in the image selection UI has changed. - # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. - # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. - for component in [init_img, sketch]: - component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) - - tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) - tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) - - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - - elif category == "cfg": - with FormGroup(): - with FormRow(): - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") - image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") - - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') - - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") - - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - - elif category == "override_settings": - with FormRow(elem_id="img2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('img2img', row) - - elif category == "scripts": - with FormGroup(elem_id="img2img_script_container"): - custom_inputs = modules.scripts.scripts_img2img.setup_ui() - - elif category == "inpaint": - with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: - with FormRow(): - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") - mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha") - - with FormRow(): - inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") - - with FormRow(): - inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") - - with FormRow(): - with gr.Column(): - inpaint_full_res = 
gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") - - with gr.Column(scale=4): - inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") - - def select_img2img_tab(tab): - return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), - - for i, elem in enumerate(img2img_tabs): - elem.select( - fn=lambda tab=i: select_img2img_tab(tab), - inputs=[], - outputs=[inpaint_controls, mask_alpha], - ) - else: - modules.scripts.scripts_img2img.setup_ui_for_section(category) - - img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) - - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - - img2img_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - img2img_prompt_img - ], - outputs=[ - img2img_prompt, - img2img_prompt_img - ], - show_progress=False, - ) - - img2img_args = dict( - fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), - _js="submit_img2img", - inputs=[ - dummy_component, - dummy_component, - img2img_prompt, - img2img_negative_prompt, - img2img_prompt_styles, - init_img, - sketch, - init_img_with_mask, - inpaint_color_sketch, - inpaint_color_sketch_orig, - init_img_inpaint, - init_mask_inpaint, - steps, - sampler_index, - mask_blur, - mask_alpha, - inpainting_fill, - restore_faces, - tiling, - batch_count, - batch_size, - cfg_scale, - image_cfg_scale, - denoising_strength, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, - selected_scale_tab, - height, - width, - scale_by, - resize_mode, - inpaint_full_res, - inpaint_full_res_padding, - inpainting_mask_invert, - img2img_batch_input_dir, - img2img_batch_output_dir, - img2img_batch_inpaint_mask_dir, - override_settings, - img2img_batch_use_png_info, - img2img_batch_png_info_props, - img2img_batch_png_info_dir, - ] + custom_inputs, - outputs=[ - img2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - interrogate_args = dict( - _js="get_img2img_tab_index", - inputs=[ - dummy_component, - img2img_batch_input_dir, - img2img_batch_output_dir, - init_img, - sketch, - init_img_with_mask, - inpaint_color_sketch, - init_img_inpaint, - ], - outputs=[img2img_prompt, dummy_component], - ) - - img2img_prompt.submit(**img2img_args) - submit.click(**img2img_args) - - res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False) - - detect_image_size_btn.click( - fn=lambda w, h, _: (w or gr.update(), h or gr.update()), - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, dummy_component], - outputs=[width, height], - show_progress=False, - ) - - restore_progress_button.click( - fn=progress.restore_progress, - _js="restoreProgressImg2img", - inputs=[dummy_component], - outputs=[ - img2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - img2img_interrogate.click( - fn=lambda *args: process_interrogate(interrogate, *args), - **interrogate_args, - ) - - img2img_deepbooru.click( - fn=lambda *args: process_interrogate(interrogate_deepbooru, *args), - **interrogate_args, - ) - - prompts = [(txt2img_prompt, 
txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] - style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles] - style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"] - - for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts): - button.click( - fn=add_style, - _js="ask_for_style_name", - # Have to pass empty dummy component here, because the JavaScript and Python function have to accept - # the same number of parameters, but we only know the style-name after the JavaScript prompt - inputs=[dummy_component, prompt, negative_prompt], - outputs=[txt2img_prompt_styles, img2img_prompt_styles], - ) - - for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs): - button.click( - fn=apply_styles, - _js=js_func, - inputs=[prompt, negative_prompt, styles], - outputs=[prompt, negative_prompt, styles], - ) - - token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter]) - - ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) - - img2img_paste_fields = [ - (img2img_prompt, "Prompt"), - (img2img_negative_prompt, "Negative prompt"), - (steps, "Steps"), - (sampler_index, "Sampler"), - (restore_faces, "Face restoration"), - (cfg_scale, "CFG scale"), - (image_cfg_scale, "Image CFG scale"), - (seed, "Seed"), - (width, "Size-1"), - (height, "Size-2"), - (batch_size, "Batch size"), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), - (img2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), - (denoising_strength, "Denoising strength"), - (mask_blur, "Mask blur"), - *modules.scripts.scripts_img2img.infotext_fields - ] - parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) - parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, - )) - - modules.scripts.scripts_current = None - - with gr.Blocks(analytics_enabled=False) as extras_interface: - ui_postprocessing.create_ui() - - with gr.Blocks(analytics_enabled=False) as pnginfo_interface: - with gr.Row().style(equal_height=False): - with gr.Column(variant='panel'): - image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil") - - with gr.Column(variant='panel'): - html = gr.HTML() - generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info") - html2 = gr.HTML() - with gr.Row(): - buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"]) - - for tabname, button in buttons.items(): - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image, - )) - - image.change( - fn=wrap_gradio_call(modules.extras.run_pnginfo), - inputs=[image], - outputs=[html, generation_info, html2], - ) - - 
def update_interp_description(value): - interp_description_css = "{}
" - interp_descriptions = { - "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."), - "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"), - "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M") - } - return interp_descriptions[value] - - with gr.Blocks(analytics_enabled=False) as modelmerger_interface: - with gr.Row().style(equal_height=False): - with gr.Column(variant='compact'): - interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description") - - with FormRow(elem_id="modelmerger_models"): - primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)") - create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A") - - secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") - create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B") - - tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") - create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") - - custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name") - interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount") - interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method") - interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description]) - - with FormRow(): - checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") - save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") - save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") - - with FormRow(): - with gr.Column(): - config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method") - - with gr.Column(): - with FormRow(): - bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae") - create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae") - - with FormRow(): - discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights") - - with gr.Row(): - modelmerger_merge = gr.Button(elem_id="modelmerger_merge", 
value="Merge", variant='primary') - - with gr.Column(variant='compact', elem_id="modelmerger_results_container"): - with gr.Group(elem_id="modelmerger_results_panel"): - modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False) - - with gr.Blocks(analytics_enabled=False) as train_interface: - with gr.Row().style(equal_height=False): - gr.HTML(value="

See wiki for detailed explanation.
") - - with gr.Row(variant="compact").style(equal_height=False): - with gr.Tabs(elem_id="train_tabs"): - - with gr.Tab(label="Create embedding", id="create_embedding"): - new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name") - initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text") - nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt") - overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding") - - with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"): - new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name") - new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes") - new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure") - new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func") - new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option") - new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm") - new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout") - new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. 
ex:'0, 0.01, 0'") - overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork") - - with gr.Tab(label="Preprocess images", id="preprocess_images"): - process_src = gr.Textbox(label='Source directory', elem_id="train_process_src") - process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst") - process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width") - process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height") - preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action") - - with gr.Row(): - process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size") - process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip") - process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split") - process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop") - process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop") - process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption") - process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru") - - with gr.Row(visible=False) as process_split_extra_row: - process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold") - process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio") - - with gr.Row(visible=False) as process_focal_crop_row: - process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight") - process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") - process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") - process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") - - with gr.Column(visible=False) as process_multicrop_col: - gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') - with gr.Row(): - process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim") - process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim") - with gr.Row(): - process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea") - process_multicrop_maxarea = 
gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea") - with gr.Row(): - process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective") - process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - with gr.Row(): - interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing") - run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess") - - process_split.change( - fn=lambda show: gr_show(show), - inputs=[process_split], - outputs=[process_split_extra_row], - ) - - process_focal_crop.change( - fn=lambda show: gr_show(show), - inputs=[process_focal_crop], - outputs=[process_focal_crop_row], - ) - - process_multicrop.change( - fn=lambda show: gr_show(show), - inputs=[process_multicrop], - outputs=[process_multicrop_col], - ) - - def get_textual_inversion_template_names(): - return sorted(textual_inversion.textual_inversion_templates) - - with gr.Tab(label="Train", id="train"): - gr.HTML(value="
Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]
") - with FormRow(): - train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) - create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name") - - train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks)) - create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name") - - with FormRow(): - embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") - hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate") - - with FormRow(): - clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) - clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False) - - with FormRow(): - batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size") - gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step") - - dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory") - log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory") - - with FormRow(): - template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names()) - create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refrsh_train_template_file") - - training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width") - training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height") - varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize") - steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps") - - with FormRow(): - create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every") - save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every") - - use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight") - - save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding") - preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) 
from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img") - - shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags") - tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out") - - latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method") - - with gr.Row(): - train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding") - interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training") - train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork") - - params = script_callbacks.UiTrainTabParams(txt2img_preview_params) - - script_callbacks.ui_train_tabs_callback(params) - - with gr.Column(elem_id='ti_gallery_container'): - ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) - gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) - gr.HTML(elem_id="ti_progress", value="") - ti_outcome = gr.HTML(elem_id="ti_error", value="") - - create_embedding.click( - fn=modules.textual_inversion.ui.create_embedding, - inputs=[ - new_embedding_name, - initialization_text, - nvpt, - overwrite_old_embedding, - ], - outputs=[ - train_embedding_name, - ti_output, - ti_outcome, - ] - ) - - create_hypernetwork.click( - fn=modules.hypernetworks.ui.create_hypernetwork, - inputs=[ - new_hypernetwork_name, - new_hypernetwork_sizes, - overwrite_old_hypernetwork, - new_hypernetwork_layer_structure, - new_hypernetwork_activation_func, - new_hypernetwork_initialization_option, - new_hypernetwork_add_layer_norm, - new_hypernetwork_use_dropout, - new_hypernetwork_dropout_structure - ], - outputs=[ - train_hypernetwork_name, - ti_output, - ti_outcome, - ] - ) - - run_preprocess.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - process_src, - process_dst, - process_width, - process_height, - preprocess_txt_action, - process_keep_original_size, - process_flip, - process_split, - process_caption, - process_caption_deepbooru, - process_split_threshold, - process_overlap_ratio, - process_focal_crop, - process_focal_crop_face_weight, - process_focal_crop_entropy_weight, - process_focal_crop_edges_weight, - process_focal_crop_debug, - process_multicrop, - process_multicrop_mindim, - process_multicrop_maxdim, - process_multicrop_minarea, - process_multicrop_maxarea, - process_multicrop_objective, - process_multicrop_threshold, - ], - outputs=[ - ti_output, - ti_outcome, - ], - ) - - train_embedding.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - train_embedding_name, - embedding_learn_rate, - batch_size, - gradient_step, - dataset_directory, - log_directory, - training_width, - training_height, - varsize, - steps, - clip_grad_mode, - clip_grad_value, - shuffle_tags, - tag_drop_out, - latent_sampling_method, - use_weight, - create_image_every, - save_embedding_every, - template_file, - save_image_with_stored_embedding, - preview_from_txt2img, - *txt2img_preview_params, - ], - outputs=[ - ti_output, - ti_outcome, - ] - ) - 
- train_hypernetwork.click( - fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - train_hypernetwork_name, - hypernetwork_learn_rate, - batch_size, - gradient_step, - dataset_directory, - log_directory, - training_width, - training_height, - varsize, - steps, - clip_grad_mode, - clip_grad_value, - shuffle_tags, - tag_drop_out, - latent_sampling_method, - use_weight, - create_image_every, - save_embedding_every, - template_file, - preview_from_txt2img, - *txt2img_preview_params, - ], - outputs=[ - ti_output, - ti_outcome, - ] - ) - - interrupt_training.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - interrupt_preprocessing.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file) - - settings = ui_settings.UiSettings() - settings.create_ui(loadsave, dummy_component) - - interfaces = [ - (txt2img_interface, "txt2img", "txt2img"), - (img2img_interface, "img2img", "img2img"), - (extras_interface, "Extras", "extras"), - (pnginfo_interface, "PNG Info", "pnginfo"), - (modelmerger_interface, "Checkpoint Merger", "modelmerger"), - (train_interface, "Train", "train"), - ] - - interfaces += script_callbacks.ui_tabs_callback() - interfaces += [(settings.interface, "Settings", "settings")] - - extensions_interface = ui_extensions.create_ui() - interfaces += [(extensions_interface, "Extensions", "extensions")] - - shared.tab_names = [] - for _interface, label, _ifid in interfaces: - shared.tab_names.append(label) - - with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: - settings.add_quicksettings() - - parameters_copypaste.connect_paste_params_buttons() - - with gr.Tabs(elem_id="tabs") as tabs: - tab_order = {k: i for i, k in enumerate(opts.ui_tab_order)} - sorted_interfaces = sorted(interfaces, key=lambda x: tab_order.get(x[1], 9999)) - - for interface, label, ifid in sorted_interfaces: - if label in shared.opts.hidden_tabs: - continue - with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): - interface.render() - - for interface, _label, ifid in interfaces: - if ifid in ["extensions", "settings"]: - continue - - loadsave.add_block(interface, ifid) - - loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs) - - loadsave.setup_ui() - - if os.path.exists(os.path.join(script_path, "notification.mp3")): - gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) - - footer = shared.html("footer.html") - footer = footer.format(versions=versions_html(), api_docs="/docs" if shared.cmd_opts.api else "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API") - gr.HTML(footer, elem_id="footer") - - settings.add_functionality(demo) - - update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") - settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - - def modelmerger(*args): - try: - results = modules.extras.run_modelmerger(*args) - except Exception as e: - errors.report("Error loading/saving model file", exc_info=True) - modules.sd_models.list_models() # to remove the potentially missing models from the list - return 
[*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"] - return results - - modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result]) - modelmerger_merge.click( - fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]), - _js='modelmerger', - inputs=[ - dummy_component, - primary_model_name, - secondary_model_name, - tertiary_model_name, - interp_method, - interp_amount, - save_as_half, - custom_name, - checkpoint_format, - config_source, - bake_in_vae, - discard_weights, - save_metadata, - ], - outputs=[ - primary_model_name, - secondary_model_name, - tertiary_model_name, - settings.component_dict['sd_model_checkpoint'], - modelmerger_result, - ] - ) - - loadsave.dump_defaults() - demo.ui_loadsave = loadsave - - # Required as a workaround for change() event not triggering when loading values from ui-config.json - interp_description.value = update_interp_description(interp_method.value) - - return demo - - -def versions_html(): - import torch - import launch - - python_version = ".".join([str(x) for x in sys.version_info[0:3]]) - commit = launch.commit_hash() - tag = launch.git_tag() - - if shared.xformers_available: - import xformers - xformers_version = xformers.__version__ - else: - xformers_version = "N/A" - - return f""" -version: {tag} - •  -python: {python_version} - •  -torch: {getattr(torch, '__long_version__',torch.__version__)} - •  -xformers: {xformers_version} - •  -gradio: {gr.__version__} - •  -checkpoint: N/A -""" - - -def setup_ui_api(app): - from pydantic import BaseModel, Field - from typing import List - - class QuicksettingsHint(BaseModel): - name: str = Field(title="Name of the quicksettings field") - label: str = Field(title="Label of the quicksettings field") - - def quicksettings_hint(): - return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()] - - app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint]) - - app.add_api_route("/internal/ping", lambda: {}, methods=["GET"]) - - app.add_api_route("/internal/profile-startup", lambda: timer.startup_record, methods=["GET"]) - - def download_sysinfo(attachment=False): - from fastapi.responses import PlainTextResponse - - text = sysinfo.get() - filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt" - - return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'}) - - app.add_api_route("/internal/sysinfo", download_sysinfo, methods=["GET"]) - app.add_api_route("/internal/sysinfo-download", lambda: download_sysinfo(attachment=True), methods=["GET"]) - -- cgit v1.2.3 From 6d3a0c950626e887f20bfc9946b84f9685303bab Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 1 Aug 2023 07:43:43 +0300 Subject: move checkpoint merger UI to its own file --- modules/ui.py | 97 +-- modules/ui_checkpoint_merger.py | 1657 ++------------------------------------- 2 files changed, 76 insertions(+), 1678 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index 07ecee7b..ac2787eb 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -12,7 +12,7 @@ import numpy as np from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, script_callbacks, 
ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path from modules.ui_common import create_refresh_button @@ -1083,58 +1083,7 @@ def create_ui(): outputs=[html, generation_info, html2], ) - def update_interp_description(value): - interp_description_css = "
{}
" - interp_descriptions = { - "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."), - "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"), - "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M") - } - return interp_descriptions[value] - - with gr.Blocks(analytics_enabled=False) as modelmerger_interface: - with gr.Row().style(equal_height=False): - with gr.Column(variant='compact'): - interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description") - - with FormRow(elem_id="modelmerger_models"): - primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)") - create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A") - - secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") - create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B") - - tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") - create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") - - custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name") - interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount") - interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method") - interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description]) - - with FormRow(): - checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") - save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") - save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") - - with FormRow(): - with gr.Column(): - config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method") - - with gr.Column(): - with FormRow(): - bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae") - create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae") - - with FormRow(): - discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights") - - with gr.Row(): - modelmerger_merge = gr.Button(elem_id="modelmerger_merge", 
value="Merge", variant='primary') - - with gr.Column(variant='compact', elem_id="modelmerger_results_container"): - with gr.Group(elem_id="modelmerger_results_panel"): - modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False) + modelmerger_ui = ui_checkpoint_merger.UiCheckpointMerger() with gr.Blocks(analytics_enabled=False) as train_interface: with gr.Row().style(equal_height=False): @@ -1464,7 +1413,7 @@ def create_ui(): (img2img_interface, "img2img", "img2img"), (extras_interface, "Extras", "extras"), (pnginfo_interface, "PNG Info", "pnginfo"), - (modelmerger_interface, "Checkpoint Merger", "modelmerger"), + (modelmerger_ui.blocks, "Checkpoint Merger", "modelmerger"), (train_interface, "Train", "train"), ] @@ -1516,49 +1465,11 @@ def create_ui(): settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - def modelmerger(*args): - try: - results = modules.extras.run_modelmerger(*args) - except Exception as e: - errors.report("Error loading/saving model file", exc_info=True) - modules.sd_models.list_models() # to remove the potentially missing models from the list - return [*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"] - return results - - modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result]) - modelmerger_merge.click( - fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]), - _js='modelmerger', - inputs=[ - dummy_component, - primary_model_name, - secondary_model_name, - tertiary_model_name, - interp_method, - interp_amount, - save_as_half, - custom_name, - checkpoint_format, - config_source, - bake_in_vae, - discard_weights, - save_metadata, - ], - outputs=[ - primary_model_name, - secondary_model_name, - tertiary_model_name, - settings.component_dict['sd_model_checkpoint'], - modelmerger_result, - ] - ) + modelmerger_ui.setup_ui(dummy_component=dummy_component, sd_model_checkpoint_component=settings.component_dict['sd_model_checkpoint']) loadsave.dump_defaults() demo.ui_loadsave = loadsave - # Required as a workaround for change() event not triggering when loading values from ui-config.json - interp_description.value = update_interp_description(interp_method.value) - return demo diff --git a/modules/ui_checkpoint_merger.py b/modules/ui_checkpoint_merger.py index 07ecee7b..8e72258a 100644 --- a/modules/ui_checkpoint_merger.py +++ b/modules/ui_checkpoint_merger.py @@ -1,1621 +1,108 @@ -import datetime -import json -import mimetypes -import os -import sys -from functools import reduce -import warnings import gradio as gr -import gradio.utils -import numpy as np -from PIL import Image, PngImagePlugin # noqa: F401 -from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo -from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML -from modules.paths import script_path +from modules import sd_models, sd_vae, errors, extras, call_queue +from modules.ui_components import FormRow from modules.ui_common import create_refresh_button -from modules.ui_gradio_extensions import reload_javascript -from modules.shared import opts, cmd_opts +def 
update_interp_description(value): + interp_description_css = "
{}
" + interp_descriptions = { + "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."), + "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"), + "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M") + } + return interp_descriptions[value] -import modules.codeformer_model -import modules.generation_parameters_copypaste as parameters_copypaste -import modules.gfpgan_model -import modules.hypernetworks.ui -import modules.scripts -import modules.shared as shared -import modules.styles -import modules.textual_inversion.ui -from modules import prompt_parser -from modules.sd_hijack import model_hijack -from modules.sd_samplers import samplers, samplers_for_img2img -from modules.textual_inversion import textual_inversion -import modules.hypernetworks.ui -from modules.generation_parameters_copypaste import image_from_url_text -import modules.extras -create_setting_component = ui_settings.create_setting_component - -warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning) - -# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI -mimetypes.init() -mimetypes.add_type('application/javascript', '.js') - -if not cmd_opts.share and not cmd_opts.listen: - # fix gradio phoning home - gradio.utils.version_check = lambda: None - gradio.utils.get_local_ip_address = lambda: '127.0.0.1' - -if cmd_opts.ngrok is not None: - import modules.ngrok as ngrok - print('ngrok authtoken detected, trying to connect...') - ngrok.connect( - cmd_opts.ngrok, - cmd_opts.port if cmd_opts.port is not None else 7860, - cmd_opts.ngrok_options - ) - - -def gr_show(visible=True): - return {"visible": visible, "__type__": "update"} - - -sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg" -sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None - -# Using constants for these since the variation selector isn't visible. -# Important that they exactly match script.js for tooltip to work. 
-random_symbol = '\U0001f3b2\ufe0f' # 🎲️ -reuse_symbol = '\u267b\ufe0f' # ♻️ -paste_symbol = '\u2199\ufe0f' # ↙ -refresh_symbol = '\U0001f504' # 🔄 -save_style_symbol = '\U0001f4be' # 💾 -apply_style_symbol = '\U0001f4cb' # 📋 -clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️ -extra_networks_symbol = '\U0001F3B4' # 🎴 -switch_values_symbol = '\U000021C5' # ⇅ -restore_progress_symbol = '\U0001F300' # 🌀 -detect_image_size_symbol = '\U0001F4D0' # 📐 -up_down_symbol = '\u2195\ufe0f' # ↕️ - - -plaintext_to_html = ui_common.plaintext_to_html - - -def send_gradio_gallery_to_image(x): - if len(x) == 0: - return None - return image_from_url_text(x[0]) - - -def add_style(name: str, prompt: str, negative_prompt: str): - if name is None: - return [gr_show() for x in range(4)] - - style = modules.styles.PromptStyle(name, prompt, negative_prompt) - shared.prompt_styles.styles[style.name] = style - # Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we - # reserialize all styles every time we save them - shared.prompt_styles.save_styles(shared.styles_filename) - - return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)] - - -def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y): - from modules import processing, devices - - if not enable: - return "" - - p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y) - - with devices.autocast(): - p.init([""], [0], [0]) - - return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" - - -def resize_from_to_html(width, height, scale_by): - target_width = int(width * scale_by) - target_height = int(height * scale_by) - - if not target_width or not target_height: - return "no image selected" - - return f"resize: from {width}x{height} to {target_width}x{target_height}" - - -def apply_styles(prompt, prompt_neg, styles): - prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) - prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) - - return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])] - - -def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles): - if mode in {0, 1, 3, 4}: - return [interrogation_function(ii_singles[mode]), None] - elif mode == 2: - return [interrogation_function(ii_singles[mode]["image"]), None] - elif mode == 5: - assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" - images = shared.listfiles(ii_input_dir) - print(f"Will process {len(images)} images.") - if ii_output_dir != "": - os.makedirs(ii_output_dir, exist_ok=True) - else: - ii_output_dir = ii_input_dir - - for image in images: - img = Image.open(image) - filename = os.path.basename(image) - left, _ = os.path.splitext(filename) - print(interrogation_function(img), file=open(os.path.join(ii_output_dir, f"{left}.txt"), 'a', encoding='utf-8')) - - return [gr.update(), None] - - -def interrogate(image): - prompt = shared.interrogator.interrogate(image.convert("RGB")) - return gr.update() if prompt is None else prompt - - -def interrogate_deepbooru(image): - prompt = deepbooru.model.tag(image) - return gr.update() if prompt is None else prompt - - -def create_seed_inputs(target_interface): - with 
FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): - seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") - seed.style(container=False) - random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') - reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') - - seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False) - - # Components to show/hide based on the 'Extra' checkbox - seed_extras = [] - - with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: - seed_extras.append(seed_extra_row_1) - subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed") - subseed.style(container=False) - random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") - reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") - subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") - - with FormRow(visible=False) as seed_extra_row_2: - seed_extras.append(seed_extra_row_2) - seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w") - seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h") - - random_seed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_seed')}", show_progress=False, inputs=[], outputs=[]) - random_subseed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_subseed')}", show_progress=False, inputs=[], outputs=[]) - - def change_visibility(show): - return {comp: gr_show(show) for comp in seed_extras} - - seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras) - - return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox - - - -def connect_clear_prompt(button): - """Given clear button, prompt, and token_counter objects, setup clear prompt button click event""" - button.click( - _js="clear_prompt", - fn=None, - inputs=[], - outputs=[], - ) - - -def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed): - """ Connects a 'reuse (sub)seed' button's click event so that it copies last used - (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength - was 0, i.e. 
no variation seed was used, it copies the normal seed value instead.""" - def copy_seed(gen_info_string: str, index): - res = -1 - - try: - gen_info = json.loads(gen_info_string) - index -= gen_info.get('index_of_first_image', 0) - - if is_subseed and gen_info.get('subseed_strength', 0) > 0: - all_subseeds = gen_info.get('all_subseeds', [-1]) - res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0] - else: - all_seeds = gen_info.get('all_seeds', [-1]) - res = all_seeds[index if 0 <= index < len(all_seeds) else 0] - - except json.decoder.JSONDecodeError: - if gen_info_string: - errors.report(f"Error parsing JSON generation info: {gen_info_string}") - - return [res, gr_show(False)] - - reuse_seed.click( - fn=copy_seed, - _js="(x, y) => [x, selected_gallery_index()]", - show_progress=False, - inputs=[generation_info, dummy_component], - outputs=[seed, dummy_component] - ) - - -def update_token_counter(text, steps): +def modelmerger(*args): try: - text, _ = extra_networks.parse_prompt(text) - - _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text]) - prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps) - - except Exception: - # a parsing error can happen here during typing, and we don't want to bother the user with - # messages related to it in console - prompt_schedules = [[[steps, text]]] - - flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules) - prompts = [prompt_text for step, prompt_text in flat_prompts] - token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0]) - return f"{token_count}/{max_length}" - - -def create_toprow(is_img2img): - id_part = "img2img" if is_img2img else "txt2img" - - with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): - with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - button_interrogate = None - button_deepbooru = None - if is_img2img: - with gr.Column(scale=1, elem_classes="interrogate-col"): - button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") - button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") - - with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): - with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): - interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") - skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") - submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') - - skip.click( - fn=lambda: shared.state.skip(), - inputs=[], - outputs=[], - ) - - interrupt.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - with gr.Row(elem_id=f"{id_part}_tools"): - paste = ToolButton(value=paste_symbol, elem_id="paste") - clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") - extra_networks_button = 
ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") - prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") - save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") - restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) - - token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) - token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") - negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) - negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") - - clear_prompt_button.click( - fn=lambda *x: x, - _js="confirm_clear_prompt", - inputs=[prompt, negative_prompt], - outputs=[prompt, negative_prompt], - ) - - with gr.Row(elem_id=f"{id_part}_styles_row"): - prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) - create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") - - return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button - - -def setup_progressbar(*args, **kwargs): - pass - - -def apply_setting(key, value): - if value is None: - return gr.update() - - if shared.cmd_opts.freeze_settings: - return gr.update() - - # dont allow model to be swapped when model hash exists in prompt - if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap: - return gr.update() - - if key == "sd_model_checkpoint": - ckpt_info = sd_models.get_closet_checkpoint_match(value) - - if ckpt_info is not None: - value = ckpt_info.title - else: - return gr.update() - - comp_args = opts.data_labels[key].component_args - if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False: - return - - valtype = type(opts.data_labels[key].default) - oldval = opts.data.get(key, None) - opts.data[key] = valtype(value) if valtype != type(None) else value - if oldval != value and opts.data_labels[key].onchange is not None: - opts.data_labels[key].onchange() - - opts.save(shared.config_filename) - return getattr(opts, key) - - -def create_output_panel(tabname, outdir): - return ui_common.create_output_panel(tabname, outdir) - - -def create_sampler_and_steps_selection(choices, tabname): - if opts.samplers_in_dropdown: - with FormRow(elem_id=f"sampler_selection_{tabname}"): - sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - else: - with FormGroup(elem_id=f"sampler_selection_{tabname}"): - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") - - return steps, sampler_index - - -def ordered_ui_categories(): - user_order = {x.strip(): i * 
2 + 1 for i, x in enumerate(shared.opts.ui_reorder_list)} - - for _, category in sorted(enumerate(shared_items.ui_reorder_categories()), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): - yield category - - -def create_override_settings_dropdown(tabname, row): - dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True) - - dropdown.change( - fn=lambda x: gr.Dropdown.update(visible=bool(x)), - inputs=[dropdown], - outputs=[dropdown], - ) - - return dropdown - - -def create_ui(): - import modules.img2img - import modules.txt2img - - reload_javascript() - - parameters_copypaste.reset() - - modules.scripts.scripts_current = modules.scripts.scripts_txt2img - modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) - - with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) - - dummy_component = gr.Label(visible=False) - txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) - - with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img') - - with gr.Row().style(equal_height=False): - with gr.Column(variant='compact', elem_id="txt2img_settings"): - modules.scripts.scripts_txt2img.prepare_ui() - - for category in ordered_ui_categories(): - if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") - - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="txt2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - - with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", label="Switch dims") - - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - - elif category == "cfg": - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") - - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') - - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") - enable_hr = gr.Checkbox(label='Hires. 
fix', value=False, elem_id="txt2img_enable_hr") - hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) - - elif category == "hires_fix": - with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: - with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): - hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") - - with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): - hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") - hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") - hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") - - with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: - hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index") - - with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: - with gr.Column(scale=80): - with gr.Row(): - hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) - with gr.Column(scale=80): - with gr.Row(): - hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) - - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - - elif category == "override_settings": - with FormRow(elem_id="txt2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('txt2img', row) - - elif category == "scripts": - with FormGroup(elem_id="txt2img_script_container"): - custom_inputs = modules.scripts.scripts_txt2img.setup_ui() - - else: - modules.scripts.scripts_txt2img.setup_ui_for_section(category) - - hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] - - for component in hr_resolution_preview_inputs: - event = component.release if isinstance(component, gr.Slider) else component.change - - event( - fn=calc_resolution_hires, - inputs=hr_resolution_preview_inputs, - outputs=[hr_final_resolution], - show_progress=False, - ) - event( - None, - _js="onCalcResolutionHires", - inputs=hr_resolution_preview_inputs, - outputs=[], - show_progress=False, - ) - - txt2img_gallery, 
generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) - - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - - txt2img_args = dict( - fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), - _js="submit", - inputs=[ - dummy_component, - txt2img_prompt, - txt2img_negative_prompt, - txt2img_prompt_styles, - steps, - sampler_index, - restore_faces, - tiling, - batch_count, - batch_size, - cfg_scale, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, - height, - width, - enable_hr, - denoising_strength, - hr_scale, - hr_upscaler, - hr_second_pass_steps, - hr_resize_x, - hr_resize_y, - hr_sampler_index, - hr_prompt, - hr_negative_prompt, - override_settings, - - ] + custom_inputs, - - outputs=[ - txt2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - txt2img_prompt.submit(**txt2img_args) - submit.click(**txt2img_args) - - res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('txt2img')}", inputs=None, outputs=None, show_progress=False) - - restore_progress_button.click( - fn=progress.restore_progress, - _js="restoreProgressTxt2img", - inputs=[dummy_component], - outputs=[ - txt2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - txt_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - txt_prompt_img - ], - outputs=[ - txt2img_prompt, - txt_prompt_img - ], - show_progress=False, - ) - - enable_hr.change( - fn=lambda x: gr_show(x), - inputs=[enable_hr], - outputs=[hr_options], - show_progress = False, - ) - - txt2img_paste_fields = [ - (txt2img_prompt, "Prompt"), - (txt2img_negative_prompt, "Negative prompt"), - (steps, "Steps"), - (sampler_index, "Sampler"), - (restore_faces, "Face restoration"), - (cfg_scale, "CFG scale"), - (seed, "Seed"), - (width, "Size-1"), - (height, "Size-2"), - (batch_size, "Batch size"), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), - (txt2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), - (denoising_strength, "Denoising strength"), - (enable_hr, lambda d: "Denoising strength" in d), - (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), - (hr_scale, "Hires upscale"), - (hr_upscaler, "Hires upscaler"), - (hr_second_pass_steps, "Hires steps"), - (hr_resize_x, "Hires resize-1"), - (hr_resize_y, "Hires resize-2"), - (hr_sampler_index, "Hires sampler"), - (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" else gr.update()), - (hr_prompt, "Hires prompt"), - (hr_negative_prompt, "Hires negative prompt"), - (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), - *modules.scripts.scripts_txt2img.infotext_fields - ] - parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, - )) - - txt2img_preview_params = [ - 
txt2img_prompt, - txt2img_negative_prompt, - steps, - sampler_index, - cfg_scale, - seed, - width, - height, - ] - - token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter]) - - ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) - - modules.scripts.scripts_current = modules.scripts.scripts_img2img - modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) - - with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) - - img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) - - with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img') - - with FormRow().style(equal_height=False): - with gr.Column(variant='compact', elem_id="img2img_settings"): - copy_image_buttons = [] - copy_image_destinations = {} - - def add_copy_image_controls(tab_name, elem): - with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"): - gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}") - - for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']): - if name == tab_name: - gr.Button(title, interactive=False) - copy_image_destinations[name] = elem - continue - - button = gr.Button(title) - copy_image_buttons.append((button, name, elem)) - - with gr.Tabs(elem_id="mode_img2img"): - img2img_selected_tab = gr.State(0) - - with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: - init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('img2img', init_img) - - with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: - sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('sketch', sketch) - - with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: - init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('inpaint', init_img_with_mask) - - with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", 
image_mode="RGBA").style(height=opts.img2img_editor_height) - inpaint_color_sketch_orig = gr.State(None) - add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) - - def update_orig(image, state): - if image is not None: - same_size = state is not None and state.size == image.size - has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) - edited = same_size and has_exact_match - return image if not edited or state is None else state - - inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) - - with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: - init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") - init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask") - - with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: - hidden = '
Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' - gr.HTML( - "

<p>Process images in a directory on the same machine where the server is running." + - "
<br>Use an empty output directory to save pictures normally instead of writing to the output directory." + - f"
<br>Add inpaint batch mask directory to enable inpaint batch processing." - f"{hidden}</p>
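The three sentences above describe how img2img batch mode is meant to behave: read every image from an input directory on the server, run img2img on it, and write the result to the output directory, or to the normal output location when that field is left empty. A minimal sketch of that directory loop, independent of the webui internals; run_img2img, input_dir, output_dir and default_outdir here are placeholders, not the project's API:

from pathlib import Path
from PIL import Image

def process_batch(input_dir, output_dir, run_img2img, default_outdir="outputs/img2img-images"):
    # an empty output directory means "save normally", i.e. to the default output location
    target = Path(output_dir) if output_dir else Path(default_outdir)
    target.mkdir(parents=True, exist_ok=True)

    for path in sorted(Path(input_dir).iterdir()):
        if path.suffix.lower() not in {".png", ".jpg", ".jpeg", ".webp"}:
            continue
        result = run_img2img(Image.open(path))  # placeholder for the actual img2img call
        result.save(target / path.name)         # keep the source file name for the output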

" - ) - img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") - img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") - img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") - with gr.Accordion("PNG info", open=False): - img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") - img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") - img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") - - img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] - - for i, tab in enumerate(img2img_tabs): - tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) - - def copy_image(img): - if isinstance(img, dict) and 'image' in img: - return img['image'] - - return img - - for button, name, elem in copy_image_buttons: - button.click( - fn=copy_image, - inputs=[elem], - outputs=[copy_image_destinations[name]], - ) - button.click( - fn=lambda: None, - _js=f"switch_to_{name.replace(' ', '_')}", - inputs=[], - outputs=[], - ) - - with FormRow(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") - - modules.scripts.scripts_img2img.prepare_ui() - - for category in ordered_ui_categories(): - if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") - - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - selected_scale_tab = gr.State(value=0) - - with gr.Tabs(): - with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") - detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn") + results = extras.run_modelmerger(*args) + except Exception as e: + errors.report("Error loading/saving model file", exc_info=True) + sd_models.list_models() # to remove the potentially missing models from the list + return [*[gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"] + return results - with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: - scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") - with FormRow(): - scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), 
elem_id="img2img_scale_resolution_preview") - gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") - button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") +class UiCheckpointMerger: + def __init__(self): + with gr.Blocks(analytics_enabled=False) as modelmerger_interface: + with gr.Row().style(equal_height=False): + with gr.Column(variant='compact'): + self.interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description") - on_change_args = dict( - fn=resize_from_to_html, - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, scale_by], - outputs=scale_by_html, - show_progress=False, - ) + with FormRow(elem_id="modelmerger_models"): + self.primary_model_name = gr.Dropdown(sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)") + create_refresh_button(self.primary_model_name, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, "refresh_checkpoint_A") - scale_by.release(**on_change_args) - button_update_resize_to.click(**on_change_args) + self.secondary_model_name = gr.Dropdown(sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") + create_refresh_button(self.secondary_model_name, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, "refresh_checkpoint_B") - # the code below is meant to update the resolution label after the image in the image selection UI has changed. - # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. - # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. - for component in [init_img, sketch]: - component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) + self.tertiary_model_name = gr.Dropdown(sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") + create_refresh_button(self.tertiary_model_name, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") - tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) - tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) - - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - - elif category == "cfg": - with FormGroup(): - with FormRow(): - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") - image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") - - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') - - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 
1, elem_id="img2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") - - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - - elif category == "override_settings": - with FormRow(elem_id="img2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('img2img', row) - - elif category == "scripts": - with FormGroup(elem_id="img2img_script_container"): - custom_inputs = modules.scripts.scripts_img2img.setup_ui() - - elif category == "inpaint": - with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: - with FormRow(): - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") - mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha") - - with FormRow(): - inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") - - with FormRow(): - inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") - - with FormRow(): - with gr.Column(): - inpaint_full_res = gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") - - with gr.Column(scale=4): - inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") - - def select_img2img_tab(tab): - return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), - - for i, elem in enumerate(img2img_tabs): - elem.select( - fn=lambda tab=i: select_img2img_tab(tab), - inputs=[], - outputs=[inpaint_controls, mask_alpha], - ) - else: - modules.scripts.scripts_img2img.setup_ui_for_section(category) - - img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) - - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - - img2img_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - img2img_prompt_img - ], - outputs=[ - img2img_prompt, - img2img_prompt_img - ], - show_progress=False, - ) - - img2img_args = dict( - fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), - _js="submit_img2img", - inputs=[ - dummy_component, - dummy_component, - img2img_prompt, - img2img_negative_prompt, - img2img_prompt_styles, - init_img, - sketch, - init_img_with_mask, - inpaint_color_sketch, - inpaint_color_sketch_orig, - init_img_inpaint, - init_mask_inpaint, - steps, - sampler_index, - mask_blur, - mask_alpha, - inpainting_fill, - restore_faces, - tiling, - batch_count, - batch_size, - cfg_scale, - image_cfg_scale, - denoising_strength, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, - selected_scale_tab, - height, - width, - scale_by, - resize_mode, - inpaint_full_res, - inpaint_full_res_padding, - inpainting_mask_invert, 
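The tab wiring earlier in this hunk relies on a small but important Python detail: in fn=lambda tabnum=i: tabnum and fn=lambda tab=i: select_img2img_tab(tab), the loop index is bound as a default argument. Python closures look the loop variable up late, so without the default every handler would report the last tab; the default argument freezes the value at definition time. A standalone illustration, using only plain Python:

tabs = ["img2img", "sketch", "inpaint", "inpaint sketch", "inpaint upload", "batch"]

late_bound = [lambda: i for i, _ in enumerate(tabs)]            # every closure reads i after the loop finished
frozen = [lambda tabnum=i: tabnum for i, _ in enumerate(tabs)]  # default argument captures i per iteration

print([fn() for fn in late_bound])  # [5, 5, 5, 5, 5, 5]
print([fn() for fn in frozen])      # [0, 1, 2, 3, 4, 5]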
- img2img_batch_input_dir, - img2img_batch_output_dir, - img2img_batch_inpaint_mask_dir, - override_settings, - img2img_batch_use_png_info, - img2img_batch_png_info_props, - img2img_batch_png_info_dir, - ] + custom_inputs, - outputs=[ - img2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - interrogate_args = dict( - _js="get_img2img_tab_index", - inputs=[ - dummy_component, - img2img_batch_input_dir, - img2img_batch_output_dir, - init_img, - sketch, - init_img_with_mask, - inpaint_color_sketch, - init_img_inpaint, - ], - outputs=[img2img_prompt, dummy_component], - ) - - img2img_prompt.submit(**img2img_args) - submit.click(**img2img_args) - - res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False) - - detect_image_size_btn.click( - fn=lambda w, h, _: (w or gr.update(), h or gr.update()), - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, dummy_component], - outputs=[width, height], - show_progress=False, - ) - - restore_progress_button.click( - fn=progress.restore_progress, - _js="restoreProgressImg2img", - inputs=[dummy_component], - outputs=[ - img2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - img2img_interrogate.click( - fn=lambda *args: process_interrogate(interrogate, *args), - **interrogate_args, - ) - - img2img_deepbooru.click( - fn=lambda *args: process_interrogate(interrogate_deepbooru, *args), - **interrogate_args, - ) - - prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] - style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles] - style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"] - - for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts): - button.click( - fn=add_style, - _js="ask_for_style_name", - # Have to pass empty dummy component here, because the JavaScript and Python function have to accept - # the same number of parameters, but we only know the style-name after the JavaScript prompt - inputs=[dummy_component, prompt, negative_prompt], - outputs=[txt2img_prompt_styles, img2img_prompt_styles], - ) - - for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs): - button.click( - fn=apply_styles, - _js=js_func, - inputs=[prompt, negative_prompt, styles], - outputs=[prompt, negative_prompt, styles], - ) - - token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter]) - - ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) - - img2img_paste_fields = [ - (img2img_prompt, "Prompt"), - (img2img_negative_prompt, "Negative prompt"), - (steps, "Steps"), - (sampler_index, "Sampler"), - (restore_faces, "Face restoration"), - (cfg_scale, "CFG scale"), - (image_cfg_scale, "Image CFG scale"), - (seed, "Seed"), - (width, "Size-1"), - (height, "Size-2"), - (batch_size, "Batch size"), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), - (img2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), - 
(denoising_strength, "Denoising strength"), - (mask_blur, "Mask blur"), - *modules.scripts.scripts_img2img.infotext_fields - ] - parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) - parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, - )) - - modules.scripts.scripts_current = None - - with gr.Blocks(analytics_enabled=False) as extras_interface: - ui_postprocessing.create_ui() - - with gr.Blocks(analytics_enabled=False) as pnginfo_interface: - with gr.Row().style(equal_height=False): - with gr.Column(variant='panel'): - image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil") - - with gr.Column(variant='panel'): - html = gr.HTML() - generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info") - html2 = gr.HTML() - with gr.Row(): - buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"]) - - for tabname, button in buttons.items(): - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image, - )) - - image.change( - fn=wrap_gradio_call(modules.extras.run_pnginfo), - inputs=[image], - outputs=[html, generation_info, html2], - ) - - def update_interp_description(value): - interp_description_css = "

<p>{}</p>

" - interp_descriptions = { - "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."), - "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"), - "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M") - } - return interp_descriptions[value] - - with gr.Blocks(analytics_enabled=False) as modelmerger_interface: - with gr.Row().style(equal_height=False): - with gr.Column(variant='compact'): - interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description") - - with FormRow(elem_id="modelmerger_models"): - primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)") - create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A") - - secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") - create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B") - - tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") - create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") - - custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name") - interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount") - interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method") - interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description]) - - with FormRow(): - checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") - save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") - save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") - - with FormRow(): - with gr.Column(): - config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method") - - with gr.Column(): - with FormRow(): - bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae") - create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae") - - with FormRow(): - discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights") - - with gr.Row(): - modelmerger_merge = gr.Button(elem_id="modelmerger_merge", 
value="Merge", variant='primary') - - with gr.Column(variant='compact', elem_id="modelmerger_results_container"): - with gr.Group(elem_id="modelmerger_results_panel"): - modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False) - - with gr.Blocks(analytics_enabled=False) as train_interface: - with gr.Row().style(equal_height=False): - gr.HTML(value="
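The interpolation descriptions above state the merge arithmetic directly: a weighted sum produces A * (1 - M) + B * M from two models, and add difference produces A + (B - C) * M from three. A toy, per-key illustration on plain floats; real checkpoints hold tensors and the actual work happens in extras.run_modelmerger, so this only sketches the formulas:

def weighted_sum(theta_a, theta_b, m):
    # A * (1 - M) + B * M, applied key by key; m=0 returns model A unchanged
    return {k: theta_a[k] * (1 - m) + theta_b[k] * m for k in theta_a}

def add_difference(theta_a, theta_b, theta_c, m):
    # A + (B - C) * M, applied key by key
    return {k: theta_a[k] + (theta_b[k] - theta_c[k]) * m for k in theta_a}

a, b, c = {"w": 1.0}, {"w": 3.0}, {"w": 2.0}
print(weighted_sum(a, b, 0.3))       # {'w': 1.6}
print(add_difference(a, b, c, 0.3))  # {'w': 1.3}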

<p>See wiki for detailed explanation.</p>

") - - with gr.Row(variant="compact").style(equal_height=False): - with gr.Tabs(elem_id="train_tabs"): - - with gr.Tab(label="Create embedding", id="create_embedding"): - new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name") - initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text") - nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt") - overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding") - - with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"): - new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name") - new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes") - new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure") - new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func") - new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option") - new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm") - new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout") - new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. 
ex:'0, 0.01, 0'") - overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork") - - with gr.Tab(label="Preprocess images", id="preprocess_images"): - process_src = gr.Textbox(label='Source directory', elem_id="train_process_src") - process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst") - process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width") - process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height") - preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action") - - with gr.Row(): - process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size") - process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip") - process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split") - process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop") - process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop") - process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption") - process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru") - - with gr.Row(visible=False) as process_split_extra_row: - process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold") - process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio") - - with gr.Row(visible=False) as process_focal_crop_row: - process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight") - process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") - process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") - process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") - - with gr.Column(visible=False) as process_multicrop_col: - gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') - with gr.Row(): - process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim") - process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim") - with gr.Row(): - process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea") - process_multicrop_maxarea = 
gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea") - with gr.Row(): - process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective") - process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - with gr.Row(): - interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing") - run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess") - - process_split.change( - fn=lambda show: gr_show(show), - inputs=[process_split], - outputs=[process_split_extra_row], - ) - - process_focal_crop.change( - fn=lambda show: gr_show(show), - inputs=[process_focal_crop], - outputs=[process_focal_crop_row], - ) - - process_multicrop.change( - fn=lambda show: gr_show(show), - inputs=[process_multicrop], - outputs=[process_multicrop_col], - ) - - def get_textual_inversion_template_names(): - return sorted(textual_inversion.textual_inversion_templates) - - with gr.Tab(label="Train", id="train"): - gr.HTML(value="

<p>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]</p>

") - with FormRow(): - train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) - create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name") - - train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks)) - create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name") - - with FormRow(): - embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") - hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate") - - with FormRow(): - clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) - clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False) + self.custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name") + self.interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount") + self.interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method") + self.interp_method.change(fn=update_interp_description, inputs=[self.interp_method], outputs=[self.interp_description]) with FormRow(): - batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size") - gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step") - - dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory") - log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory") + self.checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") + self.save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") + self.save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") with FormRow(): - template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names()) - create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refrsh_train_template_file") + with gr.Column(): + self.config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method") - training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width") - training_height = gr.Slider(minimum=64, maximum=2048, step=8, 
label="Height", value=512, elem_id="train_training_height") - varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize") - steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps") + with gr.Column(): + with FormRow(): + self.bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae") + create_refresh_button(self.bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae") with FormRow(): - create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every") - save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every") - - use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight") - - save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding") - preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img") - - shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags") - tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out") - - latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method") + self.discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights") with gr.Row(): - train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding") - interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training") - train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork") - - params = script_callbacks.UiTrainTabParams(txt2img_preview_params) - - script_callbacks.ui_train_tabs_callback(params) - - with gr.Column(elem_id='ti_gallery_container'): - ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) - gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) - gr.HTML(elem_id="ti_progress", value="") - ti_outcome = gr.HTML(elem_id="ti_error", value="") - - create_embedding.click( - fn=modules.textual_inversion.ui.create_embedding, - inputs=[ - new_embedding_name, - initialization_text, - nvpt, - overwrite_old_embedding, - ], - outputs=[ - train_embedding_name, - ti_output, - ti_outcome, - ] - ) - - create_hypernetwork.click( - fn=modules.hypernetworks.ui.create_hypernetwork, - inputs=[ - new_hypernetwork_name, - new_hypernetwork_sizes, - overwrite_old_hypernetwork, - new_hypernetwork_layer_structure, - new_hypernetwork_activation_func, - new_hypernetwork_initialization_option, - new_hypernetwork_add_layer_norm, - new_hypernetwork_use_dropout, - new_hypernetwork_dropout_structure - ], - outputs=[ - train_hypernetwork_name, - ti_output, - ti_outcome, - ] - ) - - run_preprocess.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]), - 
_js="start_training_textual_inversion", - inputs=[ - dummy_component, - process_src, - process_dst, - process_width, - process_height, - preprocess_txt_action, - process_keep_original_size, - process_flip, - process_split, - process_caption, - process_caption_deepbooru, - process_split_threshold, - process_overlap_ratio, - process_focal_crop, - process_focal_crop_face_weight, - process_focal_crop_entropy_weight, - process_focal_crop_edges_weight, - process_focal_crop_debug, - process_multicrop, - process_multicrop_mindim, - process_multicrop_maxdim, - process_multicrop_minarea, - process_multicrop_maxarea, - process_multicrop_objective, - process_multicrop_threshold, - ], - outputs=[ - ti_output, - ti_outcome, - ], - ) - - train_embedding.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - train_embedding_name, - embedding_learn_rate, - batch_size, - gradient_step, - dataset_directory, - log_directory, - training_width, - training_height, - varsize, - steps, - clip_grad_mode, - clip_grad_value, - shuffle_tags, - tag_drop_out, - latent_sampling_method, - use_weight, - create_image_every, - save_embedding_every, - template_file, - save_image_with_stored_embedding, - preview_from_txt2img, - *txt2img_preview_params, - ], - outputs=[ - ti_output, - ti_outcome, - ] - ) - - train_hypernetwork.click( - fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - train_hypernetwork_name, - hypernetwork_learn_rate, - batch_size, - gradient_step, - dataset_directory, - log_directory, - training_width, - training_height, - varsize, - steps, - clip_grad_mode, - clip_grad_value, - shuffle_tags, - tag_drop_out, - latent_sampling_method, - use_weight, - create_image_every, - save_embedding_every, - template_file, - preview_from_txt2img, - *txt2img_preview_params, - ], - outputs=[ - ti_output, - ti_outcome, - ] - ) - - interrupt_training.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - interrupt_preprocessing.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file) - - settings = ui_settings.UiSettings() - settings.create_ui(loadsave, dummy_component) - - interfaces = [ - (txt2img_interface, "txt2img", "txt2img"), - (img2img_interface, "img2img", "img2img"), - (extras_interface, "Extras", "extras"), - (pnginfo_interface, "PNG Info", "pnginfo"), - (modelmerger_interface, "Checkpoint Merger", "modelmerger"), - (train_interface, "Train", "train"), - ] - - interfaces += script_callbacks.ui_tabs_callback() - interfaces += [(settings.interface, "Settings", "settings")] - - extensions_interface = ui_extensions.create_ui() - interfaces += [(extensions_interface, "Extensions", "extensions")] - - shared.tab_names = [] - for _interface, label, _ifid in interfaces: - shared.tab_names.append(label) - - with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: - settings.add_quicksettings() - - parameters_copypaste.connect_paste_params_buttons() - - with gr.Tabs(elem_id="tabs") as tabs: - tab_order = {k: i for i, k in enumerate(opts.ui_tab_order)} - sorted_interfaces = sorted(interfaces, key=lambda x: tab_order.get(x[1], 9999)) - - for interface, label, ifid in sorted_interfaces: - if label in shared.opts.hidden_tabs: - 
continue - with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): - interface.render() - - for interface, _label, ifid in interfaces: - if ifid in ["extensions", "settings"]: - continue - - loadsave.add_block(interface, ifid) - - loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs) - - loadsave.setup_ui() + self.modelmerger_merge = gr.Button(elem_id="modelmerger_merge", value="Merge", variant='primary') - if os.path.exists(os.path.join(script_path, "notification.mp3")): - gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) + with gr.Column(variant='compact', elem_id="modelmerger_results_container"): + with gr.Group(elem_id="modelmerger_results_panel"): + self.modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False) - footer = shared.html("footer.html") - footer = footer.format(versions=versions_html(), api_docs="/docs" if shared.cmd_opts.api else "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API") - gr.HTML(footer, elem_id="footer") + self.blocks = modelmerger_interface - settings.add_functionality(demo) - - update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") - settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - - def modelmerger(*args): - try: - results = modules.extras.run_modelmerger(*args) - except Exception as e: - errors.report("Error loading/saving model file", exc_info=True) - modules.sd_models.list_models() # to remove the potentially missing models from the list - return [*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"] - return results - - modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result]) - modelmerger_merge.click( - fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]), + def setup_ui(self, dummy_component, sd_model_checkpoint_component): + self.modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[self.modelmerger_result]) + self.modelmerger_merge.click( + fn=call_queue.wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]), _js='modelmerger', inputs=[ dummy_component, - primary_model_name, - secondary_model_name, - tertiary_model_name, - interp_method, - interp_amount, - save_as_half, - custom_name, - checkpoint_format, - config_source, - bake_in_vae, - discard_weights, - save_metadata, + self.primary_model_name, + self.secondary_model_name, + self.tertiary_model_name, + self.interp_method, + self.interp_amount, + self.save_as_half, + self.custom_name, + self.checkpoint_format, + self.config_source, + self.bake_in_vae, + self.discard_weights, + self.save_metadata, ], outputs=[ - primary_model_name, - secondary_model_name, - tertiary_model_name, - settings.component_dict['sd_model_checkpoint'], - modelmerger_result, + self.primary_model_name, + self.secondary_model_name, + self.tertiary_model_name, + sd_model_checkpoint_component, + self.modelmerger_result, ] ) - loadsave.dump_defaults() - demo.ui_loadsave = loadsave - - # Required as a workaround for change() event not triggering when loading values from ui-config.json - interp_description.value = update_interp_description(interp_method.value) - - return demo - - -def versions_html(): - import torch - import launch - - 
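The modelmerger wrapper above (both the removed version and its class-based replacement) follows a recovery pattern worth spelling out: if the merge raises, the error is reported, the checkpoint list is re-scanned so files that vanished drop out of the dropdowns, and the callback still returns one value per output component, with the error text routed to the result panel instead of the exception propagating. A schematic version with placeholder report and refresh helpers standing in for the webui's own modules:

def merge_with_recovery(merge_fn, refresh_checkpoints, report, *args):
    # merge_fn, refresh_checkpoints and report are stand-ins for
    # extras.run_modelmerger, the sd_models refresh helpers and errors.report
    try:
        return merge_fn(*args)
    except Exception as e:
        report(f"Error loading/saving model file: {e}")
        choices = refresh_checkpoints()  # re-scan so missing checkpoints disappear from the choices
        # four dropdown updates (A, B, C and the active checkpoint) plus the message for the result panel
        return [choices, choices, choices, choices, f"Error merging checkpoints: {e}"]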
python_version = ".".join([str(x) for x in sys.version_info[0:3]]) - commit = launch.commit_hash() - tag = launch.git_tag() - - if shared.xformers_available: - import xformers - xformers_version = xformers.__version__ - else: - xformers_version = "N/A" - - return f""" -version: {tag} - •  -python: {python_version} - •  -torch: {getattr(torch, '__long_version__',torch.__version__)} - •  -xformers: {xformers_version} - •  -gradio: {gr.__version__} - •  -checkpoint: N/A -""" - - -def setup_ui_api(app): - from pydantic import BaseModel, Field - from typing import List - - class QuicksettingsHint(BaseModel): - name: str = Field(title="Name of the quicksettings field") - label: str = Field(title="Label of the quicksettings field") - - def quicksettings_hint(): - return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()] - - app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint]) - - app.add_api_route("/internal/ping", lambda: {}, methods=["GET"]) - - app.add_api_route("/internal/profile-startup", lambda: timer.startup_record, methods=["GET"]) - - def download_sysinfo(attachment=False): - from fastapi.responses import PlainTextResponse - - text = sysinfo.get() - filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt" - - return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'}) - - app.add_api_route("/internal/sysinfo", download_sysinfo, methods=["GET"]) - app.add_api_route("/internal/sysinfo-download", lambda: download_sysinfo(attachment=True), methods=["GET"]) + # Required as a workaround for change() event not triggering when loading values from ui-config.json + self.interp_description.value = update_interp_description(self.interp_method.value) -- cgit v1.2.3 From 20549a50cb3c41868ce561c6658bfaa0d20ac7ba Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 3 Aug 2023 22:46:57 +0300 Subject: add style editor dialog rework toprow for img2img and txt2img to use a class with fields fix the console error when editing checkpoint user metadata --- modules/sd_models.py | 2 +- modules/styles.py | 5 +- modules/ui.py | 230 ++++++++++--------------- modules/ui_common.py | 32 +++- modules/ui_extra_networks_checkpoints.py | 2 +- modules/ui_extra_networks_hypernets.py | 2 +- modules/ui_extra_networks_textual_inversion.py | 2 +- modules/ui_prompt_styles.py | 110 ++++++++++++ style.css | 13 ++ 9 files changed, 248 insertions(+), 150 deletions(-) create mode 100644 modules/ui_prompt_styles.py (limited to 'modules/ui.py') diff --git a/modules/sd_models.py b/modules/sd_models.py index 8f72f21d..1d93d893 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -68,7 +68,7 @@ class CheckpointInfo: self.title = name if self.shorthash is None else f'{name} [{self.shorthash}]' - self.ids = [self.hash, self.model_name, self.title, name, f'{name} [{self.hash}]'] + ([self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] if self.shorthash else []) + self.ids = [self.hash, self.model_name, self.title, name, self.name_for_extra, f'{name} [{self.hash}]'] + ([self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] if self.shorthash else []) def register(self): checkpoints_list[self.title] = self diff --git a/modules/styles.py b/modules/styles.py index ec0e1bc5..0740fe1b 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -106,10 +106,7 @@ class StyleDatabase: if 
os.path.exists(path): shutil.copy(path, f"{path}.bak") - fd = os.open(path, os.O_RDWR | os.O_CREAT) - with os.fdopen(fd, "w", encoding="utf-8-sig", newline='') as file: - # _fields is actually part of the public API: typing.NamedTuple is a replacement for collections.NamedTuple, - # and collections.NamedTuple has explicit documentation for accessing _fields. Same goes for _asdict() + with open(path, "w", encoding="utf-8-sig", newline='') as file: writer = csv.DictWriter(file, fieldnames=PromptStyle._fields) writer.writeheader() writer.writerows(style._asdict() for k, style in self.styles.items()) diff --git a/modules/ui.py b/modules/ui.py index ac2787eb..c059dcec 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -12,7 +12,7 @@ import numpy as np from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path from modules.ui_common import create_refresh_button @@ -92,19 +92,6 @@ def send_gradio_gallery_to_image(x): return image_from_url_text(x[0]) -def add_style(name: str, prompt: str, negative_prompt: str): - if name is None: - return [gr_show() for x in range(4)] - - style = modules.styles.PromptStyle(name, prompt, negative_prompt) - shared.prompt_styles.styles[style.name] = style - # Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we - # reserialize all styles every time we save them - shared.prompt_styles.save_styles(shared.styles_filename) - - return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)] - - def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y): from modules import processing, devices @@ -129,13 +116,6 @@ def resize_from_to_html(width, height, scale_by): return f"resize: from {width}x{height} to {target_width}x{target_height}" -def apply_styles(prompt, prompt_neg, styles): - prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) - prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) - - return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])] - - def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles): if mode in {0, 1, 3, 4}: return [interrogation_function(ii_singles[mode]), None] @@ -267,71 +247,67 @@ def update_token_counter(text, steps): return f"{token_count}/{max_length}" -def create_toprow(is_img2img): - id_part = "img2img" if is_img2img else "txt2img" +class Toprow: + def __init__(self, is_img2img): + id_part = "img2img" if is_img2img else "txt2img" + self.id_part = id_part - with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): - with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, 
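The styles.py hunk above swaps the low-level os.open/fdopen pair for a plain open() call; the CSV writing itself is unchanged and leans on NamedTuple._fields and _asdict(), which, as the removed comment noted, are documented parts of typing.NamedTuple. A self-contained sketch of the same save path, using a simplified stand-in for PromptStyle:

import csv
from typing import NamedTuple

class PromptStyle(NamedTuple):  # simplified stand-in; the real class lives in modules.styles
    name: str
    prompt: str
    negative_prompt: str

def save_styles(path, styles):
    # utf-8-sig writes a BOM so spreadsheet apps detect the encoding; newline='' is required by the csv module
    with open(path, "w", encoding="utf-8-sig", newline='') as file:
        writer = csv.DictWriter(file, fieldnames=PromptStyle._fields)
        writer.writeheader()
        writer.writerows(style._asdict() for style in styles.values())

save_styles("styles.csv", {"cinematic": PromptStyle("cinematic", "cinematic lighting, 35mm", "blurry")})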
lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - button_interrogate = None - button_deepbooru = None - if is_img2img: - with gr.Column(scale=1, elem_classes="interrogate-col"): - button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") - button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") - - with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): - with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): - interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") - skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") - submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') - - skip.click( - fn=lambda: shared.state.skip(), - inputs=[], - outputs=[], - ) + with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): + with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): + with gr.Row(): + with gr.Column(scale=80): + with gr.Row(): + self.prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - interrupt.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) + with gr.Row(): + with gr.Column(scale=80): + with gr.Row(): + self.negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + + self.button_interrogate = None + self.button_deepbooru = None + if is_img2img: + with gr.Column(scale=1, elem_classes="interrogate-col"): + self.button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") + self.button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") + + with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): + with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): + self.interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") + self.skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") + self.submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') + + self.skip.click( + fn=lambda: shared.state.skip(), + inputs=[], + outputs=[], + ) - with gr.Row(elem_id=f"{id_part}_tools"): - paste = ToolButton(value=paste_symbol, elem_id="paste") - clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") - extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") - prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") - save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") - restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) - - token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) - token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") - 
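The Toprow class being introduced here is the "class with fields" mentioned in the commit message: the old create_toprow returned a fifteen-element tuple that both tabs had to unpack positionally, padding unused slots with underscores, whereas a class lets callers write toprow.prompt or toprow.submit. A minimal before/after illustration with generic stand-in values:

# before: a long positional return that every caller must unpack in exactly the right order
def create_toprow():
    prompt, negative_prompt, submit = "prompt box", "negative prompt box", "generate button"
    return prompt, negative_prompt, submit  # the real function grew to fifteen items

prompt, _, submit = create_toprow()  # unused slots still need placeholders

# after: named fields, so adding or removing a component does not break every call site
class Toprow:
    def __init__(self):
        self.prompt = "prompt box"
        self.negative_prompt = "negative prompt box"
        self.submit = "generate button"

toprow = Toprow()
print(toprow.prompt, toprow.submit)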
negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) - negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") - - clear_prompt_button.click( - fn=lambda *x: x, - _js="confirm_clear_prompt", - inputs=[prompt, negative_prompt], - outputs=[prompt, negative_prompt], - ) + self.interrupt.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) - with gr.Row(elem_id=f"{id_part}_styles_row"): - prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) - create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") + with gr.Row(elem_id=f"{id_part}_tools"): + self.paste = ToolButton(value=paste_symbol, elem_id="paste") + self.clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") + self.extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") + self.restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) + + self.token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) + self.token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") + self.negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) + self.negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") + + self.clear_prompt_button.click( + fn=lambda *x: x, + _js="confirm_clear_prompt", + inputs=[self.prompt, self.negative_prompt], + outputs=[self.prompt, self.negative_prompt], + ) - return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button + self.ui_styles = ui_prompt_styles.UiPromptStyles(id_part, self.prompt, self.negative_prompt) def setup_progressbar(*args, **kwargs): @@ -419,14 +395,14 @@ def create_ui(): modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) + toprow = txt2img_toprow = Toprow(is_img2img=False) dummy_component = gr.Label(visible=False) txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: from modules import ui_extra_networks - extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img') + extra_networks_ui = ui_extra_networks.create_ui(extra_networks, toprow.extra_networks_button, 'txt2img') with gr.Row().style(equal_height=False): with gr.Column(variant='compact', elem_id="txt2img_settings"): @@ -532,9 +508,9 @@ def create_ui(): _js="submit", inputs=[ dummy_component, - txt2img_prompt, - txt2img_negative_prompt, 
- txt2img_prompt_styles, + toprow.prompt, + toprow.negative_prompt, + toprow.ui_styles.dropdown, steps, sampler_index, restore_faces, @@ -569,12 +545,12 @@ def create_ui(): show_progress=False, ) - txt2img_prompt.submit(**txt2img_args) - submit.click(**txt2img_args) + toprow.prompt.submit(**txt2img_args) + toprow.submit.click(**txt2img_args) res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('txt2img')}", inputs=None, outputs=None, show_progress=False) - restore_progress_button.click( + toprow.restore_progress_button.click( fn=progress.restore_progress, _js="restoreProgressTxt2img", inputs=[dummy_component], @@ -593,7 +569,7 @@ def create_ui(): txt_prompt_img ], outputs=[ - txt2img_prompt, + toprow.prompt, txt_prompt_img ], show_progress=False, @@ -607,8 +583,8 @@ def create_ui(): ) txt2img_paste_fields = [ - (txt2img_prompt, "Prompt"), - (txt2img_negative_prompt, "Negative prompt"), + (toprow.prompt, "Prompt"), + (toprow.negative_prompt, "Negative prompt"), (steps, "Steps"), (sampler_index, "Sampler"), (restore_faces, "Face restoration"), @@ -621,7 +597,7 @@ def create_ui(): (subseed_strength, "Variation seed strength"), (seed_resize_from_w, "Seed resize from-1"), (seed_resize_from_h, "Seed resize from-2"), - (txt2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), + (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (enable_hr, lambda d: "Denoising strength" in d), (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), @@ -639,12 +615,12 @@ def create_ui(): ] parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, + paste_button=toprow.paste, tabname="txt2img", source_text_component=toprow.prompt, source_image_component=None, )) txt2img_preview_params = [ - txt2img_prompt, - txt2img_negative_prompt, + toprow.prompt, + toprow.negative_prompt, steps, sampler_index, cfg_scale, @@ -653,8 +629,8 @@ def create_ui(): height, ] - token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter]) + toprow.token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.prompt, steps], outputs=[toprow.token_counter]) + toprow.negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter]) ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) @@ -662,13 +638,13 @@ def create_ui(): modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) + toprow = img2img_toprow = Toprow(is_img2img=True) img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", 
file_count="single", type="binary", visible=False) with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: from modules import ui_extra_networks - extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img') + extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, toprow.extra_networks_button, 'img2img') with FormRow().style(equal_height=False): with gr.Column(variant='compact', elem_id="img2img_settings"): @@ -889,7 +865,7 @@ def create_ui(): img2img_prompt_img ], outputs=[ - img2img_prompt, + toprow.prompt, img2img_prompt_img ], show_progress=False, @@ -901,9 +877,9 @@ def create_ui(): inputs=[ dummy_component, dummy_component, - img2img_prompt, - img2img_negative_prompt, - img2img_prompt_styles, + toprow.prompt, + toprow.negative_prompt, + toprow.ui_styles.dropdown, init_img, sketch, init_img_with_mask, @@ -962,11 +938,11 @@ def create_ui(): inpaint_color_sketch, init_img_inpaint, ], - outputs=[img2img_prompt, dummy_component], + outputs=[toprow.prompt, dummy_component], ) - img2img_prompt.submit(**img2img_args) - submit.click(**img2img_args) + toprow.prompt.submit(**img2img_args) + toprow.submit.click(**img2img_args) res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False) @@ -978,7 +954,7 @@ def create_ui(): show_progress=False, ) - restore_progress_button.click( + toprow.restore_progress_button.click( fn=progress.restore_progress, _js="restoreProgressImg2img", inputs=[dummy_component], @@ -991,46 +967,24 @@ def create_ui(): show_progress=False, ) - img2img_interrogate.click( + toprow.button_interrogate.click( fn=lambda *args: process_interrogate(interrogate, *args), **interrogate_args, ) - img2img_deepbooru.click( + toprow.button_deepbooru.click( fn=lambda *args: process_interrogate(interrogate_deepbooru, *args), **interrogate_args, ) - prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] - style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles] - style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"] - - for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts): - button.click( - fn=add_style, - _js="ask_for_style_name", - # Have to pass empty dummy component here, because the JavaScript and Python function have to accept - # the same number of parameters, but we only know the style-name after the JavaScript prompt - inputs=[dummy_component, prompt, negative_prompt], - outputs=[txt2img_prompt_styles, img2img_prompt_styles], - ) - - for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs): - button.click( - fn=apply_styles, - _js=js_func, - inputs=[prompt, negative_prompt, styles], - outputs=[prompt, negative_prompt, styles], - ) - - token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter]) + toprow.token_button.click(fn=update_token_counter, inputs=[toprow.prompt, steps], outputs=[toprow.token_counter]) + toprow.negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter]) ui_extra_networks.setup_ui(extra_networks_ui_img2img, 
img2img_gallery) img2img_paste_fields = [ - (img2img_prompt, "Prompt"), - (img2img_negative_prompt, "Negative prompt"), + (toprow.prompt, "Prompt"), + (toprow.negative_prompt, "Negative prompt"), (steps, "Steps"), (sampler_index, "Sampler"), (restore_faces, "Face restoration"), @@ -1044,7 +998,7 @@ def create_ui(): (subseed_strength, "Variation seed strength"), (seed_resize_from_w, "Seed resize from-1"), (seed_resize_from_h, "Seed resize from-2"), - (img2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), + (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (mask_blur, "Mask blur"), *modules.scripts.scripts_img2img.infotext_fields @@ -1052,7 +1006,7 @@ def create_ui(): parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, + paste_button=toprow.paste, tabname="img2img", source_text_component=toprow.prompt, source_image_component=None, )) modules.scripts.scripts_current = None diff --git a/modules/ui_common.py b/modules/ui_common.py index 11eb2a4b..ba75fa73 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -223,20 +223,44 @@ Requested path was: {f} def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_id): + refresh_components = refresh_component if isinstance(refresh_component, list) else [refresh_component] + + label = None + for comp in refresh_components: + label = getattr(comp, 'label', None) + if label is not None: + break + def refresh(): refresh_method() args = refreshed_args() if callable(refreshed_args) else refreshed_args for k, v in args.items(): - setattr(refresh_component, k, v) + for comp in refresh_components: + setattr(comp, k, v) - return gr.update(**(args or {})) + return [gr.update(**(args or {})) for _ in refresh_components] - refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id) + refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id, tooltip=f"{label}: refresh" if label else "Refresh") refresh_button.click( fn=refresh, inputs=[], - outputs=[refresh_component] + outputs=[*refresh_components] ) return refresh_button + +def setup_dialog(button_show, dialog, *, button_close=None): + """Sets up the UI so that the dialog (gr.Box) is invisible, and is only shown when buttons_show is clicked, in a fullscreen modal window.""" + + dialog.visible = False + + button_show.click( + fn=lambda: gr.update(visible=True), + inputs=[], + outputs=[dialog], + ).then(fn=None, _js="function(){ popup(gradioApp().getElementById('" + dialog.elem_id + "')); }") + + if button_close: + button_close.click(fn=None, _js="closePopup") + diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py index 2bb0a222..891d8f2c 100644 --- a/modules/ui_extra_networks_checkpoints.py +++ b/modules/ui_extra_networks_checkpoints.py @@ -12,7 +12,7 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage): def refresh(self): shared.refresh_checkpoints() - def create_item(self, name, index=None): + def create_item(self, name, index=None, enable_filter=True): checkpoint: 
sd_models.CheckpointInfo = sd_models.checkpoint_aliases.get(name) path, ext = os.path.splitext(checkpoint.filename) return { diff --git a/modules/ui_extra_networks_hypernets.py b/modules/ui_extra_networks_hypernets.py index e53ccb42..514a4562 100644 --- a/modules/ui_extra_networks_hypernets.py +++ b/modules/ui_extra_networks_hypernets.py @@ -11,7 +11,7 @@ class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage): def refresh(self): shared.reload_hypernetworks() - def create_item(self, name, index=None): + def create_item(self, name, index=None, enable_filter=True): full_path = shared.hypernetworks[name] path, ext = os.path.splitext(full_path) diff --git a/modules/ui_extra_networks_textual_inversion.py b/modules/ui_extra_networks_textual_inversion.py index d1794e50..73134698 100644 --- a/modules/ui_extra_networks_textual_inversion.py +++ b/modules/ui_extra_networks_textual_inversion.py @@ -12,7 +12,7 @@ class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage): def refresh(self): sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True) - def create_item(self, name, index=None): + def create_item(self, name, index=None, enable_filter=True): embedding = sd_hijack.model_hijack.embedding_db.word_embeddings.get(name) path, ext = os.path.splitext(embedding.filename) diff --git a/modules/ui_prompt_styles.py b/modules/ui_prompt_styles.py new file mode 100644 index 00000000..85eb3a64 --- /dev/null +++ b/modules/ui_prompt_styles.py @@ -0,0 +1,110 @@ +import gradio as gr + +from modules import shared, ui_common, ui_components, styles + +styles_edit_symbol = '\U0001f58c\uFE0F' # 🖌️ +styles_materialize_symbol = '\U0001f4cb' # 📋 + + +def select_style(name): + style = shared.prompt_styles.styles.get(name) + existing = style is not None + empty = not name + + prompt = style.prompt if style else gr.update() + negative_prompt = style.negative_prompt if style else gr.update() + + return prompt, negative_prompt, gr.update(visible=existing), gr.update(visible=not empty) + + +def save_style(name, prompt, negative_prompt): + if not name: + return gr.update(visible=False) + + style = styles.PromptStyle(name, prompt, negative_prompt) + shared.prompt_styles.styles[style.name] = style + shared.prompt_styles.save_styles(shared.styles_filename) + + return gr.update(visible=True) + + +def delete_style(name): + if name == "": + return + + shared.prompt_styles.styles.pop(name, None) + shared.prompt_styles.save_styles(shared.styles_filename) + + return '', '', '' + + +def materialize_styles(prompt, negative_prompt, styles): + prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) + negative_prompt = shared.prompt_styles.apply_negative_styles_to_prompt(negative_prompt, styles) + + return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=negative_prompt), gr.Dropdown.update(value=[])] + + +def refresh_styles(): + return gr.update(choices=list(shared.prompt_styles.styles)), gr.update(choices=list(shared.prompt_styles.styles)) + + +class UiPromptStyles: + def __init__(self, tabname, main_ui_prompt, main_ui_negative_prompt): + self.tabname = tabname + + with gr.Row(elem_id=f"{tabname}_styles_row"): + self.dropdown = gr.Dropdown(label="Styles", show_label=False, elem_id=f"{tabname}_styles", choices=list(shared.prompt_styles.styles), value=[], multiselect=True, tooltip="Styles") + edit_button = ui_components.ToolButton(value=styles_edit_symbol, elem_id=f"{tabname}_styles_edit_button", tooltip="Edit styles") + + with 
gr.Box(elem_id=f"{tabname}_styles_dialog", elem_classes="popup-dialog") as styles_dialog: + with gr.Row(): + self.selection = gr.Dropdown(label="Styles", elem_id=f"{tabname}_styles_edit_select", choices=list(shared.prompt_styles.styles), value=[], allow_custom_value=True, info="Styles allow you to add custom text to prompt. Use the {prompt} token in style text, and it will be replaced with user's prompt when applying style. Otherwise, style's text will be added to the end of the prompt.") + ui_common.create_refresh_button([self.dropdown, self.selection], shared.prompt_styles.reload, lambda: {"choices": list(shared.prompt_styles.styles)}, f"refresh_{tabname}_styles") + self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f"{tabname}_style_apply", tooltip="Apply all selected styles from the style selction dropdown in main UI to the prompt.") + + with gr.Row(): + self.prompt = gr.Textbox(label="Prompt", show_label=True, elem_id=f"{tabname}_edit_style_prompt", lines=3) + + with gr.Row(): + self.neg_prompt = gr.Textbox(label="Negative prompt", show_label=True, elem_id=f"{tabname}_edit_style_neg_prompt", lines=3) + + with gr.Row(): + self.save = gr.Button('Save', variant='primary', elem_id=f'{tabname}_edit_style_save', visible=False) + self.delete = gr.Button('Delete', variant='primary', elem_id=f'{tabname}_edit_style_delete', visible=False) + self.close = gr.Button('Close', variant='secondary', elem_id=f'{tabname}_edit_style_close') + + self.selection.change( + fn=select_style, + inputs=[self.selection], + outputs=[self.prompt, self.neg_prompt, self.delete, self.save], + show_progress=False, + ) + + self.save.click( + fn=save_style, + inputs=[self.selection, self.prompt, self.neg_prompt], + outputs=[self.delete], + show_progress=False, + ).then(refresh_styles, outputs=[self.dropdown, self.selection], show_progress=False) + + self.delete.click( + fn=delete_style, + _js='function(name){ if(name == "") return ""; return confirm("Delete style " + name + "?") ? 
name : ""; }', + inputs=[self.selection], + outputs=[self.selection, self.prompt, self.neg_prompt], + show_progress=False, + ).then(refresh_styles, outputs=[self.dropdown, self.selection], show_progress=False) + + self.materialize.click( + fn=materialize_styles, + inputs=[main_ui_prompt, main_ui_negative_prompt, self.dropdown], + outputs=[main_ui_prompt, main_ui_negative_prompt, self.dropdown], + show_progress=False, + ).then(fn=None, _js="function(){update_"+tabname+"_tokens(); closePopup();}", show_progress=False) + + ui_common.setup_dialog(button_show=edit_button, dialog=styles_dialog, button_close=self.close) + + + + diff --git a/style.css b/style.css index 6c92d6e7..cf8470e4 100644 --- a/style.css +++ b/style.css @@ -972,3 +972,16 @@ div.block.gradio-box.edit-user-metadata { .edit-user-metadata-buttons{ margin-top: 1.5em; } + + + + +div.block.gradio-box.popup-dialog, .popup-dialog { + width: 56em; + background: var(--body-background-fill); + padding: 2em !important; +} + +div.block.gradio-box.popup-dialog > div:last-child, .popup-dialog > div:last-child{ + margin-top: 1em; +} -- cgit v1.2.3 From af528552d6006268e352f422d533a69a6a718a5d Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 3 Aug 2023 23:18:50 +0300 Subject: fix linter issues --- modules/ui.py | 57 +++++++++++++++++++++++++-------------------------------- webui.py | 4 ++-- 2 files changed, 27 insertions(+), 34 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index c059dcec..70364a8b 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -12,30 +12,23 @@ import numpy as np from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path from modules.ui_common import create_refresh_button from modules.ui_gradio_extensions import reload_javascript - from modules.shared import opts, cmd_opts -import modules.codeformer_model import modules.generation_parameters_copypaste as parameters_copypaste -import modules.gfpgan_model -import modules.hypernetworks.ui -import modules.scripts +import modules.hypernetworks.ui as hypernetworks_ui +import modules.textual_inversion.ui as textual_inversion_ui +import modules.textual_inversion.textual_inversion as textual_inversion import modules.shared as shared -import modules.styles -import modules.textual_inversion.ui from modules import prompt_parser from modules.sd_hijack import model_hijack from modules.sd_samplers import samplers, samplers_for_img2img -from modules.textual_inversion import textual_inversion -import modules.hypernetworks.ui from modules.generation_parameters_copypaste import image_from_url_text -import modules.extras create_setting_component = ui_settings.create_setting_component @@ -391,11 +384,11 @@ def create_ui(): parameters_copypaste.reset() - modules.scripts.scripts_current = modules.scripts.scripts_txt2img - 
modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) + scripts.scripts_current = scripts.scripts_txt2img + scripts.scripts_txt2img.initialize_scripts(is_img2img=False) with gr.Blocks(analytics_enabled=False) as txt2img_interface: - toprow = txt2img_toprow = Toprow(is_img2img=False) + toprow = Toprow(is_img2img=False) dummy_component = gr.Label(visible=False) txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) @@ -406,7 +399,7 @@ def create_ui(): with gr.Row().style(equal_height=False): with gr.Column(variant='compact', elem_id="txt2img_settings"): - modules.scripts.scripts_txt2img.prepare_ui() + scripts.scripts_txt2img.prepare_ui() for category in ordered_ui_categories(): if category == "sampler": @@ -474,10 +467,10 @@ def create_ui(): elif category == "scripts": with FormGroup(elem_id="txt2img_script_container"): - custom_inputs = modules.scripts.scripts_txt2img.setup_ui() + custom_inputs = scripts.scripts_txt2img.setup_ui() else: - modules.scripts.scripts_txt2img.setup_ui_for_section(category) + scripts.scripts_txt2img.setup_ui_for_section(category) hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] @@ -611,7 +604,7 @@ def create_ui(): (hr_prompt, "Hires prompt"), (hr_negative_prompt, "Hires negative prompt"), (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), - *modules.scripts.scripts_txt2img.infotext_fields + *scripts.scripts_txt2img.infotext_fields ] parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( @@ -634,11 +627,11 @@ def create_ui(): ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) - modules.scripts.scripts_current = modules.scripts.scripts_img2img - modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) + scripts.scripts_current = scripts.scripts_img2img + scripts.scripts_img2img.initialize_scripts(is_img2img=True) with gr.Blocks(analytics_enabled=False) as img2img_interface: - toprow = img2img_toprow = Toprow(is_img2img=True) + toprow = Toprow(is_img2img=True) img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) @@ -740,7 +733,7 @@ def create_ui(): with FormRow(): resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") - modules.scripts.scripts_img2img.prepare_ui() + scripts.scripts_img2img.prepare_ui() for category in ordered_ui_categories(): if category == "sampler": @@ -821,7 +814,7 @@ def create_ui(): elif category == "scripts": with FormGroup(elem_id="img2img_script_container"): - custom_inputs = modules.scripts.scripts_img2img.setup_ui() + custom_inputs = scripts.scripts_img2img.setup_ui() elif category == "inpaint": with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: @@ -852,7 +845,7 @@ def create_ui(): outputs=[inpaint_controls, mask_alpha], ) else: - modules.scripts.scripts_img2img.setup_ui_for_section(category) + scripts.scripts_img2img.setup_ui_for_section(category) img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) @@ -1001,7 +994,7 @@ def create_ui(): (toprow.ui_styles.dropdown, lambda d: 
d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (mask_blur, "Mask blur"), - *modules.scripts.scripts_img2img.infotext_fields + *scripts.scripts_img2img.infotext_fields ] parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) @@ -1009,7 +1002,7 @@ def create_ui(): paste_button=toprow.paste, tabname="img2img", source_text_component=toprow.prompt, source_image_component=None, )) - modules.scripts.scripts_current = None + scripts.scripts_current = None with gr.Blocks(analytics_enabled=False) as extras_interface: ui_postprocessing.create_ui() @@ -1063,7 +1056,7 @@ def create_ui(): new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name") new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes") new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure") - new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func") + new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=hypernetworks_ui.keys, elem_id="train_new_hypernetwork_activation_func") new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. 
Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option") new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm") new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout") @@ -1208,7 +1201,7 @@ def create_ui(): ti_outcome = gr.HTML(elem_id="ti_error", value="") create_embedding.click( - fn=modules.textual_inversion.ui.create_embedding, + fn=textual_inversion_ui.create_embedding, inputs=[ new_embedding_name, initialization_text, @@ -1223,7 +1216,7 @@ def create_ui(): ) create_hypernetwork.click( - fn=modules.hypernetworks.ui.create_hypernetwork, + fn=hypernetworks_ui.create_hypernetwork, inputs=[ new_hypernetwork_name, new_hypernetwork_sizes, @@ -1243,7 +1236,7 @@ def create_ui(): ) run_preprocess.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]), + fn=wrap_gradio_gpu_call(textual_inversion_ui.preprocess, extra_outputs=[gr.update()]), _js="start_training_textual_inversion", inputs=[ dummy_component, @@ -1279,7 +1272,7 @@ def create_ui(): ) train_embedding.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]), + fn=wrap_gradio_gpu_call(textual_inversion_ui.train_embedding, extra_outputs=[gr.update()]), _js="start_training_textual_inversion", inputs=[ dummy_component, @@ -1313,7 +1306,7 @@ def create_ui(): ) train_hypernetwork.click( - fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]), + fn=wrap_gradio_gpu_call(hypernetworks_ui.train_hypernetwork, extra_outputs=[gr.update()]), _js="start_training_textual_inversion", inputs=[ dummy_component, diff --git a/webui.py b/webui.py index 2dc4f1aa..8d84e5a4 100644 --- a/webui.py +++ b/webui.py @@ -58,10 +58,10 @@ if ".dev" in torch.__version__ or "+git" in torch.__version__: torch.__long_version__ = torch.__version__ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) -from modules import shared, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states import modules.codeformer_model as codeformer -import modules.face_restoration import modules.gfpgan_model as gfpgan +from modules import shared, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states +import modules.face_restoration import modules.img2img import modules.lowvram -- cgit v1.2.3 From 09c1be96748584b08b6299024bb7b64bafb09d09 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 3 Aug 2023 23:31:04 +0300 Subject: put some of the shared functionality into toprow write a comment for the toprow --- modules/ui.py | 39 ++++++++++++--------------------------- 1 file changed, 12 insertions(+), 27 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index 70364a8b..03306ba9 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -25,6 +25,7 @@ import modules.hypernetworks.ui as hypernetworks_ui import modules.textual_inversion.ui as textual_inversion_ui import modules.textual_inversion.textual_inversion as textual_inversion import modules.shared as shared +import modules.images from modules import prompt_parser from modules.sd_hijack import model_hijack from modules.sd_samplers import samplers, samplers_for_img2img @@ 
-241,6 +242,8 @@ def update_token_counter(text, steps): class Toprow: + """Creates a top row UI with prompts, generate button, styles, extra little buttons for things, and enables some functionality related to their operation""" + def __init__(self, is_img2img): id_part = "img2img" if is_img2img else "txt2img" self.id_part = id_part @@ -251,12 +254,14 @@ class Toprow: with gr.Column(scale=80): with gr.Row(): self.prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + self.prompt_img = gr.File(label="", elem_id=f"{id_part}_prompt_image", file_count="single", type="binary", visible=False) with gr.Row(): with gr.Column(scale=80): with gr.Row(): self.negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + self.button_interrogate = None self.button_deepbooru = None if is_img2img: @@ -302,6 +307,13 @@ class Toprow: self.ui_styles = ui_prompt_styles.UiPromptStyles(id_part, self.prompt, self.negative_prompt) + self.prompt_img.change( + fn=modules.images.image_data, + inputs=[self.prompt_img], + outputs=[self.prompt, self.prompt_img], + show_progress=False, + ) + def setup_progressbar(*args, **kwargs): pass @@ -391,7 +403,6 @@ def create_ui(): toprow = Toprow(is_img2img=False) dummy_component = gr.Label(visible=False) - txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: from modules import ui_extra_networks @@ -556,18 +567,6 @@ def create_ui(): show_progress=False, ) - txt_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - txt_prompt_img - ], - outputs=[ - toprow.prompt, - txt_prompt_img - ], - show_progress=False, - ) - enable_hr.change( fn=lambda x: gr_show(x), inputs=[enable_hr], @@ -633,8 +632,6 @@ def create_ui(): with gr.Blocks(analytics_enabled=False) as img2img_interface: toprow = Toprow(is_img2img=True) - img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) - with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: from modules import ui_extra_networks extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, toprow.extra_networks_button, 'img2img') @@ -852,18 +849,6 @@ def create_ui(): connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - img2img_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - img2img_prompt_img - ], - outputs=[ - toprow.prompt, - img2img_prompt_img - ], - show_progress=False, - ) - img2img_args = dict( fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), _js="submit_img2img", -- cgit v1.2.3 From 362789a3793025c698fa42372fd66c3c4f2d6413 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Fri, 4 Aug 2023 07:50:17 +0300 Subject: gradio 3.39 --- extensions-builtin/Lora/ui_edit_user_metadata.py | 2 +- modules/gradio_extensons.py | 60 ++++++++++++++++++++++++ modules/scripts.py | 60 ------------------------ modules/shared.py | 3 +- modules/ui.py | 24 +++++----- modules/ui_checkpoint_merger.py | 2 +- 
modules/ui_common.py | 2 +- modules/ui_components.py | 2 +- modules/ui_extensions.py | 8 ++-- modules/ui_postprocessing.py | 2 +- requirements.txt | 2 +- requirements_versions.txt | 2 +- style.css | 12 ++++- 13 files changed, 95 insertions(+), 86 deletions(-) create mode 100644 modules/gradio_extensons.py (limited to 'modules/ui.py') diff --git a/extensions-builtin/Lora/ui_edit_user_metadata.py b/extensions-builtin/Lora/ui_edit_user_metadata.py index 2ca997f7..390d9dde 100644 --- a/extensions-builtin/Lora/ui_edit_user_metadata.py +++ b/extensions-builtin/Lora/ui_edit_user_metadata.py @@ -167,7 +167,7 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor) random_prompt = gr.Textbox(label='Random prompt', lines=4, max_lines=4, interactive=False) with gr.Column(scale=1, min_width=120): - generate_random_prompt = gr.Button('Generate').style(full_width=True, size="lg") + generate_random_prompt = gr.Button('Generate', size="lg", scale=1) self.edit_notes = gr.TextArea(label='Notes', lines=4) diff --git a/modules/gradio_extensons.py b/modules/gradio_extensons.py new file mode 100644 index 00000000..5af7fd8e --- /dev/null +++ b/modules/gradio_extensons.py @@ -0,0 +1,60 @@ +import gradio as gr + +from modules import scripts + +def add_classes_to_gradio_component(comp): + """ + this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others + """ + + comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])] + + if getattr(comp, 'multiselect', False): + comp.elem_classes.append('multiselect') + + +def IOComponent_init(self, *args, **kwargs): + self.webui_tooltip = kwargs.pop('tooltip', None) + + if scripts.scripts_current is not None: + scripts.scripts_current.before_component(self, **kwargs) + + scripts.script_callbacks.before_component_callback(self, **kwargs) + + res = original_IOComponent_init(self, *args, **kwargs) + + add_classes_to_gradio_component(self) + + scripts.script_callbacks.after_component_callback(self, **kwargs) + + if scripts.scripts_current is not None: + scripts.scripts_current.after_component(self, **kwargs) + + return res + + +def Block_get_config(self): + config = original_Block_get_config(self) + + webui_tooltip = getattr(self, 'webui_tooltip', None) + if webui_tooltip: + config["webui_tooltip"] = webui_tooltip + + return config + + +def BlockContext_init(self, *args, **kwargs): + res = original_BlockContext_init(self, *args, **kwargs) + + add_classes_to_gradio_component(self) + + return res + + +original_IOComponent_init = gr.components.IOComponent.__init__ +original_Block_get_config = gr.blocks.Block.get_config +original_BlockContext_init = gr.blocks.BlockContext.__init__ + +gr.components.IOComponent.__init__ = IOComponent_init +gr.blocks.Block.get_config = Block_get_config +gr.blocks.BlockContext.__init__ = BlockContext_init diff --git a/modules/scripts.py b/modules/scripts.py index edf7347e..f7d060aa 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -631,63 +631,3 @@ def reload_script_body_only(): reload_scripts = load_scripts # compatibility alias - - -def add_classes_to_gradio_component(comp): - """ - this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others - """ - - comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])] - - if getattr(comp, 'multiselect', False): - comp.elem_classes.append('multiselect') - - - -def IOComponent_init(self, *args, **kwargs): - self.webui_tooltip = 
kwargs.pop('tooltip', None) - - if scripts_current is not None: - scripts_current.before_component(self, **kwargs) - - script_callbacks.before_component_callback(self, **kwargs) - - res = original_IOComponent_init(self, *args, **kwargs) - - add_classes_to_gradio_component(self) - - script_callbacks.after_component_callback(self, **kwargs) - - if scripts_current is not None: - scripts_current.after_component(self, **kwargs) - - return res - - -def Block_get_config(self): - config = original_Block_get_config(self) - - webui_tooltip = getattr(self, 'webui_tooltip', None) - if webui_tooltip: - config["webui_tooltip"] = webui_tooltip - - return config - - -original_IOComponent_init = gr.components.IOComponent.__init__ -original_Block_get_config = gr.components.Block.get_config -gr.components.IOComponent.__init__ = IOComponent_init -gr.components.Block.get_config = Block_get_config - - -def BlockContext_init(self, *args, **kwargs): - res = original_BlockContext_init(self, *args, **kwargs) - - add_classes_to_gradio_component(self) - - return res - - -original_BlockContext_init = gr.blocks.BlockContext.__init__ -gr.blocks.BlockContext.__init__ = BlockContext_init diff --git a/modules/shared.py b/modules/shared.py index 7103b4ca..cec030f7 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -385,7 +385,8 @@ options_templates.update(options_section(('face-restoration', "Face restoration" })) options_templates.update(options_section(('system', "System"), { - "show_warnings": OptionInfo(False, "Show warnings in console."), + "show_warnings": OptionInfo(False, "Show warnings in console.").needs_restart(), + "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_restart(), "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"), "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), diff --git a/modules/ui.py b/modules/ui.py index 03306ba9..822a7660 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -12,6 +12,7 @@ import numpy as np from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call +from modules import gradio_extensons # noqa: F401 from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path @@ -34,6 +35,7 @@ from modules.generation_parameters_copypaste import image_from_url_text create_setting_component = ui_settings.create_setting_component warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning) +warnings.filterwarnings("default" if opts.show_gradio_deprecation_warnings else "ignore", category=gr.deprecation.GradioDeprecationWarning) # this is a fix for Windows users. 
Without it, javascript files will be served with text/html content-type and the browser will not show any UI mimetypes.init() @@ -146,7 +148,6 @@ def interrogate_deepbooru(image): def create_seed_inputs(target_interface): with FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") - seed.style(container=False) random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') @@ -158,7 +159,6 @@ def create_seed_inputs(target_interface): with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: seed_extras.append(seed_extra_row_1) subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed") - subseed.style(container=False) random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") @@ -408,7 +408,7 @@ def create_ui(): from modules import ui_extra_networks extra_networks_ui = ui_extra_networks.create_ui(extra_networks, toprow.extra_networks_button, 'txt2img') - with gr.Row().style(equal_height=False): + with gr.Row(equal_height=False): with gr.Column(variant='compact', elem_id="txt2img_settings"): scripts.scripts_txt2img.prepare_ui() @@ -636,7 +636,7 @@ def create_ui(): from modules import ui_extra_networks extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, toprow.extra_networks_button, 'img2img') - with FormRow().style(equal_height=False): + with FormRow(equal_height=False): with gr.Column(variant='compact', elem_id="img2img_settings"): copy_image_buttons = [] copy_image_destinations = {} @@ -658,19 +658,19 @@ def create_ui(): img2img_selected_tab = gr.State(0) with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: - init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=opts.img2img_editor_height) + init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA", height=opts.img2img_editor_height) add_copy_image_controls('img2img', init_img) with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: - sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) + sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height) add_copy_image_controls('sketch', sketch) with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: - init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", 
image_mode="RGBA").style(height=opts.img2img_editor_height) + init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height) add_copy_image_controls('inpaint', init_img_with_mask) with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) + inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height) inpaint_color_sketch_orig = gr.State(None) add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) @@ -993,7 +993,7 @@ def create_ui(): ui_postprocessing.create_ui() with gr.Blocks(analytics_enabled=False) as pnginfo_interface: - with gr.Row().style(equal_height=False): + with gr.Row(equal_height=False): with gr.Column(variant='panel'): image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil") @@ -1018,10 +1018,10 @@ def create_ui(): modelmerger_ui = ui_checkpoint_merger.UiCheckpointMerger() with gr.Blocks(analytics_enabled=False) as train_interface: - with gr.Row().style(equal_height=False): + with gr.Row(equal_height=False): gr.HTML(value="
See wiki for detailed explanation.
") - with gr.Row(variant="compact").style(equal_height=False): + with gr.Row(variant="compact", equal_height=False): with gr.Tabs(elem_id="train_tabs"): with gr.Tab(label="Create embedding", id="create_embedding"): @@ -1181,7 +1181,7 @@ def create_ui(): with gr.Column(elem_id='ti_gallery_container'): ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) - gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) + gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery', columns=4) gr.HTML(elem_id="ti_progress", value="") ti_outcome = gr.HTML(elem_id="ti_error", value="") diff --git a/modules/ui_checkpoint_merger.py b/modules/ui_checkpoint_merger.py index 4863d861..f9c5dd6b 100644 --- a/modules/ui_checkpoint_merger.py +++ b/modules/ui_checkpoint_merger.py @@ -29,7 +29,7 @@ def modelmerger(*args): class UiCheckpointMerger: def __init__(self): with gr.Blocks(analytics_enabled=False) as modelmerger_interface: - with gr.Row().style(equal_height=False): + with gr.Row(equal_height=False): with gr.Column(variant='compact'): self.interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description") diff --git a/modules/ui_common.py b/modules/ui_common.py index ba75fa73..eefe0c0e 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -134,7 +134,7 @@ Requested path was: {f} with gr.Column(variant='panel', elem_id=f"{tabname}_results"): with gr.Group(elem_id=f"{tabname}_gallery_container"): - result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery").style(columns=4) + result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery", columns=4) generation_info = None with gr.Column(): diff --git a/modules/ui_components.py b/modules/ui_components.py index 64451df7..8f8a7088 100644 --- a/modules/ui_components.py +++ b/modules/ui_components.py @@ -35,7 +35,7 @@ class FormColumn(FormComponent, gr.Column): class FormGroup(FormComponent, gr.Group): - """Same as gr.Row but fits inside gradio forms""" + """Same as gr.Group but fits inside gradio forms""" def get_block_name(self): return "group" diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index bd28bfcf..15a8b0bf 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -533,8 +533,8 @@ def create_ui(): apply = gr.Button(value=apply_label, variant="primary") check = gr.Button(value="Check for updates") extensions_disable_all = gr.Radio(label="Disable all extensions", choices=["none", "extra", "all"], value=shared.opts.disable_all_extensions, elem_id="extensions_disable_all") - extensions_disabled_list = gr.Text(elem_id="extensions_disabled_list", visible=False).style(container=False) - extensions_update_list = gr.Text(elem_id="extensions_update_list", visible=False).style(container=False) + extensions_disabled_list = gr.Text(elem_id="extensions_disabled_list", visible=False, container=False) + extensions_update_list = gr.Text(elem_id="extensions_update_list", visible=False, container=False) html = "" @@ -569,7 +569,7 @@ def create_ui(): with gr.Row(): refresh_available_extensions_button = gr.Button(value="Load from:", variant="primary") extensions_index_url = os.environ.get('WEBUI_EXTENSIONS_INDEX', "https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui-extensions/master/index.json") - available_extensions_index = gr.Text(value=extensions_index_url, label="Extension index URL").style(container=False) + available_extensions_index = 
gr.Text(value=extensions_index_url, label="Extension index URL", container=False) extension_to_install = gr.Text(elem_id="extension_to_install", visible=False) install_extension_button = gr.Button(elem_id="install_extension_button", visible=False) @@ -578,7 +578,7 @@ def create_ui(): sort_column = gr.Radio(value="newest first", label="Order", choices=["newest first", "oldest first", "a-z", "z-a", "internal order",'update time', 'create time', "stars"], type="index") with gr.Row(): - search_extensions_text = gr.Text(label="Search").style(container=False) + search_extensions_text = gr.Text(label="Search", container=False) install_result = gr.HTML() available_extensions_table = gr.HTML() diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index c7dc1154..802e1ce7 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -6,7 +6,7 @@ import modules.generation_parameters_copypaste as parameters_copypaste def create_ui(): tab_index = gr.State(value=0) - with gr.Row().style(equal_height=False, variant='compact'): + with gr.Row(equal_height=False, variant='compact'): with gr.Column(variant='compact'): with gr.Tabs(elem_id="mode_extras"): with gr.TabItem('Single Image', id="single_image", elem_id="extras_single_tab") as tab_single: diff --git a/requirements.txt b/requirements.txt index b3f8a7f4..afdc6ee2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ blendmodes clean-fid einops gfpgan -gradio==3.32.0 +gradio==3.39.0 inflection jsonmerge kornia diff --git a/requirements_versions.txt b/requirements_versions.txt index d07ab456..82b8732d 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -7,7 +7,7 @@ clean-fid==0.1.35 einops==0.4.1 fastapi==0.94.0 gfpgan==1.3.8 -gradio==3.32.0 +gradio==3.39.0 httpcore==0.15 inflection==0.5.1 jsonmerge==1.8.0 diff --git a/style.css b/style.css index cf8470e4..86b4f61e 100644 --- a/style.css +++ b/style.css @@ -8,6 +8,7 @@ --checkbox-label-gap: 0.25em 0.1em; --section-header-text-size: 12pt; --block-background-fill: transparent; + } .block.padded:not(.gradio-accordion) { @@ -42,7 +43,8 @@ div.form{ .block.gradio-radio, .block.gradio-checkboxgroup, .block.gradio-number, -.block.gradio-colorpicker +.block.gradio-colorpicker, +div.gradio-group { border-width: 0 !important; box-shadow: none !important; @@ -133,6 +135,11 @@ a{ cursor: pointer; } +div.styler{ + border: none; + background: var(--background-fill-primary); +} + /* general styled components */ @@ -164,7 +171,7 @@ a{ .checkboxes-row > div{ flex: 0; white-space: nowrap; - min-width: auto; + min-width: auto !important; } button.custom-button{ @@ -388,6 +395,7 @@ div#extras_scale_to_tab div.form{ #quicksettings > div, #quicksettings > fieldset{ max-width: 24em; min-width: 24em; + width: 24em; padding: 0; border: none; box-shadow: none; -- cgit v1.2.3 From ac8dfd9386785127c2a71ee2c1ae4f950a46f4fd Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 03:52:22 -0400 Subject: Toggle extras checkbox for infotext paste --- modules/ui.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index 822a7660..fde79a8a 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -603,6 +603,7 @@ def create_ui(): (hr_prompt, "Hires prompt"), (hr_negative_prompt, "Hires negative prompt"), (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), + 
(seed_checkbox, lambda d: any(x in ["Variation seed", "Variation seed strength", "Seed resize from-1", "Seed resize from-2"] for x in d)), *scripts.scripts_txt2img.infotext_fields ] parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) @@ -979,6 +980,7 @@ def create_ui(): (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (mask_blur, "Mask blur"), + (seed_checkbox, lambda d: any(x in ["Variation seed", "Variation seed strength", "Seed resize from-1", "Seed resize from-2"] for x in d)), *scripts.scripts_img2img.infotext_fields ] parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) -- cgit v1.2.3 From d89a915b74cd999e13e559d6a702c7da9404db5e Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 04:03:37 -0400 Subject: Only enable hr fix if hr scale or upscale in infotext on paste --- modules/ui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index 822a7660..4628bc89 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -591,8 +591,8 @@ def create_ui(): (seed_resize_from_h, "Seed resize from-2"), (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), - (enable_hr, lambda d: "Denoising strength" in d), - (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), + (enable_hr, lambda d: "Denoising strength" in d and any(x in ["Hires upscale", "Hires upscaler"] for x in d)), + (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d and any(x in ["Hires upscale", "Hires upscaler"] for x in d))), (hr_scale, "Hires upscale"), (hr_upscaler, "Hires upscaler"), (hr_second_pass_steps, "Hires steps"), -- cgit v1.2.3 From 67312653d766fbbebaecae87b26d5e49789970d3 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 04:40:56 -0400 Subject: Cleanup hr infotext paste check --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index 4628bc89..cc298031 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -592,7 +592,7 @@ def create_ui(): (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (enable_hr, lambda d: "Denoising strength" in d and any(x in ["Hires upscale", "Hires upscaler"] for x in d)), - (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d and any(x in ["Hires upscale", "Hires upscaler"] for x in d))), + (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d))), (hr_scale, "Hires upscale"), (hr_upscaler, "Hires upscaler"), (hr_second_pass_steps, "Hires steps"), -- cgit v1.2.3 From 7c5480eb969a421786089c6f8bc664a7444e75ee Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 04:42:35 -0400 Subject: Cleanup hr infotext paste check mk2 --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index cc298031..8b38d213 100644 --- 
a/modules/ui.py +++ b/modules/ui.py @@ -591,7 +591,7 @@ def create_ui(): (seed_resize_from_h, "Seed resize from-2"), (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), - (enable_hr, lambda d: "Denoising strength" in d and any(x in ["Hires upscale", "Hires upscaler"] for x in d)), + (enable_hr, lambda d: "Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d)), (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d))), (hr_scale, "Hires upscale"), (hr_upscaler, "Hires upscaler"), -- cgit v1.2.3 From f5994e84a213e40f9f6f0eb24df1d4fe38a45a2e Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 04:57:01 -0400 Subject: Cleanup extras checkbox infotext paste check --- modules/ui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index fde79a8a..2f3f74b5 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -585,6 +585,7 @@ def create_ui(): (width, "Size-1"), (height, "Size-2"), (batch_size, "Batch size"), + (seed_checkbox, lambda d: "Variation seed" in d or "Seed resize from-1" in d), (subseed, "Variation seed"), (subseed_strength, "Variation seed strength"), (seed_resize_from_w, "Seed resize from-1"), @@ -603,7 +604,6 @@ def create_ui(): (hr_prompt, "Hires prompt"), (hr_negative_prompt, "Hires negative prompt"), (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), - (seed_checkbox, lambda d: any(x in ["Variation seed", "Variation seed strength", "Seed resize from-1", "Seed resize from-2"] for x in d)), *scripts.scripts_txt2img.infotext_fields ] parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) @@ -973,6 +973,7 @@ def create_ui(): (width, "Size-1"), (height, "Size-2"), (batch_size, "Batch size"), + (seed_checkbox, lambda d: "Variation seed" in d or "Seed resize from-1" in d), (subseed, "Variation seed"), (subseed_strength, "Variation seed strength"), (seed_resize_from_w, "Seed resize from-1"), @@ -980,7 +981,6 @@ def create_ui(): (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (mask_blur, "Mask blur"), - (seed_checkbox, lambda d: any(x in ["Variation seed", "Variation seed strength", "Seed resize from-1", "Seed resize from-2"] for x in d)), *scripts.scripts_img2img.infotext_fields ] parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) -- cgit v1.2.3 From 682ff8936df018330e0d2a259794a262dc3251b2 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Fri, 4 Aug 2023 18:51:15 +0300 Subject: glorious, glorious wonderful clear milky white butter smooth color for inpainting you are the best, gradio how I yearned for this day i always believed in you i knew you had it in you this day marks a new beginning thank you, everyone thank you --- modules/ui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index 586174b8..6cf3dff8 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -667,11 +667,11 @@ def create_ui(): 
add_copy_image_controls('sketch', sketch) with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: - init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height) + init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color='#ffffff') add_copy_image_controls('inpaint', init_img_with_mask) with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height) + inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color='#ffffff') inpaint_color_sketch_orig = gr.State(None) add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) -- cgit v1.2.3 From 1d60a609a9d7a7f79517dc0c87d4b834b89db252 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 5 Aug 2023 09:25:21 +0900 Subject: configurable masks color and default brush color --- modules/shared.py | 3 +++ modules/ui.py | 6 +++--- 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/shared.py b/modules/shared.py index cec030f7..1eb00b8f 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -492,6 +492,9 @@ options_templates.update(options_section(('ui', "User interface"), { "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_restart(), "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).needs_restart(), "img2img_editor_height": OptionInfo(720, "img2img: height of image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_restart(), + "img2img_sketch_default_brush_color": OptionInfo("#000000", "sketch brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch) (requires reload"), + "img2img_inpaint_mask_brush_color": OptionInfo("#000000", "inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask) (requires reload"), + "img2img_inpaint_sketch_default_brush_color": OptionInfo("#000000", "inpaint sketch brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch) (requires reload"), "return_grid": OptionInfo(True, "Show grid in results for web"), "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), diff --git a/modules/ui.py b/modules/ui.py index 6cf3dff8..843a75ef 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -663,15 +663,15 @@ def create_ui(): add_copy_image_controls('img2img', 
init_img) with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: - sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height) + sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color) add_copy_image_controls('sketch', sketch) with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: - init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color='#ffffff') + init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_mask_brush_color) add_copy_image_controls('inpaint', init_img_with_mask) with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color='#ffffff') + inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color) inpaint_color_sketch_orig = gr.State(None) add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) -- cgit v1.2.3 From 8197f24dbcfe41d77e337da42bed22944154465f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 5 Aug 2023 08:07:13 +0300 Subject: remove the extra networks button --- modules/ui.py | 1 - 1 file changed, 1 deletion(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index 61a6b4ad..1af6b4c8 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -289,7 +289,6 @@ class Toprow: with gr.Row(elem_id=f"{id_part}_tools"): self.paste = ToolButton(value=paste_symbol, elem_id="paste") self.clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") - self.extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") self.restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) self.token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) -- cgit v1.2.3 From e866c354626e772779a6307488b7b41f9d101399 Mon Sep 17 00:00:00 2001 From: Diego Casorran Date: Sun, 6 Aug 2023 12:25:04 +0000 Subject: add explicit content-type header for image/webp --- modules/ui.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index 1af6b4c8..4ffb9b82 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -41,6 +41,9 @@ warnings.filterwarnings("default" if 
opts.show_gradio_deprecation_warnings else mimetypes.init() mimetypes.add_type('application/javascript', '.js') +# Likewise, add explicit content-type header for certain missing image types +mimetypes.add_type('image/webp', '.webp') + if not cmd_opts.share and not cmd_opts.listen: # fix gradio phoning home gradio.utils.version_check = lambda: None -- cgit v1.2.3 From 7d8f55ec7c2d875254e63d92131ecc555378257e Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Mon, 7 Aug 2023 01:45:10 -0400 Subject: Remove style method --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index 4ffb9b82..5150dae4 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -408,7 +408,7 @@ def create_ui(): extra_tabs = gr.Tabs(elem_id="txt2img_extra_tabs") extra_tabs.__enter__() - with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, gr.Row().style(equal_height=False): + with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, gr.Row(equal_height=False): with gr.Column(variant='compact', elem_id="txt2img_settings"): scripts.scripts_txt2img.prepare_ui() -- cgit v1.2.3 From 70c63c1208d33bf02e15c4e310bac83f12ee8625 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 8 Aug 2023 21:28:34 +0300 Subject: pass samplers from UI by name, make it possible to use a sampler from infotext even if it's hidden in the dropdown --- modules/img2img.py | 6 +++--- modules/sd_samplers.py | 13 +++++++++---- modules/txt2img.py | 8 ++++---- modules/ui.py | 29 ++++++++++++++--------------- 4 files changed, 30 insertions(+), 26 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/img2img.py b/modules/img2img.py index d8e1c534..e06ac1d6 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -6,7 +6,7 @@ import numpy as np from PIL import Image, ImageOps, ImageFilter, ImageEnhance, UnidentifiedImageError import gradio as gr -from modules import sd_samplers, images as imgutil +from modules import images as imgutil from modules.generation_parameters_copypaste import create_override_settings_dict, parse_generation_parameters from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images from modules.shared import opts, state @@ -116,7 +116,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal process_images(p) -def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): +def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, 
sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5 @@ -172,7 +172,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s seed_resize_from_h=seed_resize_from_h, seed_resize_from_w=seed_resize_from_w, seed_enable_extras=seed_enable_extras, - sampler_name=sd_samplers.samplers_for_img2img[sampler_index].name, + sampler_name=sampler_name, batch_size=batch_size, n_iter=n_iter, steps=steps, diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 05dbe2b5..45faae62 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -12,6 +12,7 @@ all_samplers_map = {x.name: x for x in all_samplers} samplers = [] samplers_for_img2img = [] samplers_map = {} +samplers_hidden = {} def find_sampler_config(name): @@ -38,11 +39,11 @@ def create_sampler(name, model): def set_samplers(): - global samplers, samplers_for_img2img + global samplers, samplers_for_img2img, samplers_hidden - hidden = set(shared.opts.hide_samplers) - samplers = [x for x in all_samplers if x.name not in hidden] - samplers_for_img2img = [x for x in all_samplers if x.name not in hidden] + samplers_hidden = set(shared.opts.hide_samplers) + samplers = all_samplers + samplers_for_img2img = all_samplers samplers_map.clear() for sampler in all_samplers: @@ -51,4 +52,8 @@ def set_samplers(): samplers_map[alias.lower()] = sampler.name +def visible_sampler_names(): + return [x.name for x in samplers if x.name not in samplers_hidden] + + set_samplers() diff --git a/modules/txt2img.py b/modules/txt2img.py index 935ed418..8fa389b5 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -1,7 +1,7 @@ from contextlib import closing import modules.scripts -from modules import sd_samplers, processing +from modules import processing from modules.generation_parameters_copypaste import create_override_settings_dict from modules.shared import opts, cmd_opts import modules.shared as shared @@ -9,7 +9,7 @@ from modules.ui import plaintext_to_html import gradio as gr -def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): +def 
txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) p = processing.StableDiffusionProcessingTxt2Img( @@ -25,7 +25,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step seed_resize_from_h=seed_resize_from_h, seed_resize_from_w=seed_resize_from_w, seed_enable_extras=seed_enable_extras, - sampler_name=sd_samplers.samplers[sampler_index].name, + sampler_name=sampler_name, batch_size=batch_size, n_iter=n_iter, steps=steps, @@ -42,7 +42,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y, hr_checkpoint_name=None if hr_checkpoint_name == 'Use same checkpoint' else hr_checkpoint_name, - hr_sampler_name=sd_samplers.samplers_for_img2img[hr_sampler_index - 1].name if hr_sampler_index != 0 else None, + hr_sampler_name=hr_sampler_name, hr_prompt=hr_prompt, hr_negative_prompt=hr_negative_prompt, override_settings=override_settings, diff --git a/modules/ui.py b/modules/ui.py index 5150dae4..e3753e97 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -13,7 +13,7 @@ from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import gradio_extensons # noqa: F401 -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path from modules.ui_common import create_refresh_button @@ -29,7 +29,6 @@ import modules.shared as shared import modules.images from modules import prompt_parser from modules.sd_hijack import model_hijack -from modules.sd_samplers import samplers, samplers_for_img2img from modules.generation_parameters_copypaste import image_from_url_text create_setting_component = ui_settings.create_setting_component @@ -360,14 +359,14 @@ def create_output_panel(tabname, outdir): def create_sampler_and_steps_selection(choices, tabname): if opts.samplers_in_dropdown: with FormRow(elem_id=f"sampler_selection_{tabname}"): - sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + sampler_name = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=choices, value=choices[0]) steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) else: with 
FormGroup(elem_id=f"sampler_selection_{tabname}"): steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + sampler_name = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=choices, value=choices[0]) - return steps, sampler_index + return steps, sampler_name def ordered_ui_categories(): @@ -414,7 +413,7 @@ def create_ui(): for category in ordered_ui_categories(): if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") + steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "txt2img") elif category == "dimensions": with FormRow(): @@ -460,7 +459,7 @@ def create_ui(): hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint") create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)}, "hr_checkpoint_refresh") - hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index") + hr_sampler_name = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + sd_samplers.visible_sampler_names(), value="Use same sampler") with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: with gr.Column(scale=80): @@ -520,7 +519,7 @@ def create_ui(): toprow.negative_prompt, toprow.ui_styles.dropdown, steps, - sampler_index, + sampler_name, restore_faces, tiling, batch_count, @@ -538,7 +537,7 @@ def create_ui(): hr_resize_x, hr_resize_y, hr_checkpoint_name, - hr_sampler_index, + hr_sampler_name, hr_prompt, hr_negative_prompt, override_settings, @@ -583,7 +582,7 @@ def create_ui(): (toprow.prompt, "Prompt"), (toprow.negative_prompt, "Negative prompt"), (steps, "Steps"), - (sampler_index, "Sampler"), + (sampler_name, "Sampler"), (restore_faces, "Face restoration"), (cfg_scale, "CFG scale"), (seed, "Seed"), @@ -605,7 +604,7 @@ def create_ui(): (hr_resize_x, "Hires resize-1"), (hr_resize_y, "Hires resize-2"), (hr_checkpoint_name, "Hires checkpoint"), - (hr_sampler_index, "Hires sampler"), + (hr_sampler_name, "Hires sampler"), (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" or d.get("Hires checkpoint", "Use same checkpoint") != "Use same checkpoint" else gr.update()), (hr_prompt, "Hires prompt"), (hr_negative_prompt, "Hires negative prompt"), @@ -621,7 +620,7 @@ def create_ui(): toprow.prompt, toprow.negative_prompt, steps, - sampler_index, + sampler_name, cfg_scale, seed, width, @@ -744,7 +743,7 @@ def create_ui(): for category in ordered_ui_categories(): if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") + steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img") elif category == "dimensions": with FormRow(): @@ -876,7 +875,7 @@ def create_ui(): init_img_inpaint, init_mask_inpaint, steps, - sampler_index, + sampler_name, 
mask_blur, mask_alpha, inpainting_fill, @@ -972,7 +971,7 @@ def create_ui(): (toprow.prompt, "Prompt"), (toprow.negative_prompt, "Negative prompt"), (steps, "Steps"), - (sampler_index, "Sampler"), + (sampler_name, "Sampler"), (restore_faces, "Face restoration"), (cfg_scale, "CFG scale"), (image_cfg_scale, "Image CFG scale"), -- cgit v1.2.3 From 386245a26427a64f364f66f6fecd03b3bccfd7f3 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 9 Aug 2023 10:25:35 +0300 Subject: split shared.py into multiple files; should resolve all circular reference import errors related to shared.py --- modules/devices.py | 10 +- modules/extensions.py | 4 +- modules/generation_parameters_copypaste.py | 3 +- modules/images.py | 28 +- modules/localization.py | 3 +- modules/mac_specific.py | 4 +- modules/options.py | 236 +++++++ modules/rng.py | 3 +- modules/sd_models.py | 9 +- modules/sd_models_config.py | 3 +- modules/sd_vae.py | 5 +- modules/shared.py | 961 ++--------------------------- modules/shared_cmd_options.py | 18 + modules/shared_gradio_themes.py | 66 ++ modules/shared_init.py | 51 ++ modules/shared_items.py | 49 ++ modules/shared_options.py | 692 +-------------------- modules/shared_state.py | 159 +++++ modules/shared_total_tqdm.py | 37 ++ modules/sysinfo.py | 7 +- modules/ui.py | 6 +- modules/ui_common.py | 4 +- modules/util.py | 58 ++ webui.py | 11 +- 24 files changed, 762 insertions(+), 1665 deletions(-) create mode 100644 modules/options.py create mode 100644 modules/shared_cmd_options.py create mode 100644 modules/shared_gradio_themes.py create mode 100644 modules/shared_init.py create mode 100644 modules/shared_state.py create mode 100644 modules/shared_total_tqdm.py create mode 100644 modules/util.py (limited to 'modules/ui.py') diff --git a/modules/devices.py b/modules/devices.py index ce59dc53..c01f0602 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -3,7 +3,7 @@ import contextlib from functools import lru_cache import torch -from modules import errors +from modules import errors, shared if sys.platform == "darwin": from modules import mac_specific @@ -17,8 +17,6 @@ def has_mps() -> bool: def get_cuda_device_string(): - from modules import shared - if shared.cmd_opts.device_id is not None: return f"cuda:{shared.cmd_opts.device_id}" @@ -40,8 +38,6 @@ def get_optimal_device(): def get_device_for(task): - from modules import shared - if task in shared.cmd_opts.use_cpu: return cpu @@ -97,8 +93,6 @@ nv_rng = None def autocast(disable=False): - from modules import shared - if disable: return contextlib.nullcontext() @@ -117,8 +111,6 @@ class NansException(Exception): def test_for_nans(x, where): - from modules import shared - if shared.cmd_opts.disable_nan_check: return diff --git a/modules/extensions.py b/modules/extensions.py index e4633af4..bf9a1878 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -1,7 +1,7 @@ import os import threading -from modules import shared, errors, cache +from modules import shared, errors, cache, scripts from modules.gitpython_hack import Repo from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401 @@ -90,8 +90,6 @@ class Extension: self.have_info_from_repo = True def list_files(self, subdir, extension): - from modules import scripts - dirpath = os.path.join(self.path, subdir) if not os.path.isdir(dirpath): return [] diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 5758e6f3..d932c67d 100644 --- 
a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -6,7 +6,7 @@ import re import gradio as gr from modules.paths import data_path -from modules import shared, ui_tempdir, script_callbacks +from modules import shared, ui_tempdir, script_callbacks, processing from PIL import Image re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)' @@ -198,7 +198,6 @@ def restore_old_hires_fix_params(res): height = int(res.get("Size-2", 512)) if firstpass_width == 0 or firstpass_height == 0: - from modules import processing firstpass_width, firstpass_height = processing.old_hires_fix_first_pass_dimensions(width, height) res['Size-1'] = firstpass_width diff --git a/modules/images.py b/modules/images.py index ba3c43a4..019c1d60 100644 --- a/modules/images.py +++ b/modules/images.py @@ -21,8 +21,6 @@ from modules import sd_samplers, shared, script_callbacks, errors from modules.paths_internal import roboto_ttf_file from modules.shared import opts -import modules.sd_vae as sd_vae - LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) @@ -342,16 +340,6 @@ def sanitize_filename_part(text, replace_spaces=True): class FilenameGenerator: - def get_vae_filename(self): #get the name of the VAE file. - if sd_vae.loaded_vae_file is None: - return "NoneType" - file_name = os.path.basename(sd_vae.loaded_vae_file) - split_file_name = file_name.split('.') - if len(split_file_name) > 1 and split_file_name[0] == '': - return split_file_name[1] # if the first character of the filename is "." then [1] is obtained. - else: - return split_file_name[0] - replacements = { 'seed': lambda self: self.seed if self.seed is not None else '', 'seed_first': lambda self: self.seed if self.p.batch_size == 1 else self.p.all_seeds[0], @@ -391,6 +379,22 @@ class FilenameGenerator: self.image = image self.zip = zip + def get_vae_filename(self): + """Get the name of the VAE file.""" + + import modules.sd_vae as sd_vae + + if sd_vae.loaded_vae_file is None: + return "NoneType" + + file_name = os.path.basename(sd_vae.loaded_vae_file) + split_file_name = file_name.split('.') + if len(split_file_name) > 1 and split_file_name[0] == '': + return split_file_name[1] # if the first character of the filename is "." then [1] is obtained. 
+ else: + return split_file_name[0] + + def hasprompt(self, *args): lower = self.prompt.lower() if self.p is None or self.prompt is None: diff --git a/modules/localization.py b/modules/localization.py index e8f585da..c1320288 100644 --- a/modules/localization.py +++ b/modules/localization.py @@ -1,7 +1,7 @@ import json import os -from modules import errors +from modules import errors, scripts localizations = {} @@ -16,7 +16,6 @@ def list_localizations(dirname): localizations[fn] = os.path.join(dirname, file) - from modules import scripts for file in scripts.list_scripts("localizations", ".json"): fn, ext = os.path.splitext(file.filename) localizations[fn] = file.path diff --git a/modules/mac_specific.py b/modules/mac_specific.py index 9ceb43ba..bce527cc 100644 --- a/modules/mac_specific.py +++ b/modules/mac_specific.py @@ -4,6 +4,7 @@ import torch import platform from modules.sd_hijack_utils import CondFunc from packaging import version +from modules import shared log = logging.getLogger(__name__) @@ -30,8 +31,7 @@ has_mps = check_for_mps() def torch_mps_gc() -> None: try: - from modules.shared import state - if state.current_latent is not None: + if shared.state.current_latent is not None: log.debug("`current_latent` is set, skipping MPS garbage collection") return from torch.mps import empty_cache diff --git a/modules/options.py b/modules/options.py new file mode 100644 index 00000000..59cb75ec --- /dev/null +++ b/modules/options.py @@ -0,0 +1,236 @@ +import json +import sys + +import gradio as gr + +from modules import errors +from modules.shared_cmd_options import cmd_opts + + +class OptionInfo: + def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''): + self.default = default + self.label = label + self.component = component + self.component_args = component_args + self.onchange = onchange + self.section = section + self.refresh = refresh + self.do_not_save = False + + self.comment_before = comment_before + """HTML text that will be added after label in UI""" + + self.comment_after = comment_after + """HTML text that will be added before label in UI""" + + def link(self, label, url): + self.comment_before += f"[{label}]" + return self + + def js(self, label, js_func): + self.comment_before += f"[{label}]" + return self + + def info(self, info): + self.comment_after += f"({info})" + return self + + def html(self, html): + self.comment_after += html + return self + + def needs_restart(self): + self.comment_after += " (requires restart)" + return self + + def needs_reload_ui(self): + self.comment_after += " (requires Reload UI)" + return self + + +class OptionHTML(OptionInfo): + def __init__(self, text): + super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs)) + + self.do_not_save = True + + +def options_section(section_identifier, options_dict): + for v in options_dict.values(): + v.section = section_identifier + + return options_dict + + +options_builtin_fields = {"data_labels", "data", "restricted_opts", "typemap"} + + +class Options: + typemap = {int: float} + + def __init__(self, data_labels, restricted_opts): + self.data_labels = data_labels + self.data = {k: v.default for k, v in self.data_labels.items()} + self.restricted_opts = restricted_opts + + def __setattr__(self, key, value): + if key in options_builtin_fields: + return super(Options, self).__setattr__(key, value) + + if self.data is not None: + if key 
in self.data or key in self.data_labels: + assert not cmd_opts.freeze_settings, "changing settings is disabled" + + info = self.data_labels.get(key, None) + if info.do_not_save: + return + + comp_args = info.component_args if info else None + if isinstance(comp_args, dict) and comp_args.get('visible', True) is False: + raise RuntimeError(f"not possible to set {key} because it is restricted") + + if cmd_opts.hide_ui_dir_config and key in self.restricted_opts: + raise RuntimeError(f"not possible to set {key} because it is restricted") + + self.data[key] = value + return + + return super(Options, self).__setattr__(key, value) + + def __getattr__(self, item): + if item in options_builtin_fields: + return super(Options, self).__getattribute__(item) + + if self.data is not None: + if item in self.data: + return self.data[item] + + if item in self.data_labels: + return self.data_labels[item].default + + return super(Options, self).__getattribute__(item) + + def set(self, key, value): + """sets an option and calls its onchange callback, returning True if the option changed and False otherwise""" + + oldval = self.data.get(key, None) + if oldval == value: + return False + + if self.data_labels[key].do_not_save: + return False + + try: + setattr(self, key, value) + except RuntimeError: + return False + + if self.data_labels[key].onchange is not None: + try: + self.data_labels[key].onchange() + except Exception as e: + errors.display(e, f"changing setting {key} to {value}") + setattr(self, key, oldval) + return False + + return True + + def get_default(self, key): + """returns the default value for the key""" + + data_label = self.data_labels.get(key) + if data_label is None: + return None + + return data_label.default + + def save(self, filename): + assert not cmd_opts.freeze_settings, "saving settings is disabled" + + with open(filename, "w", encoding="utf8") as file: + json.dump(self.data, file, indent=4) + + def same_type(self, x, y): + if x is None or y is None: + return True + + type_x = self.typemap.get(type(x), type(x)) + type_y = self.typemap.get(type(y), type(y)) + + return type_x == type_y + + def load(self, filename): + with open(filename, "r", encoding="utf8") as file: + self.data = json.load(file) + + # 1.6.0 VAE defaults + if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None: + self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default') + + # 1.1.1 quicksettings list migration + if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None: + self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')] + + # 1.4.0 ui_reorder + if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data: + self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')] + + bad_settings = 0 + for k, v in self.data.items(): + info = self.data_labels.get(k, None) + if info is not None and not self.same_type(info.default, v): + print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr) + bad_settings += 1 + + if bad_settings > 0: + print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr) + + def onchange(self, key, func, call=True): + item = self.data_labels.get(key) + item.onchange = func + + if 
call: + func() + + def dumpjson(self): + d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()} + d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None} + d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None} + return json.dumps(d) + + def add_option(self, key, info): + self.data_labels[key] = info + + def reorder(self): + """reorder settings so that all items related to section always go together""" + + section_ids = {} + settings_items = self.data_labels.items() + for _, item in settings_items: + if item.section not in section_ids: + section_ids[item.section] = len(section_ids) + + self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section])) + + def cast_value(self, key, value): + """casts an arbitrary to the same type as this setting's value with key + Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str) + """ + + if value is None: + return None + + default_value = self.data_labels[key].default + if default_value is None: + default_value = getattr(self, key, None) + if default_value is None: + return None + + expected_type = type(default_value) + if expected_type == bool and value == "False": + value = False + else: + value = expected_type(value) + + return value diff --git a/modules/rng.py b/modules/rng.py index 2d7baea5..f927a318 100644 --- a/modules/rng.py +++ b/modules/rng.py @@ -63,9 +63,8 @@ def randn_without_seed(shape, generator=None): def manual_seed(seed): """Set up a global random number generator using the specified seed.""" - from modules.shared import opts - if opts.randn_source == "NV": + if shared.opts.randn_source == "NV": global nv_rng nv_rng = rng_philox.Generator(seed) return diff --git a/modules/sd_models.py b/modules/sd_models.py index 53c1df54..88a09899 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -14,7 +14,7 @@ import ldm.modules.midas as midas from ldm.util import instantiate_from_config -from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache +from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack from modules.timer import Timer import tomesd @@ -473,7 +473,6 @@ model_data = SdModelData() def get_empty_cond(sd_model): - from modules import extra_networks, processing p = processing.StableDiffusionProcessingTxt2Img() extra_networks.activate(p, {}) @@ -486,8 +485,6 @@ def get_empty_cond(sd_model): def send_model_to_cpu(m): - from modules import lowvram - if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: lowvram.send_everything_to_cpu() else: @@ -497,8 +494,6 @@ def send_model_to_cpu(m): def send_model_to_device(m): - from modules import lowvram - if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: lowvram.setup_for_low_vram(m, shared.cmd_opts.medvram) else: @@ -642,7 +637,6 @@ def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer): def reload_model_weights(sd_model=None, info=None): - from modules import devices, sd_hijack checkpoint_info = info or select_checkpoint() timer = Timer() @@ -705,7 +699,6 @@ def reload_model_weights(sd_model=None, info=None): def unload_model_weights(sd_model=None, info=None): - from modules import devices, sd_hijack 
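# Condensed example of how the new modules/options.py machinery above is
# meant to be used (the section and option names here are made up): templates
# declare defaults plus UI metadata, and an Options instance stores live
# values, enforces restricted/frozen settings, and fires onchange callbacks.
import gradio as gr
from modules import options

templates = options.options_section(('demo', "Demo"), {
    "demo_steps": options.OptionInfo(20, "Default steps", gr.Slider,
                                     {"minimum": 1, "maximum": 150, "step": 1}).info("example only"),
})

demo_opts = options.Options(templates, restricted_opts=set())
print(demo_opts.demo_steps)              # 20 - falls back to the OptionInfo default
print(demo_opts.set("demo_steps", 30))   # True - value changed, onchange (if any) ran
demo_opts.save("config-demo.json")       # plain JSON dump of current values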
timer = Timer() if model_data.sd_model: diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py index 8266fa39..08dd03f1 100644 --- a/modules/sd_models_config.py +++ b/modules/sd_models_config.py @@ -2,7 +2,7 @@ import os import torch -from modules import shared, paths, sd_disable_initialization +from modules import shared, paths, sd_disable_initialization, devices sd_configs_path = shared.sd_configs_path sd_repo_configs_path = os.path.join(paths.paths['Stable Diffusion'], "configs", "stable-diffusion") @@ -29,7 +29,6 @@ def is_using_v_parameterization_for_sd2(state_dict): """ import ldm.modules.diffusionmodules.openaimodel - from modules import devices device = devices.cpu diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 38bcb840..5ac1ac31 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -2,7 +2,8 @@ import os import collections from dataclasses import dataclass -from modules import paths, shared, devices, script_callbacks, sd_models, extra_networks +from modules import paths, shared, devices, script_callbacks, sd_models, extra_networks, lowvram, sd_hijack + import glob from copy import deepcopy @@ -231,8 +232,6 @@ unspecified = object() def reload_vae_weights(sd_model=None, vae_file=unspecified): - from modules import lowvram, devices, sd_hijack - if not sd_model: sd_model = shared.sd_model diff --git a/modules/shared.py b/modules/shared.py index e9b980a4..8ba72f49 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -1,843 +1,51 @@ -import datetime -import json -import os -import re import sys -import threading -import time -import logging import gradio as gr -import torch -import tqdm -import launch -import modules.interrogate -import modules.memmon -import modules.styles -import modules.devices as devices -from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args, rng # noqa: F401 +from modules import shared_cmd_options, shared_gradio_themes, options, shared_items from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 from ldm.models.diffusion.ddpm import LatentDiffusion -from typing import Optional +from modules import util -log = logging.getLogger(__name__) - -demo = None - -parser = cmd_args.parser - -script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file)) -script_loading.preload_extensions(extensions_builtin_dir, parser) - -if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None: - cmd_opts = parser.parse_args() -else: - cmd_opts, _ = parser.parse_known_args() - - -restricted_opts = { - "samples_filename_pattern", - "directories_filename_pattern", - "outdir_samples", - "outdir_txt2img_samples", - "outdir_img2img_samples", - "outdir_extras_samples", - "outdir_grids", - "outdir_txt2img_grids", - "outdir_save", - "outdir_init_images" -} - -# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json -gradio_hf_hub_themes = [ - "gradio/base", - "gradio/glass", - "gradio/monochrome", - "gradio/seafoam", - "gradio/soft", - "gradio/dracula_test", - "abidlabs/dracula_test", - "abidlabs/Lime", - "abidlabs/pakistan", - "Ama434/neutral-barlow", - "dawood/microsoft_windows", - "finlaymacklon/smooth_slate", - "Franklisi/darkmode", - "freddyaboulton/dracula_revamped", - "freddyaboulton/test-blue", - "gstaff/xkcd", - "Insuz/Mocha", - "Insuz/SimpleIndigo", - 
"JohnSmith9982/small_and_pretty", - "nota-ai/theme", - "nuttea/Softblue", - "ParityError/Anime", - "reilnuud/polite", - "remilia/Ghostly", - "rottenlittlecreature/Moon_Goblin", - "step-3-profit/Midnight-Deep", - "Taithrah/Minimal", - "ysharma/huggingface", - "ysharma/steampunk" -] - - -cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access - -devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ - (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer']) - -devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16 -devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16 - -device = devices.device -weight_load_location = None if cmd_opts.lowram else "cpu" +cmd_opts = shared_cmd_options.cmd_opts +parser = shared_cmd_options.parser batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram) parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram -xformers_available = False -config_filename = cmd_opts.ui_settings_file - -os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True) -hypernetworks = {} -loaded_hypernetworks = [] - - -def reload_hypernetworks(): - from modules.hypernetworks import hypernetwork - global hypernetworks - - hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir) - - -class State: - skipped = False - interrupted = False - job = "" - job_no = 0 - job_count = 0 - processing_has_refined_job_count = False - job_timestamp = '0' - sampling_step = 0 - sampling_steps = 0 - current_latent = None - current_image = None - current_image_sampling_step = 0 - id_live_preview = 0 - textinfo = None - time_start = None - server_start = None - _server_command_signal = threading.Event() - _server_command: Optional[str] = None - - @property - def need_restart(self) -> bool: - # Compatibility getter for need_restart. - return self.server_command == "restart" - - @need_restart.setter - def need_restart(self, value: bool) -> None: - # Compatibility setter for need_restart. - if value: - self.server_command = "restart" - - @property - def server_command(self): - return self._server_command - - @server_command.setter - def server_command(self, value: Optional[str]) -> None: - """ - Set the server command to `value` and signal that it's been set. - """ - self._server_command = value - self._server_command_signal.set() - - def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]: - """ - Wait for server command to get set; return and clear the value and signal. 
- """ - if self._server_command_signal.wait(timeout): - self._server_command_signal.clear() - req = self._server_command - self._server_command = None - return req - return None - - def request_restart(self) -> None: - self.interrupt() - self.server_command = "restart" - log.info("Received restart request") - - def skip(self): - self.skipped = True - log.info("Received skip request") - - def interrupt(self): - self.interrupted = True - log.info("Received interrupt request") - - def nextjob(self): - if opts.live_previews_enable and opts.show_progress_every_n_steps == -1: - self.do_set_current_image() - - self.job_no += 1 - self.sampling_step = 0 - self.current_image_sampling_step = 0 - - def dict(self): - obj = { - "skipped": self.skipped, - "interrupted": self.interrupted, - "job": self.job, - "job_count": self.job_count, - "job_timestamp": self.job_timestamp, - "job_no": self.job_no, - "sampling_step": self.sampling_step, - "sampling_steps": self.sampling_steps, - } - - return obj - - def begin(self, job: str = "(unknown)"): - self.sampling_step = 0 - self.job_count = -1 - self.processing_has_refined_job_count = False - self.job_no = 0 - self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") - self.current_latent = None - self.current_image = None - self.current_image_sampling_step = 0 - self.id_live_preview = 0 - self.skipped = False - self.interrupted = False - self.textinfo = None - self.time_start = time.time() - self.job = job - devices.torch_gc() - log.info("Starting job %s", job) - - def end(self): - duration = time.time() - self.time_start - log.info("Ending job %s (%.2f seconds)", self.job, duration) - self.job = "" - self.job_count = 0 - - devices.torch_gc() - - def set_current_image(self): - """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this""" - if not parallel_processing_allowed: - return - - if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.live_previews_enable and opts.show_progress_every_n_steps != -1: - self.do_set_current_image() - - def do_set_current_image(self): - if self.current_latent is None: - return - - import modules.sd_samplers - - try: - if opts.show_progress_grid: - self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent)) - else: - self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent)) - - self.current_image_sampling_step = self.sampling_step - - except Exception: - # when switching models during genration, VAE would be on CPU, so creating an image will fail. 
- # we silently ignore this error - errors.record_exception() - - def assign_current_image(self, image): - self.current_image = image - self.id_live_preview += 1 - - -state = State() -state.server_start = time.time() - styles_filename = cmd_opts.styles_file -prompt_styles = modules.styles.StyleDatabase(styles_filename) - -interrogator = modules.interrogate.InterrogateModels("interrogate") - -face_restorers = [] - - -class OptionInfo: - def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''): - self.default = default - self.label = label - self.component = component - self.component_args = component_args - self.onchange = onchange - self.section = section - self.refresh = refresh - self.do_not_save = False - - self.comment_before = comment_before - """HTML text that will be added after label in UI""" - - self.comment_after = comment_after - """HTML text that will be added before label in UI""" - - def link(self, label, url): - self.comment_before += f"[{label}]" - return self - - def js(self, label, js_func): - self.comment_before += f"[{label}]" - return self - - def info(self, info): - self.comment_after += f"({info})" - return self - - def html(self, html): - self.comment_after += html - return self - - def needs_restart(self): - self.comment_after += " (requires restart)" - return self - - def needs_reload_ui(self): - self.comment_after += " (requires Reload UI)" - return self - - -class OptionHTML(OptionInfo): - def __init__(self, text): - super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs)) - - self.do_not_save = True - - -def options_section(section_identifier, options_dict): - for v in options_dict.values(): - v.section = section_identifier - - return options_dict - - -def list_checkpoint_tiles(): - import modules.sd_models - return modules.sd_models.checkpoint_tiles() - - -def refresh_checkpoints(): - import modules.sd_models - return modules.sd_models.list_models() - - -def list_samplers(): - import modules.sd_samplers - return modules.sd_samplers.all_samplers - - +config_filename = cmd_opts.ui_settings_file hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config} -tab_names = [] - -options_templates = {} - -options_templates.update(options_section(('saving-images', "Saving images/grids"), { - "samples_save": OptionInfo(True, "Always save all generated images"), - "samples_format": OptionInfo('png', 'File format for images'), - "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs), - - "grid_save": OptionInfo(True, "Always save all generated image grids"), - "grid_format": OptionInfo('png', 'File format for grids'), - "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"), - "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"), - "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"), - "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - 
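# Sketch of the job lifecycle implemented by the State class being moved out
# of shared.py above (it lands in modules/shared_state.py; `shared.state` is
# assumed to still be the process-wide instance set up during startup).
# begin()/end() bracket a job, nextjob() advances the batch counters, and
# skip()/interrupt() only set flags that the sampling loop checks.
from modules import shared

shared.state.begin(job="demo-task")
try:
    for _ in range(2):                    # e.g. one iteration per batch
        if shared.state.interrupted:
            break
        shared.state.nextjob()
finally:
    shared.state.end()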
"n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}), - "font": OptionInfo("", "Font for image grids that have text"), - "grid_text_active_color": OptionInfo("#000000", "Text color for image grids", ui_components.FormColorPicker, {}), - "grid_text_inactive_color": OptionInfo("#999999", "Inactive text color for image grids", ui_components.FormColorPicker, {}), - "grid_background_color": OptionInfo("#ffffff", "Background color for image grids", ui_components.FormColorPicker, {}), - - "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"), - "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."), - "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."), - "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."), - "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), - "save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"), - "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"), - "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), - "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"), - "export_for_4chan": OptionInfo(True, "Save copy of large images as JPG").info("if the file size is above the limit, or either width or height are above the limit"), - "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number), - "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number), - "img_max_size_mp": OptionInfo(200, "Maximum image size", gr.Number).info("in megapixels"), - - "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"), - "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), - "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), - "save_init_img": OptionInfo(False, "Save init images when using img2img"), - - "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), - "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), - - "save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that has been interrupted in mid-generation; even if not saved, they will still show up in webui output."), -})) - -options_templates.update(options_section(('saving-paths', "Paths for saving"), { - "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs), - "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs), - "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs), - "outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs), - "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two 
directories below", component_args=hide_dirs), - "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs), - "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs), - "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs), - "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs), -})) - -options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { - "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"), - "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"), - "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"), - "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}), -})) - -options_templates.update(options_section(('upscaling', "Upscaling"), { - "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"), - "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), - "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}), - "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), -})) - -options_templates.update(options_section(('face-restoration', "Face restoration"), { - "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), - "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"), - "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), -})) - -options_templates.update(options_section(('system', "System"), { - "auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}), - "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(), - "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_reload_ui(), - "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"), - "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), - "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), - "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."), - "list_hidden_files": OptionInfo(True, "Load models/files in hidden 
directories").info("directory is hidden if its name starts with \".\""), - "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"), - "hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."), -})) - -options_templates.update(options_section(('training', "Training"), { - "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."), - "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."), - "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."), - "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."), - "dataset_filename_word_regex": OptionInfo("", "Filename word regex"), - "dataset_filename_join_string": OptionInfo(" ", "Filename join string"), - "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}), - "training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"), - "training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"), - "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."), - "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."), - "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."), -})) - -options_templates.update(options_section(('sd', "Stable Diffusion"), { - "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints), - "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}), - "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"), - "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"), - "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"), - "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. 
This may change existing seeds").needs_reload_ui(), - "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), - "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), - "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), - "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), - "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), - "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), -})) - -options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { - "sdxl_crop_top": OptionInfo(0, "crop top coordinate"), - "sdxl_crop_left": OptionInfo(0, "crop left coordinate"), - "sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"), - "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"), -})) -options_templates.update(options_section(('vae', "VAE"), { - "sd_vae_explanation": OptionHTML(""" -VAE is a neural network that transforms a standard RGB -image into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling -(i.e. when the progress bar is between empty and full). For txt2img, VAE is used to create a resulting image after the sampling is finished. -For img2img, VAE is used to process user's input image before the sampling, and to create an image after sampling. 
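# Schematic of the VAE roles described in the sd_vae_explanation text above
# (stub functions, purely illustrative - the real encode/decode are model
# calls): txt2img only decodes the final latent, while img2img additionally
# encodes the user's input image before sampling starts.
def vae_encode(rgb_image):        # RGB image -> latent representation
    return f"latent({rgb_image})"

def vae_decode(latent):           # latent representation -> RGB image
    return f"image({latent})"

def sample(latent):               # the part the progress bar covers
    return latent

print(vae_decode(sample("random-noise")))              # txt2img path
print(vae_decode(sample(vae_encode("input.png"))))     # img2img path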
-"""), - "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), - "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), - "sd_vae_overrides_per_model_preferences": OptionInfo(True, "Selected VAE overrides per-model preferences").info("you can set per-model VAE either by editing user metadata for checkpoints, or by making the VAE have same name as checkpoint"), - "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), - "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"), - "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"), -})) - -options_templates.update(options_section(('img2img', "img2img"), { - "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}), - "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), - "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"), - "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}), - "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_reload_ui(), - "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_reload_ui(), - "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_reload_ui(), - "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(), - "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), - "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), -})) - -options_templates.update(options_section(('optimizations', "Optimizations"), { - "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}), - "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), - 
"token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), - "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), - "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), - "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"), - "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("Do not recalculate conds from prompts if prompts have not changed since previous calculation"), -})) - -options_templates.update(options_section(('compatibility', "Compatibility"), { - "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), - "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."), - "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."), - "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."), - "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."), - "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."), -})) - -options_templates.update(options_section(('interrogate', "Interrogate"), { - "interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"), - "interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"), - "interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}), - "interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}), - "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}), - "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"), - "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types), - "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), - "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"), - "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"), - "deepbooru_escape": OptionInfo(True, "deepbooru: escape (\\) brackets").info("so they are used as literal brackets and not for emphasis"), - "deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"), -})) - 
-options_templates.update(options_section(('extra_networks', "Extra Networks"), { - "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."), - "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'), - "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), - "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"), - "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"), - "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"), - "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"), - "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), - "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), - "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), - "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"), - "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks), -})) - -options_templates.update(options_section(('ui', "User interface"), { - "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(), - "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).info("you can also manually enter any of themes from the gallery.").needs_reload_ui(), - "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"), - "return_grid": OptionInfo(True, "Show grid in results for web"), - "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), - "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), - "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), - "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), - "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), - "js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"), - "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"), - "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), - "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_reload_ui(), - "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(), - "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when 
editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), - "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), - "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), - "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), - "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), - "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(), - "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(), - "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(), - "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), -})) - - -options_templates.update(options_section(('infotext', "Infotext"), { - "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), - "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), - "add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"), - "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"), - "disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"), - "infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI)").html(""""""), - -})) - -options_templates.update(options_section(('ui', "Live previews"), { - "show_progressbar": OptionInfo(True, "Show progressbar"), - "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), - "live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}), - "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"), - "show_progress_every_n_steps": OptionInfo(10, "Live preview display period", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}).info("in sampling steps - show new live preview image every N sampling steps; -1 = only show after completion of batch"), - "show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}).info("Full = slow but pretty; Approx NN and TAESD = fast but low quality; Approx cheap = super fast but terrible otherwise"), - "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}), - 
"live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"), -})) - -options_templates.update(options_section(('sampler-params', "Sampler parameters"), { - "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}).needs_reload_ui(), - "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"), - "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), - "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), - 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}).info('amount of stochasticity; only applies to Euler, Heun, and DPM2'), - 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}).info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'), - 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}).info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"), - 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}).info('amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2'), - 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), - 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), - 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"), - 'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"), - 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"), - 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"), - 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}), - 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}), - 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}).info("must be < sampling steps"), - 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final"), -})) - -options_templates.update(options_section(('postprocessing', "Postprocessing"), { - 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), - 
'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), - 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), -})) - -options_templates.update(options_section((None, "Hidden options"), { - "disabled_extensions": OptionInfo([], "Disable these extensions"), - "disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}), - "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"), - "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), -})) - - -options_templates.update() - - -class Options: - data = None - data_labels = options_templates - typemap = {int: float} - - def __init__(self): - self.data = {k: v.default for k, v in self.data_labels.items()} - - def __setattr__(self, key, value): - if self.data is not None: - if key in self.data or key in self.data_labels: - assert not cmd_opts.freeze_settings, "changing settings is disabled" - - info = opts.data_labels.get(key, None) - if info.do_not_save: - return - - comp_args = info.component_args if info else None - if isinstance(comp_args, dict) and comp_args.get('visible', True) is False: - raise RuntimeError(f"not possible to set {key} because it is restricted") - - if cmd_opts.hide_ui_dir_config and key in restricted_opts: - raise RuntimeError(f"not possible to set {key} because it is restricted") - - self.data[key] = value - return - - return super(Options, self).__setattr__(key, value) - - def __getattr__(self, item): - if self.data is not None: - if item in self.data: - return self.data[item] - - if item in self.data_labels: - return self.data_labels[item].default - - return super(Options, self).__getattribute__(item) - - def set(self, key, value): - """sets an option and calls its onchange callback, returning True if the option changed and False otherwise""" - - oldval = self.data.get(key, None) - if oldval == value: - return False - - if self.data_labels[key].do_not_save: - return False - - try: - setattr(self, key, value) - except RuntimeError: - return False - - if self.data_labels[key].onchange is not None: - try: - self.data_labels[key].onchange() - except Exception as e: - errors.display(e, f"changing setting {key} to {value}") - setattr(self, key, oldval) - return False - - return True - - def get_default(self, key): - """returns the default value for the key""" - - data_label = self.data_labels.get(key) - if data_label is None: - return None - - return data_label.default - - def save(self, filename): - assert not cmd_opts.freeze_settings, "saving settings is disabled" - - with open(filename, "w", encoding="utf8") as file: - json.dump(self.data, file, indent=4) - - def same_type(self, x, y): - if x is None or y is None: - return True - - type_x = self.typemap.get(type(x), type(x)) - type_y = self.typemap.get(type(y), type(y)) - - return type_x == type_y - - def load(self, filename): - with open(filename, "r", encoding="utf8") as file: - self.data = json.load(file) - - # 1.6.0 VAE defaults - if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None: - self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default') - - # 1.1.1 
quicksettings list migration - if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None: - self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')] - - # 1.4.0 ui_reorder - if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data: - self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')] - - bad_settings = 0 - for k, v in self.data.items(): - info = self.data_labels.get(k, None) - if info is not None and not self.same_type(info.default, v): - print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr) - bad_settings += 1 - - if bad_settings > 0: - print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr) - - def onchange(self, key, func, call=True): - item = self.data_labels.get(key) - item.onchange = func - - if call: - func() - - def dumpjson(self): - d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()} - d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None} - d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None} - return json.dumps(d) - - def add_option(self, key, info): - self.data_labels[key] = info - - def reorder(self): - """reorder settings so that all items related to section always go together""" - - section_ids = {} - settings_items = self.data_labels.items() - for _, item in settings_items: - if item.section not in section_ids: - section_ids[item.section] = len(section_ids) - - self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section])) - - def cast_value(self, key, value): - """casts an arbitrary to the same type as this setting's value with key - Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str) - """ - - if value is None: - return None - - default_value = self.data_labels[key].default - if default_value is None: - default_value = getattr(self, key, None) - if default_value is None: - return None - - expected_type = type(default_value) - if expected_type == bool and value == "False": - value = False - else: - value = expected_type(value) - - return value +demo = None +device = None -opts = Options() -if os.path.exists(config_filename): - opts.load(config_filename) +weight_load_location = None +xformers_available = False -class Shared(sys.modules[__name__].__class__): - """ - this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than - at program startup. 
- """ +hypernetworks = {} - sd_model_val = None +loaded_hypernetworks = [] - @property - def sd_model(self): - import modules.sd_models +state = None - return modules.sd_models.model_data.get_sd_model() +prompt_styles = None - @sd_model.setter - def sd_model(self, value): - import modules.sd_models +interrogator = None - modules.sd_models.model_data.set_sd_model(value) +face_restorers = [] +options_templates = None +opts = None -sd_model: LatentDiffusion = None # this var is here just for IDE's type checking; it cannot be accessed because the class field above will be accessed instead -sys.modules[__name__].__class__ = Shared +sd_model: LatentDiffusion = None settings_components = None """assinged from ui.py, a mapping on setting names to gradio components repsponsible for those settings""" +tab_names = [] + latent_upscale_default_mode = "Latent" latent_upscale_modes = { "Latent": {"mode": "bilinear", "antialias": False}, @@ -856,121 +64,24 @@ progress_print_out = sys.stdout gradio_theme = gr.themes.Base() +total_tqdm = None -def reload_gradio_theme(theme_name=None): - global gradio_theme - if not theme_name: - theme_name = opts.gradio_theme - - default_theme_args = dict( - font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'], - font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], - ) - - if theme_name == "Default": - gradio_theme = gr.themes.Default(**default_theme_args) - else: - try: - theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes') - theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json') - if opts.gradio_themes_cache and os.path.exists(theme_cache_path): - gradio_theme = gr.themes.ThemeClass.load(theme_cache_path) - else: - os.makedirs(theme_cache_dir, exist_ok=True) - gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) - gradio_theme.dump(theme_cache_path) - except Exception as e: - errors.display(e, "changing gradio theme") - gradio_theme = gr.themes.Default(**default_theme_args) - - -class TotalTQDM: - def __init__(self): - self._tqdm = None - - def reset(self): - self._tqdm = tqdm.tqdm( - desc="Total progress", - total=state.job_count * state.sampling_steps, - position=1, - file=progress_print_out - ) - - def update(self): - if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: - return - if self._tqdm is None: - self.reset() - self._tqdm.update() - - def updateTotal(self, new_total): - if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: - return - if self._tqdm is None: - self.reset() - self._tqdm.total = new_total - - def clear(self): - if self._tqdm is not None: - self._tqdm.refresh() - self._tqdm.close() - self._tqdm = None - - -total_tqdm = TotalTQDM() - -mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts) -mem_mon.start() - - -def natural_sort_key(s, regex=re.compile('([0-9]+)')): - return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)] - - -def listfiles(dirname): - filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), key=natural_sort_key) if not x.startswith(".")] - return [file for file in filenames if os.path.isfile(file)] - - -def html_path(filename): - return os.path.join(script_path, "html", filename) - - -def html(filename): - path = html_path(filename) - - if os.path.exists(path): - with open(path, encoding="utf8") as file: - return file.read() - - return "" - - -def walk_files(path, allowed_extensions=None): - if not os.path.exists(path): - return - - if allowed_extensions is not None: 
- allowed_extensions = set(allowed_extensions) - - items = list(os.walk(path, followlinks=True)) - items = sorted(items, key=lambda x: natural_sort_key(x[0])) - - for root, _, files in items: - for filename in sorted(files, key=natural_sort_key): - if allowed_extensions is not None: - _, ext = os.path.splitext(filename) - if ext not in allowed_extensions: - continue - - if not opts.list_hidden_files and ("/." in root or "\\." in root): - continue +mem_mon = None - yield os.path.join(root, filename) +options_section = options.options_section +OptionInfo = options.OptionInfo +OptionHTML = options.OptionHTML +natural_sort_key = util.natural_sort_key +listfiles = util.listfiles +html_path = util.html_path +html = util.html +walk_files = util.walk_files +ldm_print = util.ldm_print -def ldm_print(*args, **kwargs): - if opts.hide_ldm_prints: - return +reload_gradio_theme = shared_gradio_themes.reload_gradio_theme - print(*args, **kwargs) +list_checkpoint_tiles = shared_items.list_checkpoint_tiles +refresh_checkpoints = shared_items.refresh_checkpoints +list_samplers = shared_items.list_samplers +reload_hypernetworks = shared_items.reload_hypernetworks diff --git a/modules/shared_cmd_options.py b/modules/shared_cmd_options.py new file mode 100644 index 00000000..af24938b --- /dev/null +++ b/modules/shared_cmd_options.py @@ -0,0 +1,18 @@ +import os + +import launch +from modules import cmd_args, script_loading +from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 + +parser = cmd_args.parser + +script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file)) +script_loading.preload_extensions(extensions_builtin_dir, parser) + +if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None: + cmd_opts = parser.parse_args() +else: + cmd_opts, _ = parser.parse_known_args() + + +cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access diff --git a/modules/shared_gradio_themes.py b/modules/shared_gradio_themes.py new file mode 100644 index 00000000..ad1f2212 --- /dev/null +++ b/modules/shared_gradio_themes.py @@ -0,0 +1,66 @@ +import os + +import gradio as gr + +from modules import errors, shared +from modules.paths_internal import script_path + + +# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json +gradio_hf_hub_themes = [ + "gradio/base", + "gradio/glass", + "gradio/monochrome", + "gradio/seafoam", + "gradio/soft", + "gradio/dracula_test", + "abidlabs/dracula_test", + "abidlabs/Lime", + "abidlabs/pakistan", + "Ama434/neutral-barlow", + "dawood/microsoft_windows", + "finlaymacklon/smooth_slate", + "Franklisi/darkmode", + "freddyaboulton/dracula_revamped", + "freddyaboulton/test-blue", + "gstaff/xkcd", + "Insuz/Mocha", + "Insuz/SimpleIndigo", + "JohnSmith9982/small_and_pretty", + "nota-ai/theme", + "nuttea/Softblue", + "ParityError/Anime", + "reilnuud/polite", + "remilia/Ghostly", + "rottenlittlecreature/Moon_Goblin", + "step-3-profit/Midnight-Deep", + "Taithrah/Minimal", + "ysharma/huggingface", + "ysharma/steampunk" +] + + +def reload_gradio_theme(theme_name=None): + if not theme_name: + theme_name = shared.opts.gradio_theme + + default_theme_args = dict( + font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'], + font_mono=['IBM Plex Mono', 
'ui-monospace', 'Consolas', 'monospace'],
+    )
+
+    if theme_name == "Default":
+        shared.gradio_theme = gr.themes.Default(**default_theme_args)
+    else:
+        try:
+            theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes')
+            theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json')
+            if shared.opts.gradio_themes_cache and os.path.exists(theme_cache_path):
+                shared.gradio_theme = gr.themes.ThemeClass.load(theme_cache_path)
+            else:
+                os.makedirs(theme_cache_dir, exist_ok=True)
+                shared.gradio_theme = gr.themes.ThemeClass.from_hub(theme_name)
+                shared.gradio_theme.dump(theme_cache_path)
+        except Exception as e:
+            errors.display(e, "changing gradio theme")
+            shared.gradio_theme = gr.themes.Default(**default_theme_args)
diff --git a/modules/shared_init.py b/modules/shared_init.py
new file mode 100644
index 00000000..e7fc18d2
--- /dev/null
+++ b/modules/shared_init.py
@@ -0,0 +1,48 @@
+import os
+
+import torch
+
+from modules import shared
+from modules.shared import cmd_opts
+
+
+def initialize():
+    """Initializes fields inside the shared module in a controlled manner.
+
+    Should be called early because some other modules you can import might need these fields to be already set.
+    """
+
+    os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True)
+
+    from modules import options, shared_options
+    shared.options_templates = shared_options.options_templates
+    shared.opts = options.Options(shared_options.options_templates, shared_options.restricted_opts)
+    if os.path.exists(shared.config_filename):
+        shared.opts.load(shared.config_filename)
+
+    from modules import devices
+    devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
+        (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer'])
+
+    devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16
+    devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16
+
+    shared.device = devices.device
+    shared.weight_load_location = None if cmd_opts.lowram else "cpu"
+
+    from modules import shared_state
+    shared.state = shared_state.State()
+
+    from modules import styles
+    shared.prompt_styles = styles.StyleDatabase(shared.styles_filename)
+
+    from modules import interrogate
+    shared.interrogator = interrogate.InterrogateModels("interrogate")
+
+    from modules import shared_total_tqdm
+    shared.total_tqdm = shared_total_tqdm.TotalTQDM()
+
+    from modules import memmon, devices
+    shared.mem_mon = memmon.MemUsageMonitor("MemMon", devices.device, shared.opts)
+    shared.mem_mon.start()
+
diff --git a/modules/shared_items.py b/modules/shared_items.py
index 89792e88..e4ec40a8 100644
--- a/modules/shared_items.py
+++ b/modules/shared_items.py
@@ -1,3 +1,6 @@
+import sys
+
+from modules.shared_cmd_options import cmd_opts


 def realesrgan_models_names():
@@ -41,6 +44,28 @@ def refresh_unet_list():
     modules.sd_unet.list_unets()


+def list_checkpoint_tiles():
+    import modules.sd_models
+    return modules.sd_models.checkpoint_tiles()
+
+
+def refresh_checkpoints():
+    import modules.sd_models
+    return modules.sd_models.list_models()
+
+
+def list_samplers():
+    import modules.sd_samplers
+    return modules.sd_samplers.all_samplers
+
+
+def reload_hypernetworks():
+    from modules.hypernetworks import hypernetwork
+    from modules import shared
+
+    shared.hypernetworks = 
hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir) + + ui_reorder_categories_builtin_items = [ "inpaint", "sampler", @@ -67,3 +92,27 @@ def ui_reorder_categories(): yield from sections yield "scripts" + + +class Shared(sys.modules[__name__].__class__): + """ + this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than + at program startup. + """ + + sd_model_val = None + + @property + def sd_model(self): + import modules.sd_models + + return modules.sd_models.model_data.get_sd_model() + + @sd_model.setter + def sd_model(self, value): + import modules.sd_models + + modules.sd_models.model_data.set_sd_model(value) + + +sys.modules['modules.shared'].__class__ = Shared diff --git a/modules/shared_options.py b/modules/shared_options.py index e9b980a4..7468bc81 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -1,40 +1,12 @@ -import datetime -import json -import os -import re -import sys -import threading -import time -import logging - import gradio as gr -import torch -import tqdm - -import launch -import modules.interrogate -import modules.memmon -import modules.styles -import modules.devices as devices -from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args, rng # noqa: F401 -from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 -from ldm.models.diffusion.ddpm import LatentDiffusion -from typing import Optional - -log = logging.getLogger(__name__) - -demo = None - -parser = cmd_args.parser -script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file)) -script_loading.preload_extensions(extensions_builtin_dir, parser) - -if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None: - cmd_opts = parser.parse_args() -else: - cmd_opts, _ = parser.parse_known_args() +from modules import localization, ui_components, shared_items, shared, interrogate, shared_gradio_themes +from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 +from modules.shared_cmd_options import cmd_opts +from modules.options import options_section, OptionInfo, OptionHTML +options_templates = {} +hide_dirs = shared.hide_dirs restricted_opts = { "samples_filename_pattern", @@ -49,302 +21,6 @@ restricted_opts = { "outdir_init_images" } -# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json -gradio_hf_hub_themes = [ - "gradio/base", - "gradio/glass", - "gradio/monochrome", - "gradio/seafoam", - "gradio/soft", - "gradio/dracula_test", - "abidlabs/dracula_test", - "abidlabs/Lime", - "abidlabs/pakistan", - "Ama434/neutral-barlow", - "dawood/microsoft_windows", - "finlaymacklon/smooth_slate", - "Franklisi/darkmode", - "freddyaboulton/dracula_revamped", - "freddyaboulton/test-blue", - "gstaff/xkcd", - "Insuz/Mocha", - "Insuz/SimpleIndigo", - "JohnSmith9982/small_and_pretty", - "nota-ai/theme", - "nuttea/Softblue", - "ParityError/Anime", - "reilnuud/polite", - "remilia/Ghostly", - "rottenlittlecreature/Moon_Goblin", - "step-3-profit/Midnight-Deep", - "Taithrah/Minimal", - "ysharma/huggingface", - "ysharma/steampunk" -] - - -cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) 
and not cmd_opts.enable_insecure_extension_access - -devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ - (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer']) - -devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16 -devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16 - -device = devices.device -weight_load_location = None if cmd_opts.lowram else "cpu" - -batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram) -parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram -xformers_available = False -config_filename = cmd_opts.ui_settings_file - -os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True) -hypernetworks = {} -loaded_hypernetworks = [] - - -def reload_hypernetworks(): - from modules.hypernetworks import hypernetwork - global hypernetworks - - hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir) - - -class State: - skipped = False - interrupted = False - job = "" - job_no = 0 - job_count = 0 - processing_has_refined_job_count = False - job_timestamp = '0' - sampling_step = 0 - sampling_steps = 0 - current_latent = None - current_image = None - current_image_sampling_step = 0 - id_live_preview = 0 - textinfo = None - time_start = None - server_start = None - _server_command_signal = threading.Event() - _server_command: Optional[str] = None - - @property - def need_restart(self) -> bool: - # Compatibility getter for need_restart. - return self.server_command == "restart" - - @need_restart.setter - def need_restart(self, value: bool) -> None: - # Compatibility setter for need_restart. - if value: - self.server_command = "restart" - - @property - def server_command(self): - return self._server_command - - @server_command.setter - def server_command(self, value: Optional[str]) -> None: - """ - Set the server command to `value` and signal that it's been set. - """ - self._server_command = value - self._server_command_signal.set() - - def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]: - """ - Wait for server command to get set; return and clear the value and signal. 
- """ - if self._server_command_signal.wait(timeout): - self._server_command_signal.clear() - req = self._server_command - self._server_command = None - return req - return None - - def request_restart(self) -> None: - self.interrupt() - self.server_command = "restart" - log.info("Received restart request") - - def skip(self): - self.skipped = True - log.info("Received skip request") - - def interrupt(self): - self.interrupted = True - log.info("Received interrupt request") - - def nextjob(self): - if opts.live_previews_enable and opts.show_progress_every_n_steps == -1: - self.do_set_current_image() - - self.job_no += 1 - self.sampling_step = 0 - self.current_image_sampling_step = 0 - - def dict(self): - obj = { - "skipped": self.skipped, - "interrupted": self.interrupted, - "job": self.job, - "job_count": self.job_count, - "job_timestamp": self.job_timestamp, - "job_no": self.job_no, - "sampling_step": self.sampling_step, - "sampling_steps": self.sampling_steps, - } - - return obj - - def begin(self, job: str = "(unknown)"): - self.sampling_step = 0 - self.job_count = -1 - self.processing_has_refined_job_count = False - self.job_no = 0 - self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") - self.current_latent = None - self.current_image = None - self.current_image_sampling_step = 0 - self.id_live_preview = 0 - self.skipped = False - self.interrupted = False - self.textinfo = None - self.time_start = time.time() - self.job = job - devices.torch_gc() - log.info("Starting job %s", job) - - def end(self): - duration = time.time() - self.time_start - log.info("Ending job %s (%.2f seconds)", self.job, duration) - self.job = "" - self.job_count = 0 - - devices.torch_gc() - - def set_current_image(self): - """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this""" - if not parallel_processing_allowed: - return - - if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.live_previews_enable and opts.show_progress_every_n_steps != -1: - self.do_set_current_image() - - def do_set_current_image(self): - if self.current_latent is None: - return - - import modules.sd_samplers - - try: - if opts.show_progress_grid: - self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent)) - else: - self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent)) - - self.current_image_sampling_step = self.sampling_step - - except Exception: - # when switching models during genration, VAE would be on CPU, so creating an image will fail. 
- # we silently ignore this error - errors.record_exception() - - def assign_current_image(self, image): - self.current_image = image - self.id_live_preview += 1 - - -state = State() -state.server_start = time.time() - -styles_filename = cmd_opts.styles_file -prompt_styles = modules.styles.StyleDatabase(styles_filename) - -interrogator = modules.interrogate.InterrogateModels("interrogate") - -face_restorers = [] - - -class OptionInfo: - def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''): - self.default = default - self.label = label - self.component = component - self.component_args = component_args - self.onchange = onchange - self.section = section - self.refresh = refresh - self.do_not_save = False - - self.comment_before = comment_before - """HTML text that will be added after label in UI""" - - self.comment_after = comment_after - """HTML text that will be added before label in UI""" - - def link(self, label, url): - self.comment_before += f"[{label}]" - return self - - def js(self, label, js_func): - self.comment_before += f"[{label}]" - return self - - def info(self, info): - self.comment_after += f"({info})" - return self - - def html(self, html): - self.comment_after += html - return self - - def needs_restart(self): - self.comment_after += " (requires restart)" - return self - - def needs_reload_ui(self): - self.comment_after += " (requires Reload UI)" - return self - - -class OptionHTML(OptionInfo): - def __init__(self, text): - super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs)) - - self.do_not_save = True - - -def options_section(section_identifier, options_dict): - for v in options_dict.values(): - v.section = section_identifier - - return options_dict - - -def list_checkpoint_tiles(): - import modules.sd_models - return modules.sd_models.checkpoint_tiles() - - -def refresh_checkpoints(): - import modules.sd_models - return modules.sd_models.list_models() - - -def list_samplers(): - import modules.sd_samplers - return modules.sd_samplers.all_samplers - - -hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config} -tab_names = [] - -options_templates = {} - options_templates.update(options_section(('saving-images', "Saving images/grids"), { "samples_save": OptionInfo(True, "Always save all generated images"), "samples_format": OptionInfo('png', 'File format for images'), @@ -412,11 +88,11 @@ options_templates.update(options_section(('upscaling', "Upscaling"), { "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"), "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}), - "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), + "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in shared.sd_upscalers]}), })) options_templates.update(options_section(('face-restoration', "Face restoration"), { - "face_restoration_model": OptionInfo("CodeFormer", "Face 
restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), + "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in shared.face_restorers]}), "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"), "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), })) @@ -450,7 +126,7 @@ options_templates.update(options_section(('training', "Training"), { })) options_templates.update(options_section(('sd', "Stable Diffusion"), { - "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints), + "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": shared_items.list_checkpoint_tiles()}, refresh=shared_items.refresh_checkpoints), "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}), "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"), "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"), @@ -526,7 +202,7 @@ options_templates.update(options_section(('interrogate', "Interrogate"), { "interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}), "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}), "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"), - "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types), + "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": interrogate.category_types()}, refresh=interrogate.category_types), "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"), "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"), @@ -546,12 +222,12 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), { "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"), - "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks), + "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *shared.hypernetworks]}, refresh=shared_items.reload_hypernetworks), })) 
options_templates.update(options_section(('ui', "User interface"), { "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(), - "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).info("you can also manually enter any of themes from the gallery.").needs_reload_ui(), + "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + shared_gradio_themes.gradio_hf_hub_themes}).info("you can also manually enter any of themes from the gallery.").needs_reload_ui(), "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"), "return_grid": OptionInfo(True, "Show grid in results for web"), "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), @@ -568,9 +244,9 @@ options_templates.update(options_section(('ui', "User interface"), { "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), - "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), - "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), - "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), + "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), + "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(), + "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(), "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(), "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(), "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(), @@ -605,7 +281,7 @@ options_templates.update(options_section(('ui', "Live previews"), { })) options_templates.update(options_section(('sampler-params', "Sampler parameters"), { - "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}).needs_reload_ui(), + "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x 
in shared_items.list_samplers()]}).needs_reload_ui(), "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"), "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), @@ -638,339 +314,3 @@ options_templates.update(options_section((None, "Hidden options"), { "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), })) - -options_templates.update() - - -class Options: - data = None - data_labels = options_templates - typemap = {int: float} - - def __init__(self): - self.data = {k: v.default for k, v in self.data_labels.items()} - - def __setattr__(self, key, value): - if self.data is not None: - if key in self.data or key in self.data_labels: - assert not cmd_opts.freeze_settings, "changing settings is disabled" - - info = opts.data_labels.get(key, None) - if info.do_not_save: - return - - comp_args = info.component_args if info else None - if isinstance(comp_args, dict) and comp_args.get('visible', True) is False: - raise RuntimeError(f"not possible to set {key} because it is restricted") - - if cmd_opts.hide_ui_dir_config and key in restricted_opts: - raise RuntimeError(f"not possible to set {key} because it is restricted") - - self.data[key] = value - return - - return super(Options, self).__setattr__(key, value) - - def __getattr__(self, item): - if self.data is not None: - if item in self.data: - return self.data[item] - - if item in self.data_labels: - return self.data_labels[item].default - - return super(Options, self).__getattribute__(item) - - def set(self, key, value): - """sets an option and calls its onchange callback, returning True if the option changed and False otherwise""" - - oldval = self.data.get(key, None) - if oldval == value: - return False - - if self.data_labels[key].do_not_save: - return False - - try: - setattr(self, key, value) - except RuntimeError: - return False - - if self.data_labels[key].onchange is not None: - try: - self.data_labels[key].onchange() - except Exception as e: - errors.display(e, f"changing setting {key} to {value}") - setattr(self, key, oldval) - return False - - return True - - def get_default(self, key): - """returns the default value for the key""" - - data_label = self.data_labels.get(key) - if data_label is None: - return None - - return data_label.default - - def save(self, filename): - assert not cmd_opts.freeze_settings, "saving settings is disabled" - - with open(filename, "w", encoding="utf8") as file: - json.dump(self.data, file, indent=4) - - def same_type(self, x, y): - if x is None or y is None: - return True - - type_x = self.typemap.get(type(x), type(x)) - type_y = self.typemap.get(type(y), type(y)) - - return type_x == type_y - - def load(self, filename): - with open(filename, "r", encoding="utf8") as file: - self.data = json.load(file) - - # 1.6.0 VAE defaults - if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None: - self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default') - - # 1.1.1 quicksettings list migration - if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None: - 
self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')] - - # 1.4.0 ui_reorder - if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data: - self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')] - - bad_settings = 0 - for k, v in self.data.items(): - info = self.data_labels.get(k, None) - if info is not None and not self.same_type(info.default, v): - print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr) - bad_settings += 1 - - if bad_settings > 0: - print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr) - - def onchange(self, key, func, call=True): - item = self.data_labels.get(key) - item.onchange = func - - if call: - func() - - def dumpjson(self): - d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()} - d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None} - d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None} - return json.dumps(d) - - def add_option(self, key, info): - self.data_labels[key] = info - - def reorder(self): - """reorder settings so that all items related to section always go together""" - - section_ids = {} - settings_items = self.data_labels.items() - for _, item in settings_items: - if item.section not in section_ids: - section_ids[item.section] = len(section_ids) - - self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section])) - - def cast_value(self, key, value): - """casts an arbitrary to the same type as this setting's value with key - Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str) - """ - - if value is None: - return None - - default_value = self.data_labels[key].default - if default_value is None: - default_value = getattr(self, key, None) - if default_value is None: - return None - - expected_type = type(default_value) - if expected_type == bool and value == "False": - value = False - else: - value = expected_type(value) - - return value - - -opts = Options() -if os.path.exists(config_filename): - opts.load(config_filename) - - -class Shared(sys.modules[__name__].__class__): - """ - this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than - at program startup. 
- """ - - sd_model_val = None - - @property - def sd_model(self): - import modules.sd_models - - return modules.sd_models.model_data.get_sd_model() - - @sd_model.setter - def sd_model(self, value): - import modules.sd_models - - modules.sd_models.model_data.set_sd_model(value) - - -sd_model: LatentDiffusion = None # this var is here just for IDE's type checking; it cannot be accessed because the class field above will be accessed instead -sys.modules[__name__].__class__ = Shared - -settings_components = None -"""assinged from ui.py, a mapping on setting names to gradio components repsponsible for those settings""" - -latent_upscale_default_mode = "Latent" -latent_upscale_modes = { - "Latent": {"mode": "bilinear", "antialias": False}, - "Latent (antialiased)": {"mode": "bilinear", "antialias": True}, - "Latent (bicubic)": {"mode": "bicubic", "antialias": False}, - "Latent (bicubic antialiased)": {"mode": "bicubic", "antialias": True}, - "Latent (nearest)": {"mode": "nearest", "antialias": False}, - "Latent (nearest-exact)": {"mode": "nearest-exact", "antialias": False}, -} - -sd_upscalers = [] - -clip_model = None - -progress_print_out = sys.stdout - -gradio_theme = gr.themes.Base() - - -def reload_gradio_theme(theme_name=None): - global gradio_theme - if not theme_name: - theme_name = opts.gradio_theme - - default_theme_args = dict( - font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'], - font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], - ) - - if theme_name == "Default": - gradio_theme = gr.themes.Default(**default_theme_args) - else: - try: - theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes') - theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json') - if opts.gradio_themes_cache and os.path.exists(theme_cache_path): - gradio_theme = gr.themes.ThemeClass.load(theme_cache_path) - else: - os.makedirs(theme_cache_dir, exist_ok=True) - gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) - gradio_theme.dump(theme_cache_path) - except Exception as e: - errors.display(e, "changing gradio theme") - gradio_theme = gr.themes.Default(**default_theme_args) - - -class TotalTQDM: - def __init__(self): - self._tqdm = None - - def reset(self): - self._tqdm = tqdm.tqdm( - desc="Total progress", - total=state.job_count * state.sampling_steps, - position=1, - file=progress_print_out - ) - - def update(self): - if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: - return - if self._tqdm is None: - self.reset() - self._tqdm.update() - - def updateTotal(self, new_total): - if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: - return - if self._tqdm is None: - self.reset() - self._tqdm.total = new_total - - def clear(self): - if self._tqdm is not None: - self._tqdm.refresh() - self._tqdm.close() - self._tqdm = None - - -total_tqdm = TotalTQDM() - -mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts) -mem_mon.start() - - -def natural_sort_key(s, regex=re.compile('([0-9]+)')): - return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)] - - -def listfiles(dirname): - filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), key=natural_sort_key) if not x.startswith(".")] - return [file for file in filenames if os.path.isfile(file)] - - -def html_path(filename): - return os.path.join(script_path, "html", filename) - - -def html(filename): - path = html_path(filename) - - if os.path.exists(path): - with open(path, encoding="utf8") as file: - 
return file.read() - - return "" - - -def walk_files(path, allowed_extensions=None): - if not os.path.exists(path): - return - - if allowed_extensions is not None: - allowed_extensions = set(allowed_extensions) - - items = list(os.walk(path, followlinks=True)) - items = sorted(items, key=lambda x: natural_sort_key(x[0])) - - for root, _, files in items: - for filename in sorted(files, key=natural_sort_key): - if allowed_extensions is not None: - _, ext = os.path.splitext(filename) - if ext not in allowed_extensions: - continue - - if not opts.list_hidden_files and ("/." in root or "\\." in root): - continue - - yield os.path.join(root, filename) - - -def ldm_print(*args, **kwargs): - if opts.hide_ldm_prints: - return - - print(*args, **kwargs) diff --git a/modules/shared_state.py b/modules/shared_state.py new file mode 100644 index 00000000..3dc9c788 --- /dev/null +++ b/modules/shared_state.py @@ -0,0 +1,159 @@ +import datetime +import logging +import threading +import time + +from modules import errors, shared, devices +from typing import Optional + +log = logging.getLogger(__name__) + + +class State: + skipped = False + interrupted = False + job = "" + job_no = 0 + job_count = 0 + processing_has_refined_job_count = False + job_timestamp = '0' + sampling_step = 0 + sampling_steps = 0 + current_latent = None + current_image = None + current_image_sampling_step = 0 + id_live_preview = 0 + textinfo = None + time_start = None + server_start = None + _server_command_signal = threading.Event() + _server_command: Optional[str] = None + + def __init__(self): + self.server_start = time.time() + + @property + def need_restart(self) -> bool: + # Compatibility getter for need_restart. + return self.server_command == "restart" + + @need_restart.setter + def need_restart(self, value: bool) -> None: + # Compatibility setter for need_restart. + if value: + self.server_command = "restart" + + @property + def server_command(self): + return self._server_command + + @server_command.setter + def server_command(self, value: Optional[str]) -> None: + """ + Set the server command to `value` and signal that it's been set. + """ + self._server_command = value + self._server_command_signal.set() + + def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]: + """ + Wait for server command to get set; return and clear the value and signal. 
+ """ + if self._server_command_signal.wait(timeout): + self._server_command_signal.clear() + req = self._server_command + self._server_command = None + return req + return None + + def request_restart(self) -> None: + self.interrupt() + self.server_command = "restart" + log.info("Received restart request") + + def skip(self): + self.skipped = True + log.info("Received skip request") + + def interrupt(self): + self.interrupted = True + log.info("Received interrupt request") + + def nextjob(self): + if shared.opts.live_previews_enable and shared.opts.show_progress_every_n_steps == -1: + self.do_set_current_image() + + self.job_no += 1 + self.sampling_step = 0 + self.current_image_sampling_step = 0 + + def dict(self): + obj = { + "skipped": self.skipped, + "interrupted": self.interrupted, + "job": self.job, + "job_count": self.job_count, + "job_timestamp": self.job_timestamp, + "job_no": self.job_no, + "sampling_step": self.sampling_step, + "sampling_steps": self.sampling_steps, + } + + return obj + + def begin(self, job: str = "(unknown)"): + self.sampling_step = 0 + self.job_count = -1 + self.processing_has_refined_job_count = False + self.job_no = 0 + self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") + self.current_latent = None + self.current_image = None + self.current_image_sampling_step = 0 + self.id_live_preview = 0 + self.skipped = False + self.interrupted = False + self.textinfo = None + self.time_start = time.time() + self.job = job + devices.torch_gc() + log.info("Starting job %s", job) + + def end(self): + duration = time.time() - self.time_start + log.info("Ending job %s (%.2f seconds)", self.job, duration) + self.job = "" + self.job_count = 0 + + devices.torch_gc() + + def set_current_image(self): + """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this""" + if not shared.parallel_processing_allowed: + return + + if self.sampling_step - self.current_image_sampling_step >= shared.opts.show_progress_every_n_steps and shared.opts.live_previews_enable and shared.opts.show_progress_every_n_steps != -1: + self.do_set_current_image() + + def do_set_current_image(self): + if self.current_latent is None: + return + + import modules.sd_samplers + + try: + if shared.opts.show_progress_grid: + self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent)) + else: + self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent)) + + self.current_image_sampling_step = self.sampling_step + + except Exception: + # when switching models during genration, VAE would be on CPU, so creating an image will fail. 
+ # we silently ignore this error + errors.record_exception() + + def assign_current_image(self, image): + self.current_image = image + self.id_live_preview += 1 diff --git a/modules/shared_total_tqdm.py b/modules/shared_total_tqdm.py new file mode 100644 index 00000000..cf82e104 --- /dev/null +++ b/modules/shared_total_tqdm.py @@ -0,0 +1,37 @@ +import tqdm + +from modules import shared + + +class TotalTQDM: + def __init__(self): + self._tqdm = None + + def reset(self): + self._tqdm = tqdm.tqdm( + desc="Total progress", + total=shared.state.job_count * shared.state.sampling_steps, + position=1, + file=shared.progress_print_out + ) + + def update(self): + if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars: + return + if self._tqdm is None: + self.reset() + self._tqdm.update() + + def updateTotal(self, new_total): + if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars: + return + if self._tqdm is None: + self.reset() + self._tqdm.total = new_total + + def clear(self): + if self._tqdm is not None: + self._tqdm.refresh() + self._tqdm.close() + self._tqdm = None + diff --git a/modules/sysinfo.py b/modules/sysinfo.py index cf24c6dd..7d906e1f 100644 --- a/modules/sysinfo.py +++ b/modules/sysinfo.py @@ -10,7 +10,7 @@ import psutil import re import launch -from modules import paths_internal, timer +from modules import paths_internal, timer, shared, extensions, errors checksum_token = "DontStealMyGamePlz__WINNERS_DONT_USE_DRUGS__DONT_COPY_THAT_FLOPPY" environment_whitelist = { @@ -115,8 +115,6 @@ def format_exception(e, tb): def get_exceptions(): try: - from modules import errors - return list(reversed(errors.exception_records)) except Exception as e: return str(e) @@ -142,8 +140,6 @@ def get_torch_sysinfo(): def get_extensions(*, enabled): try: - from modules import extensions - def to_json(x: extensions.Extension): return { "name": x.name, @@ -160,7 +156,6 @@ def get_extensions(*, enabled): def get_config(): try: - from modules import shared return shared.opts.data except Exception as e: return str(e) diff --git a/modules/ui.py b/modules/ui.py index e3753e97..30b80417 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -13,7 +13,7 @@ from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import gradio_extensons # noqa: F401 -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, devices, ui_extra_networks from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path from modules.ui_common import create_refresh_button @@ -91,8 +91,6 @@ def send_gradio_gallery_to_image(x): def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y): - from modules import processing, devices - if not enable: return "" @@ -630,7 +628,6 @@ def create_ui(): toprow.token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.prompt, steps], outputs=[toprow.token_counter]) 
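The new modules/shared_state.py introduced above also carries the Event-based server-command handshake used for the restart flow. A minimal sketch of that handshake, assuming modules.shared_state can be imported on its own and logging is already configured (names are taken from the State class above; the surrounding server loop is omitted):

from modules.shared_state import State

state = State()
state.request_restart()                                          # sets interrupted and queues the "restart" command
assert state.wait_for_server_command(timeout=1.0) == "restart"   # the command is returned once...
assert state.wait_for_server_command(timeout=0.1) is None        # ...and then cleared again

A caller can block on the Event instead of polling the old need_restart flag, which survives only as a compatibility property.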
toprow.negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter]) - from modules import ui_extra_networks extra_networks_ui = ui_extra_networks.create_ui(txt2img_interface, [txt2img_generation_tab], 'txt2img') ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) @@ -995,7 +992,6 @@ def create_ui(): paste_button=toprow.paste, tabname="img2img", source_text_component=toprow.prompt, source_image_component=None, )) - from modules import ui_extra_networks extra_networks_ui_img2img = ui_extra_networks.create_ui(img2img_interface, [img2img_generation_tab], 'img2img') ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) diff --git a/modules/ui_common.py b/modules/ui_common.py index 303af9cd..99d19ff0 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -11,7 +11,7 @@ from modules import call_queue, shared from modules.generation_parameters_copypaste import image_from_url_text import modules.images from modules.ui_components import ToolButton - +import modules.generation_parameters_copypaste as parameters_copypaste folder_symbol = '\U0001f4c2' # 📂 refresh_symbol = '\U0001f504' # 🔄 @@ -105,8 +105,6 @@ def save_files(js_data, images, do_make_zip, index): def create_output_panel(tabname, outdir): - from modules import shared - import modules.generation_parameters_copypaste as parameters_copypaste def open_folder(f): if not os.path.exists(f): diff --git a/modules/util.py b/modules/util.py new file mode 100644 index 00000000..60afc067 --- /dev/null +++ b/modules/util.py @@ -0,0 +1,58 @@ +import os +import re + +from modules import shared +from modules.paths_internal import script_path + + +def natural_sort_key(s, regex=re.compile('([0-9]+)')): + return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)] + + +def listfiles(dirname): + filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), key=natural_sort_key) if not x.startswith(".")] + return [file for file in filenames if os.path.isfile(file)] + + +def html_path(filename): + return os.path.join(script_path, "html", filename) + + +def html(filename): + path = html_path(filename) + + if os.path.exists(path): + with open(path, encoding="utf8") as file: + return file.read() + + return "" + + +def walk_files(path, allowed_extensions=None): + if not os.path.exists(path): + return + + if allowed_extensions is not None: + allowed_extensions = set(allowed_extensions) + + items = list(os.walk(path, followlinks=True)) + items = sorted(items, key=lambda x: natural_sort_key(x[0])) + + for root, _, files in items: + for filename in sorted(files, key=natural_sort_key): + if allowed_extensions is not None: + _, ext = os.path.splitext(filename) + if ext not in allowed_extensions: + continue + + if not shared.opts.list_hidden_files and ("/." in root or "\\." 
in root): + continue + + yield os.path.join(root, filename) + + +def ldm_print(*args, **kwargs): + if shared.opts.hide_ldm_prints: + return + + print(*args, **kwargs) diff --git a/webui.py b/webui.py index 6d36f880..0f1ace97 100644 --- a/webui.py +++ b/webui.py @@ -43,12 +43,15 @@ startup_timer.record("import torch") import gradio # noqa: F401 startup_timer.record("import gradio") -from modules import paths, timer, import_hook, errors, devices # noqa: F401 +from modules import paths, timer, import_hook, errors # noqa: F401 startup_timer.record("setup paths") import ldm.modules.encoders.modules # noqa: F401 startup_timer.record("import ldm") +from modules import shared_init, shared, shared_items +shared_init.initialize() +startup_timer.record("initialize shared") from modules import extra_networks from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, queue_lock # noqa: F401 @@ -58,8 +61,6 @@ if ".dev" in torch.__version__ or "+git" in torch.__version__: torch.__long_version__ = torch.__version__ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) -from modules import shared - if not shared.cmd_opts.skip_version_check: errors.check_versions() @@ -82,7 +83,7 @@ import modules.textual_inversion.textual_inversion import modules.progress import modules.ui -from modules import modelloader +from modules import modelloader, devices from modules.shared import cmd_opts import modules.hypernetworks.hypernetwork @@ -297,7 +298,7 @@ def initialize_rest(*, reload_script_modules=False): Thread(target=load_model).start() - shared.reload_hypernetworks() + shared_items.reload_hypernetworks() startup_timer.record("reload hypernetworks") ui_extra_networks.initialize() -- cgit v1.2.3 From ff1bfd01ba33ee03b3eb99b1669bd29a3a6658ca Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Wed, 9 Aug 2023 14:41:25 -0400 Subject: Remove up down symbol --- modules/ui.py | 1 - 1 file changed, 1 deletion(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index 30b80417..4e1daa8d 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -78,7 +78,6 @@ extra_networks_symbol = '\U0001F3B4' # 🎴 switch_values_symbol = '\U000021C5' # ⇅ restore_progress_symbol = '\U0001F300' # 🌀 detect_image_size_symbol = '\U0001F4D0' # 📐 -up_down_symbol = '\u2195\ufe0f' # ↕️ plaintext_to_html = ui_common.plaintext_to_html -- cgit v1.2.3 From 9199b6b7ebe96cdf09571ba874a103e8ed8c90ef Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 11:20:46 +0300 Subject: add a custom UI element that combines accordion and checkbox rework hires fix UI to use accordion prevent bogus progress output in console when calculating hires fix dimensions --- javascript/inputAccordion.js | 37 +++++++++++++++++++++ modules/processing.py | 77 +++++++++++++++++++++++--------------------- modules/ui.py | 23 ++++--------- modules/ui_components.py | 31 ++++++++++++++++++ style.css | 15 +++++---- 5 files changed, 124 insertions(+), 59 deletions(-) create mode 100644 javascript/inputAccordion.js (limited to 'modules/ui.py') diff --git a/javascript/inputAccordion.js b/javascript/inputAccordion.js new file mode 100644 index 00000000..a5eef229 --- /dev/null +++ b/javascript/inputAccordion.js @@ -0,0 +1,37 @@ +var observerAccordionOpen = new MutationObserver(function(mutations) { + mutations.forEach(function(mutationRecord) { + var elem = mutationRecord.target; + var open = elem.classList.contains('open'); + + var accordion = elem.parentNode; + 
accordion.classList.toggle('input-accordion-open', open); + + var checkbox = gradioApp().querySelector('#' + accordion.id + "-checkbox input"); + checkbox.checked = open; + updateInput(checkbox); + + extra = gradioApp().querySelector('#' + accordion.id + "-extra"); + if(extra){ + extra.style.display = open ? "" : "none"; + } + }); +}); + +function inputAccordionChecked(id, checked){ + var label = gradioApp().querySelector('#' + id + " .label-wrap"); + if(label.classList.contains('open') != checked){ + label.click(); + } +} + +onUiLoaded(function() { + for (var accordion of gradioApp().querySelectorAll('.input-accordion')) { + var labelWrap = accordion.querySelector('.label-wrap'); + observerAccordionOpen.observe(labelWrap, {attributes: true, attributeFilter: ['class']}); + + var extra = gradioApp().querySelector('#' + accordion.id + "-extra"); + if(extra){ + labelWrap.insertBefore(extra, labelWrap.lastElementChild) + } + } +}); diff --git a/modules/processing.py b/modules/processing.py index 6961b7b1..7819644c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -924,6 +924,45 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.hr_c = None self.hr_uc = None + def calculate_target_resolution(self): + if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height): + self.hr_resize_x = self.width + self.hr_resize_y = self.height + self.hr_upscale_to_x = self.width + self.hr_upscale_to_y = self.height + + self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height) + self.applied_old_hires_behavior_to = (self.width, self.height) + + if self.hr_resize_x == 0 and self.hr_resize_y == 0: + self.extra_generation_params["Hires upscale"] = self.hr_scale + self.hr_upscale_to_x = int(self.width * self.hr_scale) + self.hr_upscale_to_y = int(self.height * self.hr_scale) + else: + self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}" + + if self.hr_resize_y == 0: + self.hr_upscale_to_x = self.hr_resize_x + self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width + elif self.hr_resize_x == 0: + self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height + self.hr_upscale_to_y = self.hr_resize_y + else: + target_w = self.hr_resize_x + target_h = self.hr_resize_y + src_ratio = self.width / self.height + dst_ratio = self.hr_resize_x / self.hr_resize_y + + if src_ratio < dst_ratio: + self.hr_upscale_to_x = self.hr_resize_x + self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width + else: + self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height + self.hr_upscale_to_y = self.hr_resize_y + + self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f + self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f + def init(self, all_prompts, all_seeds, all_subseeds): if self.enable_hr: if self.hr_checkpoint_name: @@ -948,43 +987,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers): raise Exception(f"could not find upscaler named {self.hr_upscaler}") - if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height): - self.hr_resize_x = self.width - self.hr_resize_y = self.height - self.hr_upscale_to_x = self.width - self.hr_upscale_to_y = self.height - - self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height) - self.applied_old_hires_behavior_to = (self.width, self.height) 
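The new calculate_target_resolution method exists so the hires-fix target size can be computed on its own (for example for a UI preview) without running the rest of init. A small worked example of the same arithmetic, with illustrative values only; opt_f here stands for the 8-pixel latent downscale factor defined elsewhere in processing.py:

width, height, opt_f = 832, 512, 8

# "upscale by" path (hr_resize_x == hr_resize_y == 0):
hr_scale = 2.0
assert (int(width * hr_scale), int(height * hr_scale)) == (1664, 1024)

# "resize to" path with a mismatched aspect ratio (1920x1080 requested):
hr_resize_x, hr_resize_y = 1920, 1080
hr_upscale_to_x = hr_resize_x                           # source is relatively taller, so width is matched
hr_upscale_to_y = hr_resize_x * height // width         # 1181, taller than the requested 1080
truncate_y = (hr_upscale_to_y - hr_resize_y) // opt_f   # 12 rows of latent to crop after upscaling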
- - if self.hr_resize_x == 0 and self.hr_resize_y == 0: - self.extra_generation_params["Hires upscale"] = self.hr_scale - self.hr_upscale_to_x = int(self.width * self.hr_scale) - self.hr_upscale_to_y = int(self.height * self.hr_scale) - else: - self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}" - - if self.hr_resize_y == 0: - self.hr_upscale_to_x = self.hr_resize_x - self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width - elif self.hr_resize_x == 0: - self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height - self.hr_upscale_to_y = self.hr_resize_y - else: - target_w = self.hr_resize_x - target_h = self.hr_resize_y - src_ratio = self.width / self.height - dst_ratio = self.hr_resize_x / self.hr_resize_y - - if src_ratio < dst_ratio: - self.hr_upscale_to_x = self.hr_resize_x - self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width - else: - self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height - self.hr_upscale_to_y = self.hr_resize_y - - self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f - self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f + self.calculate_target_resolution() if not state.processing_has_refined_job_count: if state.job_count == -1: diff --git a/modules/ui.py b/modules/ui.py index 4e1daa8d..cbad3afe 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -14,7 +14,7 @@ from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_grad from modules import gradio_extensons # noqa: F401 from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, devices, ui_extra_networks -from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML +from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion from modules.paths import script_path from modules.ui_common import create_refresh_button from modules.ui_gradio_extensions import reload_javascript @@ -94,11 +94,9 @@ def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resiz return "" p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y) + p.calculate_target_resolution() - with devices.autocast(): - p.init([""], [0], [0]) - - return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" + return f"from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" def resize_from_to_html(width, height, scale_by): @@ -436,11 +434,12 @@ def create_ui(): with FormRow(elem_classes="checkboxes-row", variant="compact"): restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") - enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr") - hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) elif category == "hires_fix": - with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: + with InputAccordion(False, label="Hires. 
fix") as enable_hr: + with enable_hr.extra(): + hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False, min_width=0) + with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") @@ -568,13 +567,6 @@ def create_ui(): show_progress=False, ) - enable_hr.change( - fn=lambda x: gr_show(x), - inputs=[enable_hr], - outputs=[hr_options], - show_progress = False, - ) - txt2img_paste_fields = [ (toprow.prompt, "Prompt"), (toprow.negative_prompt, "Negative prompt"), @@ -594,7 +586,6 @@ def create_ui(): (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (enable_hr, lambda d: "Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d)), - (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d))), (hr_scale, "Hires upscale"), (hr_upscaler, "Hires upscaler"), (hr_second_pass_steps, "Hires steps"), diff --git a/modules/ui_components.py b/modules/ui_components.py index 8f8a7088..598ce738 100644 --- a/modules/ui_components.py +++ b/modules/ui_components.py @@ -72,3 +72,34 @@ class DropdownEditable(FormComponent, gr.Dropdown): def get_block_name(self): return "dropdown" + +class InputAccordion(gr.Checkbox): + global_index = 0 + + def __init__(self, value, **kwargs): + self.accordion_id = kwargs.get('elem_id') + if self.accordion_id is None: + self.accordion_id = f"input-accordion-{self.global_index}" + self.global_index += 1 + + kwargs['elem_id'] = self.accordion_id + "-checkbox" + kwargs['visible'] = False + super().__init__(value, **kwargs) + + self.change(fn=None, _js='function(checked){ inputAccordionChecked("' + self.accordion_id + '", checked); }', inputs=[self]) + + self.accordion = gr.Accordion(kwargs.get('label', 'Accordion'), open=value, elem_id=self.accordion_id, elem_classes=['input-accordion']) + + def extra(self): + return gr.Column(elem_id=self.accordion_id + '-extra', elem_classes='input-accordion-extra', min_width=0) + + def __enter__(self): + self.accordion.__enter__() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.accordion.__exit__(exc_type, exc_val, exc_tb) + + def get_block_name(self): + return "checkbox" + diff --git a/style.css b/style.css index dfd5ca36..5163e53c 100644 --- a/style.css +++ b/style.css @@ -329,12 +329,6 @@ div.gradio-accordion { border-radius: 0 0.5rem 0.5rem 0; } -#txtimg_hr_finalres{ - min-height: 0 !important; - padding: .625rem .75rem; - margin-left: -0.75em -} - #img2img_scale_resolution_preview.block{ display: flex; align-items: end; @@ -1016,3 +1010,12 @@ div.block.gradio-box.popup-dialog, .popup-dialog { div.block.gradio-box.popup-dialog > div:last-child, .popup-dialog > div:last-child{ margin-top: 1em; } + +div.block.input-accordion{ + margin-bottom: 0.4em; +} + +.input-accordion-extra{ + flex: 0 0 auto !important; + margin: 0 0.5em 0 auto; +} -- cgit v1.2.3 From 33446acf47a8c3e0c0964782189562df3c4bcf4f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 12:41:41 +0300 
Subject: face restoration and tiling moved to settings - use "Options in main UI" setting if you want them back --- modules/generation_parameters_copypaste.py | 2 ++ modules/img2img.py | 4 +--- modules/processing.py | 11 +++++++++-- modules/shared_options.py | 2 ++ modules/txt2img.py | 4 +--- modules/ui.py | 12 ++---------- 6 files changed, 17 insertions(+), 18 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index d932c67d..bdff3266 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -343,6 +343,8 @@ infotext_to_setting_name_mapping = [ ('Pad conds', 'pad_cond_uncond'), ('VAE Encoder', 'sd_vae_encode_method'), ('VAE Decoder', 'sd_vae_decode_method'), + ('Tiling', 'tiling'), + ('Face restoration', 'face_restoration'), ] diff --git a/modules/img2img.py b/modules/img2img.py index e06ac1d6..c7bbbac8 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -116,7 +116,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal process_images(p) -def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): +def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5 @@ -179,8 +179,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s cfg_scale=cfg_scale, width=width, height=height, - restore_faces=restore_faces, - tiling=tiling, init_images=[image], mask=mask, mask_blur=mask_blur, diff --git a/modules/processing.py b/modules/processing.py index 7819644c..68a8f1c6 100644 --- a/modules/processing.py +++ 
b/modules/processing.py @@ -111,7 +111,7 @@ class StableDiffusionProcessing: cached_uc = [None, None] cached_c = [None, None] - def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = None, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): + def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = None, tiling: bool = None, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = None, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): if sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) @@ -564,7 +564,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "CFG scale": p.cfg_scale, "Image CFG scale": getattr(p, 'image_cfg_scale', None), "Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index], - "Face restoration": (opts.face_restoration_model if p.restore_faces else None), + "Face restoration": opts.face_restoration_model if p.restore_faces else None, "Size": f"{p.width}x{p.height}", "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), "Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra), @@ -580,6 +580,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" and opts.randn_source != "NV" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, + "Tiling": "True" if p.tiling else None, **p.extra_generation_params, "Version": program_version() if opts.add_version_to_infotext else None, "User": p.user if opts.add_user_name_to_info else None, @@ -645,6 +646,12 
@@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: seed = get_fixed_seed(p.seed) subseed = get_fixed_seed(p.subseed) + if p.restore_faces is None: + p.restore_faces = opts.face_restoration + + if p.tiling is None: + p.tiling = opts.tiling + modules.sd_hijack.model_hijack.apply_circular(p.tiling) modules.sd_hijack.model_hijack.clear_comments() diff --git a/modules/shared_options.py b/modules/shared_options.py index 7468bc81..f72859d9 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -92,6 +92,7 @@ options_templates.update(options_section(('upscaling', "Upscaling"), { })) options_templates.update(options_section(('face-restoration', "Face restoration"), { + "face_restoration": OptionInfo(False, "Restore faces").info("will use a third-party model on generation result to reconstruct faces"), "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in shared.face_restorers]}), "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"), "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), @@ -138,6 +139,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), + "tiling": OptionInfo(False, "Tiling").info("produce a tileable picture"), })) options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { diff --git a/modules/txt2img.py b/modules/txt2img.py index edad8930..5ea96bba 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -9,7 +9,7 @@ from modules.ui import plaintext_to_html import gradio as gr -def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): +def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, 
request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) p = processing.StableDiffusionProcessingTxt2Img( @@ -32,8 +32,6 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step cfg_scale=cfg_scale, width=width, height=height, - restore_faces=restore_faces, - tiling=tiling, enable_hr=enable_hr, denoising_strength=denoising_strength if enable_hr else None, hr_scale=hr_scale, diff --git a/modules/ui.py b/modules/ui.py index cbad3afe..09a826fd 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -432,8 +432,7 @@ def create_ui(): elif category == "checkboxes": with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") + pass elif category == "hires_fix": with InputAccordion(False, label="Hires. fix") as enable_hr: @@ -516,8 +515,6 @@ def create_ui(): toprow.ui_styles.dropdown, steps, sampler_name, - restore_faces, - tiling, batch_count, batch_size, cfg_scale, @@ -572,7 +569,6 @@ def create_ui(): (toprow.negative_prompt, "Negative prompt"), (steps, "Steps"), (sampler_name, "Sampler"), - (restore_faces, "Face restoration"), (cfg_scale, "CFG scale"), (seed, "Seed"), (width, "Size-1"), @@ -792,8 +788,7 @@ def create_ui(): elif category == "checkboxes": with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") + pass elif category == "batch": if not opts.dimensions_and_batch_together: @@ -866,8 +861,6 @@ def create_ui(): mask_blur, mask_alpha, inpainting_fill, - restore_faces, - tiling, batch_count, batch_size, cfg_scale, @@ -959,7 +952,6 @@ def create_ui(): (toprow.negative_prompt, "Negative prompt"), (steps, "Steps"), (sampler_name, "Sampler"), - (restore_faces, "Face restoration"), (cfg_scale, "CFG scale"), (image_cfg_scale, "Image CFG scale"), (seed, "Seed"), -- cgit v1.2.3 From faca86620d53b122eac00fd0a4ab54b3a2e3108e Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 12:58:00 +0300 Subject: linter fixes --- javascript/inputAccordion.js | 12 ++++++------ modules/ui.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'modules/ui.py') diff --git a/javascript/inputAccordion.js b/javascript/inputAccordion.js index a5eef229..f2839852 100644 --- a/javascript/inputAccordion.js +++ b/javascript/inputAccordion.js @@ -10,16 +10,16 @@ var observerAccordionOpen = new MutationObserver(function(mutations) { checkbox.checked = open; updateInput(checkbox); - extra = gradioApp().querySelector('#' + accordion.id + "-extra"); - if(extra){ + var extra = gradioApp().querySelector('#' + accordion.id + "-extra"); + if (extra) { extra.style.display = open ? 
"" : "none"; } }); }); -function inputAccordionChecked(id, checked){ +function inputAccordionChecked(id, checked) { var label = gradioApp().querySelector('#' + id + " .label-wrap"); - if(label.classList.contains('open') != checked){ + if (label.classList.contains('open') != checked) { label.click(); } } @@ -30,8 +30,8 @@ onUiLoaded(function() { observerAccordionOpen.observe(labelWrap, {attributes: true, attributeFilter: ['class']}); var extra = gradioApp().querySelector('#' + accordion.id + "-extra"); - if(extra){ - labelWrap.insertBefore(extra, labelWrap.lastElementChild) + if (extra) { + labelWrap.insertBefore(extra, labelWrap.lastElementChild); } } }); diff --git a/modules/ui.py b/modules/ui.py index 09a826fd..b87e95a6 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -13,7 +13,7 @@ from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import gradio_extensons # noqa: F401 -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, devices, ui_extra_networks +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, ui_extra_networks from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion from modules.paths import script_path from modules.ui_common import create_refresh_button -- cgit v1.2.3 From 237b704172480561a7616f3b695ffc2bd215957f Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 10 Aug 2023 21:42:26 +0900 Subject: use new style constructor --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index b87e95a6..587d2bd8 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -628,7 +628,7 @@ def create_ui(): extra_tabs = gr.Tabs(elem_id="img2img_extra_tabs") extra_tabs.__enter__() - with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, FormRow().style(equal_height=False): + with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, FormRow(equal_height=False): with gr.Column(variant='compact', elem_id="img2img_settings"): copy_image_buttons = [] copy_image_destinations = {} -- cgit v1.2.3 From 1b3093fe3aedb20aa8d505ceeea7900ac592e6fe Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 15:58:53 +0300 Subject: fix --use-textbox-seed --- modules/processing.py | 12 ++++++++++-- modules/ui.py | 6 +++++- modules/ui_loadsave.py | 5 ++++- 3 files changed, 19 insertions(+), 4 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/processing.py b/modules/processing.py index 68a8f1c6..f06c374a 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -521,7 +521,15 @@ def decode_latent_batch(model, batch, target_device=None, check_for_nans=False): def get_fixed_seed(seed): - if seed is None or seed == '' or seed == -1: + if seed == '' or seed is None: + seed = -1 + elif isinstance(seed, str): + try: + seed = int(seed) + except Exception: + seed = -1 + + if seed == -1: return int(random.randrange(4294967294)) 
return seed @@ -728,7 +736,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: # strength, which is saved as "Model Strength: 1.0" in the infotext if n == 0: with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file: - processed = Processed(p, [], p.seed, "") + processed = Processed(p, []) file.write(processed.infotext(p, 0)) p.setup_conds() diff --git a/modules/ui.py b/modules/ui.py index b87e95a6..e7433cbd 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -144,7 +144,11 @@ def interrogate_deepbooru(image): def create_seed_inputs(target_interface): with FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): - seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") + if cmd_opts.use_textbox_seed: + seed = gr.Textbox(label='Seed', value="", elem_id=f"{target_interface}_seed") + else: + seed = gr.Number(label='Seed', value=-1, elem_id=f"{target_interface}_seed") + random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') diff --git a/modules/ui_loadsave.py b/modules/ui_loadsave.py index 0052a5cc..99d763e1 100644 --- a/modules/ui_loadsave.py +++ b/modules/ui_loadsave.py @@ -8,7 +8,7 @@ from modules.ui_components import ToolButton class UiLoadsave: - """allows saving and restorig default values for gradio components""" + """allows saving and restoring default values for gradio components""" def __init__(self, filename): self.filename = filename @@ -48,6 +48,9 @@ class UiLoadsave: elif condition and not condition(saved_value): pass else: + if isinstance(x, gr.Textbox) and field == 'value': # due to an undersirable behavior of gr.Textbox, if you give it an int value instead of str, everything dies + saved_value = str(saved_value) + setattr(obj, field, saved_value) if init_field is not None: init_field(saved_value) -- cgit v1.2.3 From 4f6582cb668d4f4beea64f1c7baae721c85cfc69 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 16:10:42 +0300 Subject: add precision=0 to gr.Number seed --- modules/ui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index e7433cbd..c08f412d 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -147,7 +147,7 @@ def create_seed_inputs(target_interface): if cmd_opts.use_textbox_seed: seed = gr.Textbox(label='Seed', value="", elem_id=f"{target_interface}_seed") else: - seed = gr.Number(label='Seed', value=-1, elem_id=f"{target_interface}_seed") + seed = gr.Number(label='Seed', value=-1, elem_id=f"{target_interface}_seed", precision=0) random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') @@ -159,7 +159,7 @@ def create_seed_inputs(target_interface): with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: seed_extras.append(seed_extra_row_1) - subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed") + subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed", precision=0) random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") reuse_subseed = ToolButton(reuse_symbol, 
elem_id=f"{target_interface}_reuse_subseed") subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") -- cgit v1.2.3 From 64311faa6848d641cc452115e4e1eb47d2a7b519 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 12 Aug 2023 12:39:59 +0300 Subject: put refiner into main UI, into the new accordions section add VAE from main model into infotext, not from refiner model option to make scripts UI without gr.Group fix inconsistencies with refiner when usings samplers that do more denoising than steps --- modules/processing.py | 22 ++++++++----- modules/processing_scripts/refiner.py | 55 +++++++++++++++++++++++++++++++++ modules/scripts.py | 24 ++++++++++----- modules/sd_models.py | 3 ++ modules/sd_samplers_cfg_denoiser.py | 6 +++- modules/sd_samplers_common.py | 40 ++++++++++++++---------- modules/sd_samplers_kdiffusion.py | 3 +- modules/shared_items.py | 4 +-- modules/shared_options.py | 2 -- modules/ui.py | 58 ++++++++++++++++++++--------------- modules/ui_components.py | 18 ++++++++--- style.css | 32 +++++++++++-------- 12 files changed, 188 insertions(+), 79 deletions(-) create mode 100644 modules/processing_scripts/refiner.py (limited to 'modules/ui.py') diff --git a/modules/processing.py b/modules/processing.py index 131c4c3c..5996cbac 100755 --- a/modules/processing.py +++ b/modules/processing.py @@ -373,9 +373,10 @@ class StableDiffusionProcessing: negative_prompts = prompt_parser.SdConditioning(self.negative_prompts, width=self.width, height=self.height, is_negative_prompt=True) sampler_config = sd_samplers.find_sampler_config(self.sampler_name) - self.step_multiplier = 2 if sampler_config and sampler_config.options.get("second_order", False) else 1 - self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, self.steps * self.step_multiplier, [self.cached_uc], self.extra_network_data) - self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, self.steps * self.step_multiplier, [self.cached_c], self.extra_network_data) + total_steps = sampler_config.total_steps(self.steps) if sampler_config else self.steps + self.step_multiplier = total_steps // self.steps + self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, total_steps, [self.cached_uc], self.extra_network_data) + self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, total_steps, [self.cached_c], self.extra_network_data) def get_conds(self): return self.c, self.uc @@ -579,8 +580,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Size": f"{p.width}x{p.height}", "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), "Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra), - "VAE hash": sd_vae.get_loaded_vae_hash() if opts.add_model_hash_to_info else None, - "VAE": sd_vae.get_loaded_vae_name() if opts.add_model_name_to_info else None, + "VAE hash": p.loaded_vae_hash if opts.add_model_hash_to_info else None, + "VAE": p.loaded_vae_name if opts.add_model_name_to_info else None, "Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])), "Variation seed strength": (None if p.subseed_strength == 0 else 
p.subseed_strength), "Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"), @@ -669,6 +670,9 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if p.tiling is None: p.tiling = opts.tiling + p.loaded_vae_name = sd_vae.get_loaded_vae_name() + p.loaded_vae_hash = sd_vae.get_loaded_vae_hash() + modules.sd_hijack.model_hijack.apply_circular(p.tiling) modules.sd_hijack.model_hijack.clear_comments() @@ -1188,8 +1192,12 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): hr_prompts = prompt_parser.SdConditioning(self.hr_prompts, width=self.hr_upscale_to_x, height=self.hr_upscale_to_y) hr_negative_prompts = prompt_parser.SdConditioning(self.hr_negative_prompts, width=self.hr_upscale_to_x, height=self.hr_upscale_to_y, is_negative_prompt=True) - self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, self.steps * self.step_multiplier, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data) - self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, self.steps * self.step_multiplier, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data) + sampler_config = sd_samplers.find_sampler_config(self.hr_sampler_name or self.sampler_name) + steps = self.hr_second_pass_steps or self.steps + total_steps = sampler_config.total_steps(steps) if sampler_config else steps + + self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, total_steps, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data) + self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, total_steps, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data) def setup_conds(self): super().setup_conds() diff --git a/modules/processing_scripts/refiner.py b/modules/processing_scripts/refiner.py new file mode 100644 index 00000000..5a82991a --- /dev/null +++ b/modules/processing_scripts/refiner.py @@ -0,0 +1,55 @@ +import gradio as gr + +from modules import scripts, sd_models +from modules.ui_common import create_refresh_button +from modules.ui_components import InputAccordion + + +class ScriptRefiner(scripts.Script): + section = "accordions" + create_group = False + + def __init__(self): + pass + + def title(self): + return "Refiner" + + def show(self, is_img2img): + return scripts.AlwaysVisible + + def ui(self, is_img2img): + with InputAccordion(False, label="Refiner", elem_id=self.elem_id("enable")) as enable_refiner: + with gr.Row(): + refiner_checkpoint = gr.Dropdown(label='Checkpoint', elem_id=self.elem_id("checkpoint"), choices=sd_models.checkpoint_tiles(), value='', tooltip="switch to another model in the middle of generation") + create_refresh_button(refiner_checkpoint, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, self.elem_id("checkpoint_refresh")) + + refiner_switch_at = gr.Slider(value=0.8, label="Switch at", minimum=0.01, maximum=1.0, step=0.01, elem_id=self.elem_id("switch_at"), tooltip="fraction of sampling steps when the swtch to refiner model should happen; 1=never, 0.5=switch in the middle of generation") + + def lookup_checkpoint(title): + info = sd_models.get_closet_checkpoint_match(title) + return None if info is None else info.title + + self.infotext_fields = [ + (enable_refiner, lambda d: 'Refiner' in d), + (refiner_checkpoint, lambda d: 
lookup_checkpoint(d.get('Refiner'))), + (refiner_switch_at, 'Refiner switch at'), + ] + + return enable_refiner, refiner_checkpoint, refiner_switch_at + + def before_process(self, p, enable_refiner, refiner_checkpoint, refiner_switch_at): + # the actual implementation is in sd_samplers_common.py, apply_refiner + + p.refiner_checkpoint_info = None + p.refiner_switch_at = None + + if not enable_refiner or refiner_checkpoint in (None, "", "None"): + return + + refiner_checkpoint_info = sd_models.get_closet_checkpoint_match(refiner_checkpoint) + if refiner_checkpoint_info is None: + raise Exception(f'Could not find checkpoint with name {refiner_checkpoint}') + + p.refiner_checkpoint_info = refiner_checkpoint_info + p.refiner_switch_at = refiner_switch_at diff --git a/modules/scripts.py b/modules/scripts.py index f7d060aa..51da732a 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -37,7 +37,10 @@ class Script: is_img2img = False group = None - """A gr.Group component that has all script's UI inside it""" + """A gr.Group component that has all script's UI inside it.""" + + create_group = True + """If False, for alwayson scripts, a group component will not be created.""" infotext_fields = None """if set in ui(), this is a list of pairs of gradio component + text; the text will be used when @@ -232,6 +235,7 @@ class Script: """ pass + current_basedir = paths.script_path @@ -250,7 +254,7 @@ postprocessing_scripts_data = [] ScriptClassData = namedtuple("ScriptClassData", ["script_class", "path", "basedir", "module"]) -def list_scripts(scriptdirname, extension): +def list_scripts(scriptdirname, extension, *, include_extensions=True): scripts_list = [] basedir = os.path.join(paths.script_path, scriptdirname) @@ -258,8 +262,9 @@ def list_scripts(scriptdirname, extension): for filename in sorted(os.listdir(basedir)): scripts_list.append(ScriptFile(paths.script_path, filename, os.path.join(basedir, filename))) - for ext in extensions.active(): - scripts_list += ext.list_files(scriptdirname, extension) + if include_extensions: + for ext in extensions.active(): + scripts_list += ext.list_files(scriptdirname, extension) scripts_list = [x for x in scripts_list if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)] @@ -288,7 +293,7 @@ def load_scripts(): postprocessing_scripts_data.clear() script_callbacks.clear_callbacks() - scripts_list = list_scripts("scripts", ".py") + scripts_list = list_scripts("scripts", ".py") + list_scripts("modules/processing_scripts", ".py", include_extensions=False) syspath = sys.path @@ -429,10 +434,13 @@ class ScriptRunner: if script.alwayson and script.section != section: continue - with gr.Group(visible=script.alwayson) as group: - self.create_script_ui(script) + if script.create_group: + with gr.Group(visible=script.alwayson) as group: + self.create_script_ui(script) - script.group = group + script.group = group + else: + self.create_script_ui(script) def prepare_ui(self): self.inputs = [None] diff --git a/modules/sd_models.py b/modules/sd_models.py index a178adca..f6fbdcd6 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -147,6 +147,9 @@ re_strip_checksum = re.compile(r"\s*\[[^]]+]\s*$") def get_closet_checkpoint_match(search_string): + if not search_string: + return None + checkpoint_info = checkpoint_aliases.get(search_string, None) if checkpoint_info is not None: return checkpoint_info diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index a532e013..113425b2 100644 --- 
a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -45,6 +45,11 @@ class CFGDenoiser(torch.nn.Module): self.nmask = None self.init_latent = None self.steps = None + """number of steps as specified by user in UI""" + + self.total_steps = None + """expected number of calls to denoiser calculated from self.steps and specifics of the selected sampler""" + self.step = 0 self.image_cfg_scale = None self.padded_cond_uncond = False @@ -56,7 +61,6 @@ class CFGDenoiser(torch.nn.Module): def inner_model(self): raise NotImplementedError() - def combine_denoised(self, x_out, conds_list, uncond, cond_scale): denoised_uncond = x_out[-uncond.shape[0]:] denoised = torch.clone(denoised_uncond) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 35c4d657..85f3c7e0 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -7,7 +7,16 @@ from modules import devices, images, sd_vae_approx, sd_samplers, sd_vae_taesd, s from modules.shared import opts, state import k_diffusion.sampling -SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options']) + +SamplerDataTuple = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options']) + + +class SamplerData(SamplerDataTuple): + def total_steps(self, steps): + if self.options.get("second_order", False): + steps = steps * 2 + + return steps def setup_img2img_steps(p, steps=None): @@ -131,31 +140,26 @@ def replace_torchsde_browinan(): replace_torchsde_browinan() -def apply_refiner(sampler): - completed_ratio = sampler.step / sampler.steps +def apply_refiner(cfg_denoiser): + completed_ratio = cfg_denoiser.step / cfg_denoiser.total_steps + refiner_switch_at = cfg_denoiser.p.refiner_switch_at + refiner_checkpoint_info = cfg_denoiser.p.refiner_checkpoint_info - if completed_ratio <= shared.opts.sd_refiner_switch_at: + if refiner_switch_at is not None and completed_ratio <= refiner_switch_at: return False - if shared.opts.sd_refiner_checkpoint == "None": + if refiner_checkpoint_info is None or shared.sd_model.sd_checkpoint_info == refiner_checkpoint_info: return False - if shared.sd_model.sd_checkpoint_info.title == shared.opts.sd_refiner_checkpoint: - return False - - refiner_checkpoint_info = sd_models.get_closet_checkpoint_match(shared.opts.sd_refiner_checkpoint) - if refiner_checkpoint_info is None: - raise Exception(f'Could not find checkpoint with name {shared.opts.sd_refiner_checkpoint}') - - sampler.p.extra_generation_params['Refiner'] = refiner_checkpoint_info.short_title - sampler.p.extra_generation_params['Refiner switch at'] = shared.opts.sd_refiner_switch_at + cfg_denoiser.p.extra_generation_params['Refiner'] = refiner_checkpoint_info.short_title + cfg_denoiser.p.extra_generation_params['Refiner switch at'] = refiner_switch_at with sd_models.SkipWritingToConfig(): sd_models.reload_model_weights(info=refiner_checkpoint_info) devices.torch_gc() - sampler.p.setup_conds() - sampler.update_inner_model() + cfg_denoiser.p.setup_conds() + cfg_denoiser.update_inner_model() return True @@ -192,7 +196,7 @@ class Sampler: self.sampler_noises = None self.stop_at = None self.eta = None - self.config = None # set by the function calling the constructor + self.config: SamplerData = None # set by the function calling the constructor self.last_latent = None self.s_min_uncond = None self.s_churn = 0.0 @@ -208,6 +212,7 @@ class Sampler: self.p = None self.model_wrap_cfg = None self.sampler_extra_args = None + self.options = {} def callback_state(self, d): step = 
d['i'] @@ -220,6 +225,7 @@ class Sampler: def launch_sampling(self, steps, func): self.model_wrap_cfg.steps = steps + self.model_wrap_cfg.total_steps = self.config.total_steps(steps) state.sampling_steps = steps state.sampling_step = 0 diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index d10fe12e..1f8e9c4b 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -64,9 +64,10 @@ class CFGDenoiserKDiffusion(sd_samplers_cfg_denoiser.CFGDenoiser): class KDiffusionSampler(sd_samplers_common.Sampler): - def __init__(self, funcname, sd_model): + def __init__(self, funcname, sd_model, options=None): super().__init__(funcname) + self.options = options or {} self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname) self.model_wrap_cfg = CFGDenoiserKDiffusion(self) diff --git a/modules/shared_items.py b/modules/shared_items.py index e4ec40a8..754166d2 100644 --- a/modules/shared_items.py +++ b/modules/shared_items.py @@ -69,8 +69,8 @@ def reload_hypernetworks(): ui_reorder_categories_builtin_items = [ "inpaint", "sampler", + "accordions", "checkboxes", - "hires_fix", "dimensions", "cfg", "seed", @@ -86,7 +86,7 @@ def ui_reorder_categories(): sections = {} for script in scripts.scripts_txt2img.scripts + scripts.scripts_img2img.scripts: - if isinstance(script.section, str): + if isinstance(script.section, str) and script.section not in ui_reorder_categories_builtin_items: sections[script.section] = 1 yield from sections diff --git a/modules/shared_options.py b/modules/shared_options.py index 1e5b64ea..9ae51f18 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -140,8 +140,6 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), "tiling": OptionInfo(False, "Tiling", infotext='Tiling').info("produce a tileable picture"), - "sd_refiner_checkpoint": OptionInfo("None", "Refiner checkpoint", gr.Dropdown, lambda: {"choices": ["None"] + shared_items.list_checkpoint_tiles()}, refresh=shared_items.refresh_checkpoints, infotext="Refiner").info("switch to another model in the middle of generation"), - "sd_refiner_switch_at": OptionInfo(1.0, "Refiner switch at", gr.Slider, {"minimum": 0.01, "maximum": 1.0, "step": 0.01}, infotext='Refiner switch at').info("fraction of sampling steps when the swtch to refiner model should happen; 1=never, 0.5=switch in the middle of generation"), })) options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { diff --git a/modules/ui.py b/modules/ui.py index 05292734..3321b94d 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -438,35 +438,38 @@ def create_ui(): with FormRow(elem_classes="checkboxes-row", variant="compact"): pass - elif category == "hires_fix": - with InputAccordion(False, label="Hires. fix") as enable_hr: - with enable_hr.extra(): - hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False, min_width=0) + elif category == "accordions": + with gr.Row(elem_id="txt2img_accordions", elem_classes="accordions"): + with InputAccordion(False, label="Hires. 
fix", elem_id="txt2img_hr") as enable_hr: + with enable_hr.extra(): + hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False, min_width=0) - with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): - hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): + hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") - with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): - hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") - hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") - hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") + with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): + hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") + hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") + hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") - with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: + with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: - hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint") - create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)}, "hr_checkpoint_refresh") + hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint") + create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)}, "hr_checkpoint_refresh") - hr_sampler_name = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + sd_samplers.visible_sampler_names(), value="Use same sampler") + hr_sampler_name = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + sd_samplers.visible_sampler_names(), 
value="Use same sampler") - with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: - with gr.Column(scale=80): - with gr.Row(): - hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) - with gr.Column(scale=80): - with gr.Row(): - hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) + with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: + with gr.Column(scale=80): + with gr.Row(): + hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) + with gr.Column(scale=80): + with gr.Row(): + hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) + + scripts.scripts_txt2img.setup_ui_for_section(category) elif category == "batch": if not opts.dimensions_and_batch_together: @@ -482,7 +485,7 @@ def create_ui(): with FormGroup(elem_id="txt2img_script_container"): custom_inputs = scripts.scripts_txt2img.setup_ui() - else: + if category not in {"accordions"}: scripts.scripts_txt2img.setup_ui_for_section(category) hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] @@ -794,6 +797,10 @@ def create_ui(): with FormRow(elem_classes="checkboxes-row", variant="compact"): pass + elif category == "accordions": + with gr.Row(elem_id="img2img_accordions", elem_classes="accordions"): + scripts.scripts_img2img.setup_ui_for_section(category) + elif category == "batch": if not opts.dimensions_and_batch_together: with FormRow(elem_id="img2img_column_batch"): @@ -836,7 +843,8 @@ def create_ui(): inputs=[], outputs=[inpaint_controls, mask_alpha], ) - else: + + if category not in {"accordions"}: scripts.scripts_img2img.setup_ui_for_section(category) img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) diff --git a/modules/ui_components.py b/modules/ui_components.py index bfe2fbd9..d08b2b99 100644 --- a/modules/ui_components.py +++ b/modules/ui_components.py @@ -87,13 +87,23 @@ class InputAccordion(gr.Checkbox): self.accordion_id = f"input-accordion-{InputAccordion.global_index}" InputAccordion.global_index += 1 - kwargs['elem_id'] = self.accordion_id + "-checkbox" - kwargs['visible'] = False - super().__init__(value, **kwargs) + kwargs_checkbox = { + **kwargs, + "elem_id": f"{self.accordion_id}-checkbox", + "visible": False, + } + super().__init__(value, **kwargs_checkbox) self.change(fn=None, _js='function(checked){ inputAccordionChecked("' + self.accordion_id + '", checked); }', inputs=[self]) - self.accordion = gr.Accordion(kwargs.get('label', 'Accordion'), open=value, elem_id=self.accordion_id, elem_classes=['input-accordion']) + kwargs_accordion = { + **kwargs, + "elem_id": self.accordion_id, + "label": kwargs.get('label', 'Accordion'), + "elem_classes": 
['input-accordion'], + "open": value, + } + self.accordion = gr.Accordion(**kwargs_accordion) def extra(self): """Allows you to put something into the label of the accordion. diff --git a/style.css b/style.css index 4cdce87c..260b1056 100644 --- a/style.css +++ b/style.css @@ -166,16 +166,6 @@ a{ color: var(--button-secondary-text-color-hover); } -.checkboxes-row{ - margin-bottom: 0.5em; - margin-left: 0em; -} -.checkboxes-row > div{ - flex: 0; - white-space: nowrap; - min-width: auto !important; -} - button.custom-button{ border-radius: var(--button-large-radius); padding: var(--button-large-padding); @@ -352,7 +342,7 @@ div.block.gradio-accordion { } div.dimensions-tools{ - min-width: 0 !important; + min-width: 1.6em !important; max-width: fit-content; flex-direction: column; place-content: center; @@ -1012,10 +1002,28 @@ div.block.gradio-box.popup-dialog > div:last-child, .popup-dialog > div:last-chi } div.block.input-accordion{ - margin-bottom: 0.4em; + } .input-accordion-extra{ flex: 0 0 auto !important; margin: 0 0.5em 0 auto; } + +div.accordions > div.input-accordion{ + min-width: fit-content !important; +} + +div.accordions > div.gradio-accordion .label-wrap span{ + white-space: nowrap; + margin-right: 0.25em; +} + +div.accordions{ + gap: 0.5em; +} + +div.accordions > div.input-accordion.input-accordion-open{ + flex: 1 auto; +} + -- cgit v1.2.3 From f0b72b81211881e083c84cff585380bb70d17271 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 12 Aug 2023 17:46:13 +0300 Subject: move seed, variation seed and variation seed strength to a single row, dump resize seed from UI add a way for scripts to register a callback for before/after just a single component's creation --- modules/img2img.py | 8 +-- modules/processing_scripts/seed.py | 95 +++++++++++++++++++++++++++++ modules/scripts.py | 77 +++++++++++++++++++++++- modules/shared_items.py | 1 + modules/txt2img.py | 8 +-- modules/ui.py | 119 ++++--------------------------------- style.css | 9 ++- 7 files changed, 191 insertions(+), 126 deletions(-) create mode 100644 modules/processing_scripts/seed.py (limited to 'modules/ui.py') diff --git a/modules/img2img.py b/modules/img2img.py index c7bbbac8..ac9fd3f8 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -116,7 +116,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal process_images(p) -def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): +def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, 
init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5 @@ -166,12 +166,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s prompt=prompt, negative_prompt=negative_prompt, styles=prompt_styles, - seed=seed, - subseed=subseed, - subseed_strength=subseed_strength, - seed_resize_from_h=seed_resize_from_h, - seed_resize_from_w=seed_resize_from_w, - seed_enable_extras=seed_enable_extras, sampler_name=sampler_name, batch_size=batch_size, n_iter=n_iter, diff --git a/modules/processing_scripts/seed.py b/modules/processing_scripts/seed.py new file mode 100644 index 00000000..e0911bbe --- /dev/null +++ b/modules/processing_scripts/seed.py @@ -0,0 +1,95 @@ +import json + +import gradio as gr + +from modules import scripts, ui, errors +from modules.shared import cmd_opts +from modules.ui_components import ToolButton + + +class ScriptSeed(scripts.ScriptBuiltin): + section = "seed" + create_group = False + + def __init__(self): + self.seed = None + self.reuse_seed = None + self.reuse_subseed = None + + def title(self): + return "Seed" + + def show(self, is_img2img): + return scripts.AlwaysVisible + + def ui(self, is_img2img): + with gr.Row(elem_id=self.elem_id("seed_row")): + if cmd_opts.use_textbox_seed: + self.seed = gr.Textbox(label='Seed', value="", elem_id=self.elem_id("seed")) + else: + self.seed = gr.Number(label='Seed', value=-1, elem_id=self.elem_id("seed"), precision=0) + + random_seed = ToolButton(ui.random_symbol, elem_id=self.elem_id("random_seed"), label='Random seed') + reuse_seed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_seed"), label='Reuse seed') + + subseed = gr.Number(label='Variation seed', value=-1, elem_id=self.elem_id("subseed"), precision=0) + + random_subseed = ToolButton(ui.random_symbol, elem_id=self.elem_id("random_subseed")) + reuse_subseed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_subseed")) + + subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=self.elem_id("subseed_strength")) + + random_seed.click(fn=None, _js="function(){setRandomSeed('" + self.elem_id("seed") + "')}", show_progress=False, inputs=[], outputs=[]) + random_subseed.click(fn=None, _js="function(){setRandomSeed('" + self.elem_id("subseed") + "')}", show_progress=False, inputs=[], outputs=[]) + + self.infotext_fields = [ + (self.seed, "Seed"), + (subseed, "Variation seed"), + (subseed_strength, "Variation seed strength"), + ] + + self.on_after_component(lambda x: connect_reuse_seed(self.seed, reuse_seed, x.component, False), elem_id=f'generation_info_{self.tabname}') + self.on_after_component(lambda x: connect_reuse_seed(self.seed, reuse_subseed, x.component, True), elem_id=f'generation_info_{self.tabname}') + + return self.seed, subseed, subseed_strength + + def before_process(self, p, seed, subseed, subseed_strength): + p.seed 
= seed + + if subseed_strength > 0: + p.subseed = subseed + p.subseed_strength = subseed_strength + + +def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, is_subseed): + """ Connects a 'reuse (sub)seed' button's click event so that it copies last used + (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength + was 0, i.e. no variation seed was used, it copies the normal seed value instead.""" + + def copy_seed(gen_info_string: str, index): + res = -1 + + try: + gen_info = json.loads(gen_info_string) + index -= gen_info.get('index_of_first_image', 0) + + if is_subseed and gen_info.get('subseed_strength', 0) > 0: + all_subseeds = gen_info.get('all_subseeds', [-1]) + res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0] + else: + all_seeds = gen_info.get('all_seeds', [-1]) + res = all_seeds[index if 0 <= index < len(all_seeds) else 0] + + except json.decoder.JSONDecodeError: + if gen_info_string: + errors.report(f"Error parsing JSON generation info: {gen_info_string}") + + return [res, gr.update()] + + reuse_seed.click( + fn=copy_seed, + _js="(x, y) => [x, selected_gallery_index()]", + show_progress=False, + inputs=[generation_info, seed], + outputs=[seed, seed] + ) diff --git a/modules/scripts.py b/modules/scripts.py index 51da732a..66fbec0d 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -3,6 +3,7 @@ import re import sys import inspect from collections import namedtuple +from dataclasses import dataclass import gradio as gr @@ -21,6 +22,11 @@ class PostprocessBatchListArgs: self.images = images +@dataclass +class OnComponent: + component: gr.blocks.Block + + class Script: name = None """script's internal name derived from title""" @@ -35,6 +41,7 @@ class Script: is_txt2img = False is_img2img = False + tabname = None group = None """A gr.Group component that has all script's UI inside it.""" @@ -55,6 +62,12 @@ class Script: api_info = None """Generated value of type modules.api.models.ScriptInfo with information about the script for API""" + on_before_component_elem_id = [] + """list of callbacks to be called before a component with an elem_id is created""" + + on_after_component_elem_id = [] + """list of callbacks to be called after a component with an elem_id is created""" + def title(self): """this function should return the title of the script. This is what will be displayed in the dropdown menu.""" @@ -215,6 +228,24 @@ class Script: pass + def on_before_component(self, callback, *, elem_id): + """ + Calls callback before a component is created. The callback function is called with a single argument of type OnComponent. + + This function is an alternative to before_component in that it also cllows to run before a component is created, but + it doesn't require to be called for every created component - just for the one you need. + """ + + self.on_before_component_elem_id.append((elem_id, callback)) + + def on_after_component(self, callback, *, elem_id): + """ + Calls callback after a component is created. The callback function is called with a single argument of type OnComponent. 
+ """ + + self.on_after_component_elem_id.append((elem_id, callback)) + + def describe(self): """unused""" return "" @@ -236,6 +267,17 @@ class Script: pass +class ScriptBuiltin(Script): + + def elem_id(self, item_id): + """helper function to generate id for a HTML element, constructs final id out of tab and user-supplied item_id""" + + need_tabname = self.show(True) == self.show(False) + tabname = ('img2img' if self.is_img2img else 'txt2txt') + "_" if need_tabname else "" + + return f'{tabname}{item_id}' + + current_basedir = paths.script_path @@ -354,10 +396,17 @@ class ScriptRunner: self.selectable_scripts = [] self.alwayson_scripts = [] self.titles = [] + self.title_map = {} self.infotext_fields = [] self.paste_field_names = [] self.inputs = [None] + self.on_before_component_elem_id = {} + """dict of callbacks to be called before an element is created; key=elem_id, value=list of callbacks""" + + self.on_after_component_elem_id = {} + """dict of callbacks to be called after an element is created; key=elem_id, value=list of callbacks""" + def initialize_scripts(self, is_img2img): from modules import scripts_auto_postprocessing @@ -372,6 +421,7 @@ class ScriptRunner: script.filename = script_data.path script.is_txt2img = not is_img2img script.is_img2img = is_img2img + script.tabname = "img2img" if is_img2img else "txt2img" visibility = script.show(script.is_img2img) @@ -446,6 +496,8 @@ class ScriptRunner: self.inputs = [None] def setup_ui(self): + all_titles = [wrap_call(script.title, script.filename, "title") or script.filename for script in self.scripts] + self.title_map = {title.lower(): script for title, script in zip(all_titles, self.scripts)} self.titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in self.selectable_scripts] self.setup_ui_for_section(None) @@ -492,6 +544,13 @@ class ScriptRunner: self.infotext_fields.append((dropdown, lambda x: gr.update(value=x.get('Script', 'None')))) self.infotext_fields.extend([(script.group, onload_script_visibility) for script in self.selectable_scripts]) + for script in self.scripts: + for elem_id, callback in script.on_before_component_elem_id: + self.on_before_component_elem_id.get(elem_id, []).append((callback, script)) + + for elem_id, callback in script.on_after_component_elem_id: + self.on_after_component_elem_id.get(elem_id, []).append((callback, script)) + return self.inputs def run(self, p, *args): @@ -585,6 +644,13 @@ class ScriptRunner: errors.report(f"Error running postprocess_image: {script.filename}", exc_info=True) def before_component(self, component, **kwargs): + for callbacks in self.on_before_component_elem_id.get(kwargs.get("elem_id"), []): + for callback, script in callbacks: + try: + callback(OnComponent(component=component)) + except Exception: + errors.report(f"Error running on_before_component: {script.filename}", exc_info=True) + for script in self.scripts: try: script.before_component(component, **kwargs) @@ -592,12 +658,22 @@ class ScriptRunner: errors.report(f"Error running before_component: {script.filename}", exc_info=True) def after_component(self, component, **kwargs): + for callbacks in self.on_after_component_elem_id.get(component.elem_id, []): + for callback, script in callbacks: + try: + callback(OnComponent(component=component)) + except Exception: + errors.report(f"Error running on_after_component: {script.filename}", exc_info=True) + for script in self.scripts: try: script.after_component(component, **kwargs) except Exception: errors.report(f"Error 
running after_component: {script.filename}", exc_info=True) + def script(self, title): + return self.title_map.get(title.lower()) + def reload_sources(self, cache): for si, script in list(enumerate(self.scripts)): args_from = script.args_from @@ -616,7 +692,6 @@ class ScriptRunner: self.scripts[si].args_from = args_from self.scripts[si].args_to = args_to - def before_hr(self, p): for script in self.alwayson_scripts: try: diff --git a/modules/shared_items.py b/modules/shared_items.py index 754166d2..84d69c8d 100644 --- a/modules/shared_items.py +++ b/modules/shared_items.py @@ -73,6 +73,7 @@ ui_reorder_categories_builtin_items = [ "checkboxes", "dimensions", "cfg", + "denoising", "seed", "batch", "override_settings", diff --git a/modules/txt2img.py b/modules/txt2img.py index 5ea96bba..1ee592ad 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -9,7 +9,7 @@ from modules.ui import plaintext_to_html import gradio as gr -def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): +def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) p = processing.StableDiffusionProcessingTxt2Img( @@ -19,12 +19,6 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step prompt=prompt, styles=prompt_styles, negative_prompt=negative_prompt, - seed=seed, - subseed=subseed, - subseed_strength=subseed_strength, - seed_resize_from_h=seed_resize_from_h, - seed_resize_from_w=seed_resize_from_w, - seed_enable_extras=seed_enable_extras, sampler_name=sampler_name, batch_size=batch_size, n_iter=n_iter, diff --git a/modules/ui.py b/modules/ui.py index 3321b94d..a6b1f964 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1,5 +1,4 @@ import datetime -import json import mimetypes import os import sys @@ -13,7 +12,7 @@ from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import gradio_extensons # noqa: F401 -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, ui_extra_networks +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, ui_extra_networks from 
modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion from modules.paths import script_path from modules.ui_common import create_refresh_button @@ -142,45 +141,6 @@ def interrogate_deepbooru(image): return gr.update() if prompt is None else prompt -def create_seed_inputs(target_interface): - with FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): - if cmd_opts.use_textbox_seed: - seed = gr.Textbox(label='Seed', value="", elem_id=f"{target_interface}_seed") - else: - seed = gr.Number(label='Seed', value=-1, elem_id=f"{target_interface}_seed", precision=0) - - random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') - reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') - - seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False) - - # Components to show/hide based on the 'Extra' checkbox - seed_extras = [] - - with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: - seed_extras.append(seed_extra_row_1) - subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed", precision=0) - random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") - reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") - subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") - - with FormRow(visible=False) as seed_extra_row_2: - seed_extras.append(seed_extra_row_2) - seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w") - seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h") - - random_seed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_seed')}", show_progress=False, inputs=[], outputs=[]) - random_subseed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_subseed')}", show_progress=False, inputs=[], outputs=[]) - - def change_visibility(show): - return {comp: gr_show(show) for comp in seed_extras} - - seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras) - - return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox - - - def connect_clear_prompt(button): """Given clear button, prompt, and token_counter objects, setup clear prompt button click event""" button.click( @@ -191,39 +151,6 @@ def connect_clear_prompt(button): ) -def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed): - """ Connects a 'reuse (sub)seed' button's click event so that it copies last used - (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength - was 0, i.e. 
no variation seed was used, it copies the normal seed value instead.""" - def copy_seed(gen_info_string: str, index): - res = -1 - - try: - gen_info = json.loads(gen_info_string) - index -= gen_info.get('index_of_first_image', 0) - - if is_subseed and gen_info.get('subseed_strength', 0) > 0: - all_subseeds = gen_info.get('all_subseeds', [-1]) - res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0] - else: - all_seeds = gen_info.get('all_seeds', [-1]) - res = all_seeds[index if 0 <= index < len(all_seeds) else 0] - - except json.decoder.JSONDecodeError: - if gen_info_string: - errors.report(f"Error parsing JSON generation info: {gen_info_string}") - - return [res, gr_show(False)] - - reuse_seed.click( - fn=copy_seed, - _js="(x, y) => [x, selected_gallery_index()]", - show_progress=False, - inputs=[generation_info, dummy_component], - outputs=[seed, dummy_component] - ) - - def update_token_counter(text, steps): try: text, _ = extra_networks.parse_prompt(text) @@ -429,10 +356,8 @@ def create_ui(): batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") elif category == "cfg": - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") - - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') + with gr.Row(): + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") elif category == "checkboxes": with FormRow(elem_classes="checkboxes-row", variant="compact"): @@ -509,9 +434,6 @@ def create_ui(): txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - txt2img_args = dict( fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), _js="submit", @@ -525,8 +447,6 @@ def create_ui(): batch_count, batch_size, cfg_scale, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, height, width, enable_hr, @@ -577,15 +497,9 @@ def create_ui(): (steps, "Steps"), (sampler_name, "Sampler"), (cfg_scale, "CFG scale"), - (seed, "Seed"), (width, "Size-1"), (height, "Size-2"), (batch_size, "Batch size"), - (seed_checkbox, lambda d: "Variation seed" in d or "Seed resize from-1" in d), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (enable_hr, lambda d: "Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d)), @@ -613,7 +527,7 @@ def create_ui(): steps, sampler_name, cfg_scale, - seed, + scripts.scripts_txt2img.script('Seed').seed, width, height, ] @@ -783,15 +697,13 @@ def create_ui(): batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - elif category == "cfg": - with FormGroup(): - with FormRow(): - cfg_scale = 
gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") - image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") + elif category == "denoising": + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') + elif category == "cfg": + with gr.Row(): + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") + image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) elif category == "checkboxes": with FormRow(elem_classes="checkboxes-row", variant="compact"): @@ -849,9 +761,6 @@ def create_ui(): img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - img2img_args = dict( fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), _js="submit_img2img", @@ -878,8 +787,6 @@ def create_ui(): cfg_scale, image_cfg_scale, denoising_strength, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, selected_scale_tab, height, width, @@ -966,15 +873,9 @@ def create_ui(): (sampler_name, "Sampler"), (cfg_scale, "CFG scale"), (image_cfg_scale, "Image CFG scale"), - (seed, "Seed"), (width, "Size-1"), (height, "Size-2"), (batch_size, "Batch size"), - (seed_checkbox, lambda d: "Variation seed" in d or "Seed resize from-1" in d), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (mask_blur, "Mask blur"), diff --git a/style.css b/style.css index f936e9a0..92093b89 100644 --- a/style.css +++ b/style.css @@ -222,14 +222,18 @@ div.block.gradio-accordion { padding: 0.1em 0.75em; } +[id$=_seed], [id$=_subseed]{ + max-width: 10em; +} + [id$=_subseed_show]{ min-width: auto !important; flex-grow: 0 !important; display: flex; } -[id$=_subseed_show] label{ - margin-bottom: 0.5em; +[id$=_subseed_show] .label-wrap{ + margin: 0 0 0 0.5em; align-self: end; } @@ -1028,5 +1032,6 @@ div.accordions{ div.accordions > div.input-accordion.input-accordion-open{ flex: 1 auto; + flex-flow: column; } -- cgit v1.2.3 From 0f77139253f5481d62f7c1eddc220355440b2d1f Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Tue, 15 Aug 2023 14:24:55 -0400 Subject: Fix inpaint upload for alpha masks, create reusable function --- modules/img2img.py | 2 +- modules/processing.py | 10 +++++++++- modules/ui.py | 2 +- 3 files changed, 11 insertions(+), 3 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/img2img.py 
b/modules/img2img.py index ac9fd3f8..328cb0e9 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -129,7 +129,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s mask = None elif mode == 2: # inpaint image, mask = init_img_with_mask["image"], init_img_with_mask["mask"] - mask = mask.split()[-1].convert("L").point(lambda x: 255 if x > 128 else 0) + mask = processing.create_binary_mask(mask) image = image.convert("RGB") elif mode == 3: # inpaint sketch image = inpaint_color_sketch diff --git a/modules/processing.py b/modules/processing.py index 1d098302..e62db62f 100755 --- a/modules/processing.py +++ b/modules/processing.py @@ -81,6 +81,12 @@ def apply_overlay(image, paste_loc, index, overlays): return image +def create_binary_mask(image): + if image.mode == 'RGBA' and image.getextrema()[-1] != (255, 255): + image = image.split()[-1].convert("L").point(lambda x: 255 if x > 128 else 0) + else: + image = image.convert('L') + return image def txt2img_image_conditioning(sd_model, x, width, height): if sd_model.model.conditioning_key in {'hybrid', 'concat'}: # Inpainting models @@ -1385,7 +1391,9 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): image_mask = self.image_mask if image_mask is not None: - image_mask = image_mask.convert('L') + # image_mask is passed in as RGBA by Gradio to support alpha masks, + # but we still want to support binary masks. + image_mask = create_binary_mask(image_mask) if self.inpainting_mask_invert: image_mask = ImageOps.invert(image_mask) diff --git a/modules/ui.py b/modules/ui.py index a6b1f964..c98d9849 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -598,7 +598,7 @@ def create_ui(): with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") - init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask") + init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask") with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: hidden = '
Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' -- cgit v1.2.3 From 46e8898f654f5567a9704ab18c67be155158373a Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Thu, 17 Aug 2023 19:35:34 -0400 Subject: Fix img2img background color not being used --- modules/img2img.py | 6 ++---- modules/ui.py | 4 ++-- 2 files changed, 4 insertions(+), 6 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/img2img.py b/modules/img2img.py index 328cb0e9..1519e132 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -122,15 +122,14 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s is_batch = mode == 5 if mode == 0: # img2img - image = init_img.convert("RGB") + image = init_img mask = None elif mode == 1: # img2img sketch - image = sketch.convert("RGB") + image = sketch mask = None elif mode == 2: # inpaint image, mask = init_img_with_mask["image"], init_img_with_mask["mask"] mask = processing.create_binary_mask(mask) - image = image.convert("RGB") elif mode == 3: # inpaint sketch image = inpaint_color_sketch orig = inpaint_color_sketch_orig or inpaint_color_sketch @@ -139,7 +138,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100) blur = ImageFilter.GaussianBlur(mask_blur) image = Image.composite(image.filter(blur), orig, mask.filter(blur)) - image = image.convert("RGB") elif mode == 4: # inpaint upload mask image = init_img_inpaint mask = init_mask_inpaint diff --git a/modules/ui.py b/modules/ui.py index c98d9849..01f77849 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -575,7 +575,7 @@ def create_ui(): add_copy_image_controls('img2img', init_img) with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: - sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color) + sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color) add_copy_image_controls('sketch', sketch) with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: @@ -583,7 +583,7 @@ def create_ui(): add_copy_image_controls('inpaint', init_img_with_mask) with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color) + inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color) inpaint_color_sketch_orig = gr.State(None) add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) -- cgit v1.2.3 From df595ae3135ef12c135f43ef2a0f792708aab4b3 Mon Sep 17 00:00:00 2001 From: 
AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 21 Aug 2023 08:48:22 +0300 Subject: make resize handle available to extensions --- .eslintrc.js | 4 +- .../resize-handle/javascript/resize-handle.js | 106 -------------------- extensions-builtin/resize-handle/style.css | 10 -- javascript/resizeHandle.js | 109 +++++++++++++++++++++ modules/ui.py | 6 +- modules/ui_components.py | 12 +++ style.css | 12 +++ 7 files changed, 139 insertions(+), 120 deletions(-) delete mode 100644 extensions-builtin/resize-handle/javascript/resize-handle.js delete mode 100644 extensions-builtin/resize-handle/style.css create mode 100644 javascript/resizeHandle.js (limited to 'modules/ui.py') diff --git a/.eslintrc.js b/.eslintrc.js index e3b4fb76..4777c276 100644 --- a/.eslintrc.js +++ b/.eslintrc.js @@ -90,6 +90,8 @@ module.exports = { // localStorage.js localSet: "readonly", localGet: "readonly", - localRemove: "readonly" + localRemove: "readonly", + // resizeHandle.js + setupResizeHandle: "writable" } }; diff --git a/extensions-builtin/resize-handle/javascript/resize-handle.js b/extensions-builtin/resize-handle/javascript/resize-handle.js deleted file mode 100644 index a07a01d2..00000000 --- a/extensions-builtin/resize-handle/javascript/resize-handle.js +++ /dev/null @@ -1,106 +0,0 @@ -onUiLoaded(async() => { - const GRADIO_MIN_WIDTH = 320; - const GRID_TEMPLATE_COLUMNS = '1fr 16px 1fr'; - const PAD = 16; - const DEBOUNCE_TIME = 100; - - const R = { - tracking: false, - parent: null, - parentWidth: null, - leftCol: null, - leftColStartWidth: null, - screenX: null, - }; - - let resizeTimer; - - const leftCols = [ - gradioApp().querySelector('#txt2img_settings'), - gradioApp().querySelector('#img2img_settings'), - ]; - - function setLeftColGridTemplate(el, width) { - el.style.gridTemplateColumns = `${width}px 16px 1fr`; - } - - function displayResizeHandle(parent) { - if (window.innerWidth < GRADIO_MIN_WIDTH * 2 + PAD * 4) { - parent.style.display = 'flex'; - if (R.handle != null) { - R.handle.style.opacity = '0'; - } - return false; - } else { - parent.style.display = 'grid'; - if (R.handle != null) { - R.handle.style.opacity = '100'; - } - return true; - } - } - - function setup() { - for (const leftCol of leftCols) { - const parent = leftCol.parentElement; - const rightCol = parent.lastElementChild; - - if (!displayResizeHandle(parent)) { - return; - } - - parent.style.display = 'grid'; - parent.style.gap = '0'; - parent.style.gridTemplateColumns = GRID_TEMPLATE_COLUMNS; - - const resizeHandle = document.createElement('div'); - resizeHandle.classList.add('resize-handle'); - parent.insertBefore(resizeHandle, rightCol); - - resizeHandle.addEventListener('mousedown', (evt) => { - R.tracking = true; - R.parent = parent; - R.parentWidth = parent.offsetWidth; - R.handle = resizeHandle; - R.leftCol = leftCol; - R.leftColStartWidth = leftCol.offsetWidth; - R.screenX = evt.screenX; - }); - } - } - - window.addEventListener('mousemove', (evt) => { - if (R.tracking) { - const delta = R.screenX - evt.screenX; - const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), GRADIO_MIN_WIDTH); - setLeftColGridTemplate(R.parent, leftColWidth); - } - }); - - window.addEventListener('mouseup', () => R.tracking = false); - - window.addEventListener('resize', () => { - clearTimeout(resizeTimer); - - resizeTimer = setTimeout(() => { - for (const leftCol of leftCols) { - const parent = leftCol.parentElement; - - if (displayResizeHandle(parent) && parent.style.gridTemplateColumns != 
GRID_TEMPLATE_COLUMNS) { - const oldParentWidth = R.parentWidth; - const newParentWidth = parent.offsetWidth; - const widthL = parseInt(parent.style.gridTemplateColumns.split(' ')[0]); - - const ratio = newParentWidth / oldParentWidth; - - const newWidthL = Math.max(Math.floor(ratio * widthL), GRADIO_MIN_WIDTH); - setLeftColGridTemplate(parent, newWidthL); - - R.parentWidth = newParentWidth; - } - } - }, DEBOUNCE_TIME); - }); - - setup(); -}); diff --git a/extensions-builtin/resize-handle/style.css b/extensions-builtin/resize-handle/style.css deleted file mode 100644 index 0e18267a..00000000 --- a/extensions-builtin/resize-handle/style.css +++ /dev/null @@ -1,10 +0,0 @@ -.resize-handle{ - cursor: col-resize; - grid-column: 2 / 3; - min-width: 8px !important; - max-width: 8px !important; - height: 100%; - border-left: 1px dashed var(--border-color-primary); - user-select: none; - margin-left: 8px; -} \ No newline at end of file diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js new file mode 100644 index 00000000..71023f9f --- /dev/null +++ b/javascript/resizeHandle.js @@ -0,0 +1,109 @@ +(function() { + const GRADIO_MIN_WIDTH = 320; + const GRID_TEMPLATE_COLUMNS = '1fr 16px 1fr'; + const PAD = 16; + const DEBOUNCE_TIME = 100; + + const R = { + tracking: false, + parent: null, + parentWidth: null, + leftCol: null, + leftColStartWidth: null, + screenX: null, + }; + + let resizeTimer; + let parents = []; + + function setLeftColGridTemplate(el, width) { + el.style.gridTemplateColumns = `${width}px 16px 1fr`; + } + + function displayResizeHandle(parent) { + if (window.innerWidth < GRADIO_MIN_WIDTH * 2 + PAD * 4) { + parent.style.display = 'flex'; + if (R.handle != null) { + R.handle.style.opacity = '0'; + } + return false; + } else { + parent.style.display = 'grid'; + if (R.handle != null) { + R.handle.style.opacity = '100'; + } + return true; + } + } + + function afterResize(parent) { + if (displayResizeHandle(parent) && parent.style.gridTemplateColumns != GRID_TEMPLATE_COLUMNS) { + const oldParentWidth = R.parentWidth; + const newParentWidth = parent.offsetWidth; + const widthL = parseInt(parent.style.gridTemplateColumns.split(' ')[0]); + + const ratio = newParentWidth / oldParentWidth; + + const newWidthL = Math.max(Math.floor(ratio * widthL), GRADIO_MIN_WIDTH); + setLeftColGridTemplate(parent, newWidthL); + + R.parentWidth = newParentWidth; + } + } + + function setup(parent) { + const leftCol = parent.firstElementChild; + const rightCol = parent.lastElementChild; + + parents.push(parent); + + parent.style.display = 'grid'; + parent.style.gap = '0'; + parent.style.gridTemplateColumns = GRID_TEMPLATE_COLUMNS; + + const resizeHandle = document.createElement('div'); + resizeHandle.classList.add('resize-handle'); + parent.insertBefore(resizeHandle, rightCol); + + resizeHandle.addEventListener('mousedown', (evt) => { + R.tracking = true; + R.parent = parent; + R.parentWidth = parent.offsetWidth; + R.handle = resizeHandle; + R.leftCol = leftCol; + R.leftColStartWidth = leftCol.offsetWidth; + R.screenX = evt.screenX; + }); + + afterResize(parent); + } + + window.addEventListener('mousemove', (evt) => { + if (R.tracking) { + const delta = R.screenX - evt.screenX; + const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), GRADIO_MIN_WIDTH); + setLeftColGridTemplate(R.parent, leftColWidth); + } + }); + + window.addEventListener('mouseup', () => R.tracking = false); + + + window.addEventListener('resize', () => { + 
clearTimeout(resizeTimer); + + resizeTimer = setTimeout(function() { + for (const parent of parents) { + afterResize(parent); + } + }, DEBOUNCE_TIME); + }); + + setupResizeHandle = setup; +})(); + +onUiLoaded(function() { + for (var elem of gradioApp().querySelectorAll('.resize-handle-row')) { + setupResizeHandle(elem); + } +}); diff --git a/modules/ui.py b/modules/ui.py index 01f77849..2b6a13cb 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -13,7 +13,7 @@ from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_grad from modules import gradio_extensons # noqa: F401 from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, ui_extra_networks -from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion +from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion, ResizeHandleRow from modules.paths import script_path from modules.ui_common import create_refresh_button from modules.ui_gradio_extensions import reload_javascript @@ -333,7 +333,7 @@ def create_ui(): extra_tabs = gr.Tabs(elem_id="txt2img_extra_tabs") extra_tabs.__enter__() - with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, gr.Row(equal_height=False): + with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, ResizeHandleRow(equal_height=False): with gr.Column(variant='compact', elem_id="txt2img_settings"): scripts.scripts_txt2img.prepare_ui() @@ -549,7 +549,7 @@ def create_ui(): extra_tabs = gr.Tabs(elem_id="img2img_extra_tabs") extra_tabs.__enter__() - with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, FormRow(equal_height=False): + with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, ResizeHandleRow(equal_height=False): with gr.Column(variant='compact', elem_id="img2img_settings"): copy_image_buttons = [] copy_image_destinations = {} diff --git a/modules/ui_components.py b/modules/ui_components.py index d08b2b99..55979f62 100644 --- a/modules/ui_components.py +++ b/modules/ui_components.py @@ -20,6 +20,18 @@ class ToolButton(FormComponent, gr.Button): return "button" +class ResizeHandleRow(gr.Row): + """Same as gr.Row but fits inside gradio forms""" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + self.elem_classes.append("resize-handle-row") + + def get_block_name(self): + return "row" + + class FormRow(FormComponent, gr.Row): """Same as gr.Row but fits inside gradio forms""" diff --git a/style.css b/style.css index 46125864..4166a3df 100644 --- a/style.css +++ b/style.css @@ -1055,3 +1055,15 @@ div.accordions > div.input-accordion.input-accordion-open{ position: sticky; top: 0.5em; } + + +.resize-handle{ + cursor: col-resize; + grid-column: 2 / 3; + min-width: 8px !important; + max-width: 8px !important; + height: 100%; + border-left: 1px dashed var(--border-color-primary); + user-select: none; + margin-left: 8px; +} -- cgit v1.2.3