From 8a7c07a2140c98bceca858087525d77fd0352fda Mon Sep 17 00:00:00 2001 From: yfszzx Date: Mon, 10 Oct 2022 15:39:39 +0800 Subject: show image history --- modules/ui.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index 4f18126f..8762fcf5 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -37,6 +37,7 @@ import modules.generation_parameters_copypaste from modules import prompt_parser from modules.images import save_image import modules.textual_inversion.ui +import modules.images_history as img_his # this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the bowser will not show any UI mimetypes.init() @@ -499,7 +500,6 @@ def create_ui(wrap_gradio_gpu_call): custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False) with gr.Column(variant='panel'): - with gr.Group(): txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False) txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4) @@ -516,6 +516,7 @@ def create_ui(wrap_gradio_gpu_call): with gr.Group(): html_info = gr.HTML() generation_info = gr.Textbox(visible=False) + connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) @@ -607,6 +608,7 @@ def create_ui(wrap_gradio_gpu_call): ] modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt) token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter]) + with gr.Blocks(analytics_enabled=False) as img2img_interface: img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True) @@ -696,6 +698,7 @@ def create_ui(wrap_gradio_gpu_call): with gr.Group(): html_info = gr.HTML() generation_info = gr.Textbox(visible=False) + connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) @@ -1126,8 +1129,10 @@ def create_ui(wrap_gradio_gpu_call): opts.save(shared.config_filename) - return f'{changed} settings changed.', opts.dumpjson() + return f'{changed} settings changed.', opts.dumpjson() + + images_history = img_his.create_history_tabs(gr, opts) with gr.Blocks(analytics_enabled=False) as settings_interface: settings_submit = gr.Button(value="Apply settings", variant='primary') result = gr.HTML() @@ -1206,7 +1211,9 @@ def create_ui(wrap_gradio_gpu_call): (pnginfo_interface, "PNG Info", "pnginfo"), (modelmerger_interface, "Checkpoint Merger", "modelmerger"), (textual_inversion_interface, "Textual inversion", "ti"), + (images_history, "History", "images_history"), (settings_interface, "Settings", "settings"), + ] with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file: -- cgit v1.2.3 From 23f2989799ee3911d2959cfceb74b921f20c9a51 Mon Sep 17 00:00:00 2001 From: yfszzx Date: Mon, 10 Oct 2022 18:33:49 +0800 Subject: images history over --- javascript/images_history.js | 4 +- modules/images_history.py | 141 ++++++++++++++++++++++++++----------------- modules/ui.py | 9 ++- testui.py | 124 ------------------------------------- 4 files changed, 94 insertions(+), 184 deletions(-) 
delete mode 100644 testui.py (limited to 'modules/ui.py') diff --git a/javascript/images_history.js b/javascript/images_history.js index f30b7eff..93d2b89a 100644 --- a/javascript/images_history.js +++ b/javascript/images_history.js @@ -58,9 +58,9 @@ function set_history_index_from_img(e){ } } } -function images_history_get_current_img(is_image2image){ +function images_history_get_current_img(is_image2image, image_path, files){ head = is_image2image?"img2img":"txt2img" s = $(gradioApp().getElementById(head + '_images_history_set_index')).attr("img_index") - return s + return [s, image_path, files] } diff --git a/modules/images_history.py b/modules/images_history.py index 23d83557..0e0a48f3 100644 --- a/modules/images_history.py +++ b/modules/images_history.py @@ -1,5 +1,6 @@ import os -def get_recent_images(is_img2img, dir_name, page_index, step): +def get_recent_images(dir_name, page_index, step, image_index): + print(image_index) page_index = int(page_index) f_list = os.listdir(dir_name) file_list = [] @@ -8,7 +9,7 @@ def get_recent_images(is_img2img, dir_name, page_index, step): continue file_list.append(file) file_list = sorted(file_list, key=lambda file: -os.path.getctime(os.path.join(dir_name, file))) - num = 24 + num = 48 max_page_index = len(file_list) // num + 1 page_index = max_page_index if page_index == -1 else page_index + step page_index = 1 if page_index < 1 else page_index @@ -16,75 +17,101 @@ def get_recent_images(is_img2img, dir_name, page_index, step): idx_frm = (page_index - 1) * num file_list = file_list[idx_frm:idx_frm + num] print(f"Loading history page {page_index}") - return [os.path.join(dir_name, file) for file in file_list], page_index, file_list -def first_page_click(is_img2img, dir_name): - return get_recent_images(is_img2img, dir_name, 1, 0) -def end_page_click(is_img2img, dir_name): - return get_recent_images(is_img2img, dir_name, -1, 0) -def prev_page_click(is_img2img, dir_name, page_index): - return get_recent_images(is_img2img, dir_name, page_index, -1) -def next_page_click(is_img2img, dir_name, page_index): - return get_recent_images(is_img2img, dir_name, page_index, 1) -def page_index_change(is_img2img, dir_name, page_index): - return get_recent_images(is_img2img, dir_name, page_index, 0) -def show_image_info(num, filenames): - return filenames[int(num)] -def delete_image(is_img2img, dir_name, name, page_index, filenames): + image_index = int(image_index) + if image_index < 0 or image_index > len(file_list) - 1: + current_file = None + hide_image = None + else: + current_file = file_list[int(image_index)] + hide_image = os.path.join(dir_name, current_file) + return [os.path.join(dir_name, file) for file in file_list], page_index, file_list, current_file, hide_image +def first_page_click(dir_name, page_index, image_index): + return get_recent_images(dir_name, 1, 0, image_index) +def end_page_click(dir_name, page_index, image_index): + return get_recent_images(dir_name, -1, 0, image_index) +def prev_page_click(dir_name, page_index, image_index): + return get_recent_images(dir_name, page_index, -1, image_index) +def next_page_click(dir_name, page_index, image_index): + return get_recent_images(dir_name, page_index, 1, image_index) +def page_index_change(dir_name, page_index, image_index): + return get_recent_images(dir_name, page_index, 0, image_index) + +def show_image_info(num, image_path, filenames): + file = filenames[int(num)] + return file, num, os.path.join(image_path, file) +def delete_image(is_img2img, dir_name, name, page_index, filenames, 
image_index): + print("filename", name) path = os.path.join(dir_name, name) if os.path.exists(path): print(f"Delete file {path}") os.remove(path) - i = 0 - for f in filenames: - if f == name: - break - i += 1 - images, page_index, file_list = get_recent_images(is_img2img, dir_name, page_index, 0) - current_file = file_list[i] if i < len(file_list) else None - return images, page_index, file_list, current_file + images, page_index, file_list, current_file, hide_image = get_recent_images(dir_name, page_index, 0, image_index) + return images, page_index, file_list, current_file, hide_image -def show_images_history(gr, opts, is_img2img): +def show_images_history(gr, opts, is_img2img, run_pnginfo, switch_dict): def id_name(is_img2img, name): return ("img2img" if is_img2img else "txt2img") + "_" + name - with gr.Row(): - if is_img2img: - dir_name = opts.outdir_img2img_samples - else: - dir_name = opts.outdir_txt2img_samples - first_page = gr.Button('First Page', elem_id=id_name(is_img2img,"images_history_first_page")) - prev_page = gr.Button('Prev Page') - page_index = gr.Number(value=1) - next_page = gr.Button('Next Page') - end_page = gr.Button('End Page') - with gr.Row(): - delete = gr.Button('Delete') - Send = gr.Button('Send') - with gr.Row(): - with gr.Column(elem_id=id_name(is_img2img,"images_history")): - history_gallery = gr.Gallery(label="Images history").style(grid=6) - img_file_name = gr.Textbox() - img_file_info = gr.Textbox(dir_name) - img_path = gr.Textbox(dir_name, visible=False) - set_index = gr.Button('set_index', elem_id=id_name(is_img2img,"images_history_set_index")) - is_img2img_flag = gr.Checkbox(is_img2img, visible=False) - filenames = gr.State() - first_page.click(first_page_click, inputs=[is_img2img_flag, img_path], outputs=[history_gallery, page_index, filenames]) - next_page.click(next_page_click, inputs=[is_img2img_flag, img_path, page_index], outputs=[history_gallery, page_index, filenames]) - prev_page.click(prev_page_click, inputs=[is_img2img_flag, img_path, page_index], outputs=[history_gallery, page_index, filenames]) - end_page.click(end_page_click, inputs=[is_img2img_flag, img_path], outputs=[history_gallery, page_index, filenames]) - page_index.submit(page_index_change, inputs=[is_img2img_flag, img_path, page_index], outputs=[history_gallery, page_index, filenames]) - set_index.click(show_image_info, _js="images_history_get_current_img",inputs=[is_img2img_flag, filenames], outputs=img_file_name) - delete.click(delete_image, inputs=[is_img2img_flag, img_path, img_file_name, page_index, filenames], outputs=[history_gallery, page_index, filenames,img_file_name]) + if is_img2img: + dir_name = opts.outdir_img2img_samples + else: + dir_name = opts.outdir_txt2img_samples + with gr.Row(): + first_page = gr.Button('First', elem_id=id_name(is_img2img,"images_history_first_page")) + prev_page = gr.Button('Prev') + page_index = gr.Number(value=1, label="Page Index") + next_page = gr.Button('Next') + end_page = gr.Button('End') + with gr.Row(elem_id=id_name(is_img2img,"images_history")): + with gr.Row(): + with gr.Column(): + history_gallery = gr.Gallery(show_label=False).style(grid=6) + with gr.Column(): + with gr.Row(): + delete = gr.Button('Delete') + pnginfo_send_to_txt2img = gr.Button('Send to txt2img') + pnginfo_send_to_img2img = gr.Button('Send to img2img') + with gr.Row(): + with gr.Column(): + img_file_info = gr.Textbox(dir_name, label="Generate Info") + img_file_name = gr.Textbox(label="File Name") + with gr.Row(): + # hiden items + img_path = 
gr.Textbox(dir_name, visible=False) + is_img2img_flag = gr.Checkbox(is_img2img, visible=False) + image_index = gr.Textbox(value=-1, visible=False) + set_index = gr.Button('set_index', elem_id=id_name(is_img2img,"images_history_set_index")) + filenames = gr.State() + hide_image = gr.Image(visible=False, type="pil") + info1 = gr.Textbox(visible=False) + info2 = gr.Textbox(visible=False) + + + # turn pages + gallery_inputs = [img_path, page_index, image_index] + gallery_outputs = [history_gallery, page_index, filenames, img_file_name, hide_image] + first_page.click(first_page_click, inputs=gallery_inputs, outputs=gallery_outputs) + next_page.click(next_page_click, inputs=gallery_inputs, outputs=gallery_outputs) + prev_page.click(prev_page_click, inputs=gallery_inputs, outputs=gallery_outputs) + end_page.click(end_page_click, inputs=gallery_inputs, outputs=gallery_outputs) + page_index.submit(page_index_change, inputs=gallery_inputs, outputs=gallery_outputs) #page_index.change(page_index_change, inputs=[is_img2img_flag, img_path, page_index], outputs=[history_gallery, page_index]) + + #other funcitons + set_index.click(show_image_info, _js="images_history_get_current_img", inputs=[is_img2img_flag, img_path, filenames], outputs=[img_file_name, image_index, hide_image]) + delete.click(delete_image, inputs=[is_img2img_flag, img_path, img_file_name, page_index, filenames, image_index], outputs=gallery_outputs) + hide_image.change(fn=run_pnginfo, inputs=[hide_image], outputs=[info1, img_file_info, info2]) + switch_dict["fn"](pnginfo_send_to_txt2img, switch_dict["t2i"], img_file_info, 'switch_to_txt2img') + switch_dict["fn"](pnginfo_send_to_img2img, switch_dict["i2i"], img_file_info, 'switch_to_img2img_img2img') + -def create_history_tabs(gr, opts): +def create_history_tabs(gr, opts, run_pnginfo, switch_dict): with gr.Blocks(analytics_enabled=False) as images_history: with gr.Tabs() as tabs: with gr.Tab("txt2img history", id="images_history_txt2img"): with gr.Blocks(analytics_enabled=False) as images_history_txt2img: - show_images_history(gr, opts, is_img2img=False) + show_images_history(gr, opts, False, run_pnginfo, switch_dict) with gr.Tab("img2img history", id="images_history_img2img"): with gr.Blocks(analytics_enabled=False) as images_history_img2img: - show_images_history(gr, opts, is_img2img=True) + show_images_history(gr, opts, True, run_pnginfo, switch_dict) return images_history diff --git a/modules/ui.py b/modules/ui.py index 8762fcf5..21c9236b 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1131,8 +1131,15 @@ def create_ui(wrap_gradio_gpu_call): return f'{changed} settings changed.', opts.dumpjson() + #images history + images_history_switch_dict = { + "fn":modules.generation_parameters_copypaste.connect_paste, + "t2i":txt2img_paste_fields, + "i2i":img2img_paste_fields + } + images_history = img_his.create_history_tabs(gr, opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict) - images_history = img_his.create_history_tabs(gr, opts) + with gr.Blocks(analytics_enabled=False) as settings_interface: settings_submit = gr.Button(value="Apply settings", variant='primary') result = gr.HTML() diff --git a/testui.py b/testui.py deleted file mode 100644 index f54e4a62..00000000 --- a/testui.py +++ /dev/null @@ -1,124 +0,0 @@ -import os -import threading -import time -import importlib -import signal -import threading - -from modules.paths import script_path - -from modules import devices, sd_samplers -import modules.codeformer_model as codeformer -import modules.extras 
-import modules.face_restoration -import modules.gfpgan_model as gfpgan -import modules.img2img - -import modules.lowvram -import modules.paths -import modules.scripts -import modules.sd_hijack -import modules.sd_models -import modules.shared as shared -import modules.txt2img - -import modules.ui -from modules import devices -from modules import modelloader -from modules.paths import script_path -from modules.shared import cmd_opts - -modelloader.cleanup_models() -modules.sd_models.setup_model() -codeformer.setup_model(cmd_opts.codeformer_models_path) -gfpgan.setup_model(cmd_opts.gfpgan_models_path) -shared.face_restorers.append(modules.face_restoration.FaceRestoration()) -modelloader.load_upscalers() -queue_lock = threading.Lock() - - -def wrap_queued_call(func): - def f(*args, **kwargs): - with queue_lock: - res = func(*args, **kwargs) - - return res - - return f - - -def wrap_gradio_gpu_call(func, extra_outputs=None): - def f(*args, **kwargs): - devices.torch_gc() - - shared.state.sampling_step = 0 - shared.state.job_count = -1 - shared.state.job_no = 0 - shared.state.job_timestamp = shared.state.get_job_timestamp() - shared.state.current_latent = None - shared.state.current_image = None - shared.state.current_image_sampling_step = 0 - shared.state.interrupted = False - shared.state.textinfo = None - - with queue_lock: - res = func(*args, **kwargs) - - shared.state.job = "" - shared.state.job_count = 0 - - devices.torch_gc() - - return res - - return modules.ui.wrap_gradio_call(f, extra_outputs=extra_outputs) - - -modules.scripts.load_scripts(os.path.join(script_path, "scripts")) - -shared.sd_model = None #modules.sd_models.load_model() -#shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model))) - - -def webui(): - # make the program just exit at ctrl+c without waiting for anything - def sigint_handler(sig, frame): - print(f'Interrupted with signal {sig} in {frame}') - os._exit(0) - - signal.signal(signal.SIGINT, sigint_handler) - - while 1: - - demo = modules.ui.create_ui(wrap_gradio_gpu_call=wrap_gradio_gpu_call) - - demo.launch( - share=cmd_opts.share, - server_name="0.0.0.0" if cmd_opts.listen else None, - server_port=cmd_opts.port, - debug=cmd_opts.gradio_debug, - auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None, - inbrowser=cmd_opts.autolaunch, - prevent_thread_lock=True - ) - - while 1: - time.sleep(0.5) - if getattr(demo, 'do_restart', False): - time.sleep(0.5) - demo.close() - time.sleep(0.5) - break - - sd_samplers.set_samplers() - - print('Reloading Custom Scripts') - modules.scripts.reload_scripts(os.path.join(script_path, "scripts")) - print('Reloading modules: modules.ui') - importlib.reload(modules.ui) - print('Restarting Gradio') - - - -if __name__ == "__main__": - webui() -- cgit v1.2.3 From a1a94b8b5f342f467aecc53b21b80ed0227ee76a Mon Sep 17 00:00:00 2001 From: yfszzx Date: Thu, 13 Oct 2022 00:19:34 +0800 Subject: images history improvement --- javascript/images_history.js | 125 ++++++++++++++++++++++--------------------- modules/images_history.py | 7 +-- modules/ui.py | 40 +++++++------- 3 files changed, 88 insertions(+), 84 deletions(-) (limited to 'modules/ui.py') diff --git a/javascript/images_history.js b/javascript/images_history.js index c9a63166..620f242c 100644 --- a/javascript/images_history.js +++ b/javascript/images_history.js @@ -18,20 +18,26 @@ var images_history_click_image = function(){ } var images_history_click_tab 
= function(){ - var tabs_box = gradioApp().getElementById("images_history_tab"); - if (!tabs_box.classList.contains(this.getAttribute("tabname"))) { - gradioApp().getElementById(this.getAttribute("tabname") + "_images_history_renew_page").click(); - tabs_box.classList.add(this.getAttribute("tabname")) - } + var tabs_box = gradioApp().getElementById("images_history_tab"); + if (!tabs_box.classList.contains(this.getAttribute("tabname"))) { + gradioApp().getElementById(this.getAttribute("tabname") + "_images_history_renew_page").click(); + tabs_box.classList.add(this.getAttribute("tabname")) + } } var images_history_close_full_view = function(){ - var box = images_history_get_parent_by_class(this, "images_history_cantainor"); - box.querySelector(".images_history_del_button").setAttribute("disabled", "disabled"); + var box = images_history_get_parent_by_class(this, "images_history_cantainor"); + box.querySelector(".images_history_del_button").setAttribute("disabled", "disabled"); +} + +function images_history_disabled_del(){ + gradioApp().querySelectorAll(".images_history_del_button").forEach(function(btn){ + btn.setAttribute('disabled','disabled'); + }); } function images_history_get_parent_by_class(item, class_name){ - var parent = item.parentElement; + var parent = item.parentElement; while(!parent.classList.contains(class_name)){ parent = parent.parentElement; } @@ -39,14 +45,15 @@ function images_history_get_parent_by_class(item, class_name){ } function images_history_get_parent_by_tagname(item, tagname){ - var parent = item.parentElement; - tagname = tagname.toUpperCase() + var parent = item.parentElement; + tagname = tagname.toUpperCase() while(parent.tagName != tagname){ - console.log(parent.tagName, tagname) + console.log(parent.tagName, tagname) parent = parent.parentElement; } return parent; } + function images_history_hide_buttons(hidden_list, gallery){ var buttons = gallery.querySelectorAll(".gallery-item"); var num = 0; @@ -54,7 +61,7 @@ function images_history_hide_buttons(hidden_list, gallery){ if (e.style.display == "none"){ num += 1; } - }) + }); if (num == hidden_list.length){ setTimeout(images_history_hide_buttons, 10, hidden_list, gallery); } @@ -74,14 +81,15 @@ function images_history_set_image_info(button){ if(e.style.display != "none"){ i += 1; } - }) + }); var gallery = images_history_get_parent_by_class(button, "images_history_cantainor"); var set_btn = gallery.querySelector(".images_history_set_index"); - set_btn.setAttribute("img_index", index); + var curr_idx = set_btn.getAttribute("img_index", index); + if (curr_idx != index) { + set_btn.setAttribute("img_index", index); + images_history_disabled_del(); + } set_btn.click(); - gradioApp().querySelectorAll(".images_history_del_button").forEach(function(btn){ - btn.setAttribute('disabled','disabled'); - }) } @@ -102,24 +110,24 @@ function images_history_delete(tabname, img_path, img_file_name, page_index, fil if (e.style.display != 'none'){ buttons.push(e); } - }); - + }); var img_num = buttons.length / 2; if (img_num === 1){ setTimeout(function(tabname){ gradioApp().getElementById(tabname + '_images_history_renew_page').click(); }, 30, tabname); - } else { + } else { buttons[image_index].style.display = 'none'; buttons[image_index + img_num].style.display = 'none'; var bnt; if (image_index >= img_num - 1){ btn = buttons[img_num - 2]; - } else { + } else { btn = buttons[image_index + 1] ; - } + } setTimeout(function(btn){btn.click()}, 30, btn); - } + } + images_history_disabled_del(); return [tabname, img_path, 
img_file_name, page_index, filenames, image_index]; } @@ -132,61 +140,58 @@ function images_history_turnpage(img_path, page_index, image_index, tabname){ } function images_history_enable_del_buttons(){ - gradioApp().querySelectorAll(".images_history_del_button").forEach(function(btn){ - btn.removeAttribute('disabled'); + gradioApp().querySelectorAll(".images_history_del_button").forEach(function(btn){ + btn.removeAttribute('disabled'); }) } function images_history_init(){ - if (gradioApp().getElementById('txt2img_images_history_renew_page') == null) { - setTimeout(images_history_init, 500); - } else { + var load_txt2img_button = gradioApp().getElementById('txt2img_images_history_renew_page') + if (load_txt2img_button){ for (var i in images_history_tab_list ){ tab = images_history_tab_list[i]; gradioApp().getElementById(tab + '_images_history').classList.add("images_history_cantainor"); gradioApp().getElementById(tab + '_images_history_set_index').classList.add("images_history_set_index"); gradioApp().getElementById(tab + '_images_history_del_button').classList.add("images_history_del_button"); - gradioApp().getElementById(tab + '_images_history_gallery').classList.add("images_history_gallery"); - - + gradioApp().getElementById(tab + '_images_history_gallery').classList.add("images_history_gallery"); + } var tabs_box = gradioApp().getElementById("tab_images_history").querySelector("div").querySelector("div").querySelector("div"); - tabs_box.setAttribute("id", "images_history_tab"); - tabs_box.classList.add(images_history_tab_list[0]); - gradioApp().getElementById("txt2img_images_history_renew_page").click(); - } + tabs_box.setAttribute("id", "images_history_tab"); + var tab_btns = tabs_box.querySelectorAll("button"); + for (var i in images_history_tab_list){ + var tabname = images_history_tab_list[i] + tab_btns[i].setAttribute("tabname", tabname); + tab_btns[i].addEventListener('click', images_history_click_tab); + } + tabs_box.classList.add(images_history_tab_list[0]); + load_txt2img_button.click(); + } else { + setTimeout(images_history_init, 500); + } } var images_history_tab_list = ["txt2img", "img2img", "extras"]; -var images_history_start_flag = false; - -onUiUpdate(function(){ - var tab = gradioApp().getElementById("images_history_tab"); - if (tab) { - if (!images_history_start_flag){ - images_history_init(); - images_history_start_flag = true; - } - var tab_btns = gradioApp().getElementById("images_history_tab").querySelectorAll("button"); - for (var i in images_history_tab_list ){ - var buttons = gradioApp().querySelectorAll('#' + images_history_tab_list[i] + '_images_history .gallery-item'); - buttons.forEach(function(bnt){ - bnt.addEventListener('click', images_history_click_image, true); - }); - var tabname = images_history_tab_list[i] - tab_btns[i].setAttribute("tabname", tabname); - tab_btns[i].addEventListener('click', images_history_click_tab, true); +setTimeout(images_history_init, 500) +document.addEventListener("DOMContentLoaded", function() { + var mutationObserver = new MutationObserver(function(m){ + for (var i in images_history_tab_list ){ + var buttons = gradioApp().querySelectorAll('#' + images_history_tab_list[i] + '_images_history .gallery-item'); + buttons.forEach(function(bnt){ + bnt.addEventListener('click', images_history_click_image, true); + }); // var cls_btn = gradioApp().getElementById(tabname + '_images_history_gallery').querySelector("svg"); // if (cls_btn){ - // cls_btn.addEventListener('click', images_history_close_full_view, false); + // 
cls_btn.addEventListener('click', images_history_close_full_view, false); // } // console.log(cls_btn, cls_btn.parentElement.parentElement) // if (cls_btn) { - // cls_btn = images_history_get_parent_by_tagname(cls_btn, "BUTTON"); - // cls_btn.addEventListener('click', images_history_close_full_view, true); - // } - } + // cls_btn = images_history_get_parent_by_tagname(cls_btn, "BUTTON"); + // cls_btn.addEventListener('click', images_history_close_full_view, true); + } + }); + mutationObserver.observe( gradioApp(), { childList:true, subtree:true }); + +}); - } -}); diff --git a/modules/images_history.py b/modules/images_history.py index 2bc4b7ee..1bca0ad9 100644 --- a/modules/images_history.py +++ b/modules/images_history.py @@ -61,7 +61,7 @@ def delete_image(tabname, dir_name, name, page_index, filenames, image_index): os.remove(path) txt_file = os.path.splitext(path)[0] + ".txt" if os.path.exists(txt_file): - os.remove(txt_file) + os.remove(txt_file) new_file_list = [] for f in filenames: if f == name: @@ -88,7 +88,7 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): end_page = gr.Button('End Page') with gr.Row(elem_id=tabname + "_images_history"): with gr.Row(): - with gr.Column(scale=2): + with gr.Column(scale=2): history_gallery = gr.Gallery(show_label=False, elem_id=tabname + "_images_history_gallery").style(grid=6) delete = gr.Button('Delete', elem_id=tabname + "_images_history_del_button") with gr.Column(): @@ -126,9 +126,10 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): #other funcitons set_index.click(show_image_info, _js="images_history_get_current_img", inputs=[tabname_box, img_path, filenames], outputs=[img_file_name, image_index, hide_image]) + img_file_name.change(fn=None, _js="images_history_enable_del_buttons", inputs=None, outputs=None) delete.click(delete_image,_js="images_history_delete", inputs=[tabname_box, img_path, img_file_name, page_index, filenames, image_index], outputs=[page_index, filenames]) hide_image.change(fn=run_pnginfo, inputs=[hide_image], outputs=[info1, img_file_info, info2]) - hide_image.change(fn=None, _js="images_history_enable_del_buttons", inputs=None, outputs=None) + #pnginfo.click(fn=run_pnginfo, inputs=[hide_image], outputs=[info1, img_file_info, info2]) switch_dict["fn"](pnginfo_send_to_txt2img, switch_dict["t2i"], img_file_info, 'switch_to_txt2img') switch_dict["fn"](pnginfo_send_to_img2img, switch_dict["i2i"], img_file_info, 'switch_to_img2img_img2img') diff --git a/modules/ui.py b/modules/ui.py index 94297ba6..8cd12b51 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -39,7 +39,7 @@ import modules.generation_parameters_copypaste from modules import prompt_parser from modules.images import save_image import modules.textual_inversion.ui -import modules.hypernetwork.ui +import modules.hypernetworks.ui import modules.images_history as img_his # this is a fix for Windows users. 
Without it, javascript files will be served with text/html content-type and the browser will not show any UI @@ -554,6 +554,7 @@ def create_ui(wrap_gradio_gpu_call): custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False) with gr.Column(variant='panel'): + with gr.Group(): txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False) txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4) @@ -573,9 +574,9 @@ def create_ui(wrap_gradio_gpu_call): with gr.Row(): download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False) - with gr.Group(): - html_info = gr.HTML() - generation_info = gr.Textbox(visible=False) + with gr.Group(): + html_info = gr.HTML() + generation_info = gr.Textbox(visible=False) connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) @@ -669,7 +670,6 @@ def create_ui(wrap_gradio_gpu_call): ] modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt) token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter]) - with gr.Blocks(analytics_enabled=False) as img2img_interface: img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True) @@ -762,10 +762,10 @@ def create_ui(wrap_gradio_gpu_call): with gr.Row(): download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False) - with gr.Group(): - html_info = gr.HTML() - generation_info = gr.Textbox(visible=False) - + with gr.Group(): + html_info = gr.HTML() + generation_info = gr.Textbox(visible=False) + connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) @@ -1016,6 +1016,13 @@ def create_ui(wrap_gradio_gpu_call): inputs=[image], outputs=[html, generation_info, html2], ) + #images history + images_history_switch_dict = { + "fn":modules.generation_parameters_copypaste.connect_paste, + "t2i":txt2img_paste_fields, + "i2i":img2img_paste_fields + } + images_history = img_his.create_history_tabs(gr, opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict) with gr.Blocks() as modelmerger_interface: with gr.Row().style(equal_height=False): @@ -1285,16 +1292,7 @@ Requested path was: {f} opts.save(shared.config_filename) - return f'{changed} settings changed.', opts.dumpjson() - - #images history - images_history_switch_dict = { - "fn":modules.generation_parameters_copypaste.connect_paste, - "t2i":txt2img_paste_fields, - "i2i":img2img_paste_fields - } - images_history = img_his.create_history_tabs(gr, opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict) - + return f'{changed} settings changed.', opts.dumpjson() def run_settings_single(value, key): if not opts.same_type(value, opts.data_labels[key].default): @@ -1393,11 +1391,10 @@ Requested path was: {f} (img2img_interface, "img2img", "img2img"), (extras_interface, "Extras", "extras"), (pnginfo_interface, "PNG Info", "pnginfo"), - (modelmerger_interface, "Checkpoint Merger", "modelmerger"), (images_history, "History", "images_history"), + (modelmerger_interface, 
"Checkpoint Merger", "modelmerger"), (train_interface, "Train", "ti"), (settings_interface, "Settings", "settings"), - ] with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file: @@ -1616,3 +1613,4 @@ if 'gradio_routes_templates_response' not in globals(): gradio_routes_templates_response = gradio.routes.templates.TemplateResponse gradio.routes.templates.TemplateResponse = template_response + -- cgit v1.2.3 From 1cfc2a18981ee56bdb69a2de7b463a11ad05e329 Mon Sep 17 00:00:00 2001 From: Melan Date: Wed, 12 Oct 2022 23:36:29 +0200 Subject: Save a csv containing the loss while training --- modules/hypernetworks/hypernetwork.py | 17 ++++++++++++++++- modules/textual_inversion/textual_inversion.py | 17 ++++++++++++++++- modules/ui.py | 3 +++ 3 files changed, 35 insertions(+), 2 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index b6c06d49..6522078f 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -5,6 +5,7 @@ import os import sys import traceback import tqdm +import csv import torch @@ -174,7 +175,7 @@ def attention_CrossAttention_forward(self, x, context=None, mask=None): return self.to_out(out) -def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, steps, create_image_every, save_hypernetwork_every, template_file, preview_image_prompt): +def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, steps, create_image_every, save_hypernetwork_every, write_csv_every, template_file, preview_image_prompt): assert hypernetwork_name, 'hypernetwork not selected' path = shared.hypernetworks.get(hypernetwork_name, None) @@ -256,6 +257,20 @@ def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name}-{hypernetwork.step}.pt') hypernetwork.save(last_saved_file) + print(f"{write_csv_every} > {hypernetwork.step % write_csv_every == 0}, {write_csv_every}") + if write_csv_every > 0 and hypernetwork_dir is not None and hypernetwork.step % write_csv_every == 0: + write_csv_header = False if os.path.exists(os.path.join(hypernetwork_dir, "hypernetwork_loss.csv")) else True + + with open(os.path.join(hypernetwork_dir, "hypernetwork_loss.csv"), "a+") as fout: + + csv_writer = csv.DictWriter(fout, fieldnames=["step", "loss"]) + + if write_csv_header: + csv_writer.writeheader() + + csv_writer.writerow({"step": hypernetwork.step, + "loss": f"{losses.mean():.7f}"}) + if hypernetwork.step > 0 and images_dir is not None and hypernetwork.step % create_image_every == 0: last_saved_image = os.path.join(images_dir, f'{hypernetwork_name}-{hypernetwork.step}.png') diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index fa0e33a2..25038a89 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -6,6 +6,7 @@ import torch import tqdm import html import datetime +import csv from PIL import Image, PngImagePlugin @@ -172,7 +173,7 @@ def create_embedding(name, num_vectors_per_token, init_text='*'): return fn -def train_embedding(embedding_name, learn_rate, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_image_prompt): +def train_embedding(embedding_name, learn_rate, data_root, log_directory, training_width, 
training_height, steps, create_image_every, save_embedding_every, write_csv_every, template_file, save_image_with_stored_embedding, preview_image_prompt): assert embedding_name, 'embedding not selected' shared.state.textinfo = "Initializing textual inversion training..." @@ -256,6 +257,20 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, traini last_saved_file = os.path.join(embedding_dir, f'{embedding_name}-{embedding.step}.pt') embedding.save(last_saved_file) + if write_csv_every > 0 and log_directory is not None and embedding.step % write_csv_every == 0: + write_csv_header = False if os.path.exists(os.path.join(log_directory, "textual_inversion_loss.csv")) else True + + with open(os.path.join(log_directory, "textual_inversion_loss.csv"), "a+") as fout: + + csv_writer = csv.DictWriter(fout, fieldnames=["epoch", "epoch_step", "loss"]) + + if write_csv_header: + csv_writer.writeheader() + + csv_writer.writerow({"epoch": epoch_num + 1, + "epoch_step": epoch_step - 1, + "loss": f"{losses.mean():.7f}"}) + if embedding.step > 0 and images_dir is not None and embedding.step % create_image_every == 0: last_saved_image = os.path.join(images_dir, f'{embedding_name}-{embedding.step}.png') diff --git a/modules/ui.py b/modules/ui.py index e07ee0e1..1195c2f1 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1096,6 +1096,7 @@ def create_ui(wrap_gradio_gpu_call): training_height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512) steps = gr.Number(label='Max steps', value=100000, precision=0) create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0) + write_csv_every = gr.Number(label='Save an csv containing the loss to log directory every N steps, 0 to disable', value=500, precision=0) save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0) save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True) preview_image_prompt = gr.Textbox(label='Preview prompt', value="") @@ -1174,6 +1175,7 @@ def create_ui(wrap_gradio_gpu_call): steps, create_image_every, save_embedding_every, + write_csv_every, template_file, save_image_with_stored_embedding, preview_image_prompt, @@ -1195,6 +1197,7 @@ def create_ui(wrap_gradio_gpu_call): steps, create_image_every, save_embedding_every, + write_csv_every, template_file, preview_image_prompt, ], -- cgit v1.2.3 From 54e0051bdd7dea7348825c09600ec61ea0771cb8 Mon Sep 17 00:00:00 2001 From: d8ahazard Date: Wed, 12 Oct 2022 18:17:26 -0500 Subject: Add drag/drop param loading. Drop an image or generational text onto the prompt bar, it loads the info for parsing. 
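The patch below reads generation parameters back out of a dropped file on the Python side through a new image_data() helper in modules/images.py. As a rough standalone sketch of that idea -- assuming only Pillow, with the file name made up; "parameters" is the PNG text chunk this code base stores its settings under:

    from PIL import Image

    def read_generation_parameters(path):
        # Return the "parameters" tEXt chunk of a PNG, or None if it has none.
        # This mirrors what modules.images.image_data() does for PNG input.
        image = Image.open(path)
        text_chunks = getattr(image, "text", {}) or {}
        return text_chunks.get("parameters")

    if __name__ == "__main__":
        params = read_generation_parameters("00001-1234567890.png")  # illustrative file name
        print(params or "no parameters chunk found")

For plain .txt drops the helper in the patch simply reads the file contents instead, and a small JavaScript handler routes anything dropped onto the prompt box into a hidden file component so the helper gets called.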
--- javascript/dragdrop.js | 3 +++ javascript/imageParams.js | 22 ++++++++++++++++++++++ modules/images.py | 20 ++++++++++++++++++++ modules/ui.py | 30 +++++++++++++++++++++++++++++- 4 files changed, 74 insertions(+), 1 deletion(-) create mode 100644 javascript/imageParams.js (limited to 'modules/ui.py') diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js index 5aac57f7..cf900f50 100644 --- a/javascript/dragdrop.js +++ b/javascript/dragdrop.js @@ -53,6 +53,9 @@ window.document.addEventListener('dragover', e => { window.document.addEventListener('drop', e => { const target = e.composedPath()[0]; + if (target.placeholder === "Prompt") { + return; + } const imgWrap = target.closest('[data-testid="image"]'); if ( !imgWrap ) { return; diff --git a/javascript/imageParams.js b/javascript/imageParams.js new file mode 100644 index 00000000..f9d0c0aa --- /dev/null +++ b/javascript/imageParams.js @@ -0,0 +1,22 @@ +window.onload = (function(){ + window.addEventListener('drop', e => { + const target = e.composedPath()[0]; + const idx = selected_gallery_index(); + let prompt_target = "txt2img_prompt_image"; + if (idx === 1) { + prompt_target = "img2img_prompt_image"; + } + if (target.placeholder === "Prompt") { + e.stopPropagation(); + e.preventDefault(); + const imgParent = gradioApp().getElementById(prompt_target); + const files = e.dataTransfer.files; + const fileInput = imgParent.querySelector('input[type="file"]'); + if ( fileInput ) { + fileInput.files = files; + fileInput.dispatchEvent(new Event('change')); + } + } + }); + +}); \ No newline at end of file diff --git a/modules/images.py b/modules/images.py index c0a90676..f1155b7f 100644 --- a/modules/images.py +++ b/modules/images.py @@ -463,3 +463,23 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i txt_fullfn = None return fullfn, txt_fullfn + + +def image_data(image_path): + file, ext = os.path.splitext(image_path.name) + data = {} + if "png" in ext: + image = Image.open(image_path.name, "r") + print(f"Image data requested for {image_path.name} {image.format} of {type(image)}") + try: + data = image.text["parameters"] + except Exception as e: + print(f"Exception: {e}") + pass + print(f"Image data: {data}") + if "txt" in ext: + myfile = open(image_path.name, 'r') + data = myfile.read() + myfile.close() + + return data, None diff --git a/modules/ui.py b/modules/ui.py index 2b332267..dd793c39 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -431,7 +431,6 @@ def create_toprow(is_img2img): with gr.Column(scale=80): with gr.Row(): prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, placeholder="Prompt", lines=2) - with gr.Column(scale=1, elem_id="roll_col"): roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0) paste = gr.Button(value=paste_symbol, elem_id="paste") @@ -513,6 +512,7 @@ def create_ui(wrap_gradio_gpu_call): with gr.Blocks(analytics_enabled=False) as txt2img_interface: txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False) dummy_component = gr.Label(visible=False) + txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="file", visible=False) with gr.Row(elem_id='txt2img_progress_row'): with gr.Column(scale=1): @@ -614,6 +614,18 @@ def create_ui(wrap_gradio_gpu_call): txt2img_prompt.submit(**txt2img_args) 
submit.click(**txt2img_args) + txt_prompt_img.change( + fn=modules.images.image_data, + # _js = "get_extras_tab_index", + inputs=[ + txt_prompt_img + ], + outputs=[ + txt2img_prompt, + txt_prompt_img + ] + ) + enable_hr.change( fn=lambda x: gr_show(x), inputs=[enable_hr], @@ -674,6 +686,9 @@ def create_ui(wrap_gradio_gpu_call): img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True) with gr.Row(elem_id='img2img_progress_row'): + img2img_prompt_img = gr.File(label="", elem_id="txt_prompt_image", file_count="single", type="file", + visible=False) + with gr.Column(scale=1): pass @@ -768,6 +783,18 @@ def create_ui(wrap_gradio_gpu_call): connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) + img2img_prompt_img.change( + fn=modules.images.image_data, + # _js = "get_extras_tab_index", + inputs=[ + txt_prompt_img + ], + outputs=[ + img2img_prompt, + img2img_prompt_img + ] + ) + mask_mode.change( lambda mode, img: { init_img_with_mask: gr_show(mode == 0), @@ -956,6 +983,7 @@ def create_ui(wrap_gradio_gpu_call): button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else '' open_extras_folder = gr.Button('Open output directory', elem_id=button_id) + submit.click( fn=wrap_gradio_gpu_call(modules.extras.run_extras), _js="get_extras_tab_index", -- cgit v1.2.3 From b2261b53ae4ad01b3713bc73ff62ab7b6f479e26 Mon Sep 17 00:00:00 2001 From: Buckzor Date: Thu, 13 Oct 2022 17:07:06 +0100 Subject: Added first_pass_width and height as adjustable inputs to "High Res Fix" --- modules/processing.py | 6 ++++-- modules/txt2img.py | 5 ++++- modules/ui.py | 6 ++++++ 3 files changed, 14 insertions(+), 3 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/processing.py b/modules/processing.py index d5172f00..abbfdf98 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -506,11 +506,13 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): firstphase_width_truncated = 0 firstphase_height_truncated = 0 - def __init__(self, enable_hr=False, scale_latent=True, denoising_strength=0.75, **kwargs): + def __init__(self, enable_hr=False, scale_latent=True, denoising_strength=0.75, first_pass_width=512, first_pass_height=512, **kwargs): super().__init__(**kwargs) self.enable_hr = enable_hr self.scale_latent = scale_latent self.denoising_strength = denoising_strength + self.first_pass_width = first_pass_width + self.first_pass_height = first_pass_height def init(self, all_prompts, all_seeds, all_subseeds): if self.enable_hr: @@ -519,7 +521,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): else: state.job_count = state.job_count * 2 - desired_pixel_count = 512 * 512 + desired_pixel_count = self.first_pass_width * self.first_pass_height actual_pixel_count = self.width * self.height scale = math.sqrt(desired_pixel_count / actual_pixel_count) diff --git a/modules/txt2img.py b/modules/txt2img.py index e985242b..85cbece4 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -6,7 +6,7 @@ import modules.processing as processing from modules.ui import plaintext_to_html -def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, 
batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, scale_latent: bool, denoising_strength: float, *args): +def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, scale_latent: bool, denoising_strength: float, first_pass_width: int, first_pass_height: int, *args): p = StableDiffusionProcessingTxt2Img( sd_model=shared.sd_model, outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples, @@ -32,6 +32,9 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: enable_hr=enable_hr, scale_latent=scale_latent if enable_hr else None, denoising_strength=denoising_strength if enable_hr else None, + first_pass_width=first_pass_width if enable_hr else None, + first_pass_height=first_pass_height if enable_hr else None, + ) if cmd_opts.enable_console_prompts: diff --git a/modules/ui.py b/modules/ui.py index 220fb80b..544419b2 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -540,6 +540,8 @@ def create_ui(wrap_gradio_gpu_call): enable_hr = gr.Checkbox(label='Highres. fix', value=False) with gr.Row(visible=False) as hr_options: + first_pass_width = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass width", value=512) + first_pass_height = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass height", value=512) scale_latent = gr.Checkbox(label='Scale latent', value=False) denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7) @@ -604,6 +606,8 @@ def create_ui(wrap_gradio_gpu_call): enable_hr, scale_latent, denoising_strength, + first_pass_width, + first_pass_height, ] + custom_inputs, outputs=[ txt2img_gallery, @@ -668,6 +672,8 @@ def create_ui(wrap_gradio_gpu_call): (denoising_strength, "Denoising strength"), (enable_hr, lambda d: "Denoising strength" in d), (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), + (first_pass_width, "First pass width"), + (first_pass_height, "First pass height"), ] modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt) token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter]) -- cgit v1.2.3 From 40d1c6e423b4dc52b3bdae43d9e2442960760ced Mon Sep 17 00:00:00 2001 From: Buckzor Date: Thu, 13 Oct 2022 20:04:22 +0100 Subject: Option between stretch and crop for Highres. 
fix --- modules/processing.py | 34 ++++++++++++++++++++++------------ modules/txt2img.py | 7 ++++--- modules/ui.py | 25 ++++++++++++++++--------- 3 files changed, 42 insertions(+), 24 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/processing.py b/modules/processing.py index abbfdf98..0246f5dd 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -506,13 +506,14 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): firstphase_width_truncated = 0 firstphase_height_truncated = 0 - def __init__(self, enable_hr=False, scale_latent=True, denoising_strength=0.75, first_pass_width=512, first_pass_height=512, **kwargs): + def __init__(self, enable_hr=False, scale_latent=True, denoising_strength=0.75, firstphase_width=512, firstphase_height=512, crop_scale=False, **kwargs): super().__init__(**kwargs) self.enable_hr = enable_hr self.scale_latent = scale_latent self.denoising_strength = denoising_strength - self.first_pass_width = first_pass_width - self.first_pass_height = first_pass_height + self.firstphase_width = firstphase_width + self.firstphase_height = firstphase_height + self.crop_scale = crop_scale def init(self, all_prompts, all_seeds, all_subseeds): if self.enable_hr: @@ -521,14 +522,14 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): else: state.job_count = state.job_count * 2 - desired_pixel_count = self.first_pass_width * self.first_pass_height - actual_pixel_count = self.width * self.height - scale = math.sqrt(desired_pixel_count / actual_pixel_count) + #desired_pixel_count = self.firstphase_width * self.firstphase_height + #actual_pixel_count = self.width * self.height + #scale = math.sqrt(desired_pixel_count / actual_pixel_count) - self.firstphase_width = math.ceil(scale * self.width / 64) * 64 - self.firstphase_height = math.ceil(scale * self.height / 64) * 64 - self.firstphase_width_truncated = int(scale * self.width) - self.firstphase_height_truncated = int(scale * self.height) + #self.firstphase_width = math.ceil(scale * self.width / 64) * 64 + #self.firstphase_height = math.ceil(scale * self.height / 64) * 64 + #self.firstphase_width_truncated = int(scale * self.width) + #self.firstphase_height_truncated = int(scale * self.height) def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength): self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model) @@ -541,8 +542,17 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): x = create_random_tensors([opt_C, self.firstphase_height // opt_f, self.firstphase_width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning) - truncate_x = (self.firstphase_width - self.firstphase_width_truncated) // opt_f - truncate_y = (self.firstphase_height - self.firstphase_height_truncated) // opt_f + truncate_x = 0 + truncate_y = 0 + + if self.crop_scale: + if self.width/self.firstphase_width > self.height/self.firstphase_height: + #Crop to landscape + truncate_y = (self.width - self.firstphase_width)//2 // opt_f + + elif self.width/self.firstphase_width < self.height/self.firstphase_height: + #Crop to portrait + truncate_x = (self.height - self.firstphase_height)//2 // opt_f samples = samples[:, :, truncate_y//2:samples.shape[2]-truncate_y//2, truncate_x//2:samples.shape[3]-truncate_x//2] 
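The hunk above is the heart of the new crop option: when the requested aspect ratio differs from the first-pass one, it trims rows or columns off the first-pass latent symmetrically instead of stretching it. A rough numpy illustration of that center-crop idea follows -- the function name and the zero tensor standing in for the latent are made up, and the truncation arithmetic is simplified rather than the patch's own (a later commit in this log reworks that formula anyway). Latent height and width here are the pixel dimensions divided by opt_f, as in the diffs above.

    import numpy as np

    def center_crop_latent(samples, target_width, target_height):
        # samples: latent batch shaped (N, C, H, W)
        n, c, h, w = samples.shape
        target_ratio = target_width / target_height
        truncate_x = truncate_y = 0
        if w / h > target_ratio:    # too wide for the target -> drop columns
            truncate_x = w - int(round(h * target_ratio))
        elif w / h < target_ratio:  # too tall for the target -> drop rows
            truncate_y = h - int(round(w / target_ratio))
        return samples[:, :,
                       truncate_y // 2 : h - (truncate_y - truncate_y // 2),
                       truncate_x // 2 : w - (truncate_x - truncate_x // 2)]

    first_pass = np.zeros((1, 4, 64, 96))                    # e.g. a 512x768 first pass at opt_f = 8
    print(center_crop_latent(first_pass, 512, 512).shape)    # -> (1, 4, 64, 64)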
diff --git a/modules/txt2img.py b/modules/txt2img.py index 85cbece4..447ec3d3 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -6,7 +6,7 @@ import modules.processing as processing from modules.ui import plaintext_to_html -def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, scale_latent: bool, denoising_strength: float, first_pass_width: int, first_pass_height: int, *args): +def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, scale_latent: bool, denoising_strength: float, firstphase_width: int, firstphase_height: int, crop_scale: bool, *args): p = StableDiffusionProcessingTxt2Img( sd_model=shared.sd_model, outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples, @@ -32,8 +32,9 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: enable_hr=enable_hr, scale_latent=scale_latent if enable_hr else None, denoising_strength=denoising_strength if enable_hr else None, - first_pass_width=first_pass_width if enable_hr else None, - first_pass_height=first_pass_height if enable_hr else None, + firstphase_width=firstphase_width if enable_hr else None, + firstphase_height=firstphase_height if enable_hr else None, + crop_scale=crop_scale if enable_hr else None, ) diff --git a/modules/ui.py b/modules/ui.py index 544419b2..f2d81f68 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -540,12 +540,18 @@ def create_ui(wrap_gradio_gpu_call): enable_hr = gr.Checkbox(label='Highres. 
fix', value=False) with gr.Row(visible=False) as hr_options: - first_pass_width = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass width", value=512) - first_pass_height = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass height", value=512) - scale_latent = gr.Checkbox(label='Scale latent', value=False) - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7) + with gr.Column(scale=1.0): + firstphase_width = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass width", value=512) + firstphase_height = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass height", value=512) + + with gr.Column(scale=1.0): + with gr.Row(): + crop_scale = gr.Checkbox(label='Crop when scaling', value=False) + scale_latent = gr.Checkbox(label='Scale latent', value=False) + with gr.Row(): + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7) - with gr.Row(): + with gr.Row(equal_height=True): batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1) batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1) @@ -606,8 +612,9 @@ def create_ui(wrap_gradio_gpu_call): enable_hr, scale_latent, denoising_strength, - first_pass_width, - first_pass_height, + firstphase_width, + firstphase_height, + crop_scale, ] + custom_inputs, outputs=[ txt2img_gallery, @@ -672,8 +679,8 @@ def create_ui(wrap_gradio_gpu_call): (denoising_strength, "Denoising strength"), (enable_hr, lambda d: "Denoising strength" in d), (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), - (first_pass_width, "First pass width"), - (first_pass_height, "First pass height"), + (firstphase_width, "First pass width"), + (firstphase_height, "First pass height"), ] modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt) token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter]) -- cgit v1.2.3 From e644b5a80beb54b6df4caa63fb19d889dd4ceff6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 14 Oct 2022 17:03:03 +0300 Subject: remove scale latent and no-crop options from hires fix support copy-pasting new parameters for hires fix --- modules/processing.py | 64 ++++++++++++++++++++++----------------------------- modules/txt2img.py | 9 +++----- modules/ui.py | 19 ++++----------- 3 files changed, 35 insertions(+), 57 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/processing.py b/modules/processing.py index d9b0e0e7..100a259f 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -506,14 +506,12 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): firstphase_width_truncated = 0 firstphase_height_truncated = 0 - def __init__(self, enable_hr=False, scale_latent=True, denoising_strength=0.75, firstphase_width=512, firstphase_height=512, crop_scale=False, **kwargs): + def __init__(self, enable_hr=False, denoising_strength=0.75, firstphase_width=512, firstphase_height=512, **kwargs): super().__init__(**kwargs) self.enable_hr = enable_hr - self.scale_latent = scale_latent self.denoising_strength = denoising_strength self.firstphase_width = firstphase_width self.firstphase_height = firstphase_height - self.crop_scale = crop_scale def init(self, all_prompts, all_seeds, all_subseeds): if self.enable_hr: @@ -530,6 +528,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): samples = 
self.sampler.sample(self, x, conditioning, unconditional_conditioning) return samples + self.extra_generation_params["First pass size"] = f"{self.firstphase_width}x{self.firstphase_height}" + x = create_random_tensors([opt_C, self.firstphase_height // opt_f, self.firstphase_width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning) @@ -538,46 +538,36 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): width_ratio = self.width/self.firstphase_width height_ratio = self.height/self.firstphase_height - if self.crop_scale: - if width_ratio > height_ratio: - #Crop to landscape - truncate_y = int((self.width - self.firstphase_width) / width_ratio / height_ratio / opt_f) + if width_ratio > height_ratio: + truncate_y = int((self.width - self.firstphase_width) / width_ratio / height_ratio / opt_f) - elif width_ratio < height_ratio: - #Crop to portrait - truncate_x = int((self.height - self.firstphase_height) / width_ratio / height_ratio / opt_f) + elif width_ratio < height_ratio: + truncate_x = int((self.height - self.firstphase_height) / width_ratio / height_ratio / opt_f) - samples = samples[:, :, truncate_y//2:samples.shape[2]-truncate_y//2, truncate_x//2:samples.shape[3]-truncate_x//2] - - + samples = samples[:, :, truncate_y//2:samples.shape[2]-truncate_y//2, truncate_x//2:samples.shape[3]-truncate_x//2] - + decoded_samples = decode_first_stage(self.sd_model, samples) - if self.scale_latent: - samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear") + if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None": + decoded_samples = torch.nn.functional.interpolate(decoded_samples, size=(self.height, self.width), mode="bilinear") else: - decoded_samples = decode_first_stage(self.sd_model, samples) + lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0) - if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None": - decoded_samples = torch.nn.functional.interpolate(decoded_samples, size=(self.height, self.width), mode="bilinear") - else: - lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0) - - batch_images = [] - for i, x_sample in enumerate(lowres_samples): - x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2) - x_sample = x_sample.astype(np.uint8) - image = Image.fromarray(x_sample) - image = images.resize_image(0, image, self.width, self.height) - image = np.array(image).astype(np.float32) / 255.0 - image = np.moveaxis(image, 2, 0) - batch_images.append(image) - - decoded_samples = torch.from_numpy(np.array(batch_images)) - decoded_samples = decoded_samples.to(shared.device) - decoded_samples = 2. * decoded_samples - 1. - - samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples)) + batch_images = [] + for i, x_sample in enumerate(lowres_samples): + x_sample = 255. 
* np.moveaxis(x_sample.cpu().numpy(), 0, 2) + x_sample = x_sample.astype(np.uint8) + image = Image.fromarray(x_sample) + image = images.resize_image(0, image, self.width, self.height) + image = np.array(image).astype(np.float32) / 255.0 + image = np.moveaxis(image, 2, 0) + batch_images.append(image) + + decoded_samples = torch.from_numpy(np.array(batch_images)) + decoded_samples = decoded_samples.to(shared.device) + decoded_samples = 2. * decoded_samples - 1. + + samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples)) shared.state.nextjob() diff --git a/modules/txt2img.py b/modules/txt2img.py index 447ec3d3..2381347f 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -6,7 +6,7 @@ import modules.processing as processing from modules.ui import plaintext_to_html -def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, scale_latent: bool, denoising_strength: float, firstphase_width: int, firstphase_height: int, crop_scale: bool, *args): +def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, firstphase_width: int, firstphase_height: int, *args): p = StableDiffusionProcessingTxt2Img( sd_model=shared.sd_model, outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples, @@ -30,12 +30,9 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: restore_faces=restore_faces, tiling=tiling, enable_hr=enable_hr, - scale_latent=scale_latent if enable_hr else None, denoising_strength=denoising_strength if enable_hr else None, - firstphase_width=firstphase_width if enable_hr else None, - firstphase_height=firstphase_height if enable_hr else None, - crop_scale=crop_scale if enable_hr else None, - + firstphase_width=firstphase_width if enable_hr else None, + firstphase_height=firstphase_height if enable_hr else None, ) if cmd_opts.enable_console_prompts: diff --git a/modules/ui.py b/modules/ui.py index f2d81f68..d66ddc14 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -540,16 +540,9 @@ def create_ui(wrap_gradio_gpu_call): enable_hr = gr.Checkbox(label='Highres. 
fix', value=False) with gr.Row(visible=False) as hr_options: - with gr.Column(scale=1.0): - firstphase_width = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass width", value=512) - firstphase_height = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass height", value=512) - - with gr.Column(scale=1.0): - with gr.Row(): - crop_scale = gr.Checkbox(label='Crop when scaling', value=False) - scale_latent = gr.Checkbox(label='Scale latent', value=False) - with gr.Row(): - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7) + firstphase_width = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass width", value=512) + firstphase_height = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass height", value=512) + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7) with gr.Row(equal_height=True): batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1) @@ -610,11 +603,9 @@ def create_ui(wrap_gradio_gpu_call): height, width, enable_hr, - scale_latent, denoising_strength, firstphase_width, firstphase_height, - crop_scale, ] + custom_inputs, outputs=[ txt2img_gallery, @@ -679,8 +670,8 @@ def create_ui(wrap_gradio_gpu_call): (denoising_strength, "Denoising strength"), (enable_hr, lambda d: "Denoising strength" in d), (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), - (firstphase_width, "First pass width"), - (firstphase_height, "First pass height"), + (firstphase_width, "First pass size-1"), + (firstphase_height, "First pass size-2"), ] modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt) token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter]) -- cgit v1.2.3 From 0aec19d7837d8564355fdb286541db7165852e41 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 14 Oct 2022 18:15:03 +0300 Subject: make pasting into img2img prompt work make image params request not use temp files --- modules/images.py | 36 ++++++++++++++++++------------------ modules/ui.py | 9 +++------ 2 files changed, 21 insertions(+), 24 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/images.py b/modules/images.py index f1155b7f..68cdbc93 100644 --- a/modules/images.py +++ b/modules/images.py @@ -1,4 +1,5 @@ import datetime +import io import math import os from collections import namedtuple @@ -465,21 +466,20 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i return fullfn, txt_fullfn -def image_data(image_path): - file, ext = os.path.splitext(image_path.name) - data = {} - if "png" in ext: - image = Image.open(image_path.name, "r") - print(f"Image data requested for {image_path.name} {image.format} of {type(image)}") - try: - data = image.text["parameters"] - except Exception as e: - print(f"Exception: {e}") - pass - print(f"Image data: {data}") - if "txt" in ext: - myfile = open(image_path.name, 'r') - data = myfile.read() - myfile.close() - - return data, None +def image_data(data): + try: + image = Image.open(io.BytesIO(data)) + textinfo = image.text["parameters"] + return textinfo, None + except Exception: + pass + + try: + text = data.decode('utf8') + assert len(text) < 10000 + return text, None + + except Exception: + pass + + return '', None diff --git a/modules/ui.py b/modules/ui.py index 0a3ee887..6266db49 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -514,7 +514,7 @@ def 
create_ui(wrap_gradio_gpu_call): with gr.Blocks(analytics_enabled=False) as txt2img_interface: txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False) dummy_component = gr.Label(visible=False) - txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="file", visible=False) + txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="bytes", visible=False) with gr.Row(elem_id='txt2img_progress_row'): with gr.Column(scale=1): @@ -620,7 +620,6 @@ def create_ui(wrap_gradio_gpu_call): txt_prompt_img.change( fn=modules.images.image_data, - # _js = "get_extras_tab_index", inputs=[ txt_prompt_img ], @@ -692,8 +691,7 @@ def create_ui(wrap_gradio_gpu_call): img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True) with gr.Row(elem_id='img2img_progress_row'): - img2img_prompt_img = gr.File(label="", elem_id="txt_prompt_image", file_count="single", type="file", - visible=False) + img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="bytes", visible=False) with gr.Column(scale=1): pass @@ -791,9 +789,8 @@ def create_ui(wrap_gradio_gpu_call): img2img_prompt_img.change( fn=modules.images.image_data, - # _js = "get_extras_tab_index", inputs=[ - txt_prompt_img + img2img_prompt_img ], outputs=[ img2img_prompt, -- cgit v1.2.3 From 67f447ddcc8a17d11939c3801dca635dc22944c7 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 14 Oct 2022 19:30:28 +0300 Subject: possibility to load checkpoint, clip skip, and hypernet from infotext --- modules/ui.py | 52 +++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 45 insertions(+), 7 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index 6266db49..a37a4e17 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -22,7 +22,7 @@ import gradio as gr import gradio.utils import gradio.routes -from modules import sd_hijack +from modules import sd_hijack, sd_models from modules.paths import script_path from modules.shared import opts, cmd_opts if cmd_opts.deepdanbooru: @@ -507,12 +507,38 @@ def setup_progressbar(progressbar, preview, id_part, textinfo=None): ) +def apply_setting(key, value): + if value is None: + return gr.update() + + if key == "sd_model_checkpoint": + ckpt_info = sd_models.get_closet_checkpoint_match(value) + + if ckpt_info is not None: + value = ckpt_info.title + else: + return gr.update() + + comp_args = opts.data_labels[key].component_args + if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False: + return + + valtype = type(opts.data_labels[key].default) + oldval = opts.data[key] + opts.data[key] = valtype(value) if valtype != type(None) else value + if oldval != value and opts.data_labels[key].onchange is not None: + opts.data_labels[key].onchange() + + opts.save(shared.config_filename) + return value + + def create_ui(wrap_gradio_gpu_call): import modules.img2img import modules.txt2img with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _, txt2img_prompt_style_apply, 
txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False) + txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, token_counter, token_button = create_toprow(is_img2img=False) dummy_component = gr.Label(visible=False) txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="bytes", visible=False) @@ -684,11 +710,10 @@ def create_ui(wrap_gradio_gpu_call): (firstphase_width, "First pass size-1"), (firstphase_height, "First pass size-2"), ] - modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt) token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter]) with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True) + img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, token_counter, token_button = create_toprow(is_img2img=True) with gr.Row(elem_id='img2img_progress_row'): img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="bytes", visible=False) @@ -938,7 +963,6 @@ def create_ui(wrap_gradio_gpu_call): (seed_resize_from_h, "Seed resize from-2"), (denoising_strength, "Denoising strength"), ] - modules.generation_parameters_copypaste.connect_paste(paste, img2img_paste_fields, img2img_prompt) token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) with gr.Blocks(analytics_enabled=False) as extras_interface: @@ -1580,8 +1604,22 @@ Requested path was: {f} outputs=[extras_image], ) - modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_txt2img, txt2img_paste_fields, generation_info, 'switch_to_txt2img') - modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_img2img, img2img_paste_fields, generation_info, 'switch_to_img2img_img2img') + settings_map = { + 'sd_hypernetwork': 'Hypernet', + 'CLIP_stop_at_last_layers': 'Clip skip', + 'sd_model_checkpoint': 'Model hash', + } + + settings_paste_fields = [ + (component_dict[k], lambda d, k=k, v=v: apply_setting(k, d.get(v, None))) + for k, v in settings_map.items() + ] + + modules.generation_parameters_copypaste.connect_paste(txt2img_paste, txt2img_paste_fields + settings_paste_fields, txt2img_prompt) + modules.generation_parameters_copypaste.connect_paste(img2img_paste, img2img_paste_fields + settings_paste_fields, img2img_prompt) + + modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_txt2img, txt2img_paste_fields + settings_paste_fields, generation_info, 'switch_to_txt2img') + modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_img2img, img2img_paste_fields + settings_paste_fields, generation_info, 'switch_to_img2img_img2img') ui_config_file = cmd_opts.ui_config_file ui_settings = {} -- cgit v1.2.3 From c344ba3b325459abbf9b0df2c1b18f7bf99805b2 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 14 Oct 2022 20:31:49 +0300 Subject: add option to read generation params for learning previews from txt2img --- 
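This commit wires the txt2img preview parameters into both training loops: when the new checkbox is enabled, preview images generated during training use the prompt, negative prompt, sampler, CFG scale, seed and size read from the txt2img tab; otherwise the loop falls back to the current dataset caption at 20 steps. A compact sketch of that branch, with field names taken from the diff but the helper itself purely illustrative:

def configure_preview(p, cond_text, preview_from_txt2img, params):
    # params holds the values read from the txt2img tab, packed here as a
    # tuple (prompt, negative_prompt, steps, sampler_index, cfg_scale,
    # seed, width, height) -- this packing is an assumption for the sketch.
    if preview_from_txt2img:
        (p.prompt, p.negative_prompt, p.steps, p.sampler_index,
         p.cfg_scale, p.seed, p.width, p.height) = params
    else:
        p.prompt = cond_text  # caption of the last training example
        p.steps = 20
    return p.prompt  # later displayed as the preview text
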
modules/hypernetworks/hypernetwork.py | 21 ++++++++++++++++----- modules/textual_inversion/textual_inversion.py | 25 ++++++++++++++++++------- modules/ui.py | 20 +++++++++++++++++--- 3 files changed, 51 insertions(+), 15 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index f1248bb7..e5cb1817 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -180,7 +180,7 @@ def attention_CrossAttention_forward(self, x, context=None, mask=None): return self.to_out(out) -def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, steps, create_image_every, save_hypernetwork_every, template_file, preview_image_prompt): +def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): assert hypernetwork_name, 'hypernetwork not selected' path = shared.hypernetworks.get(hypernetwork_name, None) @@ -265,20 +265,31 @@ def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, if hypernetwork.step > 0 and images_dir is not None and hypernetwork.step % create_image_every == 0: last_saved_image = os.path.join(images_dir, f'{hypernetwork_name}-{hypernetwork.step}.png') - preview_text = entry.cond_text if preview_image_prompt == "" else preview_image_prompt - optimizer.zero_grad() shared.sd_model.cond_stage_model.to(devices.device) shared.sd_model.first_stage_model.to(devices.device) p = processing.StableDiffusionProcessingTxt2Img( sd_model=shared.sd_model, - prompt=preview_text, - steps=20, do_not_save_grid=True, do_not_save_samples=True, ) + if preview_from_txt2img: + p.prompt = preview_prompt + p.negative_prompt = preview_negative_prompt + p.steps = preview_steps + p.sampler_index = preview_sampler_index + p.cfg_scale = preview_cfg_scale + p.seed = preview_seed + p.width = preview_width + p.height = preview_height + else: + p.prompt = entry.cond_text + p.steps = 20 + + preview_text = p.prompt + processed = processing.process_images(p) image = processed.images[0] if len(processed.images)>0 else None diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index fa0e33a2..3d835358 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -172,7 +172,7 @@ def create_embedding(name, num_vectors_per_token, init_text='*'): return fn -def train_embedding(embedding_name, learn_rate, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_image_prompt): +def train_embedding(embedding_name, learn_rate, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): assert embedding_name, 'embedding not selected' shared.state.textinfo = "Initializing textual inversion training..." 
@@ -259,18 +259,29 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, traini if embedding.step > 0 and images_dir is not None and embedding.step % create_image_every == 0: last_saved_image = os.path.join(images_dir, f'{embedding_name}-{embedding.step}.png') - preview_text = entry.cond_text if preview_image_prompt == "" else preview_image_prompt - p = processing.StableDiffusionProcessingTxt2Img( sd_model=shared.sd_model, - prompt=preview_text, - steps=20, - height=training_height, - width=training_width, do_not_save_grid=True, do_not_save_samples=True, ) + if preview_from_txt2img: + p.prompt = preview_prompt + p.negative_prompt = preview_negative_prompt + p.steps = preview_steps + p.sampler_index = preview_sampler_index + p.cfg_scale = preview_cfg_scale + p.seed = preview_seed + p.width = preview_width + p.height = preview_height + else: + p.prompt = entry.cond_text + p.steps = 20 + p.width = training_width + p.height = training_height + + preview_text = p.prompt + processed = processing.process_images(p) image = processed.images[0] diff --git a/modules/ui.py b/modules/ui.py index 828bfeea..4a04c2cc 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -711,6 +711,18 @@ def create_ui(wrap_gradio_gpu_call): (firstphase_width, "First pass size-1"), (firstphase_height, "First pass size-2"), ] + + txt2img_preview_params = [ + txt2img_prompt, + txt2img_negative_prompt, + steps, + sampler_index, + cfg_scale, + seed, + width, + height, + ] + token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter]) with gr.Blocks(analytics_enabled=False) as img2img_interface: @@ -1162,7 +1174,7 @@ def create_ui(wrap_gradio_gpu_call): create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0) save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0) save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True) - preview_image_prompt = gr.Textbox(label='Preview prompt', value="") + preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) 
from txt2img tab when making previews', value=False) with gr.Row(): interrupt_training = gr.Button(value="Interrupt") @@ -1240,7 +1252,8 @@ def create_ui(wrap_gradio_gpu_call): save_embedding_every, template_file, save_image_with_stored_embedding, - preview_image_prompt, + preview_from_txt2img, + *txt2img_preview_params, ], outputs=[ ti_output, @@ -1260,7 +1273,8 @@ def create_ui(wrap_gradio_gpu_call): create_image_every, save_embedding_every, template_file, - preview_image_prompt, + preview_from_txt2img, + *txt2img_preview_params, ], outputs=[ ti_output, -- cgit v1.2.3 From c250cb289c97fe303cef69064bf45899406f6a40 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 14 Oct 2022 22:01:49 +0300 Subject: change checkpoint merger to work in a more obvious way remove sigmoid and inverse sigmoid because they just did the same thing as weighed sum only with changed multiplier --- javascript/hints.js | 4 ++-- modules/extras.py | 24 +++++------------------- modules/ui.py | 4 ++-- 3 files changed, 9 insertions(+), 23 deletions(-) (limited to 'modules/ui.py') diff --git a/javascript/hints.js b/javascript/hints.js index af010a59..8fec907d 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -87,8 +87,8 @@ titles = { "Quicksettings list": "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.", - "Weighted Sum": "Result = A * (1 - M) + B * M", - "Add difference": "Result = A + (B - C) * (1 - M)", + "Weighted sum": "Result = A * (1 - M) + B * M", + "Add difference": "Result = A + (B - C) * M", } diff --git a/modules/extras.py b/modules/extras.py index 2e7b3751..f2f5a7b0 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -159,24 +159,12 @@ def run_pnginfo(image): return '', geninfo, info -def run_modelmerger(primary_model_name, secondary_model_name, teritary_model_name, interp_method, interp_amount, save_as_half, custom_name): - # Linear interpolation (https://en.wikipedia.org/wiki/Linear_interpolation) +def run_modelmerger(primary_model_name, secondary_model_name, teritary_model_name, interp_method, multiplier, save_as_half, custom_name): def weighted_sum(theta0, theta1, theta2, alpha): return ((1 - alpha) * theta0) + (alpha * theta1) - # Smoothstep (https://en.wikipedia.org/wiki/Smoothstep) - def sigmoid(theta0, theta1, theta2, alpha): - alpha = alpha * alpha * (3 - (2 * alpha)) - return theta0 + ((theta1 - theta0) * alpha) - - # Inverse Smoothstep (https://en.wikipedia.org/wiki/Smoothstep) - def inv_sigmoid(theta0, theta1, theta2, alpha): - import math - alpha = 0.5 - math.sin(math.asin(1.0 - 2.0 * alpha) / 3.0) - return theta0 + ((theta1 - theta0) * alpha) - def add_difference(theta0, theta1, theta2, alpha): - return theta0 + (theta1 - theta2) * (1.0 - alpha) + return theta0 + (theta1 - theta2) * alpha primary_model_info = sd_models.checkpoints_list[primary_model_name] secondary_model_info = sd_models.checkpoints_list[secondary_model_name] @@ -198,9 +186,7 @@ def run_modelmerger(primary_model_name, secondary_model_name, teritary_model_nam theta_2 = None theta_funcs = { - "Weighted Sum": weighted_sum, - "Sigmoid": sigmoid, - "Inverse Sigmoid": inv_sigmoid, + "Weighted sum": weighted_sum, "Add difference": add_difference, } theta_func = theta_funcs[interp_method] @@ -213,7 +199,7 @@ def run_modelmerger(primary_model_name, secondary_model_name, teritary_model_nam if t2 is None: t2 = 
torch.zeros_like(theta_0[key]) - theta_0[key] = theta_func(theta_0[key], theta_1[key], t2, (float(1.0) - interp_amount)) # Need to reverse the interp_amount to match the desired mix ration in the merged checkpoint + theta_0[key] = theta_func(theta_0[key], theta_1[key], t2, multiplier) if save_as_half: theta_0[key] = theta_0[key].half() @@ -227,7 +213,7 @@ def run_modelmerger(primary_model_name, secondary_model_name, teritary_model_nam ckpt_dir = shared.cmd_opts.ckpt_dir or sd_models.model_path - filename = primary_model_info.model_name + '_' + str(round(interp_amount, 2)) + '-' + secondary_model_info.model_name + '_' + str(round((float(1.0) - interp_amount), 2)) + '-' + interp_method.replace(" ", "_") + '-merged.ckpt' + filename = primary_model_info.model_name + '_' + str(round(1-multiplier, 2)) + '-' + secondary_model_info.model_name + '_' + str(round(multiplier, 2)) + '-' + interp_method.replace(" ", "_") + '-merged.ckpt' filename = filename if custom_name == '' else (custom_name + '.ckpt') output_modelname = os.path.join(ckpt_dir, filename) diff --git a/modules/ui.py b/modules/ui.py index 4a04c2cc..a08ffc9b 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1101,8 +1101,8 @@ def create_ui(wrap_gradio_gpu_call): secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") custom_name = gr.Textbox(label="Custom Name (Optional)") - interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Interpolation amount (1 - M)', value=0.3) - interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid", "Inverse Sigmoid", "Add difference"], value="Weighted Sum", label="Interpolation Method") + interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3) + interp_method = gr.Radio(choices=["Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method") save_as_half = gr.Checkbox(value=False, label="Save as float16") modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary') -- cgit v1.2.3 From 03d62538aebeff51713619fe808c953bdb70193d Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 14 Oct 2022 22:43:55 +0300 Subject: remove duplicate code for log loss, add step, make it read from options rather than gradio input --- modules/hypernetworks/hypernetwork.py | 20 ++++-------- modules/shared.py | 3 +- modules/textual_inversion/textual_inversion.py | 44 ++++++++++++++++++-------- modules/ui.py | 3 -- 4 files changed, 38 insertions(+), 32 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index edb8cba1..59c7ac6e 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -15,6 +15,7 @@ import torch from torch import einsum from einops import rearrange, repeat import modules.textual_inversion.dataset +from modules.textual_inversion import textual_inversion from modules.textual_inversion.learn_schedule import LearnRateScheduler @@ -210,7 +211,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." 
with torch.autocast("cuda"): - ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=512, height=512, repeats=1, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True) + ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=512, height=512, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True) if unload: shared.sd_model.cond_stage_model.to(devices.cpu) @@ -263,19 +264,10 @@ def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name}-{hypernetwork.step}.pt') hypernetwork.save(last_saved_file) - if write_csv_every > 0 and hypernetwork_dir is not None and hypernetwork.step % write_csv_every == 0: - write_csv_header = False if os.path.exists(os.path.join(hypernetwork_dir, "hypernetwork_loss.csv")) else True - - with open(os.path.join(hypernetwork_dir, "hypernetwork_loss.csv"), "a+") as fout: - - csv_writer = csv.DictWriter(fout, fieldnames=["step", "loss", "learn_rate"]) - - if write_csv_header: - csv_writer.writeheader() - - csv_writer.writerow({"step": hypernetwork.step, - "loss": f"{losses.mean():.7f}", - "learn_rate": scheduler.learn_rate}) + textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), { + "loss": f"{losses.mean():.7f}", + "learn_rate": scheduler.learn_rate + }) if hypernetwork.step > 0 and images_dir is not None and hypernetwork.step % create_image_every == 0: last_saved_image = os.path.join(images_dir, f'{hypernetwork_name}-{hypernetwork.step}.png') diff --git a/modules/shared.py b/modules/shared.py index 695d29b6..d41a7ab3 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -236,7 +236,8 @@ options_templates.update(options_section(('training', "Training"), { "unload_models_when_training": OptionInfo(False, "Unload VAE and CLIP from VRAM when training"), "dataset_filename_word_regex": OptionInfo("", "Filename word regex"), "dataset_filename_join_string": OptionInfo(" ", "Filename join string"), - "training_image_repeats_per_epoch": OptionInfo(100, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}), + "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}), + "training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"), })) options_templates.update(options_section(('sd', "Stable Diffusion"), { diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 1f5ace6f..da0d77a0 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -173,6 +173,32 @@ def create_embedding(name, num_vectors_per_token, init_text='*'): return fn +def write_loss(log_directory, filename, step, epoch_len, values): + if shared.opts.training_write_csv_every == 0: + return + + if step % shared.opts.training_write_csv_every != 0: + return + + write_csv_header = False if os.path.exists(os.path.join(log_directory, filename)) else True + + with open(os.path.join(log_directory, filename), "a+", newline='') as fout: + csv_writer = csv.DictWriter(fout, 
fieldnames=["step", "epoch", "epoch_step", *(values.keys())]) + + if write_csv_header: + csv_writer.writeheader() + + epoch = step // epoch_len + epoch_step = step - epoch * epoch_len + + csv_writer.writerow({ + "step": step + 1, + "epoch": epoch + 1, + "epoch_step": epoch_step + 1, + **values, + }) + + def train_embedding(embedding_name, learn_rate, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): assert embedding_name, 'embedding not selected' @@ -257,20 +283,10 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, traini last_saved_file = os.path.join(embedding_dir, f'{embedding_name}-{embedding.step}.pt') embedding.save(last_saved_file) - if write_csv_every > 0 and log_directory is not None and embedding.step % write_csv_every == 0: - write_csv_header = False if os.path.exists(os.path.join(log_directory, "textual_inversion_loss.csv")) else True - - with open(os.path.join(log_directory, "textual_inversion_loss.csv"), "a+") as fout: - - csv_writer = csv.DictWriter(fout, fieldnames=["epoch", "epoch_step", "loss", "learn_rate"]) - - if write_csv_header: - csv_writer.writeheader() - - csv_writer.writerow({"epoch": epoch_num + 1, - "epoch_step": epoch_step - 1, - "loss": f"{losses.mean():.7f}", - "learn_rate": scheduler.learn_rate}) + write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), { + "loss": f"{losses.mean():.7f}", + "learn_rate": scheduler.learn_rate + }) if embedding.step > 0 and images_dir is not None and embedding.step % create_image_every == 0: last_saved_image = os.path.join(images_dir, f'{embedding_name}-{embedding.step}.png') diff --git a/modules/ui.py b/modules/ui.py index be4a43a7..a08ffc9b 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1172,7 +1172,6 @@ def create_ui(wrap_gradio_gpu_call): training_height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512) steps = gr.Number(label='Max steps', value=100000, precision=0) create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0) - write_csv_every = gr.Number(label='Save an csv containing the loss to log directory every N steps, 0 to disable', value=500, precision=0) save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0) save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True) preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) 
from txt2img tab when making previews', value=False) @@ -1251,7 +1250,6 @@ def create_ui(wrap_gradio_gpu_call): steps, create_image_every, save_embedding_every, - write_csv_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, @@ -1274,7 +1272,6 @@ def create_ui(wrap_gradio_gpu_call): steps, create_image_every, save_embedding_every, - write_csv_every, template_file, preview_from_txt2img, *txt2img_preview_params, -- cgit v1.2.3 From cd58e44051f658f2efb544203a92837f43786372 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 14 Oct 2022 23:17:28 +0300 Subject: disabling history - i knew it was slow as fuck but i didn't realize it would also show galleries on launch --- modules/ui.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index a08ffc9b..6d193955 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1089,7 +1089,8 @@ def create_ui(wrap_gradio_gpu_call): "t2i":txt2img_paste_fields, "i2i":img2img_paste_fields } - images_history = img_his.create_history_tabs(gr, opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict) + + #images_history = img_his.create_history_tabs(gr, opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict) with gr.Blocks() as modelmerger_interface: with gr.Row().style(equal_height=False): @@ -1486,7 +1487,7 @@ Requested path was: {f} (img2img_interface, "img2img", "img2img"), (extras_interface, "Extras", "extras"), (pnginfo_interface, "PNG Info", "pnginfo"), - (images_history, "History", "images_history"), + #(images_history, "History", "images_history"), (modelmerger_interface, "Checkpoint Merger", "modelmerger"), (train_interface, "Train", "ti"), (settings_interface, "Settings", "settings"), -- cgit v1.2.3 From 368f4cc4c73509c1968cd9defe068d8bf4ff7c4f Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 14 Oct 2022 23:19:05 +0300 Subject: set firstpass w/h to 0 by default and rever to old behavior when any are 0 --- modules/processing.py | 49 ++++++++++++++++++++++++++++++------------------- modules/ui.py | 4 ++-- 2 files changed, 32 insertions(+), 21 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/processing.py b/modules/processing.py index 100a259f..a75b9f84 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -501,17 +501,15 @@ def process_images(p: StableDiffusionProcessing) -> Processed: class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): sampler = None - firstphase_width = 0 - firstphase_height = 0 - firstphase_width_truncated = 0 - firstphase_height_truncated = 0 - def __init__(self, enable_hr=False, denoising_strength=0.75, firstphase_width=512, firstphase_height=512, **kwargs): + def __init__(self, enable_hr=False, denoising_strength=0.75, firstphase_width=0, firstphase_height=0, **kwargs): super().__init__(**kwargs) self.enable_hr = enable_hr self.denoising_strength = denoising_strength self.firstphase_width = firstphase_width self.firstphase_height = firstphase_height + self.truncate_x = 0 + self.truncate_y = 0 def init(self, all_prompts, all_seeds, all_subseeds): if self.enable_hr: @@ -520,6 +518,32 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): else: state.job_count = state.job_count * 2 + if self.firstphase_width == 0 or self.firstphase_height == 0: + desired_pixel_count = 512 * 512 + actual_pixel_count = self.width * self.height + scale = math.sqrt(desired_pixel_count / actual_pixel_count) + 
self.firstphase_width = math.ceil(scale * self.width / 64) * 64 + self.firstphase_height = math.ceil(scale * self.height / 64) * 64 + firstphase_width_truncated = int(scale * self.width) + firstphase_height_truncated = int(scale * self.height) + + else: + self.extra_generation_params["First pass size"] = f"{self.firstphase_width}x{self.firstphase_height}" + + width_ratio = self.width / self.firstphase_width + height_ratio = self.height / self.firstphase_height + + if width_ratio > height_ratio: + firstphase_width_truncated = self.firstphase_width + firstphase_height_truncated = self.firstphase_width * self.height / self.width + else: + firstphase_width_truncated = self.firstphase_height * self.width / self.height + firstphase_height_truncated = self.firstphase_height + + self.truncate_x = int(self.firstphase_width - firstphase_width_truncated) // opt_f + self.truncate_y = int(self.firstphase_height - firstphase_height_truncated) // opt_f + + def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength): self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model) @@ -528,23 +552,10 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning) return samples - self.extra_generation_params["First pass size"] = f"{self.firstphase_width}x{self.firstphase_height}" - x = create_random_tensors([opt_C, self.firstphase_height // opt_f, self.firstphase_width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning) - truncate_x = 0 - truncate_y = 0 - width_ratio = self.width/self.firstphase_width - height_ratio = self.height/self.firstphase_height - - if width_ratio > height_ratio: - truncate_y = int((self.width - self.firstphase_width) / width_ratio / height_ratio / opt_f) - - elif width_ratio < height_ratio: - truncate_x = int((self.height - self.firstphase_height) / width_ratio / height_ratio / opt_f) - - samples = samples[:, :, truncate_y//2:samples.shape[2]-truncate_y//2, truncate_x//2:samples.shape[3]-truncate_x//2] + samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2] decoded_samples = decode_first_stage(self.sd_model, samples) diff --git a/modules/ui.py b/modules/ui.py index 6d193955..a1d18be9 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -567,8 +567,8 @@ def create_ui(wrap_gradio_gpu_call): enable_hr = gr.Checkbox(label='Highres. 
fix', value=False) with gr.Row(visible=False) as hr_options: - firstphase_width = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass width", value=512) - firstphase_height = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass height", value=512) + firstphase_width = gr.Slider(minimum=0, maximum=1024, step=64, label="First pass width", value=0) + firstphase_height = gr.Slider(minimum=0, maximum=1024, step=64, label="First pass height", value=0) denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7) with gr.Row(equal_height=True): -- cgit v1.2.3 From 4dc426509918e90bf4557ecfd1f84031362360c0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 15 Oct 2022 00:21:48 +0300 Subject: rename firstpass w/h to discard old user settings --- modules/ui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/ui.py b/modules/ui.py index a1d18be9..c5d295ea 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -567,8 +567,8 @@ def create_ui(wrap_gradio_gpu_call): enable_hr = gr.Checkbox(label='Highres. fix', value=False) with gr.Row(visible=False) as hr_options: - firstphase_width = gr.Slider(minimum=0, maximum=1024, step=64, label="First pass width", value=0) - firstphase_height = gr.Slider(minimum=0, maximum=1024, step=64, label="First pass height", value=0) + firstphase_width = gr.Slider(minimum=0, maximum=1024, step=64, label="Firstpass width", value=0) + firstphase_height = gr.Slider(minimum=0, maximum=1024, step=64, label="Firstpass height", value=0) denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7) with gr.Row(equal_height=True): -- cgit v1.2.3 From acedbe67d2b8a3af99ca3b9a2f809e7a2db285d1 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 15 Oct 2022 00:43:15 +0300 Subject: bring history tab back, make it behave; it's still slow but won't fuck anything up until you use it --- javascript/images_history.js | 16 ++++++++++++---- modules/ui.py | 4 ++-- 2 files changed, 14 insertions(+), 6 deletions(-) (limited to 'modules/ui.py') diff --git a/javascript/images_history.js b/javascript/images_history.js index 3a20056b..f7d052c3 100644 --- a/javascript/images_history.js +++ b/javascript/images_history.js @@ -163,10 +163,15 @@ function images_history_init(){ for (var i in images_history_tab_list){ var tabname = images_history_tab_list[i] tab_btns[i].setAttribute("tabname", tabname); - tab_btns[i].addEventListener('click', images_history_click_tab); + + // this refreshes history upon tab switch + // until the history is known to work well, which is not the case now, we do not do this at startup + //tab_btns[i].addEventListener('click', images_history_click_tab); } - tabs_box.classList.add(images_history_tab_list[0]); - load_txt2img_button.click(); + tabs_box.classList.add(images_history_tab_list[0]); + + // same as above, at page load + //load_txt2img_button.click(); } else { setTimeout(images_history_init, 500); } @@ -182,12 +187,15 @@ document.addEventListener("DOMContentLoaded", function() { buttons.forEach(function(bnt){ bnt.addEventListener('click', images_history_click_image, true); }); + + // same as load_txt2img_button.click() above + /* var cls_btn = gradioApp().getElementById(tabname + '_images_history_gallery').querySelector("svg"); if (cls_btn){ cls_btn.addEventListener('click', function(){ gradioApp().getElementById(tabname + '_images_history_renew_page').click(); }, 
false); - } + }*/ } }); diff --git a/modules/ui.py b/modules/ui.py index c5d295ea..1bc919c7 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1090,7 +1090,7 @@ def create_ui(wrap_gradio_gpu_call): "i2i":img2img_paste_fields } - #images_history = img_his.create_history_tabs(gr, opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict) + images_history = img_his.create_history_tabs(gr, opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict) with gr.Blocks() as modelmerger_interface: with gr.Row().style(equal_height=False): @@ -1487,7 +1487,7 @@ Requested path was: {f} (img2img_interface, "img2img", "img2img"), (extras_interface, "Extras", "extras"), (pnginfo_interface, "PNG Info", "pnginfo"), - #(images_history, "History", "images_history"), + (images_history, "History", "images_history"), (modelmerger_interface, "Checkpoint Merger", "modelmerger"), (train_interface, "Train", "ti"), (settings_interface, "Settings", "settings"), -- cgit v1.2.3 From c7a86f7fe9c0b8967a87e8d709f507d2f44400d8 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 15 Oct 2022 09:24:59 +0300 Subject: add option to use batch size for training --- modules/hypernetworks/hypernetwork.py | 33 +++++++++++++++++++------- modules/textual_inversion/dataset.py | 31 ++++++++++++++---------- modules/textual_inversion/textual_inversion.py | 17 +++++++------ modules/ui.py | 3 +++ 4 files changed, 54 insertions(+), 30 deletions(-) (limited to 'modules/ui.py') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 59c7ac6e..a2b3bc0a 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -182,7 +182,21 @@ def attention_CrossAttention_forward(self, x, context=None, mask=None): return self.to_out(out) -def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): +def stack_conds(conds): + if len(conds) == 1: + return torch.stack(conds) + + # same as in reconstruct_multicond_batch + token_count = max([x.shape[0] for x in conds]) + for i in range(len(conds)): + if conds[i].shape[0] != token_count: + last_vector = conds[i][-1:] + last_vector_repeated = last_vector.repeat([token_count - conds[i].shape[0], 1]) + conds[i] = torch.vstack([conds[i], last_vector_repeated]) + + return torch.stack(conds) + +def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): assert hypernetwork_name, 'hypernetwork not selected' path = shared.hypernetworks.get(hypernetwork_name, None) @@ -211,7 +225,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." 
with torch.autocast("cuda"): - ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=512, height=512, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True) + ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=512, height=512, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size) if unload: shared.sd_model.cond_stage_model.to(devices.cpu) @@ -235,7 +249,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate) pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step) - for i, entry in pbar: + for i, entries in pbar: hypernetwork.step = i + ititial_step scheduler.apply(optimizer, hypernetwork.step) @@ -246,11 +260,12 @@ def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, break with torch.autocast("cuda"): - cond = entry.cond.to(devices.device) - x = entry.latent.to(devices.device) - loss = shared.sd_model(x.unsqueeze(0), cond)[0] + c = stack_conds([entry.cond for entry in entries]).to(devices.device) +# c = torch.vstack([entry.cond for entry in entries]).to(devices.device) + x = torch.stack([entry.latent for entry in entries]).to(devices.device) + loss = shared.sd_model(x, c)[0] del x - del cond + del c losses[hypernetwork.step % losses.shape[0]] = loss.item() @@ -292,7 +307,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, p.width = preview_width p.height = preview_height else: - p.prompt = entry.cond_text + p.prompt = entries[0].cond_text p.steps = 20 preview_text = p.prompt @@ -315,7 +330,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory,
<p>
Loss: {losses.mean():.7f}<br/>
Step: {hypernetwork.step}<br/>
-Last prompt: {html.escape(entry.cond_text)}<br/>
+Last prompt: {html.escape(entries[0].cond_text)}<br/>
Last saved embedding: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
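A note on the stack_conds helper introduced in the hypernetwork diff above: prompts of different lengths yield conditioning tensors with different token counts, so they cannot be stacked into a batch directly; the helper pads each shorter tensor by repeating its last vector until all match, as in reconstruct_multicond_batch. A minimal standalone sketch of the same padding, assuming plain torch tensors of shape [tokens, dim] (the function name here is illustrative, not part of the patch):

import torch

def pad_and_stack(conds):
    # pad every conditioning tensor to the longest token count in the batch
    # by repeating its final vector, then stack into [batch, tokens, dim]
    token_count = max(c.shape[0] for c in conds)
    padded = []
    for c in conds:
        if c.shape[0] != token_count:
            last = c[-1:]
            c = torch.vstack([c, last.repeat(token_count - c.shape[0], 1)])
        padded.append(c)
    return torch.stack(padded)

# example: a 77-token and a 154-token conditioning stacked into one batch
batch = pad_and_stack([torch.randn(77, 768), torch.randn(154, 768)])
print(batch.shape)  # torch.Size([2, 154, 768])
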
diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index 67e90afe..bd99c0cb 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -24,11 +24,12 @@ class DatasetEntry: class PersonalizedBase(Dataset): - def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, device=None, template_file=None, include_cond=False): - re_word = re.compile(shared.opts.dataset_filename_word_regex) if len(shared.opts.dataset_filename_word_regex)>0 else None + def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, device=None, template_file=None, include_cond=False, batch_size=1): + re_word = re.compile(shared.opts.dataset_filename_word_regex) if len(shared.opts.dataset_filename_word_regex) > 0 else None self.placeholder_token = placeholder_token + self.batch_size = batch_size self.width = width self.height = height self.flip = transforms.RandomHorizontalFlip(p=flip_p) @@ -78,13 +79,13 @@ class PersonalizedBase(Dataset): if include_cond: entry.cond_text = self.create_text(filename_text) - entry.cond = cond_model([entry.cond_text]).to(devices.cpu) + entry.cond = cond_model([entry.cond_text]).to(devices.cpu).squeeze(0) self.dataset.append(entry) - self.length = len(self.dataset) * repeats + self.length = len(self.dataset) * repeats // batch_size - self.initial_indexes = np.arange(self.length) % len(self.dataset) + self.initial_indexes = np.arange(len(self.dataset)) self.indexes = None self.shuffle() @@ -101,13 +102,19 @@ class PersonalizedBase(Dataset): return self.length def __getitem__(self, i): - if i % len(self.dataset) == 0: - self.shuffle() + res = [] - index = self.indexes[i % len(self.indexes)] - entry = self.dataset[index] + for j in range(self.batch_size): + position = i * self.batch_size + j + if position % len(self.indexes) == 0: + self.shuffle() - if entry.cond is None: - entry.cond_text = self.create_text(entry.filename_text) + index = self.indexes[position % len(self.indexes)] + entry = self.dataset[index] - return entry + if entry.cond is None: + entry.cond_text = self.create_text(entry.filename_text) + + res.append(entry) + + return res diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index da0d77a0..e754747e 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -199,7 +199,7 @@ def write_loss(log_directory, filename, step, epoch_len, values): }) -def train_embedding(embedding_name, learn_rate, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): +def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): assert embedding_name, 'embedding not selected' shared.state.textinfo = "Initializing textual inversion training..." 
@@ -231,7 +231,7 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, traini shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." with torch.autocast("cuda"): - ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file) + ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file, batch_size=batch_size) hijack = sd_hijack.model_hijack @@ -251,7 +251,7 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, traini optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate) pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step) - for i, entry in pbar: + for i, entries in pbar: embedding.step = i + ititial_step scheduler.apply(optimizer, embedding.step) @@ -262,10 +262,9 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, traini break with torch.autocast("cuda"): - c = cond_model([entry.cond_text]) - - x = entry.latent.to(devices.device) - loss = shared.sd_model(x.unsqueeze(0), c)[0] + c = cond_model([entry.cond_text for entry in entries]) + x = torch.stack([entry.latent for entry in entries]).to(devices.device) + loss = shared.sd_model(x, c)[0] del x losses[embedding.step % losses.shape[0]] = loss.item() @@ -307,7 +306,7 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, traini p.width = preview_width p.height = preview_height else: - p.prompt = entry.cond_text + p.prompt = entries[0].cond_text p.steps = 20 p.width = training_width p.height = training_height @@ -348,7 +347,7 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, traini
<p>
Loss: {losses.mean():.7f}<br/>
Step: {embedding.step}<br/>
-Last prompt: {html.escape(entry.cond_text)}<br/>
+Last prompt: {html.escape(entries[0].cond_text)}<br/>
Last saved embedding: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
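The dataset change in this commit applies the same batching on the data side: each __getitem__ call now collects batch_size entries, reshuffling the index order whenever a full pass over the images completes, and the training loops stack the entries' latents into one tensor per step. A rough standalone sketch of that sampling pattern (class and variable names are illustrative, not the patch itself):

import numpy as np

class BatchedSampler:
    # yields lists of batch_size items, reshuffling after each full pass
    def __init__(self, items, repeats=1, batch_size=1):
        self.items = items
        self.batch_size = batch_size
        self.length = len(items) * repeats // batch_size
        self.shuffle()

    def shuffle(self):
        self.indexes = np.random.permutation(len(self.items))

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        batch = []
        for j in range(self.batch_size):
            position = i * self.batch_size + j
            if position % len(self.indexes) == 0:
                self.shuffle()
            batch.append(self.items[self.indexes[position % len(self.indexes)]])
        return batch

# example: 5 images repeated twice, batches of 2 -> 5 batches per epoch
sampler = BatchedSampler(list("abcde"), repeats=2, batch_size=2)
print(len(sampler), sampler[0])
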
diff --git a/modules/ui.py b/modules/ui.py index 1bc919c7..45550ea8 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1166,6 +1166,7 @@ def create_ui(wrap_gradio_gpu_call): train_embedding_name = gr.Dropdown(label='Embedding', choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', choices=[x for x in shared.hypernetworks.keys()]) learn_rate = gr.Textbox(label='Learning rate', placeholder="Learning rate", value="0.005") + batch_size = gr.Number(label='Batch size', value=1, precision=0) dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images") log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion") template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt")) @@ -1244,6 +1245,7 @@ def create_ui(wrap_gradio_gpu_call): inputs=[ train_embedding_name, learn_rate, + batch_size, dataset_directory, log_directory, training_width, @@ -1268,6 +1270,7 @@ def create_ui(wrap_gradio_gpu_call): inputs=[ train_hypernetwork_name, learn_rate, + batch_size, dataset_directory, log_directory, steps, -- cgit v1.2.3 From db27b987a97fc8b7894a9dd34bd7641536f9c424 Mon Sep 17 00:00:00 2001 From: aoirusann Date: Sat, 15 Oct 2022 11:48:13 +0800 Subject: Add hint for `ctrl/alt enter` And duplicate implementations are removed --- javascript/ui.js | 10 ---------- modules/ui.py | 10 ++++++++-- script.js | 4 ++-- 3 files changed, 10 insertions(+), 14 deletions(-) (limited to 'modules/ui.py') diff --git a/javascript/ui.js b/javascript/ui.js index 0f8fe68e..56f4216f 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -187,12 +187,10 @@ onUiUpdate(function(){ if (!txt2img_textarea) { txt2img_textarea = gradioApp().querySelector("#txt2img_prompt > label > textarea"); txt2img_textarea?.addEventListener("input", () => update_token_counter("txt2img_token_button")); - txt2img_textarea?.addEventListener("keyup", (event) => submit_prompt(event, "txt2img_generate")); } if (!img2img_textarea) { img2img_textarea = gradioApp().querySelector("#img2img_prompt > label > textarea"); img2img_textarea?.addEventListener("input", () => update_token_counter("img2img_token_button")); - img2img_textarea?.addEventListener("keyup", (event) => submit_prompt(event, "img2img_generate")); } }) @@ -220,14 +218,6 @@ function update_token_counter(button_id) { token_timeout = setTimeout(() => gradioApp().getElementById(button_id)?.click(), wait_time); } -function submit_prompt(event, generate_button_id) { - if (event.altKey && event.keyCode === 13) { - event.preventDefault(); - gradioApp().getElementById(generate_button_id).click(); - return; - } -} - function restart_reload(){ document.body.innerHTML='

Reloading...

'; setTimeout(function(){location.reload()},2000)
diff --git a/modules/ui.py b/modules/ui.py
index 45550ea8..baf4c397 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -433,7 +433,10 @@ def create_toprow(is_img2img):
             with gr.Row():
                 with gr.Column(scale=80):
                     with gr.Row():
-                        prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, placeholder="Prompt", lines=2)
+                        prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=2,
+                            placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)"
+                        )
+
             with gr.Column(scale=1, elem_id="roll_col"):
                 roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
                 paste = gr.Button(value=paste_symbol, elem_id="paste")
@@ -446,7 +449,10 @@ def create_toprow(is_img2img):
         with gr.Row():
             with gr.Column(scale=8):
                 with gr.Row():
-                    negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
+                    negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=2,
+                        placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)"
+                    )
+
             with gr.Column(scale=1, elem_id="roll_col"):
                 sh = gr.Button(elem_id="sh", visible=True)
diff --git a/script.js b/script.js
index 9543cbe6..88f2c839 100644
--- a/script.js
+++ b/script.js
@@ -50,9 +50,9 @@ document.addEventListener("DOMContentLoaded", function() {
 document.addEventListener('keydown', function(e) {
     var handled = false;
     if (e.key !== undefined) {
-        if((e.key == "Enter" && (e.metaKey || e.ctrlKey))) handled = true;
+        if((e.key == "Enter" && (e.metaKey || e.ctrlKey || e.altKey))) handled = true;
     } else if (e.keyCode !== undefined) {
-        if((e.keyCode == 13 && (e.metaKey || e.ctrlKey))) handled = true;
+        if((e.keyCode == 13 && (e.metaKey || e.ctrlKey || e.altKey))) handled = true;
     }
     if (handled) {
         button = get_uiCurrentTabContent().querySelector('button[id$=_generate]');
-- cgit v1.2.3

From e8729dd0511f8410db967d9ef192645cfef1be8a Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 15 Oct 2022 12:54:23 +0300
Subject: re-apply height hacks to work with new gradio

---
 modules/ui.py |  4 ++--
 style.css     | 16 ++++++++--------
 2 files changed, 10 insertions(+), 10 deletions(-)

(limited to 'modules/ui.py')

diff --git a/modules/ui.py b/modules/ui.py
index baf4c397..9c7a67dd 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -750,10 +750,10 @@ def create_ui(wrap_gradio_gpu_call):
             with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
                 with gr.TabItem('img2img', id='img2img'):
-                    init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool=cmd_opts.gradio_img2img_tool)
+                    init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool=cmd_opts.gradio_img2img_tool).style(height=480)

                 with gr.TabItem('Inpaint', id='inpaint'):
-                    init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA")
+                    init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=480)

                     init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_base")
                     init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask")
diff --git a/style.css b/style.css
index 2306c002..c27e53f8 100644
--- a/style.css
+++ b/style.css
@@ -167,10 +167,6 @@ button{
     align-self: stretch !important;
 }

-#img2maskimg .h-60{
-    height: 30rem;
-}
-
 .overflow-hidden, .gr-panel{
     overflow: visible !important;
 }
@@ -443,10 +439,6 @@ input[type="range"]{
     --tw-bg-opacity: 0 !important;
 }

-#img2img_image div.h-60{
-    height: 480px;
-}
-
 #context-menu{
     z-index:9999;
     position:absolute;
@@ -521,3 +513,11 @@ canvas[key="mask"] {
 .row.gr-compact{
     overflow: visible;
 }
+
+#img2img_image, #img2img_image > .h-60, #img2img_image > .h-60 > div, #img2img_image > .h-60 > div > img,
+img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h-60 > div > img
+{
+    height: 480px !important;
+    max-height: 480px !important;
+    min-height: 480px !important;
+}
\ No newline at end of file
-- cgit v1.2.3

From 5967d07d1aa4e2fef031a57b1612b1ab04a3cd78 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 15 Oct 2022 13:11:28 +0300
Subject: fix new gradio failing to preserve image params

---
 modules/ui.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

(limited to 'modules/ui.py')

diff --git a/modules/ui.py b/modules/ui.py
index 9c7a67dd..de5ab929 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -7,6 +7,7 @@ import mimetypes
 import os
 import random
 import sys
+import tempfile
 import time
 import traceback
 import platform
@@ -176,6 +177,23 @@ def save_files(js_data, images, do_make_zip, index):
     return gr.File.update(value=fullfns, visible=True), '', '', plaintext_to_html(f"Saved: {filenames[0]}")

+def save_pil_to_file(pil_image, dir=None):
+    use_metadata = False
+    metadata = PngImagePlugin.PngInfo()
+    for key, value in pil_image.info.items():
+        if isinstance(key, str) and isinstance(value, str):
+            metadata.add_text(key, value)
+            use_metadata = True
+
+    file_obj = tempfile.NamedTemporaryFile(delete=False, suffix=".png", dir=dir)
+    pil_image.save(file_obj, pnginfo=(metadata if use_metadata else None))
+    return file_obj
+
+
+# override save to file function so that it also writes PNG info
+gr.processing_utils.save_pil_to_file = save_pil_to_file
+
+
 def wrap_gradio_call(func, extra_outputs=None):
     def f(*args, extra_outputs_array=extra_outputs, **kwargs):
         run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
-- cgit v1.2.3

From d3463bc59a44d62c2de8b357184c49876d84f654 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 15 Oct 2022 14:22:30 +0300
Subject: change styling for top right corner UI

made save style button not die when you cancel
---
 javascript/hints.js |  2 ++
 javascript/ui.js    |  2 +-
 modules/ui.py       | 57 ++++++++++++++++++++++++++---------------------------
 style.css           | 16 +++++++--------
 4 files changed, 39 insertions(+), 38 deletions(-)

(limited to 'modules/ui.py')

diff --git a/javascript/hints.js b/javascript/hints.js
index 8fec907d..b98012f5 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -16,6 +16,8 @@ titles = {
     "\u{1f3a8}": "Add a random artist to the prompt.",
     "\u2199\ufe0f": "Read generation parameters from prompt or last generation if prompt is empty into user interface.",
     "\u{1f4c2}": "Open images output directory",
+    "\u{1f4be}": "Save style",
+    "\u{1f4cb}": "Apply selected styles to current prompt",

     "Inpaint a part of image": "Draw a mask over an image, and the script will regenerate the masked area with content according to prompt",
     "SD upscale": "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back",
diff --git a/javascript/ui.js b/javascript/ui.js
index 56f4216f..9e1bed4c 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -141,7 +141,7 @@ function submit_img2img(){

 function ask_for_style_name(_, prompt_text, negative_prompt_text) {
     name_ = prompt('Style name:')
-    return name_ === null ? [null, null, null]: [name_, prompt_text, negative_prompt_text]
+    return [name_, prompt_text, negative_prompt_text]
 }

diff --git a/modules/ui.py b/modules/ui.py
index de5ab929..cab8ab11 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -81,6 +81,8 @@ art_symbol = '\U0001f3a8'  # 🎨
 paste_symbol = '\u2199\ufe0f'  # ↙
 folder_symbol = '\U0001f4c2'  # 📂
 refresh_symbol = '\U0001f504'  # 🔄
+save_style_symbol = '\U0001f4be'  # 💾
+apply_style_symbol = '\U0001f4cb'  # 📋


 def plaintext_to_html(text):
@@ -322,7 +324,7 @@ def visit(x, func, path=""):

 def add_style(name: str, prompt: str, negative_prompt: str):
     if name is None:
-        return [gr_show(), gr_show()]
+        return [gr_show() for x in range(4)]

     style = modules.styles.PromptStyle(name, prompt, negative_prompt)
     shared.prompt_styles.styles[style.name] = style
@@ -447,7 +449,7 @@ def create_toprow(is_img2img):
     id_part = "img2img" if is_img2img else "txt2img"

     with gr.Row(elem_id="toprow"):
-        with gr.Column(scale=4):
+        with gr.Column(scale=6):
             with gr.Row():
                 with gr.Column(scale=80):
                     with gr.Row():
@@ -455,27 +457,30 @@ def create_toprow(is_img2img):
                            placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)"
                        )

-            with gr.Column(scale=1, elem_id="roll_col"):
-                roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
-                paste = gr.Button(value=paste_symbol, elem_id="paste")
-                token_counter = gr.HTML(value="", elem_id=f"{id_part}_token_counter")
-                token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
-
-            with gr.Column(scale=10, elem_id="style_pos_col"):
-                prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
-
             with gr.Row():
-                with gr.Column(scale=8):
+                with gr.Column(scale=80):
                     with gr.Row():
                         negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=2,
                            placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)"
                        )

-            with gr.Column(scale=1, elem_id="roll_col"):
-                sh = gr.Button(elem_id="sh", visible=True)
+        with gr.Column(scale=1, elem_id="roll_col"):
+            roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
+            paste = gr.Button(value=paste_symbol, elem_id="paste")
+            save_style = gr.Button(value=save_style_symbol, elem_id="style_create")
+            prompt_style_apply = gr.Button(value=apply_style_symbol, elem_id="style_apply")

-            with gr.Column(scale=1, elem_id="style_neg_col"):
-                prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
+            token_counter = gr.HTML(value="", elem_id=f"{id_part}_token_counter")
+            token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
+
+        button_interrogate = None
+        button_deepbooru = None
+        if is_img2img:
+            with gr.Column(scale=1, elem_id="interrogate_col"):
+                button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate")
+
+                if cmd_opts.deepdanbooru:
+                    button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru")

         with gr.Column(scale=1):
             with gr.Row():
@@ -495,20 +500,14 @@ def create_toprow(is_img2img):
                 outputs=[],
             )

-        with gr.Row(scale=1):
-            if is_img2img:
-                interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate")
-                if cmd_opts.deepdanbooru:
-                    deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru")
-                else:
-                    deepbooru = None
-            else:
-                interrogate = None
-                deepbooru = None
-            prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
-            save_style = gr.Button('Create style', elem_id="style_create")
+        with gr.Row():
+            with gr.Column(scale=1, elem_id="style_pos_col"):
+                prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())))
+
+            with gr.Column(scale=1, elem_id="style_neg_col"):
+                prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())))

-    return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, deepbooru, prompt_style_apply, save_style, paste, token_counter, token_button
+    return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, token_counter, token_button


 def setup_progressbar(progressbar, preview, id_part, textinfo=None):
diff --git a/style.css b/style.css
index c27e53f8..b534f950 100644
--- a/style.css
+++ b/style.css
@@ -115,7 +115,7 @@
     padding: 0.4em 0;
 }

-#roll, #paste{
+#roll, #paste, #style_create, #style_apply{
     min-width: 2em;
     min-height: 2em;
     max-width: 2em;
@@ -126,14 +126,14 @@
     margin: 0.1em 0;
 }

-#style_apply, #style_create, #interrogate{
-    margin: 0.75em 0.25em 0.25em 0.25em;
-    min-width: 5em;
+#interrogate_col{
+    min-width: 0 !important;
+    max-width: 8em !important;
 }
-
-#style_apply, #style_create, #deepbooru{
-    margin: 0.75em 0.25em 0.25em 0.25em;
-    min-width: 5em;
+#interrogate, #deepbooru{
+    margin: 0em 0.25em 0.9em 0.25em;
+    min-width: 8em;
+    max-width: 8em;
 }

 #style_pos_col, #style_neg_col{
-- cgit v1.2.3

From 20a1f68c752f8e37492ea00911c97bfc572a6e67 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 15 Oct 2022 15:44:46 +0300
Subject: fix gadio issue with sending files between tabs

---
 modules/ui.py | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'modules/ui.py')

diff --git a/modules/ui.py b/modules/ui.py
index cab8ab11..c9b53247 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -91,6 +91,14 @@ def plaintext_to_html(text):

 def image_from_url_text(filedata):
+    if type(filedata) == dict and filedata["is_file"]:
+        filename = filedata["name"]
+        tempdir = os.path.normpath(tempfile.gettempdir())
+        normfn = os.path.normpath(filename)
+        assert normfn.startswith(tempdir), 'trying to open image file not in temporary directory'
+
+        return Image.open(filename)
+
     if type(filedata) == list:
         if len(filedata) == 0:
             return None
-- cgit v1.2.3