aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore2
-rw-r--r--extensions/put extension here.txt0
-rw-r--r--javascript/inspiration.js48
-rw-r--r--modules/inspiration.py193
-rw-r--r--modules/shared.py9
-rw-r--r--modules/ui.py14
-rw-r--r--scripts/create_inspiration_images.py57
-rw-r--r--webui.py8
8 files changed, 322 insertions, 9 deletions
diff --git a/.gitignore b/.gitignore
index 8fa05852..8d01bc6a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,3 +29,5 @@ notification.mp3
/textual_inversion
.vscode
/extensions
+ /inspiration
+
diff --git a/extensions/put extension here.txt b/extensions/put extension here.txt
deleted file mode 100644
index e69de29b..00000000
--- a/extensions/put extension here.txt
+++ /dev/null
diff --git a/javascript/inspiration.js b/javascript/inspiration.js
new file mode 100644
index 00000000..39844544
--- /dev/null
+++ b/javascript/inspiration.js
@@ -0,0 +1,48 @@
+function public_image_index_in_gallery(item, gallery){
+ var imgs = gallery.querySelectorAll("img.h-full")
+ var index;
+ var i = 0;
+ imgs.forEach(function(e){
+ if (e == item)
+ index = i;
+ i += 1;
+ });
+ var all_imgs = gallery.querySelectorAll("img")
+ if (all_imgs.length > imgs.length){
+ var num = imgs.length / 2
+ index = (index < num) ? index : (index - num)
+ }
+ return index;
+}
+
+function inspiration_selected(name, name_list){
+ var btn = gradioApp().getElementById("inspiration_select_button")
+ return [gradioApp().getElementById("inspiration_select_button").getAttribute("img-index")];
+}
+
+function inspiration_click_get_button(){
+ gradioApp().getElementById("inspiration_get_button").click();
+}
+
+var inspiration_image_click = function(){
+ var index = public_image_index_in_gallery(this, gradioApp().getElementById("inspiration_gallery"));
+ var btn = gradioApp().getElementById("inspiration_select_button");
+ btn.setAttribute("img-index", index);
+ setTimeout(function(btn){btn.click();}, 10, btn);
+}
+
+document.addEventListener("DOMContentLoaded", function() {
+ var mutationObserver = new MutationObserver(function(m){
+ var gallery = gradioApp().getElementById("inspiration_gallery")
+ if (gallery) {
+ var node = gallery.querySelector(".absolute.backdrop-blur.h-full")
+ if (node) {
+ node.style.display = "None";
+ }
+ gallery.querySelectorAll('img').forEach(function(e){
+ e.onclick = inspiration_image_click
+ });
+ }
+ });
+ mutationObserver.observe( gradioApp(), { childList:true, subtree:true });
+});
diff --git a/modules/inspiration.py b/modules/inspiration.py
new file mode 100644
index 00000000..29cf8297
--- /dev/null
+++ b/modules/inspiration.py
@@ -0,0 +1,193 @@
+import os
+import random
+import gradio
+from modules.shared import opts
+inspiration_system_path = os.path.join(opts.inspiration_dir, "system")
+def read_name_list(file, types=None, keyword=None):
+ if not os.path.exists(file):
+ return []
+ ret = []
+ f = open(file, "r")
+ line = f.readline()
+ while len(line) > 0:
+ line = line.rstrip("\n")
+ if types is not None:
+ dirname = os.path.split(line)
+ if dirname[0] in types and keyword in dirname[1].lower():
+ ret.append(line)
+ else:
+ ret.append(line)
+ line = f.readline()
+ return ret
+
+def save_name_list(file, name):
+ name_list = read_name_list(file)
+ if name not in name_list:
+ with open(file, "a") as f:
+ f.write(name + "\n")
+
+def get_types_list():
+ files = os.listdir(opts.inspiration_dir)
+ types = []
+ for x in files:
+ path = os.path.join(opts.inspiration_dir, x)
+ if x[0] == ".":
+ continue
+ if not os.path.isdir(path):
+ continue
+ if path == inspiration_system_path:
+ continue
+ types.append(x)
+ return types
+
+def get_inspiration_images(source, types, keyword):
+ keyword = keyword.strip(" ").lower()
+ get_num = int(opts.inspiration_rows_num * opts.inspiration_cols_num)
+ if source == "Favorites":
+ names = read_name_list(os.path.join(inspiration_system_path, "faverites.txt"), types, keyword)
+ names = random.sample(names, get_num) if len(names) > get_num else names
+ elif source == "Abandoned":
+ names = read_name_list(os.path.join(inspiration_system_path, "abandoned.txt"), types, keyword)
+ names = random.sample(names, get_num) if len(names) > get_num else names
+ elif source == "Exclude abandoned":
+ abandoned = read_name_list(os.path.join(inspiration_system_path, "abandoned.txt"), types, keyword)
+ all_names = []
+ for tp in types:
+ name_list = os.listdir(os.path.join(opts.inspiration_dir, tp))
+ all_names += [os.path.join(tp, x) for x in name_list if keyword in x.lower()]
+
+ if len(all_names) > get_num:
+ names = []
+ while len(names) < get_num:
+ name = random.choice(all_names)
+ if name not in abandoned:
+ names.append(name)
+ else:
+ names = all_names
+ else:
+ all_names = []
+ for tp in types:
+ name_list = os.listdir(os.path.join(opts.inspiration_dir, tp))
+ all_names += [os.path.join(tp, x) for x in name_list if keyword in x.lower()]
+ names = random.sample(all_names, get_num) if len(all_names) > get_num else all_names
+ image_list = []
+ for a in names:
+ image_path = os.path.join(opts.inspiration_dir, a)
+ images = os.listdir(image_path)
+ if len(images) > 0:
+ image_list.append((os.path.join(image_path, random.choice(images)), a))
+ else:
+ print(image_path)
+ return image_list, names
+
+def select_click(index, name_list):
+ name = name_list[int(index)]
+ path = os.path.join(opts.inspiration_dir, name)
+ images = os.listdir(path)
+ return name, [os.path.join(path, x) for x in images], ""
+
+def give_up_click(name):
+ file = os.path.join(inspiration_system_path, "abandoned.txt")
+ save_name_list(file, name)
+ return "Added to abandoned list"
+
+def collect_click(name):
+ file = os.path.join(inspiration_system_path, "faverites.txt")
+ save_name_list(file, name)
+ return "Added to favorite list"
+
+def moveout_click(name, source):
+ if source == "Abandoned":
+ file = os.path.join(inspiration_system_path, "abandoned.txt")
+ elif source == "Favorites":
+ file = os.path.join(inspiration_system_path, "faverites.txt")
+ else:
+ return None
+ name_list = read_name_list(file)
+ os.remove(file)
+ with open(file, "a") as f:
+ for a in name_list:
+ if a != name:
+ f.write(a + "\n")
+ return f"Moved out {name} from {source} list"
+
+def source_change(source):
+ if source in ["Abandoned", "Favorites"]:
+ return gradio.update(visible=True), []
+ else:
+ return gradio.update(visible=False), []
+def add_to_prompt(name, prompt):
+ name = os.path.basename(name)
+ return prompt + "," + name
+
+def clear_keyword():
+ return ""
+
+def ui(gr, opts, txt2img_prompt, img2img_prompt):
+ with gr.Blocks(analytics_enabled=False) as inspiration:
+ flag = os.path.exists(opts.inspiration_dir)
+ if flag:
+ types = get_types_list()
+ flag = len(types) > 0
+ else:
+ os.makedirs(opts.inspiration_dir)
+ if not flag:
+ gr.HTML("""
+ <div align='center' width="50%"><h2>To activate the inspiration function, you need to get "inspiration" images first. </h2><br>
+ You can create these images by running the "Create inspiration images" script in the txt2img page, <br> you can get the artists or art styles list from here<br>
+ <a href="https://github.com/pharmapsychotic/clip-interrogator/tree/main/data">https://github.com/pharmapsychotic/clip-interrogator/tree/main/data</a><br>
+ download these files, and select these files in the "Create inspiration images" script UI<br>
+ There are about 6000 artists and art styles in these files. <br>This takes several hours depending on your GPU type and how many pictures you generate for each artist/style
+ <br>I suggest at least four images for each<br><br><br>
+ <h2>You can also download generated pictures from here:</h2><br>
+ <a href="https://huggingface.co/datasets/yfszzx/inspiration">https://huggingface.co/datasets/yfszzx/inspiration</a><br>
+ unzip the file to the project directory of webui<br>
+ and restart webui, and enjoy the joy of creation!<br></div>
+ """)
+ return inspiration
+ if not os.path.exists(inspiration_system_path):
+ os.mkdir(inspiration_system_path)
+ with gr.Row():
+ with gr.Column(scale=2):
+ inspiration_gallery = gr.Gallery(show_label=False, elem_id="inspiration_gallery").style(grid=opts.inspiration_cols_num, height='auto')
+ with gr.Column(scale=1):
+ types = gr.CheckboxGroup(choices=types, value=types)
+ with gr.Row():
+ source = gr.Dropdown(choices=["All", "Favorites", "Exclude abandoned", "Abandoned"], value="Exclude abandoned", label="Source")
+ keyword = gr.Textbox("", label="Key word")
+ get_inspiration = gr.Button("Get inspiration", elem_id="inspiration_get_button")
+ name = gr.Textbox(show_label=False, interactive=False)
+ with gr.Row():
+ send_to_txt2img = gr.Button('to txt2img')
+ send_to_img2img = gr.Button('to img2img')
+ collect = gr.Button('Collect')
+ give_up = gr.Button("Don't show again")
+ moveout = gr.Button("Move out", visible=False)
+ warning = gr.HTML()
+ style_gallery = gr.Gallery(show_label=False).style(grid=2, height='auto')
+
+
+
+ with gr.Row(visible=False):
+ select_button = gr.Button('set button', elem_id="inspiration_select_button")
+ name_list = gr.State()
+
+ get_inspiration.click(get_inspiration_images, inputs=[source, types, keyword], outputs=[inspiration_gallery, name_list])
+ keyword.submit(fn=None, _js="inspiration_click_get_button", inputs=None, outputs=None)
+ source.change(source_change, inputs=[source], outputs=[moveout, style_gallery])
+ source.change(fn=clear_keyword, _js="inspiration_click_get_button", inputs=None, outputs=[keyword])
+ types.change(fn=clear_keyword, _js="inspiration_click_get_button", inputs=None, outputs=[keyword])
+
+ select_button.click(select_click, _js="inspiration_selected", inputs=[name, name_list], outputs=[name, style_gallery, warning])
+ give_up.click(give_up_click, inputs=[name], outputs=[warning])
+ collect.click(collect_click, inputs=[name], outputs=[warning])
+ moveout.click(moveout_click, inputs=[name, source], outputs=[warning])
+ moveout.click(fn=None, _js="inspiration_click_get_button", inputs=None, outputs=None)
+
+ send_to_txt2img.click(add_to_prompt, inputs=[name, txt2img_prompt], outputs=[txt2img_prompt])
+ send_to_img2img.click(add_to_prompt, inputs=[name, img2img_prompt], outputs=[img2img_prompt])
+ send_to_txt2img.click(collect_click, inputs=[name], outputs=[warning])
+ send_to_img2img.click(collect_click, inputs=[name], outputs=[warning])
+ send_to_txt2img.click(None, _js='switch_to_txt2img', inputs=None, outputs=None)
+ send_to_img2img.click(None, _js="switch_to_img2img_img2img", inputs=None, outputs=None)
+ return inspiration
diff --git a/modules/shared.py b/modules/shared.py
index b55371d3..0aaaadac 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -80,6 +80,7 @@ parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencode
parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
parser.add_argument("--api", action='store_true', help="use api=True to launch the api with the webui")
parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the api instead of the webui")
+parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
parser.add_argument("--browse-all-images", action='store_true', help="Allow browsing all images by Image Browser", default=False)
@@ -320,6 +321,13 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}),
}))
+options_templates.update(options_section(('inspiration', "Inspiration"), {
+ "inspiration_dir": OptionInfo("inspiration", "Directory of inspiration", component_args=hide_dirs),
+ "inspiration_max_samples": OptionInfo(4, "Maximum number of samples, used to determine which folders to skip when continuing to run the create script", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}),
+ "inspiration_rows_num": OptionInfo(4, "Rows of inspiration interface frame", gr.Slider, {"minimum": 4, "maximum": 16, "step": 1}),
+ "inspiration_cols_num": OptionInfo(8, "Columns of inspiration interface frame", gr.Slider, {"minimum": 4, "maximum": 16, "step": 1}),
+}))
+
options_templates.update(options_section(('images-history', "Images Browser"), {
#"images_history_reconstruct_directory": OptionInfo(False, "Reconstruct output directory structure.This can greatly improve the speed of loading , but will change the original output directory structure"),
"images_history_preload": OptionInfo(False, "Preload images at startup"),
@@ -329,7 +337,6 @@ options_templates.update(options_section(('images-history', "Images Browser"), {
}))
-
class Options:
data = None
data_labels = options_templates
diff --git a/modules/ui.py b/modules/ui.py
index 2311572c..a73175f5 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -38,7 +38,6 @@ import modules.codeformer_model
import modules.generation_parameters_copypaste
import modules.gfpgan_model
import modules.hypernetworks.ui
-import modules.images_history as img_his
import modules.ldsr_model
import modules.scripts
import modules.shared as shared
@@ -50,8 +49,9 @@ from modules.sd_hijack import model_hijack
from modules.sd_samplers import samplers, samplers_for_img2img
import modules.textual_inversion.ui
import modules.hypernetworks.ui
+import modules.images_history as images_history
+import modules.inspiration as inspiration
-import modules.images_history as img_his
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
@@ -1104,9 +1104,9 @@ def create_ui(wrap_gradio_gpu_call):
upscaling_resize_w = gr.Number(label="Width", value=512, precision=0)
upscaling_resize_h = gr.Number(label="Height", value=512, precision=0)
upscaling_crop = gr.Checkbox(label='Crop to fit', value=True)
-
+
with gr.Group():
- extras_upscaler_1 = gr.Radio(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
+ extras_upscaler_1 = gr.Radio(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers] , value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
@@ -1200,7 +1200,8 @@ def create_ui(wrap_gradio_gpu_call):
"i2i": img2img_paste_fields
}
- images_history = img_his.create_history_tabs(gr, opts, cmd_opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict)
+ browser_interface = images_history.create_history_tabs(gr, opts, cmd_opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict)
+ inspiration_interface = inspiration.ui(gr, opts, txt2img_prompt, img2img_prompt)
with gr.Blocks() as modelmerger_interface:
with gr.Row().style(equal_height=False):
@@ -1650,7 +1651,8 @@ Requested path was: {f}
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
- (images_history, "Image Browser", "images_history"),
+ (inspiration_interface, "Inspiration", "inspiration"),
+ (browser_interface , "Image Browser", "images_history"),
(modelmerger_interface, "Checkpoint Merger", "modelmerger"),
(train_interface, "Train", "ti"),
]
diff --git a/scripts/create_inspiration_images.py b/scripts/create_inspiration_images.py
new file mode 100644
index 00000000..2fd30578
--- /dev/null
+++ b/scripts/create_inspiration_images.py
@@ -0,0 +1,57 @@
+import csv, os, shutil
+import modules.scripts as scripts
+from modules import processing, shared, sd_samplers, images
+from modules.processing import Processed
+from modules.shared import opts
+import gradio
+class Script(scripts.Script):
+ def title(self):
+ return "Create inspiration images"
+
+ def show(self, is_img2img):
+ return True
+
+ def ui(self, is_img2img):
+ file = gradio.Files(label="Artist or styles name list. '.txt' files with one name per line",)
+ with gradio.Row():
+ prefix = gradio.Textbox("a painting in", label="Prompt words before artist or style name", file_count="multiple")
+ suffix= gradio.Textbox("style", label="Prompt words after artist or style name")
+ negative_prompt = gradio.Textbox("picture frame, portrait photo", label="Negative Prompt")
+ with gradio.Row():
+ batch_size = gradio.Number(1, label="Batch size")
+ batch_count = gradio.Number(2, label="Batch count")
+ return [batch_size, batch_count, prefix, suffix, negative_prompt, file]
+
+ def run(self, p, batch_size, batch_count, prefix, suffix, negative_prompt, files):
+ p.batch_size = int(batch_size)
+ p.n_iterint = int(batch_count)
+ p.negative_prompt = negative_prompt
+ p.do_not_save_samples = True
+ p.do_not_save_grid = True
+ for file in files:
+ tp = file.orig_name.split(".")[0]
+ print(tp)
+ path = os.path.join(opts.inspiration_dir, tp)
+ if not os.path.exists(path):
+ os.makedirs(path)
+ f = open(file.name, "r")
+ line = f.readline()
+ while len(line) > 0:
+ name = line.rstrip("\n").split(",")[0]
+ line = f.readline()
+ artist_path = os.path.join(path, name)
+ if not os.path.exists(artist_path):
+ os.mkdir(artist_path)
+ if len(os.listdir(artist_path)) >= opts.inspiration_max_samples:
+ continue
+ p.prompt = f"{prefix} {name} {suffix}"
+ print(p.prompt)
+ processed = processing.process_images(p)
+ for img in processed.images:
+ i = 0
+ filename = os.path.join(artist_path, format(0, "03d") + ".jpg")
+ while os.path.exists(filename):
+ i += 1
+ filename = os.path.join(artist_path, format(i, "03d") + ".jpg")
+ img.save(filename, quality=80)
+ return processed
diff --git a/webui.py b/webui.py
index b1deca1b..b5dbc6ad 100644
--- a/webui.py
+++ b/webui.py
@@ -73,6 +73,12 @@ def wrap_gradio_gpu_call(func, extra_outputs=None):
def initialize():
+ modules.scripts.load_scripts()
+ if cmd_opts.ui_debug_mode:
+ class enmpty():
+ name = None
+ shared.sd_upscalers = [enmpty()]
+ return
modelloader.cleanup_models()
modules.sd_models.setup_model()
codeformer.setup_model(cmd_opts.codeformer_models_path)
@@ -80,8 +86,6 @@ def initialize():
shared.face_restorers.append(modules.face_restoration.FaceRestoration())
modelloader.load_upscalers()
- modules.scripts.load_scripts()
-
modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetworks.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)))