From 70931652a4289e28d83869b6d10cf11e80a70345 Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Fri, 30 Sep 2022 18:02:46 -0700 Subject: [xy_grid] made -1 seed fixing apply to Var. seed too --- scripts/xy_grid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 146663b0..9c078888 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -218,7 +218,7 @@ class Script(scripts.Script): ys = process_axis(y_opt, y_values) def fix_axis_seeds(axis_opt, axis_list): - if axis_opt.label == 'Seed': + if axis_opt.label == 'Seed' or 'Var. seed': return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list] else: return axis_list -- cgit v1.2.3 From cf141157e7b49b0b3a6e57dc7aa0d1345158b4c8 Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Fri, 30 Sep 2022 22:02:29 -0700 Subject: Added X/Y plot parameters to extra_generation_params --- scripts/xy_grid.py | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'scripts') diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 9c078888..d9f8d55b 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -244,6 +244,14 @@ class Script(scripts.Script): return process_images(pc) + if not x_opt.label == 'Nothing': + p.extra_generation_params["X/Y Plot X Type"] = x_opt.label + p.extra_generation_params["X Values"] = '{' + ", ".join([f'{x}' for x in xs]) + '}' + + if not y_opt.label == 'Nothing': + p.extra_generation_params["X/Y Plot Y Type"] = y_opt.label + p.extra_generation_params["Y Values"] = '{' + ", ".join([f'{y}' for y in ys]) + '}' + processed = draw_xy_grid( p, xs=xs, -- cgit v1.2.3 From eba0c29dbc3bad8c4e32f1fa3a03dc6f9caf1f5a Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Sat, 1 Oct 2022 13:56:29 -0700 Subject: Updated xy_grid infotext formatting, parser regex --- modules/generation_parameters_copypaste.py | 2 +- scripts/xy_grid.py | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) (limited to 'scripts') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index ac1ba7f4..39d67d94 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -1,7 +1,7 @@ import re import gradio as gr -re_param_code = r"\s*([\w ]+):\s*([^,]+)(?:,|$)" +re_param_code = r"\s*([\w ]+):\s*((?:{[^}]+})|(?:[^,]+))(?:,|$)" re_param = re.compile(re_param_code) re_params = re.compile(r"^(?:" + re_param_code + "){3,}$") re_imagesize = re.compile(r"^(\d+)x(\d+)$") diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index d9f8d55b..f87c6c1f 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -245,12 +245,16 @@ class Script(scripts.Script): return process_images(pc) if not x_opt.label == 'Nothing': - p.extra_generation_params["X/Y Plot X Type"] = x_opt.label - p.extra_generation_params["X Values"] = '{' + ", ".join([f'{x}' for x in xs]) + '}' + p.extra_generation_params["XY Plot X Type"] = x_opt.label + p.extra_generation_params["X Values"] = '{' + x_values + '}' + if x_opt.label in ["Seed","Var. 
seed"] and not no_fixed_seeds: + p.extra_generation_params["Fixed X Values"] = '{' + ", ".join([str(x) for x in xs])+ '}' if not y_opt.label == 'Nothing': - p.extra_generation_params["X/Y Plot Y Type"] = y_opt.label - p.extra_generation_params["Y Values"] = '{' + ", ".join([f'{y}' for y in ys]) + '}' + p.extra_generation_params["XY Plot Y Type"] = y_opt.label + p.extra_generation_params["Y Values"] = '{' + y_values + '}' + if y_opt.label in ["Seed","Var. seed"] and not no_fixed_seeds: + p.extra_generation_params["Fixed Y Values"] = '{' + ", ".join([str(y) for y in ys])+ '}' processed = draw_xy_grid( p, -- cgit v1.2.3 From b99a4f769f11ed74df0344a23069d3858613fbef Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Sat, 1 Oct 2022 14:26:12 -0700 Subject: fixed expression error in condition --- scripts/xy_grid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index f87c6c1f..f1f54d9c 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -218,7 +218,7 @@ class Script(scripts.Script): ys = process_axis(y_opt, y_values) def fix_axis_seeds(axis_opt, axis_list): - if axis_opt.label == 'Seed' or 'Var. seed': + if axis_opt.label in ["Seed","Var. seed"]: return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list] else: return axis_list -- cgit v1.2.3 From fe6e2362e8fa5d739de6997ab155a26686d20a49 Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Sun, 2 Oct 2022 22:04:28 -0700 Subject: Update xy_grid.py Changed XY Plot infotext value keys to not be so generic. --- scripts/xy_grid.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'scripts') diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index f1f54d9c..ae011a17 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -246,15 +246,15 @@ class Script(scripts.Script): if not x_opt.label == 'Nothing': p.extra_generation_params["XY Plot X Type"] = x_opt.label - p.extra_generation_params["X Values"] = '{' + x_values + '}' + p.extra_generation_params["XY Plot X Values"] = '{' + x_values + '}' if x_opt.label in ["Seed","Var. seed"] and not no_fixed_seeds: - p.extra_generation_params["Fixed X Values"] = '{' + ", ".join([str(x) for x in xs])+ '}' + p.extra_generation_params["XY Plot Fixed X Values"] = '{' + ", ".join([str(x) for x in xs])+ '}' if not y_opt.label == 'Nothing': p.extra_generation_params["XY Plot Y Type"] = y_opt.label - p.extra_generation_params["Y Values"] = '{' + y_values + '}' + p.extra_generation_params["XY Plot Y Values"] = '{' + y_values + '}' if y_opt.label in ["Seed","Var. 
seed"] and not no_fixed_seeds: - p.extra_generation_params["Fixed Y Values"] = '{' + ", ".join([str(y) for y in ys])+ '}' + p.extra_generation_params["XY Plot Fixed Y Values"] = '{' + ", ".join([str(y) for y in ys])+ '}' processed = draw_xy_grid( p, -- cgit v1.2.3 From 4281f255d5e7c67515d619f53654be59a6fc1e13 Mon Sep 17 00:00:00 2001 From: wywywywy Date: Thu, 20 Oct 2022 15:31:09 +0100 Subject: Implemented batch count logic to Outpainting mk2 --- scripts/outpainting_mk_2.py | 40 ++++++++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 12 deletions(-) (limited to 'scripts') diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index a6468e09..02e655e9 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -242,21 +242,37 @@ class Script(scripts.Script): out = out.crop((0, 0, res_w, res_h)) return out - img = init_image - - if left > 0: - img = expand(img, left, is_left=True) - if right > 0: - img = expand(img, right, is_right=True) - if up > 0: - img = expand(img, up, is_top=True) - if down > 0: - img = expand(img, down, is_bottom=True) - - res = Processed(p, [img], initial_seed_and_info[0], initial_seed_and_info[1]) + batch_count = p.n_iter + p.n_iter = 1 + state.job_count = batch_count + all_images = [] + + for i in range(batch_count): + img = init_image + state.job = f"Batch {i + 1} out of {state.job_count}" + + if left > 0: + img = expand(img, left, is_left=True) + if right > 0: + img = expand(img, right, is_right=True) + if up > 0: + img = expand(img, up, is_top=True) + if down > 0: + img = expand(img, down, is_bottom=True) + + all_images.append(img) + + combined_grid_image = images.image_grid(all_images) + if opts.return_grid: + all_images = [combined_grid_image] + all_images + + res = Processed(p, all_images, initial_seed_and_info[0], initial_seed_and_info[1]) if opts.samples_save: images.save_image(img, p.outpath_samples, "", res.seed, p.prompt, opts.grid_format, info=res.info, p=p) + if opts.grid_save: + images.save_image(combined_grid_image, p.outpath_grids, "grid", res.seed, p.prompt, opts.grid_format, info=res.info, short_filename=not opts.grid_extended_filename, grid=True, p=p) + return res -- cgit v1.2.3 From 91efe138b35dda65e83070c14e9eb94f481fe476 Mon Sep 17 00:00:00 2001 From: wywywywy Date: Thu, 20 Oct 2022 16:02:32 +0100 Subject: Implemented batch_size logic in outpainting_mk2 --- scripts/outpainting_mk_2.py | 118 +++++++++++++++++++++++--------------------- 1 file changed, 63 insertions(+), 55 deletions(-) (limited to 'scripts') diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index 02e655e9..0377ab32 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -176,50 +176,53 @@ class Script(scripts.Script): state.job_count = (1 if left > 0 else 0) + (1 if right > 0 else 0) + (1 if up > 0 else 0) + (1 if down > 0 else 0) - def expand(init, expand_pixels, is_left=False, is_right=False, is_top=False, is_bottom=False): + def expand(init, count, expand_pixels, is_left=False, is_right=False, is_top=False, is_bottom=False): is_horiz = is_left or is_right is_vert = is_top or is_bottom pixels_horiz = expand_pixels if is_horiz else 0 pixels_vert = expand_pixels if is_vert else 0 - res_w = init.width + pixels_horiz - res_h = init.height + pixels_vert - process_res_w = math.ceil(res_w / 64) * 64 - process_res_h = math.ceil(res_h / 64) * 64 - - img = Image.new("RGB", (process_res_w, process_res_h)) - img.paste(init, (pixels_horiz if is_left else 0, pixels_vert if is_top else 0)) - mask 
= Image.new("RGB", (process_res_w, process_res_h), "white") - draw = ImageDraw.Draw(mask) - draw.rectangle(( - expand_pixels + mask_blur if is_left else 0, - expand_pixels + mask_blur if is_top else 0, - mask.width - expand_pixels - mask_blur if is_right else res_w, - mask.height - expand_pixels - mask_blur if is_bottom else res_h, - ), fill="black") - - np_image = (np.asarray(img) / 255.0).astype(np.float64) - np_mask = (np.asarray(mask) / 255.0).astype(np.float64) - noised = get_matched_noise(np_image, np_mask, noise_q, color_variation) - out = Image.fromarray(np.clip(noised * 255., 0., 255.).astype(np.uint8), mode="RGB") - - target_width = min(process_width, init.width + pixels_horiz) if is_horiz else img.width - target_height = min(process_height, init.height + pixels_vert) if is_vert else img.height - - crop_region = ( - 0 if is_left else out.width - target_width, - 0 if is_top else out.height - target_height, - target_width if is_left else out.width, - target_height if is_top else out.height, - ) - - image_to_process = out.crop(crop_region) - mask = mask.crop(crop_region) - - p.width = target_width if is_horiz else img.width - p.height = target_height if is_vert else img.height - p.init_images = [image_to_process] - p.image_mask = mask + images_to_process = [] + for n in range(count): + res_w = init[n].width + pixels_horiz + res_h = init[n].height + pixels_vert + process_res_w = math.ceil(res_w / 64) * 64 + process_res_h = math.ceil(res_h / 64) * 64 + + img = Image.new("RGB", (process_res_w, process_res_h)) + img.paste(init[n], (pixels_horiz if is_left else 0, pixels_vert if is_top else 0)) + mask = Image.new("RGB", (process_res_w, process_res_h), "white") + draw = ImageDraw.Draw(mask) + draw.rectangle(( + expand_pixels + mask_blur if is_left else 0, + expand_pixels + mask_blur if is_top else 0, + mask.width - expand_pixels - mask_blur if is_right else res_w, + mask.height - expand_pixels - mask_blur if is_bottom else res_h, + ), fill="black") + + np_image = (np.asarray(img) / 255.0).astype(np.float64) + np_mask = (np.asarray(mask) / 255.0).astype(np.float64) + noised = get_matched_noise(np_image, np_mask, noise_q, color_variation) + out = Image.fromarray(np.clip(noised * 255., 0., 255.).astype(np.uint8), mode="RGB") + + target_width = min(process_width, init[n].width + pixels_horiz) if is_horiz else img.width + target_height = min(process_height, init[n].height + pixels_vert) if is_vert else img.height + p.width = target_width if is_horiz else img.width + p.height = target_height if is_vert else img.height + + crop_region = ( + 0 if is_left else out.width - target_width, + 0 if is_top else out.height - target_height, + target_width if is_left else out.width, + target_height if is_top else out.height, + ) + mask = mask.crop(crop_region) + p.image_mask = mask + + image_to_process = out.crop(crop_region) + images_to_process.append(image_to_process) + + p.init_images = images_to_process latent_mask = Image.new("RGB", (p.width, p.height), "white") draw = ImageDraw.Draw(latent_mask) @@ -232,44 +235,49 @@ class Script(scripts.Script): p.latent_mask = latent_mask proc = process_images(p) - proc_img = proc.images[0] if initial_seed_and_info[0] is None: initial_seed_and_info[0] = proc.seed initial_seed_and_info[1] = proc.info - out.paste(proc_img, (0 if is_left else out.width - proc_img.width, 0 if is_top else out.height - proc_img.height)) - out = out.crop((0, 0, res_w, res_h)) - return out + for proc_img in proc.images: + out.paste(proc_img, (0 if is_left else out.width - proc_img.width, 0 
if is_top else out.height - proc_img.height)) + out = out.crop((0, 0, res_w, res_h)) + + return proc.images batch_count = p.n_iter + batch_size = p.batch_size p.n_iter = 1 state.job_count = batch_count - all_images = [] + all_processed_images = [] for i in range(batch_count): - img = init_image - state.job = f"Batch {i + 1} out of {state.job_count}" + imgs = [init_img] * batch_size + state.job = f"Batch {i + 1} out of {batch_count}" if left > 0: - img = expand(img, left, is_left=True) + imgs = expand(imgs, batch_size, left, is_left=True) if right > 0: - img = expand(img, right, is_right=True) + imgs = expand(imgs, batch_size, right, is_right=True) if up > 0: - img = expand(img, up, is_top=True) + imgs = expand(imgs, batch_size, up, is_top=True) if down > 0: - img = expand(img, down, is_bottom=True) + imgs = expand(imgs, batch_size, down, is_bottom=True) - all_images.append(img) + all_processed_images += imgs + + combined_grid_image = images.image_grid(all_processed_images) + all_images = all_processed_images - combined_grid_image = images.image_grid(all_images) if opts.return_grid: - all_images = [combined_grid_image] + all_images - + all_images = [combined_grid_image] + all_processed_images + res = Processed(p, all_images, initial_seed_and_info[0], initial_seed_and_info[1]) if opts.samples_save: - images.save_image(img, p.outpath_samples, "", res.seed, p.prompt, opts.grid_format, info=res.info, p=p) + for img in all_processed_images: + images.save_image(img, p.outpath_samples, "", res.seed, p.prompt, opts.grid_format, info=res.info, p=p) if opts.grid_save: images.save_image(combined_grid_image, p.outpath_grids, "grid", res.seed, p.prompt, opts.grid_format, info=res.info, short_filename=not opts.grid_extended_filename, grid=True, p=p) -- cgit v1.2.3 From 18df060c3e9252f1cf79b494e7173aff4181049a Mon Sep 17 00:00:00 2001 From: wywywywy Date: Thu, 20 Oct 2022 16:16:09 +0100 Subject: Fixed outpainting_mk2 output cropping --- scripts/outpainting_mk_2.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'scripts') diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index 0377ab32..726417e7 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -183,6 +183,7 @@ class Script(scripts.Script): pixels_vert = expand_pixels if is_vert else 0 images_to_process = [] + output_images = [] for n in range(count): res_w = init[n].width + pixels_horiz res_h = init[n].height + pixels_vert @@ -203,7 +204,7 @@ class Script(scripts.Script): np_image = (np.asarray(img) / 255.0).astype(np.float64) np_mask = (np.asarray(mask) / 255.0).astype(np.float64) noised = get_matched_noise(np_image, np_mask, noise_q, color_variation) - out = Image.fromarray(np.clip(noised * 255., 0., 255.).astype(np.uint8), mode="RGB") + output_images.append(Image.fromarray(np.clip(noised * 255., 0., 255.).astype(np.uint8), mode="RGB")) target_width = min(process_width, init[n].width + pixels_horiz) if is_horiz else img.width target_height = min(process_height, init[n].height + pixels_vert) if is_vert else img.height @@ -211,15 +212,15 @@ class Script(scripts.Script): p.height = target_height if is_vert else img.height crop_region = ( - 0 if is_left else out.width - target_width, - 0 if is_top else out.height - target_height, - target_width if is_left else out.width, - target_height if is_top else out.height, + 0 if is_left else output_images[n].width - target_width, + 0 if is_top else output_images[n].height - target_height, + target_width if is_left else 
output_images[n].width, + target_height if is_top else output_images[n].height, ) mask = mask.crop(crop_region) p.image_mask = mask - image_to_process = out.crop(crop_region) + image_to_process = output_images[n].crop(crop_region) images_to_process.append(image_to_process) p.init_images = images_to_process @@ -240,11 +241,11 @@ class Script(scripts.Script): initial_seed_and_info[0] = proc.seed initial_seed_and_info[1] = proc.info - for proc_img in proc.images: - out.paste(proc_img, (0 if is_left else out.width - proc_img.width, 0 if is_top else out.height - proc_img.height)) - out = out.crop((0, 0, res_w, res_h)) + for n in range(count): + output_images[n].paste(proc.images[n], (0 if is_left else output_images[n].width - proc.images[n].width, 0 if is_top else output_images[n].height - proc.images[n].height)) + output_images[n] = output_images[n].crop((0, 0, res_w, res_h)) - return proc.images + return output_images batch_count = p.n_iter batch_size = p.batch_size -- cgit v1.2.3 From 49533eed9e3aad19e9868ee140708baec4fd44be Mon Sep 17 00:00:00 2001 From: random_thoughtss Date: Thu, 20 Oct 2022 16:01:27 -0700 Subject: XY grid correctly re-assignes model when config changes --- modules/sd_models.py | 6 +++--- scripts/xy_grid.py | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'scripts') diff --git a/modules/sd_models.py b/modules/sd_models.py index 7072db08..fea84630 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -204,9 +204,9 @@ def load_model_weights(model, checkpoint_info): model.sd_checkpoint_info = checkpoint_info -def load_model(): +def load_model(checkpoint_info=None): from modules import lowvram, sd_hijack - checkpoint_info = select_checkpoint() + checkpoint_info = checkpoint_info or select_checkpoint() if checkpoint_info.config != shared.cmd_opts.config: print(f"Loading config from: {checkpoint_info.config}") @@ -249,7 +249,7 @@ def reload_model_weights(sd_model, info=None): if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info): checkpoints_loaded.clear() - shared.sd_model = load_model() + shared.sd_model = load_model(checkpoint_info) return shared.sd_model if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 5cca168a..eff0c942 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -89,6 +89,7 @@ def apply_checkpoint(p, x, xs): if info is None: raise RuntimeError(f"Unknown checkpoint: {x}") modules.sd_models.reload_model_weights(shared.sd_model, info) + p.sd_model = shared.sd_model def confirm_checkpoints(p, xs): -- cgit v1.2.3 From 1fc278bcc642f720484a77eb169271054d3153b1 Mon Sep 17 00:00:00 2001 From: wywywywy Date: Fri, 21 Oct 2022 02:38:24 +0100 Subject: Fixed job count & single-output grid --- scripts/outpainting_mk_2.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'scripts') diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index 726417e7..633dc119 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -172,10 +172,6 @@ class Script(scripts.Script): if down > 0: down = target_h - init_img.height - up - init_image = p.init_images[0] - - state.job_count = (1 if left > 0 else 0) + (1 if right > 0 else 0) + (1 if up > 0 else 0) + (1 if down > 0 else 0) - def expand(init, count, expand_pixels, is_left=False, is_right=False, is_top=False, is_bottom=False): is_horiz = is_left or is_right is_vert = 
is_top or is_bottom @@ -250,7 +246,7 @@ class Script(scripts.Script): batch_count = p.n_iter batch_size = p.batch_size p.n_iter = 1 - state.job_count = batch_count + state.job_count = batch_count * batch_size * ((1 if left > 0 else 0) + (1 if right > 0 else 0) + (1 if up > 0 else 0) + (1 if down > 0 else 0)) all_processed_images = [] for i in range(batch_count): @@ -268,10 +264,11 @@ class Script(scripts.Script): all_processed_images += imgs - combined_grid_image = images.image_grid(all_processed_images) all_images = all_processed_images - if opts.return_grid: + combined_grid_image = images.image_grid(all_processed_images) + unwanted_grid_because_of_img_count = len(all_processed_images) < 2 and opts.grid_only_if_multiple + if opts.return_grid and not unwanted_grid_because_of_img_count: all_images = [combined_grid_image] + all_processed_images res = Processed(p, all_images, initial_seed_and_info[0], initial_seed_and_info[1]) @@ -280,8 +277,7 @@ class Script(scripts.Script): for img in all_processed_images: images.save_image(img, p.outpath_samples, "", res.seed, p.prompt, opts.grid_format, info=res.info, p=p) - if opts.grid_save: + if opts.grid_save and not unwanted_grid_because_of_img_count: images.save_image(combined_grid_image, p.outpath_grids, "grid", res.seed, p.prompt, opts.grid_format, info=res.info, short_filename=not opts.grid_extended_filename, grid=True, p=p) return res - -- cgit v1.2.3 From 3d898044e5e55dca1698e9b5b7d3558b5b78675a Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 21 Oct 2022 17:26:30 +0300 Subject: batch_size does not affect job count --- scripts/outpainting_mk_2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index 633dc119..2afd4aa5 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -246,7 +246,7 @@ class Script(scripts.Script): batch_count = p.n_iter batch_size = p.batch_size p.n_iter = 1 - state.job_count = batch_count * batch_size * ((1 if left > 0 else 0) + (1 if right > 0 else 0) + (1 if up > 0 else 0) + (1 if down > 0 else 0)) + state.job_count = batch_count * ((1 if left > 0 else 0) + (1 if right > 0 else 0) + (1 if up > 0 else 0) + (1 if down > 0 else 0)) all_processed_images = [] for i in range(batch_count): -- cgit v1.2.3 From 7613ea12f267143ceb70a9aeb45eb20aca086e3e Mon Sep 17 00:00:00 2001 From: random_thoughtss Date: Fri, 21 Oct 2022 11:32:56 -0700 Subject: Fixed img2imgalt after inpainting update --- scripts/img2imgalt.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py index d438175c..88abc093 100644 --- a/scripts/img2imgalt.py +++ b/scripts/img2imgalt.py @@ -34,6 +34,9 @@ def find_noise_for_image(p, cond, uncond, cfg_scale, steps): sigma_in = torch.cat([sigmas[i] * s_in] * 2) cond_in = torch.cat([uncond, cond]) + image_conditioning = torch.cat([p.image_conditioning] * 2) + cond_in = {"c_concat": [image_conditioning], "c_crossattn": [cond_in]} + c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)] t = dnw.sigma_to_t(sigma_in) @@ -78,6 +81,9 @@ def find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg_scale, steps): sigma_in = torch.cat([sigmas[i - 1] * s_in] * 2) cond_in = torch.cat([uncond, cond]) + image_conditioning = torch.cat([p.image_conditioning] * 2) + cond_in = {"c_concat": [image_conditioning], "c_crossattn": [cond_in]} + c_out, c_in = 
[K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)] if i == 1: @@ -194,7 +200,7 @@ class Script(scripts.Script): p.seed = p.seed + 1 - return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning) + return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning, image_conditioning=p.image_conditioning) p.sample = sample_extra -- cgit v1.2.3 From 605d27687f433c0fefb9025aace7dc94f0ebd454 Mon Sep 17 00:00:00 2001 From: random_thoughtss Date: Tue, 25 Oct 2022 12:20:54 -0700 Subject: Added conditioning image masking to xy_grid. Use `True` and `False` to select values. --- modules/processing.py | 2 +- scripts/xy_grid.py | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'scripts') diff --git a/modules/processing.py b/modules/processing.py index 96f56b0d..23ee5e02 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -770,7 +770,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): conditioning_mask = conditioning_mask.to(image.device) conditioning_image = image - if shared.opts.inpainting_mask_image: + if getattr(self, "inpainting_mask_image", shared.opts.inpainting_mask_image): conditioning_image = conditioning_image * (1.0 - conditioning_mask) conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image)) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index eff0c942..0843adcc 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -153,6 +153,8 @@ def str_permutations(x): """dummy function for specifying it in AxisOption's type when you want to get a list of permutations""" return x +def str_to_bool(x): + return "true" in x.lower().strip() AxisOption = namedtuple("AxisOption", ["label", "type", "apply", "format_value", "confirm"]) AxisOptionImg2Img = namedtuple("AxisOptionImg2Img", ["label", "type", "apply", "format_value", "confirm"]) @@ -178,6 +180,7 @@ axis_options = [ AxisOption("Eta", float, apply_field("eta"), format_value_add_label, None), AxisOption("Clip skip", int, apply_clip_skip, format_value_add_label, None), AxisOption("Denoising", float, apply_field("denoising_strength"), format_value_add_label, None), + AxisOption("Mask Conditioning Image", str_to_bool, apply_field("inpainting_mask_image"), format_value_add_label, None), ] -- cgit v1.2.3 From 8b4f32779f28010fc8077e8fcfb85a3205b36bc2 Mon Sep 17 00:00:00 2001 From: random_thoughtss Date: Tue, 25 Oct 2022 13:15:08 -0700 Subject: Switch to a continous blend for cond. image. --- modules/processing.py | 9 ++++++--- modules/shared.py | 2 +- scripts/xy_grid.py | 5 +---- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'scripts') diff --git a/modules/processing.py b/modules/processing.py index 23ee5e02..02292bdc 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -769,9 +769,12 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): # Create another latent image, this time with a masked version of the original input. conditioning_mask = conditioning_mask.to(image.device) - conditioning_image = image - if getattr(self, "inpainting_mask_image", shared.opts.inpainting_mask_image): - conditioning_image = conditioning_image * (1.0 - conditioning_mask) + # Smoothly interpolate between the masked and unmasked latent conditioning image. 
+ conditioning_image = torch.lerp( + image, + image * (1.0 - conditioning_mask), + getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) + ) conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image)) diff --git a/modules/shared.py b/modules/shared.py index 1d0ff1a1..e0ffb824 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -320,7 +320,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}), - "inpainting_mask_image": OptionInfo(True, "Mask original image for conditioning used by inpainting model."), + "inpainting_mask_weight": OptionInfo(1.0, "Blend betweeen an unmasked and masked conditioning image for inpainting models.", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), })) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 0843adcc..f5255786 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -153,9 +153,6 @@ def str_permutations(x): """dummy function for specifying it in AxisOption's type when you want to get a list of permutations""" return x -def str_to_bool(x): - return "true" in x.lower().strip() - AxisOption = namedtuple("AxisOption", ["label", "type", "apply", "format_value", "confirm"]) AxisOptionImg2Img = namedtuple("AxisOptionImg2Img", ["label", "type", "apply", "format_value", "confirm"]) @@ -180,7 +177,7 @@ axis_options = [ AxisOption("Eta", float, apply_field("eta"), format_value_add_label, None), AxisOption("Clip skip", int, apply_clip_skip, format_value_add_label, None), AxisOption("Denoising", float, apply_field("denoising_strength"), format_value_add_label, None), - AxisOption("Mask Conditioning Image", str_to_bool, apply_field("inpainting_mask_image"), format_value_add_label, None), + AxisOption("Cond. 
Image Mask Weight", float, apply_field("inpainting_mask_weight"), format_value_add_label, None), ] -- cgit v1.2.3 From 99d728b5b18829c8a6b7b2d69c9b9327dd257896 Mon Sep 17 00:00:00 2001 From: Tony Beeman Date: Sun, 23 Oct 2022 23:16:47 -0700 Subject: Add Iterate Button and Improve PFF UI --- scripts/prompts_from_file.py | 54 ++++++++++++++++++++++++++------------------ 1 file changed, 32 insertions(+), 22 deletions(-) (limited to 'scripts') diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 1266be6f..1be22960 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -1,6 +1,7 @@ import copy import math import os +import random import sys import traceback import shlex @@ -81,32 +82,34 @@ def cmdargs(line): return res +def load_prompt_file(file): + if (file is None): + lines = [] + else: + lines = [x.strip() for x in file.decode('utf8', errors='ignore').split("\n")] + + return None, "\n".join(lines), gr.update(lines=7) + class Script(scripts.Script): def title(self): return "Prompts from file or textbox" def ui(self, is_img2img): - # This checkbox would look nicer as two tabs, but there are two problems: - # 1) There is a bug in Gradio 3.3 that prevents visibility from working on Tabs - # 2) Even with Gradio 3.3.1, returning a control (like Tabs) that can't be used as input - # causes a AttributeError: 'Tabs' object has no attribute 'preprocess' assert, - # due to the way Script assumes all controls returned can be used as inputs. - # Therefore, there's no good way to use grouping components right now, - # so we will use a checkbox! :) - checkbox_txt = gr.Checkbox(label="Show Textbox", value=False) - file = gr.File(label="File with inputs", type='bytes') - prompt_txt = gr.TextArea(label="Prompts") - checkbox_txt.change(fn=lambda x: [gr.File.update(visible = not x), gr.TextArea.update(visible = x)], inputs=[checkbox_txt], outputs=[file, prompt_txt]) - return [checkbox_txt, file, prompt_txt] - - def on_show(self, checkbox_txt, file, prompt_txt): - return [ gr.Checkbox.update(visible = True), gr.File.update(visible = not checkbox_txt), gr.TextArea.update(visible = checkbox_txt) ] - - def run(self, p, checkbox_txt, data: bytes, prompt_txt: str): - if checkbox_txt: - lines = [x.strip() for x in prompt_txt.splitlines()] - else: - lines = [x.strip() for x in data.decode('utf8', errors='ignore').split("\n")] + checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False) + + prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1) + file = gr.File(label="Upload prompt inputs", type='bytes') + + file.change(fn=load_prompt_file, inputs=[file], outputs=[file, prompt_txt, prompt_txt]) + + # We start at one line. When the text changes, we jump to seven lines, or two lines if no \n. + # We don't shrink back to 1, because that causes the control to ignore [enter], and it may + # be unclear to the user that shift-enter is needed. 
+ prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt]) + return [checkbox_iterate, file, prompt_txt] + + def run(self, p, checkbox_iterate, file, prompt_txt: str): + lines = [x.strip() for x in prompt_txt.splitlines()] lines = [x for x in lines if len(x) > 0] p.do_not_save_grid = True @@ -134,6 +137,9 @@ class Script(scripts.Script): jobs.append(args) print(f"Will process {len(lines)} lines in {job_count} jobs.") + if (checkbox_iterate and p.seed == -1): + p.seed = int(random.randrange(4294967294)) + state.job_count = job_count images = [] @@ -146,5 +152,9 @@ class Script(scripts.Script): proc = process_images(copy_p) images += proc.images + + if (checkbox_iterate): + p.seed = p.seed + (p.batch_size * p.n_iter) + - return Processed(p, images, p.seed, "") + return Processed(p, images, p.seed, "") \ No newline at end of file -- cgit v1.2.3 From 315bd7c9e8a20a28fa7fd1ddd5fddbf3b5a9b41c Mon Sep 17 00:00:00 2001 From: Keith Dreibelbis Date: Tue, 1 Nov 2022 19:45:35 -0700 Subject: prompts_from_file: allow random seeds to be preserved for the list of prompts --- scripts/prompts_from_file.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'scripts') diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 1be22960..8d4911ae 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -96,6 +96,7 @@ class Script(scripts.Script): def ui(self, is_img2img): checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False) + checkbox_iterate_batch = gr.Checkbox(label="Preserve random seed across lines (for use with \"Generate Forever\")", value=False) prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1) file = gr.File(label="Upload prompt inputs", type='bytes') @@ -106,9 +107,9 @@ class Script(scripts.Script): # We don't shrink back to 1, because that causes the control to ignore [enter], and it may # be unclear to the user that shift-enter is needed. 
prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt]) - return [checkbox_iterate, file, prompt_txt] + return [checkbox_iterate, checkbox_iterate_batch, file, prompt_txt] - def run(self, p, checkbox_iterate, file, prompt_txt: str): + def run(self, p, checkbox_iterate, checkbox_iterate_batch, file, prompt_txt: str): lines = [x.strip() for x in prompt_txt.splitlines()] lines = [x for x in lines if len(x) > 0] @@ -137,7 +138,7 @@ class Script(scripts.Script): jobs.append(args) print(f"Will process {len(lines)} lines in {job_count} jobs.") - if (checkbox_iterate and p.seed == -1): + if ((checkbox_iterate or checkbox_iterate_batch) and p.seed == -1): p.seed = int(random.randrange(4294967294)) state.job_count = job_count -- cgit v1.2.3 From 55688c48806f9383f3a56f6b9a0ab8fbf205edd2 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 2 Nov 2022 07:02:45 +0300 Subject: rename the seed option from #4146 --- scripts/prompts_from_file.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'scripts') diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 8d4911ae..d187cd9c 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -96,7 +96,7 @@ class Script(scripts.Script): def ui(self, is_img2img): checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False) - checkbox_iterate_batch = gr.Checkbox(label="Preserve random seed across lines (for use with \"Generate Forever\")", value=False) + checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False) prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1) file = gr.File(label="Upload prompt inputs", type='bytes') @@ -138,7 +138,7 @@ class Script(scripts.Script): jobs.append(args) print(f"Will process {len(lines)} lines in {job_count} jobs.") - if ((checkbox_iterate or checkbox_iterate_batch) and p.seed == -1): + if (checkbox_iterate or checkbox_iterate_batch) and p.seed == -1: p.seed = int(random.randrange(4294967294)) state.job_count = job_count @@ -154,7 +154,7 @@ class Script(scripts.Script): proc = process_images(copy_p) images += proc.images - if (checkbox_iterate): + if checkbox_iterate: p.seed = p.seed + (p.batch_size * p.n_iter) -- cgit v1.2.3 From eb5e82c7ddf5e72fa13b83bd1f12d3a07a4de1a4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 2 Nov 2022 12:45:03 +0300 Subject: do not unnecessarily run VAE one more time when saving intermediate image with hires fix --- modules/processing.py | 39 ++++++++++++++++++++------------------- modules/sd_samplers.py | 1 + modules/shared.py | 2 +- scripts/img2imgalt.py | 3 +-- 4 files changed, 23 insertions(+), 22 deletions(-) (limited to 'scripts') diff --git a/modules/processing.py b/modules/processing.py index 2dcf4879..3a364b5f 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -199,7 +199,7 @@ class StableDiffusionProcessing(): def init(self, all_prompts, all_seeds, all_subseeds): pass - def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength): + def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): raise NotImplementedError() def close(self): @@ -521,11 +521,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: shared.state.job = f"Batch {n+1} out of {p.n_iter}" with devices.autocast(): - # Only Txt2Img needs an extra argument, n, when saving 
intermediate images pre highres fix. - if isinstance(p, StableDiffusionProcessingTxt2Img): - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, n=n) - else: - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength) + samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) samples_ddim = samples_ddim.to(devices.dtype_vae) x_samples_ddim = decode_first_stage(p.sd_model, samples_ddim) @@ -653,7 +649,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.truncate_x = int(self.firstphase_width - firstphase_width_truncated) // opt_f self.truncate_y = int(self.firstphase_height - firstphase_height_truncated) // opt_f - def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, n=0): + def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model) if not self.enable_hr: @@ -666,9 +662,21 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2] + """saves image before applying hires fix, if enabled in options; takes as an arguyment either an image or batch with latent space images""" + def save_intermediate(image, index): + if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix: + return + + if not isinstance(image, Image.Image): + image = sd_samplers.sample_to_image(image, index) + + images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, suffix="-before-highres-fix") + if opts.use_scale_latent_for_hires_fix: samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear") + for i in range(samples.shape[0]): + save_intermediate(samples, i) else: decoded_samples = decode_first_stage(self.sd_model, samples) lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0) @@ -678,6 +686,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2) x_sample = x_sample.astype(np.uint8) image = Image.fromarray(x_sample) + + save_intermediate(image, i) + image = images.resize_image(0, image, self.width, self.height) image = np.array(image).astype(np.float32) / 255.0 image = np.moveaxis(image, 2, 0) @@ -689,15 +700,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples)) - # Save a copy of the image/s before doing highres fix, if applicable. - if opts.save and not self.do_not_save_samples and opts.save_images_before_highres_fix: - for i in range(self.batch_size): - # This batch's ith image. - img = sd_samplers.sample_to_image(samples, i) - # Index that accounts for both batch size and batch count. 
- ind = i + self.batch_size*n - images.save_image(img, self.outpath_samples, "", self.all_seeds[ind], self.all_prompts[ind], opts.samples_format, suffix=f"-before-highres-fix") - shared.state.nextjob() self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model) @@ -844,8 +846,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, self.image_mask) - - def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength): + def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) @@ -856,4 +857,4 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): del x devices.torch_gc() - return samples \ No newline at end of file + return samples diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index d7fa89a0..c7c414ef 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -96,6 +96,7 @@ def single_sample_to_image(sample): def sample_to_image(samples, index=0): return single_sample_to_image(samples[index]) + def samples_to_image_grid(samples): return images.image_grid([single_sample_to_image(sample) for sample in samples]) diff --git a/modules/shared.py b/modules/shared.py index ce991424..01f47e38 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -256,6 +256,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."), "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."), "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."), + "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), "export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"), @@ -322,7 +323,6 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "sd_hypernetwork_strength": OptionInfo(1.0, "Hypernetwork strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.001}), "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), - "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."), "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. 
Requires restart to apply."), "enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"), diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py index 88abc093..964b75c7 100644 --- a/scripts/img2imgalt.py +++ b/scripts/img2imgalt.py @@ -166,8 +166,7 @@ class Script(scripts.Script): if override_strength: p.denoising_strength = 1.0 - - def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength): + def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): lat = (p.init_latent.cpu().numpy() * 10).astype(int) same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st \ -- cgit v1.2.3 From 4dd898b8c15e342f817d3fb1c8dc9f2d5d111022 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 4 Nov 2022 08:38:11 +0300 Subject: do not mess with components' visibility for scripts; instead create group components and show/hide those; this will break scripts that create invisible components and rely on UI but the earlier i make this change the better --- modules/scripts.py | 34 ++++++++++++++++++---------------- scripts/custom_code.py | 2 +- scripts/outpainting_mk_2.py | 2 +- scripts/poor_mans_outpainting.py | 4 ++-- scripts/prompts_from_file.py | 10 +++++----- scripts/sd_upscale.py | 4 ++-- scripts/xy_grid.py | 8 ++++---- 7 files changed, 33 insertions(+), 31 deletions(-) (limited to 'scripts') diff --git a/modules/scripts.py b/modules/scripts.py index 533db45c..28ce07f4 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -18,6 +18,9 @@ class Script: args_to = None alwayson = False + """A gr.Group component that has all script's UI inside it""" + group = None + infotext_fields = None """if set in ui(), this is a list of pairs of gradio component + text; the text will be used when parsing infotext to set the value for the component; see ui.py's txt2img_paste_fields for an example @@ -218,8 +221,6 @@ class ScriptRunner: for control in controls: control.custom_script_source = os.path.basename(script.filename) - if not script.alwayson: - control.visible = False if script.infotext_fields is not None: self.infotext_fields += script.infotext_fields @@ -229,40 +230,41 @@ class ScriptRunner: script.args_to = len(inputs) for script in self.alwayson_scripts: - with gr.Group(): + with gr.Group() as group: create_script_ui(script, inputs, inputs_alwayson) + script.group = group + dropdown = gr.Dropdown(label="Script", elem_id="script_list", choices=["None"] + self.titles, value="None", type="index") dropdown.save_to_config = True inputs[0] = dropdown for script in self.selectable_scripts: - create_script_ui(script, inputs, inputs_alwayson) + with gr.Group(visible=False) as group: + create_script_ui(script, inputs, inputs_alwayson) + + script.group = group def select_script(script_index): - if 0 < script_index <= len(self.selectable_scripts): - script = self.selectable_scripts[script_index-1] - args_from = script.args_from - args_to = script.args_to - else: - args_from = 0 - args_to = 0 + selected_script = self.selectable_scripts[script_index - 1] if script_index>0 else None - return [ui.gr_show(True if i == 0 else args_from <= i < args_to or is_alwayson) for i, is_alwayson in enumerate(inputs_alwayson)] + return [gr.update(visible=selected_script == s) for s in self.selectable_scripts] def init_field(title): + """called when an initial value is set from ui-config.json to show script's UI 
components""" + if title == 'None': return + script_index = self.titles.index(title) - script = self.selectable_scripts[script_index] - for i in range(script.args_from, script.args_to): - inputs[i].visible = True + self.selectable_scripts[script_index].group.visible = True dropdown.init_field = init_field + dropdown.change( fn=select_script, inputs=[dropdown], - outputs=inputs + outputs=[script.group for script in self.selectable_scripts] ) return inputs diff --git a/scripts/custom_code.py b/scripts/custom_code.py index a9b10c09..22e7b77a 100644 --- a/scripts/custom_code.py +++ b/scripts/custom_code.py @@ -14,7 +14,7 @@ class Script(scripts.Script): return cmd_opts.allow_code def ui(self, is_img2img): - code = gr.Textbox(label="Python code", visible=False, lines=1) + code = gr.Textbox(label="Python code", lines=1) return [code] diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index 2afd4aa5..cf71cb92 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -132,7 +132,7 @@ class Script(scripts.Script): info = gr.HTML("

Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8

") pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128) - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, visible=False) + mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8) direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down']) noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0) color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05) diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py index b0469110..ea45beb0 100644 --- a/scripts/poor_mans_outpainting.py +++ b/scripts/poor_mans_outpainting.py @@ -22,8 +22,8 @@ class Script(scripts.Script): return None pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128) - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, visible=False) - inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", visible=False) + mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4) + inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index") direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down']) return [pixels, mask_blur, inpainting_fill, direction] diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index d187cd9c..3388bc77 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -83,13 +83,14 @@ def cmdargs(line): def load_prompt_file(file): - if (file is None): + if file is None: lines = [] else: lines = [x.strip() for x in file.decode('utf8', errors='ignore').split("\n")] return None, "\n".join(lines), gr.update(lines=7) + class Script(scripts.Script): def title(self): return "Prompts from file or textbox" @@ -107,9 +108,9 @@ class Script(scripts.Script): # We don't shrink back to 1, because that causes the control to ignore [enter], and it may # be unclear to the user that shift-enter is needed. prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt]) - return [checkbox_iterate, checkbox_iterate_batch, file, prompt_txt] + return [checkbox_iterate, checkbox_iterate_batch, prompt_txt] - def run(self, p, checkbox_iterate, checkbox_iterate_batch, file, prompt_txt: str): + def run(self, p, checkbox_iterate, checkbox_iterate_batch, prompt_txt: str): lines = [x.strip() for x in prompt_txt.splitlines()] lines = [x for x in lines if len(x) > 0] @@ -157,5 +158,4 @@ class Script(scripts.Script): if checkbox_iterate: p.seed = p.seed + (p.batch_size * p.n_iter) - - return Processed(p, images, p.seed, "") \ No newline at end of file + return Processed(p, images, p.seed, "") diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index cb37ff7e..01074291 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -18,8 +18,8 @@ class Script(scripts.Script): def ui(self, is_img2img): info = gr.HTML("

Will upscale the image to twice the dimensions; use width and height sliders to set tile size

") - overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, visible=False) - upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", visible=False) + overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64) + upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index") return [info, overlap, upscaler_index] diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index f5255786..417ed0d4 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -263,12 +263,12 @@ class Script(scripts.Script): current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img] with gr.Row(): - x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, visible=False, type="index", elem_id="x_type") - x_values = gr.Textbox(label="X values", visible=False, lines=1) + x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id="x_type") + x_values = gr.Textbox(label="X values", lines=1) with gr.Row(): - y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, visible=False, type="index", elem_id="y_type") - y_values = gr.Textbox(label="Y values", visible=False, lines=1) + y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, type="index", elem_id="y_type") + y_values = gr.Textbox(label="Y values", lines=1) draw_legend = gr.Checkbox(label='Draw legend', value=True) include_lone_images = gr.Checkbox(label='Include Separate Images', value=False) -- cgit v1.2.3 From 4796db85b5315ea74c4f795b5d85384c8f521a3f Mon Sep 17 00:00:00 2001 From: byzod Date: Sun, 6 Nov 2022 10:12:57 +0800 Subject: ignores file format settings for grids --- scripts/prompt_matrix.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index e49c9b20..fd314b55 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -82,6 +82,6 @@ class Script(scripts.Script): processed.images.insert(0, grid) if opts.grid_save: - images.save_image(processed.images[0], p.outpath_grids, "prompt_matrix", prompt=original_prompt, seed=processed.seed, grid=True, p=p) + images.save_image(processed.images[0], p.outpath_grids, "prompt_matrix", extension=opts.grid_format, prompt=original_prompt, seed=processed.seed, grid=True, p=p) return processed -- cgit v1.2.3 From 9cc48fee4859908deefbb917b9521dc8aa43a89e Mon Sep 17 00:00:00 2001 From: byzod Date: Sun, 6 Nov 2022 10:15:02 +0800 Subject: fix scripts ignores file format settings for grids --- scripts/xy_grid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 417ed0d4..45a78db2 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -393,6 +393,6 @@ class Script(scripts.Script): ) if opts.grid_save: - images.save_image(processed.images[0], p.outpath_grids, "xy_grid", prompt=p.prompt, seed=processed.seed, grid=True, p=p) + images.save_image(processed.images[0], p.outpath_grids, "xy_grid", extension=opts.grid_format, prompt=p.prompt, seed=processed.seed, grid=True, p=p) return processed 
-- cgit v1.2.3 From 81f2575df91a50e4aa9ca816e02e3f77342eedc8 Mon Sep 17 00:00:00 2001 From: Liam Date: Wed, 9 Nov 2022 15:24:31 -0500 Subject: updating the displayed generation info when user clicks images in the gallery. feature request 4415 --- javascript/ui.js | 10 +++++++++- modules/ui.py | 20 ++++++++++++++++++++ scripts/prompt_matrix.py | 2 ++ scripts/prompts_from_file.py | 6 +++++- 4 files changed, 36 insertions(+), 2 deletions(-) (limited to 'scripts') diff --git a/javascript/ui.js b/javascript/ui.js index 95cfd106..443d1642 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -179,9 +179,17 @@ onUiUpdate(function(){ img2img_textarea = gradioApp().querySelector("#img2img_prompt > label > textarea"); img2img_textarea?.addEventListener("input", () => update_token_counter("img2img_token_button")); } + if (!txt2img_gallery) { + txt2img_gallery = gradioApp().querySelector('#txt2img_gallery') + txt2img_gallery?.addEventListener('click', () => gradioApp().getElementById("txt2img_generation_info_button").click()); + } + if (!img2img_gallery) { + img2img_gallery = gradioApp().querySelector('#img2img_gallery') + img2img_gallery?.addEventListener('click', () => gradioApp().getElementById("img2img_generation_info_button").click()); + } }) -let txt2img_textarea, img2img_textarea = undefined; +let txt2img_textarea, img2img_textarea, txt2img_gallery, img2img_gallery = undefined; let wait_time = 800 let token_timeout; diff --git a/modules/ui.py b/modules/ui.py index 7ea1177f..756499d1 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -566,6 +566,17 @@ def apply_setting(key, value): return value +def update_generation_info(args): + generation_info, html_info, img_index = args + try: + generation_info = json.loads(generation_info) + return plaintext_to_html(generation_info["infotexts"][img_index]) + except Exception: + pass + # if the json parse or anything else fails, just return the old html_info + return html_info + + def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_id): def refresh(): refresh_method() @@ -638,6 +649,15 @@ Requested path was: {f} with gr.Group(): html_info = gr.HTML() generation_info = gr.Textbox(visible=False) + if tabname == 'txt2img' or tabname == 'img2img': + generation_info_button = gr.Button(visible=False, elem_id=f"{tabname}_generation_info_button") + generation_info_button.click( + fn=update_generation_info, + _js="(x, y) => [x, y, selected_gallery_index()]", + inputs=[generation_info, html_info], + outputs=[html_info], + preprocess=False + ) save.click( fn=wrap_gradio_call(save_files), diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index e49c9b20..4d1e152d 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -80,6 +80,8 @@ class Script(scripts.Script): grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2)) grid = images.draw_prompt_matrix(grid, p.width, p.height, prompt_matrix_parts) processed.images.insert(0, grid) + processed.index_of_first_image = 1 + processed.infotexts.insert(0, processed.infotexts[0]) if opts.grid_save: images.save_image(processed.images[0], p.outpath_grids, "prompt_matrix", prompt=original_prompt, seed=processed.seed, grid=True, p=p) diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 3388bc77..32fe6bdb 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -145,6 +145,8 @@ class Script(scripts.Script): state.job_count = job_count images = [] + all_prompts = [] + infotexts = 
-- cgit v1.2.3


From cdc8020d13c5eef099c609b0a911ccf3568afc0d Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 19 Nov 2022 12:01:51 +0300
Subject: change StableDiffusionProcessing to internally use sampler name instead of sampler index

---
 modules/api/api.py                             | 26 ++++++++---------------
 modules/hypernetworks/hypernetwork.py          |  4 ++--
 modules/images.py                              |  2 +-
 modules/img2img.py                             |  4 ++--
 modules/processing.py                          | 29 +++++++++++---------------
 modules/sd_samplers.py                         | 13 +++++++++---
 modules/textual_inversion/textual_inversion.py |  4 ++--
 modules/txt2img.py                             |  3 ++-
 modules/ui.py                                  |  2 +-
 scripts/img2imgalt.py                          |  4 ++--
 scripts/xy_grid.py                             | 12 +++++------
 11 files changed, 49 insertions(+), 54 deletions(-)

(limited to 'scripts')

diff --git a/modules/api/api.py b/modules/api/api.py
index 596a6616..0eccccbb 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -6,9 +6,9 @@ from threading import Lock
 from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image
 from fastapi import APIRouter, Depends, FastAPI, HTTPException
 import modules.shared as shared
+from modules import sd_samplers
 from modules.api.models import *
 from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
-from modules.sd_samplers import all_samplers
 from modules.extras import run_extras, run_pnginfo
 from PIL import PngImagePlugin
 from modules.sd_models import checkpoints_list
@@ -25,8 +25,12 @@ def upscaler_to_index(name: str):
         raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be on of these: {' , '.join([x.name for x in sd_upscalers])}")
 
 
-sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None)
+def validate_sampler_name(name):
+    config = sd_samplers.all_samplers_map.get(name, None)
+    if config is None:
+        raise HTTPException(status_code=404, detail="Sampler not found")
+    return name
 
 def setUpscalers(req: dict):
     reqDict = vars(req)
@@ -82,14 +86,9 @@ class Api:
         self.app.add_api_route("/sdapi/v1/artists", self.get_artists, methods=["GET"], response_model=List[ArtistItem])
 
     def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
-        sampler_index = sampler_to_index(txt2imgreq.sampler_index)
-
-        if sampler_index is None:
-            raise HTTPException(status_code=404, detail="Sampler not found")
-
         populate = txt2imgreq.copy(update={ # Override __init__ params
             "sd_model": shared.sd_model,
-            "sampler_index": sampler_index[0],
+            "sampler_name": validate_sampler_name(txt2imgreq.sampler_index),
             "do_not_save_samples": True,
             "do_not_save_grid": True
             }
@@ -109,12 +108,6 @@ class Api:
         return TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js())
 
     def img2imgapi(self, img2imgreq: StableDiffusionImg2ImgProcessingAPI):
-        sampler_index = sampler_to_index(img2imgreq.sampler_index)
-
-        if sampler_index is None:
-            raise HTTPException(status_code=404, detail="Sampler not found")
-
-
         init_images = img2imgreq.init_images
         if init_images is None:
             raise HTTPException(status_code=404, detail="Init image not found")
@@ -123,10 +116,9 @@ class Api:
         if mask:
             mask = decode_base64_to_image(mask)
 
-
         populate = img2imgreq.copy(update={ # Override __init__ params
             "sd_model": shared.sd_model,
-            "sampler_index": sampler_index[0],
+            "sampler_name": validate_sampler_name(img2imgreq.sampler_index),
             "do_not_save_samples": True,
             "do_not_save_grid": True,
             "mask": mask
@@ -272,7 +264,7 @@ class Api:
         return vars(shared.cmd_opts)
 
     def get_samplers(self):
-        return [{"name":sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in all_samplers]
+        return [{"name":sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in sd_samplers.all_samplers]
 
     def get_upscalers(self):
         upscalers = []
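The validate_sampler_name helper added above resolves samplers by display name rather than by list index; it presumes sd_samplers exposes an all_samplers_map keyed by name. A minimal sketch of that pattern with a stand-in sampler table (the SamplerData entries are illustrative, not the module's real contents):

from collections import namedtuple

from fastapi import HTTPException

# stand-in for modules.sd_samplers: a sampler list plus a name-keyed map
SamplerData = namedtuple("SamplerData", ["name", "constructor", "aliases", "options"])
all_samplers = [
    SamplerData("Euler a", None, ["k_euler_a"], {}),
    SamplerData("DDIM", None, ["ddim"], {}),
]
all_samplers_map = {x.name: x for x in all_samplers}

def validate_sampler_name(name):
    # unknown names are rejected with the same 404 the API returns
    config = all_samplers_map.get(name, None)
    if config is None:
        raise HTTPException(status_code=404, detail="Sampler not found")
    return name

print(validate_sampler_name("DDIM"))  # -> DDIM
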
found") @@ -123,10 +116,9 @@ class Api: if mask: mask = decode_base64_to_image(mask) - populate = img2imgreq.copy(update={ # Override __init__ params "sd_model": shared.sd_model, - "sampler_index": sampler_index[0], + "sampler_name": validate_sampler_name(img2imgreq.sampler_index), "do_not_save_samples": True, "do_not_save_grid": True, "mask": mask @@ -272,7 +264,7 @@ class Api: return vars(shared.cmd_opts) def get_samplers(self): - return [{"name":sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in all_samplers] + return [{"name":sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in sd_samplers.all_samplers] def get_upscalers(self): upscalers = [] diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 7f182712..fbb87dd1 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -12,7 +12,7 @@ import torch import tqdm from einops import rearrange, repeat from ldm.util import default -from modules import devices, processing, sd_models, shared +from modules import devices, processing, sd_models, shared, sd_samplers from modules.textual_inversion import textual_inversion from modules.textual_inversion.learn_schedule import LearnRateScheduler from torch import einsum @@ -535,7 +535,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log p.prompt = preview_prompt p.negative_prompt = preview_negative_prompt p.steps = preview_steps - p.sampler_index = preview_sampler_index + p.sampler_name = sd_samplers.samplers[preview_sampler_index].name p.cfg_scale = preview_cfg_scale p.seed = preview_seed p.width = preview_width diff --git a/modules/images.py b/modules/images.py index ae705cbd..26d5b7a9 100644 --- a/modules/images.py +++ b/modules/images.py @@ -303,7 +303,7 @@ class FilenameGenerator: 'width': lambda self: self.image.width, 'height': lambda self: self.image.height, 'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False), - 'sampler': lambda self: self.p and sanitize_filename_part(sd_samplers.samplers[self.p.sampler_index].name, replace_spaces=False), + 'sampler': lambda self: self.p and sanitize_filename_part(self.p.sampler_name, replace_spaces=False), 'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash), 'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'), 'datetime': lambda self, *args: self.datetime(*args), # accepts formats: [datetime], [datetime], [datetime