Diffstat (limited to 'modules')
-rw-r--r--  modules/img2img.py      |  9
-rw-r--r--  modules/processing.py   |  4
-rw-r--r--  modules/sd_samplers.py  | 36
-rw-r--r--  modules/shared.py       |  9
-rw-r--r--  modules/ui.py           | 45
5 files changed, 95 insertions(+), 8 deletions(-)
diff --git a/modules/img2img.py b/modules/img2img.py
index e6707f96..600a5172 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -55,7 +55,10 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index
initial_seed = None
initial_info = None
+ state.job_count = n_iter
+
for i in range(n_iter):
+
p.n_iter = 1
p.batch_size = 1
p.do_not_save_grid = True
@@ -72,6 +75,8 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index
p.denoising_strength = max(p.denoising_strength * 0.95, 0.1)
history.append(processed.images[0])
+ state.nextjob()
+
grid = images.image_grid(history, batch_size, rows=1)
images.save_image(grid, p.outpath_grids, "grid", initial_seed, prompt, opts.grid_format, info=info, short_filename=not opts.grid_extended_filename)
@@ -103,6 +108,8 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index
batch_count = math.ceil(len(work) / p.batch_size)
print(f"SD upscaling will process a total of {len(work)} images tiled as {len(grid.tiles[0][2])}x{len(grid.tiles)} in a total of {batch_count} batches.")
+ state.job_count = batch_count
+
for i in range(batch_count):
p.init_images = work[i*p.batch_size:(i+1)*p.batch_size]
@@ -116,6 +123,8 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index
p.seed = processed.seed + 1
work_results += processed.images
+ state.nextjob()
+
image_index = 0
for y, h, row in grid.tiles:
for tiledata in row:
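Note on the img2img.py hunks: both loops follow the same pattern, announcing the total number of jobs before the loop starts and advancing the counter after each one. The following standalone sketch (not part of the patch) illustrates that pattern; the State stub and the sd_upscale driver are simplified stand-ins for modules.shared.state and the SD-upscale branch, not the real code.

import math


class State:
    job_no = 0
    job_count = 0

    def nextjob(self):
        self.job_no += 1


state = State()


def sd_upscale(work, batch_size):
    # Mirrors the SD-upscale branch above: one job per batch of tiles.
    batch_count = math.ceil(len(work) / batch_size)
    state.job_count = batch_count

    for i in range(batch_count):
        batch = work[i * batch_size:(i + 1) * batch_size]
        # ... run img2img on this batch of tiles ...
        state.nextjob()  # advance the coarse, per-batch progress counter


sd_upscale(list(range(10)), batch_size=4)
print(f"{state.job_no}/{state.job_count} batches done")  # -> 3/3 batches done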
diff --git a/modules/processing.py b/modules/processing.py
index c0c1adb7..1351579b 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -153,6 +153,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
with torch.no_grad(), precision_scope("cuda"), ema_scope():
p.init()
+ state.job_count = p.n_iter
+
for n in range(p.n_iter):
if state.interrupted:
break
@@ -207,6 +209,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
output_images.append(image)
+ state.nextjob()
+
unwanted_grid_because_of_img_count = len(output_images) < 2 and opts.grid_only_if_multiple
if not p.do_not_save_grid and not unwanted_grid_because_of_img_count:
return_grid = opts.return_grid
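Note on the processing.py hunks: the new bookkeeping sits next to the existing `if state.interrupted: break` check, so interruption happens cooperatively between batches rather than mid-batch. A minimal sketch of that interaction (not part of the patch), again with an illustrative stand-in for modules.shared.state:

class State:
    interrupted = False
    job_no = 0
    job_count = 0

    def interrupt(self):
        self.interrupted = True

    def nextjob(self):
        self.job_no += 1


state = State()


def process_images(n_iter):
    # Same shape as the generation loop above; the work itself is elided.
    state.job_count = n_iter
    for n in range(n_iter):
        if state.interrupted:
            break  # stop cleanly between batches, never mid-batch
        # ... sample and decode one batch of images ...
        state.nextjob()


state.interrupt()          # e.g. the Gradio Interrupt button fired
process_images(4)
print(state.job_no, "of", state.job_count, "batches ran")  # -> 0 of 4 batches ran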
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 6f028f5f..896e8b3f 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -1,10 +1,12 @@
from collections import namedtuple
+
+import ldm.models.diffusion.ddim
import torch
import tqdm
import k_diffusion.sampling
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.models.diffusion.plms import PLMSSampler
+import ldm.models.diffusion.ddim
+import ldm.models.diffusion.plms
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
@@ -29,8 +31,8 @@ samplers_data_k_diffusion = [
samplers = [
*samplers_data_k_diffusion,
- SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(DDIMSampler, model), []),
- SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(PLMSSampler, model), []),
+ SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), []),
+ SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), []),
]
samplers_for_img2img = [x for x in samplers if x.name != 'PLMS']
@@ -43,6 +45,23 @@ def p_sample_ddim_hook(sampler_wrapper, x_dec, cond, ts, *args, **kwargs):
return sampler_wrapper.orig_p_sample_ddim(x_dec, cond, ts, *args, **kwargs)
+def extended_tdqm(sequence, *args, desc=None, **kwargs):
+ state.sampling_steps = len(sequence)
+ state.sampling_step = 0
+
+ for x in tqdm.tqdm(sequence, *args, desc=state.job, **kwargs):
+ if state.interrupted:
+ break
+
+ yield x
+
+ state.sampling_step += 1
+
+
+ldm.models.diffusion.ddim.tqdm = lambda *args, desc=None, **kwargs: extended_tdqm(*args, desc=desc, **kwargs)
+ldm.models.diffusion.plms.tqdm = lambda *args, desc=None, **kwargs: extended_tdqm(*args, desc=desc, **kwargs)
+
+
class VanillaStableDiffusionSampler:
def __init__(self, constructor, sd_model):
self.sampler = constructor(sd_model)
@@ -102,13 +121,18 @@ class CFGDenoiser(torch.nn.Module):
return denoised
-def extended_trange(*args, **kwargs):
- for x in tqdm.trange(*args, desc=state.job, **kwargs):
+def extended_trange(count, *args, **kwargs):
+ state.sampling_steps = count
+ state.sampling_step = 0
+
+ for x in tqdm.trange(count, *args, desc=state.job, **kwargs):
if state.interrupted:
break
yield x
+ state.sampling_step += 1
+
class KDiffusionSampler:
def __init__(self, funcname, sd_model):
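Note on the sd_samplers.py hunks: switching from `from ... import DDIMSampler` to module-level imports is what makes the monkeypatch possible, since the DDIM/PLMS modules call `tqdm(...)` through their own module-level name, which can then be rebound. A standalone sketch of the wrapping technique (not part of the patch); the State stub is illustrative, not the real modules.shared.state:

import tqdm


class State:
    interrupted = False
    sampling_step = 0
    sampling_steps = 0


state = State()


def extended_tqdm(sequence, *args, desc=None, **kwargs):
    # Publish the step count, then re-yield the sequence through tqdm,
    # updating the counter and honouring the interrupt flag per step.
    state.sampling_steps = len(sequence)
    state.sampling_step = 0

    for x in tqdm.tqdm(sequence, *args, desc=desc, **kwargs):
        if state.interrupted:
            break
        yield x
        state.sampling_step += 1


# Any module whose `tqdm` name is rebound to this wrapper becomes
# observable and interruptible without changing its own code:
for _ in extended_tqdm(range(5), desc="sampling"):
    pass
print(state.sampling_step, "of", state.sampling_steps)  # -> 5 of 5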
diff --git a/modules/shared.py b/modules/shared.py
index 4e36df37..53861daf 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -42,10 +42,18 @@ batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram o
class State:
interrupted = False
job = ""
+ job_no = 0
+ job_count = 0
+ sampling_step = 0
+ sampling_steps = 0
def interrupt(self):
self.interrupted = True
+ def nextjob(self):
+ self.job_no += 1
+ self.sampling_step = 0
+
state = State()
artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
@@ -89,6 +97,7 @@ class Options:
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscaling. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
"upscale_at_full_resolution_padding": OptionInfo(16, "Inpainting at full resolution: padding, in pixels, for the masked region.", gr.Slider, {"minimum": 0, "maximum": 128, "step": 4}),
+ "show_progressbar": OptionInfo(True, "Show progressbar"),
}
def __init__(self):
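Note on the shared.py hunks: `nextjob()` resets `sampling_step` as well as advancing `job_no`; otherwise the fine-grained fraction of a finished job would briefly inflate the next progress reading until its first sampler step completes. A small illustrative sketch (the stub omits the real class's `job` and `interrupted` fields; the numbers are hypothetical):

class State:
    job_no = 0
    job_count = 0
    sampling_step = 0
    sampling_steps = 0

    def nextjob(self):
        # Reset the per-step counter so a finished job's fraction does not
        # leak into the next job's progress reading.
        self.job_no += 1
        self.sampling_step = 0


state = State()
state.job_count = 2

state.sampling_steps = 20
state.sampling_step = 20      # first job has finished all its sampler steps
state.nextjob()
print(state.job_no, state.sampling_step)  # -> 1 0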
diff --git a/modules/ui.py b/modules/ui.py
index aa5a61b7..a9e4fd00 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -48,7 +48,6 @@ css_hide_progressbar = """
.meta-text { display:none!important; }
"""
-
def plaintext_to_html(text):
text = "".join([f"<p>{html.escape(x)}</p>\n" for x in text.split('\n')])
return text
@@ -134,6 +133,24 @@ def wrap_gradio_call(func):
return f
+def check_progress_call():
+ if not opts.show_progressbar:
+ return ""
+
+ if shared.state.job_count == 0:
+ return ""
+
+ progress = shared.state.job_no / shared.state.job_count
+ if shared.state.sampling_steps > 0:
+ progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
+
+ progress = min(progress, 1)
+
+ progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
+
+ return f"<span style='display: none'>{time.time()}</span><p>{progressbar}</p>"
+
+
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
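Note on check_progress_call (the hunk above): the fraction combines coarse per-job progress with the current job's sampling progress. A worked numeric example, not part of the patch, using hypothetical values:

# Hypothetical snapshot: second of four batches, halfway through its steps.
job_no, job_count = 1, 4
sampling_step, sampling_steps = 10, 20

progress = job_no / job_count
if sampling_steps > 0:
    progress += 1 / job_count * sampling_step / sampling_steps
progress = min(progress, 1)

print(f"{progress * 100:.0f}%")  # -> 38%  (1.5 of 4 jobs' worth of work done)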
@@ -154,8 +171,9 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id="txt2img_prompt", show_label=False, placeholder="Prompt", lines=1)
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="txt2img_negative_prompt", show_label=False, placeholder="Negative prompt", lines=1, visible=False)
- roll = gr.Button('Roll', elem_id="txt2img_roll", visible=len(shared.artist_db.artists)>0)
+ roll = gr.Button('Roll', elem_id="txt2img_roll", visible=len(shared.artist_db.artists) > 0)
submit = gr.Button('Generate', elem_id="txt2img_generate", variant='primary')
+ check_progress = gr.Button('Check progress', elem_id="check_progress", visible=False)
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
@@ -185,6 +203,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
with gr.Group():
txt2img_gallery = gr.Gallery(label='Output', elem_id='txt2img_gallery')
+
with gr.Group():
with gr.Row():
save = gr.Button('Save')
@@ -193,12 +212,16 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
send_to_extras = gr.Button('Send to extras')
interrupt = gr.Button('Interrupt')
+ progressbar = gr.HTML(elem_id="progressbar")
+
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
+
txt2img_args = dict(
fn=txt2img,
+ _js="submit",
inputs=[
prompt,
negative_prompt,
@@ -223,6 +246,13 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
+ check_progress.click(
+ fn=check_progress_call,
+ inputs=[],
+ outputs=[progressbar],
+ )
+
+
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
@@ -252,10 +282,12 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
]
)
+
with gr.Blocks(analytics_enabled=False) as img2img_interface:
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id="img2img_prompt", show_label=False, placeholder="Prompt", lines=1)
submit = gr.Button('Generate', elem_id="img2img_generate", variant='primary')
+ check_progress = gr.Button('Check progress', elem_id="check_progress", visible=False)
with gr.Row().style(equal_height=False):
@@ -310,6 +342,8 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
save = gr.Button('Save')
img2img_send_to_extras = gr.Button('Send to extras')
+ progressbar = gr.HTML(elem_id="progressbar")
+
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
@@ -352,6 +386,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
img2img_args = dict(
fn=img2img,
+ _js="submit",
inputs=[
prompt,
init_img,
@@ -386,6 +421,12 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
prompt.submit(**img2img_args)
submit.click(**img2img_args)
+ check_progress.click(
+ fn=check_progress_call,
+ inputs=[],
+ outputs=[progressbar],
+ )
+
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],