author | 不会画画的中医不是好程序员 <yfszzx@gmail.com> | 2022-10-10 12:21:25 +0000 |
---|---|---|
committer | GitHub <noreply@github.com> | 2022-10-10 12:21:25 +0000 |
commit | 1e18a5ffcc439b72adaaf425c0b79f3acb34322e (patch) | |
tree | 01f9c73c02076694a9bc3c965875646473771db8 /modules/processing.py | |
parent | 23f2989799ee3911d2959cfceb74b921f20c9a51 (diff) | |
parent | a3578233395e585e68c2118d3630cb2a961d4a36 (diff) | |
download | stable-diffusion-webui-gfx803-1e18a5ffcc439b72adaaf425c0b79f3acb34322e.tar.gz stable-diffusion-webui-gfx803-1e18a5ffcc439b72adaaf425c0b79f3acb34322e.tar.bz2 stable-diffusion-webui-gfx803-1e18a5ffcc439b72adaaf425c0b79f3acb34322e.zip | |
Merge branch 'AUTOMATIC1111:master' into master
Diffstat (limited to 'modules/processing.py')
-rw-r--r-- | modules/processing.py | 31 |
1 file changed, 25 insertions, 6 deletions
diff --git a/modules/processing.py b/modules/processing.py
index f773a30e..94d2dd62 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -46,6 +46,12 @@ def apply_color_correction(correction, image):
    return image
+def get_correct_sampler(p):
+    if isinstance(p, modules.processing.StableDiffusionProcessingTxt2Img):
+        return sd_samplers.samplers
+    elif isinstance(p, modules.processing.StableDiffusionProcessingImg2Img):
+        return sd_samplers.samplers_for_img2img
+
class StableDiffusionProcessing:
    def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt="", styles=None, seed=-1, subseed=-1, subseed_strength=0, seed_resize_from_h=-1, seed_resize_from_w=-1, seed_enable_extras=True, sampler_index=0, batch_size=1, n_iter=1, steps=50, cfg_scale=7.0, width=512, height=512, restore_faces=False, tiling=False, do_not_save_samples=False, do_not_save_grid=False, extra_generation_params=None, overlay_images=None, negative_prompt=None, eta=None):
        self.sd_model = sd_model
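The new get_correct_sampler helper dispatches on the processing type because txt2img and img2img expose different sampler lists (sd_samplers.samplers vs. sd_samplers.samplers_for_img2img), so the same sampler_index can point at different entries. A minimal sketch of the lookup this enables; sampler_name_for is a hypothetical wrapper, not part of this patch:

```python
# Hypothetical wrapper (not in the patch): resolve the sampler name
# to record in the infotext for either processing type.
def sampler_name_for(p):
    samplers = get_correct_sampler(p)
    if samplers is None:
        # get_correct_sampler has no else branch, so it falls through
        # (returns None) for any other processing type
        return "unknown"
    return samplers[p.sampler_index].name
```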
@@ -123,6 +129,7 @@ class Processed:
        self.index_of_first_image = index_of_first_image
        self.styles = p.styles
        self.job_timestamp = state.job_timestamp
+        self.clip_skip = opts.CLIP_stop_at_last_layers
        self.eta = p.eta
        self.ddim_discretize = p.ddim_discretize
@@ -169,6 +176,7 @@ class Processed:
            "infotexts": self.infotexts,
"styles": self.styles,
"job_timestamp": self.job_timestamp,
+ "clip_skip": self.clip_skip,
}
return json.dumps(obj)
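With clip_skip now stored on Processed and serialized by js(), downstream consumers can recover the CLIP-layer setting from the JSON blob. A small sketch, assuming `processed` stands in for the object returned by process_images():

```python
import json

# `processed` is assumed to be a Processed instance from process_images().
meta = json.loads(processed.js())
clip_skip = meta.get("clip_skip", 1)  # 1 = default, use the last CLIP layer
if clip_skip > 1:
    print(f"generated with Clip skip: {clip_skip}")
```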
@@ -266,14 +274,18 @@ def fix_seed(p):
def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0):
    index = position_in_batch + iteration * p.batch_size
+    clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
+
    generation_params = {
        "Steps": p.steps,
-        "Sampler": sd_samplers.samplers[p.sampler_index].name,
+        "Sampler": get_correct_sampler(p)[p.sampler_index].name,
        "CFG scale": p.cfg_scale,
        "Seed": all_seeds[index],
        "Face restoration": (opts.face_restoration_model if p.restore_faces else None),
        "Size": f"{p.width}x{p.height}",
        "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
+        "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
+        "Hypernet": (None if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name.replace(',', '').replace(':', '')),
        "Batch size": (None if p.batch_size < 2 else p.batch_size),
        "Batch pos": (None if p.batch_size < 2 else position_in_batch),
        "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
@@ -281,6 +293,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
        "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
"Denoising strength": getattr(p, 'denoising_strength', None),
"Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
+ "Clip skip": None if clip_skip <= 1 else clip_skip,
}
generation_params.update(p.extra_generation_params)
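Entries set to None (for example "Clip skip" when the setting is at its default of 1) are dropped when the dict is flattened into the one-line parameters text later in the function (not shown in this hunk). A rough sketch of that flattening, with illustrative values:

```python
# Illustrative values only; None entries fall out of the final line.
params = {"Steps": 20, "Sampler": "Euler a", "CFG scale": 7.0, "Seed": 1234,
          "Size": "512x512", "Batch size": None, "Clip skip": 2}
line = ", ".join(f"{k}: {v}" for k, v in params.items() if v is not None)
print(line)
# Steps: 20, Sampler: Euler a, CFG scale: 7.0, Seed: 1234, Size: 512x512, Clip skip: 2
```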
@@ -312,6 +325,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
        os.makedirs(p.outpath_grids, exist_ok=True)
    modules.sd_hijack.model_hijack.apply_circular(p.tiling)
+    modules.sd_hijack.model_hijack.clear_comments()
    comments = {}
@@ -341,7 +355,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
    infotexts = []
    output_images = []
-    with torch.no_grad():
+    with torch.no_grad(), p.sd_model.ema_scope():
        with devices.autocast():
            p.init(all_prompts, all_seeds, all_subseeds)
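Wrapping generation in p.sd_model.ema_scope() runs sampling with the model's exponential-moving-average weights, which are typically the weights intended for inference. A conceptual sketch of what such a context manager does, assuming an EMA helper with store/copy_to/restore methods in the style of ldm's LitEma; this is an illustration, not the actual CompVis implementation:

```python
from contextlib import contextmanager

@contextmanager
def ema_scope(model, model_ema):
    # Assumed interface in the style of ldm's LitEma helper.
    model_ema.store(model.parameters())    # stash the live training weights
    model_ema.copy_to(model)               # swap in the averaged weights
    try:
        yield
    finally:
        model_ema.restore(model.parameters())  # always restore on exit
```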
@@ -349,6 +363,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
            state.job_count = p.n_iter
        for n in range(p.n_iter):
+            if state.skipped:
+                state.skipped = False
+
            if state.interrupted:
                break
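The distinction between the two flags: skipped cancels only the job in progress and is cleared so the loop proceeds to the next batch, while interrupted abandons everything that remains. A stripped-down illustration with a stand-in state object (the real one lives in modules.shared):

```python
# Stand-in state object for illustration only.
class State:
    skipped = False
    interrupted = False

state = State()

for n in range(3):             # stands in for p.n_iter batches
    if state.skipped:
        state.skipped = False  # reset: only the current job is dropped
    if state.interrupted:
        break                  # stop: no further batches are generated
    # ... generate batch n ...
```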
@@ -375,9 +392,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
            with devices.autocast():
                samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength)
-            if state.interrupted:
+            if state.interrupted or state.skipped:
-                # if we are interruped, sample returns just noise
+                # if we are interrupted, sample returns just noise
                # use the image collected previously in sampler loop
                samples_ddim = shared.state.current_latent
@@ -436,7 +453,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
                    text = infotext(n, i)
                    infotexts.append(text)
-                    image.info["parameters"] = text
+                    if opts.enable_pnginfo:
+                        image.info["parameters"] = text
                    output_images.append(image)
            del x_samples_ddim
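Gating image.info["parameters"] behind opts.enable_pnginfo lets users opt out of embedding generation settings in their files; when the option is on, the text ends up in a PNG tEXt chunk at save time. A self-contained sketch of that round trip using Pillow; the file name and parameter text are illustrative:

```python
from PIL import Image
from PIL.PngImagePlugin import PngInfo

image = Image.new("RGB", (64, 64))
pnginfo = PngInfo()
pnginfo.add_text("parameters", "Steps: 20, Sampler: Euler a, Seed: 1234")
image.save("sample.png", pnginfo=pnginfo)   # writes the tEXt chunk

print(Image.open("sample.png").info["parameters"])  # text survives the save
```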
@@ -455,7 +473,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
            if opts.return_grid:
                text = infotext()
                infotexts.insert(0, text)
-                grid.info["parameters"] = text
+                if opts.enable_pnginfo:
+                    grid.info["parameters"] = text
                output_images.insert(0, grid)
                index_of_first_image = 1