| author | papuSpartan <30642826+papuSpartan@users.noreply.github.com> | 2023-05-13 15:23:42 +0000 |
| --- | --- | --- |
| committer | papuSpartan <30642826+papuSpartan@users.noreply.github.com> | 2023-05-13 15:23:42 +0000 |
| commit | ac83627a31daac06f4d48b0e7db223ef807fe8e5 (patch) | |
| tree | 68d89cf786fa340f87eb7b8c1afe7ccae463ccd2 /modules/processing.py | |
| parent | 55e52c878ab669d5b11b001a4152ee1a3b8d4880 (diff) | |
heavily simplify
Diffstat (limited to 'modules/processing.py')
-rw-r--r-- | modules/processing.py | 35 |
1 file changed, 15 insertions, 20 deletions
diff --git a/modules/processing.py b/modules/processing.py
index 6828e898..32ff61e9 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -34,7 +34,7 @@ import tomesd
# add a logger for the processing module
logger = logging.getLogger(__name__)
# manually set output level here since there is no option to do so yet through launch options
-# logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s')
+logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s')
# some of those options should not be changed at all because they would break the model, so I removed them from options.
@@ -496,15 +496,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
"Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None,
"Clip skip": None if clip_skip <= 1 else clip_skip,
"ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta,
- "Token merging ratio": None if not opts.token_merging or opts.token_merging_hr_only else opts.token_merging_ratio,
- "Token merging ratio hr": None if not opts.token_merging else opts.token_merging_ratio_hr,
- "Token merging random": None if opts.token_merging_random is False else opts.token_merging_random,
- "Token merging merge attention": None if opts.token_merging_merge_attention is True else opts.token_merging_merge_attention,
- "Token merging merge cross attention": None if opts.token_merging_merge_cross_attention is False else opts.token_merging_merge_cross_attention,
- "Token merging merge mlp": None if opts.token_merging_merge_mlp is False else opts.token_merging_merge_mlp,
- "Token merging stride x": None if opts.token_merging_stride_x == 2 else opts.token_merging_stride_x,
- "Token merging stride y": None if opts.token_merging_stride_y == 2 else opts.token_merging_stride_y,
- "Token merging maximum down sampling": None if opts.token_merging_maximum_down_sampling == 1 else opts.token_merging_maximum_down_sampling,
+ "Token merging ratio": None if opts.token_merging_ratio == 0 else opts.token_merging_ratio,
+ "Token merging ratio hr": None if not p.enable_hr or opts.token_merging_ratio_hr == 0 else opts.token_merging_ratio_hr,
"Init image hash": getattr(p, 'init_img_hash', None),
"RNG": opts.randn_source if opts.randn_source != "GPU" else None,
"NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond,
@@ -538,15 +531,15 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if k == 'sd_vae':
sd_vae.reload_vae_weights()
- if opts.token_merging and not opts.token_merging_hr_only:
+ if opts.token_merging_ratio > 0:
sd_models.apply_token_merging(sd_model=p.sd_model, hr=False)
- logger.debug('Token merging applied')
+ logger.debug(f"Token merging applied to first pass. Ratio: '{opts.token_merging_ratio}'")
res = process_images_inner(p)
finally:
# undo model optimizations made by tomesd
- if opts.token_merging:
+ if opts.token_merging_ratio > 0:
tomesd.remove_patch(p.sd_model)
logger.debug('Token merging model optimizations removed')
@@ -1003,19 +996,21 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
devices.torch_gc()
# apply token merging optimizations from tomesd for high-res pass
- # check if hr_only so we are not redundantly patching
- if opts.token_merging and (opts.token_merging_hr_only or opts.token_merging_ratio_hr != opts.token_merging_ratio):
- # case where user wants to use separate merge ratios
- if not opts.token_merging_hr_only:
- # clean patch done by first pass. (clobbering the first patch might be fine? this might be excessive)
+ if opts.token_merging_ratio_hr > 0:
+ # in case the user has used separate merge ratios
+ if opts.token_merging_ratio > 0:
tomesd.remove_patch(self.sd_model)
- logger.debug('Temporarily removed token merging optimizations in preparation for next pass')
+ logger.debug('Adjusting token merging ratio for high-res pass')
sd_models.apply_token_merging(sd_model=self.sd_model, hr=True)
- logger.debug('Applied token merging for high-res pass')
+ logger.debug(f"Applied token merging for high-res pass. Ratio: '{opts.token_merging_ratio_hr}'")
samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning)
+ if opts.token_merging_ratio_hr > 0 or opts.token_merging_ratio > 0:
+ tomesd.remove_patch(self.sd_model)
+ logger.debug('Removed token merging optimizations from model')
+
self.is_hr_pass = False
return samples
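
Taken together, the patch reduces the tomesd configuration surface to two ratios and gates every apply/remove call on them. The sketch below is a minimal illustration of that flow, not the webui implementation: `Opts`, `generate`, `first_pass`, and `hires_pass` are hypothetical stand-ins for the webui's `opts`, `process_images`, `process_images_inner`, and the high-res `sample_img2img` call, and it assumes `sd_models.apply_token_merging` ultimately boils down to `tomesd.apply_patch` with the configured ratio; only `tomesd.apply_patch` and `tomesd.remove_patch` are the library's real entry points.

```python
# Sketch only: the simplified token-merging flow this commit converges on.
# tomesd.apply_patch / tomesd.remove_patch are real tomesd calls; everything
# else here is a simplified stand-in for webui internals.
from dataclasses import dataclass
from typing import Callable

import tomesd


@dataclass
class Opts:
    token_merging_ratio: float = 0.0      # base pass; 0 disables merging
    token_merging_ratio_hr: float = 0.0   # high-res pass; 0 disables merging there


def generate(sd_model, opts: Opts, enable_hr: bool,
             first_pass: Callable[[], None] = lambda: None,   # stand-in for process_images_inner()
             hires_pass: Callable[[], None] = lambda: None):  # stand-in for sample_img2img()
    try:
        # Base pass: only patch the model when a non-zero ratio was requested.
        if opts.token_merging_ratio > 0:
            tomesd.apply_patch(sd_model, ratio=opts.token_merging_ratio)
        first_pass()

        if enable_hr and opts.token_merging_ratio_hr > 0:
            # Separate high-res ratio: drop the base-pass patch (if any) before
            # re-patching, mirroring the remove_patch/apply_token_merging pair above.
            if opts.token_merging_ratio > 0:
                tomesd.remove_patch(sd_model)
            tomesd.apply_patch(sd_model, ratio=opts.token_merging_ratio_hr)
            hires_pass()
    finally:
        # Undo tomesd's model patch so later generations start from a clean model.
        if opts.token_merging_ratio > 0 or opts.token_merging_ratio_hr > 0:
            tomesd.remove_patch(sd_model)
```

Because each pass now keys off a single ratio, the nine per-option infotext entries removed in the first hunk collapse to the two "Token merging ratio" fields that remain.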