From 3bd898b6ce94f34bcc7685672f0e4318d2d86b33 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sat, 21 Jan 2023 23:14:59 +0300 Subject: First test of different sampler for hi-res fix --- modules/processing.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 6e6371a1..b3206f80 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -710,7 +710,7 @@ def old_hires_fix_first_pass_dimensions(width, height): class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): sampler = None - def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, **kwargs): + def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_sampler: int = 0, **kwargs): super().__init__(**kwargs) self.enable_hr = enable_hr self.denoising_strength = denoising_strength @@ -721,6 +721,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.hr_resize_y = hr_resize_y self.hr_upscale_to_x = hr_resize_x self.hr_upscale_to_y = hr_resize_y + self.hr_sampler = hr_sampler if firstphase_width != 0 or firstphase_height != 0: self.hr_upscale_to_x = self.width @@ -862,6 +863,10 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): shared.state.nextjob() img2img_sampler_name = self.sampler_name if self.sampler_name != 'PLMS' else 'DDIM' # PLMS does not support img2img so we just silently switch ot DDIM + + if self.hr_sampler != 0 and sd_samplers.samplers[self.hr_sampler].name != 'PLMS': + img2img_sampler_name = sd_samplers.samplers[self.hr_sampler].name + self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model) samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2] -- cgit v1.2.3 From 6c0566f937ba212e3faf1d30c337850543e85a12 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sat, 21 Jan 2023 23:25:36 +0300 Subject: Type mismatch fix --- modules/processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index b3206f80..65673a9d 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -864,8 +864,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): img2img_sampler_name = self.sampler_name if self.sampler_name != 'PLMS' else 'DDIM' # PLMS does not support img2img so we just silently switch ot DDIM - if self.hr_sampler != 0 and sd_samplers.samplers[self.hr_sampler].name != 'PLMS': - img2img_sampler_name = sd_samplers.samplers[self.hr_sampler].name + if self.hr_sampler != 0 and self.hr_sampler != 'PLMS': + img2img_sampler_name = self.hr_sampler self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model) -- cgit v1.2.3 From 9e1f49c4e5b169eecbf075c49a304774dfdc6035 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 00:03:16 +0300 Subject: PLMS edge-case handling fix --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git 
a/modules/processing.py b/modules/processing.py index 65673a9d..6e04b064 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -864,7 +864,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): img2img_sampler_name = self.sampler_name if self.sampler_name != 'PLMS' else 'DDIM' # PLMS does not support img2img so we just silently switch ot DDIM - if self.hr_sampler != 0 and self.hr_sampler != 'PLMS': + if self.hr_sampler != 0 and sd_samplers.samplers[self.hr_sampler].name != 'PLMS': img2img_sampler_name = self.hr_sampler self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model) -- cgit v1.2.3 From 3ffe2e768b542c837927cfdf0310a2e0fdd48683 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 00:07:46 +0300 Subject: PLMS edge-case handling fix 2 --- modules/processing.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 6e04b064..83c65948 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -710,7 +710,7 @@ def old_hires_fix_first_pass_dimensions(width, height): class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): sampler = None - def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_sampler: int = 0, **kwargs): + def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_sampler: int | str = 0, **kwargs): super().__init__(**kwargs) self.enable_hr = enable_hr self.denoising_strength = denoising_strength @@ -864,7 +864,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): img2img_sampler_name = self.sampler_name if self.sampler_name != 'PLMS' else 'DDIM' # PLMS does not support img2img so we just silently switch ot DDIM - if self.hr_sampler != 0 and sd_samplers.samplers[self.hr_sampler].name != 'PLMS': + if self.hr_sampler == 0: + pass + elif self.hr_sampler == 'PLMS': + img2img_sampler_name = 'DDIM' + else: img2img_sampler_name = self.hr_sampler self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model) -- cgit v1.2.3 From 6cd7bf9f860dff3b61a50ec2d41915536cbcf448 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 00:08:58 +0300 Subject: PLMS edge-case handling fix 3 --- modules/processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 83c65948..6f6efe06 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -710,7 +710,7 @@ def old_hires_fix_first_pass_dimensions(width, height): class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): sampler = None - def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_sampler: int | str = 0, **kwargs): + def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, 
hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_sampler: str = '---', **kwargs): super().__init__(**kwargs) self.enable_hr = enable_hr self.denoising_strength = denoising_strength @@ -864,7 +864,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): img2img_sampler_name = self.sampler_name if self.sampler_name != 'PLMS' else 'DDIM' # PLMS does not support img2img so we just silently switch ot DDIM - if self.hr_sampler == 0: + if self.hr_sampler == '---': pass elif self.hr_sampler == 'PLMS': img2img_sampler_name = 'DDIM' -- cgit v1.2.3 From 0f6862ef3041f3ee18e8236765b1c1958c84385b Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 00:11:05 +0300 Subject: PLMS edge-case handling fix 5 --- modules/processing.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 6f6efe06..a873498b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -866,8 +866,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.hr_sampler == '---': pass - elif self.hr_sampler == 'PLMS': - img2img_sampler_name = 'DDIM' else: img2img_sampler_name = self.hr_sampler -- cgit v1.2.3 From 8114959e7e937361b719cd85bec246ffd258dca6 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 14:28:53 +0300 Subject: Hr separate prompt test --- modules/processing.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index a873498b..f407e175 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -516,6 +516,23 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: else: p.all_negative_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_negative_styles_to_prompt(p.negative_prompt, p.styles)] + if type(p) == StableDiffusionProcessingTxt2Img: + if p.hr_enabled and p.is_hr_pass: + if p.hr_prompt: + if type(p.prompt) == list: + p.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.hr_prompt] + else: + p.all_prompts = p.batch_size * p.n_iter * [ + shared.prompt_styles.apply_styles_to_prompt(p.hr_prompt, p.styles)] + + if p.hr_negative_prompt: + if type(p.negative_prompt) == list: + p.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, p.styles) for x in + p.hr_negative_prompt] + else: + p.all_negative_prompts = p.batch_size * p.n_iter * [ + shared.prompt_styles.apply_negative_styles_to_prompt(p.hr_negative_prompt, p.styles)] + if type(seed) == list: p.all_seeds = seed else: @@ -710,7 +727,7 @@ def old_hires_fix_first_pass_dimensions(width, height): class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): sampler = None - def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_sampler: str = '---', **kwargs): + def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_sampler: str = '---', hr_prompt: str = '', hr_negative_prompt: str = '', **kwargs): super().__init__(**kwargs) self.enable_hr = 
enable_hr self.denoising_strength = denoising_strength @@ -722,6 +739,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.hr_upscale_to_x = hr_resize_x self.hr_upscale_to_y = hr_resize_y self.hr_sampler = hr_sampler + self.hr_prompt = hr_prompt if hr_prompt != '' else self.prompt + self.hr_negative_prompt = hr_negative_prompt if hr_negative_prompt != '' else self.negative_prompt + self.is_hr_pass = False if firstphase_width != 0 or firstphase_height != 0: self.hr_upscale_to_x = self.width @@ -808,6 +828,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if not self.enable_hr: return samples + self.is_hr_pass = True target_width = self.hr_upscale_to_x target_height = self.hr_upscale_to_y -- cgit v1.2.3 From b331ca784ad831de088ad3915b45da39c2a76fa8 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 14:35:34 +0300 Subject: Fix --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index f407e175..161999d5 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -517,7 +517,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: p.all_negative_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_negative_styles_to_prompt(p.negative_prompt, p.styles)] if type(p) == StableDiffusionProcessingTxt2Img: - if p.hr_enabled and p.is_hr_pass: + if p.enable_hr and p.is_hr_pass: if p.hr_prompt: if type(p.prompt) == list: p.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.hr_prompt] -- cgit v1.2.3 From 81e0723d6559729f5b207ae04bc615318af5a11e Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 14:41:41 +0300 Subject: Logging for debugging --- modules/processing.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 161999d5..eeab4b0c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -518,12 +518,14 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if type(p) == StableDiffusionProcessingTxt2Img: if p.enable_hr and p.is_hr_pass: + logging.info("Running hr pass with custom prompt") if p.hr_prompt: if type(p.prompt) == list: p.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.hr_prompt] else: p.all_prompts = p.batch_size * p.n_iter * [ shared.prompt_styles.apply_styles_to_prompt(p.hr_prompt, p.styles)] + logging.info(p.all_prompts) if p.hr_negative_prompt: if type(p.negative_prompt) == list: @@ -532,6 +534,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: else: p.all_negative_prompts = p.batch_size * p.n_iter * [ shared.prompt_styles.apply_negative_styles_to_prompt(p.hr_negative_prompt, p.styles)] + logging.info(p.all_negative_prompts) if type(seed) == list: p.all_seeds = seed -- cgit v1.2.3 From f774a8d24ec57cf0b795fedb0c54f0304b43b4d9 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 14:52:01 +0300 Subject: Hr-fix separate prompt experimentation --- modules/processing.py | 43 ++++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index eeab4b0c..1133619f 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -516,25 +516,25 @@ def process_images_inner(p: StableDiffusionProcessing) -> 
Processed: else: p.all_negative_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_negative_styles_to_prompt(p.negative_prompt, p.styles)] - if type(p) == StableDiffusionProcessingTxt2Img: - if p.enable_hr and p.is_hr_pass: - logging.info("Running hr pass with custom prompt") - if p.hr_prompt: - if type(p.prompt) == list: - p.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.hr_prompt] - else: - p.all_prompts = p.batch_size * p.n_iter * [ - shared.prompt_styles.apply_styles_to_prompt(p.hr_prompt, p.styles)] - logging.info(p.all_prompts) - - if p.hr_negative_prompt: - if type(p.negative_prompt) == list: - p.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, p.styles) for x in - p.hr_negative_prompt] - else: - p.all_negative_prompts = p.batch_size * p.n_iter * [ - shared.prompt_styles.apply_negative_styles_to_prompt(p.hr_negative_prompt, p.styles)] - logging.info(p.all_negative_prompts) + # if type(p) == StableDiffusionProcessingTxt2Img: + # if p.enable_hr and p.is_hr_pass: + # logging.info("Running hr pass with custom prompt") + # if p.hr_prompt: + # if type(p.prompt) == list: + # p.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.hr_prompt] + # else: + # p.all_prompts = p.batch_size * p.n_iter * [ + # shared.prompt_styles.apply_styles_to_prompt(p.hr_prompt, p.styles)] + # logging.info(p.all_prompts) + # + # if p.hr_negative_prompt: + # if type(p.negative_prompt) == list: + # p.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, p.styles) for x in + # p.hr_negative_prompt] + # else: + # p.all_negative_prompts = p.batch_size * p.n_iter * [ + # shared.prompt_styles.apply_negative_styles_to_prompt(p.hr_negative_prompt, p.styles)] + # logging.info(p.all_negative_prompts) if type(seed) == list: p.all_seeds = seed @@ -744,7 +744,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.hr_sampler = hr_sampler self.hr_prompt = hr_prompt if hr_prompt != '' else self.prompt self.hr_negative_prompt = hr_negative_prompt if hr_negative_prompt != '' else self.negative_prompt - self.is_hr_pass = False if firstphase_width != 0 or firstphase_height != 0: self.hr_upscale_to_x = self.width @@ -831,7 +830,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if not self.enable_hr: return samples - self.is_hr_pass = True + self.prompt = self.hr_prompt + self.negative_prompt = self.hr_negative_prompt + target_width = self.hr_upscale_to_x target_height = self.hr_upscale_to_y -- cgit v1.2.3 From a9f0e7d53611cf11331e2befd34f0351b47795ee Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 15:12:00 +0300 Subject: hr conditioning --- modules/processing.py | 72 ++++++++++++++++++++++++++++++++------------------- 1 file changed, 46 insertions(+), 26 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 1133619f..21886bb5 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -235,7 +235,7 @@ class StableDiffusionProcessing: def init(self, all_prompts, all_seeds, all_subseeds): pass - def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): + def sample(self, conditioning, unconditional_conditioning, hr_conditioning, hr_uconditional_conditioning, seeds, subseeds, subseed_strength, prompts): raise NotImplementedError() def close(self): @@ -516,25 +516,25 @@ def process_images_inner(p: StableDiffusionProcessing) -> 
Processed: else: p.all_negative_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_negative_styles_to_prompt(p.negative_prompt, p.styles)] - # if type(p) == StableDiffusionProcessingTxt2Img: - # if p.enable_hr and p.is_hr_pass: - # logging.info("Running hr pass with custom prompt") - # if p.hr_prompt: - # if type(p.prompt) == list: - # p.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.hr_prompt] - # else: - # p.all_prompts = p.batch_size * p.n_iter * [ - # shared.prompt_styles.apply_styles_to_prompt(p.hr_prompt, p.styles)] - # logging.info(p.all_prompts) - # - # if p.hr_negative_prompt: - # if type(p.negative_prompt) == list: - # p.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, p.styles) for x in - # p.hr_negative_prompt] - # else: - # p.all_negative_prompts = p.batch_size * p.n_iter * [ - # shared.prompt_styles.apply_negative_styles_to_prompt(p.hr_negative_prompt, p.styles)] - # logging.info(p.all_negative_prompts) + if type(p) == StableDiffusionProcessingTxt2Img: + if p.enable_hr and p.is_hr_pass: + logging.info("Running hr pass with custom prompt") + if p.hr_prompt: + if type(p.prompt) == list: + p.all_hr_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.hr_prompt] + else: + p.all_hr_prompts = p.batch_size * p.n_iter * [ + shared.prompt_styles.apply_styles_to_prompt(p.hr_prompt, p.styles)] + logging.info(p.all_prompts) + + if p.hr_negative_prompt: + if type(p.negative_prompt) == list: + p.all_hr_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, p.styles) for x in + p.hr_negative_prompt] + else: + p.all_hr_negative_prompts = p.batch_size * p.n_iter * [ + shared.prompt_styles.apply_negative_styles_to_prompt(p.hr_negative_prompt, p.styles)] + logging.info(p.all_negative_prompts) if type(seed) == list: p.all_seeds = seed @@ -607,6 +607,12 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size] negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size] + + if type(p) == StableDiffusionProcessingTxt2Img: + if p.enable_hr: + hr_prompts = p.all_hr_prompts[n * p.batch_size:(n + 1) * p.batch_size] + hr_negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size] + seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size] subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size] @@ -620,6 +626,12 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, cached_uc) c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c) + if type(p) == StableDiffusionProcessingTxt2Img: + if p.enable_hr: + hr_uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, + cached_uc) + hr_c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, + cached_c) if len(model_hijack.comments) > 0: for comment in model_hijack.comments: @@ -629,7 +641,16 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: shared.state.job = f"Batch {n+1} out of {p.n_iter}" with devices.autocast(): - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) + if type(p) == StableDiffusionProcessingTxt2Img: + if p.enable_hr: + 
samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, hr_conditioning=hr_c, hr_uconditional_conditioning=hr_uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) + samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, + subseeds=subseeds, + subseed_strength=p.subseed_strength, prompts=prompts) + else: + samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, + subseeds=subseeds, + subseed_strength=p.subseed_strength, prompts=prompts) x_samples_ddim = [decode_first_stage(p.sd_model, samples_ddim[i:i+1].to(dtype=devices.dtype_vae))[0].cpu() for i in range(samples_ddim.size(0))] for x in x_samples_ddim: @@ -744,6 +765,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.hr_sampler = hr_sampler self.hr_prompt = hr_prompt if hr_prompt != '' else self.prompt self.hr_negative_prompt = hr_negative_prompt if hr_negative_prompt != '' else self.negative_prompt + self.all_hr_prompts = None + self.all_hr_negative_prompts = None if firstphase_width != 0 or firstphase_height != 0: self.hr_upscale_to_x = self.width @@ -817,7 +840,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.hr_upscaler is not None: self.extra_generation_params["Hires upscaler"] = self.hr_upscaler - def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): + def sample(self, conditioning, unconditional_conditioning, hr_conditioning, hr_uconditional_conditioning, seeds, subseeds, subseed_strength, prompts): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest") @@ -830,9 +853,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if not self.enable_hr: return samples - self.prompt = self.hr_prompt - self.negative_prompt = self.hr_negative_prompt - target_width = self.hr_upscale_to_x target_height = self.hr_upscale_to_y @@ -904,7 +924,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): x = None devices.torch_gc() - samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) + samples = self.sampler.sample_img2img(self, samples, noise, hr_conditioning, hr_unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) return samples -- cgit v1.2.3 From 4e0cf7d4eda3d590df3e4b28654037d2dfb7ab94 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 15:15:08 +0300 Subject: hr conditioning --- modules/processing.py | 34 +++++++++++++--------------------- 1 file changed, 13 insertions(+), 21 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 21886bb5..6b49b4e3 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -517,24 +517,16 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: p.all_negative_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_negative_styles_to_prompt(p.negative_prompt, p.styles)] if type(p) == StableDiffusionProcessingTxt2Img: - if p.enable_hr and p.is_hr_pass: - logging.info("Running hr pass with custom prompt") - if p.hr_prompt: - if type(p.prompt) == list: - 
p.all_hr_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.hr_prompt] - else: - p.all_hr_prompts = p.batch_size * p.n_iter * [ - shared.prompt_styles.apply_styles_to_prompt(p.hr_prompt, p.styles)] - logging.info(p.all_prompts) - - if p.hr_negative_prompt: - if type(p.negative_prompt) == list: - p.all_hr_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, p.styles) for x in - p.hr_negative_prompt] - else: - p.all_hr_negative_prompts = p.batch_size * p.n_iter * [ - shared.prompt_styles.apply_negative_styles_to_prompt(p.hr_negative_prompt, p.styles)] - logging.info(p.all_negative_prompts) + if p.enable_hr: + if type(p.prompt) == list: + p.all_hr_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.hr_prompt] + else: + p.all_hr_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_styles_to_prompt(p.hr_prompt, p.styles)] + + if type(p.negative_prompt) == list: + p.all_hr_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, p.styles) for x in p.hr_negative_prompt] + else: + p.all_hr_negative_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_negative_styles_to_prompt(p.hr_negative_prompt, p.styles)] if type(seed) == list: p.all_seeds = seed @@ -628,9 +620,9 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c) if type(p) == StableDiffusionProcessingTxt2Img: if p.enable_hr: - hr_uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, + hr_uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, p.steps, cached_uc) - hr_c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, + hr_c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, p.steps, cached_c) if len(model_hijack.comments) > 0: @@ -840,7 +832,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.hr_upscaler is not None: self.extra_generation_params["Hires upscaler"] = self.hr_upscaler - def sample(self, conditioning, unconditional_conditioning, hr_conditioning, hr_uconditional_conditioning, seeds, subseeds, subseed_strength, prompts): + def sample(self, conditioning, unconditional_conditioning, hr_conditioning, hr_unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest") -- cgit v1.2.3 From c5d4c87c02ce4c503de149db5ee3e87af4402cf0 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 15:17:43 +0300 Subject: hr conditioning --- modules/processing.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 6b49b4e3..a81ace6f 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -635,10 +635,11 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: with devices.autocast(): if type(p) == StableDiffusionProcessingTxt2Img: if p.enable_hr: - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, hr_conditioning=hr_c, hr_uconditional_conditioning=hr_uc, seeds=seeds, subseeds=subseeds, 
subseed_strength=p.subseed_strength, prompts=prompts) - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, - subseeds=subseeds, - subseed_strength=p.subseed_strength, prompts=prompts) + samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, hr_conditioning=hr_c, hr_unconditional_conditioning=hr_uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) + else: + samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, + subseed_strength=p.subseed_strength, prompts=prompts) + else: samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, -- cgit v1.2.3 From 2ab2bce74d7b7affb80b55da9d1ffb219129c1ef Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 15:28:38 +0300 Subject: hr conditioning --- modules/processing.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index a81ace6f..bab1c101 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -235,7 +235,7 @@ class StableDiffusionProcessing: def init(self, all_prompts, all_seeds, all_subseeds): pass - def sample(self, conditioning, unconditional_conditioning, hr_conditioning, hr_uconditional_conditioning, seeds, subseeds, subseed_strength, prompts): + def sample(self, conditioning, unconditional_conditioning, hr_conditioning=None, hr_unconditional_conditioning=None, seeds, subseeds, subseed_strength, prompts): raise NotImplementedError() def close(self): @@ -517,7 +517,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: p.all_negative_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_negative_styles_to_prompt(p.negative_prompt, p.styles)] if type(p) == StableDiffusionProcessingTxt2Img: - if p.enable_hr: + if p.enable_hr and p.hr_prompt != '': if type(p.prompt) == list: p.all_hr_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.hr_prompt] else: @@ -601,7 +601,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size] if type(p) == StableDiffusionProcessingTxt2Img: - if p.enable_hr: + if p.enable_hr and p.hr_prompt != '': hr_prompts = p.all_hr_prompts[n * p.batch_size:(n + 1) * p.batch_size] hr_negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size] @@ -619,7 +619,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, cached_uc) c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c) if type(p) == StableDiffusionProcessingTxt2Img: - if p.enable_hr: + if p.enable_hr and p.hr_prompt != '': hr_uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, p.steps, cached_uc) hr_c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, p.steps, @@ -635,7 +635,12 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: with devices.autocast(): if type(p) == StableDiffusionProcessingTxt2Img: if p.enable_hr: - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, hr_conditioning=hr_c, hr_unconditional_conditioning=hr_uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, 
prompts=prompts) + if p.hr_prompts != '': + samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, hr_conditioning=hr_c, hr_unconditional_conditioning=hr_uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) + else: + samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, hr_conditioning=c, + hr_unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, + subseed_strength=p.subseed_strength, prompts=prompts) else: samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) @@ -756,8 +761,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.hr_upscale_to_x = hr_resize_x self.hr_upscale_to_y = hr_resize_y self.hr_sampler = hr_sampler - self.hr_prompt = hr_prompt if hr_prompt != '' else self.prompt - self.hr_negative_prompt = hr_negative_prompt if hr_negative_prompt != '' else self.negative_prompt + self.hr_prompt = hr_prompt if hr_prompt != '' else '' + self.hr_negative_prompt = hr_negative_prompt if hr_negative_prompt != '' else '' self.all_hr_prompts = None self.all_hr_negative_prompts = None -- cgit v1.2.3 From 125d5c8d96eb374a7ba214dba51da9cf30da6564 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 15:31:11 +0300 Subject: hr conditioning --- modules/processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index bab1c101..f0073d1a 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -235,7 +235,7 @@ class StableDiffusionProcessing: def init(self, all_prompts, all_seeds, all_subseeds): pass - def sample(self, conditioning, unconditional_conditioning, hr_conditioning=None, hr_unconditional_conditioning=None, seeds, subseeds, subseed_strength, prompts): + def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts, hr_conditioning=None, hr_unconditional_conditioning=None): raise NotImplementedError() def close(self): @@ -838,7 +838,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.hr_upscaler is not None: self.extra_generation_params["Hires upscaler"] = self.hr_upscaler - def sample(self, conditioning, unconditional_conditioning, hr_conditioning, hr_unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): + def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts, hr_conditioning=None, hr_unconditional_conditioning=None): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest") -- cgit v1.2.3 From 34f6d667429ec3d4e0f09c6148fd37e66c460cb7 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 15:32:47 +0300 Subject: hr conditioning --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index f0073d1a..b2de8f92 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -635,7 +635,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: with devices.autocast(): if type(p) == StableDiffusionProcessingTxt2Img: if p.enable_hr: - if p.hr_prompts != '': + if p.hr_prompt 
!= '': samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, hr_conditioning=hr_c, hr_unconditional_conditioning=hr_uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) else: samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, hr_conditioning=c, -- cgit v1.2.3 From bbb1e35ea29f8cfc6a6db3b28b10bfbc99033c70 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 15:44:59 +0300 Subject: UI and PNG info improvements --- modules/processing.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index b2de8f92..0cf4771e 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -789,6 +789,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.hr_resize_x == 0 and self.hr_resize_y == 0: self.extra_generation_params["Hires upscale"] = self.hr_scale + self.extra_generation_params["Hires sampler"] = self.hr_sampler + self.extra_generation_params["Hires prompt"] = self.hr_prompt + self.extra_generation_params["Hires negative prompt"] = self.hr_negative_prompt self.hr_upscale_to_x = int(self.width * self.hr_scale) self.hr_upscale_to_y = int(self.height * self.hr_scale) else: -- cgit v1.2.3 From a5c2b5ed8991e0e1ffea7b595ab13f93715dfa5c Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 15:50:20 +0300 Subject: UI and PNG info improvements --- modules/processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 0cf4771e..8b4b2399 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -790,8 +790,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.hr_resize_x == 0 and self.hr_resize_y == 0: self.extra_generation_params["Hires upscale"] = self.hr_scale self.extra_generation_params["Hires sampler"] = self.hr_sampler - self.extra_generation_params["Hires prompt"] = self.hr_prompt - self.extra_generation_params["Hires negative prompt"] = self.hr_negative_prompt + self.extra_generation_params["Hires prompt"] = f"({self.hr_prompt})" + self.extra_generation_params["Hires negative prompt"] = f"({self.hr_negative_prompt})" self.hr_upscale_to_x = int(self.width * self.hr_scale) self.hr_upscale_to_y = int(self.height * self.hr_scale) else: -- cgit v1.2.3 From 7f62300f7d496501a730d28f3aaaee885513998b Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 16:29:08 +0300 Subject: Gen params paste improvement --- modules/processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 8b4b2399..c56717f6 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -790,8 +790,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.hr_resize_x == 0 and self.hr_resize_y == 0: self.extra_generation_params["Hires upscale"] = self.hr_scale self.extra_generation_params["Hires sampler"] = self.hr_sampler - self.extra_generation_params["Hires prompt"] = f"({self.hr_prompt})" - self.extra_generation_params["Hires negative prompt"] = f"({self.hr_negative_prompt})" + self.extra_generation_params["Hires prompt"] = f'"{self.hr_prompt}"' + self.extra_generation_params["Hires negative prompt"] = f'"{self.hr_negative_prompt}"' self.hr_upscale_to_x = int(self.width * self.hr_scale) self.hr_upscale_to_y = 
int(self.height * self.hr_scale) else: -- cgit v1.2.3 From 3bc8ee998db5f461b8011a72e6f167012ccb8bc1 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 22 Jan 2023 16:35:42 +0300 Subject: Gen params paste improvement --- modules/processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index c56717f6..01c1b53c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -790,8 +790,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.hr_resize_x == 0 and self.hr_resize_y == 0: self.extra_generation_params["Hires upscale"] = self.hr_scale self.extra_generation_params["Hires sampler"] = self.hr_sampler - self.extra_generation_params["Hires prompt"] = f'"{self.hr_prompt}"' - self.extra_generation_params["Hires negative prompt"] = f'"{self.hr_negative_prompt}"' + self.extra_generation_params["Hires prompt"] = f'({self.hr_prompt.replace(",", ";")})' + self.extra_generation_params["Hires negative prompt"] = f'({self.hr_negative_prompt.replace(",", ";")})' self.hr_upscale_to_x = int(self.width * self.hr_scale) self.hr_upscale_to_y = int(self.height * self.hr_scale) else: -- cgit v1.2.3 From c92ec3a9255a8364f4212d5e7434da1785752737 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 29 Jan 2023 19:07:00 +0300 Subject: Extra networks loading fix --- modules/processing.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 605c541e..da890fb3 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -555,6 +555,10 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: model_hijack.embedding_db.load_textual_inversion_embeddings() _, extra_network_data = extra_networks.parse_prompts(p.all_prompts[0:1]) + if type(p) == StableDiffusionProcessingTxt2Img: + if p.enable_hr and p.hr_prompt != '': + _, hr_extra_network_data = extra_networks.parse_prompts(p.all_hr_prompts[0:1]) + if p.scripts is not None: p.scripts.process(p) @@ -594,7 +598,11 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: sd_vae_approx.model() if not p.disable_extra_networks: - extra_networks.activate(p, extra_network_data) + if type(p) == StableDiffusionProcessingTxt2Img: + if p.enable_hr and p.hr_prompt != '': + extra_networks.activate(p, extra_network_data + hr_extra_network_data) + else: + extra_networks.activate(p, extra_network_data) with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file: processed = Processed(p, [], p.seed, "") @@ -745,7 +753,11 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True) if not p.disable_extra_networks: - extra_networks.deactivate(p, extra_network_data) + if type(p) == StableDiffusionProcessingTxt2Img: + if p.enable_hr and p.hr_prompt != '': + extra_networks.deactivate(p, extra_network_data + hr_extra_network_data) + else: + extra_networks.deactivate(p, extra_network_data) devices.torch_gc() -- cgit v1.2.3 From 6127d2ff1bc5c3bc81d876e6dbc9a60f79aec898 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 29 Jan 2023 19:13:27 +0300 Subject: Extra networks loading fix --- modules/processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) 
(limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index da890fb3..a56915c5 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -600,7 +600,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if not p.disable_extra_networks: if type(p) == StableDiffusionProcessingTxt2Img: if p.enable_hr and p.hr_prompt != '': - extra_networks.activate(p, extra_network_data + hr_extra_network_data) + extra_networks.activate(p, extra_network_data | hr_extra_network_data) else: extra_networks.activate(p, extra_network_data) @@ -755,7 +755,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if not p.disable_extra_networks: if type(p) == StableDiffusionProcessingTxt2Img: if p.enable_hr and p.hr_prompt != '': - extra_networks.deactivate(p, extra_network_data + hr_extra_network_data) + extra_networks.deactivate(p, extra_network_data | hr_extra_network_data) else: extra_networks.deactivate(p, extra_network_data) -- cgit v1.2.3 From 9beeef6267ecbcb16f8b24e0092194f0f00142a6 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 29 Jan 2023 19:16:17 +0300 Subject: Extra networks loading fix --- modules/processing.py | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index a56915c5..98e83c69 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -558,6 +558,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if type(p) == StableDiffusionProcessingTxt2Img: if p.enable_hr and p.hr_prompt != '': _, hr_extra_network_data = extra_networks.parse_prompts(p.all_hr_prompts[0:1]) + extra_network_data.update(hr_extra_network_data) if p.scripts is not None: @@ -598,11 +599,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: sd_vae_approx.model() if not p.disable_extra_networks: - if type(p) == StableDiffusionProcessingTxt2Img: - if p.enable_hr and p.hr_prompt != '': - extra_networks.activate(p, extra_network_data | hr_extra_network_data) - else: - extra_networks.activate(p, extra_network_data) + extra_networks.activate(p, extra_network_data) with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file: processed = Processed(p, [], p.seed, "") @@ -753,11 +750,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True) if not p.disable_extra_networks: - if type(p) == StableDiffusionProcessingTxt2Img: - if p.enable_hr and p.hr_prompt != '': - extra_networks.deactivate(p, extra_network_data | hr_extra_network_data) - else: - extra_networks.deactivate(p, extra_network_data) + extra_networks.deactivate(p, extra_network_data) devices.torch_gc() -- cgit v1.2.3 From 425eab34641e8e378db610d6a2b23f3ababe1058 Mon Sep 17 00:00:00 2001 From: invincibledude <> Date: Sun, 29 Jan 2023 19:26:31 +0300 Subject: Extra network in hr abomination fix --- modules/processing.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 98e83c69..a2a91a5b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -558,7 +558,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if type(p) == StableDiffusionProcessingTxt2Img: if p.enable_hr and 
p.hr_prompt != '': _, hr_extra_network_data = extra_networks.parse_prompts(p.all_hr_prompts[0:1]) - extra_network_data.update(hr_extra_network_data) + if p.all_hr_prompts != p.all_prompts: + extra_network_data.update(hr_extra_network_data) if p.scripts is not None: -- cgit v1.2.3 From c3bd113a0b969732a324aff415c49ea9ad434707 Mon Sep 17 00:00:00 2001 From: InvincibleDude <81354513+InvincibleDude@users.noreply.github.com> Date: Sun, 5 Feb 2023 15:24:41 +0000 Subject: Image info fix --- modules/processing.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 61e6dfcd..43fac21c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -808,6 +808,13 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): def init(self, all_prompts, all_seeds, all_subseeds): if self.enable_hr: + if self.hr_sampler != '---': + self.extra_generation_params["Hires sampler"] = self.hr_sampler + + if self.hr_prompt != '': + self.extra_generation_params["Hires prompt"] = f'({self.hr_prompt.replace(",", ";")})' + self.extra_generation_params["Hires negative prompt"] = f'({self.hr_negative_prompt.replace(",", ";")})' + if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height): self.hr_resize_x = self.width self.hr_resize_y = self.height @@ -819,9 +826,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.hr_resize_x == 0 and self.hr_resize_y == 0: self.extra_generation_params["Hires upscale"] = self.hr_scale - self.extra_generation_params["Hires sampler"] = self.hr_sampler - self.extra_generation_params["Hires prompt"] = f'({self.hr_prompt.replace(",", ";")})' - self.extra_generation_params["Hires negative prompt"] = f'({self.hr_negative_prompt.replace(",", ";")})' self.hr_upscale_to_x = int(self.width * self.hr_scale) self.hr_upscale_to_y = int(self.height * self.hr_scale) else: -- cgit v1.2.3 From 51f81efb02876d24c9e6d844e8c0cbd2384f6514 Mon Sep 17 00:00:00 2001 From: InvincibleDude <81354513+InvincibleDude@users.noreply.github.com> Date: Wed, 1 Mar 2023 21:30:20 +0300 Subject: Image processing changes Image processing changes --- modules/processing.py | 76 +++++++++++++++++++++++++-------------------------- 1 file changed, 38 insertions(+), 38 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 43fac21c..72dd3f6f 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -528,7 +528,9 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: p.all_negative_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_negative_styles_to_prompt(p.negative_prompt, p.styles)] if type(p) == StableDiffusionProcessingTxt2Img: - if p.enable_hr and p.hr_prompt != '': + if p.enable_hr and p.hr_prompt == '': + p.all_hr_prompts, p.all_hr_negative_prompts = p.all_prompts, p.all_negative_prompts + elif p.enable_hr and p.hr_prompt != '': if type(p.prompt) == list: p.all_hr_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.hr_prompt] else: @@ -555,14 +557,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings: model_hijack.embedding_db.load_textual_inversion_embeddings() - _, extra_network_data = extra_networks.parse_prompts(p.all_prompts[0:1]) - if type(p) == StableDiffusionProcessingTxt2Img: - if p.enable_hr and 
p.hr_prompt != '': - _, hr_extra_network_data = extra_networks.parse_prompts(p.all_hr_prompts[0:1]) - if p.all_hr_prompts != p.all_prompts: - extra_network_data.update(hr_extra_network_data) - - if p.scripts is not None: p.scripts.process(p) @@ -600,13 +594,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN": sd_vae_approx.model() - if not p.disable_extra_networks: - extra_networks.activate(p, extra_network_data) - - with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file: - processed = Processed(p, [], p.seed, "") - file.write(processed.infotext(p, 0)) - if state.job_count == -1: state.job_count = p.n_iter @@ -623,9 +610,12 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size] if type(p) == StableDiffusionProcessingTxt2Img: - if p.enable_hr and p.hr_prompt != '': - hr_prompts = p.all_hr_prompts[n * p.batch_size:(n + 1) * p.batch_size] - hr_negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size] + if p.enable_hr: + if p.hr_prompt == '': + hr_prompts, hr_negative_prompts = prompts, negative_prompts + else: + hr_prompts = p.all_hr_prompts[n * p.batch_size:(n + 1) * p.batch_size] + hr_negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size] seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size] subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size] @@ -633,19 +623,40 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if len(prompts) == 0: break - prompts, _ = extra_networks.parse_prompts(prompts) + prompts, extra_network_data = extra_networks.parse_prompts(prompts) + if type(p) == StableDiffusionProcessingTxt2Img: + if p.enable_hr and hr_prompts != prompts: + _, hr_extra_network_data = extra_networks.parse_prompts(hr_prompts) + extra_network_data.update(hr_extra_network_data) + + + + if not p.disable_extra_networks: + with devices.autocast(): + extra_networks.activate(p, extra_network_data) if p.scripts is not None: p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds) + # params.txt should be saved after scripts.process_batch, since the + # infotext could be modified by that callback + # Example: a wildcard processed by process_batch sets an extra model + # strength, which is saved as "Model Strength: 1.0" in the infotext + if n == 0: + with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file: + processed = Processed(p, [], p.seed, "") + file.write(processed.infotext(p, 0)) + uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, cached_uc) c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c) + if type(p) == StableDiffusionProcessingTxt2Img: - if p.enable_hr and p.hr_prompt != '': - hr_uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, p.steps, - cached_uc) - hr_c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, p.steps, - cached_c) + if p.enable_hr: + if prompts != hr_prompts: + hr_uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, p.steps, cached_uc) + hr_c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, p.steps, cached_c) + else: + hr_uc, hr_c = uc, c if 
len(model_hijack.comments) > 0: for comment in model_hijack.comments: @@ -658,20 +669,9 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast(): if type(p) == StableDiffusionProcessingTxt2Img: if p.enable_hr: - if p.hr_prompt != '': - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, hr_conditioning=hr_c, hr_unconditional_conditioning=hr_uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) - else: - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, hr_conditioning=c, - hr_unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, - subseed_strength=p.subseed_strength, prompts=prompts) - else: - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, - subseed_strength=p.subseed_strength, prompts=prompts) - + samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, hr_conditioning=hr_c, hr_unconditional_conditioning=hr_uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) else: - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, - subseeds=subseeds, - subseed_strength=p.subseed_strength, prompts=prompts) + samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) x_samples_ddim = [decode_first_stage(p.sd_model, samples_ddim[i:i+1].to(dtype=devices.dtype_vae))[0].cpu() for i in range(samples_ddim.size(0))] for x in x_samples_ddim: -- cgit v1.2.3 From b9fdb9f701e576ce48458fdf756187c2d1725649 Mon Sep 17 00:00:00 2001 From: InvincibleDude <81354513+InvincibleDude@users.noreply.github.com> Date: Sat, 4 Mar 2023 18:09:05 +0000 Subject: Fix crash when hr is disabled --- modules/processing.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 25479c06..f5cc2433 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -670,6 +670,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if type(p) == StableDiffusionProcessingTxt2Img: if p.enable_hr: samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, hr_conditioning=hr_c, hr_unconditional_conditioning=hr_uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) + else: + samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) else: samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) -- cgit v1.2.3 From f6e27378404631d951656388fc178b784fe1495b Mon Sep 17 00:00:00 2001 From: InvincibleDude <81354513+InvincibleDude@users.noreply.github.com> Date: Fri, 10 Mar 2023 12:13:55 +0000 Subject: Negative prompt fix --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index f5cc2433..694e2637 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -615,7 +615,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: hr_prompts, hr_negative_prompts = prompts, negative_prompts else: hr_prompts = p.all_hr_prompts[n * p.batch_size:(n + 1) * 
p.batch_size] - hr_negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size] + hr_negative_prompts = p.all_hr_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size] seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size] subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size] -- cgit v1.2.3 From a609bd56b4206460d1df3c3022025fc78b66718f Mon Sep 17 00:00:00 2001 From: papuSpartan Date: Sat, 1 Apr 2023 22:18:35 -0500 Subject: Transition to using settings through UI instead of cmd line args. Added feature to only apply to hr-fix. Install package using requirements_versions.txt --- modules/processing.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..e115aadd 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -29,6 +29,7 @@ from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion from einops import repeat, rearrange from blendmodes.blend import blendLayers, BlendType +import tomesd # some of those options should not be changed at all because they would break the model, so I removed them from options. opt_C = 4 @@ -500,9 +501,28 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 'sd_vae': sd_vae.reload_vae_weights() + if opts.token_merging: + + if p.hr_second_pass_steps < 1 and not opts.token_merging_hr_only: + tomesd.apply_patch( + p.sd_model, + ratio=opts.token_merging_ratio, + max_downsample=opts.token_merging_maximum_down_sampling, + sx=opts.token_merging_stride_x, + sy=opts.token_merging_stride_y, + use_rand=opts.token_merging_random, + merge_attn=opts.token_merging_merge_attention, + merge_crossattn=opts.token_merging_merge_cross_attention, + merge_mlp=opts.token_merging_merge_mlp + ) + res = process_images_inner(p) finally: + # undo model optimizations made by tomesd + if opts.token_merging: + tomesd.remove_patch(p.sd_model) + # restore opts to original state if p.override_settings_restore_afterwards: for k, v in stored_opts.items(): @@ -938,6 +958,21 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): x = None devices.torch_gc() + # apply token merging optimizations from tomesd for high-res pass + # check if hr_only so we don't redundantly apply patch + if opts.token_merging and opts.token_merging_hr_only: + tomesd.apply_patch( + self.sd_model, + ratio=opts.token_merging_ratio, + max_downsample=opts.token_merging_maximum_down_sampling, + sx=opts.token_merging_stride_x, + sy=opts.token_merging_stride_y, + use_rand=opts.token_merging_random, + merge_attn=opts.token_merging_merge_attention, + merge_crossattn=opts.token_merging_merge_cross_attention, + merge_mlp=opts.token_merging_merge_mlp + ) + samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) return samples -- cgit v1.2.3 From c707b7df95a61b66a05be94e805e1be9a432e294 Mon Sep 17 00:00:00 2001 From: papuSpartan Date: Sat, 1 Apr 2023 23:47:10 -0500 Subject: remove excess condition --- modules/processing.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index e115aadd..55735572 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -501,26 +501,26 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 
'sd_vae': sd_vae.reload_vae_weights() - if opts.token_merging: - - if p.hr_second_pass_steps < 1 and not opts.token_merging_hr_only: - tomesd.apply_patch( - p.sd_model, - ratio=opts.token_merging_ratio, - max_downsample=opts.token_merging_maximum_down_sampling, - sx=opts.token_merging_stride_x, - sy=opts.token_merging_stride_y, - use_rand=opts.token_merging_random, - merge_attn=opts.token_merging_merge_attention, - merge_crossattn=opts.token_merging_merge_cross_attention, - merge_mlp=opts.token_merging_merge_mlp - ) + if opts.token_merging and not opts.token_merging_hr_only: + print("applying token merging to all passes") + tomesd.apply_patch( + p.sd_model, + ratio=opts.token_merging_ratio, + max_downsample=opts.token_merging_maximum_down_sampling, + sx=opts.token_merging_stride_x, + sy=opts.token_merging_stride_y, + use_rand=opts.token_merging_random, + merge_attn=opts.token_merging_merge_attention, + merge_crossattn=opts.token_merging_merge_cross_attention, + merge_mlp=opts.token_merging_merge_mlp + ) res = process_images_inner(p) finally: # undo model optimizations made by tomesd if opts.token_merging: + print('removing token merging model optimizations') tomesd.remove_patch(p.sd_model) # restore opts to original state @@ -961,6 +961,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): # apply token merging optimizations from tomesd for high-res pass # check if hr_only so we don't redundantly apply patch if opts.token_merging and opts.token_merging_hr_only: + print("applying token merging for high-res pass") tomesd.apply_patch( self.sd_model, ratio=opts.token_merging_ratio, -- cgit v1.2.3 From 5c8e53d5e98da0eabf384318955c57842d612c07 Mon Sep 17 00:00:00 2001 From: papuSpartan Date: Tue, 4 Apr 2023 02:26:44 -0500 Subject: Allow different merge ratios to be used for each pass. Make toggle cmd flag work again. Remove ratio flag. 
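The shape the token-merging integration settles into in these commits is: patch the model once before process_images_inner() runs, and always unpatch in the finally block so an exception during generation cannot leave the optimization applied. A minimal standalone sketch of that pattern (illustrative only — sd_model and generate stand in for the webui's model and inner pipeline, and the keyword values simply mirror the options shown in the diff above):

    import tomesd

    def run_with_token_merging(sd_model, generate, ratio=0.5):
        # apply the ToMe patch for this generation only
        tomesd.apply_patch(
            sd_model,
            ratio=ratio,              # fraction of tokens merged in self-attention
            max_downsample=1,         # only patch blocks at the highest resolution
            sx=2, sy=2,               # stride of the merging grid
            use_rand=True,
            merge_attn=True,
            merge_crossattn=False,
            merge_mlp=False,
        )
        try:
            return generate()         # e.g. process_images_inner(p)
        finally:
            # undo the model patch even if generation raised
            tomesd.remove_patch(sd_model)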
Remove warning about controlnet being incompatible --- modules/processing.py | 44 +++++++++++++++----------------------------- 1 file changed, 15 insertions(+), 29 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 55735572..670a7a28 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -501,26 +501,16 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 'sd_vae': sd_vae.reload_vae_weights() - if opts.token_merging and not opts.token_merging_hr_only: - print("applying token merging to all passes") - tomesd.apply_patch( - p.sd_model, - ratio=opts.token_merging_ratio, - max_downsample=opts.token_merging_maximum_down_sampling, - sx=opts.token_merging_stride_x, - sy=opts.token_merging_stride_y, - use_rand=opts.token_merging_random, - merge_attn=opts.token_merging_merge_attention, - merge_crossattn=opts.token_merging_merge_cross_attention, - merge_mlp=opts.token_merging_merge_mlp - ) + if (opts.token_merging or cmd_opts.token_merging) and not opts.token_merging_hr_only: + print("\nApplying token merging\n") + sd_models.apply_token_merging(sd_model=p.sd_model, hr=False) res = process_images_inner(p) finally: # undo model optimizations made by tomesd - if opts.token_merging: - print('removing token merging model optimizations') + if opts.token_merging or cmd_opts.token_merging: + print('\nRemoving token merging model optimizations\n') tomesd.remove_patch(p.sd_model) # restore opts to original state @@ -959,20 +949,16 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): devices.torch_gc() # apply token merging optimizations from tomesd for high-res pass - # check if hr_only so we don't redundantly apply patch - if opts.token_merging and opts.token_merging_hr_only: - print("applying token merging for high-res pass") - tomesd.apply_patch( - self.sd_model, - ratio=opts.token_merging_ratio, - max_downsample=opts.token_merging_maximum_down_sampling, - sx=opts.token_merging_stride_x, - sy=opts.token_merging_stride_y, - use_rand=opts.token_merging_random, - merge_attn=opts.token_merging_merge_attention, - merge_crossattn=opts.token_merging_merge_cross_attention, - merge_mlp=opts.token_merging_merge_mlp - ) + # check if hr_only so we are not redundantly patching + if (cmd_opts.token_merging or opts.token_merging) and (opts.token_merging_hr_only or opts.token_merging_ratio_hr != opts.token_merging_ratio): + # case where user wants to use separate merge ratios + if not opts.token_merging_hr_only: + # clean patch done by first pass. (clobbering the first patch might be fine? this might be excessive) + print('Temporarily reverting token merging optimizations in preparation for next pass') + tomesd.remove_patch(self.sd_model) + + print("\nApplying token merging for high-res pass\n") + sd_models.apply_token_merging(sd_model=self.sd_model, hr=True) samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) -- cgit v1.2.3 From 1c1106260300ca3956d9619875e28278b148adab Mon Sep 17 00:00:00 2001 From: papuSpartan Date: Mon, 10 Apr 2023 03:37:15 -0500 Subject: add token merging options to infotext when necessary. 
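Allowing a different merge ratio for the hires pass means the first-pass patch has to be cleared and re-applied between the two sampling passes. Roughly (a sketch against the same tomesd API as above, treating a ratio of 0 as "disabled"):

    import tomesd

    def repatch_for_hires(sd_model, first_pass_ratio, hires_ratio):
        if hires_ratio == first_pass_ratio:
            return                                # keep the existing patch (or lack of one)
        if first_pass_ratio > 0:
            tomesd.remove_patch(sd_model)         # clear the first-pass patch
        if hires_ratio > 0:
            tomesd.apply_patch(sd_model, ratio=hires_ratio)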
Bump tomesd version --- modules/processing.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 670a7a28..95058d0b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -31,6 +31,12 @@ from einops import repeat, rearrange from blendmodes.blend import blendLayers, BlendType import tomesd +# add a logger for the processing module +logger = logging.getLogger(__name__) +# manually set output level here since there is no option to do so yet through launch options +# logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s') + + # some of those options should not be changed at all because they would break the model, so I removed them from options. opt_C = 4 opt_f = 8 @@ -477,6 +483,14 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, + "Token merging ratio": None if not (opts.token_merging or cmd_opts.token_merging) or opts.token_merging_hr_only else opts.token_merging_ratio, + "Token merging ratio hr": None if not (opts.token_merging or cmd_opts.token_merging) else opts.token_merging_ratio_hr, + "Token merging random": None if opts.token_merging_random is False else opts.token_merging_random, + "Token merging merge attention": None if opts.token_merging_merge_attention is True else opts.token_merging_merge_attention, + "Token merging merge cross attention": None if opts.token_merging_merge_cross_attention is False else opts.token_merging_merge_cross_attention, + "Token merging merge mlp": None if opts.token_merging_merge_mlp is False else opts.token_merging_merge_mlp, + "Token merging stride x": None if opts.token_merging_stride_x == 2 else opts.token_merging_stride_x, + "Token merging stride y": None if opts.token_merging_stride_y == 2 else opts.token_merging_stride_y } generation_params.update(p.extra_generation_params) @@ -502,16 +516,16 @@ def process_images(p: StableDiffusionProcessing) -> Processed: sd_vae.reload_vae_weights() if (opts.token_merging or cmd_opts.token_merging) and not opts.token_merging_hr_only: - print("\nApplying token merging\n") sd_models.apply_token_merging(sd_model=p.sd_model, hr=False) + logger.debug('Token merging applied') res = process_images_inner(p) finally: # undo model optimizations made by tomesd if opts.token_merging or cmd_opts.token_merging: - print('\nRemoving token merging model optimizations\n') tomesd.remove_patch(p.sd_model) + logger.debug('Token merging model optimizations removed') # restore opts to original state if p.override_settings_restore_afterwards: @@ -954,11 +968,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): # case where user wants to use separate merge ratios if not opts.token_merging_hr_only: # clean patch done by first pass. (clobbering the first patch might be fine? 
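The infotext entries added here follow the module's usual convention: an option is written into generation_params as None when it is at its default value, and the final join drops every None entry, so the infotext only records settings that were actually changed. In isolation the pattern looks like this (a simplified sketch; the real code also routes values through generation_parameters_copypaste.quote()):

    def build_infotext(params):
        # None means "default, do not record"; bare flags are written as just the key
        return ", ".join(
            k if k == v else f"{k}: {v}"
            for k, v in params.items()
            if v is not None
        )

    print(build_infotext({
        "Steps": 20,
        "Token merging ratio": 0.5,
        "Token merging stride x": None,   # 2 is the default, so it is suppressed
    }))
    # -> Steps: 20, Token merging ratio: 0.5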
this might be excessive) - print('Temporarily reverting token merging optimizations in preparation for next pass') tomesd.remove_patch(self.sd_model) + logger.debug('Temporarily removed token merging optimizations in preparation for next pass') - print("\nApplying token merging for high-res pass\n") sd_models.apply_token_merging(sd_model=self.sd_model, hr=True) + logger.debug('Applied token merging for high-res pass') samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) -- cgit v1.2.3 From e960781511eb175943be09b314ac2be46b6fc684 Mon Sep 17 00:00:00 2001 From: papuSpartan <30642826+papuSpartan@users.noreply.github.com> Date: Wed, 3 May 2023 13:12:43 -0500 Subject: fix maximum downsampling option --- modules/processing.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index d5d1da5a..6807a301 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -495,6 +495,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Token merging merge mlp": None if opts.token_merging_merge_mlp is False else opts.token_merging_merge_mlp, "Token merging stride x": None if opts.token_merging_stride_x == 2 else opts.token_merging_stride_x, "Token merging stride y": None if opts.token_merging_stride_y == 2 else opts.token_merging_stride_y, + "Token merging maximum down sampling": None if opts.token_merging_maximum_down_sampling == 1 else opts.token_merging_maximum_down_sampling, "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, -- cgit v1.2.3 From 96d6ca4199e7c5eee8d451618de5161cea317c40 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 08:25:25 +0300 Subject: manual fixes for ruff --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 1a76e552..6f5233c1 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -664,7 +664,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if not shared.opts.dont_fix_second_order_samplers_schedule: try: step_multiplier = 2 if sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1 - except: + except Exception: pass uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps * step_multiplier, cached_uc) c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps * step_multiplier, cached_c) -- cgit v1.2.3 From f741a98baccae100fcfb40c017b5c35c5cba1b0c Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 08:43:42 +0300 Subject: imports cleanup for ruff --- modules/processing.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 6f5233c1..c3932d6b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -2,7 +2,6 @@ import json import math import os import sys -import warnings import hashlib import torch @@ -11,10 +10,10 @@ from PIL import Image, ImageFilter, ImageOps import random 
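Two small things happen in the ruff cleanup above: the bare except around the step-multiplier lookup becomes "except Exception:" (a bare except also swallows KeyboardInterrupt and SystemExit), while the lookup itself still decides whether a sampler needs its step count doubled by matching its first alias against a hard-coded list. A standalone approximation of that guarded lookup (the alias set is copied from the diff; samplers_map is assumed to map names to objects carrying an aliases list):

    SECOND_ORDER_ALIASES = {
        'k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka',
        'k_dpm_2', 'k_dpm_2_a', 'k_heun',
    }

    def step_multiplier(sampler_name, samplers_map):
        # second-order samplers evaluate the model twice per step, so the
        # conditioning schedule needs to be built for twice as many steps
        try:
            return 2 if samplers_map[sampler_name].aliases[0] in SECOND_ORDER_ALIASES else 1
        except Exception:   # unknown sampler or missing aliases: assume first-order
            return 1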
import cv2 from skimage import exposure -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List import modules.sd_hijack -from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts +from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts from modules.sd_hijack import model_hijack from modules.shared import opts, cmd_opts, state import modules.shared as shared -- cgit v1.2.3 From 49a55b410b66b7dd9be9335d8a2e3a71e4f8b15c Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 11 May 2023 18:28:15 +0300 Subject: Autofix Ruff W (not W605) (mostly whitespace) --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index c3932d6b..f902b9df 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -164,7 +164,7 @@ class StableDiffusionProcessing: self.all_subseeds = None self.iteration = 0 self.is_hr_pass = False - + @property def sd_model(self): -- cgit v1.2.3 From 55e52c878ab669d5b11b001a4152ee1a3b8d4880 Mon Sep 17 00:00:00 2001 From: papuSpartan <30642826+papuSpartan@users.noreply.github.com> Date: Sat, 13 May 2023 09:24:56 -0500 Subject: remove command line option --- modules/processing.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 8ba3a96b..6828e898 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -496,8 +496,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, - "Token merging ratio": None if not (opts.token_merging or cmd_opts.token_merging) or opts.token_merging_hr_only else opts.token_merging_ratio, - "Token merging ratio hr": None if not (opts.token_merging or cmd_opts.token_merging) else opts.token_merging_ratio_hr, + "Token merging ratio": None if not opts.token_merging or opts.token_merging_hr_only else opts.token_merging_ratio, + "Token merging ratio hr": None if not opts.token_merging else opts.token_merging_ratio_hr, "Token merging random": None if opts.token_merging_random is False else opts.token_merging_random, "Token merging merge attention": None if opts.token_merging_merge_attention is True else opts.token_merging_merge_attention, "Token merging merge cross attention": None if opts.token_merging_merge_cross_attention is False else opts.token_merging_merge_cross_attention, @@ -538,7 +538,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 'sd_vae': sd_vae.reload_vae_weights() - if (opts.token_merging or cmd_opts.token_merging) and not opts.token_merging_hr_only: + if opts.token_merging and not opts.token_merging_hr_only: sd_models.apply_token_merging(sd_model=p.sd_model, hr=False) logger.debug('Token merging applied') @@ -546,7 +546,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: finally: # undo model optimizations made by tomesd - if opts.token_merging or cmd_opts.token_merging: + if opts.token_merging: tomesd.remove_patch(p.sd_model) 
logger.debug('Token merging model optimizations removed') @@ -1004,7 +1004,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): # apply token merging optimizations from tomesd for high-res pass # check if hr_only so we are not redundantly patching - if (cmd_opts.token_merging or opts.token_merging) and (opts.token_merging_hr_only or opts.token_merging_ratio_hr != opts.token_merging_ratio): + if opts.token_merging and (opts.token_merging_hr_only or opts.token_merging_ratio_hr != opts.token_merging_ratio): # case where user wants to use separate merge ratios if not opts.token_merging_hr_only: # clean patch done by first pass. (clobbering the first patch might be fine? this might be excessive) -- cgit v1.2.3 From ac83627a31daac06f4d48b0e7db223ef807fe8e5 Mon Sep 17 00:00:00 2001 From: papuSpartan <30642826+papuSpartan@users.noreply.github.com> Date: Sat, 13 May 2023 10:23:42 -0500 Subject: heavily simplify --- modules/processing.py | 35 +++++++++++++++-------------------- 1 file changed, 15 insertions(+), 20 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 6828e898..32ff61e9 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -34,7 +34,7 @@ import tomesd # add a logger for the processing module logger = logging.getLogger(__name__) # manually set output level here since there is no option to do so yet through launch options -# logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s') +logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s') # some of those options should not be changed at all because they would break the model, so I removed them from options. @@ -496,15 +496,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, - "Token merging ratio": None if not opts.token_merging or opts.token_merging_hr_only else opts.token_merging_ratio, - "Token merging ratio hr": None if not opts.token_merging else opts.token_merging_ratio_hr, - "Token merging random": None if opts.token_merging_random is False else opts.token_merging_random, - "Token merging merge attention": None if opts.token_merging_merge_attention is True else opts.token_merging_merge_attention, - "Token merging merge cross attention": None if opts.token_merging_merge_cross_attention is False else opts.token_merging_merge_cross_attention, - "Token merging merge mlp": None if opts.token_merging_merge_mlp is False else opts.token_merging_merge_mlp, - "Token merging stride x": None if opts.token_merging_stride_x == 2 else opts.token_merging_stride_x, - "Token merging stride y": None if opts.token_merging_stride_y == 2 else opts.token_merging_stride_y, - "Token merging maximum down sampling": None if opts.token_merging_maximum_down_sampling == 1 else opts.token_merging_maximum_down_sampling, + "Token merging ratio": None if opts.token_merging_ratio == 0 else opts.token_merging_ratio, + "Token merging ratio hr": None if not p.enable_hr or opts.token_merging_ratio_hr == 0 else opts.token_merging_ratio_hr, "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 
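The logging setup touched above is the standard per-module pattern: grab a named logger at import time and emit debug messages through it. The basicConfig line that this commit uncomments (and a later commit comments out again) configures the root logger for the whole process, which is why it does not stay enabled in a library module. For reference, stdlib only:

    import logging

    logger = logging.getLogger(__name__)

    # left to the application; enabling this here would configure the root
    # logger for every module in the process
    # logging.basicConfig(level=logging.DEBUG,
    #                     format='%(asctime)s %(levelname)s %(name)s %(message)s')

    def apply_optimizations(ratio):
        logger.debug("Token merging applied to first pass. Ratio: %r", ratio)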
0 else p.s_min_uncond, @@ -538,15 +531,15 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 'sd_vae': sd_vae.reload_vae_weights() - if opts.token_merging and not opts.token_merging_hr_only: + if opts.token_merging_ratio > 0: sd_models.apply_token_merging(sd_model=p.sd_model, hr=False) - logger.debug('Token merging applied') + logger.debug(f"Token merging applied to first pass. Ratio: '{opts.token_merging_ratio}'") res = process_images_inner(p) finally: # undo model optimizations made by tomesd - if opts.token_merging: + if opts.token_merging_ratio > 0: tomesd.remove_patch(p.sd_model) logger.debug('Token merging model optimizations removed') @@ -1003,19 +996,21 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): devices.torch_gc() # apply token merging optimizations from tomesd for high-res pass - # check if hr_only so we are not redundantly patching - if opts.token_merging and (opts.token_merging_hr_only or opts.token_merging_ratio_hr != opts.token_merging_ratio): - # case where user wants to use separate merge ratios - if not opts.token_merging_hr_only: - # clean patch done by first pass. (clobbering the first patch might be fine? this might be excessive) + if opts.token_merging_ratio_hr > 0: + # in case the user has used separate merge ratios + if opts.token_merging_ratio > 0: tomesd.remove_patch(self.sd_model) - logger.debug('Temporarily removed token merging optimizations in preparation for next pass') + logger.debug('Adjusting token merging ratio for high-res pass') sd_models.apply_token_merging(sd_model=self.sd_model, hr=True) - logger.debug('Applied token merging for high-res pass') + logger.debug(f"Applied token merging for high-res pass. Ratio: '{opts.token_merging_ratio_hr}'") samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) + if opts.token_merging_ratio_hr > 0 or opts.token_merging_ratio > 0: + tomesd.remove_patch(self.sd_model) + logger.debug('Removed token merging optimizations from model') + self.is_hr_pass = False return samples -- cgit v1.2.3 From c2fdb44880e07f43aee2f7edc1dc36a9516501e8 Mon Sep 17 00:00:00 2001 From: papuSpartan <30642826+papuSpartan@users.noreply.github.com> Date: Sat, 13 May 2023 11:11:02 -0500 Subject: fix for img2img --- modules/processing.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 32ff61e9..94fe2625 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -34,7 +34,7 @@ import tomesd # add a logger for the processing module logger = logging.getLogger(__name__) # manually set output level here since there is no option to do so yet through launch options -logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s') +# logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s') # some of those options should not be changed at all because they would break the model, so I removed them from options. 
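The "fix for img2img" change is the classic getattr-with-default idiom: create_infotext() is shared between pipelines, and only the txt2img processing object carries enable_hr, so reading the attribute directly would raise AttributeError for img2img jobs. Minimal illustration:

    class Txt2ImgJob:
        enable_hr = True

    class Img2ImgJob:
        pass                        # no enable_hr attribute at all

    for p in (Txt2ImgJob(), Img2ImgJob()):
        enable_hr = getattr(p, 'enable_hr', False)   # False when the attribute is missing
        print(type(p).__name__, enable_hr)
    # Txt2ImgJob True
    # Img2ImgJob False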
@@ -478,6 +478,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter index = position_in_batch + iteration * p.batch_size clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers) + enable_hr = getattr(p, 'enable_hr', False) generation_params = { "Steps": p.steps, @@ -497,7 +498,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, "Token merging ratio": None if opts.token_merging_ratio == 0 else opts.token_merging_ratio, - "Token merging ratio hr": None if not p.enable_hr or opts.token_merging_ratio_hr == 0 else opts.token_merging_ratio_hr, + "Token merging ratio hr": None if not enable_hr or opts.token_merging_ratio_hr == 0 else opts.token_merging_ratio_hr, "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, -- cgit v1.2.3 From cdac5ace1456ba779d5a0171ff8757f31955bfee Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 16 May 2023 11:54:02 +0300 Subject: suppress ENSD infotext for samplers that don't use it --- modules/processing.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 94fe2625..15806f78 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -13,7 +13,7 @@ from skimage import exposure from typing import Any, Dict, List import modules.sd_hijack -from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts +from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common from modules.sd_hijack import model_hijack from modules.shared import opts, cmd_opts, state import modules.shared as shared @@ -480,6 +480,10 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers) enable_hr = getattr(p, 'enable_hr', False) + uses_ensd = opts.eta_noise_seed_delta != 0 + if uses_ensd: + uses_ensd = sd_samplers_common.is_sampler_using_eta_noise_seed_delta(p) + generation_params = { "Steps": p.steps, "Sampler": p.sampler_name, @@ -496,17 +500,16 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Denoising strength": getattr(p, 'denoising_strength', None), "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, - "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, + "ENSD": opts.eta_noise_seed_delta if uses_ensd else None, "Token merging ratio": None if opts.token_merging_ratio == 0 else opts.token_merging_ratio, "Token merging ratio hr": None if not enable_hr or opts.token_merging_ratio_hr == 0 else opts.token_merging_ratio_hr, "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, + **p.extra_generation_params, "Version": program_version() if opts.add_version_to_infotext else None, } - 
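Two infotext changes ride along in the ENSD commit: the ENSD entry is only written when eta_noise_seed_delta is non-zero and the chosen sampler actually consumes eta noise (the new sd_samplers_common.is_sampler_using_eta_noise_seed_delta check), and the separate generation_params.update(p.extra_generation_params) call is folded into the dict literal with ** unpacking. The resulting logic, in isolation (sampler_uses_ensd stands in for the sampler check):

    def build_params(eta_noise_seed_delta, sampler_uses_ensd, extra):
        uses_ensd = eta_noise_seed_delta != 0 and sampler_uses_ensd
        return {
            "ENSD": eta_noise_seed_delta if uses_ensd else None,   # None entries are dropped later
            **extra,               # replaces the old generation_params.update(...)
        }

    print(build_params(31337, sampler_uses_ensd=True,  extra={"Hires upscaler": "Latent"}))
    print(build_params(31337, sampler_uses_ensd=False, extra={}))   # ENSD suppressed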
generation_params.update(p.extra_generation_params) - generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None]) negative_prompt_text = f"\nNegative prompt: {p.all_negative_prompts[index]}" if p.all_negative_prompts[index] else "" -- cgit v1.2.3 From a61cbef02c7652f96d333b28e01f5230e225224e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 16 May 2023 12:36:15 +0300 Subject: add second_order field to sampler config --- modules/processing.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 15806f78..678c4468 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -681,12 +681,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: processed = Processed(p, [], p.seed, "") file.write(processed.infotext(p, 0)) - step_multiplier = 1 - if not shared.opts.dont_fix_second_order_samplers_schedule: - try: - step_multiplier = 2 if sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1 - except Exception: - pass + sampler_config = sd_samplers.find_sampler_config(p.sampler_name) + step_multiplier = 2 if sampler_config and sampler_config.options.get("second_order", False) else 1 uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps * step_multiplier, cached_uc) c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps * step_multiplier, cached_c) -- cgit v1.2.3 From 315f109427a5dbbf48b0da560640523800fe3c1d Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Wed, 17 May 2023 10:26:32 +0300 Subject: Copy s_min_uncond to Processed Should fix #10416 --- modules/processing.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 678c4468..cd63b9a6 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -316,6 +316,7 @@ class Processed: self.s_tmin = p.s_tmin self.s_tmax = p.s_tmax self.s_noise = p.s_noise + self.s_min_uncond = p.s_min_uncond self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0] self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0] -- cgit v1.2.3 From 9fd6c1e3430f5947add23e2e94ac816c2546481c Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 17 May 2023 20:22:38 +0300 Subject: move some settings to the new Optimization page add slider for token merging for img2img rework StableDiffusionProcessing to have the token_merging_ratio field fix a bug with applying png optimizations for live previews when they shouldn't be applied --- modules/processing.py | 52 +++++++++++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 27 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index cd63b9a6..2b8dd361 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -29,12 +29,6 @@ from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion from einops import repeat, rearrange from blendmodes.blend import blendLayers, BlendType -import tomesd - -# add a logger for the processing module -logger = 
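The second_order commit replaces the hard-coded alias list shown earlier with a flag carried on the sampler's own config: any sampler whose options declare second_order=True gets its conditioning schedule built for twice the step count. A self-contained sketch of the new lookup (SamplerData here is a stand-in for the real sampler config objects):

    from dataclasses import dataclass, field

    @dataclass
    class SamplerData:
        name: str
        options: dict = field(default_factory=dict)

    def step_multiplier(sampler_config):
        return 2 if sampler_config and sampler_config.options.get("second_order", False) else 1

    print(step_multiplier(SamplerData("Heun", {"second_order": True})))   # 2
    print(step_multiplier(SamplerData("Euler a")))                        # 1
    print(step_multiplier(None))                                          # unknown sampler -> 1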
logging.getLogger(__name__) -# manually set output level here since there is no option to do so yet through launch options -# logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s') # some of those options should not be changed at all because they would break the model, so I removed them from options. @@ -156,6 +150,8 @@ class StableDiffusionProcessing: self.override_settings_restore_afterwards = override_settings_restore_afterwards self.is_using_inpainting_conditioning = False self.disable_extra_networks = False + self.token_merging_ratio = 0 + self.token_merging_ratio_hr = 0 if not seed_enable_extras: self.subseed = -1 @@ -171,6 +167,7 @@ class StableDiffusionProcessing: self.all_subseeds = None self.iteration = 0 self.is_hr_pass = False + self.sampler = None @property @@ -280,6 +277,12 @@ class StableDiffusionProcessing: def close(self): self.sampler = None + def get_token_merging_ratio(self, for_hr=False): + if for_hr: + return self.token_merging_ratio_hr or opts.token_merging_ratio_hr or self.token_merging_ratio or opts.token_merging_ratio + + return self.token_merging_ratio or opts.token_merging_ratio + class Processed: def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""): @@ -309,6 +312,8 @@ class Processed: self.styles = p.styles self.job_timestamp = state.job_timestamp self.clip_skip = opts.CLIP_stop_at_last_layers + self.token_merging_ratio = p.token_merging_ratio + self.token_merging_ratio_hr = p.token_merging_ratio_hr self.eta = p.eta self.ddim_discretize = p.ddim_discretize @@ -367,6 +372,9 @@ class Processed: def infotext(self, p: StableDiffusionProcessing, index): return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size) + def get_token_merging_ratio(self, for_hr=False): + return self.token_merging_ratio_hr if for_hr else self.token_merging_ratio + # from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3 def slerp(val, low, high): @@ -480,6 +488,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers) enable_hr = getattr(p, 'enable_hr', False) + token_merging_ratio = p.get_token_merging_ratio() + token_merging_ratio_hr = p.get_token_merging_ratio(for_hr=True) uses_ensd = opts.eta_noise_seed_delta != 0 if uses_ensd: @@ -502,8 +512,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": opts.eta_noise_seed_delta if uses_ensd else None, - "Token merging ratio": None if opts.token_merging_ratio == 0 else opts.token_merging_ratio, - "Token merging ratio hr": None if not enable_hr or opts.token_merging_ratio_hr == 0 else opts.token_merging_ratio_hr, + "Token merging ratio": None if token_merging_ratio == 0 else token_merging_ratio, + "Token merging ratio hr": None if not enable_hr or token_merging_ratio_hr == 0 else token_merging_ratio_hr, "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if 
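The get_token_merging_ratio() method introduced above leans on Python's "or" chaining, with 0 doubling as "unset": the per-job hires ratio wins, then the global hires option, then the per-job ratio, then the global option. Equivalent logic pulled out of the class:

    def get_token_merging_ratio(job_ratio, job_ratio_hr, opt_ratio, opt_ratio_hr, for_hr=False):
        if for_hr:
            return job_ratio_hr or opt_ratio_hr or job_ratio or opt_ratio
        return job_ratio or opt_ratio

    print(get_token_merging_ratio(0, 0, 0.3, 0,   for_hr=True))   # 0.3 (falls through to the global ratio)
    print(get_token_merging_ratio(0, 0, 0.3, 0.4, for_hr=True))   # 0.4 (global hires option wins)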
p.s_min_uncond == 0 else p.s_min_uncond, @@ -536,17 +546,12 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 'sd_vae': sd_vae.reload_vae_weights() - if opts.token_merging_ratio > 0: - sd_models.apply_token_merging(sd_model=p.sd_model, hr=False) - logger.debug(f"Token merging applied to first pass. Ratio: '{opts.token_merging_ratio}'") + sd_models.apply_token_merging(p.sd_model, p.get_token_merging_ratio()) res = process_images_inner(p) finally: - # undo model optimizations made by tomesd - if opts.token_merging_ratio > 0: - tomesd.remove_patch(p.sd_model) - logger.debug('Token merging model optimizations removed') + sd_models.apply_token_merging(p.sd_model, 0) # restore opts to original state if p.override_settings_restore_afterwards: @@ -996,21 +1001,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): x = None devices.torch_gc() - # apply token merging optimizations from tomesd for high-res pass - if opts.token_merging_ratio_hr > 0: - # in case the user has used separate merge ratios - if opts.token_merging_ratio > 0: - tomesd.remove_patch(self.sd_model) - logger.debug('Adjusting token merging ratio for high-res pass') - - sd_models.apply_token_merging(sd_model=self.sd_model, hr=True) - logger.debug(f"Applied token merging for high-res pass. Ratio: '{opts.token_merging_ratio_hr}'") + sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio(for_hr=True)) samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) - if opts.token_merging_ratio_hr > 0 or opts.token_merging_ratio > 0: - tomesd.remove_patch(self.sd_model) - logger.debug('Removed token merging optimizations from model') + sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio()) self.is_hr_pass = False @@ -1173,3 +1168,6 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): devices.torch_gc() return samples + + def get_token_merging_ratio(self, for_hr=False): + return self.token_merging_ratio or ("token_merging_ratio" in self.override_settings and opts.token_merging_ratio) or opts.token_merging_ratio_img2img or opts.token_merging_ratio -- cgit v1.2.3 From 44c37f94e176667ccdfeb74916e4640fa9dc586d Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 18 May 2023 16:36:30 +0300 Subject: add messages about Loras that failed to load to UI --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 2b8dd361..7ee6da28 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -808,7 +808,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: images_list=output_images, seed=p.all_seeds[0], info=infotext(), - comments="".join(f"\n\n{comment}" for comment in comments), + comments="".join(f"{comment}\n" for comment in comments), subseed=p.all_subseeds[0], index_of_first_image=index_of_first_image, infotexts=infotexts, -- cgit v1.2.3 From ff0e17174f8d93a71fdd5a4a80a4629bbf97f822 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 18 May 2023 20:16:09 +0300 Subject: rework hires prompts/sampler code to among other things support different extra networks in first/second pass rework quoting for infotext items that have commas in them to use json (should be backwards compatible except for cases where it didn't work previously) add some 
locals from processing function into the Processing class as fields --- modules/processing.py | 261 ++++++++++++++++++++++++++++---------------------- 1 file changed, 149 insertions(+), 112 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index dd14c486..29a3743f 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -169,6 +169,16 @@ class StableDiffusionProcessing: self.is_hr_pass = False self.sampler = None + self.prompts = None + self.negative_prompts = None + self.seeds = None + self.subseeds = None + + self.step_multiplier = 1 + self.cached_uc = [None, None] + self.cached_c = [None, None] + self.uc = None + self.c = None @property def sd_model(self): @@ -271,11 +281,15 @@ class StableDiffusionProcessing: def init(self, all_prompts, all_seeds, all_subseeds): pass - def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts, hr_conditioning=None, hr_unconditional_conditioning=None): + def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): raise NotImplementedError() def close(self): self.sampler = None + self.c = None + self.uc = None + self.cached_c = [None, None] + self.cached_uc = [None, None] def get_token_merging_ratio(self, for_hr=False): if for_hr: @@ -283,6 +297,52 @@ class StableDiffusionProcessing: return self.token_merging_ratio or opts.token_merging_ratio + def setup_prompts(self): + if type(self.prompt) == list: + self.all_prompts = self.prompt + else: + self.all_prompts = self.batch_size * self.n_iter * [self.prompt] + + if type(self.negative_prompt) == list: + self.all_negative_prompts = self.negative_prompt + else: + self.all_negative_prompts = self.batch_size * self.n_iter * [self.negative_prompt] + + self.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_prompts] + self.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_negative_prompts] + + def get_conds_with_caching(self, function, required_prompts, steps, cache): + """ + Returns the result of calling function(shared.sd_model, required_prompts, steps) + using a cache to store the result if the same arguments have been used before. + + cache is an array containing two elements. The first element is a tuple + representing the previously used arguments, or None if no arguments + have been used before. The second element is where the previously + computed result is stored. 
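The setup_prompts() method added in this rework is the broadcast step the rest of the refactor relies on: a single prompt string is expanded to one entry per generated image (batch_size * n_iter copies), an explicit list is used as-is, and the per-iteration loop later slices batch-sized windows out of the result. Reduced to its essentials:

    def broadcast(prompt, batch_size, n_iter):
        # one entry per image; an explicit list is taken as-is
        return list(prompt) if isinstance(prompt, list) else batch_size * n_iter * [prompt]

    batch_size, n_iter = 2, 3
    all_prompts = broadcast("a photo of a cat", batch_size, n_iter)
    print(len(all_prompts))                                # 6

    n = 1                                                  # second batch of the run
    print(all_prompts[n * batch_size:(n + 1) * batch_size])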
+ """ + + if cache[0] is not None and (required_prompts, steps) == cache[0]: + return cache[1] + + with devices.autocast(): + cache[1] = function(shared.sd_model, required_prompts, steps) + + cache[0] = (required_prompts, steps) + return cache[1] + + def setup_conds(self): + sampler_config = sd_samplers.find_sampler_config(self.sampler_name) + self.step_multiplier = 2 if sampler_config and sampler_config.options.get("second_order", False) else 1 + + self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.negative_prompts, self.steps * self.step_multiplier, self.cached_uc) + self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.prompts, self.steps * self.step_multiplier, self.cached_c) + + def parse_extra_network_prompts(self): + self.prompts, extra_network_data = extra_networks.parse_prompts(self.prompts) + + return extra_network_data + class Processed: def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""): @@ -582,29 +642,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: comments = {} - if type(p.prompt) == list: - p.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.prompt] - else: - p.all_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_styles_to_prompt(p.prompt, p.styles)] - - if type(p.negative_prompt) == list: - p.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, p.styles) for x in p.negative_prompt] - else: - p.all_negative_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_negative_styles_to_prompt(p.negative_prompt, p.styles)] - - if type(p) == StableDiffusionProcessingTxt2Img: - if p.enable_hr and p.hr_prompt == '': - p.all_hr_prompts, p.all_hr_negative_prompts = p.all_prompts, p.all_negative_prompts - elif p.enable_hr and p.hr_prompt != '': - if type(p.prompt) == list: - p.all_hr_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.hr_prompt] - else: - p.all_hr_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_styles_to_prompt(p.hr_prompt, p.styles)] - - if type(p.negative_prompt) == list: - p.all_hr_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, p.styles) for x in p.hr_negative_prompt] - else: - p.all_hr_negative_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_negative_styles_to_prompt(p.hr_negative_prompt, p.styles)] + p.setup_prompts() if type(seed) == list: p.all_seeds = seed @@ -628,29 +666,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: infotexts = [] output_images = [] - cached_uc = [None, None] - cached_c = [None, None] - - def get_conds_with_caching(function, required_prompts, steps, cache): - """ - Returns the result of calling function(shared.sd_model, required_prompts, steps) - using a cache to store the result if the same arguments have been used before. - - cache is an array containing two elements. The first element is a tuple - representing the previously used arguments, or None if no arguments - have been used before. The second element is where the previously - computed result is stored. 
- """ - - if cache[0] is not None and (required_prompts, steps) == cache[0]: - return cache[1] - - with devices.autocast(): - cache[1] = function(shared.sd_model, required_prompts, steps) - - cache[0] = (required_prompts, steps) - return cache[1] - with torch.no_grad(), p.sd_model.ema_scope(): with devices.autocast(): p.init(p.all_prompts, p.all_seeds, p.all_subseeds) @@ -672,40 +687,25 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if state.interrupted: break - prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size] - negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size] - - if type(p) == StableDiffusionProcessingTxt2Img: - if p.enable_hr: - if p.hr_prompt == '': - hr_prompts, hr_negative_prompts = prompts, negative_prompts - else: - hr_prompts = p.all_hr_prompts[n * p.batch_size:(n + 1) * p.batch_size] - hr_negative_prompts = p.all_hr_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size] - - seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size] - subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size] + p.prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size] + p.negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size] + p.seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size] + p.subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size] if p.scripts is not None: - p.scripts.before_process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds) + p.scripts.before_process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds) - if len(prompts) == 0: + if len(p.prompts) == 0: break - prompts, extra_network_data = extra_networks.parse_prompts(prompts) - - if type(p) == StableDiffusionProcessingTxt2Img: - if p.enable_hr and hr_prompts != prompts: - _, hr_extra_network_data = extra_networks.parse_prompts(hr_prompts) - extra_network_data.update(hr_extra_network_data) - + extra_network_data = p.parse_extra_network_prompts() if not p.disable_extra_networks: with devices.autocast(): extra_networks.activate(p, extra_network_data) if p.scripts is not None: - p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds) + p.scripts.process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds) # params.txt should be saved after scripts.process_batch, since the # infotext could be modified by that callback @@ -716,18 +716,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: processed = Processed(p, [], p.seed, "") file.write(processed.infotext(p, 0)) - sampler_config = sd_samplers.find_sampler_config(p.sampler_name) - step_multiplier = 2 if sampler_config and sampler_config.options.get("second_order", False) else 1 - uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps * step_multiplier, cached_uc) - c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps * step_multiplier, cached_c) - - if type(p) == StableDiffusionProcessingTxt2Img: - if p.enable_hr: - if prompts != hr_prompts: - hr_uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, p.steps, cached_uc) - hr_c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, p.steps, cached_c) - else: - hr_uc, hr_c = uc, c + p.setup_conds() if len(model_hijack.comments) > 0: for comment in model_hijack.comments: @@ -736,15 +725,8 @@ def process_images_inner(p: 
StableDiffusionProcessing) -> Processed: if p.n_iter > 1: shared.state.job = f"Batch {n+1} out of {p.n_iter}" - with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast(): - if type(p) == StableDiffusionProcessingTxt2Img: - if p.enable_hr: - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, hr_conditioning=hr_c, hr_unconditional_conditioning=hr_uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) - else: - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) - else: - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) + samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) x_samples_ddim = [decode_first_stage(p.sd_model, samples_ddim[i:i+1].to(dtype=devices.dtype_vae))[0].cpu() for i in range(samples_ddim.size(0))] for x in x_samples_ddim: @@ -771,7 +753,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if p.restore_faces: if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration: - images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration") + images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration") devices.torch_gc() @@ -788,13 +770,13 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if p.color_corrections is not None and i < len(p.color_corrections): if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction: image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images) - images.save_image(image_without_cc, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction") + images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction") image = apply_color_correction(p.color_corrections[i], image) image = apply_overlay(image, p.paste_to, i, p.overlay_images) if opts.samples_save and not p.do_not_save_samples: - images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p) + images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p) text = infotext(n, i) infotexts.append(text) @@ -807,10 +789,10 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA') if opts.save_mask: - images.save_image(image_mask, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask") + images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask") if opts.save_mask_composite: - images.save_image(image_mask_composite, p.outpath_samples, "", seeds[i], 
prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask-composite") + images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask-composite") if opts.return_mask: output_images.append(image_mask) @@ -879,7 +861,7 @@ def old_hires_fix_first_pass_dimensions(width, height): class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): sampler = None - def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_sampler: str = '---', hr_prompt: str = '', hr_negative_prompt: str = '', **kwargs): + def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_sampler_name: str = None, hr_prompt: str = '', hr_negative_prompt: str = '', **kwargs): super().__init__(**kwargs) self.enable_hr = enable_hr self.denoising_strength = denoising_strength @@ -890,9 +872,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.hr_resize_y = hr_resize_y self.hr_upscale_to_x = hr_resize_x self.hr_upscale_to_y = hr_resize_y - self.hr_sampler = hr_sampler - self.hr_prompt = hr_prompt if hr_prompt != '' else '' - self.hr_negative_prompt = hr_negative_prompt if hr_negative_prompt != '' else '' + self.hr_sampler_name = hr_sampler_name + self.hr_prompt = hr_prompt + self.hr_negative_prompt = hr_negative_prompt self.all_hr_prompts = None self.all_hr_negative_prompts = None @@ -906,14 +888,23 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.truncate_y = 0 self.applied_old_hires_behavior_to = None + self.hr_prompts = None + self.hr_negative_prompts = None + self.hr_extra_network_data = None + + self.hr_c = None + self.hr_uc = None + def init(self, all_prompts, all_seeds, all_subseeds): if self.enable_hr: - if self.hr_sampler != '---': - self.extra_generation_params["Hires sampler"] = self.hr_sampler + if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name: + self.extra_generation_params["Hires sampler"] = self.hr_sampler_name + + if tuple(self.hr_prompt) != tuple(self.prompt): + self.extra_generation_params["Hires prompt"] = self.hr_prompt - if self.hr_prompt != '': - self.extra_generation_params["Hires prompt"] = f'({self.hr_prompt.replace(",", ";")})' - self.extra_generation_params["Hires negative prompt"] = f'({self.hr_negative_prompt.replace(",", ";")})' + if tuple(self.hr_negative_prompt) != tuple(self.negative_prompt): + self.extra_generation_params["Hires negative prompt"] = self.hr_negative_prompt if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height): self.hr_resize_x = self.width @@ -975,7 +966,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.hr_upscaler is not None: self.extra_generation_params["Hires upscaler"] = self.hr_upscaler - def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts, hr_conditioning=None, hr_unconditional_conditioning=None): + def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): self.sampler = sd_samplers.create_sampler(self.sampler_name, 
self.sd_model) latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest") @@ -1044,16 +1035,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): shared.state.nextjob() - img2img_sampler_name = self.sampler_name + img2img_sampler_name = self.hr_sampler_name or self.sampler_name if self.sampler_name in ['PLMS', 'UniPC']: # PLMS/UniPC do not support img2img so we just silently switch to DDIM img2img_sampler_name = 'DDIM' - if self.hr_sampler == '---': - pass - else: - img2img_sampler_name = self.hr_sampler - self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model) samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2] @@ -1064,9 +1050,13 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): x = None devices.torch_gc() + if not self.disable_extra_networks: + with devices.autocast(): + extra_networks.activate(self, self.hr_extra_network_data) + sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio(for_hr=True)) - samples = self.sampler.sample_img2img(self, samples, noise, hr_conditioning, hr_unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) + samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio()) @@ -1074,6 +1064,53 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): return samples + def close(self): + self.hr_c = None + self.hr_uc = None + + def setup_prompts(self): + super().setup_prompts() + + if not self.enable_hr: + return + + if self.hr_prompt == '': + self.hr_prompt = self.prompt + + if self.hr_negative_prompt == '': + self.hr_negative_prompt = self.negative_prompt + + if type(self.hr_prompt) == list: + self.all_hr_prompts = self.hr_prompt + else: + self.all_hr_prompts = self.batch_size * self.n_iter * [self.hr_prompt] + + if type(self.hr_negative_prompt) == list: + self.all_hr_negative_prompts = self.hr_negative_prompt + else: + self.all_hr_negative_prompts = self.batch_size * self.n_iter * [self.hr_negative_prompt] + + self.all_hr_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_hr_prompts] + self.all_hr_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_hr_negative_prompts] + + def setup_conds(self): + super().setup_conds() + + if self.enable_hr: + self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.hr_negative_prompts, self.steps * self.step_multiplier, self.cached_uc) + self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.hr_prompts, self.steps * self.step_multiplier, self.cached_c) + + def parse_extra_network_prompts(self): + res = super().parse_extra_network_prompts() + + if self.enable_hr: + self.hr_prompts = self.all_hr_prompts[self.iteration * self.batch_size:(self.iteration + 1) * self.batch_size] + self.hr_negative_prompts = self.all_hr_negative_prompts[self.iteration * self.batch_size:(self.iteration + 1) * self.batch_size] + + self.hr_prompts, self.hr_extra_network_data = extra_networks.parse_prompts(self.hr_prompts) + + return res + class 
StableDiffusionProcessingImg2Img(StableDiffusionProcessing): sampler = None -- cgit v1.2.3 From 339b5315700a469f4a9f0d5afc08ca2aca60c579 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 27 May 2023 15:47:33 +0300 Subject: custom unet support --- modules/processing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 29a3743f..b75f2515 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -13,7 +13,7 @@ from skimage import exposure from typing import Any, Dict, List import modules.sd_hijack -from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common +from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet from modules.sd_hijack import model_hijack from modules.shared import opts, cmd_opts, state import modules.shared as shared @@ -674,6 +674,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN": sd_vae_approx.model() + sd_unet.apply_unet() + if state.job_count == -1: state.job_count = p.n_iter -- cgit v1.2.3 From 00dfe27f59727407c5b408a80ff2a262934df495 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 29 May 2023 08:54:13 +0300 Subject: Add & use modules.errors.print_error where currently printing exception info by hand --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index b75f2515..5c9bcce8 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -1,4 +1,5 @@ import json +import logging import math import os import sys @@ -23,7 +24,6 @@ import modules.images as images import modules.styles import modules.sd_models as sd_models import modules.sd_vae as sd_vae -import logging from ldm.data.util import AddMiDaS from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion -- cgit v1.2.3 From 4a449375a20267035402eed5d93074c2f0a91bc8 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 30 May 2023 01:07:35 +0900 Subject: fix get_conds_with_caching() --- modules/processing.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index b75f2515..395c851f 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -321,14 +321,13 @@ class StableDiffusionProcessing: have been used before. The second element is where the previously computed result is stored. 
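One detail from the hires rework above worth spelling out: the hires pass runs as img2img, so its sampler is chosen as hr_sampler_name or sampler_name, and PLMS/UniPC — which cannot do img2img — are silently swapped for DDIM. As a small function mirroring the branch in the diff (sampler names are plain strings here):

    def pick_hires_sampler(sampler_name, hr_sampler_name=None):
        name = hr_sampler_name or sampler_name       # dedicated hires sampler wins if set
        if sampler_name in ('PLMS', 'UniPC'):        # no img2img support: fall back to DDIM
            name = 'DDIM'
        return name

    print(pick_hires_sampler('Euler a'))                  # Euler a
    print(pick_hires_sampler('Euler a', 'DPM++ 2M'))      # DPM++ 2M
    print(pick_hires_sampler('PLMS'))                     # DDIM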
""" - - if cache[0] is not None and (required_prompts, steps) == cache[0]: + if cache[0] is not None and (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info) == cache[0]: return cache[1] with devices.autocast(): cache[1] = function(shared.sd_model, required_prompts, steps) - cache[0] = (required_prompts, steps) + cache[0] = (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info) return cache[1] def setup_conds(self): -- cgit v1.2.3 From df02498d03e4296b7d7581aff69571a49be1d27a Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 31 May 2023 22:40:09 +0300 Subject: add an option to show selected setting in main txt2img/img2img UI split some code from ui.py into ui_settings.py ui_gradio_edxtensions.py add before_process callback for scripts add ability for alwayson scripts to specify section and let user reorder those sections --- modules/processing.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index f628d88b..baa9b278 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -588,11 +588,15 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter def process_images(p: StableDiffusionProcessing) -> Processed: + if p.scripts is not None: + p.scripts.before_process(p) + stored_opts = {k: opts.data[k] for k in p.override_settings.keys()} try: # if no checkpoint override or the override checkpoint can't be found, remove override entry and load opts checkpoint - if sd_models.checkpoint_alisases.get(p.override_settings.get('sd_model_checkpoint')) is None: + override_checkpoint = p.override_settings.get('sd_model_checkpoint') + if override_checkpoint is not None and sd_models.checkpoint_alisases.get(override_checkpoint) is None: p.override_settings.pop('sd_model_checkpoint', None) sd_models.reload_model_weights() -- cgit v1.2.3 From c63d46ceb8dd26d0ec3b27feedcffc3903358a0a Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 31 May 2023 18:47:24 +0300 Subject: Merge pull request #10804 from AUTOMATIC1111/fix-xyz-clip Fix get_conds_with_caching() --- modules/processing.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 29a3743f..d22b353f 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -321,14 +321,13 @@ class StableDiffusionProcessing: have been used before. The second element is where the previously computed result is stored. 
""" - - if cache[0] is not None and (required_prompts, steps) == cache[0]: + if cache[0] is not None and (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info) == cache[0]: return cache[1] with devices.autocast(): cache[1] = function(shared.sd_model, required_prompts, steps) - cache[0] = (required_prompts, steps) + cache[0] = (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info) return cache[1] def setup_conds(self): -- cgit v1.2.3 From a9674359ca4cd5ed8063a4dd23034450d80ba097 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 1 Jun 2023 19:52:04 +0300 Subject: revert the erroneous change for model setting added in df02498d --- modules/processing.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index baa9b278..362ab4c2 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -595,8 +595,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: try: # if no checkpoint override or the override checkpoint can't be found, remove override entry and load opts checkpoint - override_checkpoint = p.override_settings.get('sd_model_checkpoint') - if override_checkpoint is not None and sd_models.checkpoint_alisases.get(override_checkpoint) is None: + if sd_models.checkpoint_alisases.get(p.override_settings.get('sd_model_checkpoint')) is None: p.override_settings.pop('sd_model_checkpoint', None) sd_models.reload_model_weights() -- cgit v1.2.3 From 51864790fd72386fbbbb015d24a43ce501ecaa4b Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Fri, 2 Jun 2023 14:58:10 +0300 Subject: Simplify a bunch of `len(x) > 0`/`len(x) == 0` style expressions --- modules/processing.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 362ab4c2..9ebdb549 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -975,7 +975,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest") if self.enable_hr and latent_scale_mode is None: - assert len([x for x in shared.sd_upscalers if x.name == self.hr_upscaler]) > 0, f"could not find upscaler named {self.hr_upscaler}" + if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers): + raise Exception(f"could not find upscaler named {self.hr_upscaler}") x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) -- cgit v1.2.3 From f098e726d3e63aae8a6276ce83c55ac905c4379c Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 4 Jun 2023 04:24:44 +0900 Subject: fix conds caching with extra network --- modules/processing.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 362ab4c2..22ddc256 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -171,6 
+171,7 @@ class StableDiffusionProcessing: self.prompts = None self.negative_prompts = None + self.extra_network_data = None self.seeds = None self.subseeds = None @@ -311,7 +312,7 @@ class StableDiffusionProcessing: self.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_prompts] self.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_negative_prompts] - def get_conds_with_caching(self, function, required_prompts, steps, cache): + def get_conds_with_caching(self, function, required_prompts, steps, cache, extra_network_data): """ Returns the result of calling function(shared.sd_model, required_prompts, steps) using a cache to store the result if the same arguments have been used before. @@ -321,21 +322,21 @@ class StableDiffusionProcessing: have been used before. The second element is where the previously computed result is stored. """ - if cache[0] is not None and (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info) == cache[0]: + if cache[0] is not None and (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info, extra_network_data) == cache[0]: return cache[1] with devices.autocast(): cache[1] = function(shared.sd_model, required_prompts, steps) - cache[0] = (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info) + cache[0] = (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info, extra_network_data) return cache[1] def setup_conds(self): sampler_config = sd_samplers.find_sampler_config(self.sampler_name) self.step_multiplier = 2 if sampler_config and sampler_config.options.get("second_order", False) else 1 - self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.negative_prompts, self.steps * self.step_multiplier, self.cached_uc) - self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.prompts, self.steps * self.step_multiplier, self.cached_c) + self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.negative_prompts, self.steps * self.step_multiplier, self.cached_uc, self.extra_network_data) + self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.prompts, self.steps * self.step_multiplier, self.cached_c, self.extra_network_data) def parse_extra_network_prompts(self): self.prompts, extra_network_data = extra_networks.parse_prompts(self.prompts) @@ -681,7 +682,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if state.job_count == -1: state.job_count = p.n_iter - extra_network_data = None for n in range(p.n_iter): p.iteration = n @@ -702,11 +702,11 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if len(p.prompts) == 0: break - extra_network_data = p.parse_extra_network_prompts() + p.extra_network_data = p.parse_extra_network_prompts() if not p.disable_extra_networks: with devices.autocast(): - extra_networks.activate(p, extra_network_data) + extra_networks.activate(p, p.extra_network_data) if p.scripts is not None: p.scripts.process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds) @@ -828,8 +828,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if opts.grid_save: images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not 
opts.grid_extended_filename, p=p, grid=True) - if not p.disable_extra_networks and extra_network_data: - extra_networks.deactivate(p, extra_network_data) + if not p.disable_extra_networks and p.extra_network_data: + extra_networks.deactivate(p, p.extra_network_data) devices.torch_gc() @@ -1101,8 +1101,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): super().setup_conds() if self.enable_hr: - self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.hr_negative_prompts, self.steps * self.step_multiplier, self.cached_uc) - self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.hr_prompts, self.steps * self.step_multiplier, self.cached_c) + self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.hr_negative_prompts, self.steps * self.step_multiplier, self.cached_uc, self.hr_extra_network_data) + self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.hr_prompts, self.steps * self.step_multiplier, self.cached_c, self.hr_extra_network_data) def parse_extra_network_prompts(self): res = super().parse_extra_network_prompts() -- cgit v1.2.3 From 1c9d1b0ee06cffe0ba1b33a5795755cf39e5dde2 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 4 Jun 2023 05:19:34 +0900 Subject: simplify self.extra_network_data --- modules/processing.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 22ddc256..fae83788 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -339,9 +339,7 @@ class StableDiffusionProcessing: self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.prompts, self.steps * self.step_multiplier, self.cached_c, self.extra_network_data) def parse_extra_network_prompts(self): - self.prompts, extra_network_data = extra_networks.parse_prompts(self.prompts) - - return extra_network_data + self.prompts, self.extra_network_data = extra_networks.parse_prompts(self.prompts) class Processed: @@ -702,7 +700,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if len(p.prompts) == 0: break - p.extra_network_data = p.parse_extra_network_prompts() + p.parse_extra_network_prompts() if not p.disable_extra_networks: with devices.autocast(): -- cgit v1.2.3 From 1ca5e76f7b122ba35fc807350624a8d3fc25058a Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 4 Jun 2023 13:07:22 +0300 Subject: fix for conds of second hires fix pass being calculated using first pass's networks, and add an option to revert to old behavior --- modules/processing.py | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index fae83788..b65c7beb 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -739,7 +739,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: del samples_ddim - if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: + if lowvram.is_enabled(shared.sd_model): lowvram.send_everything_to_cpu() devices.torch_gc() @@ -894,6 +894,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.hr_negative_prompts = None self.hr_extra_network_data = None + self.cached_hr_uc = [None, None] + self.cached_hr_c = [None, None] self.hr_c = None self.hr_uc = None @@ -1056,6
+1058,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): with devices.autocast(): extra_networks.activate(self, self.hr_extra_network_data) + with devices.autocast(): + self.calculate_hr_conds() + sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio(for_hr=True)) samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) @@ -1067,6 +1072,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): return samples def close(self): + self.cached_hr_uc = [None, None] + self.cached_hr_c = [None, None] self.hr_c = None self.hr_uc = None @@ -1095,12 +1102,31 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.all_hr_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_hr_prompts] self.all_hr_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_hr_negative_prompts] + def calculate_hr_conds(self): + if self.hr_c is not None: + return + + self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.hr_negative_prompts, self.steps * self.step_multiplier, self.cached_hr_uc, self.hr_extra_network_data) + self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.hr_prompts, self.steps * self.step_multiplier, self.cached_hr_c, self.hr_extra_network_data) + def setup_conds(self): super().setup_conds() + self.hr_uc = None + self.hr_c = None + if self.enable_hr: - self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.hr_negative_prompts, self.steps * self.step_multiplier, self.cached_uc, self.hr_extra_network_data) - self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.hr_prompts, self.steps * self.step_multiplier, self.cached_c, self.hr_extra_network_data) + if shared.opts.hires_fix_use_firstpass_conds: + self.calculate_hr_conds() + + elif lowvram.is_enabled(shared.sd_model): # if in lowvram mode, we need to calculate conds right away, before the cond NN is unloaded + with devices.autocast(): + extra_networks.activate(self, self.hr_extra_network_data) + + self.calculate_hr_conds() + + with devices.autocast(): + extra_networks.activate(self, self.extra_network_data) def parse_extra_network_prompts(self): res = super().parse_extra_network_prompts() -- cgit v1.2.3 From fbf88343deff2f0d6c1c375ec858c094ed9fa260 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 4 Jun 2023 16:29:02 +0300 Subject: prevent calculating conds for second pass of hires fix when they are the same as for the first pass --- modules/processing.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index b65c7beb..230053e4 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -312,7 +312,7 @@ class StableDiffusionProcessing: self.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_prompts] self.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_negative_prompts] - def get_conds_with_caching(self, function, required_prompts, steps, cache, extra_network_data): + def get_conds_with_caching(self, function, required_prompts, steps, caches, extra_network_data): """ Returns the result of
calling function(shared.sd_model, required_prompts, steps) using a cache to store the result if the same arguments have been used before. @@ -321,9 +321,15 @@ class StableDiffusionProcessing: representing the previously used arguments, or None if no arguments have been used before. The second element is where the previously computed result is stored. + + caches is a list with items described above. """ - if cache[0] is not None and (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info, extra_network_data) == cache[0]: - return cache[1] + + for cache in caches: + if cache[0] is not None and (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info, extra_network_data) == cache[0]: + return cache[1] + + cache = caches[0] with devices.autocast(): cache[1] = function(shared.sd_model, required_prompts, steps) @@ -335,8 +341,8 @@ class StableDiffusionProcessing: sampler_config = sd_samplers.find_sampler_config(self.sampler_name) self.step_multiplier = 2 if sampler_config and sampler_config.options.get("second_order", False) else 1 - self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.negative_prompts, self.steps * self.step_multiplier, self.cached_uc, self.extra_network_data) - self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.prompts, self.steps * self.step_multiplier, self.cached_c, self.extra_network_data) + self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.negative_prompts, self.steps * self.step_multiplier, [self.cached_uc], self.extra_network_data) + self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.prompts, self.steps * self.step_multiplier, [self.cached_c], self.extra_network_data) def parse_extra_network_prompts(self): self.prompts, self.extra_network_data = extra_networks.parse_prompts(self.prompts) @@ -1106,8 +1112,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.hr_c is not None: return - self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.hr_negative_prompts, self.steps * self.step_multiplier, self.cached_hr_uc, self.hr_extra_network_data) - self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.hr_prompts, self.steps * self.step_multiplier, self.cached_hr_c, self.hr_extra_network_data) + self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.hr_negative_prompts, self.steps * self.step_multiplier, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data) + self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.hr_prompts, self.steps * self.step_multiplier, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data) def setup_conds(self): super().setup_conds() -- cgit v1.2.3 From 7f2214aa2b9a88bc1631f3f1d164a75a3583c851 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 8 Jun 2023 13:53:02 +0900 Subject: persistent conds cache Update shared.py --- modules/processing.py | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index f8225b83..1169bd7f 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -106,6 +106,9 @@ class StableDiffusionProcessing: """ The first set of paramaters: sd_models -> 
do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing """ + cached_uc = [None, None] + cached_c = [None, None] + def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): if sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) @@ -176,8 +179,8 @@ class StableDiffusionProcessing: self.subseeds = None self.step_multiplier = 1 - self.cached_uc = [None, None] - self.cached_c = [None, None] + self.cached_uc = StableDiffusionProcessing.cached_uc + self.cached_c = StableDiffusionProcessing.cached_c self.uc = None self.c = None @@ -289,8 +292,9 @@ class StableDiffusionProcessing: self.sampler = None self.c = None self.uc = None - self.cached_c = [None, None] - self.cached_uc = [None, None] + if not opts.experimental_persistent_cond_cache: + StableDiffusionProcessing.cached_c = [None, None] + StableDiffusionProcessing.cached_uc = [None, None] def get_token_merging_ratio(self, for_hr=False): if for_hr: @@ -324,7 +328,6 @@ class StableDiffusionProcessing: caches is a list with items described above. 
""" - for cache in caches: if cache[0] is not None and (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info, extra_network_data) == cache[0]: return cache[1] @@ -340,7 +343,6 @@ class StableDiffusionProcessing: def setup_conds(self): sampler_config = sd_samplers.find_sampler_config(self.sampler_name) self.step_multiplier = 2 if sampler_config and sampler_config.options.get("second_order", False) else 1 - self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.negative_prompts, self.steps * self.step_multiplier, [self.cached_uc], self.extra_network_data) self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.prompts, self.steps * self.step_multiplier, [self.cached_c], self.extra_network_data) @@ -868,6 +870,8 @@ def old_hires_fix_first_pass_dimensions(width, height): class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): sampler = None + cached_hr_uc = [None, None] + cached_hr_c = [None, None] def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_sampler_name: str = None, hr_prompt: str = '', hr_negative_prompt: str = '', **kwargs): super().__init__(**kwargs) @@ -900,8 +904,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.hr_negative_prompts = None self.hr_extra_network_data = None - self.cached_hr_uc = [None, None] - self.cached_hr_c = [None, None] + self.cached_hr_uc = StableDiffusionProcessingTxt2Img.cached_hr_uc + self.cached_hr_c = StableDiffusionProcessingTxt2Img.cached_hr_c self.hr_c = None self.hr_uc = None @@ -1079,10 +1083,12 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): return samples def close(self): - self.cached_hr_uc = [None, None] - self.cached_hr_c = [None, None] + super().close() self.hr_c = None self.hr_uc = None + if not opts.experimental_persistent_cond_cache: + StableDiffusionProcessingTxt2Img.cached_hr_uc = [None, None] + StableDiffusionProcessingTxt2Img.cached_hr_c = [None, None] def setup_prompts(self): super().setup_prompts() -- cgit v1.2.3 From 1503af60b0550d1a57c95a500df1fe1390178a27 Mon Sep 17 00:00:00 2001 From: Splendide Imaginarius <119545140+Splendide-Imaginarius@users.noreply.github.com> Date: Thu, 11 May 2023 22:03:44 +0000 Subject: Split mask blur into X and Y components Prequisite to fixing Outpainting MK2 mask blur bug. 
--- modules/processing.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index f8225b83..73ff10f4 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -1150,7 +1150,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): sampler = None - def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs): + def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = None, mask_blur_x: int = 4, mask_blur_y: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs): super().__init__(**kwargs) self.init_images = init_images @@ -1161,7 +1161,11 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.image_mask = mask self.latent_mask = None self.mask_for_overlay = None - self.mask_blur = mask_blur + if mask_blur is not None: + mask_blur_x = mask_blur + mask_blur_y = mask_blur + self.mask_blur_x = mask_blur_x + self.mask_blur_y = mask_blur_y self.inpainting_fill = inpainting_fill self.inpaint_full_res = inpaint_full_res self.inpaint_full_res_padding = inpaint_full_res_padding @@ -1183,8 +1187,17 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): if self.inpainting_mask_invert: image_mask = ImageOps.invert(image_mask) - if self.mask_blur > 0: - image_mask = image_mask.filter(ImageFilter.GaussianBlur(self.mask_blur)) + if self.mask_blur_x > 0: + np_mask = np.array(image_mask) + kernel_size = 2 * int(4 * self.mask_blur_x + 0.5) + 1 + np_mask = cv2.GaussianBlur(np_mask, (kernel_size, 1), self.mask_blur_x) + image_mask = Image.fromarray(np_mask) + + if self.mask_blur_y > 0: + np_mask = np.array(image_mask) + kernel_size = 2 * int(4 * self.mask_blur_y + 0.5) + 1 + np_mask = cv2.GaussianBlur(np_mask, (1, kernel_size), self.mask_blur_y) + image_mask = Image.fromarray(np_mask) if self.inpaint_full_res: self.mask_for_overlay = image_mask -- cgit v1.2.3 From cfdd1b941897eee994e92fc2149c0de7da6eed4a Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 9 Jun 2023 22:47:27 +0300 Subject: linter --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index b700a718..8da73884 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -7,7 +7,7 @@ import hashlib import torch import numpy as np -from PIL import Image, ImageFilter, ImageOps +from PIL import Image, ImageOps import random import cv2 from skimage import exposure -- cgit v1.2.3 From d3c86e5178725b11a4679097f0aefb0a9fc90014 Mon Sep 17 00:00:00 2001 From: Jared Deckard Date: Wed, 14 Jun 2023 14:03:44 -0500 Subject: Note the Gradio user in the Exif data --- modules/processing.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index d22b353f..3e8d153e 100644 --- 
a/modules/processing.py +++ b/modules/processing.py @@ -180,6 +180,8 @@ class StableDiffusionProcessing: self.uc = None self.c = None + self.user = None + @property def sd_model(self): return shared.sd_model @@ -578,6 +580,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, **p.extra_generation_params, "Version": program_version() if opts.add_version_to_infotext else None, + "User": p.user, } generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None]) -- cgit v1.2.3 From f603275d84301b5ee952683e951dd1aad72ba615 Mon Sep 17 00:00:00 2001 From: Jared Deckard Date: Thu, 15 Jun 2023 10:55:53 -0500 Subject: Add an opt-in infotext user name setting --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 3e8d153e..a0cc8db2 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -580,7 +580,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, **p.extra_generation_params, "Version": program_version() if opts.add_version_to_infotext else None, - "User": p.user, + "User": p.user if opts.add_user_name_to_info else None, } generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None]) -- cgit v1.2.3 From 27e9e3f6fa41c10eb5256662ddf3643dee933810 Mon Sep 17 00:00:00 2001 From: stablegeniusdiffuser Date: Mon, 19 Jun 2023 20:36:44 +0200 Subject: Add use_main_prompt parameter to use proper metadata for batch run grids or individual images --- modules/processing.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 8da73884..1d97e95e 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -549,7 +549,7 @@ def program_version(): return res -def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0): +def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False): index = position_in_batch + iteration * p.batch_size clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers) @@ -589,9 +589,10 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None]) + prompt_text = p.prompt if use_main_prompt else all_prompts[index] negative_prompt_text = f"\nNegative prompt: {p.all_negative_prompts[index]}" if p.all_negative_prompts[index] else "" - return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip() + return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip() def process_images(p: StableDiffusionProcessing) -> Processed: @@ -663,8 +664,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: else: p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))] - def infotext(iteration=0, position_in_batch=0): - return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, 
position_in_batch) + def infotext(iteration=0, position_in_batch=0, use_main_prompt=False): + return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch, use_main_prompt) if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings: model_hijack.embedding_db.load_textual_inversion_embeddings() @@ -824,7 +825,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: grid = images.image_grid(output_images, p.batch_size) if opts.return_grid: - text = infotext() + text = infotext(use_main_prompt=True) infotexts.insert(0, text) if opts.enable_pnginfo: grid.info["parameters"] = text @@ -832,7 +833,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: index_of_first_image = 1 if opts.grid_save: - images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True) + images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(use_main_prompt=True), short_filename=not opts.grid_extended_filename, p=p, grid=True) if not p.disable_extra_networks and p.extra_network_data: extra_networks.deactivate(p, p.extra_network_data) -- cgit v1.2.3 From b0ec69b360835a901a1aa57df1f7c8c9d55bf31c Mon Sep 17 00:00:00 2001 From: hako-mikan <122196982+hako-mikan@users.noreply.github.com> Date: Wed, 28 Jun 2023 18:37:08 +0900 Subject: add 'before_hr callback' script callback --- modules/processing.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 8da73884..35463c37 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -1074,6 +1074,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio(for_hr=True)) + if self.scripts is not None: + self.scripts.before_hr(self) + samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio()) -- cgit v1.2.3 From 7f46f81dd7b517e829395734750f0eb8360675d4 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sat, 1 Jul 2023 17:20:56 -0600 Subject: Change default seed_resize to 0 --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 8da73884..9e838aad 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -109,7 +109,7 @@ class StableDiffusionProcessing: cached_uc = [None, None] cached_c = [None, None] - def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, 
ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): + def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = 0, seed_resize_from_w: int = 0, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): if sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) -- cgit v1.2.3 From f731a728c68035ee36317ed0096ac5ecbfd50553 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Mon, 3 Jul 2023 11:41:10 -0600 Subject: Check seed_resize_from <= 0 --- modules/processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules/processing.py') diff --git a/modules/processing.py b/modules/processing.py index 9e838aad..dc552121 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -109,7 +109,7 @@ class StableDiffusionProcessing: cached_uc = [None, None] cached_c = [None, None] - def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = 0, seed_resize_from_w: int = 0, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): + def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: 
bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): if sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) @@ -573,7 +573,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')), "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]), "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength), - "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"), + "Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"), "Denoising strength": getattr(p, 'denoising_strength', None), "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, -- cgit v1.2.3
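The conds-caching commits above (4a449375, f098e726, fbf88343, 7f2214aa) all refine one idea: a prompt conditioning is recomputed only when every input that can affect it changes, and the hires pass may fall back to the first-pass cache when its key matches. The sketch below is an editorial simplification of that scheme, not the repository's code; compute_cond and the settings tuple are illustrative stand-ins for the prompt_parser call and the opts/checkpoint state used in processing.py.

def get_conds_with_caching(function, prompts, steps, caches, extra_network_data, settings):
    # caches is a list of [key, value] slots; the first slot is where new results are stored,
    # later slots (for example the first-pass cache) are only consulted for hits.
    key = (tuple(prompts), steps, extra_network_data, settings)

    for cache in caches:
        if cache[0] is not None and cache[0] == key:
            return cache[1]

    cache = caches[0]
    cache[1] = function(prompts, steps)
    cache[0] = key
    return cache[1]

def compute_cond(prompts, steps):
    # stand-in for prompt_parser.get_learned_conditioning / get_multicond_learned_conditioning
    return ("cond", tuple(prompts), steps)

cached_c = [None, None]       # first-pass cache
cached_hr_c = [None, None]    # hires-pass cache

c = get_conds_with_caching(compute_cond, ["a cat"], 20, [cached_c], None, ("clip_skip=1",))
# identical hires inputs hit the first-pass cache instead of recomputing:
hr_c = get_conds_with_caching(compute_cond, ["a cat"], 20, [cached_hr_c, cached_c], None, ("clip_skip=1",))
assert hr_c is c

With 7f2214aa the cache slots additionally become class-level attributes, so they survive across StableDiffusionProcessing instances unless opts.experimental_persistent_cond_cache is disabled.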
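Commit 1503af60 replaces the single square Gaussian blur of the img2img mask with two one-dimensional passes so the horizontal and vertical radii can differ. A standalone sketch of the same technique, assuming OpenCV, NumPy and Pillow are installed (blur_mask is an illustrative wrapper, not a function in processing.py):

import cv2
import numpy as np
from PIL import Image

def blur_mask(image_mask, blur_x, blur_y):
    # Blur a greyscale PIL mask with independent horizontal and vertical Gaussian passes.
    np_mask = np.array(image_mask)

    if blur_x > 0:
        kernel_size = 2 * int(4 * blur_x + 0.5) + 1                      # odd kernel wide enough for ~4 sigma
        np_mask = cv2.GaussianBlur(np_mask, (kernel_size, 1), blur_x)    # horizontal-only pass

    if blur_y > 0:
        kernel_size = 2 * int(4 * blur_y + 0.5) + 1
        np_mask = cv2.GaussianBlur(np_mask, (1, kernel_size), blur_y)    # vertical-only pass

    return Image.fromarray(np_mask)

mask = Image.new("L", (64, 64), 0)
mask.paste(255, (16, 16, 48, 48))
feathered = blur_mask(mask, blur_x=8, blur_y=0)   # feather only the horizontal edges

Passing the same value for both axes reproduces the old single mask_blur behaviour, which is why __init__ keeps accepting mask_blur and copies it into mask_blur_x and mask_blur_y.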
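Several of the later commits (the opt-in "User" field, the "Seed resize from" guard, use_main_prompt for grids) all rely on the same property of create_infotext: parameters are collected in a dict and any entry whose value is None is simply omitted from the generated text. A reduced sketch of that pattern (build_infotext and quote are simplified stand-ins, not the actual webui helpers):

def quote(value):
    value = str(value)
    return f'"{value}"' if "," in value or ":" in value else value

def build_infotext(prompt, negative_prompt, params):
    # keep only parameters that are set; None means "do not emit this field"
    text = ", ".join(f"{k}: {quote(v)}" for k, v in params.items() if v is not None)
    negative = f"\nNegative prompt: {negative_prompt}" if negative_prompt else ""
    return f"{prompt}{negative}\n{text}".strip()

params = {
    "Steps": 20,
    "Seed resize from": None,   # suppressed when seed_resize_from_w/h <= 0
    "User": None,               # only filled in when opts.add_user_name_to_info is enabled
}
print(build_infotext("a cat", "", params))   # prints the prompt followed by "Steps: 20"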