author     catboxanon <122327233+catboxanon@users.noreply.github.com>   2023-05-14 01:49:41 +0000
committer  catboxanon <122327233+catboxanon@users.noreply.github.com>   2023-05-14 01:49:41 +0000
commit     3078001439d25b66ef5627c9e3d431aa23bbed73
tree       6e955fb65126d40c84693b801eaa9c29de4981e3 /modules
parent     e8eea1bb7a8454c5b2453695fff23d4afb88f2b1
Add/modify CFG callbacks
Required by self-attn guidance extension
https://github.com/ashen-sensored/sd_webui_SAG
Diffstat (limited to 'modules')
-rw-r--r--   modules/script_callbacks.py         | 35
-rw-r--r--   modules/sd_samplers_kdiffusion.py   |  8
2 files changed, 42 insertions(+), 1 deletion(-)
diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index 7d9dd736..e83c6ecf 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -53,6 +53,21 @@ class CFGDenoiserParams:
class CFGDenoisedParams:
+ def __init__(self, x, sampling_step, total_sampling_steps, inner_model):
+ self.x = x
+ """Latent image representation in the process of being denoised"""
+
+ self.sampling_step = sampling_step
+ """Current Sampling step number"""
+
+ self.total_sampling_steps = total_sampling_steps
+ """Total number of sampling steps planned"""
+
+ self.inner_model = inner_model
+ """Inner model reference that is being used for denoising"""
+
+
+class AfterCFGCallbackParams:
def __init__(self, x, sampling_step, total_sampling_steps):
self.x = x
"""Latent image representation in the process of being denoised"""
@@ -63,6 +78,9 @@ class CFGDenoisedParams:
self.total_sampling_steps = total_sampling_steps
"""Total number of sampling steps planned"""
+ self.output_altered = False
+ """A flag for CFGDenoiser that indicates whether the output has been altered by the callback"""
+
class UiTrainTabParams:
def __init__(self, txt2img_preview_params):
@@ -87,6 +105,7 @@ callback_map = dict(
callbacks_image_saved=[],
callbacks_cfg_denoiser=[],
callbacks_cfg_denoised=[],
+ callbacks_cfg_after_cfg=[],
callbacks_before_component=[],
callbacks_after_component=[],
callbacks_image_grid=[],
@@ -186,6 +205,14 @@ def cfg_denoised_callback(params: CFGDenoisedParams):
report_exception(c, 'cfg_denoised_callback')
+def cfg_after_cfg_callback(params: AfterCFGCallbackParams):
+ for c in callback_map['callbacks_cfg_after_cfg']:
+ try:
+ c.callback(params)
+ except Exception:
+ report_exception(c, 'cfg_after_cfg_callback')
+
+
def before_component_callback(component, **kwargs):
for c in callback_map['callbacks_before_component']:
try:
@@ -332,6 +359,14 @@ def on_cfg_denoised(callback):
add_callback(callback_map['callbacks_cfg_denoised'], callback)
+def on_cfg_after_cfg(callback):
+ """register a function to be called in the kdiffussion cfg_denoiser method after cfg calculations has completed.
+ The callback is called with one argument:
+ - params: AfterCFGCallbackParams - the post-CFG denoised latent and sampling state details.
+ """
+ add_callback(callback_map['callbacks_cfg_after_cfg'], callback)
+
+
def on_before_component(callback):
"""register a function to be called before a component is created.
The callback is called with arguments:
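A hedged usage sketch (not part of this commit) of the hook added above: an extension script would register a function via on_cfg_after_cfg, modify the post-CFG latent, and set output_altered so CFGDenoiser picks up the change. The function name my_after_cfg and the clamp adjustment are purely illustrative.

# Hypothetical example, assuming the usual extension pattern of importing
# modules.script_callbacks; the callback receives an AfterCFGCallbackParams.
from modules import script_callbacks

def my_after_cfg(params: script_callbacks.AfterCFGCallbackParams):
    params.x = params.x.clamp(-10, 10)  # any post-CFG adjustment of the latent
    params.output_altered = True        # tell CFGDenoiser to use the modified params.x

script_callbacks.on_cfg_after_cfg(my_after_cfg)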
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index e9e41818..55f0d3a3 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -8,6 +8,7 @@ from modules.shared import opts, state
import modules.shared as shared
from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback
+from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback
samplers_k_diffusion = [
('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {}),
@@ -160,7 +161,7 @@ class CFGDenoiser(torch.nn.Module):
fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes])
x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be
- denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps)
+ denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model)
cfg_denoised_callback(denoised_params)
devices.test_for_nans(x_out, "unet")
@@ -180,6 +181,11 @@ class CFGDenoiser(torch.nn.Module):
if self.mask is not None:
denoised = self.init_latent * self.mask + self.nmask * denoised
+ after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps)
+ cfg_after_cfg_callback(after_cfg_callback_params)
+ if after_cfg_callback_params.output_altered:
+ denoised = after_cfg_callback_params.x
+
self.step += 1
return denoised
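For the new inner_model argument on CFGDenoisedParams, a minimal sketch of how an extension such as SAG might consume it through the existing on_cfg_denoised hook; the hook function below and what it does with the model are assumptions, not code from this commit.

# Hypothetical example: reading the inner_model reference now carried by
# CFGDenoisedParams, e.g. to locate attention modules for self-attention guidance.
from modules import script_callbacks

def my_denoised_hook(params: script_callbacks.CFGDenoisedParams):
    unet_wrapper = params.inner_model      # model wrapper used for denoising
    step = params.sampling_step            # current sampling step
    total = params.total_sampling_steps    # planned total steps
    # ... inspect or hook modules on unet_wrapper here ...

script_callbacks.on_cfg_denoised(my_denoised_hook)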