commit     fa931733f6acc94e058a1d3d4655846e33ae34be
parent     c6f347b81f584b6c0d44af7a209983284dbb52d2
tree       3e0db0a74631e5775199e7d01698a0e98de07975 /modules
author     Philpax <me@philpax.me>   2022-12-25 09:17:49 +0000
committer  Philpax <me@philpax.me>   2022-12-25 09:17:49 +0000
fix(api): assign sd_model after settings change
Diffstat (limited to 'modules')
 modules/api/api.py    | 2 --
 modules/processing.py | 6 ++++--
 2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index 1ceba75d..0a1a1905 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -121,7 +121,6 @@ class Api:
 
     def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
         populate = txt2imgreq.copy(update={ # Override __init__ params
-            "sd_model": shared.sd_model,
             "sampler_name": validate_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index),
             "do_not_save_samples": True,
             "do_not_save_grid": True
@@ -153,7 +152,6 @@ class Api:
             mask = decode_base64_to_image(mask)
 
         populate = img2imgreq.copy(update={ # Override __init__ params
-            "sd_model": shared.sd_model,
             "sampler_name": validate_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index),
             "do_not_save_samples": True,
             "do_not_save_grid": True,
diff --git a/modules/processing.py b/modules/processing.py
index 4a406084..0b270278 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -50,9 +50,9 @@ def apply_color_correction(correction, original_image):
         correction,
         channel_axis=2
     ), cv2.COLOR_LAB2RGB).astype("uint8"))
-    
+
     image = blendLayers(image, original_image, BlendType.LUMINOSITY)
-    
+
     return image
 
 
@@ -466,6 +466,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
             if k == 'sd_model_checkpoint': sd_models.reload_model_weights()  # make onchange call for changing SD model
             if k == 'sd_vae': sd_vae.reload_vae_weights()  # make onchange call for changing VAE
 
+        # Assign sd_model here to ensure that it reflects the model after any changes
+        p.sd_model = shared.sd_model
         res = process_images_inner(p)
 
     finally:
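
For context, this is how the change surfaces to API callers: previously, text2imgapi and img2imgapi captured shared.sd_model when the request arrived, before override_settings was applied, so a request overriding sd_model_checkpoint could still be processed with the old model object. With p.sd_model assigned after the settings loop in process_images, the override takes effect. The snippet below is a hypothetical client-side sketch, not part of the patch; it assumes a local server on the default port, a placeholder checkpoint name, and that override_settings is accepted by the API request model (as the loop in process_images suggests).

import requests

# Hypothetical client call exercising the code path fixed by this commit.
# Host, port, and checkpoint name are assumptions; adapt to your install.
url = "http://127.0.0.1:7860/sdapi/v1/txt2img"

payload = {
    "prompt": "a watercolour landscape",
    "steps": 20,
    "override_settings": {
        # Placeholder checkpoint name; use one available on your installation.
        "sd_model_checkpoint": "some-other-checkpoint.ckpt",
    },
}

# With p.sd_model assigned after the override loop, generation uses the
# checkpoint named above rather than whichever model was loaded beforehand.
response = requests.post(url, json=payload)
response.raise_for_status()
print(f"received {len(response.json()['images'])} image(s)")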