author | d8ahazard <d8ahazard@gmail.com> | 2022-09-29 22:46:23 +0000 |
---|---|---|
committer | d8ahazard <d8ahazard@gmail.com> | 2022-09-29 22:46:23 +0000 |
commit | 0dce0df1ee63b2f158805c1a1f1a3743cc4a104b (patch) | |
tree | dfcec33656d06835e71961b117b63e510cb9bff2 /modules/extras.py | |
parent | 31ad536c331df14dd785bfd2a1f93f91a8f7839e (diff) | |
download | stable-diffusion-webui-gfx803-0dce0df1ee63b2f158805c1a1f1a3743cc4a104b.tar.gz stable-diffusion-webui-gfx803-0dce0df1ee63b2f158805c1a1f1a3743cc4a104b.tar.bz2 stable-diffusion-webui-gfx803-0dce0df1ee63b2f158805c1a1f1a3743cc4a104b.zip |
Holy $hit.
Yep.
Fix gfpgan_model_arch requirement(s).
Add Upscaler base class, move from images.
Add a lot of methods to Upscaler.
Re-work all the child upscalers to be proper classes.
Add BSRGAN scaler.
Add ldsr_model_arch class, removing the dependency for another repo that just uses regular latent-diffusion stuff.
Add one universal method that will always find and load new upscaler models without having to add new "setup_model" calls. Still need to add command line params, but that could probably be automated.
Add a "self.scale" property to all Upscalers so the scalers themselves can do "things" in response to the requested upscaling size.
Ensure LDSR doesn't get stuck in a longer loop of "upscale/downscale/upscale" as we try to reach the target upscale size.
Add typehints for IDE sanity.
PEP-8 improvements.
Moar.
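
For orientation, here is a rough sketch of the base-class shape the message above describes: a shared upscale() driver wrapping a per-model do_upscale() hook, plus a self.scale attribute each child scaler can consult. This is a minimal sketch with assumed names and signatures; the actual class this commit adds lives in modules/upscaler.py and may differ in its details.

```python
# Illustrative sketch only: the real base class added by this commit lives in
# modules/upscaler.py; the names and signatures here are assumed, not copied from it.
from abc import ABC, abstractmethod

from PIL import Image


class UpscalerBase(ABC):
    """Hypothetical stand-in for the Upscaler base class the commit message describes."""

    def __init__(self, name: str, data_path: str, scale: int = 4):
        self.name = name
        self.data_path = data_path  # where this scaler looks for its model files
        self.scale = scale          # lets each scaler react to the requested upscale size

    @abstractmethod
    def do_upscale(self, img: Image.Image, selected_model: str) -> Image.Image:
        """Run one pass of the underlying model (ESRGAN, BSRGAN, LDSR, ...)."""

    def upscale(self, img: Image.Image, scale: float, selected_model: str) -> Image.Image:
        # Repeat single-model passes until the target size is reached, then resize
        # down once -- the kind of guard the message describes for keeping LDSR out
        # of a long upscale/downscale/upscale loop.
        dest_w, dest_h = int(img.width * scale), int(img.height * scale)
        for _ in range(3):  # cap passes so a no-op scaler cannot loop forever
            if img.width >= dest_w and img.height >= dest_h:
                break
            img = self.do_upscale(img, selected_model)
        if (img.width, img.height) != (dest_w, dest_h):
            img = img.resize((dest_w, dest_h), resample=Image.LANCZOS)
        return img
```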
Diffstat (limited to 'modules/extras.py')
-rw-r--r-- | modules/extras.py | 35 |
1 file changed, 17 insertions, 18 deletions
diff --git a/modules/extras.py b/modules/extras.py
index af6e631f..d7d0fa54 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -66,29 +66,28 @@ def run_extras(extras_mode, image, image_folder, gfpgan_visibility, codeformer_v
             info += f"CodeFormer w: {round(codeformer_weight, 2)}, CodeFormer visibility:{round(codeformer_visibility, 2)}\n"
             image = res
 
-        if upscaling_resize != 1.0:
-            def upscale(image, scaler_index, resize):
-                small = image.crop((image.width // 2, image.height // 2, image.width // 2 + 10, image.height // 2 + 10))
-                pixels = tuple(np.array(small).flatten().tolist())
-                key = (resize, scaler_index, image.width, image.height, gfpgan_visibility, codeformer_visibility, codeformer_weight) + pixels
+        def upscale(image, scaler_index, resize):
+            small = image.crop((image.width // 2, image.height // 2, image.width // 2 + 10, image.height // 2 + 10))
+            pixels = tuple(np.array(small).flatten().tolist())
+            key = (resize, scaler_index, image.width, image.height, gfpgan_visibility, codeformer_visibility, codeformer_weight) + pixels
 
-                c = cached_images.get(key)
-                if c is None:
-                    upscaler = shared.sd_upscalers[scaler_index]
-                    c = upscaler.upscale(image, image.width * resize, image.height * resize)
-                    cached_images[key] = c
+            c = cached_images.get(key)
+            if c is None:
+                upscaler = shared.sd_upscalers[scaler_index]
+                c = upscaler.scaler.upscale(image, resize, upscaler.data_path)
+                cached_images[key] = c
 
-                return c
+            return c
 
-            info += f"Upscale: {round(upscaling_resize, 3)}, model:{shared.sd_upscalers[extras_upscaler_1].name}\n"
-            res = upscale(image, extras_upscaler_1, upscaling_resize)
+        info += f"Upscale: {round(upscaling_resize, 3)}, model:{shared.sd_upscalers[extras_upscaler_1].name}\n"
+        res = upscale(image, extras_upscaler_1, upscaling_resize)
 
-            if extras_upscaler_2 != 0 and extras_upscaler_2_visibility > 0:
-                res2 = upscale(image, extras_upscaler_2, upscaling_resize)
-                info += f"Upscale: {round(upscaling_resize, 3)}, visibility: {round(extras_upscaler_2_visibility, 3)}, model:{shared.sd_upscalers[extras_upscaler_2].name}\n"
-                res = Image.blend(res, res2, extras_upscaler_2_visibility)
+        if extras_upscaler_2 != 0 and extras_upscaler_2_visibility > 0:
+            res2 = upscale(image, extras_upscaler_2, upscaling_resize)
+            info += f"Upscale: {round(upscaling_resize, 3)}, visibility: {round(extras_upscaler_2_visibility, 3)}, model:{shared.sd_upscalers[extras_upscaler_2].name}\n"
+            res = Image.blend(res, res2, extras_upscaler_2_visibility)
 
-            image = res
+        image = res
 
         while len(cached_images) > 2:
            del cached_images[next(iter(cached_images.keys()))]
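
Two things change inside this hunk: the `if upscaling_resize != 1.0:` guard is dropped, so the helper is always defined and called, and the call into the scaler now goes through the new wrapper object rather than the old width/height interface, i.e. `upscaler.scaler.upscale(image, resize, upscaler.data_path)` instead of `upscaler.upscale(image, image.width * resize, image.height * resize)`. The rest of the hunk is re-indentation, including the pixel-sampling cache key. A standalone sketch of that keying idea follows; the helper name and the bare dict are assumptions made for illustration.

```python
# Standalone sketch of the cache-key scheme visible in the diff above; the helper
# name and the bare dict cache are assumptions, not part of the actual change.
import numpy as np
from PIL import Image

cached_images = {}


def make_cache_key(image: Image.Image, resize: float, scaler_index: int, *settings) -> tuple:
    # A 10x10 patch from the image centre acts as a cheap fingerprint of the content.
    small = image.crop((image.width // 2, image.height // 2,
                        image.width // 2 + 10, image.height // 2 + 10))
    pixels = tuple(np.array(small).flatten().tolist())
    # Fold in every setting that affects the output, so changing any of them
    # produces a different key and forces a fresh upscale.
    return (resize, scaler_index, image.width, image.height, *settings) + pixels
```

The `while len(cached_images) > 2:` loop kept as context at the end of the hunk trims the cache to the two most recent results.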