author | AUTOMATIC1111 <16777216c@gmail.com> | 2023-07-18 05:08:19 +0000
committer | GitHub <noreply@github.com> | 2023-07-18 05:08:19 +0000
commit | 871b8687a82bb2ca907d8a49c87aed7635b8fc33 (patch)
tree | 1667c35ac0fde25ee60183de4b01a6c0581f5f73 /modules/sd_hijack_unet.py
parent | 20c41364ccba1319e68e6b4a58f53f110c5d4828 (diff)
parent | f0e2098f1a533c88396536282c1d6cd7d847a51c (diff)
Merge pull request #11846 from brkirch/sd-xl-upcast-sampling-fix
Add support for using `--upcast-sampling` with SD XL
Diffstat (limited to 'modules/sd_hijack_unet.py')
-rw-r--r-- | modules/sd_hijack_unet.py | 8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/modules/sd_hijack_unet.py b/modules/sd_hijack_unet.py
index ca1daf45..2101f1a0 100644
--- a/modules/sd_hijack_unet.py
+++ b/modules/sd_hijack_unet.py
@@ -39,7 +39,10 @@ def apply_model(orig_func, self, x_noisy, t, cond, **kwargs):
     if isinstance(cond, dict):
         for y in cond.keys():
-            cond[y] = [x.to(devices.dtype_unet) if isinstance(x, torch.Tensor) else x for x in cond[y]]
+            if isinstance(cond[y], list):
+                cond[y] = [x.to(devices.dtype_unet) if isinstance(x, torch.Tensor) else x for x in cond[y]]
+            else:
+                cond[y] = cond[y].to(devices.dtype_unet) if isinstance(cond[y], torch.Tensor) else cond[y]
     with devices.autocast():
         return orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs).float()
@@ -77,3 +80,6 @@ first_stage_sub = lambda orig_func, self, x, **kwargs: orig_func(self, x.to(devi
 CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond)
 CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
 CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.get_first_stage_encoding', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).float(), first_stage_cond)
+
+CondFunc('sgm.modules.diffusionmodules.wrappers.OpenAIWrapper.forward', apply_model, unet_needs_upcast)
+CondFunc('sgm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast)
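
The substance of the fix is in the first hunk: SD XL's conditioning dict can carry bare tensors (such as the pooled conditioning) in addition to the lists of tensors used by earlier models, so the upcast path now converts both shapes to `devices.dtype_unet` before calling the wrapped model. The second hunk then registers the same `apply_model` wrapper and a float32-safe `timestep_embedding` for SD XL's `sgm` module paths. The sketch below illustrates the dtype-normalization logic in isolation; the helper name `cast_cond_to` and the `target_dtype` parameter are hypothetical, not the webui's actual API, which casts to `devices.dtype_unet` inside `apply_model`.

```python
# Minimal standalone sketch (not the webui's code) of the dtype normalization
# applied to a conditioning dict: values may be lists of tensors or bare
# tensors, and both must be cast before the UNet runs under --upcast-sampling.
import torch


def cast_cond_to(cond: dict, target_dtype: torch.dtype) -> dict:
    """Cast every tensor found in a conditioning dict to target_dtype.

    Values may be lists of tensors or plain tensors (as SD XL's conditioner
    can produce); non-tensor entries pass through unchanged.
    """
    for key, value in cond.items():
        if isinstance(value, list):
            cond[key] = [v.to(target_dtype) if isinstance(v, torch.Tensor) else v for v in value]
        else:
            cond[key] = value.to(target_dtype) if isinstance(value, torch.Tensor) else value
    return cond


# Example with mixed list-valued and tensor-valued entries (shapes illustrative).
cond = {
    "c_crossattn": [torch.zeros(1, 77, 2048)],  # list of tensors
    "vector": torch.zeros(1, 2816),             # bare tensor
}
cond = cast_cond_to(cond, torch.float16)
assert cond["c_crossattn"][0].dtype == torch.float16
assert cond["vector"].dtype == torch.float16
```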