author     brkirch <brkirch@users.noreply.github.com>    2023-01-07 12:04:59 +0000
committer  brkirch <brkirch@users.noreply.github.com>    2023-01-07 12:04:59 +0000
commit     df3b31eb559ab9fabf7e513bdeddd5282c16f124 (patch)
tree       a58f630c89c6ff95dab50fa0adeac22bfeb6fe32 /modules
parent     151233399c4b79934bdbb7c12a97eeb6499572fb (diff)
In-place operations can break gradient calculation
Diffstat (limited to 'modules')
-rw-r--r-- | modules/sd_hijack_clip.py | 4 |
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index 5520c9b2..852afc66 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -247,9 +247,9 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
         # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
         batch_multipliers = torch.asarray(batch_multipliers).to(devices.device)
         original_mean = z.mean()
-        z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
+        z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
         new_mean = z.mean()
-        z *= original_mean / new_mean
+        z = z * (original_mean / new_mean)
         return z
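Why the change matters: PyTorch autograd saves intermediate tensors for the backward pass, and an in-place multiply such as `z *= ...` can overwrite one of those saved tensors, breaking gradient calculation; the out-of-place `z = z * ...` allocates a new tensor and leaves the saved value intact. A minimal sketch of that failure mode (illustrative only, not code from this repository):

```python
import torch

# In-place multiply on a tensor whose saved value autograd still needs.
x = torch.randn(4, requires_grad=True)
z = torch.exp(x)          # exp() saves its output for the backward pass
z *= 2.0                  # in-place: overwrites that saved output
try:
    z.sum().backward()
except RuntimeError as err:
    print("in-place version fails:", err)

# Out-of-place form, mirroring the change above (z = z * ...):
# a new tensor is created, the saved output stays intact, backward() works.
x = torch.randn(4, requires_grad=True)
z = torch.exp(x)
z = z * 2.0
z.sum().backward()
print("gradient:", x.grad)
```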