| field | value | timestamp |
|---|---|---|
| author | wangshuai09 <391746016@qq.com> | 2024-01-30 11:15:41 +0000 |
| committer | GitHub <noreply@github.com> | 2024-01-30 11:15:41 +0000 |
| commit | 74ff85a1a1ee4cce432b1c7d33c1eda831f68d48 | |
| tree | 99b70e0fef8422c8f603bf7faa1a393091cb2a8b /extensions-builtin/Lora/network_oft.py | |
| parent | ec124607f47371a6cfd61a795f86a7f1cbd44651 | |
| parent | ce168ab5dbc8b54b7245f352a2eaa55a37019b91 | |
Merge branch 'dev' into npu_support
Diffstat (limited to 'extensions-builtin/Lora/network_oft.py')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | extensions-builtin/Lora/network_oft.py | 10 |

1 file changed, 5 insertions(+), 5 deletions(-)
```diff
diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py
index fa647020..d1c46a4b 100644
--- a/extensions-builtin/Lora/network_oft.py
+++ b/extensions-builtin/Lora/network_oft.py
@@ -56,17 +56,17 @@ class NetworkModuleOFT(network.NetworkModule):
         self.block_size, self.num_blocks = factorization(self.out_dim, self.dim)
 
     def calc_updown(self, orig_weight):
-        oft_blocks = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype)
-        eye = torch.eye(self.block_size, device=self.oft_blocks.device)
+        oft_blocks = self.oft_blocks.to(orig_weight.device)
+        eye = torch.eye(self.block_size, device=oft_blocks.device)
 
         if self.is_kohya:
             block_Q = oft_blocks - oft_blocks.transpose(1, 2)  # ensure skew-symmetric orthogonal matrix
             norm_Q = torch.norm(block_Q.flatten())
-            new_norm_Q = torch.clamp(norm_Q, max=self.constraint)
+            new_norm_Q = torch.clamp(norm_Q, max=self.constraint.to(oft_blocks.device))
             block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8))
             oft_blocks = torch.matmul(eye + block_Q, (eye - block_Q).float().inverse())
 
-        R = oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype)
+        R = oft_blocks.to(orig_weight.device)
 
         # This errors out for MultiheadAttention, might need to be handled up-stream
         merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size)
@@ -77,6 +77,6 @@ class NetworkModuleOFT(network.NetworkModule):
         )
         merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...')
 
-        updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight
+        updown = merged_weight.to(orig_weight.device) - orig_weight.to(merged_weight.dtype)
         output_shape = orig_weight.shape
         return self.finalize_updown(updown, orig_weight, output_shape)
```
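For context, the kohya branch of `calc_updown` builds per-block orthogonal matrices via a norm-constrained Cayley transform, and the patch's main change is to keep that computation on whatever device the blocks live on (moving `self.constraint` there too) instead of also forcing `orig_weight`'s dtype. Below is a minimal standalone sketch of that construction; the helper name `cayley_orthogonal_blocks` and the toy shapes are hypothetical, while the names `oft_blocks`, `constraint`, and the math itself are taken from the diff:

```python
import torch

def cayley_orthogonal_blocks(oft_blocks: torch.Tensor, constraint: torch.Tensor) -> torch.Tensor:
    """Sketch of the kohya-style OFT block construction seen in calc_updown.

    oft_blocks: (num_blocks, block_size, block_size) trainable parameters.
    constraint: scalar tensor bounding the norm of the skew-symmetric part.
    """
    block_size = oft_blocks.shape[-1]
    eye = torch.eye(block_size, device=oft_blocks.device)

    # Antisymmetrize so Q = -Q^T; the Cayley transform of a
    # skew-symmetric matrix is orthogonal.
    block_Q = oft_blocks - oft_blocks.transpose(1, 2)

    # Rescale Q so its flattened norm stays within the constraint; the
    # patch moves `constraint` to the blocks' device before clamping.
    norm_Q = torch.norm(block_Q.flatten())
    new_norm_Q = torch.clamp(norm_Q, max=constraint.to(oft_blocks.device))
    block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8))

    # Cayley transform R = (I + Q)(I - Q)^{-1}, computed in float32
    # because torch's matrix inverse is not implemented for half precision.
    return torch.matmul(eye + block_Q, (eye - block_Q).float().inverse())


# Hypothetical usage: three 4x4 blocks, as factorization() might produce.
blocks = torch.zeros(3, 4, 4).normal_(0, 0.01)
R = cayley_orthogonal_blocks(blocks, torch.tensor(0.5))
# Each R[i] is (approximately) orthogonal: R[i] @ R[i].T ~ I.
print(torch.dist(R[0] @ R[0].T, torch.eye(4)))
```

Because the `.float().inverse()` step promotes the blocks to float32, `merged_weight` can end up in a different dtype than a half-precision `orig_weight`; the last hunk accounts for this by casting `orig_weight` to `merged_weight.dtype` before the subtraction rather than forcing everything back to `orig_weight`'s dtype.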