aboutsummaryrefslogtreecommitdiffstats
path: root/modules/processing.py
diff options
context:
space:
mode:
authorAUTOMATIC <16777216c@gmail.com>2022-09-13 18:49:58 +0000
committerAUTOMATIC <16777216c@gmail.com>2022-09-13 18:49:58 +0000
commit9d40212485febe05a662dd0346e6def83e456288 (patch)
treec56b55041ae4513ea5762cf07215f377175440d2 /modules/processing.py
parent85b97cc49c4766cb47306e71e552871a0791ea29 (diff)
downloadstable-diffusion-webui-gfx803-9d40212485febe05a662dd0346e6def83e456288.tar.gz
stable-diffusion-webui-gfx803-9d40212485febe05a662dd0346e6def83e456288.tar.bz2
stable-diffusion-webui-gfx803-9d40212485febe05a662dd0346e6def83e456288.zip
first attempt to produce correct seeds in batch
Diffstat (limited to 'modules/processing.py')
-rw-r--r--modules/processing.py18
1 file changed, 16 insertions, 2 deletions
diff --git a/modules/processing.py b/modules/processing.py
index f33560ee..aab72903 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -119,8 +119,14 @@ def slerp(val, low, high):
return res
-def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0):
+def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None):
xs = []
+
+ if p is not None and p.sampler is not None and len(seeds) > 1:
+ sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))]
+ else:
+ sampler_noises = None
+
for i, seed in enumerate(seeds):
noise_shape = shape if seed_resize_from_h <= 0 or seed_resize_from_w <= 0 else (shape[0], seed_resize_from_h//8, seed_resize_from_w//8)
@@ -155,9 +161,17 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see
x[:, ty:ty+h, tx:tx+w] = noise[:, dy:dy+h, dx:dx+w]
noise = x
+ if sampler_noises is not None:
+ cnt = p.sampler.number_of_needed_noises(p)
+ for j in range(cnt):
+ sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape)))
xs.append(noise)
+
+ if sampler_noises is not None:
+ p.sampler.sampler_noises = [torch.stack(n).to(shared.device) for n in sampler_noises]
+
x = torch.stack(xs).to(shared.device)
return x
@@ -254,7 +268,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
comments += model_hijack.comments
# we manually generate all input noises because each one should have a specific seed
- x = create_random_tensors([opt_C, p.height // opt_f, p.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w)
+ x = create_random_tensors([opt_C, p.height // opt_f, p.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w, p=p)
if p.n_iter > 1:
shared.state.job = f"Batch {n+1} out of {p.n_iter}"