| field | value | date |
|---|---|---|
| author | papuSpartan <30642826+papuSpartan@users.noreply.github.com> | 2022-11-07 03:05:28 +0000 |
| committer | GitHub <noreply@github.com> | 2022-11-07 03:05:28 +0000 |
| commit | 00ebc26c4e2962a31e067539d89cd695d999539a (patch) | |
| tree | 4c2d46e00ffcc5b606f4841926b3a61fed903f00 /modules/ldsr_model_arch.py | |
| parent | 86d35526a13a0e2432ab71d1d40b191615d3e343 (diff) | |
| parent | 804d9fb83d0c63ca3acd36378707ce47b8f12599 (diff) | |
Merge branch 'AUTOMATIC1111:master' into master
Diffstat (limited to 'modules/ldsr_model_arch.py')
-rw-r--r-- | modules/ldsr_model_arch.py | 14 |
1 file changed, 11 insertions(+), 3 deletions(-)
```diff
diff --git a/modules/ldsr_model_arch.py b/modules/ldsr_model_arch.py
index 14db5076..90e0a2f0 100644
--- a/modules/ldsr_model_arch.py
+++ b/modules/ldsr_model_arch.py
@@ -101,8 +101,8 @@ class LDSR:
         down_sample_rate = target_scale / 4
         wd = width_og * down_sample_rate
         hd = height_og * down_sample_rate
-        width_downsampled_pre = int(wd)
-        height_downsampled_pre = int(hd)
+        width_downsampled_pre = int(np.ceil(wd))
+        height_downsampled_pre = int(np.ceil(hd))
 
         if down_sample_rate != 1:
             print(
@@ -110,7 +110,12 @@ class LDSR:
             im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)
         else:
             print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)")
-        logs = self.run(model["model"], im_og, diffusion_steps, eta)
+
+        # pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts
+        pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size
+        im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))
+
+        logs = self.run(model["model"], im_padded, diffusion_steps, eta)
 
         sample = logs["sample"]
         sample = sample.detach().cpu()
@@ -120,6 +125,9 @@ class LDSR:
         sample = np.transpose(sample, (0, 2, 3, 1))
         a = Image.fromarray(sample[0])
 
+        # remove padding
+        a = a.crop((0, 0) + tuple(np.array(im_og.size) * 4))
+
         del model
         gc.collect()
         torch.cuda.empty_cache()
```
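For reference, a minimal standalone sketch of the pad-and-crop scheme the diff introduces: the input is padded up to the next multiple of 64 per side (with a floor of 128 px) by replicating edge pixels, and after the 4x upscale the result is cropped back to exactly 4x the original size. This is not part of the commit; the helper name `pad_to_multiple_of_64`, the dummy RGB image, and the LANCZOS resize standing in for the LDSR diffusion upscaler are illustrative assumptions.

```python
import numpy as np
from PIL import Image


def pad_to_multiple_of_64(im: Image.Image) -> Image.Image:
    # Target size: at least 128x128 (2 * 64), otherwise each side rounded up to
    # the next multiple of 64, mirroring the expression used in the diff.
    target = np.max(((2, 2), np.ceil(np.array(im.size) / 64).astype(int)), axis=0) * 64
    pad_w, pad_h = target - im.size
    # mode='edge' repeats the border pixels, avoiding the seams that zero
    # padding would introduce at the image boundary. Assumes an RGB image
    # (3-channel array), as in the webui pipeline.
    return Image.fromarray(np.pad(np.array(im), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))


if __name__ == "__main__":
    upscale_factor = 4  # LDSR upscales by 4x
    im_og = Image.fromarray(np.zeros((100, 130, 3), dtype=np.uint8))  # 130x100 dummy image

    im_padded = pad_to_multiple_of_64(im_og)
    print(im_padded.size)  # (192, 128): both sides rounded up to a multiple of 64

    # Simulated 4x upscale (the real code runs the diffusion model here),
    # then crop back to 4x the original size, as the diff does with
    # a.crop((0, 0) + tuple(np.array(im_og.size) * 4)).
    new_size = tuple(int(v) for v in np.array(im_padded.size) * upscale_factor)
    upscaled = im_padded.resize(new_size, Image.LANCZOS)
    result = upscaled.crop((0, 0) + tuple(int(v) for v in np.array(im_og.size) * upscale_factor))
    print(result.size)  # (520, 400)
```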