diff options
author | AUTOMATIC1111 <16777216c@gmail.com> | 2023-08-04 06:09:09 +0000 |
---|---|---|
committer | AUTOMATIC1111 <16777216c@gmail.com> | 2023-08-04 06:13:46 +0000 |
commit | f0c1063a707a4a43823b0ed00e2a8eeb22a9ed0a (patch) | |
tree | 34a1afac76c34a13adf346850e7e97f91254494f /modules/sd_samplers_common.py | |
parent | 09165916fa2b16d8f1d622ef1743e37565cc39f3 (diff) | |
download | stable-diffusion-webui-gfx803-f0c1063a707a4a43823b0ed00e2a8eeb22a9ed0a.tar.gz stable-diffusion-webui-gfx803-f0c1063a707a4a43823b0ed00e2a8eeb22a9ed0a.tar.bz2 stable-diffusion-webui-gfx803-f0c1063a707a4a43823b0ed00e2a8eeb22a9ed0a.zip |
resolve some of the circular import issues for kohaku
Diffstat (limited to 'modules/sd_samplers_common.py')
-rw-r--r-- | modules/sd_samplers_common.py | 10 |
1 file changed, 8 insertions, 2 deletions
diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 5deda761..b3d344e7 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -2,7 +2,7 @@ from collections import namedtuple import numpy as np
import torch
from PIL import Image
-from modules import devices, processing, images, sd_vae_approx, sd_samplers, sd_vae_taesd, shared
+from modules import devices, images, sd_vae_approx, sd_samplers, sd_vae_taesd, shared
from modules.shared import opts, state
SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
@@ -35,7 +35,7 @@ def single_sample_to_image(sample, approximation=None): x_sample = sample * 1.5
x_sample = sd_vae_taesd.model()(x_sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
else:
- x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] * 0.5 + 0.5
+ x_sample = decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] * 0.5 + 0.5
x_sample = torch.clamp(x_sample, min=0.0, max=1.0)
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
@@ -44,6 +44,12 @@ def single_sample_to_image(sample, approximation=None): return Image.fromarray(x_sample)
+def decode_first_stage(model, x):
+ x = model.decode_first_stage(x.to(devices.dtype_vae))
+
+ return x
+
+
def sample_to_image(samples, index=0, approximation=None):
return single_sample_to_image(samples[index], approximation)
|