author    d8ahazard <d8ahazard@gmail.com>  2022-09-30 13:55:04 +0000
committer d8ahazard <d8ahazard@gmail.com>  2022-09-30 13:55:04 +0000
commit    8d60645106d7e2daa0da89c5b21d7ffdac61cf9e (patch)
tree      edb87c4e2a86cb5b2a9abeedd0543391d1982443
parent    64c6b13312ff3a20f48781c4c3780355c4b7b2af (diff)
Fix model paths, ensure we have the right files.
Also, clean up logging in the ldsr arch file.
-rw-r--r--  modules/ldsr_model.py       9
-rw-r--r--  modules/ldsr_model_arch.py  3
2 files changed, 8 insertions, 4 deletions
diff --git a/modules/ldsr_model.py b/modules/ldsr_model.py
index 4d8687c2..7dff0a9c 100644
--- a/modules/ldsr_model.py
+++ b/modules/ldsr_model.py
@@ -24,13 +24,18 @@ class UpscalerLDSR(Upscaler):
     def load_model(self, path: str):
         # Remove incorrect project.yaml file if too big
         yaml_path = os.path.join(self.model_path, "project.yaml")
+        old_model_path = os.path.join(self.model_path, "model.pth")
+        new_model_path = os.path.join(self.model_path, "model.ckpt")
         if os.path.exists(yaml_path):
             statinfo = os.stat(yaml_path)
-            if statinfo.st_size <= 10485760:
+            if statinfo.st_size >= 10485760:
                 print("Removing invalid LDSR YAML file.")
                 os.remove(yaml_path)
+        if os.path.exists(old_model_path):
+            print("Renaming model from model.pth to model.ckpt")
+            os.rename(old_model_path, new_model_path)
         model = load_file_from_url(url=self.model_url, model_dir=self.model_path,
-                                   file_name="model.pth", progress=True)
+                                   file_name="model.ckpt", progress=True)
         yaml = load_file_from_url(url=self.yaml_url, model_dir=self.model_path,
                                   file_name="project.yaml", progress=True)
diff --git a/modules/ldsr_model_arch.py b/modules/ldsr_model_arch.py
index 7faac6e1..093a3210 100644
--- a/modules/ldsr_model_arch.py
+++ b/modules/ldsr_model_arch.py
@@ -100,7 +100,6 @@ class LDSR:
         # If we can adjust the max upscale size, then the 4 below should be our variable
         print("Foo")
         down_sample_rate = target_scale / 4
-        print(f"Downsample rate is {down_sample_rate}")
         wd = width_og * down_sample_rate
         hd = height_og * down_sample_rate
         width_downsampled_pre = int(wd)
@@ -111,7 +110,7 @@ class LDSR:
                 f'Downsampling from [{width_og}, {height_og}] to [{width_downsampled_pre}, {height_downsampled_pre}]')
             im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)
         else:
-            print(f"Down sample rate is 1 from {target_scale} / 4")
+            print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)")
         logs = self.run(model["model"], im_og, diffusion_steps, eta)
         sample = logs["sample"]
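
For context on the message being reworded: the LDSR pass upscales by a fixed factor of 4 (the hard-coded 4 the comment in the hunk refers to), so other target scales are reached by pre-shrinking the input by target_scale / 4; a rate of exactly 1 means a 4x request and no downsampling. A small worked sketch of that arithmetic, reusing the width_og, height_og and target_scale names from the hunk; the helper name downsample_size is illustrative:

def downsample_size(width_og: int, height_og: int, target_scale: float):
    # LDSR always upscales by 4x, so a smaller requested scale is achieved by
    # shrinking the input first (e.g. target_scale=2 -> down_sample_rate=0.5).
    down_sample_rate = target_scale / 4
    return int(width_og * down_sample_rate), int(height_og * down_sample_rate)

# A 512x512 source with target_scale=2 is pre-resized to 256x256; the 4x LDSR
# pass then yields 1024x1024, a net 2x upscale. With target_scale=4 the rate
# is 1 and the "Not downsampling" branch above is taken.
print(downsample_size(512, 512, 2))  # -> (256, 256)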