author    | AUTOMATIC <16777216c@gmail.com> | 2022-10-04 09:32:22 +0000
committer | AUTOMATIC <16777216c@gmail.com> | 2022-10-04 09:32:22 +0000
commit    | 6c6ae28bf5fd1e8bc3e8f64a3430b6f29f338f77 (patch)
tree      | 5c221ca22288633f8814cc1f304590bf5f26b73c /modules/processing.py
parent    | 556c36b9607e3f4eacdddc85f8e7a78b29476ea7 (diff)
download  | stable-diffusion-webui-gfx803-6c6ae28bf5fd1e8bc3e8f64a3430b6f29f338f77.tar.gz
          | stable-diffusion-webui-gfx803-6c6ae28bf5fd1e8bc3e8f64a3430b6f29f338f77.tar.bz2
          | stable-diffusion-webui-gfx803-6c6ae28bf5fd1e8bc3e8f64a3430b6f29f338f77.zip
send all three of GFPGAN's and codeformer's models to CPU memory instead of just one for #1283
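The offloading itself presumably lives in the GFPGAN and CodeFormer modules; the view below is filtered to modules/processing.py, so that code is not shown here. As a rough, hypothetical sketch of the pattern the summary describes (the helper name and the list of models are illustrative assumptions, not the repository's actual code):

    import torch

    def send_face_restorer_to(models, device):
        # Move every model the face restorer has loaded (restorer network,
        # face detection, face parsing) to the target device, instead of
        # offloading only one of them and leaving the rest in VRAM.
        for model in models:
            model.to(device)

    # after a restoration pass, push all of them back to system memory:
    # send_face_restorer_to([restorer_net, detector, parser], torch.device("cpu"))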
Diffstat (limited to 'modules/processing.py')
-rw-r--r-- | modules/processing.py | 16
1 file changed, 9 insertions, 7 deletions
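In the processing.py part of the change, the old function-wide precision_scope/ema_scope wrappers are dropped and mixed precision is instead applied locally through devices.autocast() around prompt conditioning and sampling, with the sampler output cast back via .to(devices.dtype). As a minimal sketch of what such a helper can look like, assuming a module-level working dtype and a precision setting analogous to --precision (this paraphrases the idea rather than quoting modules/devices.py):

    import contextlib
    import torch

    # assumptions for this sketch, standing in for modules/devices.py state
    dtype = torch.float16        # the model's working dtype
    precision = "autocast"       # analogous to the --precision command-line option

    def autocast():
        # full precision requested, or the model already runs in float32:
        # return a do-nothing context manager
        if dtype == torch.float32 or precision == "full":
            return contextlib.nullcontext()
        # otherwise let PyTorch autocast CUDA ops to reduced precision
        return torch.autocast("cuda")

Narrowing the scope this way keeps autocast active only where it helps, so tensors produced outside those blocks stay in a predictable dtype.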
diff --git a/modules/processing.py b/modules/processing.py
index 0a4b6198..9cbecdd8 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1,4 +1,3 @@
-import contextlib
 import json
 import math
 import os
@@ -330,9 +329,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
     infotexts = []
     output_images = []
 
-    precision_scope = torch.autocast if cmd_opts.precision == "autocast" else contextlib.nullcontext
-    ema_scope = (contextlib.nullcontext if cmd_opts.lowvram else p.sd_model.ema_scope)
-    with torch.no_grad(), precision_scope("cuda"), ema_scope():
+
+    with torch.no_grad():
         p.init(all_prompts, all_seeds, all_subseeds)
 
         if state.job_count == -1:
@@ -351,8 +349,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
 
             #uc = p.sd_model.get_learned_conditioning(len(prompts) * [p.negative_prompt])
             #c = p.sd_model.get_learned_conditioning(prompts)
-            uc = prompt_parser.get_learned_conditioning(len(prompts) * [p.negative_prompt], p.steps)
-            c = prompt_parser.get_learned_conditioning(prompts, p.steps)
+            with devices.autocast():
+                uc = prompt_parser.get_learned_conditioning(len(prompts) * [p.negative_prompt], p.steps)
+                c = prompt_parser.get_learned_conditioning(prompts, p.steps)
 
             if len(model_hijack.comments) > 0:
                 for comment in model_hijack.comments:
@@ -361,7 +360,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
             if p.n_iter > 1:
                 shared.state.job = f"Batch {n+1} out of {p.n_iter}"
 
-            samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength)
+            with devices.autocast():
+                samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength).to(devices.dtype)
+
             if state.interrupted:
 
                 # if we are interruped, sample returns just noise
@@ -386,6 +387,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
                     devices.torch_gc()
 
                     x_sample = modules.face_restoration.restore_faces(x_sample)
+                    devices.torch_gc()
 
                 image = Image.fromarray(x_sample)
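The extra devices.torch_gc() after modules.face_restoration.restore_faces frees GPU memory once the restoration models have done their work, complementing the CPU offload described in the commit summary. A sketch of what a typical CUDA garbage-collection helper of this kind does (hypothetical; the repository's own torch_gc may differ in detail):

    import gc
    import torch

    def torch_gc():
        # drop unreachable Python objects first so their CUDA tensors can be freed
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()   # release cached blocks back to the driver
            torch.cuda.ipc_collect()   # clean up CUDA IPC handles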