Diffstat (limited to 'modules')
-rw-r--r--  modules/codeformer_model.py  | 40
-rw-r--r--  modules/devices.py           | 17
-rw-r--r--  modules/extras.py            |  4
-rw-r--r--  modules/gfpgan_model.py      | 15
-rw-r--r--  modules/images.py            | 18
-rw-r--r--  modules/processing.py        | 19
-rw-r--r--  modules/shared.py            |  3
-rw-r--r--  modules/ui.py                |  6
8 files changed, 81 insertions(+), 41 deletions(-)
diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py
index 946b4a30..6cd29c83 100644
--- a/modules/codeformer_model.py
+++ b/modules/codeformer_model.py
@@ -5,7 +5,7 @@ import traceback
import cv2
import torch
-from modules import shared
+from modules import shared, devices
from modules.paths import script_path
import modules.shared
import modules.face_restoration
@@ -47,23 +47,26 @@ def setup_codeformer():
def __init__(self):
self.net = None
self.face_helper = None
+ if shared.device.type == 'mps': # CodeFormer currently does not support mps backend
+ shared.device_codeformer = torch.device('cpu')
def create_models(self):
if self.net is not None and self.face_helper is not None:
+ self.net.to(shared.device)
return self.net, self.face_helper
- net = net_class(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9, connect_list=['32', '64', '128', '256']).to(shared.device)
+ net = net_class(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9, connect_list=['32', '64', '128', '256']).to(shared.device_codeformer)
ckpt_path = load_file_from_url(url=pretrain_model_url, model_dir=os.path.join(path, 'weights/CodeFormer'), progress=True)
checkpoint = torch.load(ckpt_path)['params_ema']
net.load_state_dict(checkpoint)
net.eval()
- face_helper = FaceRestoreHelper(1, face_size=512, crop_ratio=(1, 1), det_model='retinaface_resnet50', save_ext='png', use_parse=True, device=shared.device)
+ face_helper = FaceRestoreHelper(1, face_size=512, crop_ratio=(1, 1), det_model='retinaface_resnet50', save_ext='png', use_parse=True, device=shared.device_codeformer)
- if not cmd_opts.unload_gfpgan:
- self.net = net
- self.face_helper = face_helper
+ self.net = net
+ self.face_helper = face_helper
+ self.net.to(shared.device)
return net, face_helper
@@ -72,20 +75,20 @@ def setup_codeformer():
original_resolution = np_image.shape[0:2]
- net, face_helper = self.create_models()
- face_helper.clean_all()
- face_helper.read_image(np_image)
- face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)
- face_helper.align_warp_face()
+ self.create_models()
+ self.face_helper.clean_all()
+ self.face_helper.read_image(np_image)
+ self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)
+ self.face_helper.align_warp_face()
- for idx, cropped_face in enumerate(face_helper.cropped_faces):
+ for idx, cropped_face in enumerate(self.face_helper.cropped_faces):
cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
- cropped_face_t = cropped_face_t.unsqueeze(0).to(shared.device)
+ cropped_face_t = cropped_face_t.unsqueeze(0).to(shared.device_codeformer)
try:
with torch.no_grad():
- output = net(cropped_face_t, w=w if w is not None else shared.opts.code_former_weight, adain=True)[0]
+ output = self.net(cropped_face_t, w=w if w is not None else shared.opts.code_former_weight, adain=True)[0]
restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))
del output
torch.cuda.empty_cache()
@@ -94,16 +97,19 @@ def setup_codeformer():
restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))
restored_face = restored_face.astype('uint8')
- face_helper.add_restored_face(restored_face)
+ self.face_helper.add_restored_face(restored_face)
- face_helper.get_inverse_affine(None)
+ self.face_helper.get_inverse_affine(None)
- restored_img = face_helper.paste_faces_to_input_image()
+ restored_img = self.face_helper.paste_faces_to_input_image()
restored_img = restored_img[:, :, ::-1]
if original_resolution != restored_img.shape[0:2]:
restored_img = cv2.resize(restored_img, (0, 0), fx=original_resolution[1]/restored_img.shape[1], fy=original_resolution[0]/restored_img.shape[0], interpolation=cv2.INTER_LINEAR)
+ if shared.opts.face_restoration_unload:
+ self.net.to(devices.cpu)
+
return restored_img
global have_codeformer
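
Taken together, the codeformer_model.py changes give the model a simple device lifecycle: the network is built on shared.device_codeformer (plain CPU when the backend is mps, which CodeFormer does not support), cached on the instance, and optionally parked back in RAM after each run. A minimal sketch of that fallback idea, using a stand-in nn.Linear for the real net_class:

    import torch
    import torch.nn as nn

    # Pick the main device, then a fallback for models that cannot run on mps.
    has_mps = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
    device = torch.device("mps" if has_mps else ("cuda" if torch.cuda.is_available() else "cpu"))
    device_codeformer = torch.device("cpu") if has_mps else device

    net = nn.Linear(4, 4).to(device_codeformer)   # stand-in for net_class(...)
    x = torch.zeros(1, 4).to(device_codeformer)   # inputs follow the same device
    with torch.no_grad():
        y = net(x)
    net.to(torch.device("cpu"))                   # face_restoration_unload: free VRAM after a run
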
diff --git a/modules/devices.py b/modules/devices.py
index a93a245b..e4430e1a 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -31,3 +31,20 @@ def enable_tf32():
errors.run(enable_tf32, "Enabling TF32")
+
+
+device = get_optimal_device()
+device_codeformer = cpu if has_mps else device
+
+
+def randn(seed, shape):
+ # PyTorch currently doesn't handle setting randomness correctly when the metal backend is used.
+ if device.type == 'mps':
+ generator = torch.Generator(device=cpu)
+ generator.manual_seed(seed)
+ noise = torch.randn(shape, generator=generator, device=cpu).to(device)
+ return noise
+
+ torch.manual_seed(seed)
+ return torch.randn(shape, device=device)
+
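
The new devices.randn helper is the heart of the mps fix: seeding is unreliable on the metal backend, so the noise is drawn on the CPU with an explicitly seeded torch.Generator and then moved to the target device. A standalone restatement of the helper (not an import of the real module) to show the seed reproducibility it buys:

    import torch

    cpu = torch.device("cpu")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def randn(seed, shape):
        # On mps, draw on CPU with a seeded generator and move the result;
        # elsewhere, seed the global RNG and draw directly on the device.
        if device.type == "mps":
            generator = torch.Generator(device=cpu)
            generator.manual_seed(seed)
            return torch.randn(shape, generator=generator, device=cpu).to(device)
        torch.manual_seed(seed)
        return torch.randn(shape, device=device)

    a = randn(1234, (1, 4, 64, 64))
    b = randn(1234, (1, 4, 64, 64))
    assert torch.equal(a, b)  # same seed, identical noise on the same machine

Note the mps path makes results match the CPU, not CUDA: as the comment in processing.py says, GPU and CPU still produce different tensors for the same seed.
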
diff --git a/modules/extras.py b/modules/extras.py
index 40935f98..596cd172 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -13,6 +13,8 @@ cached_images = {}
def run_extras(image, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility):
devices.torch_gc()
+ existing_pnginfo = image.info or {}
+
image = image.convert("RGB")
info = ""
@@ -65,7 +67,7 @@ def run_extras(image, gfpgan_visibility, codeformer_visibility, codeformer_weigh
while len(cached_images) > 2:
del cached_images[next(iter(cached_images.keys()))]
- images.save_image(image, outpath, "", None, info=info, extension=opts.samples_format, short_filename=True, no_prompt=True, pnginfo_section_name="extras")
+ images.save_image(image, outpath, "", None, info=info, extension=opts.samples_format, short_filename=True, no_prompt=True, pnginfo_section_name="extras", existing_info=existing_pnginfo)
return image, plaintext_to_html(info), ''
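
run_extras now snapshots the source image's text chunks before converting it, and save_image (see the images.py hunk below) replays them into the output file, so generation parameters embedded in the input PNG survive postprocessing. A sketch of that pass-through, with illustrative file names and values coerced to str for safety:

    from PIL import Image, PngImagePlugin

    src = Image.open("input.png")      # illustrative path
    existing_info = src.info or {}     # text chunks of the source PNG

    out = src.convert("RGB")

    pnginfo = PngImagePlugin.PngInfo()
    for k, v in existing_info.items():
        pnginfo.add_text(k, str(v))    # re-attach the original metadata
    pnginfo.add_text("extras", "postprocessing parameters")  # new section
    out.save("output.png", pnginfo=pnginfo)
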
diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py
index f697326c..0af97123 100644
--- a/modules/gfpgan_model.py
+++ b/modules/gfpgan_model.py
@@ -2,7 +2,7 @@ import os
import sys
import traceback
-from modules import shared
+from modules import shared, devices
from modules.shared import cmd_opts
from modules.paths import script_path
import modules.face_restoration
@@ -28,24 +28,29 @@ def gfpgan():
global loaded_gfpgan_model
if loaded_gfpgan_model is not None:
+ loaded_gfpgan_model.gfpgan.to(shared.device)
return loaded_gfpgan_model
if gfpgan_constructor is None:
return None
model = gfpgan_constructor(model_path=gfpgan_model_path(), upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None)
-
- if not cmd_opts.unload_gfpgan:
- loaded_gfpgan_model = model
+ model.gfpgan.to(shared.device)
+ loaded_gfpgan_model = model
return model
def gfpgan_fix_faces(np_image):
+ model = gfpgan()
+
np_image_bgr = np_image[:, :, ::-1]
- cropped_faces, restored_faces, gfpgan_output_bgr = gfpgan().enhance(np_image_bgr, has_aligned=False, only_center_face=False, paste_back=True)
+ cropped_faces, restored_faces, gfpgan_output_bgr = model.enhance(np_image_bgr, has_aligned=False, only_center_face=False, paste_back=True)
np_image = gfpgan_output_bgr[:, :, ::-1]
+ if shared.opts.face_restoration_unload:
+ model.gfpgan.to(devices.cpu)
+
return np_image
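
GFPGAN gets the same cache-plus-unload treatment as CodeFormer: the model is kept in loaded_gfpgan_model across calls, re-placed on shared.device on every use, and moved to CPU afterwards when the face_restoration_unload option is on. A compact sketch of that lifecycle with a stand-in module:

    import torch
    import torch.nn as nn

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    cpu = torch.device("cpu")
    loaded_model = None

    def get_model():
        # Build once, but move back onto the main device on every call,
        # since the previous call may have parked the weights in RAM.
        global loaded_model
        if loaded_model is None:
            loaded_model = nn.Linear(4, 4)  # stand-in for gfpgan_constructor(...)
        loaded_model.to(device)
        return loaded_model

    def fix_faces(x, unload=False):
        model = get_model()
        with torch.no_grad():
            y = model(x.to(device))
        if unload:
            model.to(cpu)  # mirrors model.gfpgan.to(devices.cpu)
        return y

    print(fix_faces(torch.zeros(1, 4), unload=True).shape)
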
diff --git a/modules/images.py b/modules/images.py
index 334f8fec..d742ed98 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -135,7 +135,12 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts):
fontsize = (width + height) // 25
line_spacing = fontsize // 2
- fnt = ImageFont.truetype(opts.font or Roboto, fontsize)
+
+ try:
+ fnt = ImageFont.truetype(opts.font or Roboto, fontsize)
+ except Exception:
+ fnt = ImageFont.truetype(Roboto, fontsize)
+
color_active = (0, 0, 0)
color_inactive = (153, 153, 153)
@@ -247,7 +252,7 @@ def sanitize_filename_part(text, replace_spaces=True):
return text.translate({ord(x): '' for x in invalid_filename_chars})[:128]
-def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, pnginfo_section_name='parameters', p=None):
+def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, pnginfo_section_name='parameters', p=None, existing_info=None):
# would be better to add this as an argument in future, but will do for now
is_a_grid = basename != ""
@@ -258,7 +263,9 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
else:
file_decoration = opts.samples_filename_format or "[seed]-[prompt_spaces]"
- file_decoration = "-" + file_decoration.lower()
+ if file_decoration != "":
+ file_decoration = "-" + file_decoration.lower()
+
if seed is not None:
file_decoration = file_decoration.replace("[seed]", str(seed))
if prompt is not None:
@@ -273,6 +280,11 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
if extension == 'png' and opts.enable_pnginfo and info is not None:
pnginfo = PngImagePlugin.PngInfo()
+
+ if existing_info is not None:
+ for k, v in existing_info.items():
+ pnginfo.add_text(k, v)
+
pnginfo.add_text(pnginfo_section_name, info)
else:
pnginfo = None
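
Two small robustness fixes in images.py: font loading now falls back to the bundled Roboto when the configured font cannot be opened, and the filename decoration is only prefixed with "-" when it is non-empty, so short filenames no longer end in a stray dash. The decoration guard in isolation (a hypothetical helper, not the real function):

    def make_filename(number, file_decoration):
        # Only prefix "-" when a decoration is actually present, so an empty
        # decoration yields "00001.png" rather than "00001-.png".
        if file_decoration != "":
            file_decoration = "-" + file_decoration.lower()
        return f"{number:05}{file_decoration}.png"

    print(make_filename(1, ""))        # 00001.png
    print(make_filename(2, "[SEED]"))  # 00002-[seed].png
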
diff --git a/modules/processing.py b/modules/processing.py
index aaecb104..23b0c08f 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -65,6 +65,7 @@ class Processed:
def __init__(self, p: StableDiffusionProcessing, images_list, seed, info):
self.images = images_list
self.prompt = p.prompt
+ self.negative_prompt = p.negative_prompt
self.seed = seed
self.info = info
self.width = p.width
@@ -76,6 +77,7 @@ class Processed:
def js(self):
obj = {
"prompt": self.prompt if type(self.prompt) != list else self.prompt[0],
+ "negative_prompt": self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0],
"seed": int(self.seed if type(self.seed) != list else self.seed[0]),
"width": self.width,
"height": self.height,
@@ -104,15 +106,14 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see
subnoise = None
if subseeds is not None:
subseed = 0 if i >= len(subseeds) else subseeds[i]
- torch.manual_seed(subseed)
- subnoise = torch.randn(noise_shape, device=shared.device)
+
+ subnoise = devices.randn(subseed, noise_shape)
# randn results depend on device; gpu and cpu get different results for same seed;
# the way I see it, it's better to do this on CPU, so that everyone gets same result;
# but the original script had it like this, so I do not dare change it for now because
# it will break everyone's seeds.
- torch.manual_seed(seed)
- noise = torch.randn(noise_shape, device=shared.device)
+ noise = devices.randn(seed, noise_shape)
if subnoise is not None:
#noise = subnoise * subseed_strength + noise * (1 - subseed_strength)
@@ -120,12 +121,8 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see
if noise_shape != shape:
#noise = torch.nn.functional.interpolate(noise.unsqueeze(1), size=shape[1:], mode="bilinear").squeeze()
- # noise_shape = (64, 80)
- # shape = (64, 72)
-
- torch.manual_seed(seed)
- x = torch.randn(shape, device=shared.device)
- dx = (shape[2] - noise_shape[2]) // 2 # -4
+ x = devices.randn(seed, shape)
+ dx = (shape[2] - noise_shape[2]) // 2
dy = (shape[1] - noise_shape[1]) // 2
w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx
h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy
@@ -463,7 +460,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
if self.image_mask is not None:
init_mask = latent_mask
latmask = init_mask.convert('RGB').resize((self.init_latent.shape[3], self.init_latent.shape[2]))
- latmask = np.moveaxis(np.array(latmask, dtype=np.float64), 2, 0) / 255
+ latmask = np.moveaxis(np.array(latmask, dtype=np.float32), 2, 0) / 255
latmask = latmask[0]
latmask = np.around(latmask)
latmask = np.tile(latmask[None], (4, 1, 1))
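
With all three call sites routed through devices.randn, main noise, subseed noise, and the resized-seed case share one seeding path, which is what keeps seeds consistent on mps. (The latmask change is separate: float32 matches torch's default dtype, and the mps backend has no float64 support.) The commented-out line above hints at how subseed noise is mixed in; a sketch of that linear blend, with randn restated inline (the shipped code may interpolate differently):

    import torch

    def randn(seed, shape, device=torch.device("cpu")):
        # Simplified stand-in for modules/devices.randn (see the devices.py hunk).
        torch.manual_seed(seed)
        return torch.randn(shape, device=device)

    shape = (4, 64, 64)
    noise = randn(100, shape)      # from the main seed
    subnoise = randn(200, shape)   # from the variation (sub)seed
    subseed_strength = 0.3

    # Linear blend between the two noise tensors, per the commented-out line.
    blended = subnoise * subseed_strength + noise * (1 - subseed_strength)
    print(blended.shape)
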
diff --git a/modules/shared.py b/modules/shared.py
index afee573b..ea1c879b 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -30,7 +30,7 @@ parser.add_argument("--allow-code", action='store_true', help="allow custom scri
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="a workaround test; may help with speed if you use --lowvram")
-parser.add_argument("--unload-gfpgan", action='store_true', help="unload GFPGAN every time after processing images. Warning: seems to cause memory leaks")
+parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site (doesn't work for me but you might have better luck)")
parser.add_argument("--esrgan-models-path", type=str, help="path to directory with ESRGAN models", default=os.path.join(script_path, 'ESRGAN'))
@@ -133,6 +133,7 @@ class Options:
"face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
+ "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
"interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
"interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"),
"interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
diff --git a/modules/ui.py b/modules/ui.py
index 3a28bdab..535afaeb 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -93,7 +93,7 @@ def save_files(js_data, images):
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
- writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename"])
+ writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
filename_base = str(int(time.time() * 1000))
for i, filedata in enumerate(images):
@@ -108,7 +108,7 @@ def save_files(js_data, images):
filenames.append(filename)
- writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0]])
+ writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
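
negative_prompt is appended as the last CSV column rather than inserted next to prompt, so rows written by older versions keep their field positions. One caveat implied by the code above: the header is only written when the file is empty, so a pre-existing log.csv keeps its old 8-column header while new rows carry 9 fields. A quick illustration of the new layout (sample values are made up):

    import csv, io

    buf = io.StringIO()
    writer = csv.writer(buf)
    writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
    writer.writerow(["a cat", 42, 512, 512, "Euler a", 7.0, 20, "00001.png", "blurry"])

    buf.seek(0)
    for row in csv.DictReader(buf):
        print(row["prompt"], "|", row["negative_prompt"])
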
@@ -384,8 +384,8 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
switch_mode = gr.Radio(label='Mode', elem_id="img2img_mode", choices=['Redraw whole image', 'Inpaint a part of image', 'Loopback', 'SD upscale'], value='Redraw whole image', type="index", show_label=False)
init_img = gr.Image(label="Image for img2img", source="upload", interactive=True, type="pil")
init_img_with_mask = gr.Image(label="Image for inpainting with mask", elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", visible=False, image_mode="RGBA")
- init_img_with_mask_comment = gr.HTML(elem_id="mask_bug_info", value="<small>if the editor shows ERROR, switch to another tab and back, then to another img2img mode above and back</small>", visible=False)
init_mask = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False)
+ init_img_with_mask_comment = gr.HTML(elem_id="mask_bug_info", value="<small>if the editor shows ERROR, switch to another tab and back, then to another img2img mode above and back</small>", visible=False)
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")