From 315bd7c9e8a20a28fa7fd1ddd5fddbf3b5a9b41c Mon Sep 17 00:00:00 2001
From: Keith Dreibelbis
Date: Tue, 1 Nov 2022 19:45:35 -0700
Subject: prompts_from_file: allow random seeds to be preserved for the list
 of prompts

---
 scripts/prompts_from_file.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'scripts/prompts_from_file.py')

diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index 1be22960..8d4911ae 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -96,6 +96,7 @@ class Script(scripts.Script):
 
     def ui(self, is_img2img):
         checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False)
+        checkbox_iterate_batch = gr.Checkbox(label="Preserve random seed across lines (for use with \"Generate Forever\")", value=False)
 
         prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1)
         file = gr.File(label="Upload prompt inputs", type='bytes')
@@ -106,9 +107,9 @@ class Script(scripts.Script):
         # We don't shrink back to 1, because that causes the control to ignore [enter], and it may
         # be unclear to the user that shift-enter is needed.
         prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt])
-        return [checkbox_iterate, file, prompt_txt]
+        return [checkbox_iterate, checkbox_iterate_batch, file, prompt_txt]
 
-    def run(self, p, checkbox_iterate, file, prompt_txt: str):
+    def run(self, p, checkbox_iterate, checkbox_iterate_batch, file, prompt_txt: str):
         lines = [x.strip() for x in prompt_txt.splitlines()]
         lines = [x for x in lines if len(x) > 0]
 
@@ -137,7 +138,7 @@ class Script(scripts.Script):
             jobs.append(args)
 
         print(f"Will process {len(lines)} lines in {job_count} jobs.")
-        if (checkbox_iterate and p.seed == -1):
+        if ((checkbox_iterate or checkbox_iterate_batch) and p.seed == -1):
             p.seed = int(random.randrange(4294967294))
 
         state.job_count = job_count
--
cgit v1.2.3

From 55688c48806f9383f3a56f6b9a0ab8fbf205edd2 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 2 Nov 2022 07:02:45 +0300
Subject: rename the seed option from #4146

---
 scripts/prompts_from_file.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'scripts/prompts_from_file.py')

diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index 8d4911ae..d187cd9c 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -96,7 +96,7 @@ class Script(scripts.Script):
 
     def ui(self, is_img2img):
         checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False)
-        checkbox_iterate_batch = gr.Checkbox(label="Preserve random seed across lines (for use with \"Generate Forever\")", value=False)
+        checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False)
 
         prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1)
         file = gr.File(label="Upload prompt inputs", type='bytes')
@@ -138,7 +138,7 @@ class Script(scripts.Script):
             jobs.append(args)
 
         print(f"Will process {len(lines)} lines in {job_count} jobs.")
-        if ((checkbox_iterate or checkbox_iterate_batch) and p.seed == -1):
+        if (checkbox_iterate or checkbox_iterate_batch) and p.seed == -1:
            p.seed = int(random.randrange(4294967294))
 
         state.job_count = job_count
@@ -154,7 +154,7 @@ class Script(scripts.Script):
             proc = process_images(copy_p)
             images += proc.images
 
-            if (checkbox_iterate):
+            if checkbox_iterate:
                 p.seed = p.seed + (p.batch_size * p.n_iter)
 
--
cgit v1.2.3
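Taken together, the two patches above give the script two seed options: "Iterate seed every line" and "Use same random seed for all lines". A minimal sketch of the resulting seed handling, distilled from the diff (the function name here is illustrative, not part of the script):

    import random

    def pick_starting_seed(seed, iterate_every_line, same_seed_for_all_lines):
        # If either option is enabled and the seed is random (-1), fix a concrete
        # seed up front so it can be reused or incremented across prompt lines.
        if (iterate_every_line or same_seed_for_all_lines) and seed == -1:
            seed = int(random.randrange(4294967294))
        return seed

With "Iterate seed every line" enabled, the script then advances the seed by batch_size * n_iter after each line; with "Use same random seed for all lines" the fixed seed is left unchanged.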
From 4dd898b8c15e342f817d3fb1c8dc9f2d5d111022 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 4 Nov 2022 08:38:11 +0300
Subject: do not mess with components' visibility for scripts; instead create
 group components and show/hide those; this will break scripts that create
 invisible components and rely on UI but the earlier i make this change the
 better

---
 modules/scripts.py               | 34 ++++++++++++++++++----------------
 scripts/custom_code.py           |  2 +-
 scripts/outpainting_mk_2.py      |  2 +-
 scripts/poor_mans_outpainting.py |  4 ++--
 scripts/prompts_from_file.py     | 10 +++++-----
 scripts/sd_upscale.py            |  4 ++--
 scripts/xy_grid.py               |  8 ++++----
 7 files changed, 33 insertions(+), 31 deletions(-)

(limited to 'scripts/prompts_from_file.py')

diff --git a/modules/scripts.py b/modules/scripts.py
index 533db45c..28ce07f4 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -18,6 +18,9 @@ class Script:
     args_to = None
     alwayson = False
 
+    """A gr.Group component that has all script's UI inside it"""
+    group = None
+
     infotext_fields = None
     """if set in ui(), this is a list of pairs of gradio component + text; the text will be used when
     parsing infotext to set the value for the component; see ui.py's txt2img_paste_fields for an example
@@ -218,8 +221,6 @@ class ScriptRunner:
 
             for control in controls:
                 control.custom_script_source = os.path.basename(script.filename)
-                if not script.alwayson:
-                    control.visible = False
 
             if script.infotext_fields is not None:
                 self.infotext_fields += script.infotext_fields
@@ -229,40 +230,41 @@ class ScriptRunner:
             script.args_to = len(inputs)
 
         for script in self.alwayson_scripts:
-            with gr.Group():
+            with gr.Group() as group:
                 create_script_ui(script, inputs, inputs_alwayson)
 
+            script.group = group
+
         dropdown = gr.Dropdown(label="Script", elem_id="script_list", choices=["None"] + self.titles, value="None", type="index")
         dropdown.save_to_config = True
         inputs[0] = dropdown
 
         for script in self.selectable_scripts:
-            create_script_ui(script, inputs, inputs_alwayson)
+            with gr.Group(visible=False) as group:
+                create_script_ui(script, inputs, inputs_alwayson)
+
+            script.group = group
 
         def select_script(script_index):
-            if 0 < script_index <= len(self.selectable_scripts):
-                script = self.selectable_scripts[script_index-1]
-                args_from = script.args_from
-                args_to = script.args_to
-            else:
-                args_from = 0
-                args_to = 0
+            selected_script = self.selectable_scripts[script_index - 1] if script_index>0 else None
 
-            return [ui.gr_show(True if i == 0 else args_from <= i < args_to or is_alwayson) for i, is_alwayson in enumerate(inputs_alwayson)]
+            return [gr.update(visible=selected_script == s) for s in self.selectable_scripts]
 
         def init_field(title):
+            """called when an initial value is set from ui-config.json to show script's UI components"""
+
             if title == 'None':
                 return
+
             script_index = self.titles.index(title)
-            script = self.selectable_scripts[script_index]
-            for i in range(script.args_from, script.args_to):
-                inputs[i].visible = True
+            self.selectable_scripts[script_index].group.visible = True
 
         dropdown.init_field = init_field
+
         dropdown.change(
             fn=select_script,
             inputs=[dropdown],
-            outputs=inputs
+            outputs=[script.group for script in self.selectable_scripts]
         )
 
         return inputs
diff --git a/scripts/custom_code.py b/scripts/custom_code.py
index a9b10c09..22e7b77a 100644
--- a/scripts/custom_code.py
+++ b/scripts/custom_code.py
@@ -14,7 +14,7 @@ class Script(scripts.Script):
         return cmd_opts.allow_code
 
     def ui(self, is_img2img):
-        code = gr.Textbox(label="Python code", visible=False, lines=1)
+        code = gr.Textbox(label="Python code", lines=1)
 
         return [code]
 
diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py
index 2afd4aa5..cf71cb92 100644
--- a/scripts/outpainting_mk_2.py
+++ b/scripts/outpainting_mk_2.py
@@ -132,7 +132,7 @@ class Script(scripts.Script):
         info = gr.HTML("<p style=\"margin-bottom:0.75em\">Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8</p>")
 
         pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128)
-        mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, visible=False)
+        mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8)
         direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'])
         noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0)
         color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05)
diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py
index b0469110..ea45beb0 100644
--- a/scripts/poor_mans_outpainting.py
+++ b/scripts/poor_mans_outpainting.py
@@ -22,8 +22,8 @@ class Script(scripts.Script):
             return None
 
         pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128)
-        mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, visible=False)
-        inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", visible=False)
+        mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
+        inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index")
         direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'])
 
         return [pixels, mask_blur, inpainting_fill, direction]
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index d187cd9c..3388bc77 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -83,13 +83,14 @@ def cmdargs(line):
 
 
 def load_prompt_file(file):
-    if (file is None):
+    if file is None:
         lines = []
     else:
         lines = [x.strip() for x in file.decode('utf8', errors='ignore').split("\n")]
 
     return None, "\n".join(lines), gr.update(lines=7)
 
+
 class Script(scripts.Script):
     def title(self):
         return "Prompts from file or textbox"
@@ -107,9 +108,9 @@ class Script(scripts.Script):
         # We don't shrink back to 1, because that causes the control to ignore [enter], and it may
         # be unclear to the user that shift-enter is needed.
         prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt])
-        return [checkbox_iterate, checkbox_iterate_batch, file, prompt_txt]
+        return [checkbox_iterate, checkbox_iterate_batch, prompt_txt]
 
-    def run(self, p, checkbox_iterate, checkbox_iterate_batch, file, prompt_txt: str):
+    def run(self, p, checkbox_iterate, checkbox_iterate_batch, prompt_txt: str):
         lines = [x.strip() for x in prompt_txt.splitlines()]
         lines = [x for x in lines if len(x) > 0]
 
@@ -157,5 +158,4 @@ class Script(scripts.Script):
             if checkbox_iterate:
                 p.seed = p.seed + (p.batch_size * p.n_iter)
 
-
-        return Processed(p, images, p.seed, "")
\ No newline at end of file
+        return Processed(p, images, p.seed, "")
diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py
index cb37ff7e..01074291 100644
--- a/scripts/sd_upscale.py
+++ b/scripts/sd_upscale.py
@@ -18,8 +18,8 @@ class Script(scripts.Script):
     def ui(self, is_img2img):
         info = gr.HTML("<p style=\"margin-bottom:0.75em\">Will upscale the image to twice the dimensions; use width and height sliders to set tile size</p>")
 
-        overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, visible=False)
-        upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", visible=False)
+        overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64)
+        upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
 
         return [info, overlap, upscaler_index]
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index f5255786..417ed0d4 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -263,12 +263,12 @@ class Script(scripts.Script):
         current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img]
 
         with gr.Row():
-            x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, visible=False, type="index", elem_id="x_type")
-            x_values = gr.Textbox(label="X values", visible=False, lines=1)
+            x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id="x_type")
+            x_values = gr.Textbox(label="X values", lines=1)
 
         with gr.Row():
-            y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, visible=False, type="index", elem_id="y_type")
-            y_values = gr.Textbox(label="Y values", visible=False, lines=1)
+            y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, type="index", elem_id="y_type")
+            y_values = gr.Textbox(label="Y values", lines=1)
 
         draw_legend = gr.Checkbox(label='Draw legend', value=True)
         include_lone_images = gr.Checkbox(label='Include Separate Images', value=False)
--
cgit v1.2.3
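The commit above changes how script UIs are shown: instead of toggling the visibility of every individual component, each selectable script's controls are wrapped in a gr.Group that starts hidden, and the "Script" dropdown toggles whole groups. A self-contained sketch of that pattern (illustrative only; component names are made up, and it assumes a Gradio version from the same era):

    import gradio as gr

    with gr.Blocks() as demo:
        # type="index" makes the dropdown pass the selected index to callbacks
        dropdown = gr.Dropdown(label="Script", choices=["None", "Example script"], value="None", type="index")

        # the script's controls live inside a hidden group, as in modules/scripts.py above
        with gr.Group(visible=False) as group:
            gr.Textbox(label="Example script option")

        # show the group only when its script is selected (index 1 here)
        dropdown.change(fn=lambda index: gr.update(visible=index == 1), inputs=[dropdown], outputs=[group])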
From 81f2575df91a50e4aa9ca816e02e3f77342eedc8 Mon Sep 17 00:00:00 2001
From: Liam
Date: Wed, 9 Nov 2022 15:24:31 -0500
Subject: updating the displayed generation info when user clicks images in
 the gallery. feature request 4415

---
 javascript/ui.js             | 10 +++++++++-
 modules/ui.py                | 20 ++++++++++++++++++++
 scripts/prompt_matrix.py     |  2 ++
 scripts/prompts_from_file.py |  6 +++++-
 4 files changed, 36 insertions(+), 2 deletions(-)

(limited to 'scripts/prompts_from_file.py')

diff --git a/javascript/ui.js b/javascript/ui.js
index 95cfd106..443d1642 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -179,9 +179,17 @@ onUiUpdate(function(){
         img2img_textarea = gradioApp().querySelector("#img2img_prompt > label > textarea");
         img2img_textarea?.addEventListener("input", () => update_token_counter("img2img_token_button"));
     }
+    if (!txt2img_gallery) {
+        txt2img_gallery = gradioApp().querySelector('#txt2img_gallery')
+        txt2img_gallery?.addEventListener('click', () => gradioApp().getElementById("txt2img_generation_info_button").click());
+    }
+    if (!img2img_gallery) {
+        img2img_gallery = gradioApp().querySelector('#img2img_gallery')
+        img2img_gallery?.addEventListener('click', () => gradioApp().getElementById("img2img_generation_info_button").click());
+    }
 })
 
-let txt2img_textarea, img2img_textarea = undefined;
+let txt2img_textarea, img2img_textarea, txt2img_gallery, img2img_gallery = undefined;
 let wait_time = 800
 let token_timeout;
 
diff --git a/modules/ui.py b/modules/ui.py
index 7ea1177f..756499d1 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -566,6 +566,17 @@ def apply_setting(key, value):
     return value
 
 
+def update_generation_info(args):
+    generation_info, html_info, img_index = args
+    try:
+        generation_info = json.loads(generation_info)
+        return plaintext_to_html(generation_info["infotexts"][img_index])
+    except Exception:
+        pass
+    # if the json parse or anything else fails, just return the old html_info
+    return html_info
+
+
 def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_id):
     def refresh():
         refresh_method()
@@ -638,6 +649,15 @@ Requested path was: {f}
             with gr.Group():
                 html_info = gr.HTML()
                 generation_info = gr.Textbox(visible=False)
+                if tabname == 'txt2img' or tabname == 'img2img':
+                    generation_info_button = gr.Button(visible=False, elem_id=f"{tabname}_generation_info_button")
+                    generation_info_button.click(
+                        fn=update_generation_info,
+                        _js="(x, y) => [x, y, selected_gallery_index()]",
+                        inputs=[generation_info, html_info],
+                        outputs=[html_info],
+                        preprocess=False
+                    )
 
                 save.click(
                     fn=wrap_gradio_call(save_files),
diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py
index e49c9b20..4d1e152d 100644
--- a/scripts/prompt_matrix.py
+++ b/scripts/prompt_matrix.py
@@ -80,6 +80,8 @@ class Script(scripts.Script):
         grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2))
         grid = images.draw_prompt_matrix(grid, p.width, p.height, prompt_matrix_parts)
         processed.images.insert(0, grid)
+        processed.index_of_first_image = 1
+        processed.infotexts.insert(0, processed.infotexts[0])
 
         if opts.grid_save:
             images.save_image(processed.images[0], p.outpath_grids, "prompt_matrix", prompt=original_prompt, seed=processed.seed, grid=True, p=p)
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index 3388bc77..32fe6bdb 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -145,6 +145,8 @@ class Script(scripts.Script):
         state.job_count = job_count
 
         images = []
+        all_prompts = []
+        infotexts = []
         for n, args in enumerate(jobs):
             state.job = f"{state.job_no + 1} out of {state.job_count}"
 
@@ -157,5 +159,7 @@ class Script(scripts.Script):
 
             if checkbox_iterate:
                 p.seed = p.seed + (p.batch_size * p.n_iter)
+            all_prompts += proc.all_prompts
+            infotexts += proc.infotexts
 
-        return Processed(p, images, p.seed, "")
+        return Processed(p, images, p.seed, "", all_prompts=all_prompts, infotexts=infotexts)
--
cgit v1.2.3

From 27c0504bc4d17eec6e58148ab33c75f5ed2e6f00 Mon Sep 17 00:00:00 2001
From: David Vorick
Date: Tue, 13 Dec 2022 12:03:16 -0500
Subject: add support for prompts, negative prompts, and sampler-by-name in
 text file script

---
 scripts/prompts_from_file.py | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

(limited to 'scripts/prompts_from_file.py')

diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index 32fe6bdb..6e118ddb 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -9,6 +9,7 @@ import shlex
 import modules.scripts as scripts
 import gradio as gr
 
+from modules import sd_samplers
 from modules.processing import Processed, process_images
 from PIL import Image
 from modules.shared import opts, cmd_opts, state
@@ -44,6 +45,7 @@ prompt_tags = {
     "seed_resize_from_h": process_int_tag,
     "seed_resize_from_w": process_int_tag,
     "sampler_index": process_int_tag,
+    "sampler_name": process_string_tag,
     "batch_size": process_int_tag,
     "n_iter": process_int_tag,
     "steps": process_int_tag,
@@ -66,14 +68,28 @@ def cmdargs(line):
         arg = args[pos]
 
         assert arg.startswith("--"), f'must start with "--": {arg}'
+        assert pos+1 < len(args), f'missing argument for command line option {arg}'
+
         tag = arg[2:]
 
+        if tag == "prompt" or tag == "negative_prompt":
+            pos += 1
+            prompt = args[pos]
+            pos += 1
+            while pos < len(args) and not args[pos].startswith("--"):
+                prompt += " "
+                prompt += args[pos]
+                pos += 1
+            res[tag] = prompt
+            continue
+
+
         func = prompt_tags.get(tag, None)
         assert func, f'unknown commandline option: {arg}'
 
-        assert pos+1 < len(args), f'missing argument for command line option {arg}'
-
         val = args[pos+1]
+        if tag == "sampler_name":
+            val = sd_samplers.samplers_map.get(val.lower(), None)
 
         res[tag] = func(val)
 
--
cgit v1.2.3
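With the commit above, a line in the prompt file can carry per-line options: --prompt and --negative_prompt consume every following token up to the next "--" option, and --sampler_name is resolved through sd_samplers.samplers_map, so a sampler can be chosen by name instead of by index. An illustrative line that should parse under these rules (the prompt text and values are examples only):

    --prompt a painting of a lighthouse at dusk --negative_prompt blurry, low quality --sampler_name "euler a" --steps 20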
From 3bf5591efe9a9f219c6088be322a87adc4f48f95 Mon Sep 17 00:00:00 2001
From: Yuval Aboulafia
Date: Sat, 24 Dec 2022 21:35:29 +0200
Subject: fix F541 f-string without any placeholders

---
 extensions-builtin/LDSR/ldsr_model_arch.py     | 2 +-
 modules/codeformer/vqgan_arch.py               | 4 ++--
 modules/hypernetworks/hypernetwork.py          | 4 ++--
 modules/images.py                              | 2 +-
 modules/interrogate.py                         | 2 +-
 modules/safe.py                                | 8 ++++----
 modules/sd_models.py                           | 8 ++++----
 modules/sd_vae.py                              | 2 +-
 modules/textual_inversion/textual_inversion.py | 2 +-
 scripts/prompts_from_file.py                   | 2 +-
 10 files changed, 18 insertions(+), 18 deletions(-)

(limited to 'scripts/prompts_from_file.py')

diff --git a/extensions-builtin/LDSR/ldsr_model_arch.py b/extensions-builtin/LDSR/ldsr_model_arch.py
index f5bd8ae4..0ad49f4e 100644
--- a/extensions-builtin/LDSR/ldsr_model_arch.py
+++ b/extensions-builtin/LDSR/ldsr_model_arch.py
@@ -26,7 +26,7 @@ class LDSR:
         global cached_ldsr_model
 
         if shared.opts.ldsr_cached and cached_ldsr_model is not None:
-            print(f"Loading model from cache")
+            print("Loading model from cache")
             model: torch.nn.Module = cached_ldsr_model
         else:
             print(f"Loading model from {self.modelPath}")
diff --git a/modules/codeformer/vqgan_arch.py b/modules/codeformer/vqgan_arch.py
index c06c590c..e7293683 100644
--- a/modules/codeformer/vqgan_arch.py
+++ b/modules/codeformer/vqgan_arch.py
@@ -382,7 +382,7 @@ class VQAutoEncoder(nn.Module):
                 self.load_state_dict(torch.load(model_path, map_location='cpu')['params'])
                 logger.info(f'vqgan is loaded from: {model_path} [params]')
             else:
-                raise ValueError(f'Wrong params!')
+                raise ValueError('Wrong params!')
 
 
     def forward(self, x):
@@ -431,7 +431,7 @@ class VQGANDiscriminator(nn.Module):
             elif 'params' in chkpt:
                 self.load_state_dict(torch.load(model_path, map_location='cpu')['params'])
             else:
-                raise ValueError(f'Wrong params!')
+                raise ValueError('Wrong params!')
 
     def forward(self, x):
         return self.main(x)
\ No newline at end of file
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index c406ffb3..9d3034ae 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -277,7 +277,7 @@ def load_hypernetwork(filename):
             print(traceback.format_exc(), file=sys.stderr)
     else:
         if shared.loaded_hypernetwork is not None:
-            print(f"Unloading hypernetwork")
+            print("Unloading hypernetwork")
 
         shared.loaded_hypernetwork = None
 
@@ -417,7 +417,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
 
     initial_step = hypernetwork.step or 0
     if initial_step >= steps:
-        shared.state.textinfo = f"Model has already been trained beyond specified max steps"
+        shared.state.textinfo = "Model has already been trained beyond specified max steps"
         return hypernetwork, filename
 
     scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
diff --git a/modules/images.py b/modules/images.py
index 809ad9f7..31d4528d 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -599,7 +599,7 @@ def read_info_from_image(image):
 Negative prompt: {json_info["uc"]}
 Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337"""
         except Exception:
-            print(f"Error parsing NovelAI image generation parameters:", file=sys.stderr)
+            print("Error parsing NovelAI image generation parameters:", file=sys.stderr)
             print(traceback.format_exc(), file=sys.stderr)
 
     return geninfo, items
diff --git a/modules/interrogate.py b/modules/interrogate.py
index 0068b81c..46935210 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -172,7 +172,7 @@ class InterrogateModels:
                         res += ", " + match
 
         except Exception:
-            print(f"Error interrogating", file=sys.stderr)
+            print("Error interrogating", file=sys.stderr)
             print(traceback.format_exc(), file=sys.stderr)
             res += "<error>"
 
diff --git a/modules/safe.py b/modules/safe.py
index 479c8b86..1d4c20b9 100644
--- a/modules/safe.py
+++ b/modules/safe.py
@@ -137,15 +137,15 @@ def load_with_extra(filename, extra_handler=None, *args, **kwargs):
 
     except pickle.UnpicklingError:
         print(f"Error verifying pickled file from {filename}:", file=sys.stderr)
         print(traceback.format_exc(), file=sys.stderr)
-        print(f"-----> !!!! The file is most likely corrupted !!!! <-----", file=sys.stderr)
-        print(f"You can skip this check with --disable-safe-unpickle commandline argument, but that is not going to help you.\n\n", file=sys.stderr)
+        print("-----> !!!! The file is most likely corrupted !!!! <-----", file=sys.stderr)
+        print("You can skip this check with --disable-safe-unpickle commandline argument, but that is not going to help you.\n\n", file=sys.stderr)
         return None
     except Exception:
         print(f"Error verifying pickled file from {filename}:", file=sys.stderr)
         print(traceback.format_exc(), file=sys.stderr)
-        print(f"\nThe file may be malicious, so the program is not going to read it.", file=sys.stderr)
-        print(f"You can skip this check with --disable-safe-unpickle commandline argument.\n\n", file=sys.stderr)
+        print("\nThe file may be malicious, so the program is not going to read it.", file=sys.stderr)
+        print("You can skip this check with --disable-safe-unpickle commandline argument.\n\n", file=sys.stderr)
         return None
 
     return unsafe_torch_load(filename, *args, **kwargs)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 6ca06211..ecdd91c5 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -117,13 +117,13 @@ def select_checkpoint():
         return checkpoint_info
 
     if len(checkpoints_list) == 0:
-        print(f"No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
+        print("No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
         if shared.cmd_opts.ckpt is not None:
            print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
         print(f" - directory {model_path}", file=sys.stderr)
         if shared.cmd_opts.ckpt_dir is not None:
             print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
-        print(f"Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
+        print("Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
         exit(1)
 
     checkpoint_info = next(iter(checkpoints_list.values()))
@@ -324,7 +324,7 @@ def load_model(checkpoint_info=None):
 
     script_callbacks.model_loaded_callback(sd_model)
 
-    print(f"Model loaded.")
+    print("Model loaded.")
     return sd_model
 
 
@@ -359,5 +359,5 @@ def reload_model_weights(sd_model=None, info=None):
         if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
             sd_model.to(devices.device)
 
-    print(f"Weights loaded.")
+    print("Weights loaded.")
     return sd_model
diff --git a/modules/sd_vae.py b/modules/sd_vae.py
index 25638a83..3856418e 100644
--- a/modules/sd_vae.py
+++ b/modules/sd_vae.py
@@ -208,5 +208,5 @@ def reload_vae_weights(sd_model=None, vae_file="auto"):
     if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
         sd_model.to(devices.device)
 
-    print(f"VAE Weights loaded.")
+    print("VAE Weights loaded.")
     return sd_model
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index daf3997b..f6112578 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -263,7 +263,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
 
     initial_step = embedding.step or 0
     if initial_step >= steps:
-        shared.state.textinfo = f"Model has already been trained beyond specified max steps"
+        shared.state.textinfo = "Model has already been trained beyond specified max steps"
         return embedding, filename
 
     scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index 6e118ddb..e8386ed2 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -140,7 +140,7 @@ class Script(scripts.Script):
                 try:
                     args = cmdargs(line)
                 except Exception:
-                    print(f"Error parsing line [line] as commandline:", file=sys.stderr)
+                    print(f"Error parsing line {line} as commandline:", file=sys.stderr)
                     print(traceback.format_exc(), file=sys.stderr)
                     args = {"prompt": line}
             else:
--
cgit v1.2.3
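The last commit is a lint cleanup: flake8's F541 flags f-strings that contain no placeholders, and the fix is simply to drop the f prefix. The pattern, in miniature:

    print(f"Model loaded.")  # flagged by F541: f-string without any placeholders
    print("Model loaded.")   # equivalent plain string literal, no warning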