diff options
author | AUTOMATIC <16777216c@gmail.com> | 2022-10-08 11:25:47 +0000 |
---|---|---|
committer | AUTOMATIC <16777216c@gmail.com> | 2022-10-08 11:25:47 +0000 |
commit | 4999eb2ef9b30e8c42ca7e4a94d4bbffe4d1f015 (patch) | |
tree | dbedb75c84751c4111e078b58f6f29ddababb009 /modules/processing.py | |
parent | 00117a07efbbe8482add12262a179326541467de (diff) | |
download | stable-diffusion-webui-gfx803-4999eb2ef9b30e8c42ca7e4a94d4bbffe4d1f015.tar.gz stable-diffusion-webui-gfx803-4999eb2ef9b30e8c42ca7e4a94d4bbffe4d1f015.tar.bz2 stable-diffusion-webui-gfx803-4999eb2ef9b30e8c42ca7e4a94d4bbffe4d1f015.zip |
do not let user choose his own prompt token count limit
Diffstat (limited to 'modules/processing.py')
-rw-r--r-- | modules/processing.py | 5 |
1 file changed, 0 insertions, 5 deletions
diff --git a/modules/processing.py b/modules/processing.py index 3657fe69..d5162ddc 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -123,7 +123,6 @@ class Processed: self.index_of_first_image = index_of_first_image
self.styles = p.styles
self.job_timestamp = state.job_timestamp
- self.max_prompt_tokens = opts.max_prompt_tokens
self.eta = p.eta
self.ddim_discretize = p.ddim_discretize
@@ -171,7 +170,6 @@ class Processed: "infotexts": self.infotexts,
"styles": self.styles,
"job_timestamp": self.job_timestamp,
- "max_prompt_tokens": self.max_prompt_tokens,
}
return json.dumps(obj)
@@ -269,8 +267,6 @@ def fix_seed(p): def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0):
index = position_in_batch + iteration * p.batch_size
- max_tokens = getattr(p, 'max_prompt_tokens', opts.max_prompt_tokens)
-
generation_params = {
"Steps": p.steps,
"Sampler": sd_samplers.samplers[p.sampler_index].name,
@@ -286,7 +282,6 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
"Denoising strength": getattr(p, 'denoising_strength', None),
"Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
- "Max tokens": (None if max_tokens == shared.vanilla_max_prompt_tokens else max_tokens)
}
generation_params.update(p.extra_generation_params)
|