diff options
author | AUTOMATIC <16777216c@gmail.com> | 2023-01-06 22:45:28 +0000 |
---|---|---|
committer | AUTOMATIC <16777216c@gmail.com> | 2023-01-06 22:46:13 +0000 |
commit | 79e39fae6110c20a3ee6255e2841c877f65e8cbd (patch) | |
tree | 8211b701f49da2b970d653789b3b008ef69a4c38 /modules/ui.py | |
parent | 3246a2d6b898da6a98fe9df4dc67944635a41bd3 (diff) | |
download | stable-diffusion-webui-gfx803-79e39fae6110c20a3ee6255e2841c877f65e8cbd.tar.gz stable-diffusion-webui-gfx803-79e39fae6110c20a3ee6255e2841c877f65e8cbd.tar.bz2 stable-diffusion-webui-gfx803-79e39fae6110c20a3ee6255e2841c877f65e8cbd.zip |
CLIP hijack rework
Diffstat (limited to 'modules/ui.py')
-rw-r--r-- | modules/ui.py | 2 |
1 file changed, 1 insertion, 1 deletion
diff --git a/modules/ui.py b/modules/ui.py index b79d24ee..5d2f5bad 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -368,7 +368,7 @@ def update_token_counter(text, steps): flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules)
prompts = [prompt_text for step, prompt_text in flat_prompts]
- tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1])
+ token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0])
style_class = ' class="red"' if (token_count > max_length) else ""
return f"<span {style_class}>{token_count}/{max_length}</span>"
|