aboutsummaryrefslogtreecommitdiffstats
path: root/modules/prompt_parser.py
diff options
context:
space:
mode:
authorDepFA <35278260+dfaker@users.noreply.github.com>2022-10-05 21:11:30 +0000
committerGitHub <noreply@github.com>2022-10-05 21:11:30 +0000
commit34c358d10d52817f7a889ae4c52096ee654f3fe6 (patch)
treecc9db251da66f6147b903ac5cd2cda44933ce3b5 /modules/prompt_parser.py
parentf8e41a96bb30a04dd5e294c7e1178c1c3b09d481 (diff)
downloadstable-diffusion-webui-gfx803-34c358d10d52817f7a889ae4c52096ee654f3fe6.tar.gz
stable-diffusion-webui-gfx803-34c358d10d52817f7a889ae4c52096ee654f3fe6.tar.bz2
stable-diffusion-webui-gfx803-34c358d10d52817f7a889ae4c52096ee654f3fe6.zip
use typing.List in prompt_parser.py for wider python version support
Diffstat (limited to 'modules/prompt_parser.py')
-rw-r--r--modules/prompt_parser.py8
1 file changed, 4 insertions, 4 deletions
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index 800b12c7..fdfa21ae 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -1,6 +1,6 @@
import re
from collections import namedtuple
-
+from typing import List
import lark
# a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]"
@@ -175,14 +175,14 @@ def get_multicond_prompt_list(prompts):
class ComposableScheduledPromptConditioning:
def __init__(self, schedules, weight=1.0):
- self.schedules: list[ScheduledPromptConditioning] = schedules
+ self.schedules: List[ScheduledPromptConditioning] = schedules
self.weight: float = weight
class MulticondLearnedConditioning:
def __init__(self, shape, batch):
self.shape: tuple = shape # the shape field is needed to send this object to DDIM/PLMS
- self.batch: list[list[ComposableScheduledPromptConditioning]] = batch
+ self.batch: List[List[ComposableScheduledPromptConditioning]] = batch
def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearnedConditioning:
@@ -203,7 +203,7 @@ def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearne
return MulticondLearnedConditioning(shape=(len(prompts),), batch=res)
-def reconstruct_cond_batch(c: list[list[ScheduledPromptConditioning]], current_step):
+def reconstruct_cond_batch(c: List[List[ScheduledPromptConditioning]], current_step):
param = c[0][0].cond
res = torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype)
for i, cond_schedule in enumerate(c):