From b02926df1393df311db734af149fb9faf4389cbe Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sat, 22 Oct 2022 20:24:04 -0300 Subject: Moved moodels to their own file and extracted base64 conversion to its own function --- modules/api/models.py | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 modules/api/models.py (limited to 'modules/api/models.py') diff --git a/modules/api/models.py b/modules/api/models.py new file mode 100644 index 00000000..a7d247d8 --- /dev/null +++ b/modules/api/models.py @@ -0,0 +1,8 @@ +from pydantic import BaseModel, Field, Json + +class TextToImageResponse(BaseModel): + images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") + parameters: Json + info: Json + + \ No newline at end of file -- cgit v1.2.3 From 28e26c2bef217ae82eb9e980cceb3f67ef22e109 Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sat, 22 Oct 2022 23:13:32 -0300 Subject: Add "extra" single image operation - Separate extra modes into 3 endpoints so the user ddoesn't ahve to handle so many unused parameters. - Add response model for codumentation --- modules/api/api.py | 43 ++++++++++++++++++++++++++++++++++++++----- modules/api/models.py | 26 +++++++++++++++++++++++++- 2 files changed, 63 insertions(+), 6 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index c17d7580..3b804373 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -8,20 +8,42 @@ import json import io import base64 from modules.api.models import * +from PIL import Image +from modules.extras import run_extras + +def upscaler_to_index(name: str): + try: + return [x.name.lower() for x in shared.sd_upscalers].index(name.lower()) + except: + raise HTTPException(status_code=400, detail="Upscaler not found") sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None) -def img_to_base64(img): +def img_to_base64(img: str): buffer = io.BytesIO() img.save(buffer, format="png") return base64.b64encode(buffer.getvalue()) +def base64_to_bytes(base64Img: str): + if "," in base64Img: + base64Img = base64Img.split(",")[1] + return io.BytesIO(base64.b64decode(base64Img)) + +def base64_to_images(base64Imgs: list[str]): + imgs = [] + for img in base64Imgs: + img = Image.open(base64_to_bytes(img)) + imgs.append(img) + return imgs + + class Api: def __init__(self, app, queue_lock): self.router = APIRouter() self.app = app self.queue_lock = queue_lock - self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"]) + self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse) + self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) def text2imgapi(self, txt2imgreq: StableDiffusionProcessingAPI ): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -45,12 +67,23 @@ class Api: return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=json.dumps(processed.info)) - def img2imgapi(self): raise NotImplementedError - def extrasapi(self): - raise NotImplementedError + def extras_single_image_api(self, req: ExtrasSingleImageRequest): + upscaler1Index = upscaler_to_index(req.upscaler_1) + upscaler2Index = upscaler_to_index(req.upscaler_2) + + reqDict = vars(req) + reqDict.pop('upscaler_1') + reqDict.pop('upscaler_2') + + reqDict['image'] = 
base64_to_images([reqDict['image']])[0] + + with self.queue_lock: + result = run_extras(**reqDict, extras_upscaler_1=upscaler1Index, extras_upscaler_2=upscaler2Index, extras_mode=0, image_folder="", input_dir="", output_dir="") + + return ExtrasSingleImageResponse(image="data:image/png;base64,"+img_to_base64(result[0]), html_info_x=result[1], html_info=result[2]) def pnginfoapi(self): raise NotImplementedError diff --git a/modules/api/models.py b/modules/api/models.py index a7d247d8..dcf1ab54 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -1,8 +1,32 @@ from pydantic import BaseModel, Field, Json +from typing_extensions import Literal +from modules.shared import sd_upscalers class TextToImageResponse(BaseModel): images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") parameters: Json info: Json - \ No newline at end of file +class ExtrasBaseRequest(BaseModel): + resize_mode: Literal[0, 1] = Field(default=0, title="Resize Mode", description="Sets the resize mode: 0 to upscale by upscaling_resize amount, 1 to upscale up to upscaling_resize_h x upscaling_resize_w.") + show_extras_results: bool = Field(default=True, title="Show results", description="Should the backend return the generated image?") + gfpgan_visibility: float = Field(default=0, title="GFPGAN Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of GFPGAN, values should be between 0 and 1.") + codeformer_visibility: float = Field(default=0, title="CodeFormer Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of CodeFormer, values should be between 0 and 1.") + codeformer_weight: float = Field(default=0, title="CodeFormer Weight", ge=0, le=1, allow_inf_nan=False, description="Sets the weight of CodeFormer, values should be between 0 and 1.") + upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=4, description="By how much to upscale the image, only used when resize_mode=0.") + upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.") + upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. 
Only used when resize_mode=1.") + upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the choosen size?") + upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}") + upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}") + extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.") + +class ExtraBaseResponse(BaseModel): + html_info_x: str + html_info: str + +class ExtrasSingleImageRequest(ExtrasBaseRequest): + image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.") + +class ExtrasSingleImageResponse(ExtraBaseResponse): + image: str = Field(default=None, title="Image", description="The generated image in base64 format.") \ No newline at end of file -- cgit v1.2.3 From 4ff852ffb50859f2eae75375cab94dd790a46886 Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sun, 23 Oct 2022 13:07:59 -0300 Subject: Add batch processing "extras" endpoint --- modules/api/api.py | 25 +++++++++++++++++++++++-- modules/api/models.py | 15 ++++++++++++++- 2 files changed, 37 insertions(+), 3 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 3b804373..528134a8 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -10,6 +10,7 @@ import base64 from modules.api.models import * from PIL import Image from modules.extras import run_extras +from gradio import processing_utils def upscaler_to_index(name: str): try: @@ -44,6 +45,7 @@ class Api: self.queue_lock = queue_lock self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse) self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) + self.app.add_api_route("/sdapi/v1/extra-batch-image", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) def text2imgapi(self, txt2imgreq: StableDiffusionProcessingAPI ): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -78,12 +80,31 @@ class Api: reqDict.pop('upscaler_1') reqDict.pop('upscaler_2') - reqDict['image'] = base64_to_images([reqDict['image']])[0] + reqDict['image'] = processing_utils.decode_base64_to_file(reqDict['image']) with self.queue_lock: result = run_extras(**reqDict, extras_upscaler_1=upscaler1Index, extras_upscaler_2=upscaler2Index, extras_mode=0, image_folder="", input_dir="", output_dir="") - return ExtrasSingleImageResponse(image="data:image/png;base64,"+img_to_base64(result[0]), html_info_x=result[1], html_info=result[2]) + return ExtrasSingleImageResponse(image=processing_utils.encode_pil_to_base64(result[0]), html_info_x=result[1], html_info=result[2]) + + def extras_batch_images_api(self, req: ExtrasBatchImagesRequest): + upscaler1Index = upscaler_to_index(req.upscaler_1) + upscaler2Index = upscaler_to_index(req.upscaler_2) + + reqDict = vars(req) + reqDict.pop('upscaler_1') + reqDict.pop('upscaler_2') + + reqDict['image_folder'] = 
list(map(processing_utils.decode_base64_to_file, reqDict['imageList'])) + reqDict.pop('imageList') + + with self.queue_lock: + result = run_extras(**reqDict, extras_upscaler_1=upscaler1Index, extras_upscaler_2=upscaler2Index, extras_mode=1, image="", input_dir="", output_dir="") + + return ExtrasBatchImagesResponse(images=list(map(processing_utils.encode_pil_to_base64, result[0])), html_info_x=result[1], html_info=result[2]) + + def extras_folder_processing_api(self): + raise NotImplementedError def pnginfoapi(self): raise NotImplementedError diff --git a/modules/api/models.py b/modules/api/models.py index dcf1ab54..bbd0ef53 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -29,4 +29,17 @@ class ExtrasSingleImageRequest(ExtrasBaseRequest): image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.") class ExtrasSingleImageResponse(ExtraBaseResponse): - image: str = Field(default=None, title="Image", description="The generated image in base64 format.") \ No newline at end of file + image: str = Field(default=None, title="Image", description="The generated image in base64 format.") + +class SerializableImage(BaseModel): + path: str = Field(title="Path", description="The image's path ()") + +class ImageItem(BaseModel): + data: str = Field(title="image data") + name: str = Field(title="filename") + +class ExtrasBatchImagesRequest(ExtrasBaseRequest): + imageList: list[str] = Field(title="Images", description="List of images to work on. Must be Base64 strings") + +class ExtrasBatchImagesResponse(ExtraBaseResponse): + images: list[str] = Field(title="Images", description="The generated images in base64 format.") \ No newline at end of file -- cgit v1.2.3 From e0ca4dfbc10e0af8dfc4185e5e758f33fd2f0d81 Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sun, 23 Oct 2022 15:13:37 -0300 Subject: Update endpoints to use gradio's own utils functions --- modules/api/api.py | 75 +++++++++++++++++++++++++-------------------------- modules/api/models.py | 4 +-- 2 files changed, 38 insertions(+), 41 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 3f490ce2..3acb1f36 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -20,27 +20,27 @@ def upscaler_to_index(name: str): sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None) -def img_to_base64(img: str): - buffer = io.BytesIO() - img.save(buffer, format="png") - return base64.b64encode(buffer.getvalue()) - -def base64_to_bytes(base64Img: str): - if "," in base64Img: - base64Img = base64Img.split(",")[1] - return io.BytesIO(base64.b64decode(base64Img)) - -def base64_to_images(base64Imgs: list[str]): - imgs = [] - for img in base64Imgs: - img = Image.open(base64_to_bytes(img)) - imgs.append(img) - return imgs +# def img_to_base64(img: str): +# buffer = io.BytesIO() +# img.save(buffer, format="png") +# return base64.b64encode(buffer.getvalue()) + +# def base64_to_bytes(base64Img: str): +# if "," in base64Img: +# base64Img = base64Img.split(",")[1] +# return io.BytesIO(base64.b64decode(base64Img)) + +# def base64_to_images(base64Imgs: list[str]): +# imgs = [] +# for img in base64Imgs: +# img = Image.open(base64_to_bytes(img)) +# imgs.append(img) +# return imgs class ImageToImageResponse(BaseModel): images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") - parameters: Json - info: Json + 
parameters: dict + info: str class Api: @@ -49,17 +49,17 @@ class Api: self.app = app self.queue_lock = queue_lock self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse) - self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"]) + self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse) self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) self.app.add_api_route("/sdapi/v1/extra-batch-image", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) - def __base64_to_image(self, base64_string): - # if has a comma, deal with prefix - if "," in base64_string: - base64_string = base64_string.split(",")[1] - imgdata = base64.b64decode(base64_string) - # convert base64 to PIL image - return Image.open(io.BytesIO(imgdata)) + # def __base64_to_image(self, base64_string): + # # if has a comma, deal with prefix + # if "," in base64_string: + # base64_string = base64_string.split(",")[1] + # imgdata = base64.b64decode(base64_string) + # # convert base64 to PIL image + # return Image.open(io.BytesIO(imgdata)) def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -79,11 +79,9 @@ class Api: with self.queue_lock: processed = process_images(p) - b64images = list(map(img_to_base64, processed.images)) - - return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=json.dumps(processed.info)) - + b64images = list(map(processing_utils.encode_pil_to_base64, processed.images)) + return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=processed.info) def img2imgapi(self, img2imgreq: StableDiffusionImg2ImgProcessingAPI): sampler_index = sampler_to_index(img2imgreq.sampler_index) @@ -98,7 +96,7 @@ class Api: mask = img2imgreq.mask if mask: - mask = self.__base64_to_image(mask) + mask = processing_utils.decode_base64_to_image(mask) populate = img2imgreq.copy(update={ # Override __init__ params @@ -113,7 +111,7 @@ class Api: imgs = [] for img in init_images: - img = self.__base64_to_image(img) + img = processing_utils.decode_base64_to_image(img) imgs = [img] * p.batch_size p.init_images = imgs @@ -121,13 +119,12 @@ class Api: with self.queue_lock: processed = process_images(p) - b64images = [] - for i in processed.images: - buffer = io.BytesIO() - i.save(buffer, format="png") - b64images.append(base64.b64encode(buffer.getvalue())) - - return ImageToImageResponse(images=b64images, parameters=json.dumps(vars(img2imgreq)), info=json.dumps(processed.info)) + b64images = list(map(processing_utils.encode_pil_to_base64, processed.images)) + # for i in processed.images: + # buffer = io.BytesIO() + # i.save(buffer, format="png") + # b64images.append(base64.b64encode(buffer.getvalue())) + return ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.info) def extras_single_image_api(self, req: ExtrasSingleImageRequest): upscaler1Index = upscaler_to_index(req.upscaler_1) diff --git a/modules/api/models.py b/modules/api/models.py index bbd0ef53..209f8af5 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -4,8 +4,8 @@ from modules.shared import sd_upscalers class TextToImageResponse(BaseModel): images: list[str] = Field(default=None, title="Image", description="The generated image 
in base64 format.") - parameters: Json - info: Json + parameters: str + info: str class ExtrasBaseRequest(BaseModel): resize_mode: Literal[0, 1] = Field(default=0, title="Resize Mode", description="Sets the resize mode: 0 to upscale by upscaling_resize amount, 1 to upscale up to upscaling_resize_h x upscaling_resize_w.") -- cgit v1.2.3 From 866b36d705a338d299aba385788729d60f7d48c8 Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sun, 23 Oct 2022 15:35:49 -0300 Subject: Move processing's models into models.py It didn't make sense to have two differente files for the same and "models" is a more descriptive name. --- modules/api/api.py | 57 ++++------------------- modules/api/models.py | 112 +++++++++++++++++++++++++++++++++++++++++++++- modules/api/processing.py | 106 ------------------------------------------- 3 files changed, 119 insertions(+), 156 deletions(-) delete mode 100644 modules/api/processing.py (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 3acb1f36..20e85e82 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -1,16 +1,11 @@ -from modules.api.processing import StableDiffusionTxt2ImgProcessingAPI, StableDiffusionImg2ImgProcessingAPI -from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images -from modules.sd_samplers import all_samplers -import modules.shared as shared import uvicorn +from gradio import processing_utils from fastapi import APIRouter, HTTPException -import json -import io -import base64 +import modules.shared as shared from modules.api.models import * -from PIL import Image +from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images +from modules.sd_samplers import all_samplers from modules.extras import run_extras -from gradio import processing_utils def upscaler_to_index(name: str): try: @@ -20,29 +15,6 @@ def upscaler_to_index(name: str): sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None) -# def img_to_base64(img: str): -# buffer = io.BytesIO() -# img.save(buffer, format="png") -# return base64.b64encode(buffer.getvalue()) - -# def base64_to_bytes(base64Img: str): -# if "," in base64Img: -# base64Img = base64Img.split(",")[1] -# return io.BytesIO(base64.b64decode(base64Img)) - -# def base64_to_images(base64Imgs: list[str]): -# imgs = [] -# for img in base64Imgs: -# img = Image.open(base64_to_bytes(img)) -# imgs.append(img) -# return imgs - -class ImageToImageResponse(BaseModel): - images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") - parameters: dict - info: str - - class Api: def __init__(self, app, queue_lock): self.router = APIRouter() @@ -51,15 +23,7 @@ class Api: self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse) self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse) self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) - self.app.add_api_route("/sdapi/v1/extra-batch-image", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) - - # def __base64_to_image(self, base64_string): - # # if has a comma, deal with prefix - # if "," in base64_string: - # base64_string = base64_string.split(",")[1] - # imgdata = 
base64.b64decode(base64_string) - # # convert base64 to PIL image - # return Image.open(io.BytesIO(imgdata)) + self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -81,7 +45,7 @@ class Api: b64images = list(map(processing_utils.encode_pil_to_base64, processed.images)) - return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=processed.info) + return TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.info) def img2imgapi(self, img2imgreq: StableDiffusionImg2ImgProcessingAPI): sampler_index = sampler_to_index(img2imgreq.sampler_index) @@ -120,10 +84,7 @@ class Api: processed = process_images(p) b64images = list(map(processing_utils.encode_pil_to_base64, processed.images)) - # for i in processed.images: - # buffer = io.BytesIO() - # i.save(buffer, format="png") - # b64images.append(base64.b64encode(buffer.getvalue())) + return ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.info) def extras_single_image_api(self, req: ExtrasSingleImageRequest): @@ -134,12 +95,12 @@ class Api: reqDict.pop('upscaler_1') reqDict.pop('upscaler_2') - reqDict['image'] = processing_utils.decode_base64_to_file(reqDict['image']) + reqDict['image'] = processing_utils.decode_base64_to_image(reqDict['image']) with self.queue_lock: result = run_extras(**reqDict, extras_upscaler_1=upscaler1Index, extras_upscaler_2=upscaler2Index, extras_mode=0, image_folder="", input_dir="", output_dir="") - return ExtrasSingleImageResponse(image=processing_utils.encode_pil_to_base64(result[0]), html_info_x=result[1], html_info=result[2]) + return ExtrasSingleImageResponse(image=processing_utils.encode_pil_to_base64(result[0][0]), html_info_x=result[1], html_info=result[2]) def extras_batch_images_api(self, req: ExtrasBatchImagesRequest): upscaler1Index = upscaler_to_index(req.upscaler_1) diff --git a/modules/api/models.py b/modules/api/models.py index 209f8af5..362e6277 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -1,10 +1,118 @@ -from pydantic import BaseModel, Field, Json +import inspect +from pydantic import BaseModel, Field, Json, create_model +from typing import Any, Optional from typing_extensions import Literal +from inflection import underscore +from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img from modules.shared import sd_upscalers +API_NOT_ALLOWED = [ + "self", + "kwargs", + "sd_model", + "outpath_samples", + "outpath_grids", + "sampler_index", + "do_not_save_samples", + "do_not_save_grid", + "extra_generation_params", + "overlay_images", + "do_not_reload_embeddings", + "seed_enable_extras", + "prompt_for_display", + "sampler_noise_scheduler_override", + "ddim_discretize" +] + +class ModelDef(BaseModel): + """Assistance Class for Pydantic Dynamic Model Generation""" + + field: str + field_alias: str + field_type: Any + field_value: Any + + +class PydanticModelGenerator: + """ + Takes in created classes and stubs them out in a way FastAPI/Pydantic is happy about: + source_data is a snapshot of the default values produced by the class + params are the names of the actual keys required by __init__ + """ + + def __init__( + self, + model_name: str = None, + class_instance = None, + additional_fields = None, + ): + def field_type_generator(k, 
v): + # field_type = str if not overrides.get(k) else overrides[k]["type"] + # print(k, v.annotation, v.default) + field_type = v.annotation + + return Optional[field_type] + + def merge_class_params(class_): + all_classes = list(filter(lambda x: x is not object, inspect.getmro(class_))) + parameters = {} + for classes in all_classes: + parameters = {**parameters, **inspect.signature(classes.__init__).parameters} + return parameters + + + self._model_name = model_name + self._class_data = merge_class_params(class_instance) + self._model_def = [ + ModelDef( + field=underscore(k), + field_alias=k, + field_type=field_type_generator(k, v), + field_value=v.default + ) + for (k,v) in self._class_data.items() if k not in API_NOT_ALLOWED + ] + + for fields in additional_fields: + self._model_def.append(ModelDef( + field=underscore(fields["key"]), + field_alias=fields["key"], + field_type=fields["type"], + field_value=fields["default"])) + + def generate_model(self): + """ + Creates a pydantic BaseModel + from the json and overrides provided at initialization + """ + fields = { + d.field: (d.field_type, Field(default=d.field_value, alias=d.field_alias)) for d in self._model_def + } + DynamicModel = create_model(self._model_name, **fields) + DynamicModel.__config__.allow_population_by_field_name = True + DynamicModel.__config__.allow_mutation = True + return DynamicModel + +StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator( + "StableDiffusionProcessingTxt2Img", + StableDiffusionProcessingTxt2Img, + [{"key": "sampler_index", "type": str, "default": "Euler"}] +).generate_model() + +StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( + "StableDiffusionProcessingImg2Img", + StableDiffusionProcessingImg2Img, + [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}] +).generate_model() + class TextToImageResponse(BaseModel): images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") - parameters: str + parameters: dict + info: str + +class ImageToImageResponse(BaseModel): + images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") + parameters: dict info: str class ExtrasBaseRequest(BaseModel): diff --git a/modules/api/processing.py b/modules/api/processing.py deleted file mode 100644 index f551fa35..00000000 --- a/modules/api/processing.py +++ /dev/null @@ -1,106 +0,0 @@ -from array import array -from inflection import underscore -from typing import Any, Dict, Optional -from pydantic import BaseModel, Field, create_model -from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img -import inspect - - -API_NOT_ALLOWED = [ - "self", - "kwargs", - "sd_model", - "outpath_samples", - "outpath_grids", - "sampler_index", - "do_not_save_samples", - "do_not_save_grid", - "extra_generation_params", - "overlay_images", - "do_not_reload_embeddings", - "seed_enable_extras", - "prompt_for_display", - "sampler_noise_scheduler_override", - "ddim_discretize" -] - -class ModelDef(BaseModel): - """Assistance Class for Pydantic Dynamic Model Generation""" - - field: str - field_alias: str - field_type: Any - field_value: Any - - -class PydanticModelGenerator: - """ - Takes in created classes and stubs them out in a way FastAPI/Pydantic is happy about: - source_data is a snapshot of the default values 
produced by the class - params are the names of the actual keys required by __init__ - """ - - def __init__( - self, - model_name: str = None, - class_instance = None, - additional_fields = None, - ): - def field_type_generator(k, v): - # field_type = str if not overrides.get(k) else overrides[k]["type"] - # print(k, v.annotation, v.default) - field_type = v.annotation - - return Optional[field_type] - - def merge_class_params(class_): - all_classes = list(filter(lambda x: x is not object, inspect.getmro(class_))) - parameters = {} - for classes in all_classes: - parameters = {**parameters, **inspect.signature(classes.__init__).parameters} - return parameters - - - self._model_name = model_name - self._class_data = merge_class_params(class_instance) - self._model_def = [ - ModelDef( - field=underscore(k), - field_alias=k, - field_type=field_type_generator(k, v), - field_value=v.default - ) - for (k,v) in self._class_data.items() if k not in API_NOT_ALLOWED - ] - - for fields in additional_fields: - self._model_def.append(ModelDef( - field=underscore(fields["key"]), - field_alias=fields["key"], - field_type=fields["type"], - field_value=fields["default"])) - - def generate_model(self): - """ - Creates a pydantic BaseModel - from the json and overrides provided at initialization - """ - fields = { - d.field: (d.field_type, Field(default=d.field_value, alias=d.field_alias)) for d in self._model_def - } - DynamicModel = create_model(self._model_name, **fields) - DynamicModel.__config__.allow_population_by_field_name = True - DynamicModel.__config__.allow_mutation = True - return DynamicModel - -StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator( - "StableDiffusionProcessingTxt2Img", - StableDiffusionProcessingTxt2Img, - [{"key": "sampler_index", "type": str, "default": "Euler"}] -).generate_model() - -StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( - "StableDiffusionProcessingImg2Img", - StableDiffusionProcessingImg2Img, - [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}] -).generate_model() \ No newline at end of file -- cgit v1.2.3 From 1e625624ba6ab3dfc167f0a5226780bb9b50fb58 Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sun, 23 Oct 2022 16:01:16 -0300 Subject: Add folder processing endpoint Also minor refactor --- modules/api/api.py | 56 +++++++++++++++++++++++++++------------------------ modules/api/models.py | 6 +++++- 2 files changed, 35 insertions(+), 27 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 20e85e82..7b4fbe29 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -1,5 +1,5 @@ import uvicorn -from gradio import processing_utils +from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image from fastapi import APIRouter, HTTPException import modules.shared as shared from modules.api.models import * @@ -11,10 +11,18 @@ def upscaler_to_index(name: str): try: return [x.name.lower() for x in shared.sd_upscalers].index(name.lower()) except: - raise HTTPException(status_code=400, detail="Upscaler not found") + raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be on of these: {' , '.join([x.name for x in sd_upscalers])}") sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None) +def 
setUpscalers(req: dict): + reqDict = vars(req) + reqDict['extras_upscaler_1'] = upscaler_to_index(req.upscaler_1) + reqDict['extras_upscaler_2'] = upscaler_to_index(req.upscaler_2) + reqDict.pop('upscaler_1') + reqDict.pop('upscaler_2') + return reqDict + class Api: def __init__(self, app, queue_lock): self.router = APIRouter() @@ -24,6 +32,7 @@ class Api: self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse) self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) + self.app.add_api_route("/sdapi/v1/extra-folder-images", self.extras_folder_processing_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -43,7 +52,7 @@ class Api: with self.queue_lock: processed = process_images(p) - b64images = list(map(processing_utils.encode_pil_to_base64, processed.images)) + b64images = list(map(encode_pil_to_base64, processed.images)) return TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.info) @@ -60,7 +69,7 @@ class Api: mask = img2imgreq.mask if mask: - mask = processing_utils.decode_base64_to_image(mask) + mask = decode_base64_to_image(mask) populate = img2imgreq.copy(update={ # Override __init__ params @@ -75,7 +84,7 @@ class Api: imgs = [] for img in init_images: - img = processing_utils.decode_base64_to_image(img) + img = decode_base64_to_image(img) imgs = [img] * p.batch_size p.init_images = imgs @@ -83,43 +92,38 @@ class Api: with self.queue_lock: processed = process_images(p) - b64images = list(map(processing_utils.encode_pil_to_base64, processed.images)) + b64images = list(map(encode_pil_to_base64, processed.images)) return ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.info) def extras_single_image_api(self, req: ExtrasSingleImageRequest): - upscaler1Index = upscaler_to_index(req.upscaler_1) - upscaler2Index = upscaler_to_index(req.upscaler_2) - - reqDict = vars(req) - reqDict.pop('upscaler_1') - reqDict.pop('upscaler_2') + reqDict = setUpscalers(req) - reqDict['image'] = processing_utils.decode_base64_to_image(reqDict['image']) + reqDict['image'] = decode_base64_to_image(reqDict['image']) with self.queue_lock: - result = run_extras(**reqDict, extras_upscaler_1=upscaler1Index, extras_upscaler_2=upscaler2Index, extras_mode=0, image_folder="", input_dir="", output_dir="") + result = run_extras(extras_mode=0, image_folder="", input_dir="", output_dir="", **reqDict) - return ExtrasSingleImageResponse(image=processing_utils.encode_pil_to_base64(result[0][0]), html_info_x=result[1], html_info=result[2]) + return ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info_x=result[1], html_info=result[2]) def extras_batch_images_api(self, req: ExtrasBatchImagesRequest): - upscaler1Index = upscaler_to_index(req.upscaler_1) - upscaler2Index = upscaler_to_index(req.upscaler_2) + reqDict = setUpscalers(req) - reqDict = vars(req) - reqDict.pop('upscaler_1') - reqDict.pop('upscaler_2') - - reqDict['image_folder'] = list(map(processing_utils.decode_base64_to_file, reqDict['imageList'])) + reqDict['image_folder'] = list(map(decode_base64_to_file, reqDict['imageList'])) 
reqDict.pop('imageList') with self.queue_lock: - result = run_extras(**reqDict, extras_upscaler_1=upscaler1Index, extras_upscaler_2=upscaler2Index, extras_mode=1, image="", input_dir="", output_dir="") + result = run_extras(extras_mode=1, image="", input_dir="", output_dir="", **reqDict) - return ExtrasBatchImagesResponse(images=list(map(processing_utils.encode_pil_to_base64, result[0])), html_info_x=result[1], html_info=result[2]) + return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info_x=result[1], html_info=result[2]) - def extras_folder_processing_api(self): - raise NotImplementedError + def extras_folder_processing_api(self, req:ExtrasFoldersRequest): + reqDict = setUpscalers(req) + + with self.queue_lock: + result = run_extras(extras_mode=2, image=None, image_folder=None, **reqDict) + + return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info_x=result[1], html_info=result[2]) def pnginfoapi(self): raise NotImplementedError diff --git a/modules/api/models.py b/modules/api/models.py index 362e6277..6f096807 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -150,4 +150,8 @@ class ExtrasBatchImagesRequest(ExtrasBaseRequest): imageList: list[str] = Field(title="Images", description="List of images to work on. Must be Base64 strings") class ExtrasBatchImagesResponse(ExtraBaseResponse): - images: list[str] = Field(title="Images", description="The generated images in base64 format.") \ No newline at end of file + images: list[str] = Field(title="Images", description="The generated images in base64 format.") + +class ExtrasFoldersRequest(ExtrasBaseRequest): + input_dir: str = Field(title="Input directory", description="Directory path from where to take the images") + output_dir: str = Field(title="Output directory", description="Directory path to put the processsed images into") -- cgit v1.2.3 From 90f02c75220d187e075203a4e3b450bfba392c4d Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sun, 23 Oct 2022 16:03:30 -0300 Subject: Remove unused field and class --- modules/api/api.py | 6 +++--- modules/api/models.py | 6 +----- 2 files changed, 4 insertions(+), 8 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 7b4fbe29..799e3701 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -104,7 +104,7 @@ class Api: with self.queue_lock: result = run_extras(extras_mode=0, image_folder="", input_dir="", output_dir="", **reqDict) - return ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info_x=result[1], html_info=result[2]) + return ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1]) def extras_batch_images_api(self, req: ExtrasBatchImagesRequest): reqDict = setUpscalers(req) @@ -115,7 +115,7 @@ class Api: with self.queue_lock: result = run_extras(extras_mode=1, image="", input_dir="", output_dir="", **reqDict) - return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info_x=result[1], html_info=result[2]) + return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) def extras_folder_processing_api(self, req:ExtrasFoldersRequest): reqDict = setUpscalers(req) @@ -123,7 +123,7 @@ class Api: with self.queue_lock: result = run_extras(extras_mode=2, image=None, image_folder=None, **reqDict) - return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info_x=result[1], 
html_info=result[2]) + return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) def pnginfoapi(self): raise NotImplementedError diff --git a/modules/api/models.py b/modules/api/models.py index 6f096807..e461d397 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -130,8 +130,7 @@ class ExtrasBaseRequest(BaseModel): extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.") class ExtraBaseResponse(BaseModel): - html_info_x: str - html_info: str + html_info: str = Field(title="HTML info", description="A series of HTML tags containing the process info.") class ExtrasSingleImageRequest(ExtrasBaseRequest): image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.") @@ -139,9 +138,6 @@ class ExtrasSingleImageRequest(ExtrasBaseRequest): class ExtrasSingleImageResponse(ExtraBaseResponse): image: str = Field(default=None, title="Image", description="The generated image in base64 format.") -class SerializableImage(BaseModel): - path: str = Field(title="Path", description="The image's path ()") - class ImageItem(BaseModel): data: str = Field(title="image data") name: str = Field(title="filename") -- cgit v1.2.3 From 2c05e06ea770b013de55636f15aae0b8e60e0590 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 24 Oct 2022 14:11:14 +0300 Subject: rename api/processing to api/models for #3511 --- modules/api/api.py | 2 +- modules/api/models.py | 106 ++++++++++++++++++++++++++++++++++++++++++++++ modules/api/processing.py | 106 ---------------------------------------------- 3 files changed, 107 insertions(+), 107 deletions(-) create mode 100644 modules/api/models.py delete mode 100644 modules/api/processing.py (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 3caa83a4..a860a964 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -1,4 +1,4 @@ -from modules.api.processing import StableDiffusionTxt2ImgProcessingAPI, StableDiffusionImg2ImgProcessingAPI +from modules.api.models import StableDiffusionTxt2ImgProcessingAPI, StableDiffusionImg2ImgProcessingAPI from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.sd_samplers import all_samplers from modules.extras import run_pnginfo diff --git a/modules/api/models.py b/modules/api/models.py new file mode 100644 index 00000000..f551fa35 --- /dev/null +++ b/modules/api/models.py @@ -0,0 +1,106 @@ +from array import array +from inflection import underscore +from typing import Any, Dict, Optional +from pydantic import BaseModel, Field, create_model +from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img +import inspect + + +API_NOT_ALLOWED = [ + "self", + "kwargs", + "sd_model", + "outpath_samples", + "outpath_grids", + "sampler_index", + "do_not_save_samples", + "do_not_save_grid", + "extra_generation_params", + "overlay_images", + "do_not_reload_embeddings", + "seed_enable_extras", + "prompt_for_display", + "sampler_noise_scheduler_override", + "ddim_discretize" +] + +class ModelDef(BaseModel): + """Assistance Class for Pydantic Dynamic Model Generation""" + + field: str + field_alias: str + field_type: Any + field_value: Any + + +class PydanticModelGenerator: + """ + Takes in created 
classes and stubs them out in a way FastAPI/Pydantic is happy about: + source_data is a snapshot of the default values produced by the class + params are the names of the actual keys required by __init__ + """ + + def __init__( + self, + model_name: str = None, + class_instance = None, + additional_fields = None, + ): + def field_type_generator(k, v): + # field_type = str if not overrides.get(k) else overrides[k]["type"] + # print(k, v.annotation, v.default) + field_type = v.annotation + + return Optional[field_type] + + def merge_class_params(class_): + all_classes = list(filter(lambda x: x is not object, inspect.getmro(class_))) + parameters = {} + for classes in all_classes: + parameters = {**parameters, **inspect.signature(classes.__init__).parameters} + return parameters + + + self._model_name = model_name + self._class_data = merge_class_params(class_instance) + self._model_def = [ + ModelDef( + field=underscore(k), + field_alias=k, + field_type=field_type_generator(k, v), + field_value=v.default + ) + for (k,v) in self._class_data.items() if k not in API_NOT_ALLOWED + ] + + for fields in additional_fields: + self._model_def.append(ModelDef( + field=underscore(fields["key"]), + field_alias=fields["key"], + field_type=fields["type"], + field_value=fields["default"])) + + def generate_model(self): + """ + Creates a pydantic BaseModel + from the json and overrides provided at initialization + """ + fields = { + d.field: (d.field_type, Field(default=d.field_value, alias=d.field_alias)) for d in self._model_def + } + DynamicModel = create_model(self._model_name, **fields) + DynamicModel.__config__.allow_population_by_field_name = True + DynamicModel.__config__.allow_mutation = True + return DynamicModel + +StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator( + "StableDiffusionProcessingTxt2Img", + StableDiffusionProcessingTxt2Img, + [{"key": "sampler_index", "type": str, "default": "Euler"}] +).generate_model() + +StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( + "StableDiffusionProcessingImg2Img", + StableDiffusionProcessingImg2Img, + [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}] +).generate_model() \ No newline at end of file diff --git a/modules/api/processing.py b/modules/api/processing.py deleted file mode 100644 index f551fa35..00000000 --- a/modules/api/processing.py +++ /dev/null @@ -1,106 +0,0 @@ -from array import array -from inflection import underscore -from typing import Any, Dict, Optional -from pydantic import BaseModel, Field, create_model -from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img -import inspect - - -API_NOT_ALLOWED = [ - "self", - "kwargs", - "sd_model", - "outpath_samples", - "outpath_grids", - "sampler_index", - "do_not_save_samples", - "do_not_save_grid", - "extra_generation_params", - "overlay_images", - "do_not_reload_embeddings", - "seed_enable_extras", - "prompt_for_display", - "sampler_noise_scheduler_override", - "ddim_discretize" -] - -class ModelDef(BaseModel): - """Assistance Class for Pydantic Dynamic Model Generation""" - - field: str - field_alias: str - field_type: Any - field_value: Any - - -class PydanticModelGenerator: - """ - Takes in created classes and stubs them out in a way FastAPI/Pydantic is happy about: - source_data is a snapshot of the default values produced by the class - params are the 
names of the actual keys required by __init__ - """ - - def __init__( - self, - model_name: str = None, - class_instance = None, - additional_fields = None, - ): - def field_type_generator(k, v): - # field_type = str if not overrides.get(k) else overrides[k]["type"] - # print(k, v.annotation, v.default) - field_type = v.annotation - - return Optional[field_type] - - def merge_class_params(class_): - all_classes = list(filter(lambda x: x is not object, inspect.getmro(class_))) - parameters = {} - for classes in all_classes: - parameters = {**parameters, **inspect.signature(classes.__init__).parameters} - return parameters - - - self._model_name = model_name - self._class_data = merge_class_params(class_instance) - self._model_def = [ - ModelDef( - field=underscore(k), - field_alias=k, - field_type=field_type_generator(k, v), - field_value=v.default - ) - for (k,v) in self._class_data.items() if k not in API_NOT_ALLOWED - ] - - for fields in additional_fields: - self._model_def.append(ModelDef( - field=underscore(fields["key"]), - field_alias=fields["key"], - field_type=fields["type"], - field_value=fields["default"])) - - def generate_model(self): - """ - Creates a pydantic BaseModel - from the json and overrides provided at initialization - """ - fields = { - d.field: (d.field_type, Field(default=d.field_value, alias=d.field_alias)) for d in self._model_def - } - DynamicModel = create_model(self._model_name, **fields) - DynamicModel.__config__.allow_population_by_field_name = True - DynamicModel.__config__.allow_mutation = True - return DynamicModel - -StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator( - "StableDiffusionProcessingTxt2Img", - StableDiffusionProcessingTxt2Img, - [{"key": "sampler_index", "type": str, "default": "Euler"}] -).generate_model() - -StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( - "StableDiffusionProcessingImg2Img", - StableDiffusionProcessingImg2Img, - [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}] -).generate_model() \ No newline at end of file -- cgit v1.2.3 From 595dca85af9e26b5d76cd64659a5bdd9da4f2b89 Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Mon, 24 Oct 2022 08:32:18 -0300 Subject: Reverse run_extras change Update serialization on the batch images endpoint --- modules/api/api.py | 7 ++++++- modules/api/models.py | 8 ++++---- modules/extras.py | 2 +- 3 files changed, 11 insertions(+), 6 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 799e3701..67b783de 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -109,7 +109,12 @@ class Api: def extras_batch_images_api(self, req: ExtrasBatchImagesRequest): reqDict = setUpscalers(req) - reqDict['image_folder'] = list(map(decode_base64_to_file, reqDict['imageList'])) + def prepareFiles(file): + file = decode_base64_to_file(file.data, file_path=file.name) + file.orig_name = file.name + return file + + reqDict['image_folder'] = list(map(prepareFiles, reqDict['imageList'])) reqDict.pop('imageList') with self.queue_lock: diff --git a/modules/api/models.py b/modules/api/models.py index e461d397..fca2f991 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -138,12 +138,12 @@ class ExtrasSingleImageRequest(ExtrasBaseRequest): class ExtrasSingleImageResponse(ExtraBaseResponse): image: str = Field(default=None, title="Image", 
description="The generated image in base64 format.") -class ImageItem(BaseModel): - data: str = Field(title="image data") - name: str = Field(title="filename") +class FileData(BaseModel): + data: str = Field(title="File data", description="Base64 representation of the file") + name: str = Field(title="File name") class ExtrasBatchImagesRequest(ExtrasBaseRequest): - imageList: list[str] = Field(title="Images", description="List of images to work on. Must be Base64 strings") + imageList: list[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings") class ExtrasBatchImagesResponse(ExtraBaseResponse): images: list[str] = Field(title="Images", description="The generated images in base64 format.") diff --git a/modules/extras.py b/modules/extras.py index 29ac312e..22c5a1c1 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -33,7 +33,7 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_ for img in image_folder: image = Image.open(img) imageArr.append(image) - imageNameArr.append(os.path.splitext(img.name)[0]) + imageNameArr.append(os.path.splitext(img.orig_name)[0]) elif extras_mode == 2: assert not shared.cmd_opts.hide_ui_dir_config, '--hide-ui-dir-config option must be disabled' -- cgit v1.2.3 From db9ab1a46b5ad4d36ecce76dfee04b7164249829 Mon Sep 17 00:00:00 2001 From: Stephen Date: Mon, 24 Oct 2022 11:16:07 -0400 Subject: [Bugfix][API] - Fix API response for colab users --- modules/api/api.py | 17 +++++++++++++---- modules/api/models.py | 10 ++++++---- 2 files changed, 19 insertions(+), 8 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index a860a964..ba890243 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -7,6 +7,7 @@ import uvicorn from fastapi import Body, APIRouter, HTTPException from fastapi.responses import JSONResponse from pydantic import BaseModel, Field, Json +from typing import List import json import io import base64 @@ -15,12 +16,12 @@ from PIL import Image sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None) class TextToImageResponse(BaseModel): - images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") + images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.") parameters: Json info: Json class ImageToImageResponse(BaseModel): - images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") + images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.") parameters: Json info: Json @@ -41,6 +42,9 @@ class Api: # convert base64 to PIL image return Image.open(io.BytesIO(imgdata)) + def __processed_info_to_json(self, processed): + return json.dumps(processed.info) + def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -65,7 +69,7 @@ class Api: i.save(buffer, format="png") b64images.append(base64.b64encode(buffer.getvalue())) - return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=json.dumps(processed.info)) + return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=processed.js()) @@ -111,7 +115,12 @@ class Api: i.save(buffer, format="png") b64images.append(base64.b64encode(buffer.getvalue())) - return 
ImageToImageResponse(images=b64images, parameters=json.dumps(vars(img2imgreq)), info=json.dumps(processed.info)) + if (not img2imgreq.include_init_images): + # remove img2imgreq.init_images and img2imgreq.mask + img2imgreq.init_images = None + img2imgreq.mask = None + + return ImageToImageResponse(images=b64images, parameters=json.dumps(vars(img2imgreq)), info=processed.js()) def extrasapi(self): raise NotImplementedError diff --git a/modules/api/models.py b/modules/api/models.py index f551fa35..c6d43606 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -31,6 +31,7 @@ class ModelDef(BaseModel): field_alias: str field_type: Any field_value: Any + field_exclude: bool = False class PydanticModelGenerator: @@ -68,7 +69,7 @@ class PydanticModelGenerator: field=underscore(k), field_alias=k, field_type=field_type_generator(k, v), - field_value=v.default + field_value=v.default, ) for (k,v) in self._class_data.items() if k not in API_NOT_ALLOWED ] @@ -78,7 +79,8 @@ class PydanticModelGenerator: field=underscore(fields["key"]), field_alias=fields["key"], field_type=fields["type"], - field_value=fields["default"])) + field_value=fields["default"], + field_exclude=fields["exclude"] if "exclude" in fields else False)) def generate_model(self): """ @@ -86,7 +88,7 @@ class PydanticModelGenerator: from the json and overrides provided at initialization """ fields = { - d.field: (d.field_type, Field(default=d.field_value, alias=d.field_alias)) for d in self._model_def + d.field: (d.field_type, Field(default=d.field_value, alias=d.field_alias, exclude=d.field_exclude)) for d in self._model_def } DynamicModel = create_model(self._model_name, **fields) DynamicModel.__config__.allow_population_by_field_name = True @@ -102,5 +104,5 @@ StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator( StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( "StableDiffusionProcessingImg2Img", StableDiffusionProcessingImg2Img, - [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}] + [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}, {"key": "include_init_images", "type": bool, "default": False, "exclude" : True}] ).generate_model() \ No newline at end of file -- cgit v1.2.3 From b46c64c6e5b40d69521e4d50e2d35f6a35468129 Mon Sep 17 00:00:00 2001 From: Stephen Date: Mon, 24 Oct 2022 12:18:54 -0400 Subject: clean --- modules/api/api.py | 4 ---- modules/api/models.py | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index ba890243..6e9d6097 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -42,9 +42,6 @@ class Api: # convert base64 to PIL image return Image.open(io.BytesIO(imgdata)) - def __processed_info_to_json(self, processed): - return json.dumps(processed.info) - def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -116,7 +113,6 @@ class Api: b64images.append(base64.b64encode(buffer.getvalue())) if (not img2imgreq.include_init_images): - # remove img2imgreq.init_images and img2imgreq.mask img2imgreq.init_images = None img2imgreq.mask = None diff --git a/modules/api/models.py 
b/modules/api/models.py index c6d43606..079e33d9 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -69,7 +69,7 @@ class PydanticModelGenerator: field=underscore(k), field_alias=k, field_type=field_type_generator(k, v), - field_value=v.default, + field_value=v.default ) for (k,v) in self._class_data.items() if k not in API_NOT_ALLOWED ] -- cgit v1.2.3 From b2e0d8ba789b345145436f6e960a3f0a896a6643 Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Wed, 26 Oct 2022 09:54:26 -0300 Subject: Remove folder endpoint --- modules/api/api.py | 9 --------- modules/api/models.py | 6 +----- 2 files changed, 1 insertion(+), 14 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index ca289d9f..49c213ea 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -32,7 +32,6 @@ class Api: self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse) self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) - self.app.add_api_route("/sdapi/v1/extra-folder-images", self.extras_folder_processing_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -126,14 +125,6 @@ class Api: return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) - def extras_folder_processing_api(self, req:ExtrasFoldersRequest): - reqDict = setUpscalers(req) - - with self.queue_lock: - result = run_extras(extras_mode=2, image=None, image_folder=None, **reqDict) - - return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) - def pnginfoapi(self): raise NotImplementedError diff --git a/modules/api/models.py b/modules/api/models.py index 00406368..dd122321 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -148,8 +148,4 @@ class ExtrasBatchImagesRequest(ExtrasBaseRequest): imageList: list[FileData] = Field(title="Images", description="List of images to work on. 
Must be Base64 strings") class ExtrasBatchImagesResponse(ExtraBaseResponse): - images: list[str] = Field(title="Images", description="The generated images in base64 format.") - -class ExtrasFoldersRequest(ExtrasBaseRequest): - input_dir: str = Field(title="Input directory", description="Directory path from where to take the images") - output_dir: str = Field(title="Output directory", description="Directory path to put the processsed images into") + images: list[str] = Field(title="Images", description="The generated images in base64 format.") \ No newline at end of file -- cgit v1.2.3 From bdc90837987ed8919dd611fd01553b0c170ded5c Mon Sep 17 00:00:00 2001 From: Roy Shilkrot Date: Thu, 27 Oct 2022 15:20:15 -0400 Subject: Add a barebones interrogate API --- launch.py | 2 +- modules/api/api.py | 25 ++++++++++++++++++++++++- modules/api/models.py | 13 ++++++++++++- webui.py | 12 ++++++++---- 4 files changed, 45 insertions(+), 7 deletions(-) (limited to 'modules/api/models.py') diff --git a/launch.py b/launch.py index 8affd410..ae79b4a4 100644 --- a/launch.py +++ b/launch.py @@ -198,7 +198,7 @@ def prepare_enviroment(): def start_webui(): print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}") import webui - webui.webui() + webui.webui_or_api() if __name__ == "__main__": diff --git a/modules/api/api.py b/modules/api/api.py index 6e9d6097..eabdb7b8 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -1,4 +1,4 @@ -from modules.api.models import StableDiffusionTxt2ImgProcessingAPI, StableDiffusionImg2ImgProcessingAPI +from modules.api.models import StableDiffusionTxt2ImgProcessingAPI, StableDiffusionImg2ImgProcessingAPI, InterrogateAPI from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.sd_samplers import all_samplers from modules.extras import run_pnginfo @@ -25,6 +25,11 @@ class ImageToImageResponse(BaseModel): parameters: Json info: Json +class InterrogateResponse(BaseModel): + caption: str = Field(default=None, title="Caption", description="The generated caption for the image.") + parameters: Json + info: Json + class Api: def __init__(self, app, queue_lock): @@ -33,6 +38,7 @@ class Api: self.queue_lock = queue_lock self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"]) self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"]) + self.app.add_api_route("/sdapi/v1/interrogate", self.interrogateapi, methods=["POST"]) def __base64_to_image(self, base64_string): # if has a comma, deal with prefix @@ -118,6 +124,23 @@ class Api: return ImageToImageResponse(images=b64images, parameters=json.dumps(vars(img2imgreq)), info=processed.js()) + def interrogateapi(self, interrogatereq: InterrogateAPI): + image_b64 = interrogatereq.image + if image_b64 is None: + raise HTTPException(status_code=404, detail="Image not found") + + populate = interrogatereq.copy(update={ # Override __init__ params + } + ) + + img = self.__base64_to_image(image_b64) + + # Override object param + with self.queue_lock: + processed = shared.interrogator.interrogate(img) + + return InterrogateResponse(caption=processed, parameters=json.dumps(vars(interrogatereq)), info=None) + def extrasapi(self): raise NotImplementedError diff --git a/modules/api/models.py b/modules/api/models.py index 079e33d9..8be64749 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -63,7 +63,12 @@ class PydanticModelGenerator: self._model_name = model_name - self._class_data = 
merge_class_params(class_instance) + + if class_instance is not None: + self._class_data = merge_class_params(class_instance) + else: + self._class_data = {} + self._model_def = [ ModelDef( field=underscore(k), @@ -105,4 +110,10 @@ StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( "StableDiffusionProcessingImg2Img", StableDiffusionProcessingImg2Img, [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}, {"key": "include_init_images", "type": bool, "default": False, "exclude" : True}] +).generate_model() + +InterrogateAPI = PydanticModelGenerator( + "Interrogate", + None, + [{"key": "image", "type": str, "default": None}] ).generate_model() \ No newline at end of file diff --git a/webui.py b/webui.py index ade7334b..7a4bb2a2 100644 --- a/webui.py +++ b/webui.py @@ -146,7 +146,9 @@ def webui(): app.add_middleware(GZipMiddleware, minimum_size=1000) if (launch_api): - create_api(app) + print('launching API') + api = create_api(app) + api.launch(server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1", port=cmd_opts.port if cmd_opts.port else 7861) wait_on_server(demo) @@ -161,10 +163,12 @@ def webui(): print('Restarting Gradio') - -task = [] -if __name__ == "__main__": +def webui_or_api(): if cmd_opts.nowebui: api_only() else: webui() + +task = [] +if __name__ == "__main__": + webui_or_api() \ No newline at end of file -- cgit v1.2.3 From 4609b83cd496013a05e77c42af031d89f07785a9 Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sat, 29 Oct 2022 16:09:19 -0300 Subject: Add PNG Info endpoint --- modules/api/api.py | 12 +++++++++--- modules/api/models.py | 9 ++++++++- 2 files changed, 17 insertions(+), 4 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 49c213ea..8fcd068d 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -5,7 +5,7 @@ import modules.shared as shared from modules.api.models import * from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.sd_samplers import all_samplers -from modules.extras import run_extras +from modules.extras import run_extras, run_pnginfo def upscaler_to_index(name: str): try: @@ -32,6 +32,7 @@ class Api: self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse) self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) + self.app.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse) def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -125,8 +126,13 @@ class Api: return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) - def pnginfoapi(self): - raise NotImplementedError + def pnginfoapi(self, req:PNGInfoRequest): + if(not req.image.strip()): + return PNGInfoResponse(info="") + + result = run_pnginfo(decode_base64_to_image(req.image.strip())) + + return PNGInfoResponse(info=result[1]) def launch(self, server_name, port): self.app.include_router(self.router) diff --git 
a/modules/api/models.py b/modules/api/models.py index dd122321..58e8e58b 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -1,4 +1,5 @@ import inspect +from click import prompt from pydantic import BaseModel, Field, create_model from typing import Any, Optional from typing_extensions import Literal @@ -148,4 +149,10 @@ class ExtrasBatchImagesRequest(ExtrasBaseRequest): imageList: list[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings") class ExtrasBatchImagesResponse(ExtraBaseResponse): - images: list[str] = Field(title="Images", description="The generated images in base64 format.") \ No newline at end of file + images: list[str] = Field(title="Images", description="The generated images in base64 format.") + +class PNGInfoRequest(BaseModel): + image: str = Field(title="Image", description="The base64 encoded PNG image") + +class PNGInfoResponse(BaseModel): + info: str = Field(title="Image info", description="A string with all the info the image had") \ No newline at end of file -- cgit v1.2.3 From f62db4d5c753bc32d2ae166606ce41f4c5fa5c43 Mon Sep 17 00:00:00 2001 From: evshiron Date: Sun, 30 Oct 2022 03:55:43 +0800 Subject: fix progress response model --- modules/api/api.py | 30 ------------------------------ modules/api/models.py | 8 ++++---- 2 files changed, 4 insertions(+), 34 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index e93cddcb..7e8522a2 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -1,33 +1,3 @@ -# import time - -# from modules.api.models import StableDiffusionTxt2ImgProcessingAPI, StableDiffusionImg2ImgProcessingAPI -# from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images -# from modules.sd_samplers import all_samplers -# from modules.extras import run_pnginfo -# import modules.shared as shared -# from modules import devices -# import uvicorn -# from fastapi import Body, APIRouter, HTTPException -# from fastapi.responses import JSONResponse -# from pydantic import BaseModel, Field, Json -# from typing import List -# import json -# import io -# import base64 -# from PIL import Image - -# sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None) - -# class TextToImageResponse(BaseModel): -# images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.") -# parameters: Json -# info: Json - -# class ImageToImageResponse(BaseModel): -# images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.") -# parameters: Json -# info: Json - import time import uvicorn from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image diff --git a/modules/api/models.py b/modules/api/models.py index 8d4abc39..e1762fb9 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -1,6 +1,6 @@ import inspect from click import prompt -from pydantic import BaseModel, Field, create_model +from pydantic import BaseModel, Field, Json, create_model from typing import Any, Optional from typing_extensions import Literal from inflection import underscore @@ -158,6 +158,6 @@ class PNGInfoResponse(BaseModel): info: str = Field(title="Image info", description="A string with all the info the image had") class ProgressResponse(BaseModel): - progress: float - eta_relative: float - state: dict + progress: float = 
Field(title="Progress", description="The progress with a range of 0 to 1") + eta_relative: float = Field(title="ETA in secs") + state: Json -- cgit v1.2.3 From e9c6c2a51f972fd7cd88ea740ade4ac3d8108b67 Mon Sep 17 00:00:00 2001 From: evshiron Date: Sun, 30 Oct 2022 04:02:56 +0800 Subject: add description for state field --- modules/api/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/api/models.py') diff --git a/modules/api/models.py b/modules/api/models.py index e1762fb9..709ab5a6 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -160,4 +160,4 @@ class PNGInfoResponse(BaseModel): class ProgressResponse(BaseModel): progress: float = Field(title="Progress", description="The progress with a range of 0 to 1") eta_relative: float = Field(title="ETA in secs") - state: Json + state: Json = Field(title="State", description="The current state snapshot") -- cgit v1.2.3 From 88f46a5bec610cf03641f18becbe3deda541e982 Mon Sep 17 00:00:00 2001 From: evshiron Date: Sun, 30 Oct 2022 05:04:29 +0800 Subject: update progress response model --- modules/api/api.py | 6 +++--- modules/api/models.py | 4 ++-- modules/shared.py | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 7e8522a2..5912d289 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -61,7 +61,7 @@ class Api: self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) self.app.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse) - self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"]) + self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse) def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -171,7 +171,7 @@ class Api: # copy from check_progress_call of ui.py if shared.state.job_count == 0: - return ProgressResponse(progress=0, eta_relative=0, state=shared.state.js()) + return ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict()) # avoid dividing zero progress = 0.01 @@ -187,7 +187,7 @@ class Api: progress = min(progress, 1) - return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.js()) + return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict()) def launch(self, server_name, port): self.app.include_router(self.router) diff --git a/modules/api/models.py b/modules/api/models.py index 709ab5a6..0ab85ec5 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -1,6 +1,6 @@ import inspect from click import prompt -from pydantic import BaseModel, Field, Json, create_model +from pydantic import BaseModel, Field, create_model from typing import Any, Optional from typing_extensions import Literal from inflection import underscore @@ -160,4 +160,4 @@ class PNGInfoResponse(BaseModel): class ProgressResponse(BaseModel): progress: float = Field(title="Progress", description="The progress with a range of 0 to 1") eta_relative: float = Field(title="ETA in secs") - state: Json = Field(title="State", description="The current state snapshot") + state: dict = 
Field(title="State", description="The current state snapshot") diff --git a/modules/shared.py b/modules/shared.py index 0f4c035d..f7b0990c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -147,7 +147,7 @@ class State: def get_job_timestamp(self): return datetime.datetime.now().strftime("%Y%m%d%H%M%S") # shouldn't this return job_timestamp? - def js(self): + def dict(self): obj = { "skipped": self.skipped, "interrupted": self.skipped, @@ -158,7 +158,7 @@ class State: "sampling_steps": self.sampling_steps, } - return json.dumps(obj) + return obj state = State() -- cgit v1.2.3 From 9f104b53c425e248595e5b6481336d2a339e015e Mon Sep 17 00:00:00 2001 From: evshiron Date: Sun, 30 Oct 2022 05:19:17 +0800 Subject: preview current image when opts.show_progress_every_n_steps is enabled --- modules/api/api.py | 8 ++++++-- modules/api/models.py | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 5912d289..e960bb7b 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -1,7 +1,7 @@ import time import uvicorn from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image -from fastapi import APIRouter, HTTPException +from fastapi import APIRouter, Depends, HTTPException import modules.shared as shared from modules import devices from modules.api.models import * @@ -187,7 +187,11 @@ class Api: progress = min(progress, 1) - return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict()) + current_image = None + if shared.state.current_image: + current_image = encode_pil_to_base64(shared.state.current_image) + + return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image) def launch(self, server_name, port): self.app.include_router(self.router) diff --git a/modules/api/models.py b/modules/api/models.py index 0ab85ec5..c8bc719a 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -161,3 +161,4 @@ class ProgressResponse(BaseModel): progress: float = Field(title="Progress", description="The progress with a range of 0 to 1") eta_relative: float = Field(title="ETA in secs") state: dict = Field(title="State", description="The current state snapshot") + current_image: str = Field(default=None, title="Current image", description="The current image in base64 format. 
opts.show_progress_every_n_steps is required for this to work.") -- cgit v1.2.3 From 9f4f894d74b57c3d02ebccaa59f9c22fca2b6c90 Mon Sep 17 00:00:00 2001 From: evshiron Date: Sun, 30 Oct 2022 06:03:32 +0800 Subject: allow skip current image in progress api --- modules/api/api.py | 4 ++-- modules/api/models.py | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index e960bb7b..5c5b210f 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -167,7 +167,7 @@ class Api: return PNGInfoResponse(info=result[1]) - def progressapi(self): + def progressapi(self, req: ProgressRequest = Depends()): # copy from check_progress_call of ui.py if shared.state.job_count == 0: @@ -188,7 +188,7 @@ class Api: progress = min(progress, 1) current_image = None - if shared.state.current_image: + if shared.state.current_image and not req.skip_current_image: current_image = encode_pil_to_base64(shared.state.current_image) return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image) diff --git a/modules/api/models.py b/modules/api/models.py index c8bc719a..9ee42a17 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -157,6 +157,9 @@ class PNGInfoRequest(BaseModel): class PNGInfoResponse(BaseModel): info: str = Field(title="Image info", description="A string with all the info the image had") +class ProgressRequest(BaseModel): + skip_current_image: bool = Field(default=False, title="Skip current image", description="Skip current image serialization") + class ProgressResponse(BaseModel): progress: float = Field(title="Progress", description="The progress with a range of 0 to 1") eta_relative: float = Field(title="ETA in secs") -- cgit v1.2.3 From df6a7ebfe8cc4da23861e3e2583693bb7808d573 Mon Sep 17 00:00:00 2001 From: Roy Shilkrot Date: Mon, 31 Oct 2022 11:50:33 -0400 Subject: revert things to master --- launch.py | 2 +- modules/api/api.py | 2 -- modules/api/models.py | 6 +----- 3 files changed, 2 insertions(+), 8 deletions(-) (limited to 'modules/api/models.py') diff --git a/launch.py b/launch.py index fe9cef3c..958336f2 100644 --- a/launch.py +++ b/launch.py @@ -220,7 +220,7 @@ def tests(argv): def start_webui(): print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}") import webui - webui.webui_or_api() + webui.webui() if __name__ == "__main__": diff --git a/modules/api/api.py b/modules/api/api.py index c510a833..6a903e4c 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -117,8 +117,6 @@ class Api: return ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js()) - def extrasapi(self): - raise NotImplementedError def extras_single_image_api(self, req: ExtrasSingleImageRequest): reqDict = setUpscalers(req) diff --git a/modules/api/models.py b/modules/api/models.py index 035a7179..82ab29b8 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -64,11 +64,7 @@ class PydanticModelGenerator: self._model_name = model_name - - if class_instance is not None: - self._class_data = merge_class_params(class_instance) - else: - self._class_data = {} + self._class_data = merge_class_params(class_instance) self._model_def = [ ModelDef( -- cgit v1.2.3 From 2ac25ea64f31fd0e7dea35d27a52f3646618c3b6 Mon Sep 17 00:00:00 2001 From: digburn Date: Wed, 2 Nov 2022 21:52:23 +0000 Subject: fix: Add required parameter to API extras route --- modules/api/models.py | 1 + 1 file changed, 1 insertion(+) (limited to 
'modules/api/models.py') diff --git a/modules/api/models.py b/modules/api/models.py index 9ee42a17..9069c0ac 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -131,6 +131,7 @@ class ExtrasBaseRequest(BaseModel): upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}") upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}") extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.") + upscale_first: bool = Field(default=True, title="Upscale first", description="Should the upscaler run before restoring faces?") class ExtraBaseResponse(BaseModel): html_info: str = Field(title="HTML info", description="A series of HTML tags containing the process info.") -- cgit v1.2.3 From 7a2e36b583ef9eaefa44322e16faff6f9f1af169 Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Thu, 3 Nov 2022 00:51:22 -0300 Subject: Add config and lists endpoints --- modules/api/api.py | 97 ++++++++++++++++++++++++++++++++++++++++++++++++--- modules/api/models.py | 70 +++++++++++++++++++++++++++++++++++-- 2 files changed, 159 insertions(+), 8 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 71c9c160..ed2dce5d 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -2,14 +2,17 @@ import base64 import io import time import uvicorn -from gradio.processing_utils import decode_base64_to_file, decode_base64_to_image -from fastapi import APIRouter, Depends, HTTPException +from threading import Lock +from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image +from fastapi import APIRouter, Depends, FastAPI, HTTPException import modules.shared as shared from modules.api.models import * from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images -from modules.sd_samplers import all_samplers, sample_to_image, samples_to_image_grid +from modules.sd_samplers import all_samplers from modules.extras import run_extras, run_pnginfo - +from modules.sd_models import checkpoints_list +from modules.realesrgan_model import get_realesrgan_models +from typing import List def upscaler_to_index(name: str): try: @@ -37,7 +40,7 @@ def encode_pil_to_base64(image): class Api: - def __init__(self, app, queue_lock): + def __init__(self, app: FastAPI, queue_lock: Lock): self.router = APIRouter() self.app = app self.queue_lock = queue_lock @@ -48,6 +51,19 @@ class Api: self.app.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse) self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse) self.app.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"]) + self.app.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=OptionsModel) + self.app.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"]) + self.app.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=FlagsModel) + self.app.add_api_route("/sdapi/v1/info", self.get_info, methods=["GET"]) + 
self.app.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[SamplerItem]) + self.app.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[UpscalerItem]) + self.app.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[SDModelItem]) + self.app.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[HypernetworkItem]) + self.app.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[FaceRestorerItem]) + self.app.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[RealesrganItem]) + self.app.add_api_route("/sdapi/v1/prompt-styles", self.get_promp_styles, methods=["GET"], response_model=List[PromptStyleItem]) + self.app.add_api_route("/sdapi/v1/artist-categories", self.get_artists_categories, methods=["GET"], response_model=List[str]) + self.app.add_api_route("/sdapi/v1/artists", self.get_artists, methods=["GET"], response_model=List[ArtistItem]) def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -190,6 +206,77 @@ class Api: shared.state.interrupt() return {} + + def get_config(self): + options = {} + for key in shared.opts.data.keys(): + metadata = shared.opts.data_labels.get(key) + if(metadata is not None): + options.update({key: shared.opts.data.get(key, shared.opts.data_labels.get(key).default)}) + else: + options.update({key: shared.opts.data.get(key, None)}) + + return options + + def set_config(self, req: OptionsModel): + reqDict = vars(req) + for o in reqDict: + setattr(shared.opts, o, reqDict[o]) + + shared.opts.save(shared.config_filename) + return + + def get_cmd_flags(self): + return vars(shared.cmd_opts) + + def get_info(self): + + return { + "hypernetworks": [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks], + "face_restorers": [{"name":x.name(), "cmd_dir": getattr(x, "cmd_dir", None)} for x in shared.face_restorers], + "realesrgan_models":[{"name":x.name,"path":x.data_path, "scale":x.scale} for x in get_realesrgan_models(None)], + "promp_styles":[shared.prompt_styles.styles[k] for k in shared.prompt_styles.styles], + "artists_categories": shared.artist_db.cats, + # "artists": [{"name":x[0], "score":x[1], "category":x[2]} for x in shared.artist_db.artists] + } + + def get_samplers(self): + return [{"name":sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in all_samplers] + + def get_upscalers(self): + upscalers = [] + + for upscaler in shared.sd_upscalers: + u = upscaler.scaler + upscalers.append({"name":u.name, "model_name":u.model_name, "model_path":u.model_path, "model_url":u.model_url}) + + return upscalers + + def get_sd_models(self): + return [{"title":x.title, "model_name":x.model_name, "hash":x.hash, "filename": x.filename, "config": x.config} for x in checkpoints_list.values()] + + def get_hypernetworks(self): + return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks] + + def get_face_restorers(self): + return [{"name":x.name(), "cmd_dir": getattr(x, "cmd_dir", None)} for x in shared.face_restorers] + + def get_realesrgan_models(self): + return [{"name":x.name,"path":x.data_path, "scale":x.scale} for x in get_realesrgan_models(None)] + + def get_promp_styles(self): + styleList = [] + for k in shared.prompt_styles.styles: + style = 
shared.prompt_styles.styles[k] + styleList.append({"name":style[0], "prompt": style[1], "negative_prompr": style[2]}) + + return styleList + + def get_artists_categories(self): + return shared.artist_db.cats + + def get_artists(self): + return [{"name":x[0], "score":x[1], "category":x[2]} for x in shared.artist_db.artists] def launch(self, server_name, port): self.app.include_router(self.router) diff --git a/modules/api/models.py b/modules/api/models.py index 9ee42a17..b54b188a 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -1,11 +1,10 @@ import inspect -from click import prompt from pydantic import BaseModel, Field, create_model -from typing import Any, Optional +from typing import Any, Optional, Union from typing_extensions import Literal from inflection import underscore from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img -from modules.shared import sd_upscalers +from modules.shared import sd_upscalers, opts, parser API_NOT_ALLOWED = [ "self", @@ -165,3 +164,68 @@ class ProgressResponse(BaseModel): eta_relative: float = Field(title="ETA in secs") state: dict = Field(title="State", description="The current state snapshot") current_image: str = Field(default=None, title="Current image", description="The current image in base64 format. opts.show_progress_every_n_steps is required for this to work.") + +fields = {} +for key, value in opts.data.items(): + metadata = opts.data_labels.get(key) + optType = opts.typemap.get(type(value), type(value)) + + if (metadata is not None): + fields.update({key: (Optional[optType], Field( + default=metadata.default ,description=metadata.label))}) + else: + fields.update({key: (Optional[optType], Field())}) + +OptionsModel = create_model("Options", **fields) + +flags = {} +_options = vars(parser)['_option_string_actions'] +for key in _options: + if(_options[key].dest != 'help'): + flag = _options[key] + _type = str + if(_options[key].default != None): _type = type(_options[key].default) + flags.update({flag.dest: (_type,Field(default=flag.default, description=flag.help))}) + +FlagsModel = create_model("Flags", **flags) + +class SamplerItem(BaseModel): + name: str = Field(title="Name") + aliases: list[str] = Field(title="Aliases") + options: dict[str, str] = Field(title="Options") + +class UpscalerItem(BaseModel): + name: str = Field(title="Name") + model_name: str | None = Field(title="Model Name") + model_path: str | None = Field(title="Path") + model_url: str | None = Field(title="URL") + +class SDModelItem(BaseModel): + title: str = Field(title="Title") + model_name: str = Field(title="Model Name") + hash: str = Field(title="Hash") + filename: str = Field(title="Filename") + config: str = Field(title="Config file") + +class HypernetworkItem(BaseModel): + name: str = Field(title="Name") + path: str | None = Field(title="Path") + +class FaceRestorerItem(BaseModel): + name: str = Field(title="Name") + cmd_dir: str | None = Field(title="Path") + +class RealesrganItem(BaseModel): + name: str = Field(title="Name") + path: str | None = Field(title="Path") + scale: int | None = Field(title="Scale") + +class PromptStyleItem(BaseModel): + name: str = Field(title="Name") + prompt: str | None = Field(title="Prompt") + negative_prompt: str | None = Field(title="Negative Prompt") + +class ArtistItem(BaseModel): + name: str = Field(title="Name") + score: float = Field(title="Score") + category: str = Field(title="Category") \ No newline at end of file -- cgit v1.2.3 From 
b2c48091db394c2b7d375a33f18d90c924cd4363 Mon Sep 17 00:00:00 2001 From: Gur Date: Fri, 4 Nov 2022 06:55:03 +0800 Subject: fixed api compatibility with python 3.8 --- modules/api/models.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/models.py b/modules/api/models.py index 9ee42a17..29a934ba 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -6,6 +6,7 @@ from typing_extensions import Literal from inflection import underscore from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img from modules.shared import sd_upscalers +from typing import List API_NOT_ALLOWED = [ "self", @@ -109,12 +110,12 @@ StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( ).generate_model() class TextToImageResponse(BaseModel): - images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") + images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.") parameters: dict info: str class ImageToImageResponse(BaseModel): - images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") + images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.") parameters: dict info: str @@ -146,10 +147,10 @@ class FileData(BaseModel): name: str = Field(title="File name") class ExtrasBatchImagesRequest(ExtrasBaseRequest): - imageList: list[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings") + imageList: List[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings") class ExtrasBatchImagesResponse(ExtraBaseResponse): - images: list[str] = Field(title="Images", description="The generated images in base64 format.") + images: List[str] = Field(title="Images", description="The generated images in base64 format.") class PNGInfoRequest(BaseModel): image: str = Field(title="Image", description="The base64 encoded PNG image") -- cgit v1.2.3 From 8eb64dab3e9e40531f6a3fa606a1c23a62987249 Mon Sep 17 00:00:00 2001 From: digburn <115176097+digburn@users.noreply.github.com> Date: Fri, 4 Nov 2022 00:35:18 +0000 Subject: fix: correct default val of upscale_first to False --- modules/api/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/api/models.py') diff --git a/modules/api/models.py b/modules/api/models.py index 9069c0ac..68fb45c6 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -131,7 +131,7 @@ class ExtrasBaseRequest(BaseModel): upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}") upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}") extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.") - upscale_first: bool = Field(default=True, title="Upscale first", description="Should the upscaler run before restoring faces?") + upscale_first: bool = Field(default=False, title="Upscale first", description="Should the upscaler run before restoring faces?") 
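
For context on how the extras request model defined in the hunk above is consumed, a minimal client-side sketch of a call to /sdapi/v1/extra-single-image follows. It is only an illustration under assumptions not taken from these patches: the local URL and port (the webui defaults), the input/output file names, the "Lanczos" upscaler name (any name the server actually reports would do), and the third-party requests library.

import base64
import requests

# Read and base64-encode the source image, as ExtrasSingleImageRequest expects.
with open("input.png", "rb") as f:
    b64_image = base64.b64encode(f.read()).decode("utf-8")

payload = {
    "image": b64_image,        # base64-encoded input image
    "upscaling_resize": 2,     # scale factor used when resize_mode is 0 (the default)
    "upscaler_1": "Lanczos",   # assumed name; must match an upscaler installed on the server
    "upscale_first": False,    # the default corrected in the patch above
}

resp = requests.post("http://127.0.0.1:7860/sdapi/v1/extra-single-image", json=payload)
resp.raise_for_status()

# The response carries the processed image as a base64 string (possibly with a data-URL prefix).
img_b64 = resp.json()["image"].split(",", 1)[-1]
with open("upscaled.png", "wb") as f:
    f.write(base64.b64decode(img_b64))
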
class ExtraBaseResponse(BaseModel): html_info: str = Field(title="HTML info", description="A series of HTML tags containing the process info.") -- cgit v1.2.3 From ebce0c57c78a3f22178e3a38938d19ec0dfb703d Mon Sep 17 00:00:00 2001 From: Billy Cao Date: Sat, 5 Nov 2022 11:38:24 +0800 Subject: Use typing.Optional instead of | to add support for Python 3.9 and below. --- modules/api/models.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/models.py b/modules/api/models.py index 2ae75f43..a44c5ddd 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -1,6 +1,6 @@ import inspect from pydantic import BaseModel, Field, create_model -from typing import Any, Optional, Union +from typing import Any, Optional from typing_extensions import Literal from inflection import underscore from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img @@ -185,22 +185,22 @@ _options = vars(parser)['_option_string_actions'] for key in _options: if(_options[key].dest != 'help'): flag = _options[key] - _type = str - if(_options[key].default != None): _type = type(_options[key].default) + _type = str + if _options[key].default is not None: _type = type(_options[key].default) flags.update({flag.dest: (_type,Field(default=flag.default, description=flag.help))}) FlagsModel = create_model("Flags", **flags) class SamplerItem(BaseModel): name: str = Field(title="Name") - aliases: list[str] = Field(title="Aliases") + aliases: list[str] = Field(title="Aliases") options: dict[str, str] = Field(title="Options") class UpscalerItem(BaseModel): name: str = Field(title="Name") - model_name: str | None = Field(title="Model Name") - model_path: str | None = Field(title="Path") - model_url: str | None = Field(title="URL") + model_name: Optional[str] = Field(title="Model Name") + model_path: Optional[str] = Field(title="Path") + model_url: Optional[str] = Field(title="URL") class SDModelItem(BaseModel): title: str = Field(title="Title") @@ -211,21 +211,21 @@ class SDModelItem(BaseModel): class HypernetworkItem(BaseModel): name: str = Field(title="Name") - path: str | None = Field(title="Path") + path: Optional[str] = Field(title="Path") class FaceRestorerItem(BaseModel): name: str = Field(title="Name") - cmd_dir: str | None = Field(title="Path") + cmd_dir: Optional[str] = Field(title="Path") class RealesrganItem(BaseModel): name: str = Field(title="Name") - path: str | None = Field(title="Path") - scale: int | None = Field(title="Scale") + path: Optional[str] = Field(title="Path") + scale: Optional[int] = Field(title="Scale") class PromptStyleItem(BaseModel): name: str = Field(title="Name") - prompt: str | None = Field(title="Prompt") - negative_prompt: str | None = Field(title="Negative Prompt") + prompt: Optional[str] = Field(title="Prompt") + negative_prompt: Optional[str] = Field(title="Negative Prompt") class ArtistItem(BaseModel): name: str = Field(title="Name") -- cgit v1.2.3 From a170e3d22231e145f42bb878a76ae5f76fdca230 Mon Sep 17 00:00:00 2001 From: Evgeniy Date: Sat, 5 Nov 2022 17:06:56 +0300 Subject: Python 3.8 typing compatibility Solves problems with ```Traceback (most recent call last): File "webui.py", line 201, in webui() File "webui.py", line 178, in webui create_api(app) File "webui.py", line 117, in create_api from modules.api.api import Api File "H:\AIart\stable-diffusion\stable-diffusion-webui\modules\api\api.py", line 9, in from modules.api.models import * File 
"H:\AIart\stable-diffusion\stable-diffusion-webui\modules\api\models.py", line 194, in class SamplerItem(BaseModel): File "H:\AIart\stable-diffusion\stable-diffusion-webui\modules\api\models.py", line 196, in SamplerItem aliases: list[str] = Field(title="Aliases") TypeError: 'type' object is not subscriptable``` and ```Traceback (most recent call last): File "webui.py", line 201, in webui() File "webui.py", line 178, in webui create_api(app) File "webui.py", line 117, in create_api from modules.api.api import Api File "H:\AIart\stable-diffusion\stable-diffusion-webui\modules\api\api.py", line 9, in from modules.api.models import * File "H:\AIart\stable-diffusion\stable-diffusion-webui\modules\api\models.py", line 194, in class SamplerItem(BaseModel): File "H:\AIart\stable-diffusion\stable-diffusion-webui\modules\api\models.py", line 197, in SamplerItem options: dict[str, str] = Field(title="Options") TypeError: 'type' object is not subscriptable``` --- modules/api/models.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/models.py b/modules/api/models.py index a44c5ddd..f89da1ff 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -5,7 +5,7 @@ from typing_extensions import Literal from inflection import underscore from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img from modules.shared import sd_upscalers, opts, parser -from typing import List +from typing import Dict, List API_NOT_ALLOWED = [ "self", @@ -193,8 +193,8 @@ FlagsModel = create_model("Flags", **flags) class SamplerItem(BaseModel): name: str = Field(title="Name") - aliases: list[str] = Field(title="Aliases") - options: dict[str, str] = Field(title="Options") + aliases: List[str] = Field(title="Aliases") + options: Dict[str, str] = Field(title="Options") class UpscalerItem(BaseModel): name: str = Field(title="Name") @@ -230,4 +230,4 @@ class PromptStyleItem(BaseModel): class ArtistItem(BaseModel): name: str = Field(title="Name") score: float = Field(title="Score") - category: str = Field(title="Category") \ No newline at end of file + category: str = Field(title="Category") -- cgit v1.2.3 From 99b05addb1c98169d78957f13efef308aef0af94 Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sat, 5 Nov 2022 18:46:47 -0300 Subject: Fix options endpoint not showing the full list of options --- modules/api/models.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/models.py b/modules/api/models.py index f89da1ff..0ea62155 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -168,9 +168,9 @@ class ProgressResponse(BaseModel): current_image: str = Field(default=None, title="Current image", description="The current image in base64 format. 
opts.show_progress_every_n_steps is required for this to work.") fields = {} -for key, value in opts.data.items(): - metadata = opts.data_labels.get(key) - optType = opts.typemap.get(type(value), type(value)) +for key, metadata in opts.data_labels.items(): + value = opts.data.get(key) + optType = opts.typemap.get(type(metadata.default), type(value)) if (metadata is not None): fields.update({key: (Optional[optType], Field( -- cgit v1.2.3 From 67c8e11be74180be19341aebbd6a246c37a79fbb Mon Sep 17 00:00:00 2001 From: snowmeow2 Date: Mon, 7 Nov 2022 02:32:06 +0800 Subject: Adding DeepDanbooru to the interrogation API --- modules/api/api.py | 16 ++++++++++++++-- modules/api/models.py | 1 + 2 files changed, 15 insertions(+), 2 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 688469ad..596a6616 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -15,6 +15,9 @@ from modules.sd_models import checkpoints_list from modules.realesrgan_model import get_realesrgan_models from typing import List +if shared.cmd_opts.deepdanbooru: + from modules.deepbooru import get_deepbooru_tags + def upscaler_to_index(name: str): try: return [x.name.lower() for x in shared.sd_upscalers].index(name.lower()) @@ -220,11 +223,20 @@ class Api: if image_b64 is None: raise HTTPException(status_code=404, detail="Image not found") - img = self.__base64_to_image(image_b64) + img = decode_base64_to_image(image_b64) + img = img.convert('RGB') # Override object param with self.queue_lock: - processed = shared.interrogator.interrogate(img) + if interrogatereq.model == "clip": + processed = shared.interrogator.interrogate(img) + elif interrogatereq.model == "deepdanbooru": + if shared.cmd_opts.deepdanbooru: + processed = get_deepbooru_tags(img) + else: + raise HTTPException(status_code=404, detail="Model not found. 
Add --deepdanbooru when launching for using the model.") + else: + raise HTTPException(status_code=404, detail="Model not found") return InterrogateResponse(caption=processed) diff --git a/modules/api/models.py b/modules/api/models.py index 34dbfa16..f9cd929e 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -170,6 +170,7 @@ class ProgressResponse(BaseModel): class InterrogateRequest(BaseModel): image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.") + model: str = Field(default="clip", title="Model", description="The interrogate model used.") class InterrogateResponse(BaseModel): caption: str = Field(default=None, title="Caption", description="The generated caption for the image.") -- cgit v1.2.3 From c0355caefe3d82e304e6d832699d581fc8f9fbf9 Mon Sep 17 00:00:00 2001 From: Jim Hays Date: Wed, 14 Dec 2022 21:01:32 -0500 Subject: Fix various typos --- README.md | 4 ++-- javascript/contextMenus.js | 24 ++++++++++++------------ javascript/progressbar.js | 12 ++++++------ javascript/ui.js | 2 +- modules/api/api.py | 18 +++++++++--------- modules/api/models.py | 2 +- modules/images.py | 4 ++-- modules/processing.py | 14 +++++++------- modules/safe.py | 4 ++-- modules/scripts.py | 4 ++-- modules/sd_hijack_inpainting.py | 6 +++--- modules/sd_hijack_unet.py | 2 +- modules/textual_inversion/dataset.py | 10 +++++----- modules/textual_inversion/textual_inversion.py | 16 ++++++++-------- scripts/prompt_matrix.py | 10 +++++----- webui.py | 4 ++-- 16 files changed, 68 insertions(+), 68 deletions(-) (limited to 'modules/api/models.py') diff --git a/README.md b/README.md index 55990581..556000fb 100644 --- a/README.md +++ b/README.md @@ -82,8 +82,8 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web - Use VAEs - Estimated completion time in progress bar - API -- Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML. -- via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip images embds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients)) +- Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML. 
+- via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip images embeds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients)) - [Stable Diffusion 2.0](https://github.com/Stability-AI/stablediffusion) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20) for instructions ## Installation and Running diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js index fe67c42e..11bcce1b 100644 --- a/javascript/contextMenus.js +++ b/javascript/contextMenus.js @@ -9,7 +9,7 @@ contextMenuInit = function(){ function showContextMenu(event,element,menuEntries){ let posx = event.clientX + document.body.scrollLeft + document.documentElement.scrollLeft; - let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop; + let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop; let oldMenu = gradioApp().querySelector('#context-menu') if(oldMenu){ @@ -61,15 +61,15 @@ contextMenuInit = function(){ } - function appendContextMenuOption(targetEmementSelector,entryName,entryFunction){ - - currentItems = menuSpecs.get(targetEmementSelector) - + function appendContextMenuOption(targetElementSelector,entryName,entryFunction){ + + currentItems = menuSpecs.get(targetElementSelector) + if(!currentItems){ currentItems = [] - menuSpecs.set(targetEmementSelector,currentItems); + menuSpecs.set(targetElementSelector,currentItems); } - let newItem = {'id':targetEmementSelector+'_'+uid(), + let newItem = {'id':targetElementSelector+'_'+uid(), 'name':entryName, 'func':entryFunction, 'isNew':true} @@ -97,7 +97,7 @@ contextMenuInit = function(){ if(source.id && source.id.indexOf('check_progress')>-1){ return } - + let oldMenu = gradioApp().querySelector('#context-menu') if(oldMenu){ oldMenu.remove() @@ -117,7 +117,7 @@ contextMenuInit = function(){ }) }); eventListenerApplied=true - + } return [appendContextMenuOption, removeContextMenuOption, addContextMenuEventListener] @@ -152,8 +152,8 @@ addContextMenuEventListener = initResponse[2]; generateOnRepeat('#img2img_generate','#img2img_interrupt'); }) - let cancelGenerateForever = function(){ - clearInterval(window.generateOnRepeatInterval) + let cancelGenerateForever = function(){ + clearInterval(window.generateOnRepeatInterval) } appendContextMenuOption('#txt2img_interrupt','Cancel generate forever',cancelGenerateForever) @@ -162,7 +162,7 @@ addContextMenuEventListener = initResponse[2]; appendContextMenuOption('#img2img_generate', 'Cancel generate forever',cancelGenerateForever) appendContextMenuOption('#roll','Roll three', - function(){ + function(){ let rollbutton = get_uiCurrentTabContent().querySelector('#roll'); setTimeout(function(){rollbutton.click()},100) setTimeout(function(){rollbutton.click()},200) diff --git a/javascript/progressbar.js b/javascript/progressbar.js index d58737c4..d6323ed9 100644 --- a/javascript/progressbar.js +++ b/javascript/progressbar.js @@ -3,7 +3,7 @@ global_progressbars = {} galleries = {} galleryObservers = {} -// this tracks laumnches of window.setTimeout for progressbar to prevent starting a new timeout when the previous is still running +// this tracks launches of window.setTimeout for progressbar to prevent starting a new timeout when the previous is still running timeoutIds = {} function 
check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip, id_interrupt, id_preview, id_gallery){ @@ -20,21 +20,21 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip var skip = id_skip ? gradioApp().getElementById(id_skip) : null var interrupt = gradioApp().getElementById(id_interrupt) - + if(opts.show_progress_in_title && progressbar && progressbar.offsetParent){ if(progressbar.innerText){ let newtitle = '[' + progressbar.innerText.trim() + '] Stable Diffusion'; if(document.title != newtitle){ - document.title = newtitle; + document.title = newtitle; } }else{ let newtitle = 'Stable Diffusion' if(document.title != newtitle){ - document.title = newtitle; + document.title = newtitle; } } } - + if(progressbar!= null && progressbar != global_progressbars[id_progressbar]){ global_progressbars[id_progressbar] = progressbar @@ -63,7 +63,7 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip skip.style.display = "none" } interrupt.style.display = "none" - + //disconnect observer once generation finished, so user can close selected image if they want if (galleryObservers[id_gallery]) { galleryObservers[id_gallery].disconnect(); diff --git a/javascript/ui.js b/javascript/ui.js index 2cb280e5..587dd782 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -100,7 +100,7 @@ function create_submit_args(args){ // As it is currently, txt2img and img2img send back the previous output args (txt2img_gallery, generation_info, html_info) whenever you generate a new image. // This can lead to uploading a huge gallery of previously generated images, which leads to an unnecessary delay between submitting and beginning to generate. - // I don't know why gradio is seding outputs along with inputs, but we can prevent sending the image gallery here, which seems to be an issue for some. + // I don't know why gradio is sending outputs along with inputs, but we can prevent sending the image gallery here, which seems to be an issue for some. 
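
The JavaScript above is the UI side of the same progress reporting that the /sdapi/v1/progress endpoint exposes earlier in this series. A minimal polling sketch against that endpoint might look like the following; the local URL and port and the requests library are assumptions, and skip_current_image is passed as a query parameter because the request model is injected with Depends().

import time
import requests

# Poll until the server reports no running job: the endpoint returns progress 0
# when idle and clamps progress to at least 0.01 while a job is active.
while True:
    resp = requests.get(
        "http://127.0.0.1:7860/sdapi/v1/progress",
        params={"skip_current_image": True},  # skip serializing the preview image
    )
    data = resp.json()
    if data["progress"] == 0:
        break
    print(f"{data['progress'] * 100:.1f}% done, ~{data['eta_relative']:.0f}s remaining")
    time.sleep(1)
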
// If gradio at some point stops sending outputs, this may break something if(Array.isArray(res[res.length - 3])){ res[res.length - 3] = null diff --git a/modules/api/api.py b/modules/api/api.py index 89935a70..33845045 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -67,10 +67,10 @@ def encode_pil_to_base64(image): class Api: def __init__(self, app: FastAPI, queue_lock: Lock): if shared.cmd_opts.api_auth: - self.credenticals = dict() + self.credentials = dict() for auth in shared.cmd_opts.api_auth.split(","): user, password = auth.split(":") - self.credenticals[user] = password + self.credentials[user] = password self.router = APIRouter() self.app = app @@ -93,7 +93,7 @@ class Api: self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[HypernetworkItem]) self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[FaceRestorerItem]) self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[RealesrganItem]) - self.add_api_route("/sdapi/v1/prompt-styles", self.get_promp_styles, methods=["GET"], response_model=List[PromptStyleItem]) + self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[PromptStyleItem]) self.add_api_route("/sdapi/v1/artist-categories", self.get_artists_categories, methods=["GET"], response_model=List[str]) self.add_api_route("/sdapi/v1/artists", self.get_artists, methods=["GET"], response_model=List[ArtistItem]) @@ -102,9 +102,9 @@ class Api: return self.app.add_api_route(path, endpoint, dependencies=[Depends(self.auth)], **kwargs) return self.app.add_api_route(path, endpoint, **kwargs) - def auth(self, credenticals: HTTPBasicCredentials = Depends(HTTPBasic())): - if credenticals.username in self.credenticals: - if compare_digest(credenticals.password, self.credenticals[credenticals.username]): + def auth(self, credentials: HTTPBasicCredentials = Depends(HTTPBasic())): + if credentials.username in self.credentials: + if compare_digest(credentials.password, self.credentials[credentials.username]): return True raise HTTPException(status_code=401, detail="Incorrect username or password", headers={"WWW-Authenticate": "Basic"}) @@ -239,7 +239,7 @@ class Api: def interrogateapi(self, interrogatereq: InterrogateRequest): image_b64 = interrogatereq.image if image_b64 is None: - raise HTTPException(status_code=404, detail="Image not found") + raise HTTPException(status_code=404, detail="Image not found") img = decode_base64_to_image(image_b64) img = img.convert('RGB') @@ -252,7 +252,7 @@ class Api: processed = deepbooru.model.tag(img) else: raise HTTPException(status_code=404, detail="Model not found") - + return InterrogateResponse(caption=processed) def interruptapi(self): @@ -308,7 +308,7 @@ class Api: def get_realesrgan_models(self): return [{"name":x.name,"path":x.data_path, "scale":x.scale} for x in get_realesrgan_models(None)] - def get_promp_styles(self): + def get_prompt_styles(self): styleList = [] for k in shared.prompt_styles.styles: style = shared.prompt_styles.styles[k] diff --git a/modules/api/models.py b/modules/api/models.py index f77951fc..a22bc6b3 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -128,7 +128,7 @@ class ExtrasBaseRequest(BaseModel): upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=4, description="By how much to upscale the image, only used when resize_mode=0.") upscaling_resize_w: int 
= Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.") upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. Only used when resize_mode=1.") - upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the choosen size?") + upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the chosen size?") upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}") upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}") extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.") diff --git a/modules/images.py b/modules/images.py index 8146f580..93a14289 100644 --- a/modules/images.py +++ b/modules/images.py @@ -429,7 +429,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i The directory to save the image. Note, the option `save_to_dirs` will make the image to be saved into a sub directory. basename (`str`): The base filename which will be applied to `filename pattern`. - seed, prompt, short_filename, + seed, prompt, short_filename, extension (`str`): Image file extension, default is `png`. pngsectionname (`str`): @@ -590,7 +590,7 @@ def read_info_from_image(image): Negative prompt: {json_info["uc"]} Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337""" except Exception: - print(f"Error parsing NovelAI iamge generation parameters:", file=sys.stderr) + print(f"Error parsing NovelAI image generation parameters:", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) return geninfo, items diff --git a/modules/processing.py b/modules/processing.py index 24c537d1..fe7f4faf 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -147,11 +147,11 @@ class StableDiffusionProcessing(): # The "masked-image" in this case will just be all zeros since the entire image is masked. image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device) - image_conditioning = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image_conditioning)) + image_conditioning = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image_conditioning)) # Add the fake full 1s mask to the first dimension. image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0) - image_conditioning = image_conditioning.to(x.dtype) + image_conditioning = image_conditioning.to(x.dtype) return image_conditioning @@ -199,7 +199,7 @@ class StableDiffusionProcessing(): source_image * (1.0 - conditioning_mask), getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) ) - + # Encode the new masked image using first stage of network. 
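
The credential handling corrected in the api.py hunk of this commit expects --api-auth to carry comma-separated user:password pairs and validates them as HTTP Basic auth on every route registered through add_api_route. A minimal client sketch is below; the user/password pair, the local URL and port, and the requests library are placeholders and assumptions, not part of these patches.

import requests

# Credentials are sent as HTTP Basic, matching the HTTPBasic dependency shown above;
# the server compares them with compare_digest and answers 401 on a mismatch.
resp = requests.get(
    "http://127.0.0.1:7860/sdapi/v1/progress",
    auth=("user", "password"),  # placeholder pair matching --api-auth "user:password"
)
resp.raise_for_status()
print(resp.json()["progress"])
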
conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image)) @@ -537,7 +537,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: for n in range(p.n_iter): if state.skipped: state.skipped = False - + if state.interrupted: break @@ -612,7 +612,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: image.info["parameters"] = text output_images.append(image) - del x_samples_ddim + del x_samples_ddim devices.torch_gc() @@ -704,7 +704,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2] - """saves image before applying hires fix, if enabled in options; takes as an arguyment either an image or batch with latent space images""" + """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images""" def save_intermediate(image, index): if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix: return @@ -720,7 +720,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear") - # Avoid making the inpainting conditioning unless necessary as + # Avoid making the inpainting conditioning unless necessary as # this does need some extra compute to decode / encode the image again. if getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) < 1.0: image_conditioning = self.img2img_image_conditioning(decode_first_stage(self.sd_model, samples), samples) diff --git a/modules/safe.py b/modules/safe.py index 10460ad0..20e9d2fa 100644 --- a/modules/safe.py +++ b/modules/safe.py @@ -80,7 +80,7 @@ def check_pt(filename, extra_handler): # new pytorch format is a zip file with zipfile.ZipFile(filename) as z: check_zip_filenames(filename, z.namelist()) - + # find filename of data.pkl in zip file: '/data.pkl' data_pkl_filenames = [f for f in z.namelist() if data_pkl_re.match(f)] if len(data_pkl_filenames) == 0: @@ -108,7 +108,7 @@ def load(filename, *args, **kwargs): def load_with_extra(filename, extra_handler=None, *args, **kwargs): """ - this functon is intended to be used by extensions that want to load models with + this function is intended to be used by extensions that want to load models with some extra classes in them that the usual unpickler would find suspicious. Use the extra_handler argument to specify a function that takes module and field name as text, diff --git a/modules/scripts.py b/modules/scripts.py index 23ca195d..722f8685 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -36,7 +36,7 @@ class Script: def ui(self, is_img2img): """this function should create gradio UI elements. See https://gradio.app/docs/#components The return value should be an array of all components that are used in processing. - Values of those returned componenbts will be passed to run() and process() functions. + Values of those returned components will be passed to run() and process() functions. 
""" pass @@ -47,7 +47,7 @@ class Script: This function should return: - False if the script should not be shown in UI at all - - True if the script should be shown in UI if it's scelected in the scripts drowpdown + - True if the script should be shown in UI if it's selected in the scripts dropdown - script.AlwaysVisible if the script should be shown in UI at all times """ diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 938f9a58..d72f83fd 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -209,7 +209,7 @@ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=F else: x_in = torch.cat([x] * 2) t_in = torch.cat([t] * 2) - + if isinstance(c, dict): assert isinstance(unconditional_conditioning, dict) c_in = dict() @@ -278,7 +278,7 @@ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=F x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) return x_prev, pred_x0, e_t - + # ================================================================================================= # Monkey patch LatentInpaintDiffusion to load the checkpoint with a proper config. # Adapted from: @@ -326,7 +326,7 @@ def do_inpainting_hijack(): # most of this stuff seems to no longer be needed because it is already included into SD2.0 # LatentInpaintDiffusion remains because SD2.0's LatentInpaintDiffusion can't be loaded without specifying a checkpoint # p_sample_plms is needed because PLMS can't work with dicts as conditionings - # this file should be cleaned up later if weverything tuens out to work fine + # this file should be cleaned up later if everything turns out to work fine # ldm.models.diffusion.ddpm.get_unconditional_conditioning = get_unconditional_conditioning ldm.models.diffusion.ddpm.LatentInpaintDiffusion = LatentInpaintDiffusion diff --git a/modules/sd_hijack_unet.py b/modules/sd_hijack_unet.py index 1b9d7757..18daf8c1 100644 --- a/modules/sd_hijack_unet.py +++ b/modules/sd_hijack_unet.py @@ -4,7 +4,7 @@ import torch class TorchHijackForUnet: """ This is torch, but with cat that resizes tensors to appropriate dimensions if they do not match; - this makes it possible to create pictures with dimensions that are muliples of 8 rather than 64 + this makes it possible to create pictures with dimensions that are multiples of 8 rather than 64 """ def __getattr__(self, item): diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index 2dc64c3c..88d68c76 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -28,9 +28,9 @@ class DatasetEntry: class PersonalizedBase(Dataset): - def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once'): + def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once'): re_word = re.compile(shared.opts.dataset_filename_word_regex) if len(shared.opts.dataset_filename_word_regex) > 0 else None - + self.placeholder_token = placeholder_token self.width = width @@ -50,14 +50,14 @@ class PersonalizedBase(Dataset): self.image_paths = [os.path.join(data_root, file_path) for file_path in 
os.listdir(data_root)] - + self.shuffle_tags = shuffle_tags self.tag_drop_out = tag_drop_out print("Preparing dataset...") for path in tqdm.tqdm(self.image_paths): if shared.state.interrupted: - raise Exception("inturrupted") + raise Exception("interrupted") try: image = Image.open(path).convert('RGB').resize((self.width, self.height), PIL.Image.BICUBIC) except Exception: @@ -144,7 +144,7 @@ class PersonalizedDataLoader(DataLoader): self.collate_fn = collate_wrapper_random else: self.collate_fn = collate_wrapper - + class BatchLoader: def __init__(self, data): diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index e28c357a..daf3997b 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -133,7 +133,7 @@ class EmbeddingDatabase: process_file(fullfn, fn) except Exception: - print(f"Error loading emedding {fn}:", file=sys.stderr) + print(f"Error loading embedding {fn}:", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) continue @@ -194,7 +194,7 @@ def write_loss(log_directory, filename, step, epoch_len, values): csv_writer.writeheader() epoch = (step - 1) // epoch_len - epoch_step = (step - 1) % epoch_len + epoch_step = (step - 1) % epoch_len csv_writer.writerow({ "step": step, @@ -270,9 +270,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ # dataset loading may take a while, so input validations and early returns should be done before this shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." old_parallel_processing_allowed = shared.parallel_processing_allowed - + pin_memory = shared.opts.pin_memory - + ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method) latent_sampling_method = ds.latent_sampling_method @@ -295,12 +295,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ loss_step = 0 _loss_step = 0 #internal - + last_saved_file = "" last_saved_image = "" forced_filename = "" embedding_yet_to_be_embedded = False - + pbar = tqdm.tqdm(total=steps - initial_step) try: for i in range((steps-initial_step) * gradient_step): @@ -327,10 +327,10 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ c = shared.sd_model.cond_stage_model(batch.cond_text) loss = shared.sd_model(x, c)[0] / gradient_step del x - + _loss_step += loss.item() scaler.scale(loss).backward() - + # go back until we reach gradient accumulation steps if (j + 1) % gradient_step != 0: continue diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index c53ca28c..4c79eaef 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -18,7 +18,7 @@ def draw_xy_grid(xs, ys, x_label, y_label, cell): ver_texts = [[images.GridAnnotation(y_label(y))] for y in ys] hor_texts = [[images.GridAnnotation(x_label(x))] for x in xs] - first_pocessed = None + first_processed = None state.job_count = len(xs) * len(ys) @@ -27,17 +27,17 @@ def draw_xy_grid(xs, ys, x_label, y_label, cell): state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * 
len(ys)}" processed = cell(x, y) - if first_pocessed is None: - first_pocessed = processed + if first_processed is None: + first_processed = processed res.append(processed.images[0]) grid = images.image_grid(res, rows=len(ys)) grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts) - first_pocessed.images = [grid] + first_processed.images = [grid] - return first_pocessed + return first_processed class Script(scripts.Script): diff --git a/webui.py b/webui.py index c2d0c6be..4b32e77d 100644 --- a/webui.py +++ b/webui.py @@ -153,8 +153,8 @@ def webui(): # gradio uses a very open CORS policy via app.user_middleware, which makes it possible for # an attacker to trick the user into opening a malicious HTML page, which makes a request to the - # running web ui and do whatever the attcker wants, including installing an extension and - # runnnig its code. We disable this here. Suggested by RyotaK. + # running web ui and do whatever the attacker wants, including installing an extension and + # running its code. We disable this here. Suggested by RyotaK. app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware'] setup_cors(app) -- cgit v1.2.3 From 5f1dfbbc959855fd90ba80c0c76301d2063772fa Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Sat, 24 Dec 2022 18:02:22 -0500 Subject: implement train api --- modules/api/api.py | 94 ++++++++++++++++++++++++++++++++++- modules/api/models.py | 9 ++++ modules/hypernetworks/hypernetwork.py | 26 ++++++++++ modules/hypernetworks/ui.py | 31 ++---------- 4 files changed, 132 insertions(+), 28 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index b43dd16b..1ceba75d 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -10,13 +10,17 @@ from fastapi.security import HTTPBasic, HTTPBasicCredentials from secrets import compare_digest import modules.shared as shared -from modules import sd_samplers, deepbooru +from modules import sd_samplers, deepbooru, sd_hijack from modules.api.models import * from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.extras import run_extras, run_pnginfo +from modules.textual_inversion.textual_inversion import create_embedding, train_embedding +from modules.textual_inversion.preprocess import preprocess +from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork from PIL import PngImagePlugin,Image from modules.sd_models import checkpoints_list from modules.realesrgan_model import get_realesrgan_models +from modules import devices from typing import List def upscaler_to_index(name: str): @@ -97,6 +101,11 @@ class Api: self.add_api_route("/sdapi/v1/artist-categories", self.get_artists_categories, methods=["GET"], response_model=List[str]) self.add_api_route("/sdapi/v1/artists", self.get_artists, methods=["GET"], response_model=List[ArtistItem]) self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"]) + self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=CreateResponse) + self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=CreateResponse) + self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=PreprocessResponse) + self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], 
response_model=TrainResponse) + self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=TrainResponse) def add_api_route(self, path: str, endpoint, **kwargs): if shared.cmd_opts.api_auth: @@ -326,6 +335,89 @@ class Api: def refresh_checkpoints(self): shared.refresh_checkpoints() + def create_embedding(self, args: dict): + try: + shared.state.begin() + filename = create_embedding(**args) # create empty embedding + sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() # reload embeddings so new one can be immediately used + shared.state.end() + return CreateResponse(info = "create embedding filename: {filename}".format(filename = filename)) + except AssertionError as e: + shared.state.end() + return TrainResponse(info = "create embedding error: {error}".format(error = e)) + + def create_hypernetwork(self, args: dict): + try: + shared.state.begin() + filename = create_hypernetwork(**args) # create empty embedding + shared.state.end() + return CreateResponse(info = "create hypernetwork filename: {filename}".format(filename = filename)) + except AssertionError as e: + shared.state.end() + return TrainResponse(info = "create hypernetwork error: {error}".format(error = e)) + + def preprocess(self, args: dict): + try: + shared.state.begin() + preprocess(**args) # quick operation unless blip/booru interrogation is enabled + shared.state.end() + return PreprocessResponse(info = 'preprocess complete') + except KeyError as e: + shared.state.end() + return PreprocessResponse(info = "preprocess error: invalid token: {error}".format(error = e)) + except AssertionError as e: + shared.state.end() + return PreprocessResponse(info = "preprocess error: {error}".format(error = e)) + except FileNotFoundError as e: + shared.state.end() + return PreprocessResponse(info = 'preprocess error: {error}'.format(error = e)) + + def train_embedding(self, args: dict): + try: + shared.state.begin() + apply_optimizations = shared.opts.training_xattention_optimizations + error = None + filename = '' + if not apply_optimizations: + sd_hijack.undo_optimizations() + try: + embedding, filename = train_embedding(**args) # can take a long time to complete + except Exception as e: + error = e + finally: + if not apply_optimizations: + sd_hijack.apply_optimizations() + shared.state.end() + return TrainResponse(info = "train embedding complete: filename: {filename} error: {error}".format(filename = filename, error = error)) + except AssertionError as msg: + shared.state.end() + return TrainResponse(info = "train embedding error: {msg}".format(msg = msg)) + + def train_hypernetwork(self, args: dict): + try: + shared.state.begin() + initial_hypernetwork = shared.loaded_hypernetwork + apply_optimizations = shared.opts.training_xattention_optimizations + error = None + filename = '' + if not apply_optimizations: + sd_hijack.undo_optimizations() + try: + hypernetwork, filename = train_hypernetwork(*args) + except Exception as e: + error = e + finally: + shared.loaded_hypernetwork = initial_hypernetwork + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) + if not apply_optimizations: + sd_hijack.apply_optimizations() + shared.state.end() + return TrainResponse(info = "train embedding complete: filename: {filename} error: {error}".format(filename = filename, error = error)) + except AssertionError as msg: + shared.state.end() + return TrainResponse(info = "train embedding error: {error}".format(error = error)) + 
def launch(self, server_name, port): self.app.include_router(self.router) uvicorn.run(self.app, host=server_name, port=port) diff --git a/modules/api/models.py b/modules/api/models.py index a22bc6b3..c446ce7a 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -175,6 +175,15 @@ class InterrogateRequest(BaseModel): class InterrogateResponse(BaseModel): caption: str = Field(default=None, title="Caption", description="The generated caption for the image.") +class TrainResponse(BaseModel): + info: str = Field(title="Train info", description="Response string from train embedding or hypernetwork task.") + +class CreateResponse(BaseModel): + info: str = Field(title="Create info", description="Response string from create embedding or hypernetwork task.") + +class PreprocessResponse(BaseModel): + info: str = Field(title="Preprocess info", description="Response string from preprocessing task.") + fields = {} for key, metadata in opts.data_labels.items(): value = opts.data.get(key) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index c406ffb3..3182ff03 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -378,6 +378,32 @@ def report_statistics(loss_info:dict): print(e) +def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False): + # Remove illegal characters from name. + name = "".join( x for x in name if (x.isalnum() or x in "._- ")) + + fn = os.path.join(shared.cmd_opts.hypernetwork_dir, f"{name}.pt") + if not overwrite_old: + assert not os.path.exists(fn), f"file {fn} already exists" + + if type(layer_structure) == str: + layer_structure = [float(x.strip()) for x in layer_structure.split(",")] + + hypernet = modules.hypernetworks.hypernetwork.Hypernetwork( + name=name, + enable_sizes=[int(x) for x in enable_sizes], + layer_structure=layer_structure, + activation_func=activation_func, + weight_init=weight_init, + add_layer_norm=add_layer_norm, + use_dropout=use_dropout, + ) + hypernet.save(fn) + + shared.reload_hypernetworks() + + return fn + def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, steps, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): # images allows training previews to have infotext. Importing it at the top causes a circular import problem. 
diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py index c2d4b51c..e7f9e593 100644 --- a/modules/hypernetworks/ui.py +++ b/modules/hypernetworks/ui.py @@ -3,39 +3,16 @@ import os import re import gradio as gr -import modules.textual_inversion.preprocess -import modules.textual_inversion.textual_inversion +import modules.hypernetworks.hypernetwork from modules import devices, sd_hijack, shared -from modules.hypernetworks import hypernetwork not_available = ["hardswish", "multiheadattention"] -keys = list(x for x in hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available) +keys = list(x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available) def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False): - # Remove illegal characters from name. - name = "".join( x for x in name if (x.isalnum() or x in "._- ")) + filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout) - fn = os.path.join(shared.cmd_opts.hypernetwork_dir, f"{name}.pt") - if not overwrite_old: - assert not os.path.exists(fn), f"file {fn} already exists" - - if type(layer_structure) == str: - layer_structure = [float(x.strip()) for x in layer_structure.split(",")] - - hypernet = modules.hypernetworks.hypernetwork.Hypernetwork( - name=name, - enable_sizes=[int(x) for x in enable_sizes], - layer_structure=layer_structure, - activation_func=activation_func, - weight_init=weight_init, - add_layer_norm=add_layer_norm, - use_dropout=use_dropout, - ) - hypernet.save(fn) - - shared.reload_hypernetworks() - - return gr.Dropdown.update(choices=sorted([x for x in shared.hypernetworks.keys()])), f"Created: {fn}", "" + return gr.Dropdown.update(choices=sorted([x for x in shared.hypernetworks.keys()])), f"Created: {filename}", "" def train_hypernetwork(*args): -- cgit v1.2.3 From b5819d9bf1794071139c640b5f1e72c84a0e051a Mon Sep 17 00:00:00 2001 From: Philpax Date: Mon, 2 Jan 2023 10:17:33 +1100 Subject: feat(api): add /sdapi/v1/embeddings --- modules/api/api.py | 8 ++++++++ modules/api/models.py | 3 +++ 2 files changed, 11 insertions(+) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 11daff0d..30bf3dac 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -100,6 +100,7 @@ class Api: self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[PromptStyleItem]) self.add_api_route("/sdapi/v1/artist-categories", self.get_artists_categories, methods=["GET"], response_model=List[str]) self.add_api_route("/sdapi/v1/artists", self.get_artists, methods=["GET"], response_model=List[ArtistItem]) + self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=EmbeddingsResponse) self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"]) self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=CreateResponse) self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=CreateResponse) @@ -327,6 +328,13 @@ class Api: def get_artists(self): return [{"name":x[0], "score":x[1], "category":x[2]} for x in shared.artist_db.artists] + def get_embeddings(self): + db = 
sd_hijack.model_hijack.embedding_db + return { + "loaded": sorted(db.word_embeddings.keys()), + "skipped": sorted(db.skipped_embeddings), + } + def refresh_checkpoints(self): shared.refresh_checkpoints() diff --git a/modules/api/models.py b/modules/api/models.py index c446ce7a..a8472dc9 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -249,3 +249,6 @@ class ArtistItem(BaseModel): score: float = Field(title="Score") category: str = Field(title="Category") +class EmbeddingsResponse(BaseModel): + loaded: List[str] = Field(title="Loaded", description="Embeddings loaded for the current model") + skipped: List[str] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)") \ No newline at end of file -- cgit v1.2.3 From c65909ad16a1962129114c6251de092f49479b06 Mon Sep 17 00:00:00 2001 From: Philpax Date: Mon, 2 Jan 2023 12:21:22 +1100 Subject: feat(api): return more data for embeddings --- modules/api/api.py | 17 +++++++++++++++-- modules/api/models.py | 11 +++++++++-- modules/textual_inversion/textual_inversion.py | 8 ++++---- 3 files changed, 28 insertions(+), 8 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 30bf3dac..9c670f00 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -330,9 +330,22 @@ class Api: def get_embeddings(self): db = sd_hijack.model_hijack.embedding_db + + def convert_embedding(embedding): + return { + "step": embedding.step, + "sd_checkpoint": embedding.sd_checkpoint, + "sd_checkpoint_name": embedding.sd_checkpoint_name, + "shape": embedding.shape, + "vectors": embedding.vectors, + } + + def convert_embeddings(embeddings): + return {embedding.name: convert_embedding(embedding) for embedding in embeddings.values()} + return { - "loaded": sorted(db.word_embeddings.keys()), - "skipped": sorted(db.skipped_embeddings), + "loaded": convert_embeddings(db.word_embeddings), + "skipped": convert_embeddings(db.skipped_embeddings), } def refresh_checkpoints(self): diff --git a/modules/api/models.py b/modules/api/models.py index a8472dc9..4a632c68 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -249,6 +249,13 @@ class ArtistItem(BaseModel): score: float = Field(title="Score") category: str = Field(title="Category") +class EmbeddingItem(BaseModel): + step: Optional[int] = Field(title="Step", description="The number of steps that were used to train this embedding, if available") + sd_checkpoint: Optional[str] = Field(title="SD Checkpoint", description="The hash of the checkpoint this embedding was trained on, if available") + sd_checkpoint_name: Optional[str] = Field(title="SD Checkpoint Name", description="The name of the checkpoint this embedding was trained on, if available. 
Note that this is the name that was used by the trainer; for a stable identifier, use `sd_checkpoint` instead") + shape: int = Field(title="Shape", description="The length of each individual vector in the embedding") + vectors: int = Field(title="Vectors", description="The number of vectors in the embedding") + class EmbeddingsResponse(BaseModel): - loaded: List[str] = Field(title="Loaded", description="Embeddings loaded for the current model") - skipped: List[str] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)") \ No newline at end of file + loaded: Dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model") + skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)") \ No newline at end of file diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 1e5722e7..fd253477 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -59,7 +59,7 @@ class EmbeddingDatabase: def __init__(self, embeddings_dir): self.ids_lookup = {} self.word_embeddings = {} - self.skipped_embeddings = [] + self.skipped_embeddings = {} self.dir_mtime = None self.embeddings_dir = embeddings_dir self.expected_shape = -1 @@ -91,7 +91,7 @@ class EmbeddingDatabase: self.dir_mtime = mt self.ids_lookup.clear() self.word_embeddings.clear() - self.skipped_embeddings = [] + self.skipped_embeddings.clear() self.expected_shape = self.get_expected_shape() def process_file(path, filename): @@ -136,7 +136,7 @@ class EmbeddingDatabase: if self.expected_shape == -1 or self.expected_shape == embedding.shape: self.register_embedding(embedding, shared.sd_model) else: - self.skipped_embeddings.append(name) + self.skipped_embeddings[name] = embedding for fn in os.listdir(self.embeddings_dir): try: @@ -153,7 +153,7 @@ class EmbeddingDatabase: print(f"Textual inversion embeddings loaded({len(self.word_embeddings)}): {', '.join(self.word_embeddings.keys())}") if len(self.skipped_embeddings) > 0: - print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings)}") + print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings.keys())}") def find_embedding_at_position(self, tokens, offset): token = tokens[offset] -- cgit v1.2.3 From 1288a3bb7d21064e5bd0af7158a3840886027c51 Mon Sep 17 00:00:00 2001 From: Suffocate <70031311+lolsuffocate@users.noreply.github.com> Date: Wed, 4 Jan 2023 20:36:30 +0000 Subject: Use the read_info_from_image function directly --- modules/api/api.py | 16 ++++++++++++---- modules/api/models.py | 5 +++-- 2 files changed, 15 insertions(+), 6 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 48a70a44..2103709b 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -11,10 +11,10 @@ from fastapi.security import HTTPBasic, HTTPBasicCredentials from secrets import compare_digest import modules.shared as shared -from modules import sd_samplers, deepbooru, sd_hijack +from modules import sd_samplers, deepbooru, sd_hijack, images from modules.api.models import * from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images -from modules.extras import run_extras, run_pnginfo +from 
modules.extras import run_extras from modules.textual_inversion.textual_inversion import create_embedding, train_embedding from modules.textual_inversion.preprocess import preprocess from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork @@ -233,9 +233,17 @@ class Api: if(not req.image.strip()): return PNGInfoResponse(info="") - result = run_pnginfo(decode_base64_to_image(req.image.strip())) + image = decode_base64_to_image(req.image.strip()) + if image is None: + return PNGInfoResponse(info="") + + geninfo, items = images.read_info_from_image(image) + if geninfo is None: + geninfo = "" + + items = {**{'parameters': geninfo}, **items} - return PNGInfoResponse(info=result[1]) + return PNGInfoResponse(info=geninfo, items=items) def progressapi(self, req: ProgressRequest = Depends()): # copy from check_progress_call of ui.py diff --git a/modules/api/models.py b/modules/api/models.py index 4a632c68..d8198a27 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -157,7 +157,8 @@ class PNGInfoRequest(BaseModel): image: str = Field(title="Image", description="The base64 encoded PNG image") class PNGInfoResponse(BaseModel): - info: str = Field(title="Image info", description="A string with all the info the image had") + info: str = Field(title="Image info", description="A string with the parameters used to generate the image") + items: dict = Field(title="Items", description="An object containing all the info the image had") class ProgressRequest(BaseModel): skip_current_image: bool = Field(default=False, title="Skip current image", description="Skip current image serialization") @@ -258,4 +259,4 @@ class EmbeddingItem(BaseModel): class EmbeddingsResponse(BaseModel): loaded: Dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model") - skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)") \ No newline at end of file + skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)") -- cgit v1.2.3 From b5253f0dab529707f1fe2e11211a10ce2f264617 Mon Sep 17 00:00:00 2001 From: noodleanon <122053346+noodleanon@users.noreply.github.com> Date: Thu, 5 Jan 2023 21:21:48 +0000 Subject: allow img2img api to run scripts --- modules/api/api.py | 27 ++++++++++++++++++++++++--- modules/api/models.py | 2 +- modules/processing.py | 4 ++-- 3 files changed, 27 insertions(+), 6 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 2103709b..aa62a42e 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -11,7 +11,7 @@ from fastapi.security import HTTPBasic, HTTPBasicCredentials from secrets import compare_digest import modules.shared as shared -from modules import sd_samplers, deepbooru, sd_hijack, images +from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui from modules.api.models import * from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.extras import run_extras @@ -28,8 +28,13 @@ def upscaler_to_index(name: str): try: return [x.name.lower() for x in shared.sd_upscalers].index(name.lower()) except: - raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be on of these: {' , '.join([x.name for x in sd_upscalers])}") + raise HTTPException(status_code=400, 
detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in sd_upscalers])}") +def script_name_to_index(name, scripts): + try: + return [script.title().lower() for script in scripts].index(name.lower()) + except: + raise HTTPException(status_code=422, detail=f"Script '{name}' not found") def validate_sampler_name(name): config = sd_samplers.all_samplers_map.get(name, None) @@ -170,6 +175,14 @@ class Api: if init_images is None: raise HTTPException(status_code=404, detail="Init image not found") + if img2imgreq.script_name is not None: + if scripts.scripts_img2img.scripts == []: + scripts.scripts_img2img.initialize_scripts(True) + ui.create_ui() + + script_idx = script_name_to_index(img2imgreq.script_name, scripts.scripts_img2img.selectable_scripts) + script = scripts.scripts_img2img.selectable_scripts[script_idx] + mask = img2imgreq.mask if mask: mask = decode_base64_to_image(mask) @@ -186,13 +199,21 @@ class Api: args = vars(populate) args.pop('include_init_images', None) # this is meant to be done by "exclude": True in model, but it's for a reason that I cannot determine. + args.pop('script_name', None) with self.queue_lock: p = StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args) p.init_images = [decode_base64_to_image(x) for x in init_images] shared.state.begin() - processed = process_images(p) + if 'script' in locals(): + p.outpath_grids = opts.outdir_img2img_grids + p.outpath_samples = opts.outdir_img2img_samples + p.script_args = [script_idx + 1] + [None] * (script.args_from - 1) + p.script_args + processed = scripts.scripts_img2img.run(p, *p.script_args) + else: + processed = process_images(p) + shared.state.end() b64images = list(map(encode_pil_to_base64, processed.images)) diff --git a/modules/api/models.py b/modules/api/models.py index d8198a27..862477e7 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -106,7 +106,7 @@ StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator( StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( "StableDiffusionProcessingImg2Img", StableDiffusionProcessingImg2Img, - [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}, {"key": "include_init_images", "type": bool, "default": False, "exclude" : True}] + [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}, {"key": "include_init_images", "type": bool, "default": False, "exclude" : True}, {"key": "script_name", "type": str, "default": None}, {"key": "script_args", "type": list, "default": []}] ).generate_model() class TextToImageResponse(BaseModel): diff --git a/modules/processing.py b/modules/processing.py index a408d622..d5ac7eb1 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -98,7 +98,7 @@ class StableDiffusionProcessing(): """ The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing """ - def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: 
int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None): + def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): if sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) @@ -149,7 +149,7 @@ class StableDiffusionProcessing(): self.seed_resize_from_w = 0 self.scripts = None - self.script_args = None + self.script_args = script_args self.all_prompts = None self.all_negative_prompts = None self.all_seeds = None -- cgit v1.2.3 From 82c1f10b144f733460feead0bdc37a861489dc57 Mon Sep 17 00:00:00 2001 From: Dean Hopkins Date: Fri, 6 Jan 2023 22:00:12 +0000 Subject: increase upscale api validation limit --- modules/api/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules/api/models.py') diff --git a/modules/api/models.py b/modules/api/models.py index f77951fc..22b88c59 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -125,7 +125,7 @@ class ExtrasBaseRequest(BaseModel): gfpgan_visibility: float = Field(default=0, title="GFPGAN Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of GFPGAN, values should be between 0 and 1.") codeformer_visibility: float = Field(default=0, title="CodeFormer Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of CodeFormer, values should be between 0 and 1.") codeformer_weight: float = Field(default=0, title="CodeFormer Weight", ge=0, le=1, allow_inf_nan=False, description="Sets the weight of CodeFormer, values should be between 0 and 1.") - upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=4, description="By how much to upscale the image, only used when resize_mode=0.") + upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=8, description="By how much to upscale the image, only used when resize_mode=0.") upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to 
hit. Only used when resize_mode=1.") upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. Only used when resize_mode=1.") upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the choosen size?") -- cgit v1.2.3 From 47534577eda63b0db1eeb8921c2a161773ec434c Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Sat, 7 Jan 2023 07:51:35 -0500 Subject: api-get-memory --- modules/api/api.py | 37 +++++++++++++++++++++++++++++++++++++ modules/api/models.py | 4 ++++ 2 files changed, 41 insertions(+) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 2103709b..d2222b18 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -130,6 +130,7 @@ class Api: self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=PreprocessResponse) self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=TrainResponse) self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=TrainResponse) + self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=MemoryResponse) def add_api_route(self, path: str, endpoint, **kwargs): if shared.cmd_opts.api_auth: @@ -465,6 +466,42 @@ class Api: shared.state.end() return TrainResponse(info = "train embedding error: {error}".format(error = error)) + def get_memory(self): + def gb(val: float): + return round(val / 1024 / 1024 / 1024, 2) + try: + import os, psutil + process = psutil.Process(os.getpid()) + res = process.memory_info() + ram_total = 100 * res.rss / process.memory_percent() + ram = { 'free': gb(ram_total - res.rss), 'used': gb(res.rss), 'total': gb(ram_total) } + except Exception as err: + ram = { 'error': f'{err}' } + try: + import torch + if torch.cuda.is_available(): + s = torch.cuda.mem_get_info() + system = { 'free': gb(s[0]), 'used': gb(s[1] - s[0]), 'total': gb(s[1]) } + s = dict(torch.cuda.memory_stats(shared.device)) + allocated = { 'current': gb(s['allocated_bytes.all.current']), 'peak': gb(s['allocated_bytes.all.peak']) } + reserved = { 'current': gb(s['reserved_bytes.all.current']), 'peak': gb(s['reserved_bytes.all.peak']) } + active = { 'current': gb(s['active_bytes.all.current']), 'peak': gb(s['active_bytes.all.peak']) } + inactive = { 'current': gb(s['inactive_split_bytes.all.current']), 'peak': gb(s['inactive_split_bytes.all.peak']) } + warnings = { 'retries': s['num_alloc_retries'], 'oom': s['num_ooms'] } + cuda = { + 'system': system, + 'active': active, + 'allocated': allocated, + 'reserved': reserved, + 'inactive': inactive, + 'events': warnings, + } + else: + cuda = { 'error': 'unavailable' } + except Exception as err: + cuda = { 'error': f'{err}' } + return MemoryResponse(ram = ram, cuda = cuda) + def launch(self, server_name, port): self.app.include_router(self.router) uvicorn.run(self.app, host=server_name, port=port) diff --git a/modules/api/models.py b/modules/api/models.py index 5fa63774..49bf1e7a 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -260,3 +260,7 @@ class EmbeddingItem(BaseModel): class EmbeddingsResponse(BaseModel): loaded: Dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model") skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture 
incompatibility)") + +class MemoryResponse(BaseModel): + ram: dict[str, str] | dict[str, float] = Field(title="RAM", description="System memory stats") + cuda: dict[str, str] | dict[str, dict] = Field(title="CUDA", description="nVidia CUDA memory stats") -- cgit v1.2.3 From d38ede71d5330958f4bbac5f99c1be3c146b506a Mon Sep 17 00:00:00 2001 From: noodleanon <122053346+noodleanon@users.noreply.github.com> Date: Sat, 7 Jan 2023 14:21:31 +0000 Subject: Added script support in txt2img endpoint --- modules/api/api.py | 22 +++++++++++++++++++--- modules/api/models.py | 2 +- 2 files changed, 20 insertions(+), 4 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index aa62a42e..0e8ea263 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -149,6 +149,14 @@ class Api: raise HTTPException(status_code=401, detail="Incorrect username or password", headers={"WWW-Authenticate": "Basic"}) def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): + if txt2imgreq.script_name is not None: + if scripts.scripts_txt2img.scripts == []: + scripts.scripts_txt2img.initialize_scripts(True) + ui.create_ui() + + script_idx = script_name_to_index(txt2imgreq.script_name, scripts.scripts_txt2img.selectable_scripts) + script = scripts.scripts_txt2img.selectable_scripts[script_idx] + populate = txt2imgreq.copy(update={ # Override __init__ params "sampler_name": validate_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index), "do_not_save_samples": True, @@ -158,11 +166,20 @@ class Api: if populate.sampler_name: populate.sampler_index = None # prevent a warning later on + args = vars(populate) + args.pop('script_name', None) + with self.queue_lock: - p = StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **vars(populate)) + p = StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **args) shared.state.begin() - processed = process_images(p) + if 'script' in locals(): + p.outpath_grids = opts.outdir_txt2img_grids + p.outpath_samples = opts.outdir_txt2img_samples + p.script_args = [script_idx + 1] + [None] * (script.args_from - 1) + p.script_args + processed = scripts.scripts_txt2img.run(p, *p.script_args) + else: + processed = process_images(p) shared.state.end() @@ -213,7 +230,6 @@ class Api: processed = scripts.scripts_img2img.run(p, *p.script_args) else: processed = process_images(p) - shared.state.end() b64images = list(map(encode_pil_to_base64, processed.images)) diff --git a/modules/api/models.py b/modules/api/models.py index c85eb94d..ce43c858 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -100,7 +100,7 @@ class PydanticModelGenerator: StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator( "StableDiffusionProcessingTxt2Img", StableDiffusionProcessingTxt2Img, - [{"key": "sampler_index", "type": str, "default": "Euler"}] + [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "script_name", "type": str, "default": None}, {"key": "script_args", "type": list, "default": []}] ).generate_model() StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( -- cgit v1.2.3 From 2275f130bfe437c3245a66559f92af94d0e4d8ff Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Mon, 9 Jan 2023 21:23:58 -0500 Subject: relax reponse type check enforcement --- modules/api/models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/models.py b/modules/api/models.py index 880edde6..034b4aa0 100644 --- a/modules/api/models.py +++ 
b/modules/api/models.py @@ -262,5 +262,5 @@ class EmbeddingsResponse(BaseModel): skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)") class MemoryResponse(BaseModel): - ram: dict[str, str] | dict[str, float] = Field(title="RAM", description="System memory stats") - cuda: dict[str, str] | dict[str, dict] = Field(title="CUDA", description="nVidia CUDA memory stats") + ram: dict = Field(title="RAM", description="System memory stats") + cuda: dict = Field(title="CUDA", description="nVidia CUDA memory stats") -- cgit v1.2.3 From 39ea251945d70efcf9b59d44eb0e71269d754aa4 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Wed, 11 Jan 2023 10:23:51 -0500 Subject: add textinfo to progress response --- modules/api/api.py | 4 ++-- modules/api/models.py | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'modules/api/models.py') diff --git a/modules/api/api.py b/modules/api/api.py index 6c564ad8..5767ba90 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -286,7 +286,7 @@ class Api: # copy from check_progress_call of ui.py if shared.state.job_count == 0: - return ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict()) + return ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo) # avoid dividing zero progress = 0.01 @@ -308,7 +308,7 @@ class Api: if shared.state.current_image and not req.skip_current_image: current_image = encode_pil_to_base64(shared.state.current_image) - return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image) + return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo) def interrogateapi(self, interrogatereq: InterrogateRequest): image_b64 = interrogatereq.image diff --git a/modules/api/models.py b/modules/api/models.py index 034b4aa0..c78095ca 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -168,6 +168,7 @@ class ProgressResponse(BaseModel): eta_relative: float = Field(title="ETA in secs") state: dict = Field(title="State", description="The current state snapshot") current_image: str = Field(default=None, title="Current image", description="The current image in base64 format. opts.show_progress_every_n_steps is required for this to work.") + textinfo: str = Field(default=None, title="Info text", description="Info text used by WebUI.") class InterrogateRequest(BaseModel): image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.") -- cgit v1.2.3
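A few client-side sketches follow, tied to the API changes in the log above; the endpoint paths and payload fields are taken from the diffs, while the host, port and concrete values are assumptions about a local install, not anything fixed by the API. When the server is launched with API authentication (the api-auth option, given as comma-separated user:password pairs as in the credentials fix above), every route registered through add_api_route expects HTTP Basic credentials:

    import requests

    BASE = "http://127.0.0.1:7860"   # assumed local instance
    AUTH = ("user", "password")      # one of the user:password pairs the server was started with

    # Any /sdapi/v1/* route goes through the same HTTPBasic dependency,
    # so credentials are passed the same way on every call.
    resp = requests.get(f"{BASE}/sdapi/v1/prompt-styles", auth=AUTH)
    resp.raise_for_status()          # a 401 here means the username/password pair is wrong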
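The create and train routes from the train-api commit (5f1dfbbc) each take one JSON object that is unpacked into the underlying function, so the accepted keys are simply that function's parameters; for /sdapi/v1/create/hypernetwork they are the arguments of create_hypernetwork shown in the hypernetwork.py hunk above. A sketch with illustrative values only (the sizes, layer structure and activation name are assumptions, not defaults):

    import requests

    args = {
        "name": "my-hypernet",
        "enable_sizes": [320, 640, 768],  # cast to int server-side
        "overwrite_old": False,
        "layer_structure": "1, 2, 1",     # a comma-separated string is parsed to floats
        "activation_func": "relu",
        "add_layer_norm": False,
        "use_dropout": False,
    }
    resp = requests.post("http://127.0.0.1:7860/sdapi/v1/create/hypernetwork", json=args)
    # On success the body is {"info": "create hypernetwork filename: ..."};
    # assertion failures (e.g. the file already exists) come back as an error string in the same field.
    print(resp.json()["info"])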
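After the two embeddings commits (b5819d9b, then c65909ad) the /sdapi/v1/embeddings response is two dictionaries keyed by embedding name, loaded and skipped, whose values carry step, sd_checkpoint, sd_checkpoint_name, shape and vectors. Reading it is straightforward; the URL is again an assumed local instance:

    import requests

    data = requests.get("http://127.0.0.1:7860/sdapi/v1/embeddings").json()

    for name, item in data["loaded"].items():
        # step, sd_checkpoint and sd_checkpoint_name are optional and may be null
        # when the embedding file did not record them.
        print(f"{name}: {item['vectors']} vector(s) of length {item['shape']}")

    print("skipped (incompatible with the current model):", sorted(data["skipped"]))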
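Commit 1288a3bb reads the PNG text chunks directly, so the png-info response now carries both the bare parameters string (info) and a dict of every chunk found (items, which always contains a 'parameters' key). A sketch, assuming the route is mounted at /sdapi/v1/png-info and that sample.png is an image the web UI saved:

    import base64
    import requests

    with open("sample.png", "rb") as f:
        img_b64 = base64.b64encode(f.read()).decode()

    resp = requests.post("http://127.0.0.1:7860/sdapi/v1/png-info", json={"image": img_b64})
    data = resp.json()
    print(data["info"])           # generation parameters; empty string if none were found
    print(sorted(data["items"]))  # all text chunks found in the file, 'parameters' included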
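Script support for txt2img (d38ede71, mirroring the earlier img2img change in b5253f0d) adds only two optional request fields: script_name, matched case-insensitively against the selectable script titles, and script_args, the positional values the script's UI components would normally provide. The sketch below leaves script_args empty, which is a placeholder only; real values depend entirely on the chosen script, and "Prompt matrix" is just an assumed example title:

    import requests

    payload = {
        "prompt": "a photo of a corgi wearing a top hat",
        "steps": 20,
        "script_name": "Prompt matrix",  # unknown names are rejected with HTTP 422
        "script_args": [],               # placeholder; must match the script's UI inputs
    }
    resp = requests.post("http://127.0.0.1:7860/sdapi/v1/txt2img", json=payload)
    images_b64 = resp.json()["images"]   # base64 PNGs, including any grid the script produces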
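Commit 82c1f10b only relaxes a validation bound, raising the maximum upscaling_resize from 4 to 8, but the change is invisible unless exercised; the single-image extras call below does so. The upscaler name must be one the server lists ("Lanczos" is an assumption here), and the result comes back base64-encoded with a data: prefix:

    import base64
    import requests

    with open("low_res.png", "rb") as f:
        img_b64 = base64.b64encode(f.read()).decode()

    payload = {
        "image": img_b64,
        "resize_mode": 0,        # upscale by a factor rather than to a target size
        "upscaling_resize": 6,   # rejected before this commit, accepted now (ge=1, le=8)
        "upscaler_1": "Lanczos", # assumed; must match an installed upscaler name
    }
    resp = requests.post("http://127.0.0.1:7860/sdapi/v1/extra-single-image", json=payload)
    upscaled_b64 = resp.json()["image"]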
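The memory endpoint (47534577) reports sizes in gigabytes rounded to two decimals; after 2275f130 the response model only guarantees two plain dicts, and either of them may collapse to an {'error': ...} shape (for example when CUDA is unavailable), so a client should check before indexing:

    import requests

    mem = requests.get("http://127.0.0.1:7860/sdapi/v1/memory").json()

    ram, cuda = mem["ram"], mem["cuda"]
    if "error" not in ram:
        print(f"RAM  used {ram['used']} of {ram['total']} GB")
    if "error" not in cuda:
        # 'system' mirrors torch.cuda.mem_get_info(); 'active', 'allocated',
        # 'reserved' and 'inactive' are current/peak pairs from memory_stats().
        print(f"VRAM used {cuda['system']['used']} of {cuda['system']['total']} GB")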
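Finally, textinfo (39ea2519) surfaces the same status string the web UI shows, which is mainly useful while the long-running train routes above are busy, since those only respond once the whole job has finished. A one-shot poll, with the preview image skipped to keep the response small; /sdapi/v1/progress is assumed to be the route the Progress models serve:

    import requests

    p = requests.get("http://127.0.0.1:7860/sdapi/v1/progress",
                     params={"skip_current_image": True}).json()
    # progress is 0 when no job is running; textinfo may be null between updates.
    print(f"{p['progress'] * 100:.0f}%  eta {p['eta_relative']:.0f}s  {p['textinfo']}")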