-rw-r--r--   modules/api/api.py       60
-rw-r--r--   modules/processing.py     2
-rw-r--r--   modules/shared.py         2
-rw-r--r--   webui.py                 69
4 files changed, 102 insertions, 31 deletions
diff --git a/modules/api/api.py b/modules/api/api.py
new file mode 100644
index 00000000..9d7c699d
--- /dev/null
+++ b/modules/api/api.py
@@ -0,0 +1,60 @@
+from modules.api.processing import StableDiffusionProcessingAPI
+from modules.processing import StableDiffusionProcessingTxt2Img, process_images
+import modules.shared as shared
+import uvicorn
+from fastapi import FastAPI, Body, APIRouter
+from fastapi.responses import JSONResponse
+from pydantic import BaseModel, Field, Json
+import json
+import io
+import base64
+
+app = FastAPI()
+
+class TextToImageResponse(BaseModel):
+    images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
+    parameters: Json
+    info: Json
+
+
+class Api:
+    def __init__(self, txt2img, img2img, run_extras, run_pnginfo):
+        self.router = APIRouter()
+        app.add_api_route("/v1/txt2img", self.text2imgapi, methods=["POST"])
+
+    def text2imgapi(self, txt2imgreq: StableDiffusionProcessingAPI):
+        print(txt2imgreq)
+        p = StableDiffusionProcessingTxt2Img(**vars(txt2imgreq))
+        p.sd_model = shared.sd_model
+        print(p)
+        processed = process_images(p)
+
+        b64images = []
+        for i in processed.images:
+            buffer = io.BytesIO()
+            i.save(buffer, format="png")
+            b64images.append(base64.b64encode(buffer.getvalue()).decode("utf-8"))
+
+        response = {
+            "images": b64images,
+            "info": processed.js(),
+            "parameters": json.dumps(vars(txt2imgreq))
+        }
+
+
+        return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=json.dumps(processed.info))
+
+
+
+    def img2imgendpoint(self):
+        raise NotImplementedError
+
+    def extrasendpoint(self):
+        raise NotImplementedError
+
+    def pnginfoendpoint(self):
+        raise NotImplementedError
+
+    def launch(self, server_name, port):
+        app.include_router(self.router)
+        uvicorn.run(app, host=server_name, port=port)
\ No newline at end of file
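Note: below is a minimal client sketch for the new /v1/txt2img route registered above. It assumes the server was started with the new --api flag on the default 127.0.0.1:7861 (see the webui.py hunk further down), and that StableDiffusionProcessingAPI accepts the usual txt2img fields such as "prompt" and "steps"; the exact request schema lives in modules/api/processing.py, which is not part of this diff, so the payload field names here are illustrative.

    # Hypothetical client for the /v1/txt2img endpoint added in this commit.
    # Payload field names are assumptions; the real schema is defined by
    # StableDiffusionProcessingAPI in modules/api/processing.py.
    import base64
    import requests

    payload = {"prompt": "a photo of a cat", "steps": 20}
    resp = requests.post("http://127.0.0.1:7861/v1/txt2img", json=payload)
    resp.raise_for_status()

    data = resp.json()
    for idx, img_b64 in enumerate(data["images"]):
        # Each entry is a base64-encoded PNG produced by text2imgapi().
        with open(f"txt2img_{idx}.png", "wb") as f:
            f.write(base64.b64decode(img_b64))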
diff --git a/modules/processing.py b/modules/processing.py
index deb6125e..4a7c6ccc 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -723,4 +723,4 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         del x
         devices.torch_gc()
 
-        return samples
+        return samples
\ No newline at end of file
diff --git a/modules/shared.py b/modules/shared.py
index c2775603..6c6405fd 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -74,7 +74,7 @@ parser.add_argument("--disable-console-progressbars", action='store_true', help=
parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencoders model', default=None)
parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
-
+parser.add_argument("--api", action='store_true', help="use api=True to launch the api instead of the webui")
cmd_opts = parser.parse_args()
restricted_opts = [
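The shared.py change only registers the flag; with action='store_true', passing --api on the command line sets cmd_opts.api to True and leaves it False otherwise, which is what the webui.py hunk below branches on. A minimal illustration of that argparse behaviour:

    import argparse

    parser = argparse.ArgumentParser()
    # Same pattern as the new flag in modules/shared.py.
    parser.add_argument("--api", action='store_true', help="launch the API instead of the webui")

    # store_true flags default to False and flip to True when present.
    assert parser.parse_args([]).api is False
    assert parser.parse_args(["--api"]).api is True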
diff --git a/webui.py b/webui.py
index fe0ce321..cd8a99ea 100644
--- a/webui.py
+++ b/webui.py
@@ -97,40 +97,51 @@ def webui():
         os._exit(0)
 
     signal.signal(signal.SIGINT, sigint_handler)
+
+    if cmd_opts.api:
+        from modules.api.api import Api
+        api = Api(txt2img=modules.txt2img.txt2img,
+                  img2img=modules.img2img.img2img,
+                  run_extras=modules.extras.run_extras,
+                  run_pnginfo=modules.extras.run_pnginfo)
-    while 1:
-
-        demo = modules.ui.create_ui(wrap_gradio_gpu_call=wrap_gradio_gpu_call)
-
-        app, local_url, share_url = demo.launch(
-            share=cmd_opts.share,
-            server_name="0.0.0.0" if cmd_opts.listen else None,
-            server_port=cmd_opts.port,
-            debug=cmd_opts.gradio_debug,
-            auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
-            inbrowser=cmd_opts.autolaunch,
-            prevent_thread_lock=True
-        )
-
-        app.add_middleware(GZipMiddleware, minimum_size=1000)
+        api.launch(server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1",
+                   port=cmd_opts.port if cmd_opts.port else 7861)
+    else:
         while 1:
-            time.sleep(0.5)
-            if getattr(demo, 'do_restart', False):
-                time.sleep(0.5)
-                demo.close()
-                time.sleep(0.5)
-                break
-        sd_samplers.set_samplers()
+            demo = modules.ui.create_ui(wrap_gradio_gpu_call=wrap_gradio_gpu_call)
 
-        print('Reloading Custom Scripts')
-        modules.scripts.reload_scripts(os.path.join(script_path, "scripts"))
-        print('Reloading modules: modules.ui')
-        importlib.reload(modules.ui)
-        print('Refreshing Model List')
-        modules.sd_models.list_models()
-        print('Restarting Gradio')
+            app, local_url, share_url = demo.launch(
+                share=cmd_opts.share,
+                server_name="0.0.0.0" if cmd_opts.listen else None,
+                server_port=cmd_opts.port,
+                debug=cmd_opts.gradio_debug,
+                auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
+                inbrowser=cmd_opts.autolaunch,
+                prevent_thread_lock=True
+            )
+
+            app.add_middleware(GZipMiddleware, minimum_size=1000)
+
+            while 1:
+                time.sleep(0.5)
+                if getattr(demo, 'do_restart', False):
+                    time.sleep(0.5)
+                    demo.close()
+                    time.sleep(0.5)
+                    break
+
+            sd_samplers.set_samplers()
+
+            print('Reloading Custom Scripts')
+            modules.scripts.reload_scripts(os.path.join(script_path, "scripts"))
+            print('Reloading modules: modules.ui')
+            importlib.reload(modules.ui)
+            print('Refreshing Model List')
+            modules.sd_models.list_models()
+            print('Restarting Gradio')
 
 
 if __name__ == "__main__":
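With --api set, webui() now skips the Gradio restart loop entirely: it builds an Api instance and blocks in uvicorn.run() via Api.launch(). For reference, here is a self-contained sketch of the FastAPI pattern the new module relies on (a pydantic response model, a handler registered with add_api_route, and a blocking uvicorn.run call); the route and model names here are illustrative and not part of the codebase.

    import uvicorn
    from fastapi import FastAPI
    from pydantic import BaseModel, Field

    app = FastAPI()

    class PingResponse(BaseModel):
        # Illustrative model, analogous to TextToImageResponse in modules/api/api.py.
        message: str = Field(title="Message", description="A static reply.")

    def ping() -> PingResponse:
        return PingResponse(message="pong")

    # Same registration style as app.add_api_route("/v1/txt2img", ...) above.
    app.add_api_route("/v1/ping", ping, methods=["GET"], response_model=PingResponse)

    if __name__ == "__main__":
        # Mirrors Api.launch(): serve on a fixed host/port and block until killed.
        uvicorn.run(app, host="127.0.0.1", port=7861)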