| author | AUTOMATIC1111 <16777216c@gmail.com> | 2022-09-08 08:46:51 +0000 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2022-09-08 08:46:51 +0000 |
| commit | a196c45f15dc088e7162476729cf3bf28b0b3d73 (patch) | |
| tree | e90b392fefa197780427abbfb2ce0d19ae62b109 | |
| parent | 27dfcf69daf10b0c1e9b1844bb630dcc1898af50 (diff) | |
| parent | cce6f1df4117dc90cb16a07dc1634f0203c6bc0d (diff) | |
Merge pull request #146 from orionaskatu/orionaskatu-port-option
--port option for #131
 README.md         | 4 ++++
 modules/shared.py | 1 +
 webui.py          | 2 +-
 3 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -100,6 +100,10 @@ program in collabs.
Use `--listen` to make the server listen to network connections. This will allow computers on the local network
to access the UI, and if you configure port forwarding, also computers on the internet.
+Use `--port xxxx` to make the server listen on a specific port, where xxxx is the wanted port. Remember that
+all ports below 1024 need root/admin rights; for this reason it is advised to use a port above 1024.
+Defaults to port 7860 if available.
+
### Textual Inversion
To make use of pretrained embeddings, create `embeddings` directory (in the same place as `webui.py`)
and put your embeddings into it. They must be .pt files, each with only one trained embedding,
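As a concrete usage example (assuming the default script name shown in the diffstat above), `python webui.py --port 8080` would start the server on port 8080 instead of letting gradio fall back to its default of 7860.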
diff --git a/modules/shared.py b/modules/shared.py
index e529ec27..de7cbf02 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -34,6 +34,7 @@ parser.add_argument("--share", action='store_true', help="use share=True for gra
parser.add_argument("--esrgan-models-path", type=str, help="path to directory with ESRGAN models", default=os.path.join(script_path, 'ESRGAN'))
parser.add_argument("--opt-split-attention", action='store_true', help="enable optimization that reduced vram usage by a lot for about 10%% decrease in performance")
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
+parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
cmd_opts = parser.parse_args()
if torch.has_cuda:
diff --git a/webui.py b/webui.py
--- a/webui.py
+++ b/webui.py
@@ -191,4 +191,4 @@ if __name__ == "__main__":
run_pnginfo=run_pnginfo
)
- demo.launch(share=cmd_opts.share, server_name="0.0.0.0" if cmd_opts.listen else None)
+ demo.launch(share=cmd_opts.share, server_name="0.0.0.0" if cmd_opts.listen else None, server_port=cmd_opts.port)
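The two code hunks above add an optional integer flag and pass it straight through to gradio's `launch()`; when `server_port` is `None`, gradio keeps its own port selection (7860 if available). Below is a minimal, self-contained sketch of that pattern, using an illustrative placeholder UI rather than the project's real `modules.ui.create_ui(...)`:

```python
import argparse

import gradio as gr

# Same flag shape as the diff: optional int, None means "let gradio choose".
parser = argparse.ArgumentParser()
parser.add_argument("--listen", action='store_true', help="bind to 0.0.0.0 so other machines can connect")
parser.add_argument("--port", type=int, default=None, help="server port; ports < 1024 need root/admin rights")
cmd_opts = parser.parse_args()

# Placeholder UI standing in for the real interface.
with gr.Blocks() as demo:
    gr.Markdown("demo placeholder")

# server_port=None keeps gradio's default (7860 if free); an explicit value overrides it.
demo.launch(server_name="0.0.0.0" if cmd_opts.listen else None, server_port=cmd_opts.port)
```

Keeping the default at `None` rather than hard-coding 7860 means the unchanged command line behaves exactly as before the patch.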