64 files changed, 2590 insertions, 8537 deletions
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -70,7 +70,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- separate prompts using uppercase `AND`
- also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2`
- No token limit for prompts (original stable diffusion lets you use up to 75 tokens)
-- DeepDanbooru integration, creates danbooru style tags for anime prompts (add --deepdanbooru to commandline args)
+- DeepDanbooru integration, creates danbooru style tags for anime prompts
- [xformers](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers), major speed increase for select cards: (add --xformers to commandline args)
- via extension: [History tab](https://github.com/yfszzx/stable-diffusion-webui-images-browser): view, direct and delete images conveniently within the UI
- Generate forever option
@@ -84,26 +84,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- API
- Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML.
- via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip images embds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients))
-
-## Where are Aesthetic Gradients?!?!
-Aesthetic Gradients are now an extension. You can install it using git:
-
-```commandline
-git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients extensions/aesthetic-gradients
-```
-
-After running this command, make sure that you have `aesthetic-gradients` dir in webui's `extensions` directory and restart
-the UI. The interface for Aesthetic Gradients should appear exactly the same as it was.
-
-## Where is History/Image browser?!?!
-Image browser is now an extension. You can install it using git:
-
-```commandline
-git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser extensions/images-browser
-```
-
-After running this command, make sure that you have `images-browser` dir in webui's `extensions` directory and restart
-the UI. The interface for Image browser should appear exactly the same as it was.
+- [Stable Diffusion 2.0](https://github.com/Stability-AI/stablediffusion) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20) for instructions
## Installation and Running
Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
diff --git a/javascript/generationParams.js b/javascript/generationParams.js
new file mode 100644
index 00000000..95f05093
--- /dev/null
+++ b/javascript/generationParams.js
@@ -0,0 +1,33 @@
+// attaches listeners to the txt2img and img2img galleries to update displayed generation param text when the image changes
+
+let txt2img_gallery, img2img_gallery, modal = undefined;
+onUiUpdate(function(){
+    if (!txt2img_gallery) {
+        txt2img_gallery = attachGalleryListeners("txt2img")
+    }
+    if (!img2img_gallery) {
+        img2img_gallery = attachGalleryListeners("img2img")
+    }
+    if (!modal) {
+        modal = gradioApp().getElementById('lightboxModal')
+        modalObserver.observe(modal, { attributes : true, attributeFilter : ['style'] });
+    }
+});
+
+let modalObserver = new MutationObserver(function(mutations) {
+    mutations.forEach(function(mutationRecord) {
+        let selectedTab = gradioApp().querySelector('#tabs div button.bg-white')?.innerText
+        if (mutationRecord.target.style.display === 'none' && selectedTab === 'txt2img' || selectedTab === 'img2img')
+            gradioApp().getElementById(selectedTab+"_generation_info_button").click()
+    });
+});
+
+function attachGalleryListeners(tab_name) {
+    gallery = gradioApp().querySelector('#'+tab_name+'_gallery')
+    gallery?.addEventListener('click', () => gradioApp().getElementById(tab_name+"_generation_info_button").click());
+    gallery?.addEventListener('keydown', (e) => {
+        if (e.keyCode == 37 || e.keyCode == 39) // left or right arrow
+            gradioApp().getElementById(tab_name+"_generation_info_button").click()
+    });
+    return gallery;
+}
diff --git a/launch.py b/launch.py
--- a/launch.py
+++ b/launch.py
@@ -105,22 +105,26 @@ def version_check(commit):
        print("version check failed", e)
+def run_extension_installer(extension_dir):
+ path_installer = os.path.join(extension_dir, "install.py")
+ if not os.path.isfile(path_installer):
+ return
+
+ try:
+ env = os.environ.copy()
+ env['PYTHONPATH'] = os.path.abspath(".")
+
+ print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env))
+ except Exception as e:
+ print(e, file=sys.stderr)
+
+
def run_extensions_installers():
if not os.path.isdir(dir_extensions):
return
for dirname_extension in os.listdir(dir_extensions):
- path_installer = os.path.join(dir_extensions, dirname_extension, "install.py")
- if not os.path.isfile(path_installer):
- continue
-
- try:
- env = os.environ.copy()
- env['PYTHONPATH'] = os.path.abspath(".")
-
- print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {dirname_extension}", custom_env=env))
- except Exception as e:
- print(e, file=sys.stderr)
+ run_extension_installer(os.path.join(dir_extensions, dirname_extension))
def prepare_enviroment():
@@ -130,19 +134,19 @@ def prepare_enviroment():
    gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
- deepdanbooru_package = os.environ.get('DEEPDANBOORU_PACKAGE', "git+https://github.com/KichangKim/DeepDanbooru.git@d91a2963bf87c6a770d74894667e9ffa9f6de7ff")
+ openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b")
xformers_windows_package = os.environ.get('XFORMERS_WINDOWS_PACKAGE', 'https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl')
- stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/CompVis/stable-diffusion.git")
- taming_transformers_repo = os.environ.get('TAMING_REANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git")
+ stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git")
+ taming_transformers_repo = os.environ.get('TAMING_TRANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git")
k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
- codeformer_repo = os.environ.get('CODEFORMET_REPO', 'https://github.com/sczhou/CodeFormer.git')
+ codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git')
blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
- stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
+ stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "47b6b607fdd31875c9279cd2f4f16b92e4ea958e")
taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
- k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "60e5042ca0da89c14d1dd59d73883280f8fce991")
+ k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "5b3af030dd83e0297272d861c19477735d0317ec")
codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
@@ -154,7 +158,6 @@ def prepare_enviroment():
    sys.argv, update_check = extract_arg(sys.argv, '--update-check')
sys.argv, run_tests = extract_arg(sys.argv, '--tests')
xformers = '--xformers' in sys.argv
- deepdanbooru = '--deepdanbooru' in sys.argv
ngrok = '--ngrok' in sys.argv
try:
@@ -177,6 +180,9 @@ def prepare_enviroment():
    if not is_installed("clip"):
run_pip(f"install {clip_package}", "clip")
+ if not is_installed("open_clip"):
+ run_pip(f"install {openclip_package}", "open_clip")
+
if (not is_installed("xformers") or reinstall_xformers) and xformers:
if platform.system() == "Windows":
if platform.python_version().startswith("3.10"):
@@ -189,15 +195,12 @@ def prepare_enviroment():
        elif platform.system() == "Linux":
run_pip("install xformers", "xformers")
- if not is_installed("deepdanbooru") and deepdanbooru:
- run_pip(f"install {deepdanbooru_package}#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru")
-
if not is_installed("pyngrok") and ngrok:
run_pip("install pyngrok", "ngrok")
os.makedirs(dir_repos, exist_ok=True)
- git_clone(stable_diffusion_repo, repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
+ git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash)
git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
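
The launch.py change above moves the per-extension logic into run_extension_installer(), which runs an extension's install.py with the webui's Python interpreter and with PYTHONPATH pointed at the webui root. As a rough illustration only (not part of this commit, with a made-up extension and package name), an extension installer that this hook would execute might look like the sketch below, reusing the is_installed()/run_pip() helpers from launch.py that appear elsewhere in this diff:

```python
# extensions/<my-extension>/install.py -- hypothetical example, not from this commit.
# run_extension_installer() sets PYTHONPATH to the webui root before running this file,
# so the helpers defined in launch.py can be imported directly.
import launch

if not launch.is_installed("example_package"):  # "example_package" is a placeholder name
    launch.run_pip("install example_package", "example_package for my-extension")
```

Any exception raised here is caught by run_extension_installer() and printed to stderr rather than aborting startup, as the except branch in the hunk above shows.
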
diff --git a/localizations/ar_AR.json b/localizations/ar_AR.json
deleted file mode 100644
index abbbcff4..00000000
--- a/localizations/ar_AR.json
+++ /dev/null
@@ -1,518 +0,0 @@
-{
-    "rtl": true,
-    "Loading...": "لحظة...",
-    "view": "اعرض ",
-    "api": "واجهة البرمجة",
-    "built with gradio": "مبني باستخدام gradio",
-    "Stable Diffusion checkpoint": "أوزان نموذج الإنتشار المسقر",
-    "txt2img": "نص إلى صورة",
-    "Prompt": "الطلب",
-    "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "الطلب (لبدء الإنتاج Ctrl+Enter أو Alt+Enter اضغط)",
-    "Negative prompt": "عكس الطلب",
-    "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "عكس الطلب (لبدء الإنتاج Ctrl+Enter أو Alt+Enter اضغط)",
-    "Add a random artist to the prompt.": "أضف فنان عشوائي للطلب",
-    "Read generation parameters from prompt or last generation if prompt is empty into user interface.": "اقرأ عوامل الإنتاج من الطلب أو من الإنتاج السابق إذا كان الطلب فارغا",
-    "Save style": "احتفظ بالطلب وعكسه كإضافة",
-    "Apply selected styles to current prompt": "ألحق الإضافات المحددة إلى الطلب وعكسه",
-    "Generate": "أنتج",
-    "Skip": "تخطى",
-    "Stop processing current image and continue processing.": "لا تكمل خطوات هذة الحزمة وانتقل إلى الحزمة التالية",
-    "Interrupt": "توقف",
-    "Stop processing images and return any results accumulated so far.": "توقف عن الإنتاج واعرض ما تم إلى الآن",
-    "Style 1": "الإضافة 1",
-    "Style to apply; styles have components for both positive and negative prompts and apply to both": "الإضافات (styles) عبارة عن كلمات تتكرر كثيرا يتم إلحاقها بالطلب وعكسه عند الرغبة",
-    "Style 2": "الإضافة 2",
-    "Do not do anything special": "لا يغير شيئا",
-    "Sampling Steps": "عدد الخطوات",
-    "Sampling method": "أسلوب الخطو",
-    "Which algorithm to use to produce the image": "Sampler: اسم نظام تحديد طريقة تغيير المسافات بين الخطوات",
-    "Euler a": "Euler a",
-    "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral: طريقة مبدعة يمكن أن تنتج صور مختلفة على حسب عدد الخطوات، لا تتغير بعد 30-40 خطوة",
-    "Euler": "Euler",
-    "LMS": "LMS",
-    "Heun": "Heun",
-    "DPM2": "DPM2",
-    "DPM2 a": "DPM2 a",
-    "DPM fast": "DPM fast",
-    "DPM adaptive": "DPM adaptive",
-    "LMS Karras": "LMS Karras",
-    "DPM2 Karras": "DPM2 Karras",
-    "DPM2 a Karras": "DPM2 a Karras",
-    "DDIM": "DDIM",
-    "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit Models: الأفضل في الإنتاج الجزئي",
-    "PLMS": "PLMS",
-    "Width": "العرض",
-    "Height": "الإرتفاع",
-    "Restore faces": "تحسين الوجوه",
-    "Tiling": "ترصيف",
-    "Produce an image that can be tiled.": "أنتج صور يمكن ترصيفها بجانب بعضها كالبلاط",
-    "Highres. fix": "إصلاح الدقة العالية",
-    "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "أنتج صورة بدقة منخفضة ثم قم برفع الدقة فيما بعد لمنع التشوهات التي تحصل عندما تكون الدقة المطلوبة كبيرة",
-    "Firstpass width": "العرض الأولي",
-    "Firstpass height": "الإرتفاع الأولي",
-    "Denoising strength": "المدى",
-    "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image.\nWith values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "Denoising strength: حدد مدى الإبتعاد عن الصورة (عدد الخطوات الفعلي = عدد الخطوات * المدى)",
-    "Batch count": "عدد الحزم",
-    "How many batches of images to create": "يتم إنتاج الصور على دفعات، كل دفعة فيها حزمة من الصور",
-    "Batch size": "حجم الحزمة",
-    "How many image to create in a single batch": "Batch size: إنتاج حزمة صور أسرع من إنتاجهم فرادى، حدد عدد الصور في كل حزمة",
-    "CFG Scale": "التركيز",
-    "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "CFG scale: يحدد مقدار التركيز على تلبية الطلب وتجنب عكسه، كلما زاد قل الإبداع",
-    "Seed": "البذرة",
-    "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "Seed: رقم طبيعي عشوائي يسمح بإعادة إنتاج نفس الصورة إذا توافقت قيم العوامل الأخرى",
-    "Set seed to -1, which will cause a new random number to be used every time": "استخدم بذرة جديدة في كل مرة (نرمز لهذا الخيار بجعل قيمة البذرة 1-)",
-    "Reuse seed from last generation, mostly useful if it was randomed": "أعد استخدام البذرة من الإنتاج السابق",
-    "Extra": "مزج",
-    "Variation seed": "بذرة الممزوج",
-    "Seed of a different picture to be mixed into the generation.": "Variation seed: بذرة صورة أخرى ليتم مزجها مع الصورة الحالية",
-    "Variation strength": "أثر الممزوج",
-    "How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "Variation seed strength: مقدار أثر الصورة المدمجة على النتيجة النهائية (0: لا أثر، 1: أثر كامل ما عدا عند استخدام أسلوب خطو سلفي Ancestral)",
-    "Resize seed from width": "عرض الممزوج",
-    "Resize seed from height": "إرتفاع الممزوج",
-    "Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "Seed resize from: حدد دقة صورة الممزوج (0: نفس دقة الإنتاج)",
-    "Open for Clip Aesthetic!": "تضمين تجميلي",
-    "▼": "▼",
-    "Aesthetic weight": "أثر التضمين",
-    "Aesthetic steps": "عدد الخطوات",
-    "Aesthetic learning rate": "معدل التعلم",
-    "Slerp interpolation": "امزج بطريقة كروية",
-    "Aesthetic imgs embedding": "التضمين",
-    "None": "بدون",
-    "Aesthetic text for imgs": "الطلب (اختياري)",
-    "This text is used to rotate the feature space of the imgs embs": "لإعادة توجيه التضمين التجميلي",
-    "Slerp angle": "أثر الطلب",
-    "Is negative text": "الطلب عكسي",
-    "Script": "أدوات خاصة",
-    "Prompt matrix": "مصفوفة طلبات",
-    "Put variable parts at start of prompt": "الجزء المتغير في بداية الطلب",
-    "Prompts from file or textbox": " قائمة طلبات",
-    "Iterate seed every line": "غير البذرة مع كل طلب",
-    "List of prompt inputs": "قائمة الطلبات",
-    "Upload prompt inputs": "اجلب الطلبات من ملف",
-    "Drop File Here": "اسقط ملف هنا",
-    "-": "-",
-    "or": "أو",
-    "Click to Upload": "انقر للرفع",
-    "X/Y plot": "مصفوفة عوامل",
-    "X type": "العامل الأول",
-    "Nothing": "لا شيء",
-    "Var. seed": "بذرة الممزوج",
-    "Var. strength": "أثر الممزوج",
-    "Steps": "عدد الخطوات",
-    "Prompt S/R": "كلمات بديلة",
-    "Prompt order": "ترتيب الكلمات",
-    "Sampler": "أسلوب الخطو",
-    "Checkpoint name": "ملف الأوزان",
-    "Hypernetwork": "الشبكة الفائقة",
-    "Hypernet str.": "قوة الشبكة الفائقة",
-    "Inpainting conditioning mask strength": "قوة قناع الإنتاج الجزئي",
-    "Only applies to inpainting models.\nDetermines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "حدد مدى صرامة قناع الإنتاج، يصبح القناع شفاف إذا قوته 0 (لا يعمل إلا مع ملفات أوزان الإنتاج الجزئي: inpainting)",
-    "Sigma Churn": "العشوائية (Schurn)",
-    "Sigma min": "أدنى تشويش (Stmin)",
-    "Sigma max": "أقصى تشويش (Stmax)",
-    "Sigma noise": "التشويش (Snoise)",
-    "Eta": "العامل Eta η",
-    "Clip skip": "تخطي آخر طبقات CLIP",
-    "Denoising": "المدى",
-    "Cond. Image Mask Weight": "قوة قناع الإنتاج الجزئي",
-    "X values": "قيم العامل الأول",
-    "Separate values for X axis using commas.": "افصل القيم بفواصل (,) من اليسار إلى اليمين",
-    "Y type": "العامل الثاني",
-    "Y values": "قيم العامل الثاني",
-    "Separate values for Y axis using commas.": "افصل القيم بفواصل (,) من الأعلى إلى الأسفل",
-    "Draw legend": "أضف مفتاح التوضيح",
-    "Include Separate Images": "أضف الصور منفصلة",
-    "Keep -1 for seeds": "استخدم بذور عشوائية",
-    "Save": "احفظ",
-    "Write image to a directory (default - log/images) and generation parameters into csv file.": "احفظ الصور مع ملف العوامل بصيغة CSV",
-    "Send to img2img": "أرسل لصورة إلى صورة",
-    "Send to inpaint": "أرسل للإنتاج الجزئي",
-    "Send to extras": "أرسل للمعالجة",
-    "Open images output directory": "افتح مجلد الصور المخرجة",
-    "Make Zip when Save?": "ضع النتائج في ملف مضغوط عند الحفظ",
-    "img2img": "صورة إلى صورة",
-    "Interrogate\nCLIP": "استجواب\nCLIP",
-    "Drop Image Here": "اسقط صورة هنا",
-    "Just resize": "تغيير الحجم فقط",
-    "Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "غير حجم الصورة بدون مراعات اتزان الأبعاد",
-    "Crop and resize": "تغيير الحجم وقص الأطراف",
-    "Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "غير حجم الصورة واقتص الأطراف الزائدة",
-    "Resize and fill": "تغيير الحجم وتبطين الأطراف",
-    "Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "غير حجم الصورة واملأ الأطراف الزائدة بألوان من الصورة",
-    "img2img alternative test": "استجواب الصورة (تجريبي)",
-    "should be 2 or lower.": "يفترض أن يكون 2 أو أقل",