36 files changed, 702 insertions, 757 deletions
diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index 8937b585..7c371deb 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -178,6 +178,7 @@ def load_loras(names, multipliers=None):
def lora_forward(module, input, res):
+ input = devices.cond_cast_unet(input)
if len(loaded_loras) == 0:
return res
diff --git a/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js
index 4a85c8eb..f0918e26 100644
--- a/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js
+++ b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js
@@ -89,22 +89,15 @@ function checkBrackets(evt, textArea, counterElt) {
function setupBracketChecking(id_prompt, id_counter){
var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea");
var counter = gradioApp().getElementById(id_counter)
+ textarea.addEventListener("input", function(evt){ checkBrackets(evt, textarea, counter) });
}
-var shadowRootLoaded = setInterval(function() {
- var shadowRoot = document.querySelector('gradio-app').shadowRoot;
- if(! shadowRoot) return false;
-
- var shadowTextArea = shadowRoot.querySelectorAll('#txt2img_prompt > label > textarea');
- if(shadowTextArea.length < 1) return false;
-
- clearInterval(shadowRootLoaded);
-
+onUiLoaded(function(){
setupBracketChecking('txt2img_prompt', 'txt2img_token_counter')
setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter')
- setupBracketChecking('img2img_prompt', 'imgimg_token_counter')
+ setupBracketChecking('img2img_prompt', 'img2img_token_counter')
setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter')
-}, 1000);
+})
\ No newline at end of file
diff --git a/html/licenses.html b/html/licenses.html
index bddbf466..bc995aa0 100644
--- a/html/licenses.html
+++ b/html/licenses.html
@@ -635,4 +635,30 @@ SOFTWARE.
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+</pre>
+
+<h2><a href="https://github.com/explosion/curated-transformers/blob/main/LICENSE">Curated transformers</a></h2>
+<small>The MPS workaround for nn.Linear on macOS 13.2.X is based on the MPS workaround for nn.Linear created by danieldk for Curated transformers</small>
+<pre>
+The MIT License (MIT)
+
+Copyright (C) 2021 ExplosionAI GmbH
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
</pre>
\ No newline at end of file
diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js
index 11bcce1b..06f505b0 100644
--- a/javascript/contextMenus.js
+++ b/javascript/contextMenus.js
@@ -43,7 +43,7 @@ contextMenuInit = function(){
})
- gradioApp().getRootNode().appendChild(contextMenu)
+ gradioApp().appendChild(contextMenu)
let menuWidth = contextMenu.offsetWidth + 4;
let menuHeight = contextMenu.offsetHeight + 4;
diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js
index 619bb1fa..20a5aadf 100644
--- a/javascript/edit-attention.js
+++ b/javascript/edit-attention.js
@@ -1,6 +1,6 @@
function keyupEditAttention(event){
let target = event.originalTarget || event.composedPath()[0];
- if (!target.matches("[id*='_toprow'] textarea.gr-text-input[placeholder]")) return;
+ if (! target.matches("[id*='_toprow'] [id*='_prompt'] textarea")) return;
if (! (event.metaKey || event.ctrlKey)) return;
let isPlus = event.key == "ArrowUp"
diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js
index 2fb87cd5..40818bb4 100644
--- a/javascript/extraNetworks.js
+++ b/javascript/extraNetworks.js
@@ -139,3 +139,39 @@ function extraNetworksShowMetadata(text){
popup(elem);
}
+
+function requestGet(url, data, handler, errorHandler){
+ var xhr = new XMLHttpRequest();
+ var args = Object.keys(data).map(function(k){ return encodeURIComponent(k) + '=' + encodeURIComponent(data[k]) }).join('&')
+ xhr.open("GET", url + "?" + args, true);
+
+ xhr.onreadystatechange = function () {
+ if (xhr.readyState === 4) {
+ if (xhr.status === 200) {
+ try {
+ var js = JSON.parse(xhr.responseText);
+ handler(js)
+ } catch (error) {
+ console.error(error);
+ errorHandler()
+ }
+ } else{
+ errorHandler()
+ }
+ }
+ };
+ var js = JSON.stringify(data);
+ xhr.send(js);
+}
+
+function extraNetworksRequestMetadata(extraPage, cardName){
+ showError = function(){ extraNetworksShowMetadata("there was an error getting metadata"); }
+
+ requestGet("./sd_extra_networks/metadata", {"page": extraPage, "item": cardName}, function(data){
+ if(data && data.metadata){
+ extraNetworksShowMetadata(data.metadata)
+ } else{
+ showError()
+ }
+ }, showError)
+}
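The extraNetworksRequestMetadata helper above fetches card metadata from the ./sd_extra_networks/metadata route via requestGet. The same route can also be queried outside the browser; a minimal Python sketch, assuming a local webui instance at http://127.0.0.1:7860 and a page/item pair such as "lora"/"myLora" (all placeholders, not part of this change):

    # Hedged sketch: query the extra-networks metadata route directly.
    # The base URL, page name and card name below are assumptions for illustration.
    import requests

    resp = requests.get(
        "http://127.0.0.1:7860/sd_extra_networks/metadata",
        params={"page": "lora", "item": "myLora"},  # same query keys requestGet sends
    )
    resp.raise_for_status()
    print(resp.json().get("metadata") or "no metadata for this card")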
diff --git a/javascript/hints.js b/javascript/hints.js
index 7f4101b2..b3f3d08d 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -18,7 +18,7 @@ titles = {
"\u2199\ufe0f": "Read generation parameters from prompt or last generation if prompt is empty into user interface.",
"\u{1f4c2}": "Open images output directory",
"\u{1f4be}": "Save style",
- "\u{1f5d1}": "Clear prompt",
+ "\u{1f5d1}\ufe0f": "Clear prompt",
"\u{1f4cb}": "Apply selected styles to current prompt",
"\u{1f4d2}": "Paste available values into the field",
"\u{1f3b4}": "Show extra networks",
@@ -40,8 +40,7 @@ titles = {
"Inpaint at full resolution": "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image",
"Denoising strength": "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.",
- "Denoising strength change factor": "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.",
-
+
"Skip": "Stop processing current image and continue processing.",
"Interrupt": "Stop processing images and return any results accumulated so far.",
"Save": "Write image to a directory (default - log/images) and generation parameters into csv file.",
@@ -71,8 +70,10 @@ titles = {
"Directory name pattern": "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg],[prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
"Max prompt words": "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle",
- "Loopback": "Process an image, use it as an input, repeat.",
- "Loops": "How many times to repeat processing an image and using it as input for the next iteration",
+ "Loopback": "Performs img2img processing multiple times. Output images are used as input for the next loop.",
+ "Loops": "How many times to process an image. Each output is used as the input of the next loop. If set to 1, behavior will be as if this script were not used.",
+ "Final denoising strength": "The denoising strength for the final loop of each image in the batch.",
+ "Denoising strength curve": "The denoising curve controls the rate of denoising strength change each loop. Aggressive: Most of the change will happen towards the start of the loops. Linear: Change will be constant through all loops. Lazy: Most of the change will happen towards the end of the loops.",
"Style 1": "Style to apply; styles have components for both positive and negative prompts and apply to both",
"Style 2": "Style to apply; styles have components for both positive and negative prompts and apply to both",
diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js
index 28e748b7..7547e771 100644
--- a/javascript/imageviewer.js
+++ b/javascript/imageviewer.js
@@ -50,7 +50,7 @@ function updateOnBackgroundChange() {
}
function modalImageSwitch(offset) {
- var allgalleryButtons = gradioApp().querySelectorAll(".gallery-item.transition-all")
+ var allgalleryButtons = gradioApp().querySelectorAll(".gradio-gallery .thumbnail-item")
var galleryButtons = []
allgalleryButtons.forEach(function(elem) {
if (elem.parentElement.offsetParent) {
@@ -59,7 +59,7 @@ function modalImageSwitch(offset) {
})
if (galleryButtons.length > 1) {
- var allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2")
+ var allcurrentButtons = gradioApp().querySelectorAll(".gradio-gallery .thumbnail-item.selected")
var currentButton = null
allcurrentButtons.forEach(function(elem) {
if (elem.parentElement.offsetParent) {
@@ -136,37 +136,29 @@ function modalKeyHandler(event) {
}
}
-function showGalleryImage() {
- setTimeout(function() {
- fullImg_preview = gradioApp().querySelectorAll('img.w-full.object-contain')
-
- if (fullImg_preview != null) {
- fullImg_preview.forEach(function function_name(e) {
- if (e.dataset.modded)
- return;
- e.dataset.modded = true;
- if(e && e.parentElement.tagName == 'DIV'){
- e.style.cursor='pointer'
- e.style.userSelect='none'
-
- var isFirefox = isFirefox = navigator.userAgent.toLowerCase().indexOf('firefox') > -1
-
- // For Firefox, listening on click first switched to next image then shows the lightbox.
- // If you know how to fix this without switching to mousedown event, please.
- // For other browsers the event is click to make it possiblr to drag picture.
- var event = isFirefox ? 'mousedown' : 'click'
-
- e.addEventListener(event, function (evt) {
- if(!opts.js_modal_lightbox || evt.button != 0) return;
- modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed)
- evt.preventDefault()
- showModal(evt)
- }, true);
- }
- });
- }
+function setupImageForLightbox(e) {
+ if (e.dataset.modded)
+ return;
+
+ e.dataset.modded = true;
+ e.style.cursor='pointer'
+ e.style.userSelect='none'
+
+ var isFirefox = navigator.userAgent.toLowerCase().indexOf('firefox') > -1
+
+ // For Firefox, listening on click first switched to next image then shows the lightbox.
+ // If you know how to fix this without switching to mousedown event, please.
+ // For other browsers the event is click to make it possiblr to drag picture.
+ var event = isFirefox ? 'mousedown' : 'click'
+
+ e.addEventListener(event, function (evt) {
+ if(!opts.js_modal_lightbox || evt.button != 0) return;
+
+ modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed)
+ evt.preventDefault()
+ showModal(evt)
+ }, true);
- }, 100);
}
function modalZoomSet(modalImage, enable) {
@@ -199,21 +191,21 @@ function modalTileImageToggle(event) {
}
function galleryImageHandler(e) {
- if (e && e.parentElement.tagName == 'BUTTON') {
+ //if (e && e.parentElement.tagName == 'BUTTON') {
e.onclick = showGalleryImage;
- }
+ //}
}
onUiUpdate(function() {
- fullImg_preview = gradioApp().querySelectorAll('img.w-full')
+ fullImg_preview = gradioApp().querySelectorAll('.gradio-gallery > div > img')
if (fullImg_preview != null) {
- fullImg_preview.forEach(galleryImageHandler);
+ fullImg_preview.forEach(setupImageForLightbox);
}
updateOnBackgroundChange();
})
document.addEventListener("DOMContentLoaded", function() {
- const modalFragment = document.createDocumentFragment();
+ //const modalFragment = document.createDocumentFragment();
const modal = document.createElement('div')
modal.onclick = closeModal;
modal.id = "lightboxModal";
@@ -277,9 +269,9 @@ document.addEventListener("DOMContentLoaded", function() {
modal.appendChild(modalNext)
+ gradioApp().appendChild(modal)
- gradioApp().getRootNode().appendChild(modal)
- document.body.appendChild(modalFragment);
+ document.body.appendChild(modal);
});
diff --git a/javascript/progressbar.js b/javascript/progressbar.js
index 9ccc9da4..4ac9b8db 100644
--- a/javascript/progressbar.js
+++ b/javascript/progressbar.js
@@ -1,78 +1,13 @@
// code related to showing and updating progressbar shown as the image is being made
-
-galleries = {}
-storedGallerySelections = {}
-galleryObservers = {}
-
function rememberGallerySelection(id_gallery){
- storedGallerySelections[id_gallery] = getGallerySelectedIndex(id_gallery)
-}
-function getGallerySelectedIndex(id_gallery){
- let galleryButtons = gradioApp().querySelectorAll('#'+id_gallery+' .gallery-item')
- let galleryBtnSelected = gradioApp().querySelector('#'+id_gallery+' .gallery-item.\\!ring-2')
-
- let currentlySelectedIndex = -1
- galleryButtons.forEach(function(v, i){ if(v==galleryBtnSelected) { currentlySelectedIndex = i } })
-
- return currentlySelectedIndex
}
-// this is a workaround for https://github.com/gradio-app/gradio/issues/2984
-function check_gallery(id_gallery){
- let gallery = gradioApp().getElementById(id_gallery)
- // if gallery has no change, no need to setting up observer again.
- if (gallery && galleries[id_gallery] !== gallery){
- galleries[id_gallery] = gallery;
- if(galleryObservers[id_gallery]){
- galleryObservers[id_gallery].disconnect();
- }
+function getGallerySelectedIndex(id_gallery){
- storedGallerySelections[id_gallery] = -1
-
- galleryObservers[id_gallery] = new MutationObserver(function (){
- let galleryButtons = gradioApp().querySelectorAll('#'+id_gallery+' .gallery-item')
- let galleryBtnSelected = gradioApp().querySelector('#'+id_gallery+' .gallery-item.\\!ring-2')
- let currentlySelectedIndex = getGallerySelectedIndex(id_gallery)
- prevSelectedIndex = storedGallerySelections[id_gallery]
- storedGallerySelections[id_gallery] = -1
-
- if (prevSelectedIndex !== -1 && galleryButtons.length>prevSelectedIndex && !galleryBtnSelected) {
- // automatically re-open previously selected index (if exists)
- activeElement = gradioApp().activeElement;
- let scrollX = window.scrollX;
- let scrollY = window.scrollY;
-
- galleryButtons[prevSelectedIndex].click();
- showGalleryImage();
-
- // When the gallery button is clicked, it gains focus and scrolls itself into view
- // We need to scroll back to the previous position
- setTimeout(function (){
- window.scrollTo(scrollX, scrollY);
- }, 50);
-
- if(activeElement){
- // i fought this for about an hour; i don't know why the focus is lost or why this helps recover it
- // if someone has a better solution please by all means
- setTimeout(function (){
- activeElement.focus({
- preventScroll: true // Refocus the element that was focused before the gallery was opened without scrolling to it
- })
- }, 1);
- }
- }
- })
- galleryObservers[id_gallery].observe( gallery, { childList:true, subtree:false })
- }
}
-onUiUpdate(function(){
- check_gallery('txt2img_gallery')
- check_gallery('img2img_gallery')
-})
-
function request(url, data, handler, errorHandler){
var xhr = new XMLHttpRequest();
var url = url;
diff --git a/javascript/ui.js b/javascript/ui.js
index b7a8268a..fcaf5608 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -86,7 +86,7 @@ function get_tab_index(tabId){
var res = 0
gradioApp().getElementById(tabId).querySelector('div').querySelectorAll('button').forEach(function(button, i){
- if(button.className.indexOf('bg-white') != -1)
+ if(button.className.indexOf('selected') != -1)
res = i
})
@@ -255,7 +255,6 @@ onUiUpdate(function(){
}
prompt.parentElement.insertBefore(counter, prompt)
- counter.classList.add("token-counter")
prompt.parentElement.style.position = "relative"
promptTokecountUpdateFuncs[id] = function(){ update_token_counter(id_button); }
diff --git a/modules/api/api.py b/modules/api/api.py
index 35e17afc..f52f7fef 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -18,7 +18,7 @@ from modules.textual_inversion.textual_inversion import create_embedding, train_
from modules.textual_inversion.preprocess import preprocess
from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
from PIL import PngImagePlugin,Image
-from modules.sd_models import checkpoints_list
+from modules.sd_models import checkpoints_list, unload_model_weights, reload_model_weights
from modules.sd_models_config import find_checkpoint_config_near_filename
from modules.realesrgan_model import get_realesrgan_models
from modules import devices
@@ -150,6 +150,8 @@ class Api:
self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=TrainResponse)
self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=TrainResponse)
self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=MemoryResponse)
+ self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"])
+ self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"])
self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=ScriptsList)
def add_api_route(self, path: str, endpoint, **kwargs):
@@ -412,6 +414,16 @@ class Api:
return {}
+ def unloadapi(self):
+ unload_model_weights()
+
+ return {}
+
+ def reloadapi(self):
+ reload_model_weights()
+
+ return {}
+
def skip(self):
shared.state.skip()
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 7c0b5b4e..6df76858 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -401,9 +401,14 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component,
button.click(
fn=paste_func,
- _js=f"recalculate_prompts_{tabname}",
inputs=[input_comp],
outputs=[x[0] for x in paste_fields],
)
+ button.click(
+ fn=None,
+ _js=f"recalculate_prompts_{tabname}",
+ inputs=[],
+ outputs=[],
+ )
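The two routes added to modules/api/api.py above take no request body and return an empty JSON object; unload-checkpoint frees the model weights and reload-checkpoint loads them back. A minimal client sketch, assuming the webui is running locally with --api enabled (the base URL is an assumption, not part of this change):

    # Hedged sketch: drive the new checkpoint routes from a client.
    # The base URL assumes a default local launch with --api enabled.
    import requests

    base = "http://127.0.0.1:7860"

    # Mirrors Api.unloadapi -> unload_model_weights(): frees the loaded checkpoint.
    requests.post(f"{base}/sdapi/v1/unload-checkpoint").raise_for_status()

    # ... run other memory-hungry work here ...

    # Mirrors Api.reloadapi -> reload_model_weights(): restores the checkpoint before generating again.
    requests.post(f"{base}/sdapi/v1/reload-checkpoint").raise_for_status()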
diff --git a/modules/images.py b/modules/images.py
index 2da988ee..7030aaaa 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -645,6 +645,8 @@ Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}
def image_data(data):
+ import gradio as gr
+
try:
image = Image.open(io.BytesIO(data))
textinfo, _ = read_info_from_image(image)
@@ -660,7 +662,7 @@ def image_data(data):
except Exception:
pass
- return '', None
+ return gr.update(), None
def flatten(img, bgcolor):
diff --git a/modules/mac_specific.py b/modules/mac_specific.py
index 18e6ff72..6fe8dea0 100644
--- a/modules/mac_specific.py
+++ b/modules/mac_specific.py
@@ -1,4 +1,5 @@
import torch
+import platform
from modules import paths
from modules.sd_hijack_utils import CondFunc
from packaging import version
@@ -32,6 +33,10 @@ if has_mps:
# MPS fix for randn in torchsde
CondFunc('torchsde._brownian.brownian_interval._randn', lambda _, size, dtype, device, seed: torch.randn(size, dtype=dtype, device=torch.device("cpu"), generator=torch.Generator(torch.device("cpu")).manual_seed(int(seed))).to(device), lambda _, size, dtype, device, seed: device.type == 'mps')
+ if platform.mac_ver()[0].startswith("13.2."):
+ # MPS workaround for https://github.com/pytorch/pytorch/issues/95188, thanks to danieldk (https://github.com/explosion/curated-transformers/pull/124)
+ CondFunc('torch.nn.functional.linear', lambda _, input, weight, bias: (torch.matmul(input, weight.t()) + bias) if bias is not None else torch.matmul(input, weight.t()), lambda _, input, weight, bias: input.numel() > 10485760)
+
if version.parse(torch.__version__) < version.parse("1.13"):
# PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working
@@ -49,4 +54,6 @@ if has_mps:
CondFunc('torch.cumsum', cumsum_fix_func, None)
CondFunc('torch.Tensor.cumsum', cumsum_fix_func, None)
CondFunc('torch.narrow', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).clone(), None)
-
+ if version.parse(torch.__version__) == version.parse("2.0"):
+ # MPS workaround for https://github.com/pytorch/pytorch/issues/96113
+ CondFunc('torch.nn.functional.layer_norm', lambda orig_func, x, normalized_shape, weight, bias, eps, **kwargs: orig_func(x.float(), normalized_shape, weight.float() if weight is not None else None, bias.float() if bias is not None else bias, eps).to(x.dtype), lambda *args, **kwargs: len(args) == 6)
diff --git a/modules/modelloader.py b/modules/modelloader.py
index e351d808..522affc6 100644
--- a/modules/modelloader.py
+++ b/modules/modelloader.py
@@ -4,7 +4,6 @@ import shutil
import importlib
from urllib.parse import urlparse
-from basicsr.utils.download_util import load_file_from_url
from modules import shared
from modules.upscaler import Upscaler, UpscalerLanczos, UpscalerNearest, UpscalerNone
from modules.paths import script_path, models_path
@@ -59,6 +58,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None
if model_url is not None and len(output) == 0:
if download_name is not None:
+ from basicsr.utils.download_util import load_file_from_url
dl = load_file_from_url(model_url, model_path, True, download_name)
output.append(dl)
else:
diff --git a/modules/processing.py b/modules/processing.py
index 59717b4c..2e5a363f 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -689,6 +689,22 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
image.info["parameters"] = text
output_images.append(image)
+ if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay:
+ image_mask = p.mask_for_overlay.convert('RGB')
+ image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), p.mask_for_overlay.convert('L')).convert('RGBA')
+
+ if opts.save_mask:
+ images.save_image(image_mask, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask")
+
+ if opts.save_mask_composite:
+ images.save_image(image_mask_composite, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask-composite")
+
+ if opts.return_mask:
+ output_images.append(image_mask)
+
+ if opts.return_mask_composite:
+ output_images.append(image_mask_composite)
+
del x_samples_ddim
devices.torch_gc()
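The mask and mask-composite outputs added above are built with plain PIL calls; the composite keeps only the masked region of the result image, using the overlay mask as alpha. A standalone sketch of the same construction, with placeholder file names (not files from this change):

    # Hedged sketch of the mask-composite construction used in process_images_inner above.
    # "output.png" and "mask.png" are placeholder inputs for illustration.
    from PIL import Image

    image = Image.open("output.png")
    mask_for_overlay = Image.open("mask.png")

    image_mask = mask_for_overlay.convert('RGB')
    image_mask_composite = Image.composite(
        image.convert('RGBA').convert('RGBa'),  # premultiplied copy of the result image
        Image.new('RGBa', image.size),          # fully transparent background
        mask_for_overlay.convert('L')           # mask drives which pixels are kept
    ).convert('RGBA')

    image_mask.save("output-mask.png")
    image_mask_composite.save("output-mask-composite.png")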
diff --git a/modules/scripts.py b/modules/scripts.py
index 8de19884..d661be4f 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -239,7 +239,15 @@ def load_scripts():
elif issubclass(script_class, scripts_postprocessing.ScriptPostprocessing):
postprocessing_scripts_data.append(ScriptClassData(script_class, scriptfile.path, scriptfile.basedir, module))
- for scriptfile in sorted(scripts_list):
+ def orderby(basedir):
+ # 1st webui, 2nd extensions-builtin, 3rd extensions
+ priority = {os.path.join(paths.script_path, "extensions-builtin"):1, paths.script_path:0}
+ for key in priority:
+ if basedir.startswith(key):
+ return priority[key]
+ return 9999
+
+ for scriptfile in sorted(scripts_list, key=lambda x: [orderby(x.basedir), x]):
try:
if scriptfile.basedir != paths.script_path:
sys.path = [scriptfile.basedir] + sys.path