Diffstat (limited to 'extensions-builtin')
-rw-r--r-- | extensions-builtin/Lora/lora_patches.py | 31
-rw-r--r-- | extensions-builtin/Lora/network_full.py | 7
-rw-r--r-- | extensions-builtin/Lora/networks.py | 37
-rw-r--r-- | extensions-builtin/Lora/scripts/lora_script.py | 51
-rw-r--r-- | extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js | 128
-rw-r--r-- | extensions-builtin/canvas-zoom-and-pan/style.css | 3
-rw-r--r-- | extensions-builtin/extra-options-section/scripts/extra_options_section.py | 12
-rw-r--r-- | extensions-builtin/mobile/javascript/mobile.js | 6
8 files changed, 191 insertions, 84 deletions
diff --git a/extensions-builtin/Lora/lora_patches.py b/extensions-builtin/Lora/lora_patches.py
new file mode 100644
index 00000000..b394d8e9
--- /dev/null
+++ b/extensions-builtin/Lora/lora_patches.py
@@ -0,0 +1,31 @@
+import torch
+
+import networks
+from modules import patches
+
+
+class LoraPatches:
+ def __init__(self):
+ self.Linear_forward = patches.patch(__name__, torch.nn.Linear, 'forward', networks.network_Linear_forward)
+ self.Linear_load_state_dict = patches.patch(__name__, torch.nn.Linear, '_load_from_state_dict', networks.network_Linear_load_state_dict)
+ self.Conv2d_forward = patches.patch(__name__, torch.nn.Conv2d, 'forward', networks.network_Conv2d_forward)
+ self.Conv2d_load_state_dict = patches.patch(__name__, torch.nn.Conv2d, '_load_from_state_dict', networks.network_Conv2d_load_state_dict)
+ self.GroupNorm_forward = patches.patch(__name__, torch.nn.GroupNorm, 'forward', networks.network_GroupNorm_forward)
+ self.GroupNorm_load_state_dict = patches.patch(__name__, torch.nn.GroupNorm, '_load_from_state_dict', networks.network_GroupNorm_load_state_dict)
+ self.LayerNorm_forward = patches.patch(__name__, torch.nn.LayerNorm, 'forward', networks.network_LayerNorm_forward)
+ self.LayerNorm_load_state_dict = patches.patch(__name__, torch.nn.LayerNorm, '_load_from_state_dict', networks.network_LayerNorm_load_state_dict)
+ self.MultiheadAttention_forward = patches.patch(__name__, torch.nn.MultiheadAttention, 'forward', networks.network_MultiheadAttention_forward)
+ self.MultiheadAttention_load_state_dict = patches.patch(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict', networks.network_MultiheadAttention_load_state_dict)
+
+ def undo(self):
+ self.Linear_forward = patches.undo(__name__, torch.nn.Linear, 'forward')
+ self.Linear_load_state_dict = patches.undo(__name__, torch.nn.Linear, '_load_from_state_dict')
+ self.Conv2d_forward = patches.undo(__name__, torch.nn.Conv2d, 'forward')
+ self.Conv2d_load_state_dict = patches.undo(__name__, torch.nn.Conv2d, '_load_from_state_dict')
+ self.GroupNorm_forward = patches.undo(__name__, torch.nn.GroupNorm, 'forward')
+ self.GroupNorm_load_state_dict = patches.undo(__name__, torch.nn.GroupNorm, '_load_from_state_dict')
+ self.LayerNorm_forward = patches.undo(__name__, torch.nn.LayerNorm, 'forward')
+ self.LayerNorm_load_state_dict = patches.undo(__name__, torch.nn.LayerNorm, '_load_from_state_dict')
+ self.MultiheadAttention_forward = patches.undo(__name__, torch.nn.MultiheadAttention, 'forward')
+ self.MultiheadAttention_load_state_dict = patches.undo(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict')
+
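The new LoraPatches class leans on a patch()/undo() pair from modules.patches. Below is a minimal sketch of helpers with matching semantics: patch() stashes the original attribute and returns it, while undo() restores it and returns None, which is why LoraPatches.undo() assigns the results back to its fields. Only the call signatures come from the diff; the dictionary bookkeeping is an assumption about the implementation.

# Sketch of the modules.patches helpers as used by LoraPatches above.
# Signatures match the calls in the diff; the internals are an assumption.

_originals = {}


def patch(key, obj, field, replacement):
    """Replace obj.field with replacement; remember and return the original."""
    patch_key = (key, obj, field)
    if patch_key in _originals:
        raise RuntimeError(f"{field} is already patched under key {key}")
    _originals[patch_key] = getattr(obj, field)
    setattr(obj, field, replacement)
    return _originals[patch_key]


def undo(key, obj, field):
    """Put the original obj.field back; return None so callers can clear their reference."""
    setattr(obj, field, _originals.pop((key, obj, field)))
    return None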
diff --git a/extensions-builtin/Lora/network_full.py b/extensions-builtin/Lora/network_full.py
index 109b4c2c..bf6930e9 100644
--- a/extensions-builtin/Lora/network_full.py
+++ b/extensions-builtin/Lora/network_full.py
@@ -14,9 +14,14 @@ class NetworkModuleFull(network.NetworkModule):
super().__init__(net, weights)
self.weight = weights.w.get("diff")
+ self.ex_bias = weights.w.get("diff_b")
def calc_updown(self, orig_weight):
output_shape = self.weight.shape
updown = self.weight.to(orig_weight.device, dtype=orig_weight.dtype)
+ if self.ex_bias is not None:
+ ex_bias = self.ex_bias.to(orig_weight.device, dtype=orig_weight.dtype)
+ else:
+ ex_bias = None
- return self.finalize_updown(updown, orig_weight, output_shape)
+ return self.finalize_updown(updown, orig_weight, output_shape, ex_bias)
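The full-weight module now also reads an optional bias delta ("diff_b") and hands it to finalize_updown together with the weight delta. A hedged sketch of the receiving side, assuming finalize_updown simply reshapes and scales both tensors (the real method lives in network.py and may differ):

# Sketch of the finalize_updown signature implied by the call above; the
# scaling shown (scale and multiplier on the weight delta, multiplier on the
# bias delta) is an assumption, not the committed implementation.
def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None):
    if updown.shape != output_shape:
        updown = updown.reshape(output_shape)

    updown = updown * self.calc_scale() * self.multiplier()
    if ex_bias is not None:
        ex_bias = ex_bias * self.multiplier()

    return updown, ex_bias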
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 22fdff4a..96f935b2 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -2,6 +2,7 @@ import logging
import os
import re
+import lora_patches
import network
import network_lora
import network_hada
@@ -303,7 +304,10 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
wanted_names = tuple((x.name, x.te_multiplier, x.unet_multiplier, x.dyn_dim) for x in loaded_networks)
weights_backup = getattr(self, "network_weights_backup", None)
- if weights_backup is None:
+ if weights_backup is None and wanted_names != ():
+ if current_names != ():
+ raise RuntimeError("no backup weights found and current weights are not unchanged")
+
if isinstance(self, torch.nn.MultiheadAttention):
weights_backup = (self.in_proj_weight.to(devices.cpu, copy=True), self.out_proj.weight.to(devices.cpu, copy=True))
else:
@@ -418,74 +422,74 @@ def network_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
def network_Linear_forward(self, input):
if shared.opts.lora_functional:
- return network_forward(self, input, torch.nn.Linear_forward_before_network)
+ return network_forward(self, input, originals.Linear_forward)
network_apply_weights(self)
- return torch.nn.Linear_forward_before_network(self, input)
+ return originals.Linear_forward(self, input)
def network_Linear_load_state_dict(self, *args, **kwargs):
network_reset_cached_weight(self)
- return torch.nn.Linear_load_state_dict_before_network(self, *args, **kwargs)
+ return originals.Linear_load_state_dict(self, *args, **kwargs)
def network_Conv2d_forward(self, input):
if shared.opts.lora_functional:
- return network_forward(self, input, torch.nn.Conv2d_forward_before_network)
+ return network_forward(self, input, originals.Conv2d_forward)
network_apply_weights(self)
- return torch.nn.Conv2d_forward_before_network(self, input)
+ return originals.Conv2d_forward(self, input)
def network_Conv2d_load_state_dict(self, *args, **kwargs):
network_reset_cached_weight(self)
- return torch.nn.Conv2d_load_state_dict_before_network(self, *args, **kwargs)
+ return originals.Conv2d_load_state_dict(self, *args, **kwargs)
def network_GroupNorm_forward(self, input):
if shared.opts.lora_functional:
- return network_forward(self, input, torch.nn.GroupNorm_forward_before_network)
+ return network_forward(self, input, originals.GroupNorm_forward)
network_apply_weights(self)
- return torch.nn.GroupNorm_forward_before_network(self, input)
+ return originals.GroupNorm_forward(self, input)
def network_GroupNorm_load_state_dict(self, *args, **kwargs):
network_reset_cached_weight(self)
- return torch.nn.GroupNorm_load_state_dict_before_network(self, *args, **kwargs)
+ return originals.GroupNorm_load_state_dict(self, *args, **kwargs)
def network_LayerNorm_forward(self, input):
if shared.opts.lora_functional:
- return network_forward(self, input, torch.nn.LayerNorm_forward_before_network)
+ return network_forward(self, input, originals.LayerNorm_forward)
network_apply_weights(self)
- return torch.nn.LayerNorm_forward_before_network(self, input)
+ return originals.LayerNorm_forward(self, input)
def network_LayerNorm_load_state_dict(self, *args, **kwargs):
network_reset_cached_weight(self)
- return torch.nn.LayerNorm_load_state_dict_before_network(self, *args, **kwargs)
+ return originals.LayerNorm_load_state_dict(self, *args, **kwargs)
def network_MultiheadAttention_forward(self, *args, **kwargs):
network_apply_weights(self)
- return torch.nn.MultiheadAttention_forward_before_network(self, *args, **kwargs)
+ return originals.MultiheadAttention_forward(self, *args, **kwargs)
def network_MultiheadAttention_load_state_dict(self, *args, **kwargs):
network_reset_cached_weight(self)
- return torch.nn.MultiheadAttention_load_state_dict_before_network(self, *args, **kwargs)
+ return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs)
def list_available_networks():
@@ -552,6 +556,9 @@ def infotext_pasted(infotext, params):
if added:
params["Prompt"] += "\n" + "".join(added)
+
+originals: lora_patches.LoraPatches = None
+
extra_network_lora = None
available_networks = {}
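The guard added at the top of network_apply_weights (the hunk at line 304 above) tightens an invariant: a CPU backup of the pristine weights must exist before any network is baked into a module, and finding already-applied networks (current_names) without a backup is now a hard error instead of silent weight corruption. Here is the same logic folded into a hypothetical helper, ensure_weights_backup, for readability; in the diff it is inline, and the final assignment to network_weights_backup is assumed from context.

import torch

from modules import devices  # webui helper module referenced in the diff


def ensure_weights_backup(module, current_names, wanted_names):
    # Condensed sketch of the guard added above: snapshot pristine weights on
    # the CPU the first time any network is applied to this module.
    weights_backup = getattr(module, "network_weights_backup", None)
    if weights_backup is None and wanted_names != ():
        if current_names != ():
            # networks were applied earlier but the backup is gone; restoring
            # the original weights is impossible, so fail loudly
            raise RuntimeError("no backup weights found and current weights are not unchanged")

        if isinstance(module, torch.nn.MultiheadAttention):
            weights_backup = (module.in_proj_weight.to(devices.cpu, copy=True), module.out_proj.weight.to(devices.cpu, copy=True))
        else:
            weights_backup = module.weight.to(devices.cpu, copy=True)

        module.network_weights_backup = weights_backup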
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 4c6e774a..ef23968c 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -1,23 +1,19 @@
import re
-import torch
import gradio as gr
from fastapi import FastAPI
import network
import networks
import lora # noqa:F401
+import lora_patches
import extra_networks_lora
import ui_extra_networks_lora
from modules import script_callbacks, ui_extra_networks, extra_networks, shared
+
def unload():
- torch.nn.Linear.forward = torch.nn.Linear_forward_before_network
- torch.nn.Linear._load_from_state_dict = torch.nn.Linear_load_state_dict_before_network
- torch.nn.Conv2d.forward = torch.nn.Conv2d_forward_before_network
- torch.nn.Conv2d._load_from_state_dict = torch.nn.Conv2d_load_state_dict_before_network
- torch.nn.MultiheadAttention.forward = torch.nn.MultiheadAttention_forward_before_network
- torch.nn.MultiheadAttention._load_from_state_dict = torch.nn.MultiheadAttention_load_state_dict_before_network
+ networks.originals.undo()
def before_ui():
@@ -28,46 +24,7 @@ def before_ui():
extra_networks.register_extra_network_alias(networks.extra_network_lora, "lyco")
-if not hasattr(torch.nn, 'Linear_forward_before_network'):
- torch.nn.Linear_forward_before_network = torch.nn.Linear.forward
-
-if not hasattr(torch.nn, 'Linear_load_state_dict_before_network'):
- torch.nn.Linear_load_state_dict_before_network = torch.nn.Linear._load_from_state_dict
-
-if not hasattr(torch.nn, 'Conv2d_forward_before_network'):
- torch.nn.Conv2d_forward_before_network = torch.nn.Conv2d.forward
-
-if not hasattr(torch.nn, 'Conv2d_load_state_dict_before_network'):
- torch.nn.Conv2d_load_state_dict_before_network = torch.nn.Conv2d._load_from_state_dict
-
-if not hasattr(torch.nn, 'GroupNorm_forward_before_network'):
- torch.nn.GroupNorm_forward_before_network = torch.nn.GroupNorm.forward
-
-if not hasattr(torch.nn, 'GroupNorm_load_state_dict_before_network'):
- torch.nn.GroupNorm_load_state_dict_before_network = torch.nn.GroupNorm._load_from_state_dict
-
-if not hasattr(torch.nn, 'LayerNorm_forward_before_network'):
- torch.nn.LayerNorm_forward_before_network = torch.nn.LayerNorm.forward
-
-if not hasattr(torch.nn, 'LayerNorm_load_state_dict_before_network'):
- torch.nn.LayerNorm_load_state_dict_before_network = torch.nn.LayerNorm._load_from_state_dict
-
-if not hasattr(torch.nn, 'MultiheadAttention_forward_before_network'):
- torch.nn.MultiheadAttention_forward_before_network = torch.nn.MultiheadAttention.forward
-
-if not hasattr(torch.nn, 'MultiheadAttention_load_state_dict_before_network'):
- torch.nn.MultiheadAttention_load_state_dict_before_network = torch.nn.MultiheadAttention._load_from_state_dict
-
-torch.nn.Linear.forward = networks.network_Linear_forward
-torch.nn.Linear._load_from_state_dict = networks.network_Linear_load_state_dict
-torch.nn.Conv2d.forward = networks.network_Conv2d_forward
-torch.nn.Conv2d._load_from_state_dict = networks.network_Conv2d_load_state_dict
-torch.nn.GroupNorm.forward = networks.network_GroupNorm_forward
-torch.nn.GroupNorm._load_from_state_dict = networks.network_GroupNorm_load_state_dict
-torch.nn.LayerNorm.forward = networks.network_LayerNorm_forward
-torch.nn.LayerNorm._load_from_state_dict = networks.network_LayerNorm_load_state_dict
-torch.nn.MultiheadAttention.forward = networks.network_MultiheadAttention_forward
-torch.nn.MultiheadAttention._load_from_state_dict = networks.network_MultiheadAttention_load_state_dict
+networks.originals = lora_patches.LoraPatches()
script_callbacks.on_model_loaded(networks.assign_network_names_to_compvis_modules)
script_callbacks.on_script_unloaded(unload)
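The script now owns the whole patch lifecycle through a single object instead of the old wall of hasattr checks: construct LoraPatches once at import time, expose it as networks.originals, and undo it when the script is unloaded. A minimal sketch of that flow, assuming the webui's extension import paths:

import networks
import lora_patches

# install: each patches.patch() call swaps in the network-aware method and
# records the original, which the forwards in networks.py reach via originals
networks.originals = lora_patches.LoraPatches()

# ... generation runs with the patched torch.nn methods ...

# uninstall (what unload() does): every original method is restored
networks.originals.undo()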
diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index 72c8ba87..23423891 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -15,6 +15,19 @@ onUiLoaded(async() => {
// Helper functions
// Get active tab
+
+ /**
+ * Waits for an element to be present in the DOM.
+ */
+ const waitForElement = (id) => new Promise(resolve => {
+ const checkForElement = () => {
+ const element = document.querySelector(id);
+ if (element) return resolve(element);
+ setTimeout(checkForElement, 100);
+ };
+ checkForElement();
+ });
+
function getActiveTab(elements, all = false) {
const tabs = elements.img2imgTabs.querySelectorAll("button");
@@ -35,7 +48,7 @@ onUiLoaded(async() => {
// Wait until opts loaded
async function waitForOpts() {
- for (;;) {
+ for (; ;) {
if (window.opts && Object.keys(window.opts).length) {
return window.opts;
}
@@ -256,7 +269,7 @@ onUiLoaded(async() => {
input?.addEventListener("input", () => restoreImgRedMask(elements));
}
- function applyZoomAndPan(elemId) {
+ function applyZoomAndPan(elemId, isExtension = true) {
const targetElement = gradioApp().querySelector(elemId);
if (!targetElement) {
@@ -368,6 +381,10 @@ onUiLoaded(async() => {
panY: 0
};
+ if (isExtension) {
+ targetElement.style.overflow = "hidden";
+ }
+
fixCanvas();
targetElement.style.transform = `scale(${elemData[elemId].zoomLevel}) translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px)`;
@@ -383,8 +400,22 @@ onUiLoaded(async() => {
closeBtn.addEventListener("click", resetZoom);
}
+ if (canvas && isExtension) {
+ const parentElement = targetElement.closest('[id^="component-"]');
+ if (
+ canvas &&
+ parseFloat(canvas.style.width) > parentElement.offsetWidth &&
+ parseFloat(targetElement.style.width) > parentElement.offsetWidth
+ ) {
+ fitToElement();
+ return;
+ }
+
+ }
+
if (
canvas &&
+ !isExtension &&
parseFloat(canvas.style.width) > 865 &&
parseFloat(targetElement.style.width) > 865
) {
@@ -393,9 +424,6 @@
}
targetElement.style.width = "";
- if (canvas) {
- targetElement.style.height = canvas.style.height;
- }
}
// Toggle the zIndex of the target element between two values, allowing it to overlap or be overlapped by other elements
@@ -462,6 +490,10 @@
targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${newZoomLevel})`;
toggleOverlap("on");
+
+ if (isExtension) {
+ targetElement.style.overflow = "visible";
+ }
+
return newZoomLevel;
}
@@ -484,7 +516,7 @@
fullScreenMode = false;
elemData[elemId].zoomLevel = updateZoom(
elemData[elemId].zoomLevel +
- (operation === "+" ? delta : -delta),
+ (operation === "+" ? delta : -delta),
zoomPosX - targetElement.getBoundingClientRect().left,
zoomPosY - targetElement.getBoundingClientRect().top
);
@@ -501,10 +533,19 @@
//Reset Zoom
targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
+ let parentElement;
+
+ if (isExtension) {
+ parentElement = targetElement.closest('[id^="component-"]');
+ } else {
+ parentElement = targetElement.parentElement;
+ }
+
+ // Get element and screen dimensions
const elementWidth = targetElement.offsetWidth;
const elementHeight = targetElement.offsetHeight;
- const parentElement = targetElement.parentElement;
+
const screenWidth = parentElement.clientWidth;
const screenHeight = parentElement.clientHeight;
@@ -555,10 +596,15 @@
`${elemId} canvas[key="interface"]`
);
+ if (isExtension) {
+ targetElement.style.overflow = "visible";
+ }
+
+
if (!canvas) return;
- if (canvas.offsetWidth > 862) {
- targetElement.style.width = canvas.offsetWidth + "px";
+ if (canvas.offsetWidth > 862 || isExtension) {
+ targetElement.style.width = (canvas.offsetWidth + 2) + "px";
}
if (fullScreenMode) {
@@ -667,9 +713,7 @@
targetElement.isExpanded = false;
function autoExpand() {
const canvas = document.querySelector(`${elemId} canvas[key="interface"]`);
- const isMainTab = activeElement === elementIDs.inpaint || activeElement === elementIDs.inpaintSketch || activeElement === elementIDs.sketch;
-
- if (canvas && isMainTab) {
+ if (canvas) {
if (hasHorizontalScrollbar(targetElement) && targetElement.isExpanded === false) {
targetElement.style.visibility = "hidden";
setTimeout(() => {
@@ -808,6 +852,11 @@
if (isMoving && elemId === activeElement) {
updatePanPosition(e.movementX, e.movementY);
targetElement.style.pointerEvents = "none";
+
+ if (isExtension) {
+ targetElement.style.overflow = "visible";
+ }
+
} else {
targetElement.style.pointerEvents = "auto";
}
@@ -821,10 +870,57 @@
gradioApp().addEventListener("mousemove", handleMoveByKey);
}
- applyZoomAndPan(elementIDs.sketch);
- applyZoomAndPan(elementIDs.inpaint);
- applyZoomAndPan(elementIDs.inpaintSketch);
+ applyZoomAndPan(elementIDs.sketch, false);
+ applyZoomAndPan(elementIDs.inpaint, false);
+ applyZoomAndPan(elementIDs.inpaintSketch, false);
// Make the function global so that other extensions can take advantage of this solution
- window.applyZoomAndPan = applyZoomAndPan;
+ const applyZoomAndPanIntegration = async(id, elementIDs) => {
+ const mainEl = document.querySelector(id);
+ if (id.toLocaleLowerCase() === "none") {
+ for (const elementID of elementIDs) {
+ const el = await waitForElement(elementID);
+ if (!el) break;
+ applyZoomAndPan(elementID);
+ }
+ return;
+ }
+
+ if (!mainEl) return;
+ mainEl.addEventListener("click", async() => {
+ for (const elementID of elementIDs) {
+ const el = await waitForElement(elementID);
+ if (!el) break;
+ applyZoomAndPan(elementID);
+ }
+ }, {once: true});
+ };
+
+ window.applyZoomAndPan = applyZoomAndPan; // Only 1 element, argument elementID, for example applyZoomAndPan("#txt2img_controlnet_ControlNet_input_image")
+
+ window.applyZoomAndPanIntegration = applyZoomAndPanIntegration; // for any extension
+
+ /*
+ The function `applyZoomAndPanIntegration` takes two arguments:
+
+ 1. `id`: A string identifier for the element to which zoom and pan functionality will be applied on click.
+ If the `id` value is "none", the functionality will be applied to all elements specified in the second argument without a click event.
+
+ 2. `elementIDs`: An array of string identifiers for elements. Zoom and pan functionality will be applied to each of these elements on click of the element specified by the first argument.
+ If "none" is specified in the first argument, the functionality will be applied to each of these elements without a click event.
+
+ Example usage:
+ applyZoomAndPanIntegration("#txt2img_controlnet", ["#txt2img_controlnet_ControlNet_input_image"]);
+ In this example, zoom and pan functionality will be applied to the element with the identifier "txt2img_controlnet_ControlNet_input_image" upon clicking the element with the identifier "txt2img_controlnet".
+ */
+
+ // More examples
+ // Add integration with ControlNet txt2img One TAB
+ // applyZoomAndPanIntegration("#txt2img_controlnet", ["#txt2img_controlnet_ControlNet_input_image"]);
+
+ // Add integration with ControlNet txt2img Tabs
+ // applyZoomAndPanIntegration("#txt2img_controlnet", Array.from({ length: 10 }, (_, i) => `#txt2img_controlnet_ControlNet-${i}_input_image`));
+
+ // Add integration with Inpaint Anything
+ // applyZoomAndPanIntegration("None", ["#ia_sam_image", "#ia_sel_mask"]);
});
diff --git a/extensions-builtin/canvas-zoom-and-pan/style.css b/extensions-builtin/canvas-zoom-and-pan/style.css
index 6bcc9570..5d8054e6 100644
--- a/extensions-builtin/canvas-zoom-and-pan/style.css
+++ b/extensions-builtin/canvas-zoom-and-pan/style.css
@@ -61,3 +61,6 @@ to {opacity: 1;}
}
+.styler {
+ overflow:inherit !important;
+}
\ No newline at end of file
diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
index 588b64d2..983f87ff 100644
--- a/extensions-builtin/extra-options-section/scripts/extra_options_section.py
+++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
@@ -22,22 +22,23 @@ class ExtraOptionsSection(scripts.Script):
self.comps = []
self.setting_names = []
self.infotext_fields = []
+ extra_options = shared.opts.extra_options_img2img if is_img2img else shared.opts.extra_options_txt2img
mapping = {k: v for v, k in generation_parameters_copypaste.infotext_to_setting_name_mapping}
with gr.Blocks() as interface:
- with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and shared.opts.extra_options else gr.Group():
+ with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and extra_options else gr.Group():
- row_count = math.ceil(len(shared.opts.extra_options) / shared.opts.extra_options_cols)
+ row_count = math.ceil(len(extra_options) / shared.opts.extra_options_cols)
for row in range(row_count):
with gr.Row():
for col in range(shared.opts.extra_options_cols):
index = row * shared.opts.extra_options_cols + col
- if index >= len(shared.opts.extra_options):
+ if index >= len(extra_options):
break
- setting_name = shared.opts.extra_options[index]
+ setting_name = extra_options[index]
with FormColumn():
comp = ui_settings.create_setting_component(setting_name)
@@ -64,7 +65,8 @@ class ExtraOptionsSection(scripts.Script):
shared.options_templates.update(shared.options_section(('ui', "User interface"), {
- "extra_options": shared.OptionInfo([], "Options in main UI", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img/img2img interfaces").needs_reload_ui(),
+ "extra_options_txt2img": shared.OptionInfo([], "Options in main UI - txt2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img interfaces").needs_reload_ui(),
+ "extra_options_img2img": shared.OptionInfo([], "Options in main UI - img2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in img2img interfaces").needs_reload_ui(),
"extra_options_cols": shared.OptionInfo(1, "Options in main UI - number of columns", gr.Number, {"precision": 0}).needs_reload_ui(),
"extra_options_accordion": shared.OptionInfo(False, "Options in main UI - place into an accordion").needs_reload_ui()
}))
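The single extra_options setting is replaced here by separate txt2img and img2img lists, so a config.json that still carries the old combined key would no longer populate either UI. A hypothetical one-off migration (the key names are real, taken from this diff; the script itself is not part of the commit):

import json

# hypothetical migration: copy the old combined list into both new settings
with open("config.json") as file:
    config = json.load(file)

old_options = config.pop("extra_options", [])
if old_options:
    config.setdefault("extra_options_txt2img", old_options)
    config.setdefault("extra_options_img2img", old_options)

with open("config.json", "w") as file:
    json.dump(config, file, indent=4)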
diff --git a/extensions-builtin/mobile/javascript/mobile.js b/extensions-builtin/mobile/javascript/mobile.js
index 12cae4b7..652f07ac 100644
--- a/extensions-builtin/mobile/javascript/mobile.js
+++ b/extensions-builtin/mobile/javascript/mobile.js
@@ -20,7 +20,13 @@ function reportWindowSize() {
var button = gradioApp().getElementById(tab + '_generate_box');
var target = gradioApp().getElementById(currentlyMobile ? tab + '_results' : tab + '_actions_column');
target.insertBefore(button, target.firstElementChild);
+
+ gradioApp().getElementById(tab + '_results').classList.toggle('mobile', currentlyMobile);
}
}
window.addEventListener("resize", reportWindowSize);
+
+onUiLoaded(function() {
+ reportWindowSize();
+});