diff --git a/javascript/_result.js b/javascript/_result.js index 96129f1..823f26d 100644 --- a/javascript/_result.js +++ b/javascript/_result.js @@ -27,6 +27,7 @@ class AutocompleteResult { aliases = null; meta = null; hash = null; + sortKey = null; // Constructor constructor(text, type) { diff --git a/javascript/_utils.js b/javascript/_utils.js index 2619ad5..50ab101 100644 --- a/javascript/_utils.js +++ b/javascript/_utils.js @@ -223,6 +223,42 @@ function observeElement(element, property, callback, delay = 0) { } } +// Sort functions +function getSortFunction() { + let criterion = TAC_CFG.modelSortOrder || "Name"; + + const textSort = (a, b, reverse = false) => { + const textHolderA = a.type === ResultType.chant ? a.aliases : a.text; + const textHolderB = b.type === ResultType.chant ? b.aliases : b.text; + + const aKey = a.sortKey || textHolderA; + const bKey = b.sortKey || textHolderB; + return reverse ? bKey.localeCompare(aKey) : aKey.localeCompare(bKey); + } + const numericSort = (a, b, reverse = false) => { + const noKey = reverse ? "-1" : Number.MAX_SAFE_INTEGER; + let aParsed = parseFloat(a.sortKey || noKey); + let bParsed = parseFloat(b.sortKey || noKey); + + if (aParsed === bParsed) { + return textSort(a, b, false); + } + + return reverse ? 
bParsed - aParsed : aParsed - bParsed; + } + + return (a, b) => { + switch (criterion) { + case "Date Modified (newest first)": + return numericSort(a, b, true); + case "Date Modified (oldest first)": + return numericSort(a, b, false); + default: + return textSort(a, b); + } + } +} + // Queue calling function to process global queues async function processQueue(queue, context, ...args) { for (let i = 0; i < queue.length; i++) { diff --git a/javascript/ext_embeddings.js b/javascript/ext_embeddings.js index e51aa4b..9c7bd44 100644 --- a/javascript/ext_embeddings.js +++ b/javascript/ext_embeddings.js @@ -16,7 +16,7 @@ class EmbeddingParser extends BaseTagParser { let filterCondition = x => x[0].toLowerCase().includes(searchTerm) || x[0].toLowerCase().replaceAll(" ", "_").includes(searchTerm); if (versionString) - tempResults = embeddings.filter(x => filterCondition(x) && x[1] && x[1] === versionString); // Filter by tagword + tempResults = embeddings.filter(x => filterCondition(x) && x[2] && x[2] === versionString); // Filter by tagword else tempResults = embeddings.filter(x => filterCondition(x)); // Filter by tagword } else { @@ -27,7 +27,8 @@ class EmbeddingParser extends BaseTagParser { let finalResults = []; tempResults.forEach(t => { let result = new AutocompleteResult(t[0].trim(), ResultType.embedding) - result.meta = t[1] + " Embedding"; + result.sortKey = t[1]; + result.meta = t[2] + " Embedding"; finalResults.push(result); }); @@ -38,9 +39,9 @@ class EmbeddingParser extends BaseTagParser { async function load() { if (embeddings.length === 0) { try { - embeddings = (await readFile(`${tagBasePath}/temp/emb.txt`)).split("\n") - .filter(x => x.trim().length > 0) // Remove empty lines - .map(x => x.trim().split(",")); // Split into name, version type pairs + embeddings = (await loadCSV(`${tagBasePath}/temp/emb.txt`)) + .filter(x => x[0]?.trim().length > 0) // Remove empty lines + .map(x => [x[0].trim(), x[1], x[2]]); // Return name, sortKey, hash tuples } catch 
(e) { console.error("Error loading embeddings.txt: " + e); } diff --git a/javascript/ext_hypernets.js b/javascript/ext_hypernets.js index 7f564fd..3613b2a 100644 --- a/javascript/ext_hypernets.js +++ b/javascript/ext_hypernets.js @@ -8,7 +8,7 @@ class HypernetParser extends BaseTagParser { if (tagword !== "<" && tagword !== " x.toLowerCase().includes(searchTerm) || x.toLowerCase().replaceAll(" ", "_").includes(searchTerm); - tempResults = hypernetworks.filter(x => filterCondition(x)); // Filter by tagword + tempResults = hypernetworks.filter(x => filterCondition(x[0])); // Filter by tagword } else { tempResults = hypernetworks; } @@ -16,8 +16,9 @@ class HypernetParser extends BaseTagParser { // Add final results let finalResults = []; tempResults.forEach(t => { - let result = new AutocompleteResult(t.trim(), ResultType.hypernetwork) + let result = new AutocompleteResult(t[0].trim(), ResultType.hypernetwork) result.meta = "Hypernetwork"; + result.sortKey = t[1]; finalResults.push(result); }); @@ -28,9 +29,9 @@ class HypernetParser extends BaseTagParser { async function load() { if (hypernetworks.length === 0) { try { - hypernetworks = (await readFile(`${tagBasePath}/temp/hyp.txt`)).split("\n") - .filter(x => x.trim().length > 0) //Remove empty lines - .map(x => x.trim()); // Remove carriage returns and padding if it exists + hypernetworks = (await loadCSV(`${tagBasePath}/temp/hyp.txt`)) + .filter(x => x[0]?.trim().length > 0) //Remove empty lines + .map(x => [x[0]?.trim(), x[1]]); // Remove carriage returns and padding if it exists } catch (e) { console.error("Error loading hypernetworks.txt: " + e); } diff --git a/javascript/ext_loras.js b/javascript/ext_loras.js index 9a94b75..22c879c 100644 --- a/javascript/ext_loras.js +++ b/javascript/ext_loras.js @@ -23,7 +23,8 @@ class LoraParser extends BaseTagParser { let result = new AutocompleteResult(name, ResultType.lora) result.meta = "Lora"; - result.hash = t[1]; + result.sortKey = t[1]; + result.hash = t[2]; 
finalResults.push(result); }); @@ -36,7 +37,7 @@ async function load() { try { loras = (await loadCSV(`${tagBasePath}/temp/lora.txt`)) .filter(x => x[0]?.trim().length > 0) // Remove empty lines - .map(x => [x[0]?.trim(), x[1]]); // Trim filenames and return the name, hash pairs + .map(x => [x[0]?.trim(), x[1], x[2]]); // Trim filenames and return the name, sortKey, hash pairs } catch (e) { console.error("Error loading lora.txt: " + e); } diff --git a/javascript/ext_lycos.js b/javascript/ext_lycos.js index dd1b439..ad6271e 100644 --- a/javascript/ext_lycos.js +++ b/javascript/ext_lycos.js @@ -23,7 +23,8 @@ class LycoParser extends BaseTagParser { let result = new AutocompleteResult(name, ResultType.lyco) result.meta = "Lyco"; - result.hash = t[1]; + result.sortKey = t[1]; + result.hash = t[2]; finalResults.push(result); }); @@ -36,7 +37,7 @@ async function load() { try { lycos = (await loadCSV(`${tagBasePath}/temp/lyco.txt`)) .filter(x => x[0]?.trim().length > 0) // Remove empty lines - .map(x => [x[0]?.trim(), x[1]]); // Trim filenames and return the name, hash pairs + .map(x => [x[0]?.trim(), x[1], x[2]]); // Trim filenames and return the name, sortKey, hash pairs } catch (e) { console.error("Error loading lyco.txt: " + e); } diff --git a/javascript/ext_umi.js b/javascript/ext_umi.js index c076734..a55f80c 100644 --- a/javascript/ext_umi.js +++ b/javascript/ext_umi.js @@ -149,6 +149,7 @@ class UmiParser extends BaseTagParser { finalResults.push(result); }); + finalResults = finalResults.sort((a, b) => b.count - a.count); return finalResults; } else if (showAll) { let filteredWildcardsSorted = filteredWildcards(""); @@ -163,6 +164,8 @@ class UmiParser extends BaseTagParser { originalTagword = tagword; tagword = ""; + + finalResults = finalResults.sort((a, b) => b.count - a.count); return finalResults; } } else { @@ -178,6 +181,8 @@ class UmiParser extends BaseTagParser { originalTagword = tagword; tagword = ""; + + finalResults = finalResults.sort((a, b) => 
b.count - a.count); return finalResults; } } diff --git a/javascript/ext_wildcards.js index cde0421..34361b8 100644 --- a/javascript/ext_wildcards.js +++ b/javascript/ext_wildcards.js @@ -85,13 +85,14 @@ class WildcardFileParser extends BaseTagParser { } else { result = new AutocompleteResult(wcFile[1].trim(), ResultType.wildcardFile); result.meta = "Wildcard file"; + result.sortKey = wcFile[2]?.trim(); } finalResults.push(result); alreadyAdded.set(wcFile[1], true); }); - finalResults.sort((a, b) => a.text.localeCompare(b.text)); + finalResults.sort(getSortFunction()); return finalResults; } @@ -100,17 +101,17 @@ async function load() { if (wildcardFiles.length === 0 && wildcardExtFiles.length === 0) { try { - let wcFileArr = (await readFile(`${tagBasePath}/temp/wc.txt`)).split("\n"); - let wcBasePath = wcFileArr[0].trim(); // First line should be the base path + let wcFileArr = await loadCSV(`${tagBasePath}/temp/wc.txt`); + let wcBasePath = wcFileArr[0][0].trim(); // First line should be the base path wildcardFiles = wcFileArr.slice(1) - .filter(x => x.trim().length > 0) // Remove empty lines - .map(x => [wcBasePath, x.trim().replace(".txt", "")]); // Remove file extension & newlines + .filter(x => x[0]?.trim().length > 0) //Remove empty lines + .map(x => [wcBasePath, x[0]?.trim().replace(".txt", ""), x[1]]); // Remove file extension & newlines // To support multiple sources, we need to separate them using the provided "-----" strings - let wcExtFileArr = (await readFile(`${tagBasePath}/temp/wce.txt`)).split("\n"); + let wcExtFileArr = await loadCSV(`${tagBasePath}/temp/wce.txt`); let splitIndices = []; for (let index = 0; index < wcExtFileArr.length; index++) { - if (wcExtFileArr[index].trim() === "-----") { + if (wcExtFileArr[index][0]?.trim() === "-----") { splitIndices.push(index); } } @@ -121,12 +122,10 @@ async function load() { let end = splitIndices[i]; let wcExtFile = 
wcExtFileArr.slice(start, end); - let base = wcExtFile[0].trim() + "/"; + let base = wcExtFile[0][0].trim() + "/"; wcExtFile = wcExtFile.slice(1) - .filter(x => x.trim().length > 0) // Remove empty lines - .map(x => x.trim().replace(base, "").replace(".txt", "")); // Remove file extension & newlines; - - wcExtFile = wcExtFile.map(x => [base, x]); + .filter(x => x[0]?.trim().length > 0) //Remove empty lines + .map(x => [base, x[0]?.trim().replace(base, "").replace(".txt", ""), x[1]]); wildcardExtFiles.push(...wcExtFile); } diff --git a/javascript/tagAutocomplete.js b/javascript/tagAutocomplete.js index 775ba39..87b09b5 100644 --- a/javascript/tagAutocomplete.js +++ b/javascript/tagAutocomplete.js @@ -217,6 +217,7 @@ async function syncOptions() { useLycos: opts["tac_useLycos"], showWikiLinks: opts["tac_showWikiLinks"], showExtraNetworkPreviews: opts["tac_showExtraNetworkPreviews"], + modelSortOrder: opts["tac_modelSortOrder"], // Insertion related settings replaceUnderscores: opts["tac_replaceUnderscores"], escapeParentheses: opts["tac_escapeParentheses"], @@ -269,6 +270,17 @@ async function syncOptions() { await loadTags(newCFG); } + // Refresh temp files if model sort order changed + // Contrary to the other loads, this one shouldn't happen on a first time load + if (TAC_CFG && newCFG.modelSortOrder !== TAC_CFG.modelSortOrder) { + const dropdown = gradioApp().querySelector("#setting_tac_modelSortOrder"); + dropdown.style.opacity = 0.5; + dropdown.style.pointerEvents = "none"; + await refreshTacTempFiles(true); + dropdown.style.opacity = null; + dropdown.style.pointerEvents = null; + } + // Update CSS if maxResults changed if (TAC_CFG && newCFG.maxResults !== TAC_CFG.maxResults) { gradioApp().querySelectorAll(".autocompleteResults").forEach(r => { @@ -1002,41 +1014,34 @@ async function autocomplete(textArea, prompt, fixedTag = null) { tagword = tagword.toLowerCase().replace(/[\n\r]/g, ""); // Process all parsers - let resultCandidates = await 
processParsers(textArea, prompt); + let resultCandidates = (await processParsers(textArea, prompt))?.filter(x => x.length > 0); // If one ore more result candidates match, use their results if (resultCandidates && resultCandidates.length > 0) { // Flatten our candidate(s) results = resultCandidates.flat(); - // If there was more than one candidate, sort the results by text to mix them - // instead of having them added in the order of the parsers - let shouldSort = resultCandidates.length > 1; - if (shouldSort) { - results = results.sort((a, b) => { - let sortByA = a.type === ResultType.chant ? a.aliases : a.text; - let sortByB = b.type === ResultType.chant ? b.aliases : b.text; - return sortByA.localeCompare(sortByB); - }); + // Sort results, but not if it's umi tags since they are sorted by count + if (!(resultCandidates.length === 1 && results[0].type === ResultType.umiWildcard)) + results = results.sort(getSortFunction()); - // Since some tags are kaomoji, we have to add the normal results in some cases - if (tagword.startsWith("<") || tagword.startsWith("*<")) { - // Create escaped search regex with support for * as a start placeholder - let searchRegex; - if (tagword.startsWith("*")) { - tagword = tagword.slice(1); - searchRegex = new RegExp(`${escapeRegExp(tagword)}`, 'i'); - } else { - searchRegex = new RegExp(`(^|[^a-zA-Z])${escapeRegExp(tagword)}`, 'i'); - } - let genericResults = allTags.filter(x => x[0].toLowerCase().search(searchRegex) > -1).slice(0, TAC_CFG.maxResults); - - genericResults.forEach(g => { - let result = new AutocompleteResult(g[0].trim(), ResultType.tag) - result.category = g[1]; - result.count = g[2]; - result.aliases = g[3]; - results.push(result); - }); + // Since some tags are kaomoji, we have to add the normal results in some cases + if (tagword.startsWith("<") || tagword.startsWith("*<")) { + // Create escaped search regex with support for * as a start placeholder + let searchRegex; + if (tagword.startsWith("*")) { + tagword = 
tagword.slice(1); + searchRegex = new RegExp(`${escapeRegExp(tagword)}`, 'i'); + } else { + searchRegex = new RegExp(`(^|[^a-zA-Z])${escapeRegExp(tagword)}`, 'i'); + } let genericResults = allTags.filter(x => x[0].toLowerCase().search(searchRegex) > -1).slice(0, TAC_CFG.maxResults); + + genericResults.forEach(g => { + let result = new AutocompleteResult(g[0].trim(), ResultType.tag) + result.category = g[1]; + result.count = g[2]; + result.aliases = g[3]; + results.push(result); + }); } } // Else search the normal tag list @@ -1223,8 +1228,8 @@ function navigateInList(textArea, event) { event.stopPropagation(); } -async function refreshTacTempFiles() { - setTimeout(async () => { +async function refreshTacTempFiles(api = false) { + const reload = async () => { wildcardFiles = []; wildcardExtFiles = []; umiWildcards = []; @@ -1236,7 +1241,16 @@ async function refreshTacTempFiles() { await processQueue(QUEUE_FILE_LOAD, null); console.log("TAC: Refreshed temp files"); - }, 2000); + } + + if (api === true) { + await postAPI("tacapi/v1/refresh-temp-files", null); + await reload(); + } else { + setTimeout(async () => { + await reload(); + }, 2000); + } } function addAutocompleteToArea(area) { @@ -1326,6 +1340,13 @@ async function setup() { // Listener for internal temp files refresh button gradioApp().querySelector("#refresh_tac_refreshTempFiles")?.addEventListener("click", refreshTacTempFiles); + // Also add listener for external network refresh button (plus triggering python code) + ["#img2img_extra_refresh", "#txt2img_extra_refresh"].forEach(e => { + gradioApp().querySelector(e)?.addEventListener("click", ()=>{ + refreshTacTempFiles(true); + }); + }) + + // Add mutation observer for the model hash text to also allow hash-based blacklist again let modelHashText = gradioApp().querySelector("#sd_checkpoint_hash"); updateModelName(); diff --git a/scripts/tag_autocomplete_helper.py index cbc7de7..b9ff5a3 100644 --- 
a/scripts/tag_autocomplete_helper.py +++ b/scripts/tag_autocomplete_helper.py @@ -1,6 +1,7 @@ # This helper script scans folders for wildcards and embeddings and writes them # to a temporary file to expose it to the javascript side +import os import glob import json import urllib.parse @@ -24,12 +25,45 @@ except Exception as e: # Not supported. load_textual_inversion_embeddings = lambda *args, **kwargs: None print("Tag Autocomplete: Cannot reload embeddings instantly:", e) +# Sorting functions for extra networks / embeddings stuff +sort_criteria = { + "Name": lambda path, name, subpath: name.lower() if subpath else path.stem.lower(), + "Date Modified (newest first)": lambda path, name, subpath: path.stat().st_mtime, + "Date Modified (oldest first)": lambda path, name, subpath: path.stat().st_mtime +} + +def sort_models(model_list, sort_method = None, name_has_subpath = False): + """Sorts models according to the setting. + + Input: list of (full_path, display_name, {hash}) models. + Returns models in the format of name, sort key, meta. + Meta is optional and can be a hash, version string or other required info. + """ + if len(model_list) == 0: + return model_list + + if sort_method is None: + sort_method = getattr(shared.opts, "tac_modelSortOrder", "Name") + + # Get sorting method from dictionary + sorter = sort_criteria.get(sort_method, sort_criteria["Name"]) + + # During merging on the JS side we need to re-sort anyway, so here only the sort criteria are calculated. + # The list itself doesn't need to get sorted at this point. + if len(model_list[0]) > 2: + results = [f'"{name}","{sorter(path, name, name_has_subpath)}",{meta}' for path, name, meta in model_list] + else: + results = [f'"{name}","{sorter(path, name, name_has_subpath)}"' for path, name in model_list] + return results + + def get_wildcards(): """Returns a list of all wildcards. 
Works on nested folders.""" wildcard_files = list(WILDCARD_PATH.rglob("*.txt")) - resolved = [w.relative_to(WILDCARD_PATH).as_posix( - ) for w in wildcard_files if w.name != "put wildcards here.txt"] - return resolved + resolved = [(w, w.relative_to(WILDCARD_PATH).as_posix()) + for w in wildcard_files + if w.name != "put wildcards here.txt"] + return sort_models(resolved, name_has_subpath=True) def get_ext_wildcards(): @@ -38,7 +72,10 @@ def get_ext_wildcards(): for path in WILDCARD_EXT_PATHS: wildcard_files.append(path.as_posix()) - wildcard_files.extend(p.relative_to(path).as_posix() for p in path.rglob("*.txt") if p.name != "put wildcards here.txt") + resolved = [(w, w.relative_to(path).as_posix()) + for w in path.rglob("*.txt") + if w.name != "put wildcards here.txt"] + wildcard_files.extend(sort_models(resolved, name_has_subpath=True)) wildcard_files.append("-----") return wildcard_files @@ -52,7 +89,9 @@ def is_umi_format(data): break return not issue_found -def parse_umi_format(umi_tags, count, data): +count = 0 +def parse_umi_format(umi_tags, data): + global count for item in data: umi_tags[count] = ','.join(data[item]['Tags']) count += 1 @@ -82,7 +121,6 @@ def get_yaml_wildcards(): yaml_wildcards = {} umi_tags = {} # { tag: count } - count = 0 for path in yaml_files: try: @@ -90,7 +128,7 @@ def get_yaml_wildcards(): data = yaml.safe_load(file) if (data): if (is_umi_format(data)): - parse_umi_format(umi_tags, count, data) + parse_umi_format(umi_tags, data) else: parse_dynamic_prompt_format(yaml_wildcards, data, path) else: @@ -136,14 +174,14 @@ def get_embeddings(sd_model): # Add embeddings to the correct list if (emb_a_shape == V1_SHAPE): - emb_v1 = list(emb_type_a.keys()) + emb_v1 = [(Path(v.filename), k, "v1") for (k,v) in emb_type_a.items()] elif (emb_a_shape == V2_SHAPE): - emb_v2 = list(emb_type_a.keys()) + emb_v2 = [(Path(v.filename), k, "v2") for (k,v) in emb_type_a.items()] if (emb_b_shape == V1_SHAPE): - emb_v1 = list(emb_type_b.keys()) + emb_v1 = 
[(Path(v.filename), k, "v1") for (k,v) in emb_type_b.items()] elif (emb_b_shape == V2_SHAPE): - emb_v2 = list(emb_type_b.keys()) + emb_v2 = [(Path(v.filename), k, "v2") for (k,v) in emb_type_b.items()] # Get shape of current model #vec = sd_model.cond_stage_model.encode_embedding_init_text(",", 1) @@ -155,7 +193,7 @@ def get_embeddings(sd_model): # results = [e + ",v2" for e in emb_v2] + [e + ",v1" for e in emb_v1] #else: # raise AttributeError # Fallback to old method - results = sorted([e + ",v1" for e in emb_v1] + [e + ",v2" for e in emb_v2], key=lambda x: x.lower()) + results = sort_models(emb_v1) + sort_models(emb_v2) except AttributeError: print("tag_autocomplete_helper: Old webui version or unrecognized model shape, using fallback for embedding completion.") # Get a list of all embeddings in the folder @@ -173,9 +211,8 @@ def get_hypernetworks(): # Get a list of all hypernetworks in the folder hyp_paths = [Path(h) for h in glob.glob(HYP_PATH.joinpath("**/*").as_posix(), recursive=True)] - all_hypernetworks = [str(h.name) for h in hyp_paths if h.suffix in {".pt"}] - # Remove file extensions - return sorted([h[:h.rfind('.')] for h in all_hypernetworks], key=lambda x: x.lower()) + all_hypernetworks = [(h, h.stem) for h in hyp_paths if h.suffix in {".pt"}] + return sort_models(all_hypernetworks) model_keyword_installed = write_model_keyword_path() def get_lora(): @@ -186,17 +223,16 @@ def get_lora(): lora_paths = [Path(l) for l in glob.glob(LORA_PATH.joinpath("**/*").as_posix(), recursive=True)] # Get hashes valid_loras = [lf for lf in lora_paths if lf.suffix in {".safetensors", ".ckpt", ".pt"}] - hashes = {} + loras_with_hash = [] for l in valid_loras: name = l.relative_to(LORA_PATH).as_posix() if model_keyword_installed: - hashes[name] = get_lora_simple_hash(l) + hash = get_lora_simple_hash(l) else: - hashes[name] = "" + hash = "" + loras_with_hash.append((l, name, hash)) # Sort - sorted_loras = dict(sorted(hashes.items())) - # Add hashes and return - return 
[f"\"{name}\",{hash}" for name, hash in sorted_loras.items()] + return sort_models(loras_with_hash) def get_lyco(): @@ -207,19 +243,16 @@ def get_lyco(): # Get hashes valid_lycos = [lyf for lyf in lyco_paths if lyf.suffix in {".safetensors", ".ckpt", ".pt"}] - hashes = {} + lycos_with_hash = [] for ly in valid_lycos: name = ly.relative_to(LYCO_PATH).as_posix() if model_keyword_installed: - hashes[name] = get_lora_simple_hash(ly) + hash = get_lora_simple_hash(ly) else: - hashes[name] = "" - + hash = "" + lycos_with_hash.append((ly, name, hash)) # Sort - sorted_lycos = dict(sorted(hashes.items())) - # Add hashes and return - return [f"\"{name}\",{hash}" for name, hash in sorted_lycos.items()] - + return sort_models(lycos_with_hash) def write_tag_base_path(): """Writes the tag base path to a fixed location temporary file""" @@ -375,6 +408,7 @@ def on_ui_settings(): "tac_useLycos": shared.OptionInfo(True, "Search for LyCORIS/LoHa"), "tac_showWikiLinks": shared.OptionInfo(False, "Show '?' next to tags, linking to its Danbooru or e621 wiki page").info("Warning: This is an external site and very likely contains NSFW examples!"), "tac_showExtraNetworkPreviews": shared.OptionInfo(True, "Show preview thumbnails for extra networks if available"), + "tac_modelSortOrder": shared.OptionInfo("Name", "Model sort order", gr.Dropdown, lambda: {"choices": list(sort_criteria.keys())}).info("Order for extra network models and wildcards in dropdown"), # Insertion related settings "tac_replaceUnderscores": shared.OptionInfo(True, "Replace underscores with spaces on insertion"), "tac_escapeParentheses": shared.OptionInfo(True, "Escape parentheses on insertion"), @@ -483,6 +517,10 @@ def api_tac(_: gr.Blocks, app: FastAPI): except Exception as e: return JSONResponse({"error": e}, status_code=500) + @app.post("/tacapi/v1/refresh-temp-files") + async def api_refresh_temp_files(): + refresh_temp_files() + @app.get("/tacapi/v1/lora-info/{lora_name}") async def get_lora_info(lora_name): return 
await get_json_info(LORA_PATH, lora_name)