From 395326048576fba66a23cfa9f72f1a28334aa723 Mon Sep 17 00:00:00 2001 From: Symbiomatrix Date: Wed, 13 Sep 2023 01:34:49 +0300 Subject: [PATCH 01/12] Model sort selection. --- scripts/tag_autocomplete_helper.py | 89 ++++++++++++++++++++++-------- 1 file changed, 65 insertions(+), 24 deletions(-) diff --git a/scripts/tag_autocomplete_helper.py b/scripts/tag_autocomplete_helper.py index 1a05f01..78d9a06 100644 --- a/scripts/tag_autocomplete_helper.py +++ b/scripts/tag_autocomplete_helper.py @@ -1,6 +1,7 @@ # This helper script scans folders for wildcards and embeddings and writes them # to a temporary file to expose it to the javascript side +import os import glob import json import urllib.parse @@ -24,12 +25,48 @@ except Exception as e: # Not supported. load_textual_inversion_embeddings = lambda *args, **kwargs: None print("Tag Autocomplete: Cannot reload embeddings instantly:", e) +QUO = "\"" +EXTKEY = "tac" +# EXTNAME = "Tag Autocomplete Helper" +# Default values, because shared doesn't allocate a value automatically. +# (id: def) +DEXTSETV = { +"sortModels": "default", +} +fseti = lambda x: shared.opts.data.get(EXTKEY + "_" + x, DEXTSETV[x]) + +def sort_models(lmodels, sort_method = None, indwrap = False): + """Sorts models according to setting. + + Input: list of (full_path, display_name, {hash}) models. + Returns models in the standard temp file format (ie name, hash). + Default sort is lexicographical, mdate is by file modification date. + Hash is optional, can be any textual value written after the name (eg v1/v2 for embeddings). + For some reason only loras and lycos are wrapped in quote marks, so it's left to caller. + Creep: Requires sort modifications on js side to preserve order during merge. + """ + if len(lmodels) == 0: + return lmodels + if sort_method is None: + sort_method = fseti("sortModels") + if sort_method == "modified_date": + lsorted = sorted(lmodels, key=lambda x: os.path.getmtime(x[0]), reverse = True) + else: + lsorted = sorted(lmodels, key = lambda x: x[1].lower()) + if len(lsorted[0]) > 2: + # lret = [f"\"{name}\",{hash}" for pt, name, hash in lsorted] + lret = [f"{name},{hash}" for pt, name, hash in lsorted] + else: + lret = [name for pt, name in lsorted] + return lret + def get_wildcards(): """Returns a list of all wildcards. 
Works on nested folders.""" wildcard_files = list(WILDCARD_PATH.rglob("*.txt")) - resolved = [w.relative_to(WILDCARD_PATH).as_posix( - ) for w in wildcard_files if w.name != "put wildcards here.txt"] - return resolved + resolved = [(w, w.relative_to(WILDCARD_PATH).as_posix()) + for w in wildcard_files + if w.name != "put wildcards here.txt"] + return sort_models(resolved) def get_ext_wildcards(): @@ -38,7 +75,10 @@ def get_ext_wildcards(): for path in WILDCARD_EXT_PATHS: wildcard_files.append(path.as_posix()) - wildcard_files.extend(p.relative_to(path).as_posix() for p in path.rglob("*.txt") if p.name != "put wildcards here.txt") + lfiles = [(w, w.relative_to(path).as_posix()) + for w in path.rglob("*.txt") + if w.name != "put wildcards here.txt"] + wildcard_files.extend(sort_models(lfiles)) wildcard_files.append("-----") return wildcard_files @@ -136,14 +176,14 @@ def get_embeddings(sd_model): # Add embeddings to the correct list if (emb_a_shape == V1_SHAPE): - emb_v1 = list(emb_type_a.keys()) + emb_v1 = [(v.filename, k, "v1") for (k,v) in emb_type_a.items()] elif (emb_a_shape == V2_SHAPE): - emb_v2 = list(emb_type_a.keys()) + emb_v2 = [(v.filename, k, "v2") for (k,v) in emb_type_a.items()] if (emb_b_shape == V1_SHAPE): - emb_v1 = list(emb_type_b.keys()) + emb_v1 = [(v.filename, k, "v1") for (k,v) in emb_type_b.items()] elif (emb_b_shape == V2_SHAPE): - emb_v2 = list(emb_type_b.keys()) + emb_v2 = [(v.filename, k, "v2") for (k,v) in emb_type_b.items()] # Get shape of current model #vec = sd_model.cond_stage_model.encode_embedding_init_text(",", 1) @@ -155,7 +195,7 @@ def get_embeddings(sd_model): # results = [e + ",v2" for e in emb_v2] + [e + ",v1" for e in emb_v1] #else: # raise AttributeError # Fallback to old method - results = sorted([e + ",v1" for e in emb_v1] + [e + ",v2" for e in emb_v2], key=lambda x: x.lower()) + results = sort_models(emb_v1) + sort_models(emb_v2) except AttributeError: print("tag_autocomplete_helper: Old webui version or unrecognized model shape, using fallback for embedding completion.") # Get a list of all embeddings in the folder @@ -173,9 +213,10 @@ def get_hypernetworks(): # Get a list of all hypernetworks in the folder hyp_paths = [Path(h) for h in glob.glob(HYP_PATH.joinpath("**/*").as_posix(), recursive=True)] - all_hypernetworks = [str(h.name) for h in hyp_paths if h.suffix in {".pt"}] + all_hypernetworks = [h for h in hyp_paths if h.suffix in {".pt"}] # Remove file extensions - return sorted([h[:h.rfind('.')] for h in all_hypernetworks], key=lambda x: x.lower()) + lfiles = [(h, os.path.splitext(h.name)[0]) for h in all_hypernetworks] + return sort_models(lfiles) model_keyword_installed = write_model_keyword_path() def get_lora(): @@ -186,17 +227,17 @@ def get_lora(): lora_paths = [Path(l) for l in glob.glob(LORA_PATH.joinpath("**/*").as_posix(), recursive=True)] # Get hashes valid_loras = [lf for lf in lora_paths if lf.suffix in {".safetensors", ".ckpt", ".pt"}] - hashes = {} + lhashes = [] for l in valid_loras: name = l.relative_to(LORA_PATH).as_posix() + name = QUO + name + QUO # Wrapped in quote marks. 
if model_keyword_installed: - hashes[name] = get_lora_simple_hash(l) + vhash = get_lora_simple_hash(l) else: - hashes[name] = "" + vhash = "" + lhashes.append((l, name, vhash)) # Sort - sorted_loras = dict(sorted(hashes.items())) - # Add hashes and return - return [f"\"{name}\",{hash}" for name, hash in sorted_loras.items()] + return sort_models(lhashes) def get_lyco(): @@ -207,19 +248,18 @@ def get_lyco(): # Get hashes valid_lycos = [lyf for lyf in lyco_paths if lyf.suffix in {".safetensors", ".ckpt", ".pt"}] - hashes = {} + lhashes = [] for ly in valid_lycos: name = ly.relative_to(LYCO_PATH).as_posix() + name = QUO + name + QUO if model_keyword_installed: - hashes[name] = get_lora_simple_hash(ly) + vhash = get_lora_simple_hash(ly) else: - hashes[name] = "" + vhash = "" + lhashes.append((ly, name, vhash)) # Sort - sorted_lycos = dict(sorted(hashes.items())) - # Add hashes and return - return [f"\"{name}\",{hash}" for name, hash in sorted_lycos.items()] - + return sort_models(lhashes) def write_tag_base_path(): """Writes the tag base path to a fixed location temporary file""" @@ -397,6 +437,7 @@ def on_ui_settings(): "tac_extra.addMode": shared.OptionInfo("Insert before", "Mode to add the extra tags to the main tag list", gr.Dropdown, lambda: {"choices": ["Insert before","Insert after"]}), # Chant settings "tac_chantFile": shared.OptionInfo("demo-chants.json", "Chant filename", gr.Dropdown, lambda: {"choices": json_files_withnone}, refresh=update_json_files).info("Chants are longer prompt presets"), + "tac_sortModels": shared.OptionInfo("name", "Model sort order", gr.Dropdown, lambda: {"choices": ["name", "modified_date"]}).info("WIP: Order of appearance for models in dropdown"), } # Add normal settings From 475ef591975b1726d040500f277d819139a882c6 Mon Sep 17 00:00:00 2001 From: DominikDoom Date: Wed, 13 Sep 2023 11:46:17 +0200 Subject: [PATCH 02/12] Rework sorting function to calculate keys instead of pre-sort the list Rename added/changed variables to be clearer --- scripts/tag_autocomplete_helper.py | 112 +++++++++++++++-------------- 1 file changed, 59 insertions(+), 53 deletions(-) diff --git a/scripts/tag_autocomplete_helper.py b/scripts/tag_autocomplete_helper.py index 78d9a06..9094f77 100644 --- a/scripts/tag_autocomplete_helper.py +++ b/scripts/tag_autocomplete_helper.py @@ -25,40 +25,44 @@ except Exception as e: # Not supported. load_textual_inversion_embeddings = lambda *args, **kwargs: None print("Tag Autocomplete: Cannot reload embeddings instantly:", e) -QUO = "\"" -EXTKEY = "tac" -# EXTNAME = "Tag Autocomplete Helper" -# Default values, because shared doesn't allocate a value automatically. -# (id: def) -DEXTSETV = { -"sortModels": "default", +# Sorting functions for extra networks / embeddings stuff +sort_criteria = { + "Name": { + "key": lambda path, name: name.lower() if Path(name).parts > 1 else path.stem.lower(), + "reverse": False + }, + "Date Modified": { + "key": lambda path, name: path.stat().st_mtime, + "reverse": True + }, } -fseti = lambda x: shared.opts.data.get(EXTKEY + "_" + x, DEXTSETV[x]) -def sort_models(lmodels, sort_method = None, indwrap = False): - """Sorts models according to setting. +def sort_models(model_list, sort_method = None): + """Sorts models according to the setting. Input: list of (full_path, display_name, {hash}) models. - Returns models in the standard temp file format (ie name, hash). - Default sort is lexicographical, mdate is by file modification date. 
- Hash is optional, can be any textual value written after the name (eg v1/v2 for embeddings). - For some reason only loras and lycos are wrapped in quote marks, so it's left to caller. - Creep: Requires sort modifications on js side to preserve order during merge. + Returns models in the format of name, sort key, meta. + Meta is optional and can be a hash, version string or other required info. + Whether the currently selected sort method needs to be reversed is provided + by an API endpoint to reduce duplication in temp files. """ - if len(lmodels) == 0: - return lmodels + if len(model_list) == 0: + return model_list + if sort_method is None: - sort_method = fseti("sortModels") - if sort_method == "modified_date": - lsorted = sorted(lmodels, key=lambda x: os.path.getmtime(x[0]), reverse = True) - else: - lsorted = sorted(lmodels, key = lambda x: x[1].lower()) - if len(lsorted[0]) > 2: - # lret = [f"\"{name}\",{hash}" for pt, name, hash in lsorted] - lret = [f"{name},{hash}" for pt, name, hash in lsorted] + sort_method = getattr(shared.opts, "tac_modelSortOrder", "Name") + + # Get sorting method from dictionary + sorter = sort_criteria[sort_method] if sort_criteria[sort_method] else sort_criteria['Name'] + + # During merging on the JS side we need to re-sort anyway, so here only the sort criteria are calculated. + # The list itself doesn't need to get sorted at this point. + if len(model_list[0]) > 2: + results = [f'{name},"{sorter["key"](path, name)}",{meta}' for path, name, meta in model_list] else: - lret = [name for pt, name in lsorted] - return lret + results = [f'{name},"{sorter["key"](path, name)}"' for path, name in model_list] + return results + def get_wildcards(): """Returns a list of all wildcards. Works on nested folders.""" @@ -75,10 +79,10 @@ def get_ext_wildcards(): for path in WILDCARD_EXT_PATHS: wildcard_files.append(path.as_posix()) - lfiles = [(w, w.relative_to(path).as_posix()) - for w in path.rglob("*.txt") - if w.name != "put wildcards here.txt"] - wildcard_files.extend(sort_models(lfiles)) + resolved = [(w, w.relative_to(path).as_posix()) + for w in path.rglob("*.txt") + if w.name != "put wildcards here.txt"] + wildcard_files.extend(sort_models(resolved)) wildcard_files.append("-----") return wildcard_files @@ -176,14 +180,14 @@ def get_embeddings(sd_model): # Add embeddings to the correct list if (emb_a_shape == V1_SHAPE): - emb_v1 = [(v.filename, k, "v1") for (k,v) in emb_type_a.items()] + emb_v1 = [(Path(v.filename), k, "v1") for (k,v) in emb_type_a.items()] elif (emb_a_shape == V2_SHAPE): - emb_v2 = [(v.filename, k, "v2") for (k,v) in emb_type_a.items()] + emb_v2 = [(Path(v.filename), k, "v2") for (k,v) in emb_type_a.items()] if (emb_b_shape == V1_SHAPE): - emb_v1 = [(v.filename, k, "v1") for (k,v) in emb_type_b.items()] + emb_v1 = [(Path(v.filename), k, "v1") for (k,v) in emb_type_b.items()] elif (emb_b_shape == V2_SHAPE): - emb_v2 = [(v.filename, k, "v2") for (k,v) in emb_type_b.items()] + emb_v2 = [(Path(v.filename), k, "v2") for (k,v) in emb_type_b.items()] # Get shape of current model #vec = sd_model.cond_stage_model.encode_embedding_init_text(",", 1) @@ -213,10 +217,8 @@ def get_hypernetworks(): # Get a list of all hypernetworks in the folder hyp_paths = [Path(h) for h in glob.glob(HYP_PATH.joinpath("**/*").as_posix(), recursive=True)] - all_hypernetworks = [h for h in hyp_paths if h.suffix in {".pt"}] - # Remove file extensions - lfiles = [(h, os.path.splitext(h.name)[0]) for h in all_hypernetworks] - return sort_models(lfiles) + all_hypernetworks = [(h, 
h.stem) for h in hyp_paths if h.suffix in {".pt"}] + return sort_models(all_hypernetworks) model_keyword_installed = write_model_keyword_path() def get_lora(): @@ -227,17 +229,17 @@ def get_lora(): lora_paths = [Path(l) for l in glob.glob(LORA_PATH.joinpath("**/*").as_posix(), recursive=True)] # Get hashes valid_loras = [lf for lf in lora_paths if lf.suffix in {".safetensors", ".ckpt", ".pt"}] - lhashes = [] + loras_with_hash = [] for l in valid_loras: name = l.relative_to(LORA_PATH).as_posix() - name = QUO + name + QUO # Wrapped in quote marks. + name = f'"{name}"' if model_keyword_installed: - vhash = get_lora_simple_hash(l) + hash = get_lora_simple_hash(l) else: - vhash = "" - lhashes.append((l, name, vhash)) + hash = "" + loras_with_hash.append((l, name, hash)) # Sort - return sort_models(lhashes) + return sort_models(loras_with_hash) def get_lyco(): @@ -248,18 +250,17 @@ def get_lyco(): # Get hashes valid_lycos = [lyf for lyf in lyco_paths if lyf.suffix in {".safetensors", ".ckpt", ".pt"}] - lhashes = [] + lycos_with_hash = [] for ly in valid_lycos: name = ly.relative_to(LYCO_PATH).as_posix() - name = QUO + name + QUO + name = f'"{name}"' if model_keyword_installed: - vhash = get_lora_simple_hash(ly) + hash = get_lora_simple_hash(ly) else: - vhash = "" - lhashes.append((ly, name, vhash)) - + hash = "" + lycos_with_hash.append((ly, name, hash)) # Sort - return sort_models(lhashes) + return sort_models(lycos_with_hash) def write_tag_base_path(): """Writes the tag base path to a fixed location temporary file""" @@ -415,6 +416,7 @@ def on_ui_settings(): "tac_useLycos": shared.OptionInfo(True, "Search for LyCORIS/LoHa"), "tac_showWikiLinks": shared.OptionInfo(False, "Show '?' next to tags, linking to its Danbooru or e621 wiki page").info("Warning: This is an external site and very likely contains NSFW examples!"), "tac_showExtraNetworkPreviews": shared.OptionInfo(True, "Show preview thumbnails for extra networks if available"), + "tac_modelSortOrder": shared.OptionInfo("Name", "Model sort order", gr.Dropdown, lambda: {"choices": ["Name", "Date Modified"]}).info("Order for extra network models and wildcards in dropdown"), # Insertion related settings "tac_replaceUnderscores": shared.OptionInfo(True, "Replace underscores with spaces on insertion"), "tac_escapeParentheses": shared.OptionInfo(True, "Escape parentheses on insertion"), @@ -437,7 +439,6 @@ def on_ui_settings(): "tac_extra.addMode": shared.OptionInfo("Insert before", "Mode to add the extra tags to the main tag list", gr.Dropdown, lambda: {"choices": ["Insert before","Insert after"]}), # Chant settings "tac_chantFile": shared.OptionInfo("demo-chants.json", "Chant filename", gr.Dropdown, lambda: {"choices": json_files_withnone}, refresh=update_json_files).info("Chants are longer prompt presets"), - "tac_sortModels": shared.OptionInfo("name", "Model sort order", gr.Dropdown, lambda: {"choices": ["name", "modified_date"]}).info("WIP: Order of appearance for models in dropdown"), } # Add normal settings @@ -524,6 +525,11 @@ def api_tac(_: gr.Blocks, app: FastAPI): except Exception as e: return JSONResponse({"error": e}, status_code=500) + @app.get("/tacapi/v1/sort-direction") + async def get_sort_direction(): + criterium = getattr(shared.opts, "tac_modelSortOrder", "Name") + return sort_criteria[criterium]['reverse'] if sort_criteria[criterium] else sort_criteria['Name']['reverse'] + @app.get("/tacapi/v1/lora-info/{lora_name}") async def get_lora_info(lora_name): return await get_json_info(LORA_PATH, lora_name) From 
44effca7027a1d23720fc261a920c72f8433d0e6 Mon Sep 17 00:00:00 2001 From: DominikDoom Date: Wed, 13 Sep 2023 14:02:28 +0200 Subject: [PATCH 03/12] Add sorting to javascript side Now uses the sortKey if available. Elements without a sortKey will always use name as fallback. Removed sort direction API again since it needs to be modeled case-by-case in the javascript anyway. --- javascript/_result.js | 1 + javascript/_utils.js | 38 ++++++++++++++++ javascript/ext_embeddings.js | 11 ++--- javascript/ext_hypernets.js | 11 ++--- javascript/ext_loras.js | 5 ++- javascript/ext_lycos.js | 5 ++- javascript/ext_wildcards.js | 23 +++++----- javascript/tagAutocomplete.js | 70 +++++++++++++++++------------- scripts/tag_autocomplete_helper.py | 31 +++++-------- 9 files changed, 117 insertions(+), 78 deletions(-) diff --git a/javascript/_result.js b/javascript/_result.js index 96129f1..823f26d 100644 --- a/javascript/_result.js +++ b/javascript/_result.js @@ -27,6 +27,7 @@ class AutocompleteResult { aliases = null; meta = null; hash = null; + sortKey = null; // Constructor constructor(text, type) { diff --git a/javascript/_utils.js b/javascript/_utils.js index 6ef46c0..069ee2e 100644 --- a/javascript/_utils.js +++ b/javascript/_utils.js @@ -81,6 +81,17 @@ async function fetchAPI(url, json = true, cache = false) { return await response.text(); } +async function postAPI(url, body) { + let response = await fetch(url, { method: "POST", body: body }); + + if (response.status != 200) { + console.error(`Error posting to API endpoint "${url}": ` + response.status, response.statusText); + return null; + } + + return await response.json(); +} + // Extra network preview thumbnails async function getExtraNetworkPreviewURL(filename, type) { const previewJSON = await fetchAPI(`tacapi/v1/thumb-preview/${filename}?type=${type}`, true, true); @@ -200,6 +211,33 @@ function observeElement(element, property, callback, delay = 0) { } } +// Sort functions +function getSortFunction() { + let criterium = TAC_CFG.modelSortOrder || "Name"; + return (a, b) => { + let textHolderA = a.type === ResultType.chant ? a.aliases : a.text; + let textHolderB = b.type === ResultType.chant ? 
b.aliases : b.text; + + switch (criterium) { + case "Date Modified": + let aParsed = parseFloat(a.sortKey || "-1"); + let bParsed = parseFloat(b.sortKey || "-1"); + + if (aParsed === bParsed) { + let aKey = a.sortKey || textHolderA; + let bKey = b.sortKey || textHolderB; + return aKey.localeCompare(bKey); + } + + return bParsed - aParsed; + default: + let aKey = a.sortKey || textHolderA; + let bKey = b.sortKey || textHolderB; + return aKey.localeCompare(bKey); + } + } +} + // Queue calling function to process global queues async function processQueue(queue, context, ...args) { for (let i = 0; i < queue.length; i++) { diff --git a/javascript/ext_embeddings.js b/javascript/ext_embeddings.js index e51aa4b..9c7bd44 100644 --- a/javascript/ext_embeddings.js +++ b/javascript/ext_embeddings.js @@ -16,7 +16,7 @@ class EmbeddingParser extends BaseTagParser { let filterCondition = x => x[0].toLowerCase().includes(searchTerm) || x[0].toLowerCase().replaceAll(" ", "_").includes(searchTerm); if (versionString) - tempResults = embeddings.filter(x => filterCondition(x) && x[1] && x[1] === versionString); // Filter by tagword + tempResults = embeddings.filter(x => filterCondition(x) && x[2] && x[2] === versionString); // Filter by tagword else tempResults = embeddings.filter(x => filterCondition(x)); // Filter by tagword } else { @@ -27,7 +27,8 @@ class EmbeddingParser extends BaseTagParser { let finalResults = []; tempResults.forEach(t => { let result = new AutocompleteResult(t[0].trim(), ResultType.embedding) - result.meta = t[1] + " Embedding"; + result.sortKey = t[1]; + result.meta = t[2] + " Embedding"; finalResults.push(result); }); @@ -38,9 +39,9 @@ class EmbeddingParser extends BaseTagParser { async function load() { if (embeddings.length === 0) { try { - embeddings = (await readFile(`${tagBasePath}/temp/emb.txt`)).split("\n") - .filter(x => x.trim().length > 0) // Remove empty lines - .map(x => x.trim().split(",")); // Split into name, version type pairs + embeddings = (await loadCSV(`${tagBasePath}/temp/emb.txt`)) + .filter(x => x[0]?.trim().length > 0) // Remove empty lines + .map(x => [x[0].trim(), x[1], x[2]]); // Return name, sortKey, hash tuples } catch (e) { console.error("Error loading embeddings.txt: " + e); } diff --git a/javascript/ext_hypernets.js b/javascript/ext_hypernets.js index 7f564fd..3613b2a 100644 --- a/javascript/ext_hypernets.js +++ b/javascript/ext_hypernets.js @@ -8,7 +8,7 @@ class HypernetParser extends BaseTagParser { if (tagword !== "<" && tagword !== " x.toLowerCase().includes(searchTerm) || x.toLowerCase().replaceAll(" ", "_").includes(searchTerm); - tempResults = hypernetworks.filter(x => filterCondition(x)); // Filter by tagword + tempResults = hypernetworks.filter(x => filterCondition(x[0])); // Filter by tagword } else { tempResults = hypernetworks; } @@ -16,8 +16,9 @@ class HypernetParser extends BaseTagParser { // Add final results let finalResults = []; tempResults.forEach(t => { - let result = new AutocompleteResult(t.trim(), ResultType.hypernetwork) + let result = new AutocompleteResult(t[0].trim(), ResultType.hypernetwork) result.meta = "Hypernetwork"; + result.sortKey = t[1]; finalResults.push(result); }); @@ -28,9 +29,9 @@ class HypernetParser extends BaseTagParser { async function load() { if (hypernetworks.length === 0) { try { - hypernetworks = (await readFile(`${tagBasePath}/temp/hyp.txt`)).split("\n") - .filter(x => x.trim().length > 0) //Remove empty lines - .map(x => x.trim()); // Remove carriage returns and padding if it exists + hypernetworks = 
(await loadCSV(`${tagBasePath}/temp/hyp.txt`)) + .filter(x => x[0]?.trim().length > 0) //Remove empty lines + .map(x => [x[0]?.trim(), x[1]]); // Remove carriage returns and padding if it exists } catch (e) { console.error("Error loading hypernetworks.txt: " + e); } diff --git a/javascript/ext_loras.js b/javascript/ext_loras.js index 9a94b75..22c879c 100644 --- a/javascript/ext_loras.js +++ b/javascript/ext_loras.js @@ -23,7 +23,8 @@ class LoraParser extends BaseTagParser { let result = new AutocompleteResult(name, ResultType.lora) result.meta = "Lora"; - result.hash = t[1]; + result.sortKey = t[1]; + result.hash = t[2]; finalResults.push(result); }); @@ -36,7 +37,7 @@ async function load() { try { loras = (await loadCSV(`${tagBasePath}/temp/lora.txt`)) .filter(x => x[0]?.trim().length > 0) // Remove empty lines - .map(x => [x[0]?.trim(), x[1]]); // Trim filenames and return the name, hash pairs + .map(x => [x[0]?.trim(), x[1], x[2]]); // Trim filenames and return the name, sortKey, hash pairs } catch (e) { console.error("Error loading lora.txt: " + e); } diff --git a/javascript/ext_lycos.js b/javascript/ext_lycos.js index dd1b439..ad6271e 100644 --- a/javascript/ext_lycos.js +++ b/javascript/ext_lycos.js @@ -23,7 +23,8 @@ class LycoParser extends BaseTagParser { let result = new AutocompleteResult(name, ResultType.lyco) result.meta = "Lyco"; - result.hash = t[1]; + result.sortKey = t[1]; + result.hash = t[2]; finalResults.push(result); }); @@ -36,7 +37,7 @@ async function load() { try { lycos = (await loadCSV(`${tagBasePath}/temp/lyco.txt`)) .filter(x => x[0]?.trim().length > 0) // Remove empty lines - .map(x => [x[0]?.trim(), x[1]]); // Trim filenames and return the name, hash pairs + .map(x => [x[0]?.trim(), x[1], x[2]]); // Trim filenames and return the name, sortKey, hash pairs } catch (e) { console.error("Error loading lyco.txt: " + e); } diff --git a/javascript/ext_wildcards.js b/javascript/ext_wildcards.js index cde0421..34361b8 100644 --- a/javascript/ext_wildcards.js +++ b/javascript/ext_wildcards.js @@ -85,13 +85,14 @@ class WildcardFileParser extends BaseTagParser { } else { result = new AutocompleteResult(wcFile[1].trim(), ResultType.wildcardFile); result.meta = "Wildcard file"; + result.sortKey = wcFile[2].trim(); } finalResults.push(result); alreadyAdded.set(wcFile[1], true); }); - finalResults.sort((a, b) => a.text.localeCompare(b.text)); + finalResults.sort(getSortFunction()); return finalResults; } @@ -100,17 +101,17 @@ class WildcardFileParser extends BaseTagParser { async function load() { if (wildcardFiles.length === 0 && wildcardExtFiles.length === 0) { try { - let wcFileArr = (await readFile(`${tagBasePath}/temp/wc.txt`)).split("\n"); - let wcBasePath = wcFileArr[0].trim(); // First line should be the base path + let wcFileArr = await loadCSV(`${tagBasePath}/temp/wc.txt`); + let wcBasePath = wcFileArr[0][0].trim(); // First line should be the base path wildcardFiles = wcFileArr.slice(1) - .filter(x => x.trim().length > 0) // Remove empty lines - .map(x => [wcBasePath, x.trim().replace(".txt", "")]); // Remove file extension & newlines + .filter(x => x[0]?.trim().length > 0) //Remove empty lines + .map(x => [wcBasePath, x[0]?.trim().replace(".txt", ""), x[1]]); // Remove file extension & newlines // To support multiple sources, we need to separate them using the provided "-----" strings - let wcExtFileArr = (await readFile(`${tagBasePath}/temp/wce.txt`)).split("\n"); + let wcExtFileArr = await loadCSV(`${tagBasePath}/temp/wce.txt`); let splitIndices = []; for (let 
index = 0; index < wcExtFileArr.length; index++) { - if (wcExtFileArr[index].trim() === "-----") { + if (wcExtFileArr[index][0].trim() === "-----") { splitIndices.push(index); } } @@ -121,12 +122,10 @@ async function load() { let end = splitIndices[i]; let wcExtFile = wcExtFileArr.slice(start, end); - let base = wcExtFile[0].trim() + "/"; + let base = wcExtFile[0][0].trim() + "/"; wcExtFile = wcExtFile.slice(1) - .filter(x => x.trim().length > 0) // Remove empty lines - .map(x => x.trim().replace(base, "").replace(".txt", "")); // Remove file extension & newlines; - - wcExtFile = wcExtFile.map(x => [base, x]); + .filter(x => x[0]?.trim().length > 0) //Remove empty lines + .map(x => [base, x[0]?.trim().replace(base, "").replace(".txt", ""), x[1]]); wildcardExtFiles.push(...wcExtFile); } diff --git a/javascript/tagAutocomplete.js b/javascript/tagAutocomplete.js index 775ba39..9331296 100644 --- a/javascript/tagAutocomplete.js +++ b/javascript/tagAutocomplete.js @@ -217,6 +217,7 @@ async function syncOptions() { useLycos: opts["tac_useLycos"], showWikiLinks: opts["tac_showWikiLinks"], showExtraNetworkPreviews: opts["tac_showExtraNetworkPreviews"], + modelSortOrder: opts["tac_modelSortOrder"], // Insertion related settings replaceUnderscores: opts["tac_replaceUnderscores"], escapeParentheses: opts["tac_escapeParentheses"], @@ -269,6 +270,12 @@ async function syncOptions() { await loadTags(newCFG); } + // Refresh temp files if model sort order changed + // Contrary to the other loads, this one shouldn't happen on a first time load + if (TAC_CFG && newCFG.modelSortOrder !== TAC_CFG.modelSortOrder) { + await refreshTacTempFiles(true); + } + // Update CSS if maxResults changed if (TAC_CFG && newCFG.maxResults !== TAC_CFG.maxResults) { gradioApp().querySelectorAll(".autocompleteResults").forEach(r => { @@ -1007,36 +1014,28 @@ async function autocomplete(textArea, prompt, fixedTag = null) { if (resultCandidates && resultCandidates.length > 0) { // Flatten our candidate(s) results = resultCandidates.flat(); - // If there was more than one candidate, sort the results by text to mix them - // instead of having them added in the order of the parsers - let shouldSort = resultCandidates.length > 1; - if (shouldSort) { - results = results.sort((a, b) => { - let sortByA = a.type === ResultType.chant ? a.aliases : a.text; - let sortByB = b.type === ResultType.chant ? 
b.aliases : b.text; - return sortByA.localeCompare(sortByB); - }); + // Sort results + results = results.sort(getSortFunction()); - // Since some tags are kaomoji, we have to add the normal results in some cases - if (tagword.startsWith("<") || tagword.startsWith("*<")) { - // Create escaped search regex with support for * as a start placeholder - let searchRegex; - if (tagword.startsWith("*")) { - tagword = tagword.slice(1); - searchRegex = new RegExp(`${escapeRegExp(tagword)}`, 'i'); - } else { - searchRegex = new RegExp(`(^|[^a-zA-Z])${escapeRegExp(tagword)}`, 'i'); - } - let genericResults = allTags.filter(x => x[0].toLowerCase().search(searchRegex) > -1).slice(0, TAC_CFG.maxResults); - - genericResults.forEach(g => { - let result = new AutocompleteResult(g[0].trim(), ResultType.tag) - result.category = g[1]; - result.count = g[2]; - result.aliases = g[3]; - results.push(result); - }); + // Since some tags are kaomoji, we have to add the normal results in some cases + if (tagword.startsWith("<") || tagword.startsWith("*<")) { + // Create escaped search regex with support for * as a start placeholder + let searchRegex; + if (tagword.startsWith("*")) { + tagword = tagword.slice(1); + searchRegex = new RegExp(`${escapeRegExp(tagword)}`, 'i'); + } else { + searchRegex = new RegExp(`(^|[^a-zA-Z])${escapeRegExp(tagword)}`, 'i'); } + let genericResults = allTags.filter(x => x[0].toLowerCase().search(searchRegex) > -1).slice(0, TAC_CFG.maxResults); + + genericResults.forEach(g => { + let result = new AutocompleteResult(g[0].trim(), ResultType.tag) + result.category = g[1]; + result.count = g[2]; + result.aliases = g[3]; + results.push(result); + }); } } // Else search the normal tag list @@ -1223,8 +1222,8 @@ function navigateInList(textArea, event) { event.stopPropagation(); } -async function refreshTacTempFiles() { - setTimeout(async () => { +async function refreshTacTempFiles(api = false) { + const reload = async () => { wildcardFiles = []; wildcardExtFiles = []; umiWildcards = []; @@ -1236,7 +1235,16 @@ async function refreshTacTempFiles() { await processQueue(QUEUE_FILE_LOAD, null); console.log("TAC: Refreshed temp files"); - }, 2000); + } + + if (api) { + await postAPI("tacapi/v1/refresh-temp-files", null); + await reload(); + } else { + setTimeout(async () => { + await reload(); + }, 2000); + } } function addAutocompleteToArea(area) { diff --git a/scripts/tag_autocomplete_helper.py b/scripts/tag_autocomplete_helper.py index 9094f77..0af571a 100644 --- a/scripts/tag_autocomplete_helper.py +++ b/scripts/tag_autocomplete_helper.py @@ -27,24 +27,16 @@ except Exception as e: # Not supported. # Sorting functions for extra networks / embeddings stuff sort_criteria = { - "Name": { - "key": lambda path, name: name.lower() if Path(name).parts > 1 else path.stem.lower(), - "reverse": False - }, - "Date Modified": { - "key": lambda path, name: path.stat().st_mtime, - "reverse": True - }, + "Name": lambda path, name, subpath: name.lower() if subpath else path.stem.lower(), + "Date Modified": lambda path, name, subpath: path.stat().st_mtime } -def sort_models(model_list, sort_method = None): +def sort_models(model_list, sort_method = None, name_has_subpath = False): """Sorts models according to the setting. Input: list of (full_path, display_name, {hash}) models. Returns models in the format of name, sort key, meta. Meta is optional and can be a hash, version string or other required info. 
- Whether the currently selected sort method needs to be reversed is provided - by an API endpoint to reduce duplication in temp files. """ if len(model_list) == 0: return model_list @@ -58,9 +50,9 @@ def sort_models(model_list, sort_method = None): # During merging on the JS side we need to re-sort anyway, so here only the sort criteria are calculated. # The list itself doesn't need to get sorted at this point. if len(model_list[0]) > 2: - results = [f'{name},"{sorter["key"](path, name)}",{meta}' for path, name, meta in model_list] + results = [f'{name},"{sorter(path, name, name_has_subpath)}",{meta}' for path, name, meta in model_list] else: - results = [f'{name},"{sorter["key"](path, name)}"' for path, name in model_list] + results = [f'{name},"{sorter(path, name, name_has_subpath)}"' for path, name in model_list] return results @@ -70,7 +62,7 @@ def get_wildcards(): resolved = [(w, w.relative_to(WILDCARD_PATH).as_posix()) for w in wildcard_files if w.name != "put wildcards here.txt"] - return sort_models(resolved) + return sort_models(resolved, name_has_subpath=True) def get_ext_wildcards(): @@ -82,7 +74,7 @@ def get_ext_wildcards(): resolved = [(w, w.relative_to(path).as_posix()) for w in path.rglob("*.txt") if w.name != "put wildcards here.txt"] - wildcard_files.extend(sort_models(resolved)) + wildcard_files.extend(sort_models(resolved, name_has_subpath=True)) wildcard_files.append("-----") return wildcard_files @@ -232,7 +224,6 @@ def get_lora(): loras_with_hash = [] for l in valid_loras: name = l.relative_to(LORA_PATH).as_posix() - name = f'"{name}"' if model_keyword_installed: hash = get_lora_simple_hash(l) else: @@ -253,7 +244,6 @@ def get_lyco(): lycos_with_hash = [] for ly in valid_lycos: name = ly.relative_to(LYCO_PATH).as_posix() - name = f'"{name}"' if model_keyword_installed: hash = get_lora_simple_hash(ly) else: @@ -525,10 +515,9 @@ def api_tac(_: gr.Blocks, app: FastAPI): except Exception as e: return JSONResponse({"error": e}, status_code=500) - @app.get("/tacapi/v1/sort-direction") - async def get_sort_direction(): - criterium = getattr(shared.opts, "tac_modelSortOrder", "Name") - return sort_criteria[criterium]['reverse'] if sort_criteria[criterium] else sort_criteria['Name']['reverse'] + @app.post("/tacapi/v1/refresh-temp-files") + async def api_refresh_temp_files(): + refresh_temp_files() @app.get("/tacapi/v1/lora-info/{lora_name}") async def get_lora_info(lora_name): From 783a847978bcd6b42a5cdfd6da4bc1a65a5f600c Mon Sep 17 00:00:00 2001 From: DominikDoom Date: Wed, 13 Sep 2023 16:37:44 +0200 Subject: [PATCH 04/12] Fix typo --- javascript/_utils.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/javascript/_utils.js b/javascript/_utils.js index 069ee2e..4050f29 100644 --- a/javascript/_utils.js +++ b/javascript/_utils.js @@ -213,12 +213,12 @@ function observeElement(element, property, callback, delay = 0) { // Sort functions function getSortFunction() { - let criterium = TAC_CFG.modelSortOrder || "Name"; + let criterion = TAC_CFG.modelSortOrder || "Name"; return (a, b) => { let textHolderA = a.type === ResultType.chant ? a.aliases : a.text; let textHolderB = b.type === ResultType.chant ? 
b.aliases : b.text; - switch (criterium) { + switch (criterion) { case "Date Modified": let aParsed = parseFloat(a.sortKey || "-1"); let bParsed = parseFloat(b.sortKey || "-1"); From 2846d79b7d49f171669c9ee3a9ae1af5fd4cea4f Mon Sep 17 00:00:00 2001 From: DominikDoom Date: Wed, 13 Sep 2023 19:39:48 +0200 Subject: [PATCH 05/12] Small cleanup, add reverse option Properly add text at the end on non-reverse numeric --- javascript/_utils.js | 43 ++++++++++++++++++------------ scripts/tag_autocomplete_helper.py | 7 ++--- 2 files changed, 30 insertions(+), 20 deletions(-) diff --git a/javascript/_utils.js b/javascript/_utils.js index 4050f29..de745f1 100644 --- a/javascript/_utils.js +++ b/javascript/_utils.js @@ -214,26 +214,35 @@ function observeElement(element, property, callback, delay = 0) { // Sort functions function getSortFunction() { let criterion = TAC_CFG.modelSortOrder || "Name"; + + const textSort = (a, b, reverse = false) => { + const textHolderA = a.type === ResultType.chant ? a.aliases : a.text; + const textHolderB = b.type === ResultType.chant ? b.aliases : b.text; + + const aKey = a.sortKey || textHolderA; + const bKey = b.sortKey || textHolderB; + return reverse ? bKey.localeCompare(aKey) : aKey.localeCompare(bKey); + } + const numericSort = (a, b, reverse = false) => { + const noKey = reverse ? "-1" : Number.MAX_SAFE_INTEGER; + let aParsed = parseFloat(a.sortKey || noKey); + let bParsed = parseFloat(b.sortKey || noKey); + + if (aParsed === bParsed) { + return textSort(a, b, false); + } + + return reverse ? bParsed - aParsed : aParsed - bParsed; + } + return (a, b) => { - let textHolderA = a.type === ResultType.chant ? a.aliases : a.text; - let textHolderB = b.type === ResultType.chant ? b.aliases : b.text; - switch (criterion) { - case "Date Modified": - let aParsed = parseFloat(a.sortKey || "-1"); - let bParsed = parseFloat(b.sortKey || "-1"); - - if (aParsed === bParsed) { - let aKey = a.sortKey || textHolderA; - let bKey = b.sortKey || textHolderB; - return aKey.localeCompare(bKey); - } - - return bParsed - aParsed; + case "Date Modified (newest first)": + return numericSort(a, b, true); + case "Date Modified (oldest first)": + return numericSort(a, b, false); default: - let aKey = a.sortKey || textHolderA; - let bKey = b.sortKey || textHolderB; - return aKey.localeCompare(bKey); + return textSort(a, b); } } } diff --git a/scripts/tag_autocomplete_helper.py b/scripts/tag_autocomplete_helper.py index 0af571a..2b6f69e 100644 --- a/scripts/tag_autocomplete_helper.py +++ b/scripts/tag_autocomplete_helper.py @@ -28,7 +28,8 @@ except Exception as e: # Not supported. # Sorting functions for extra networks / embeddings stuff sort_criteria = { "Name": lambda path, name, subpath: name.lower() if subpath else path.stem.lower(), - "Date Modified": lambda path, name, subpath: path.stat().st_mtime + "Date Modified (newest first)": lambda path, name, subpath: path.stat().st_mtime, + "Date Modified (oldest first)": lambda path, name, subpath: path.stat().st_mtime } def sort_models(model_list, sort_method = None, name_has_subpath = False): @@ -45,7 +46,7 @@ def sort_models(model_list, sort_method = None, name_has_subpath = False): sort_method = getattr(shared.opts, "tac_modelSortOrder", "Name") # Get sorting method from dictionary - sorter = sort_criteria[sort_method] if sort_criteria[sort_method] else sort_criteria['Name'] + sorter = sort_criteria.get(sort_method, sort_criteria["Name"]) # During merging on the JS side we need to re-sort anyway, so here only the sort criteria are calculated. 
# The list itself doesn't need to get sorted at this point. @@ -406,7 +407,7 @@ def on_ui_settings(): "tac_useLycos": shared.OptionInfo(True, "Search for LyCORIS/LoHa"), "tac_showWikiLinks": shared.OptionInfo(False, "Show '?' next to tags, linking to its Danbooru or e621 wiki page").info("Warning: This is an external site and very likely contains NSFW examples!"), "tac_showExtraNetworkPreviews": shared.OptionInfo(True, "Show preview thumbnails for extra networks if available"), - "tac_modelSortOrder": shared.OptionInfo("Name", "Model sort order", gr.Dropdown, lambda: {"choices": ["Name", "Date Modified"]}).info("Order for extra network models and wildcards in dropdown"), + "tac_modelSortOrder": shared.OptionInfo("Name", "Model sort order", gr.Dropdown, lambda: {"choices": list(sort_criteria.keys())}).info("Order for extra network models and wildcards in dropdown"), # Insertion related settings "tac_replaceUnderscores": shared.OptionInfo(True, "Replace underscores with spaces on insertion"), "tac_escapeParentheses": shared.OptionInfo(True, "Escape parentheses on insertion"), From 018c6c8198720b9d6fc915dc226763634b0590fb Mon Sep 17 00:00:00 2001 From: DominikDoom Date: Wed, 13 Sep 2023 21:50:41 +0200 Subject: [PATCH 06/12] Fix Umi tag gathering & sorting Fixes #238 --- javascript/ext_umi.js | 5 +++++ javascript/tagAutocomplete.js | 2 +- scripts/tag_autocomplete_helper.py | 7 ++++--- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/javascript/ext_umi.js b/javascript/ext_umi.js index c076734..a55f80c 100644 --- a/javascript/ext_umi.js +++ b/javascript/ext_umi.js @@ -149,6 +149,7 @@ class UmiParser extends BaseTagParser { finalResults.push(result); }); + finalResults = finalResults.sort((a, b) => b.count - a.count); return finalResults; } else if (showAll) { let filteredWildcardsSorted = filteredWildcards(""); @@ -163,6 +164,8 @@ class UmiParser extends BaseTagParser { originalTagword = tagword; tagword = ""; + + finalResults = finalResults.sort((a, b) => b.count - a.count); return finalResults; } } else { @@ -178,6 +181,8 @@ class UmiParser extends BaseTagParser { originalTagword = tagword; tagword = ""; + + finalResults = finalResults.sort((a, b) => b.count - a.count); return finalResults; } } diff --git a/javascript/tagAutocomplete.js b/javascript/tagAutocomplete.js index 775ba39..2146156 100644 --- a/javascript/tagAutocomplete.js +++ b/javascript/tagAutocomplete.js @@ -1002,7 +1002,7 @@ async function autocomplete(textArea, prompt, fixedTag = null) { tagword = tagword.toLowerCase().replace(/[\n\r]/g, ""); // Process all parsers - let resultCandidates = await processParsers(textArea, prompt); + let resultCandidates = (await processParsers(textArea, prompt)).filter(x => x.length > 0); // If one ore more result candidates match, use their results if (resultCandidates && resultCandidates.length > 0) { // Flatten our candidate(s) diff --git a/scripts/tag_autocomplete_helper.py b/scripts/tag_autocomplete_helper.py index 1a05f01..f95aa85 100644 --- a/scripts/tag_autocomplete_helper.py +++ b/scripts/tag_autocomplete_helper.py @@ -52,7 +52,9 @@ def is_umi_format(data): break return not issue_found -def parse_umi_format(umi_tags, count, data): +count = 0 +def parse_umi_format(umi_tags, data): + global count for item in data: umi_tags[count] = ','.join(data[item]['Tags']) count += 1 @@ -82,7 +84,6 @@ def get_yaml_wildcards(): yaml_wildcards = {} umi_tags = {} # { tag: count } - count = 0 for path in yaml_files: try: @@ -90,7 +91,7 @@ def get_yaml_wildcards(): data = yaml.safe_load(file) 
if (data): if (is_umi_format(data)): - parse_umi_format(umi_tags, count, data) + parse_umi_format(umi_tags, data) else: parse_dynamic_prompt_format(yaml_wildcards, data, path) else: From 5067afeee95b9a742742333c67837cd35d5d732a Mon Sep 17 00:00:00 2001 From: DominikDoom Date: Wed, 13 Sep 2023 21:55:09 +0200 Subject: [PATCH 07/12] Add missing null safety --- javascript/tagAutocomplete.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/tagAutocomplete.js b/javascript/tagAutocomplete.js index 2146156..3f69cc0 100644 --- a/javascript/tagAutocomplete.js +++ b/javascript/tagAutocomplete.js @@ -1002,7 +1002,7 @@ async function autocomplete(textArea, prompt, fixedTag = null) { tagword = tagword.toLowerCase().replace(/[\n\r]/g, ""); // Process all parsers - let resultCandidates = (await processParsers(textArea, prompt)).filter(x => x.length > 0); + let resultCandidates = (await processParsers(textArea, prompt))?.filter(x => x.length > 0); // If one ore more result candidates match, use their results if (resultCandidates && resultCandidates.length > 0) { // Flatten our candidate(s) From d8d991531aaa7eafc285583685eaff5b7c85b723 Mon Sep 17 00:00:00 2001 From: DominikDoom Date: Wed, 13 Sep 2023 22:04:59 +0200 Subject: [PATCH 08/12] Don't sort umi tags since they use count --- javascript/tagAutocomplete.js | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/javascript/tagAutocomplete.js b/javascript/tagAutocomplete.js index bf0ea1c..06bb7b0 100644 --- a/javascript/tagAutocomplete.js +++ b/javascript/tagAutocomplete.js @@ -1014,8 +1014,9 @@ async function autocomplete(textArea, prompt, fixedTag = null) { if (resultCandidates && resultCandidates.length > 0) { // Flatten our candidate(s) results = resultCandidates.flat(); - // Sort results - results = results.sort(getSortFunction()); + // Sort results, but not if it's umi tags since they are sorted by count + if (!(resultCandidates.length === 1 && results[0].type === ResultType.umiWildcard)) + results = results.sort(getSortFunction()); // Since some tags are kaomoji, we have to add the normal results in some cases if (tagword.startsWith("<") || tagword.startsWith("*<")) { From 3e33169a3a93d6cb88d95174e2d0153017be92eb Mon Sep 17 00:00:00 2001 From: DominikDoom Date: Wed, 13 Sep 2023 22:30:37 +0200 Subject: [PATCH 09/12] Disable sort order dropdown pointer events while refresh is running Doesn't prevent keyboard focus, but changing the values there is much slower since the list doesn't stay open. 
--- javascript/tagAutocomplete.js | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/javascript/tagAutocomplete.js b/javascript/tagAutocomplete.js index 06bb7b0..20e6759 100644 --- a/javascript/tagAutocomplete.js +++ b/javascript/tagAutocomplete.js @@ -273,7 +273,12 @@ async function syncOptions() { // Refresh temp files if model sort order changed // Contrary to the other loads, this one shouldn't happen on a first time load if (TAC_CFG && newCFG.modelSortOrder !== TAC_CFG.modelSortOrder) { + const dropdown = gradioApp().querySelector("#setting_tac_modelSortOrder"); + dropdown.style.opacity = 0.5; + dropdown.style.pointerEvents = "none"; await refreshTacTempFiles(true); + dropdown.style.opacity = null; + dropdown.style.pointerEvents = null; } // Update CSS if maxResults changed From b03b1a0211ff8ce2b4ca48af80aa93264a89acc3 Mon Sep 17 00:00:00 2001 From: NoCrypt <57245077+NoCrypt@users.noreply.github.com> Date: Fri, 15 Sep 2023 20:48:16 +0700 Subject: [PATCH 10/12] Add listener for extra network refresh button --- javascript/tagAutocomplete.js | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/javascript/tagAutocomplete.js b/javascript/tagAutocomplete.js index 3f69cc0..6e86427 100644 --- a/javascript/tagAutocomplete.js +++ b/javascript/tagAutocomplete.js @@ -1326,6 +1326,11 @@ async function setup() { // Listener for internal temp files refresh button gradioApp().querySelector("#refresh_tac_refreshTempFiles")?.addEventListener("click", refreshTacTempFiles); + // Also add listener for external network refresh button + gradioApp().querySelector("#txt2img_extra_refresh")?.addEventListener("click", refreshTacTempFiles); + gradioApp().querySelector("#img2img_extra_refresh")?.addEventListener("click", refreshTacTempFiles); + + // Add mutation observer for the model hash text to also allow hash-based blacklist again let modelHashText = gradioApp().querySelector("#sd_checkpoint_hash"); updateModelName(); From 2622e1b596fce9c3988d1932e0d9a235a9085a43 Mon Sep 17 00:00:00 2001 From: NoCrypt <57245077+NoCrypt@users.noreply.github.com> Date: Fri, 15 Sep 2023 21:12:30 +0700 Subject: [PATCH 11/12] Refresh extra: fix python code did not excecuted --- javascript/tagAutocomplete.js | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/javascript/tagAutocomplete.js b/javascript/tagAutocomplete.js index 6e86427..9198c00 100644 --- a/javascript/tagAutocomplete.js +++ b/javascript/tagAutocomplete.js @@ -1324,12 +1324,15 @@ async function setup() { }) }); // Listener for internal temp files refresh button - gradioApp().querySelector("#refresh_tac_refreshTempFiles")?.addEventListener("click", refreshTacTempFiles); - - // Also add listener for external network refresh button - gradioApp().querySelector("#txt2img_extra_refresh")?.addEventListener("click", refreshTacTempFiles); - gradioApp().querySelector("#img2img_extra_refresh")?.addEventListener("click", refreshTacTempFiles); + const refreshButton = gradioApp().querySelector("#refresh_tac_refreshTempFiles") + refreshButton?.addEventListener("click", refreshTacTempFiles); + // Also add listener for external network refresh button (plus triggering python code) + ["#img2img_extra_refresh", "#txt2img_extra_refresh"].forEach(e => { + gradioApp().querySelector(e)?.addEventListener("click", ()=>{ + refreshButton?.click(); + }); + }) // Add mutation observer for the model hash text to also allow hash-based blacklist again let modelHashText = gradioApp().querySelector("#sd_checkpoint_hash"); From cb54b66eda9cae7349db85de9dada754cf205ef6 
Mon Sep 17 00:00:00 2001 From: DominikDoom Date: Fri, 15 Sep 2023 16:32:20 +0200 Subject: [PATCH 12/12] Refactor PR #239 to use new refresh API endpoint of this branch --- javascript/tagAutocomplete.js | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/javascript/tagAutocomplete.js b/javascript/tagAutocomplete.js index 551bb53..87b09b5 100644 --- a/javascript/tagAutocomplete.js +++ b/javascript/tagAutocomplete.js @@ -1338,13 +1338,12 @@ async function setup() { }) }); // Listener for internal temp files refresh button - const refreshButton = gradioApp().querySelector("#refresh_tac_refreshTempFiles") - refreshButton?.addEventListener("click", refreshTacTempFiles); + gradioApp().querySelector("#refresh_tac_refreshTempFiles")?.addEventListener("click", refreshTacTempFiles); // Also add listener for external network refresh button (plus triggering python code) ["#img2img_extra_refresh", "#txt2img_extra_refresh"].forEach(e => { gradioApp().querySelector(e)?.addEventListener("click", ()=>{ - refreshButton?.click(); + refreshTacTempFiles(true); }); })
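
A minimal standalone sketch of the sorting approach the patches above converge on (PATCH 02/05): sort_models only embeds a per-model sort key into the temp-file lines, and the JavaScript side re-sorts during merging via getSortFunction(). This is an illustration, not part of the patch series; the function name sort_models_sketch, the demo file names, and passing the sort method as an argument (instead of reading shared.opts / tac_modelSortOrder) are assumptions made for a self-contained example.

# Sketch of the key-calculation sorting from the patches above.
# Assumptions: (path, name, meta) tuple input and the name,"sort_key",meta
# temp-file line format; demo file names are invented; the webui setting is
# replaced by a plain sort_method argument.
import tempfile
import time
from pathlib import Path

sort_criteria = {
    "Name": lambda path, name, subpath: name.lower() if subpath else path.stem.lower(),
    "Date Modified (newest first)": lambda path, name, subpath: path.stat().st_mtime,
    "Date Modified (oldest first)": lambda path, name, subpath: path.stat().st_mtime,
}

def sort_models_sketch(model_list, sort_method="Name", name_has_subpath=False):
    """Emit temp-file lines with an embedded sort key; the JS side re-sorts on merge."""
    if not model_list:
        return model_list
    sorter = sort_criteria.get(sort_method, sort_criteria["Name"])
    if len(model_list[0]) > 2:
        return [f'{name},"{sorter(path, name, name_has_subpath)}",{meta}'
                for path, name, meta in model_list]
    return [f'{name},"{sorter(path, name, name_has_subpath)}"'
            for path, name in model_list]

if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as tmp:
        older = Path(tmp, "old_style.safetensors")
        newer = Path(tmp, "new_style.safetensors")
        older.touch()
        time.sleep(0.01)
        newer.touch()
        models = [(newer, "new_style", "aabbcc"), (older, "old_style", "")]
        print(sort_models_sketch(models, "Name"))
        print(sort_models_sketch(models, "Date Modified (newest first)"))

The "Date Modified" criteria emit the same mtime key for both directions; on the JavaScript side getSortFunction() decides whether to reverse, falling back to a name comparison when two keys are equal or a result has no sortKey.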