Mirror of https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git, synced 2026-01-26 19:19:57 +00:00
Merge branch 'feature-sorting' into main
Update including a new sorting option for extra network models & wildcards. For now it sorts only by date modified; this might be expanded in the future. A "sort by frequent use" option is also in the works.
@@ -27,6 +27,7 @@ class AutocompleteResult {
     aliases = null;
     meta = null;
     hash = null;
+    sortKey = null;

     // Constructor
     constructor(text, type) {
@@ -81,6 +81,17 @@ async function fetchAPI(url, json = true, cache = false) {
         return await response.text();
 }

+async function postAPI(url, body) {
+    let response = await fetch(url, { method: "POST", body: body });
+
+    if (response.status != 200) {
+        console.error(`Error posting to API endpoint "${url}": ` + response.status, response.statusText);
+        return null;
+    }
+
+    return await response.json();
+}
+
 // Extra network preview thumbnails
 async function getExtraNetworkPreviewURL(filename, type) {
     const previewJSON = await fetchAPI(`tacapi/v1/thumb-preview/${filename}?type=${type}`, true, true);
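For context, a minimal usage sketch of the new `postAPI` helper, here against the refresh endpoint this commit adds on the Python side (the endpoint returns no payload, so the parsed result is not inspected):

```js
// Hypothetical call site: ask the backend to rebuild its temp files.
// postAPI resolves to the parsed JSON body, or null if the status isn't 200.
async function triggerRefresh() {
    await postAPI("tacapi/v1/refresh-temp-files", null);
}
```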
@@ -200,6 +211,42 @@ function observeElement(element, property, callback, delay = 0) {
     }
 }

+// Sort functions
+function getSortFunction() {
+    let criterion = TAC_CFG.modelSortOrder || "Name";
+
+    const textSort = (a, b, reverse = false) => {
+        const textHolderA = a.type === ResultType.chant ? a.aliases : a.text;
+        const textHolderB = b.type === ResultType.chant ? b.aliases : b.text;
+
+        const aKey = a.sortKey || textHolderA;
+        const bKey = b.sortKey || textHolderB;
+        return reverse ? bKey.localeCompare(aKey) : aKey.localeCompare(bKey);
+    }
+    const numericSort = (a, b, reverse = false) => {
+        const noKey = reverse ? "-1" : Number.MAX_SAFE_INTEGER;
+        let aParsed = parseFloat(a.sortKey || noKey);
+        let bParsed = parseFloat(b.sortKey || noKey);
+
+        if (aParsed === bParsed) {
+            return textSort(a, b, false);
+        }
+
+        return reverse ? bParsed - aParsed : aParsed - bParsed;
+    }
+
+    return (a, b) => {
+        switch (criterion) {
+            case "Date Modified (newest first)":
+                return numericSort(a, b, true);
+            case "Date Modified (oldest first)":
+                return numericSort(a, b, false);
+            default:
+                return textSort(a, b);
+        }
+    }
+}
+
 // Queue calling function to process global queues
 async function processQueue(queue, context, ...args) {
     for (let i = 0; i < queue.length; i++) {
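To illustrate the comparator this returns — a hypothetical sketch with invented names and keys; `sortKey` carries the stringified mtime the Python side writes when a date order is selected:

```js
// Assume TAC_CFG.modelSortOrder === "Date Modified (newest first)".
let a = new AutocompleteResult("old_lora", ResultType.lora);
a.sortKey = "1600000000.0"; // mtime as text, read with parseFloat
let b = new AutocompleteResult("new_lora", ResultType.lora);
b.sortKey = "1700000000.0";

[a, b].sort(getSortFunction()); // -> new_lora first
// Under the default "Name" order (or when sortKey is missing),
// the comparator falls back to localeCompare on the result text.
```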
@@ -16,7 +16,7 @@ class EmbeddingParser extends BaseTagParser {
         let filterCondition = x => x[0].toLowerCase().includes(searchTerm) || x[0].toLowerCase().replaceAll(" ", "_").includes(searchTerm);

         if (versionString)
-            tempResults = embeddings.filter(x => filterCondition(x) && x[1] && x[1] === versionString); // Filter by tagword
+            tempResults = embeddings.filter(x => filterCondition(x) && x[2] && x[2] === versionString); // Filter by tagword
         else
             tempResults = embeddings.filter(x => filterCondition(x)); // Filter by tagword
     } else {
@@ -27,7 +27,8 @@ class EmbeddingParser extends BaseTagParser {
     let finalResults = [];
     tempResults.forEach(t => {
         let result = new AutocompleteResult(t[0].trim(), ResultType.embedding)
-        result.meta = t[1] + " Embedding";
+        result.sortKey = t[1];
+        result.meta = t[2] + " Embedding";
         finalResults.push(result);
     });
@@ -38,9 +39,9 @@ class EmbeddingParser extends BaseTagParser {
 async function load() {
     if (embeddings.length === 0) {
         try {
-            embeddings = (await readFile(`${tagBasePath}/temp/emb.txt`)).split("\n")
-                .filter(x => x.trim().length > 0) // Remove empty lines
-                .map(x => x.trim().split(",")); // Split into name, version type pairs
+            embeddings = (await loadCSV(`${tagBasePath}/temp/emb.txt`))
+                .filter(x => x[0]?.trim().length > 0) // Remove empty lines
+                .map(x => [x[0].trim(), x[1], x[2]]); // Return name, sortKey, hash tuples
         } catch (e) {
             console.error("Error loading embeddings.txt: " + e);
         }
@@ -8,7 +8,7 @@ class HypernetParser extends BaseTagParser {
     if (tagword !== "<" && tagword !== "<h:" && tagword !== "<hypernet:") {
         let searchTerm = tagword.replace("<hypernet:", "").replace("<h:", "").replace("<", "");
         let filterCondition = x => x.toLowerCase().includes(searchTerm) || x.toLowerCase().replaceAll(" ", "_").includes(searchTerm);
-        tempResults = hypernetworks.filter(x => filterCondition(x)); // Filter by tagword
+        tempResults = hypernetworks.filter(x => filterCondition(x[0])); // Filter by tagword
     } else {
         tempResults = hypernetworks;
     }
@@ -16,8 +16,9 @@ class HypernetParser extends BaseTagParser {
     // Add final results
     let finalResults = [];
     tempResults.forEach(t => {
-        let result = new AutocompleteResult(t.trim(), ResultType.hypernetwork)
+        let result = new AutocompleteResult(t[0].trim(), ResultType.hypernetwork)
         result.meta = "Hypernetwork";
+        result.sortKey = t[1];
         finalResults.push(result);
     });
@@ -28,9 +29,9 @@ class HypernetParser extends BaseTagParser {
 async function load() {
     if (hypernetworks.length === 0) {
         try {
-            hypernetworks = (await readFile(`${tagBasePath}/temp/hyp.txt`)).split("\n")
-                .filter(x => x.trim().length > 0) //Remove empty lines
-                .map(x => x.trim()); // Remove carriage returns and padding if it exists
+            hypernetworks = (await loadCSV(`${tagBasePath}/temp/hyp.txt`))
+                .filter(x => x[0]?.trim().length > 0) //Remove empty lines
+                .map(x => [x[0]?.trim(), x[1]]); // Remove carriage returns and padding if it exists
         } catch (e) {
             console.error("Error loading hypernetworks.txt: " + e);
         }
@@ -23,7 +23,8 @@ class LoraParser extends BaseTagParser {

         let result = new AutocompleteResult(name, ResultType.lora)
         result.meta = "Lora";
-        result.hash = t[1];
+        result.sortKey = t[1];
+        result.hash = t[2];
         finalResults.push(result);
     });
@@ -36,7 +37,7 @@ async function load() {
         try {
             loras = (await loadCSV(`${tagBasePath}/temp/lora.txt`))
                 .filter(x => x[0]?.trim().length > 0) // Remove empty lines
-                .map(x => [x[0]?.trim(), x[1]]); // Trim filenames and return the name, hash pairs
+                .map(x => [x[0]?.trim(), x[1], x[2]]); // Trim filenames and return the name, sortKey, hash pairs
         } catch (e) {
             console.error("Error loading lora.txt: " + e);
         }
@@ -23,7 +23,8 @@ class LycoParser extends BaseTagParser {

         let result = new AutocompleteResult(name, ResultType.lyco)
         result.meta = "Lyco";
-        result.hash = t[1];
+        result.sortKey = t[1];
+        result.hash = t[2];
         finalResults.push(result);
     });
@@ -36,7 +37,7 @@ async function load() {
         try {
             lycos = (await loadCSV(`${tagBasePath}/temp/lyco.txt`))
                 .filter(x => x[0]?.trim().length > 0) // Remove empty lines
-                .map(x => [x[0]?.trim(), x[1]]); // Trim filenames and return the name, hash pairs
+                .map(x => [x[0]?.trim(), x[1], x[2]]); // Trim filenames and return the name, sortKey, hash pairs
         } catch (e) {
             console.error("Error loading lyco.txt: " + e);
         }
@@ -85,13 +85,14 @@ class WildcardFileParser extends BaseTagParser {
         } else {
             result = new AutocompleteResult(wcFile[1].trim(), ResultType.wildcardFile);
             result.meta = "Wildcard file";
+            result.sortKey = wcFile[2].trim();
         }

         finalResults.push(result);
         alreadyAdded.set(wcFile[1], true);
     });

-    finalResults.sort((a, b) => a.text.localeCompare(b.text));
+    finalResults.sort(getSortFunction());

     return finalResults;
 }
@@ -100,17 +101,17 @@ class WildcardFileParser extends BaseTagParser {
 async function load() {
     if (wildcardFiles.length === 0 && wildcardExtFiles.length === 0) {
         try {
-            let wcFileArr = (await readFile(`${tagBasePath}/temp/wc.txt`)).split("\n");
-            let wcBasePath = wcFileArr[0].trim(); // First line should be the base path
+            let wcFileArr = await loadCSV(`${tagBasePath}/temp/wc.txt`);
+            let wcBasePath = wcFileArr[0][0].trim(); // First line should be the base path
             wildcardFiles = wcFileArr.slice(1)
-                .filter(x => x.trim().length > 0) // Remove empty lines
-                .map(x => [wcBasePath, x.trim().replace(".txt", "")]); // Remove file extension & newlines
+                .filter(x => x[0]?.trim().length > 0) //Remove empty lines
+                .map(x => [wcBasePath, x[0]?.trim().replace(".txt", ""), x[1]]); // Remove file extension & newlines

             // To support multiple sources, we need to separate them using the provided "-----" strings
-            let wcExtFileArr = (await readFile(`${tagBasePath}/temp/wce.txt`)).split("\n");
+            let wcExtFileArr = await loadCSV(`${tagBasePath}/temp/wce.txt`);
             let splitIndices = [];
             for (let index = 0; index < wcExtFileArr.length; index++) {
-                if (wcExtFileArr[index].trim() === "-----") {
+                if (wcExtFileArr[index][0].trim() === "-----") {
                     splitIndices.push(index);
                 }
             }
@@ -121,12 +122,10 @@ async function load() {
                 let end = splitIndices[i];

                 let wcExtFile = wcExtFileArr.slice(start, end);
-                let base = wcExtFile[0].trim() + "/";
+                let base = wcExtFile[0][0].trim() + "/";
                 wcExtFile = wcExtFile.slice(1)
-                    .filter(x => x.trim().length > 0) // Remove empty lines
-                    .map(x => x.trim().replace(base, "").replace(".txt", "")); // Remove file extension & newlines;
-
-                wcExtFile = wcExtFile.map(x => [base, x]);
+                    .filter(x => x[0]?.trim().length > 0) //Remove empty lines
+                    .map(x => [base, x[0]?.trim().replace(base, "").replace(".txt", ""), x[1]]);
                 wildcardExtFiles.push(...wcExtFile);
             }
@@ -217,6 +217,7 @@ async function syncOptions() {
         useLycos: opts["tac_useLycos"],
         showWikiLinks: opts["tac_showWikiLinks"],
         showExtraNetworkPreviews: opts["tac_showExtraNetworkPreviews"],
+        modelSortOrder: opts["tac_modelSortOrder"],
         // Insertion related settings
         replaceUnderscores: opts["tac_replaceUnderscores"],
         escapeParentheses: opts["tac_escapeParentheses"],
@@ -269,6 +270,17 @@ async function syncOptions() {
         await loadTags(newCFG);
     }

+    // Refresh temp files if model sort order changed
+    // Contrary to the other loads, this one shouldn't happen on a first time load
+    if (TAC_CFG && newCFG.modelSortOrder !== TAC_CFG.modelSortOrder) {
+        const dropdown = gradioApp().querySelector("#setting_tac_modelSortOrder");
+        dropdown.style.opacity = 0.5;
+        dropdown.style.pointerEvents = "none";
+        await refreshTacTempFiles(true);
+        dropdown.style.opacity = null;
+        dropdown.style.pointerEvents = null;
+    }
+
     // Update CSS if maxResults changed
     if (TAC_CFG && newCFG.maxResults !== TAC_CFG.maxResults) {
         gradioApp().querySelectorAll(".autocompleteResults").forEach(r => {
@@ -1007,36 +1019,29 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
     if (resultCandidates && resultCandidates.length > 0) {
         // Flatten our candidate(s)
         results = resultCandidates.flat();
-        // If there was more than one candidate, sort the results by text to mix them
-        // instead of having them added in the order of the parsers
-        let shouldSort = resultCandidates.length > 1;
-        if (shouldSort) {
-            results = results.sort((a, b) => {
-                let sortByA = a.type === ResultType.chant ? a.aliases : a.text;
-                let sortByB = b.type === ResultType.chant ? b.aliases : b.text;
-                return sortByA.localeCompare(sortByB);
-            });
-
-            // Since some tags are kaomoji, we have to add the normal results in some cases
-            if (tagword.startsWith("<") || tagword.startsWith("*<")) {
-                // Create escaped search regex with support for * as a start placeholder
-                let searchRegex;
-                if (tagword.startsWith("*")) {
-                    tagword = tagword.slice(1);
-                    searchRegex = new RegExp(`${escapeRegExp(tagword)}`, 'i');
-                } else {
-                    searchRegex = new RegExp(`(^|[^a-zA-Z])${escapeRegExp(tagword)}`, 'i');
-                }
-                let genericResults = allTags.filter(x => x[0].toLowerCase().search(searchRegex) > -1).slice(0, TAC_CFG.maxResults);
-
-                genericResults.forEach(g => {
-                    let result = new AutocompleteResult(g[0].trim(), ResultType.tag)
-                    result.category = g[1];
-                    result.count = g[2];
-                    result.aliases = g[3];
-                    results.push(result);
-                });
-            }
-        }
+        // Sort results, but not if it's umi tags since they are sorted by count
+        if (!(resultCandidates.length === 1 && results[0].type === ResultType.umiWildcard))
+            results = results.sort(getSortFunction());
+
+        // Since some tags are kaomoji, we have to add the normal results in some cases
+        if (tagword.startsWith("<") || tagword.startsWith("*<")) {
+            // Create escaped search regex with support for * as a start placeholder
+            let searchRegex;
+            if (tagword.startsWith("*")) {
+                tagword = tagword.slice(1);
+                searchRegex = new RegExp(`${escapeRegExp(tagword)}`, 'i');
+            } else {
+                searchRegex = new RegExp(`(^|[^a-zA-Z])${escapeRegExp(tagword)}`, 'i');
+            }
+            let genericResults = allTags.filter(x => x[0].toLowerCase().search(searchRegex) > -1).slice(0, TAC_CFG.maxResults);

+            genericResults.forEach(g => {
+                let result = new AutocompleteResult(g[0].trim(), ResultType.tag)
+                result.category = g[1];
+                result.count = g[2];
+                result.aliases = g[3];
+                results.push(result);
+            });
+        }
     }
     // Else search the normal tag list
@@ -1223,8 +1228,8 @@ function navigateInList(textArea, event) {
         event.stopPropagation();
     }

-async function refreshTacTempFiles() {
-    setTimeout(async () => {
+async function refreshTacTempFiles(api = false) {
+    const reload = async () => {
         wildcardFiles = [];
         wildcardExtFiles = [];
         umiWildcards = [];
@@ -1236,7 +1241,16 @@ async function refreshTacTempFiles() {
         await processQueue(QUEUE_FILE_LOAD, null);

         console.log("TAC: Refreshed temp files");
-    }, 2000);
+    }
+
+    if (api) {
+        await postAPI("tacapi/v1/refresh-temp-files", null);
+        await reload();
+    } else {
+        setTimeout(async () => {
+            await reload();
+        }, 2000);
+    }
 }

 function addAutocompleteToArea(area) {
@@ -1324,13 +1338,12 @@ async function setup() {
         })
     });

     // Listener for internal temp files refresh button
-    const refreshButton = gradioApp().querySelector("#refresh_tac_refreshTempFiles")
-    refreshButton?.addEventListener("click", refreshTacTempFiles);
+    gradioApp().querySelector("#refresh_tac_refreshTempFiles")?.addEventListener("click", refreshTacTempFiles);

     // Also add listener for external network refresh button (plus triggering python code)
     ["#img2img_extra_refresh", "#txt2img_extra_refresh"].forEach(e => {
         gradioApp().querySelector(e)?.addEventListener("click", ()=>{
-            refreshButton?.click();
+            refreshTacTempFiles(true);
         });
     })
@@ -1,6 +1,7 @@
 # This helper script scans folders for wildcards and embeddings and writes them
 # to a temporary file to expose it to the javascript side

 import os
 import glob
 import json
 import urllib.parse
@@ -24,12 +25,45 @@ except Exception as e: # Not supported.
     load_textual_inversion_embeddings = lambda *args, **kwargs: None
     print("Tag Autocomplete: Cannot reload embeddings instantly:", e)

+# Sorting functions for extra networks / embeddings stuff
+sort_criteria = {
+    "Name": lambda path, name, subpath: name.lower() if subpath else path.stem.lower(),
+    "Date Modified (newest first)": lambda path, name, subpath: path.stat().st_mtime,
+    "Date Modified (oldest first)": lambda path, name, subpath: path.stat().st_mtime
+}
+
+def sort_models(model_list, sort_method = None, name_has_subpath = False):
+    """Sorts models according to the setting.
+
+    Input: list of (full_path, display_name, {hash}) models.
+    Returns models in the format of name, sort key, meta.
+    Meta is optional and can be a hash, version string or other required info.
+    """
+    if len(model_list) == 0:
+        return model_list
+
+    if sort_method is None:
+        sort_method = getattr(shared.opts, "tac_modelSortOrder", "Name")
+
+    # Get sorting method from dictionary
+    sorter = sort_criteria.get(sort_method, sort_criteria["Name"])
+
+    # During merging on the JS side we need to re-sort anyway, so here only the sort criteria are calculated.
+    # The list itself doesn't need to get sorted at this point.
+    if len(model_list[0]) > 2:
+        results = [f'{name},"{sorter(path, name, name_has_subpath)}",{meta}' for path, name, meta in model_list]
+    else:
+        results = [f'{name},"{sorter(path, name, name_has_subpath)}"' for path, name in model_list]
+    return results
+
+
 def get_wildcards():
     """Returns a list of all wildcards. Works on nested folders."""
     wildcard_files = list(WILDCARD_PATH.rglob("*.txt"))
-    resolved = [w.relative_to(WILDCARD_PATH).as_posix(
-    ) for w in wildcard_files if w.name != "put wildcards here.txt"]
-    return resolved
+    resolved = [(w, w.relative_to(WILDCARD_PATH).as_posix())
+                for w in wildcard_files
+                if w.name != "put wildcards here.txt"]
+    return sort_models(resolved, name_has_subpath=True)


 def get_ext_wildcards():
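The rows sort_models writes are what loadCSV hands back to the JS parsers above. A hypothetical sketch (name, timestamp, and hash invented) of how one Lora row maps onto a completion result:

```js
// One parsed row from temp/lora.txt, as written by sort_models():
const t = ["style", "1687271234.56", "abc123"]; // name, sort key (mtime), hash

let result = new AutocompleteResult(t[0].trim(), ResultType.lora);
result.meta = "Lora";
result.sortKey = t[1]; // consumed by getSortFunction's numericSort
result.hash = t[2];
```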
@@ -38,7 +72,10 @@ def get_ext_wildcards():

     for path in WILDCARD_EXT_PATHS:
         wildcard_files.append(path.as_posix())
-        wildcard_files.extend(p.relative_to(path).as_posix() for p in path.rglob("*.txt") if p.name != "put wildcards here.txt")
+        resolved = [(w, w.relative_to(path).as_posix())
+                    for w in path.rglob("*.txt")
+                    if w.name != "put wildcards here.txt"]
+        wildcard_files.extend(sort_models(resolved, name_has_subpath=True))
         wildcard_files.append("-----")

     return wildcard_files
@@ -137,14 +174,14 @@ def get_embeddings(sd_model):

         # Add embeddings to the correct list
         if (emb_a_shape == V1_SHAPE):
-            emb_v1 = list(emb_type_a.keys())
+            emb_v1 = [(Path(v.filename), k, "v1") for (k,v) in emb_type_a.items()]
         elif (emb_a_shape == V2_SHAPE):
-            emb_v2 = list(emb_type_a.keys())
+            emb_v2 = [(Path(v.filename), k, "v2") for (k,v) in emb_type_a.items()]

         if (emb_b_shape == V1_SHAPE):
-            emb_v1 = list(emb_type_b.keys())
+            emb_v1 = [(Path(v.filename), k, "v1") for (k,v) in emb_type_b.items()]
         elif (emb_b_shape == V2_SHAPE):
-            emb_v2 = list(emb_type_b.keys())
+            emb_v2 = [(Path(v.filename), k, "v2") for (k,v) in emb_type_b.items()]

         # Get shape of current model
         #vec = sd_model.cond_stage_model.encode_embedding_init_text(",", 1)
@@ -156,7 +193,7 @@ def get_embeddings(sd_model):
             # results = [e + ",v2" for e in emb_v2] + [e + ",v1" for e in emb_v1]
             #else:
             #    raise AttributeError # Fallback to old method
-            results = sorted([e + ",v1" for e in emb_v1] + [e + ",v2" for e in emb_v2], key=lambda x: x.lower())
+            results = sort_models(emb_v1) + sort_models(emb_v2)
         except AttributeError:
             print("tag_autocomplete_helper: Old webui version or unrecognized model shape, using fallback for embedding completion.")
             # Get a list of all embeddings in the folder
@@ -174,9 +211,8 @@ def get_hypernetworks():

     # Get a list of all hypernetworks in the folder
     hyp_paths = [Path(h) for h in glob.glob(HYP_PATH.joinpath("**/*").as_posix(), recursive=True)]
-    all_hypernetworks = [str(h.name) for h in hyp_paths if h.suffix in {".pt"}]
-    # Remove file extensions
-    return sorted([h[:h.rfind('.')] for h in all_hypernetworks], key=lambda x: x.lower())
+    all_hypernetworks = [(h, h.stem) for h in hyp_paths if h.suffix in {".pt"}]
+    return sort_models(all_hypernetworks)

 model_keyword_installed = write_model_keyword_path()
 def get_lora():
@@ -187,17 +223,16 @@ def get_lora():
     lora_paths = [Path(l) for l in glob.glob(LORA_PATH.joinpath("**/*").as_posix(), recursive=True)]
     # Get hashes
     valid_loras = [lf for lf in lora_paths if lf.suffix in {".safetensors", ".ckpt", ".pt"}]
-    hashes = {}
+    loras_with_hash = []
     for l in valid_loras:
         name = l.relative_to(LORA_PATH).as_posix()
         if model_keyword_installed:
-            hashes[name] = get_lora_simple_hash(l)
+            hash = get_lora_simple_hash(l)
         else:
-            hashes[name] = ""
-    # Sort
-    sorted_loras = dict(sorted(hashes.items()))
-    # Add hashes and return
-    return [f"\"{name}\",{hash}" for name, hash in sorted_loras.items()]
+            hash = ""
+        loras_with_hash.append((l, name, hash))
+    return sort_models(loras_with_hash)


 def get_lyco():
@@ -208,19 +243,16 @@ def get_lyco():

     # Get hashes
     valid_lycos = [lyf for lyf in lyco_paths if lyf.suffix in {".safetensors", ".ckpt", ".pt"}]
-    hashes = {}
+    lycos_with_hash = []
     for ly in valid_lycos:
         name = ly.relative_to(LYCO_PATH).as_posix()
         if model_keyword_installed:
-            hashes[name] = get_lora_simple_hash(ly)
+            hash = get_lora_simple_hash(ly)
         else:
-            hashes[name] = ""
-    # Sort
-    sorted_lycos = dict(sorted(hashes.items()))
-    # Add hashes and return
-    return [f"\"{name}\",{hash}" for name, hash in sorted_lycos.items()]
+            hash = ""
+        lycos_with_hash.append((ly, name, hash))
+    return sort_models(lycos_with_hash)

 def write_tag_base_path():
     """Writes the tag base path to a fixed location temporary file"""
@@ -376,6 +408,7 @@ def on_ui_settings():
     "tac_useLycos": shared.OptionInfo(True, "Search for LyCORIS/LoHa"),
     "tac_showWikiLinks": shared.OptionInfo(False, "Show '?' next to tags, linking to its Danbooru or e621 wiki page").info("Warning: This is an external site and very likely contains NSFW examples!"),
     "tac_showExtraNetworkPreviews": shared.OptionInfo(True, "Show preview thumbnails for extra networks if available"),
+    "tac_modelSortOrder": shared.OptionInfo("Name", "Model sort order", gr.Dropdown, lambda: {"choices": list(sort_criteria.keys())}).info("Order for extra network models and wildcards in dropdown"),
     # Insertion related settings
     "tac_replaceUnderscores": shared.OptionInfo(True, "Replace underscores with spaces on insertion"),
     "tac_escapeParentheses": shared.OptionInfo(True, "Escape parentheses on insertion"),
@@ -484,6 +517,10 @@ def api_tac(_: gr.Blocks, app: FastAPI):
         except Exception as e:
             return JSONResponse({"error": e}, status_code=500)

+    @app.post("/tacapi/v1/refresh-temp-files")
+    async def api_refresh_temp_files():
+        refresh_temp_files()
+
     @app.get("/tacapi/v1/lora-info/{lora_name}")
     async def get_lora_info(lora_name):
         return await get_json_info(LORA_PATH, lora_name)