Mirror of https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git (synced 2026-02-06 16:20:08 +00:00)
Merge branch 'feature-sorting' into feature-sort-by-frequent-use
@@ -27,6 +27,7 @@ class AutocompleteResult {
     aliases = null;
     meta = null;
     hash = null;
+    sortKey = null;
 
     // Constructor
     constructor(text, type) {
@@ -223,6 +223,42 @@ function observeElement(element, property, callback, delay = 0) {
     }
 }
 
+// Sort functions
+function getSortFunction() {
+    let criterion = TAC_CFG.modelSortOrder || "Name";
+
+    const textSort = (a, b, reverse = false) => {
+        const textHolderA = a.type === ResultType.chant ? a.aliases : a.text;
+        const textHolderB = b.type === ResultType.chant ? b.aliases : b.text;
+
+        const aKey = a.sortKey || textHolderA;
+        const bKey = b.sortKey || textHolderB;
+        return reverse ? bKey.localeCompare(aKey) : aKey.localeCompare(bKey);
+    }
+    const numericSort = (a, b, reverse = false) => {
+        const noKey = reverse ? "-1" : Number.MAX_SAFE_INTEGER;
+        let aParsed = parseFloat(a.sortKey || noKey);
+        let bParsed = parseFloat(b.sortKey || noKey);
+
+        if (aParsed === bParsed) {
+            return textSort(a, b, false);
+        }
+
+        return reverse ? bParsed - aParsed : aParsed - bParsed;
+    }
+
+    return (a, b) => {
+        switch (criterion) {
+            case "Date Modified (newest first)":
+                return numericSort(a, b, true);
+            case "Date Modified (oldest first)":
+                return numericSort(a, b, false);
+            default:
+                return textSort(a, b);
+        }
+    }
+}
+
 // Queue calling function to process global queues
 async function processQueue(queue, context, ...args) {
     for (let i = 0; i < queue.length; i++) {
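For orientation, a minimal usage sketch of the comparator introduced above. The sample data is hypothetical, and reading sortKey as a numeric modification timestamp is an assumption based on the "Date Modified" criteria; getSortFunction, AutocompleteResult, ResultType, and TAC_CFG are the names from this diff.

    // Hypothetical usage sketch, not part of the commit.
    let a = new AutocompleteResult("styleA", ResultType.lora);
    a.sortKey = "1700000000"; // assumed: numeric sort keys such as file mtimes
    let b = new AutocompleteResult("styleB", ResultType.lora);
    b.sortKey = "1650000000";

    // With TAC_CFG.modelSortOrder = "Date Modified (newest first)" the comparator
    // parses the keys as floats and puts a before b; with the default "Name"
    // criterion it does a localeCompare on sortKey (falling back to text).
    [a, b].sort(getSortFunction());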
@@ -16,7 +16,7 @@ class EmbeddingParser extends BaseTagParser {
             let filterCondition = x => x[0].toLowerCase().includes(searchTerm) || x[0].toLowerCase().replaceAll(" ", "_").includes(searchTerm);
 
             if (versionString)
-                tempResults = embeddings.filter(x => filterCondition(x) && x[1] && x[1] === versionString); // Filter by tagword
+                tempResults = embeddings.filter(x => filterCondition(x) && x[2] && x[2] === versionString); // Filter by tagword
             else
                 tempResults = embeddings.filter(x => filterCondition(x)); // Filter by tagword
         } else {
@@ -27,7 +27,8 @@ class EmbeddingParser extends BaseTagParser {
         let finalResults = [];
         tempResults.forEach(t => {
             let result = new AutocompleteResult(t[0].trim(), ResultType.embedding)
-            result.meta = t[1] + " Embedding";
+            result.sortKey = t[1];
+            result.meta = t[2] + " Embedding";
             finalResults.push(result);
         });
 
@@ -38,9 +39,9 @@ class EmbeddingParser extends BaseTagParser {
 async function load() {
     if (embeddings.length === 0) {
         try {
-            embeddings = (await readFile(`${tagBasePath}/temp/emb.txt`)).split("\n")
-                .filter(x => x.trim().length > 0) // Remove empty lines
-                .map(x => x.trim().split(",")); // Split into name, version type pairs
+            embeddings = (await loadCSV(`${tagBasePath}/temp/emb.txt`))
+                .filter(x => x[0]?.trim().length > 0) // Remove empty lines
+                .map(x => [x[0].trim(), x[1], x[2]]); // Return name, sortKey, hash tuples
         } catch (e) {
             console.error("Error loading embeddings.txt: " + e);
         }
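Every loader touched by this commit swaps readFile plus split("\n") for a loadCSV helper that yields one array of fields per line. The helper itself is not part of this diff; a rough sketch of the behavior the call sites assume (hypothetical implementation):

    // Hypothetical sketch of what the call sites expect from loadCSV;
    // the real helper lives elsewhere in the extension and may differ,
    // e.g. in how it handles quoted or escaped fields.
    async function loadCSV(path) {
        const text = await readFile(path); // readFile as used in the old code
        return text.split("\n").map(line => line.trim().split(","));
    }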
@@ -8,7 +8,7 @@ class HypernetParser extends BaseTagParser {
         if (tagword !== "<" && tagword !== "<h:" && tagword !== "<hypernet:") {
             let searchTerm = tagword.replace("<hypernet:", "").replace("<h:", "").replace("<", "");
             let filterCondition = x => x.toLowerCase().includes(searchTerm) || x.toLowerCase().replaceAll(" ", "_").includes(searchTerm);
-            tempResults = hypernetworks.filter(x => filterCondition(x)); // Filter by tagword
+            tempResults = hypernetworks.filter(x => filterCondition(x[0])); // Filter by tagword
         } else {
             tempResults = hypernetworks;
         }
@@ -16,8 +16,9 @@ class HypernetParser extends BaseTagParser {
         // Add final results
         let finalResults = [];
         tempResults.forEach(t => {
-            let result = new AutocompleteResult(t.trim(), ResultType.hypernetwork)
+            let result = new AutocompleteResult(t[0].trim(), ResultType.hypernetwork)
             result.meta = "Hypernetwork";
+            result.sortKey = t[1];
             finalResults.push(result);
         });
 
@@ -28,9 +29,9 @@ class HypernetParser extends BaseTagParser {
 async function load() {
     if (hypernetworks.length === 0) {
         try {
-            hypernetworks = (await readFile(`${tagBasePath}/temp/hyp.txt`)).split("\n")
-                .filter(x => x.trim().length > 0) //Remove empty lines
-                .map(x => x.trim()); // Remove carriage returns and padding if it exists
+            hypernetworks = (await loadCSV(`${tagBasePath}/temp/hyp.txt`))
+                .filter(x => x[0]?.trim().length > 0) //Remove empty lines
+                .map(x => [x[0]?.trim(), x[1]]); // Remove carriage returns and padding if it exists
         } catch (e) {
             console.error("Error loading hypernetworks.txt: " + e);
         }
@@ -23,7 +23,8 @@ class LoraParser extends BaseTagParser {
 
             let result = new AutocompleteResult(name, ResultType.lora)
             result.meta = "Lora";
-            result.hash = t[1];
+            result.sortKey = t[1];
+            result.hash = t[2];
             finalResults.push(result);
         });
 
@@ -36,7 +37,7 @@ async function load() {
         try {
             loras = (await loadCSV(`${tagBasePath}/temp/lora.txt`))
                 .filter(x => x[0]?.trim().length > 0) // Remove empty lines
-                .map(x => [x[0]?.trim(), x[1]]); // Trim filenames and return the name, hash pairs
+                .map(x => [x[0]?.trim(), x[1], x[2]]); // Trim filenames and return the name, sortKey, hash pairs
         } catch (e) {
             console.error("Error loading lora.txt: " + e);
         }
@@ -23,7 +23,8 @@ class LycoParser extends BaseTagParser {
 
             let result = new AutocompleteResult(name, ResultType.lyco)
             result.meta = "Lyco";
-            result.hash = t[1];
+            result.sortKey = t[1];
+            result.hash = t[2];
             finalResults.push(result);
         });
 
@@ -36,7 +37,7 @@ async function load() {
         try {
             lycos = (await loadCSV(`${tagBasePath}/temp/lyco.txt`))
                 .filter(x => x[0]?.trim().length > 0) // Remove empty lines
-                .map(x => [x[0]?.trim(), x[1]]); // Trim filenames and return the name, hash pairs
+                .map(x => [x[0]?.trim(), x[1], x[2]]); // Trim filenames and return the name, sortKey, hash pairs
         } catch (e) {
             console.error("Error loading lyco.txt: " + e);
         }
@@ -149,6 +149,7 @@ class UmiParser extends BaseTagParser {
                 finalResults.push(result);
             });
 
+            finalResults = finalResults.sort((a, b) => b.count - a.count);
             return finalResults;
         } else if (showAll) {
             let filteredWildcardsSorted = filteredWildcards("");
@@ -163,6 +164,8 @@ class UmiParser extends BaseTagParser {
 
             originalTagword = tagword;
             tagword = "";
+
+            finalResults = finalResults.sort((a, b) => b.count - a.count);
             return finalResults;
         }
     } else {
@@ -178,6 +181,8 @@ class UmiParser extends BaseTagParser {
 
             originalTagword = tagword;
             tagword = "";
+
+            finalResults = finalResults.sort((a, b) => b.count - a.count);
             return finalResults;
         }
     }
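Note the pattern across these three UmiParser hunks: every exit path now pre-sorts its results by usage count, descending. This is why the autocomplete() hunk further down explicitly skips getSortFunction() when the only candidate set is of type umiWildcard.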
@@ -85,13 +85,14 @@ class WildcardFileParser extends BaseTagParser {
             } else {
                 result = new AutocompleteResult(wcFile[1].trim(), ResultType.wildcardFile);
                 result.meta = "Wildcard file";
+                result.sortKey = wcFile[2].trim();
             }
 
             finalResults.push(result);
             alreadyAdded.set(wcFile[1], true);
         });
 
-        finalResults.sort((a, b) => a.text.localeCompare(b.text));
+        finalResults.sort(getSortFunction());
 
         return finalResults;
     }
@@ -100,17 +101,17 @@ class WildcardFileParser extends BaseTagParser {
 async function load() {
     if (wildcardFiles.length === 0 && wildcardExtFiles.length === 0) {
         try {
-            let wcFileArr = (await readFile(`${tagBasePath}/temp/wc.txt`)).split("\n");
-            let wcBasePath = wcFileArr[0].trim(); // First line should be the base path
+            let wcFileArr = await loadCSV(`${tagBasePath}/temp/wc.txt`);
+            let wcBasePath = wcFileArr[0][0].trim(); // First line should be the base path
             wildcardFiles = wcFileArr.slice(1)
-                .filter(x => x.trim().length > 0) // Remove empty lines
-                .map(x => [wcBasePath, x.trim().replace(".txt", "")]); // Remove file extension & newlines
+                .filter(x => x[0]?.trim().length > 0) //Remove empty lines
+                .map(x => [wcBasePath, x[0]?.trim().replace(".txt", ""), x[1]]); // Remove file extension & newlines
 
             // To support multiple sources, we need to separate them using the provided "-----" strings
-            let wcExtFileArr = (await readFile(`${tagBasePath}/temp/wce.txt`)).split("\n");
+            let wcExtFileArr = await loadCSV(`${tagBasePath}/temp/wce.txt`);
             let splitIndices = [];
             for (let index = 0; index < wcExtFileArr.length; index++) {
-                if (wcExtFileArr[index].trim() === "-----") {
+                if (wcExtFileArr[index][0].trim() === "-----") {
                     splitIndices.push(index);
                 }
             }
@@ -121,12 +122,10 @@ async function load() {
                 let end = splitIndices[i];
 
                 let wcExtFile = wcExtFileArr.slice(start, end);
-                let base = wcExtFile[0].trim() + "/";
+                let base = wcExtFile[0][0].trim() + "/";
                 wcExtFile = wcExtFile.slice(1)
-                    .filter(x => x.trim().length > 0) // Remove empty lines
-                    .map(x => x.trim().replace(base, "").replace(".txt", "")); // Remove file extension & newlines;
-
-                wcExtFile = wcExtFile.map(x => [base, x]);
+                    .filter(x => x[0]?.trim().length > 0) //Remove empty lines
+                    .map(x => [base, x[0]?.trim().replace(base, "").replace(".txt", ""), x[1]]);
                 wildcardExtFiles.push(...wcExtFile);
             }
 
@@ -217,6 +217,7 @@ async function syncOptions() {
         useLycos: opts["tac_useLycos"],
         showWikiLinks: opts["tac_showWikiLinks"],
         showExtraNetworkPreviews: opts["tac_showExtraNetworkPreviews"],
+        modelSortOrder: opts["tac_modelSortOrder"],
         // Insertion related settings
         replaceUnderscores: opts["tac_replaceUnderscores"],
         escapeParentheses: opts["tac_escapeParentheses"],
@@ -269,6 +270,17 @@ async function syncOptions() {
         await loadTags(newCFG);
     }
 
+    // Refresh temp files if model sort order changed
+    // Contrary to the other loads, this one shouldn't happen on a first time load
+    if (TAC_CFG && newCFG.modelSortOrder !== TAC_CFG.modelSortOrder) {
+        const dropdown = gradioApp().querySelector("#setting_tac_modelSortOrder");
+        dropdown.style.opacity = 0.5;
+        dropdown.style.pointerEvents = "none";
+        await refreshTacTempFiles(true);
+        dropdown.style.opacity = null;
+        dropdown.style.pointerEvents = null;
+    }
+
     // Update CSS if maxResults changed
     if (TAC_CFG && newCFG.maxResults !== TAC_CFG.maxResults) {
         gradioApp().querySelectorAll(".autocompleteResults").forEach(r => {
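A short note on the dropdown handling above: setting opacity and pointerEvents inline visually dims and disables the sort-order dropdown while the temp files regenerate, so a second sort-order change cannot queue another refresh mid-flight; assigning null afterwards removes the inline overrides and restores the stylesheet defaults.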
@@ -1002,41 +1014,34 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
     tagword = tagword.toLowerCase().replace(/[\n\r]/g, "");
 
     // Process all parsers
-    let resultCandidates = await processParsers(textArea, prompt);
+    let resultCandidates = (await processParsers(textArea, prompt))?.filter(x => x.length > 0);
     // If one ore more result candidates match, use their results
     if (resultCandidates && resultCandidates.length > 0) {
         // Flatten our candidate(s)
         results = resultCandidates.flat();
-        // If there was more than one candidate, sort the results by text to mix them
-        // instead of having them added in the order of the parsers
-        let shouldSort = resultCandidates.length > 1;
-        if (shouldSort) {
-            results = results.sort((a, b) => {
-                let sortByA = a.type === ResultType.chant ? a.aliases : a.text;
-                let sortByB = b.type === ResultType.chant ? b.aliases : b.text;
-                return sortByA.localeCompare(sortByB);
-            });
+        // Sort results, but not if it's umi tags since they are sorted by count
+        if (!(resultCandidates.length === 1 && results[0].type === ResultType.umiWildcard))
+            results = results.sort(getSortFunction());
 
-            // Since some tags are kaomoji, we have to add the normal results in some cases
-            if (tagword.startsWith("<") || tagword.startsWith("*<")) {
-                // Create escaped search regex with support for * as a start placeholder
-                let searchRegex;
-                if (tagword.startsWith("*")) {
-                    tagword = tagword.slice(1);
-                    searchRegex = new RegExp(`${escapeRegExp(tagword)}`, 'i');
-                } else {
-                    searchRegex = new RegExp(`(^|[^a-zA-Z])${escapeRegExp(tagword)}`, 'i');
-                }
-                let genericResults = allTags.filter(x => x[0].toLowerCase().search(searchRegex) > -1).slice(0, TAC_CFG.maxResults);
-
-                genericResults.forEach(g => {
-                    let result = new AutocompleteResult(g[0].trim(), ResultType.tag)
-                    result.category = g[1];
-                    result.count = g[2];
-                    result.aliases = g[3];
-                    results.push(result);
-                });
-            }
+        // Since some tags are kaomoji, we have to add the normal results in some cases
+        if (tagword.startsWith("<") || tagword.startsWith("*<")) {
+            // Create escaped search regex with support for * as a start placeholder
+            let searchRegex;
+            if (tagword.startsWith("*")) {
+                tagword = tagword.slice(1);
+                searchRegex = new RegExp(`${escapeRegExp(tagword)}`, 'i');
+            } else {
+                searchRegex = new RegExp(`(^|[^a-zA-Z])${escapeRegExp(tagword)}`, 'i');
+            }
+            let genericResults = allTags.filter(x => x[0].toLowerCase().search(searchRegex) > -1).slice(0, TAC_CFG.maxResults);
+
+            genericResults.forEach(g => {
+                let result = new AutocompleteResult(g[0].trim(), ResultType.tag)
+                result.category = g[1];
+                result.count = g[2];
+                result.aliases = g[3];
+                results.push(result);
+            });
+        }
     }
     // Else search the normal tag list
@@ -1223,8 +1228,8 @@ function navigateInList(textArea, event) {
     event.stopPropagation();
 }
 
-async function refreshTacTempFiles() {
-    setTimeout(async () => {
+async function refreshTacTempFiles(api = false) {
+    const reload = async () => {
         wildcardFiles = [];
         wildcardExtFiles = [];
         umiWildcards = [];
@@ -1236,7 +1241,16 @@ async function refreshTacTempFiles() {
         await processQueue(QUEUE_FILE_LOAD, null);
 
         console.log("TAC: Refreshed temp files");
-    }, 2000);
-}
+    }
+
+    if (api) {
+        await postAPI("tacapi/v1/refresh-temp-files", null);
+        await reload();
+    } else {
+        setTimeout(async () => {
+            await reload();
+        }, 2000);
+    }
+}
 
 function addAutocompleteToArea(area) {
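The api branch above relies on a postAPI helper to trigger the Python-side temp file regeneration before reloading. That helper is not part of this diff; a minimal fetch-based sketch of what the call assumes (hypothetical implementation):

    // Hypothetical sketch; the real postAPI helper is defined elsewhere in the extension.
    async function postAPI(url, body) {
        const response = await fetch(url, {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: body ? JSON.stringify(body) : null,
        });
        // Tolerate empty or non-JSON responses from the endpoint.
        return response.ok ? await response.json().catch(() => null) : null;
    }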
@@ -1326,6 +1340,13 @@ async function setup() {
     // Listener for internal temp files refresh button
     gradioApp().querySelector("#refresh_tac_refreshTempFiles")?.addEventListener("click", refreshTacTempFiles);
 
+    // Also add listener for external network refresh button (plus triggering python code)
+    ["#img2img_extra_refresh", "#txt2img_extra_refresh"].forEach(e => {
+        gradioApp().querySelector(e)?.addEventListener("click", ()=>{
+            refreshTacTempFiles(true);
+        });
+    })
+
     // Add mutation observer for the model hash text to also allow hash-based blacklist again
     let modelHashText = gradioApp().querySelector("#sd_checkpoint_hash");
     updateModelName();