Mirror of https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git (synced 2026-01-27 03:29:55 +00:00)

Compare commits: feature-fu...main (29 commits)
| SHA1 |
|---|
| 19a30beed4 |
| 89fee277e3 |
| c4510663ca |
| 8766965a30 |
| 34e68e1628 |
| 41d185b616 |
| e0baa58ace |
| c1ef12d887 |
| 4fc122de4b |
| c341ccccb6 |
| bda8701734 |
| 63fca457a7 |
| 38700d4743 |
| bb492ba059 |
| 40ad070a02 |
| 209b1dd76b |
| 196fa19bfc |
| 6ffeeafc49 |
| 08b7c58ea7 |
| 6be91449f3 |
| b515c15e01 |
| 827b99c961 |
| 49ec047af8 |
| f94da07ed1 |
| e2cfe7341b |
| ce51ec52a2 |
| f64d728ac6 |
| 1c6bba2a3d |
| 9a47c2ec2c |
@@ -20,15 +20,6 @@ var lycos = [];
 var modelKeywordDict = new Map();
 var chants = [];
 var styleNames = [];
-// uFuzzy haystacks
-var tacHaystacks = {
-    "tag": [],
-    "extra": [],
-    "tagAlias": [],
-    "extraAlias": [],
-    "translationKeys": [],
-    "translationValues": []
-}
 
 // Selected model info for black/whitelisting
 var currentModelHash = "";
@@ -30,9 +30,6 @@ class AutocompleteResult {
     meta = null;
     hash = null;
     sortKey = null;
-    // uFuzzy specific
-    highlightedText = null;
-    matchSource = null;
 
     // Constructor
     constructor(text, type) {
@@ -86,6 +86,13 @@ const thirdParty = {
         "selectors": [
            "Found tags",
         ]
     },
+    "TIPO": {
+        "base": "#tab_txt2img",
+        "hasIds": false,
+        "selectors": [
+            "Tag Prompt"
+        ]
+    }
 }
File diff suppressed because one or more lines are too long
@@ -5,17 +5,14 @@ class LoraParser extends BaseTagParser {
     parse() {
         // Show lora
         let tempResults = [];
-        let searchTerm = tagword;
         if (tagword !== "<" && tagword !== "<l:" && tagword !== "<lora:") {
-            searchTerm = tagword.replace("<lora:", "").replace("<l:", "").replace("<", "");
+            let searchTerm = tagword.replace("<lora:", "").replace("<l:", "").replace("<", "");
             let filterCondition = x => {
                 let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
                 return regex.test(x.toLowerCase()) || regex.test(x.toLowerCase().replaceAll(" ", "_"));
             };
-            filterCondition = (x) => TacFuzzy.check(x, searchTerm);
             tempResults = loras.filter(x => filterCondition(x[0])); // Filter by tagword
         } else {
-            searchTerm = null;
             tempResults = loras;
         }
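Reviewer note: the regex filter that main keeps here matches either the raw lora name or an underscore variant, so a search term typed with underscores still finds files named with spaces. A minimal standalone sketch of that behavior (`escapeRegExpMin` is a stand-in for the extension's `escapeRegExp` helper, whose exact signature isn't shown in this hunk):

```js
// Stand-in for the extension's escapeRegExp helper (assumed behavior).
const escapeRegExpMin = (s) => s.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");

function matchesLora(name, searchTerm) {
    const regex = new RegExp(escapeRegExpMin(searchTerm), "i");
    // Accept the raw name or a variant with spaces collapsed to underscores
    return regex.test(name.toLowerCase()) || regex.test(name.toLowerCase().replaceAll(" ", "_"));
}

console.log(matchesLora("Detail Tweaker XL", "detail_tweaker")); // true (underscore variant)
console.log(matchesLora("add-detail-xl", "detail"));             // true (plain substring)
```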
@@ -28,12 +25,9 @@ class LoraParser extends BaseTagParser {
             let name = text.substring(lastSlash + 1, lastDot);
 
             let result = new AutocompleteResult(name, ResultType.lora)
-            result.highlightedText = TacFuzzy.manualHighlight(name, searchTerm);
-            result.matchSource = "base";
             result.meta = "Lora";
             result.sortKey = t[1];
             result.hash = t[2];
 
             finalResults.push(result);
         });
@@ -13,7 +13,6 @@
     "--live-translation-color-1": ["lightskyblue", "#2d89ef"],
     "--live-translation-color-2": ["palegoldenrod", "#eb5700"],
     "--live-translation-color-3": ["darkseagreen", "darkgreen"],
-    "--match-filter": ["brightness(1.2) drop-shadow(1px 1px 6px black)", "brightness(0.8)"]
 }
 const browserVars = {
     "--results-overflow-y": {
@@ -32,7 +31,8 @@ const autocompleteCSS = `
     position: absolute;
     z-index: 999;
     max-width: calc(100% - 1.5rem);
+    margin: 5px 0 0 0;
     flex-wrap: wrap;
     gap: 10px;
 }
 .autocompleteResults {
     background-color: var(--results-bg) !important;
@@ -45,11 +45,11 @@ const autocompleteCSS = `
     overflow-y: var(--results-overflow-y);
     overflow-x: hidden;
     word-break: break-word;
+    margin-top: 10px; /* Margin to create space below the cursor */
 }
 .sideInfo {
     display: none;
     position: relative;
     margin-left: 10px;
     height: 18rem;
     max-width: 16rem;
 }
@@ -91,8 +91,9 @@ const autocompleteCSS = `
     content: "✨";
     margin-right: 2px;
 }
-.acMatchHighlight {
-    filter: var(--match-filter);
+.acMetaText span.used::after {
+    content: "🔁";
+    margin-right: 2px;
 }
 .acWikiLink {
     padding: 0.5rem;
@@ -162,14 +163,6 @@ async function loadTags(c) {
         }
     }
     await loadExtraTags(c);
-
-    // Assign uFuzzy haystacks
-    tacHaystacks.tag = allTags.map(x => x[0]);
-    tacHaystacks.extra = extras.map(x => x[0]);
-    tacHaystacks.tagAlias = allTags.map(x => x[3] || "");
-    tacHaystacks.extraAlias = extras.map(e => e[3] || "");
-    tacHaystacks.translationKeys = Array.from(translations.keys());
-    tacHaystacks.translationValues = Array.from(translations.values());
 }
 
 async function loadExtraTags(c) {
@@ -246,6 +239,7 @@ async function syncOptions() {
         useStyleVars: opts["tac_useStyleVars"],
         // Insertion related settings
         replaceUnderscores: opts["tac_replaceUnderscores"],
+        replaceUnderscoresExclusionList: opts["tac_undersocreReplacementExclusionList"],
         escapeParentheses: opts["tac_escapeParentheses"],
         appendComma: opts["tac_appendComma"],
         appendSpace: opts["tac_appendSpace"],
@@ -369,10 +363,13 @@ function showResults(textArea) {
     parentDiv.style.display = "flex";
 
     if (TAC_CFG.slidingPopup) {
-        let caretPosition = getCaretCoordinates(textArea, textArea.selectionEnd).left;
-        let offset = Math.min(textArea.offsetLeft - textArea.scrollLeft + caretPosition, textArea.offsetWidth - parentDiv.offsetWidth);
+        let caretPosition = getCaretCoordinates(textArea, textArea.selectionEnd);
+        // Top cursor offset fix for SDNext modern UI, based on code by https://github.com/Nyx01
+        let offsetTop = textArea.offsetTop + caretPosition.top - textArea.scrollTop + 10; // Adjust this value for desired distance below cursor
+        let offsetLeft = Math.min(textArea.offsetLeft - textArea.scrollLeft + caretPosition.left, textArea.offsetWidth - parentDiv.offsetWidth);
 
-        parentDiv.style.left = `${offset}px`;
+        parentDiv.style.top = `${offsetTop}px`; // Position below the cursor
+        parentDiv.style.left = `${offsetLeft}px`;
     } else {
         if (parentDiv.style.left)
             parentDiv.style.removeProperty("left");
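Reviewer note: the new positioning math splits the caret coordinates into separate top and left offsets. Assuming `getCaretCoordinates` returns `{top, left}` relative to the textarea (as the caret-position helper the extension bundles does), the computation reduces to:

```js
// Sketch of the offset math: place the popup 10px below the caret and clamp
// the horizontal position so it can't overflow the textarea's right edge.
function popupOffsets(textArea, caret, popupWidth) {
    const top = textArea.offsetTop + caret.top - textArea.scrollTop + 10;
    const left = Math.min(
        textArea.offsetLeft - textArea.scrollLeft + caret.left,
        textArea.offsetWidth - popupWidth
    );
    return { top, left };
}

// popupOffsets({offsetTop: 40, offsetLeft: 8, scrollTop: 0, scrollLeft: 0, offsetWidth: 600},
//              {top: 18, left: 550}, 200) -> { top: 68, left: 400 } (left is clamped)
```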
@@ -442,8 +439,12 @@ async function insertTextAtCursor(textArea, result, tagword, tabCompletedWithout
     if (sanitizeResults && sanitizeResults.length > 0) {
         sanitizedText = sanitizeResults[0];
     } else {
-        sanitizedText = TAC_CFG.replaceUnderscores ? text.replaceAll("_", " ") : text;
+        const excluded_tags = TAC_CFG.replaceUnderscoresExclusionList?.split(',').map(s => s.trim()) || [];
+        if (TAC_CFG.replaceUnderscores && !excluded_tags.includes(sanitizedText)) {
+            sanitizedText = text.replaceAll("_", " ")
+        } else {
+            sanitizedText = text;
+        }
         if (TAC_CFG.escapeParentheses && tagType === ResultType.tag) {
             sanitizedText = sanitizedText
                 .replaceAll("(", "\\(")
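Reviewer note: the effect of the new exclusion list is that emoticon-style tags keep their underscores even with global underscore replacement enabled. A simplified standalone version of the check (the real code runs on the insertion path above; note the "undersocre" spelling of the option key is verbatim from the settings):

```js
const cfg = {
    replaceUnderscores: true,
    replaceUnderscoresExclusionList: "0_0,o_o,^_^,>_<",
};

function sanitize(text) {
    const excluded = cfg.replaceUnderscoresExclusionList?.split(",").map(s => s.trim()) || [];
    return cfg.replaceUnderscores && !excluded.includes(text) ? text.replaceAll("_", " ") : text;
}

console.log(sanitize("long_hair")); // "long hair"
console.log(sanitize("o_o"));       // "o_o" (excluded, underscores kept)
```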
@@ -633,12 +634,30 @@ async function insertTextAtCursor(textArea, result, tagword, tabCompletedWithout
     updateInput(textArea);
 
     // Update previous tags with the edited prompt to prevent re-searching the same term
-    let weightedTags = [...newPrompt.matchAll(WEIGHT_REGEX)]
-        .map(match => match[1]);
-    let tags = newPrompt.match(TAG_REGEX())
-    if (weightedTags !== null) {
-        tags = tags.filter(tag => !weightedTags.some(weighted => tag.includes(weighted)))
-            .concat(weightedTags);
+    let weightedTags = [...prompt.matchAll(WEIGHT_REGEX)]
+        .map(match => match[1])
+        .sort((a, b) => a.length - b.length);
+    let tags = [...prompt.match(TAG_REGEX())].sort((a, b) => a.length - b.length);
+
+    if (weightedTags !== null && tags !== null) {
+        // Create a working copy of the normal tags
+        let workingTags = [...tags];
+
+        // For each weighted tag
+        for (const weightedTag of weightedTags) {
+            // Find first matching tag and remove it from working set
+            const matchIndex = workingTags.findIndex(tag =>
+                tag === weightedTag && !tag.startsWith("<[") && !tag.startsWith("$(")
+            );
+
+            if (matchIndex !== -1) {
+                // Remove the matched tag from the working set
+                workingTags.splice(matchIndex, 1);
+            }
+        }
+
+        // Combine filtered normal tags with weighted tags
+        tags = workingTags.concat(weightedTags);
     }
     previousTags = tags;
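Reviewer note: the replacement logic removes at most one plain tag per weighted tag, and only on exact match, instead of filtering out every tag that merely contains the weighted text. A standalone demo of the difference:

```js
function mergeTags(tags, weightedTags) {
    const workingTags = [...tags];
    for (const weightedTag of weightedTags) {
        // Exact match only, and never touch wildcard ("<[") or variable ("$(") syntax
        const matchIndex = workingTags.findIndex(tag =>
            tag === weightedTag && !tag.startsWith("<[") && !tag.startsWith("$(")
        );
        if (matchIndex !== -1) workingTags.splice(matchIndex, 1);
    }
    return workingTags.concat(weightedTags);
}

console.log(mergeTags(["sky", "sky", "blue skydome"], ["sky"]));
// -> ["sky", "blue skydome", "sky"]: one exact duplicate is removed, while
//    "blue skydome" survives (the old includes()-based filter would drop it)
```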
@@ -673,6 +692,30 @@ function addResultsToList(textArea, results, tagword, resetList) {
     let tagColors = TAC_CFG.colorMap;
     let mode = (document.querySelector(".dark") || gradioApp().querySelector(".dark")) ? 0 : 1;
     let nextLength = Math.min(results.length, resultCount + TAC_CFG.resultStepLength);
+    const IS_DAN_OR_E621_TAG_FILE = (tagFileName.toLowerCase().startsWith("danbooru") || tagFileName.toLowerCase().startsWith("e621"));
+
+    const tagCount = {};
+
+    // Indicate if tag was used before
+    if (IS_DAN_OR_E621_TAG_FILE) {
+        const prompt = textArea.value.trim();
+        const tags = prompt.replaceAll('\n', ',').split(',').map(tag => tag.trim()).filter(tag => tag);
+
+        const unsanitizedTags = tags.map(tag => {
+            const weightedTags = [...tag.matchAll(WEIGHT_REGEX)].flat();
+            if (weightedTags.length === 2) {
+                return weightedTags[1];
+            } else {
+                // normal tags
+                return tag;
+            }
+        }).map(tag => tag.replaceAll(" ", "_").replaceAll("\\(", "(").replaceAll("\\)", ")"));
+
+        // Split tags by `,` and count tag
+        for (const tag of unsanitizedTags) {
+            tagCount[tag] = tagCount[tag] ? tagCount[tag] + 1 : 1;
+        }
+    }
 
     for (let i = resultCount; i < nextLength; i++) {
         let result = results[i];
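Reviewer note: the counting pass strips weights and converts prompt text back to tag-file form (underscores, unescaped parentheses) before counting. A self-contained demo with a simplified stand-in for the extension's `WEIGHT_REGEX`:

```js
// Simplified stand-in for the extension's WEIGHT_REGEX (assumption):
// captures the tag inside "(tag:1.2)" style weighting.
const WEIGHT_REGEX = /\(([^:()]+):[\d.]+\)/g;

function countTags(prompt) {
    const tagCount = {};
    const tags = prompt.replaceAll("\n", ",").split(",").map(t => t.trim()).filter(t => t);
    const unsanitized = tags.map(tag => {
        const m = [...tag.matchAll(WEIGHT_REGEX)].flat();
        return m.length === 2 ? m[1] : tag; // [full match, capture group]
    }).map(tag => tag.replaceAll(" ", "_").replaceAll("\\(", "(").replaceAll("\\)", ")"));
    for (const tag of unsanitized) {
        tagCount[tag] = tagCount[tag] ? tagCount[tag] + 1 : 1;
    }
    return tagCount;
}

console.log(countTags("long hair, (long hair:1.2), \\(o\\)"));
// -> { long_hair: 2, "(o)": 1 }
```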
@@ -726,31 +769,7 @@ function addResultsToList(textArea, results, tagword, resetList) {
             displayText += `[${translations.get(result.text)}]`;
 
         // Print search term bolded in result
-        if (result.highlightedText) {
-            switch (result.matchSource) {
-                case "base":
-                    itemText.innerHTML = result.highlightedText;
-                    break;
-                case "alias":
-                    let aliases = result.highlightedText.split(",");
-                    let matchingAlias = aliases.find(a => a.includes("<b class=\"acMatchHighlight\">"));
-                    itemText.innerHTML = matchingAlias + " ➝ " + result.text;
-                    break;
-                case "translatedBase":
-                    itemText.innerHTML = `${result.text}[${result.highlightedText}]`
-                    break;
-                case "translatedAlias":
-                    let tAliases = result.aliases.split(",");
-                    let tMatchingAlias = tAliases.find(a => a.includes("<b class=\"acMatchHighlight\">"));
-                    let baseTranslation = `[${translations.get(result.text)}];` || "";
-                    itemText.innerHTML = `${tMatchingAlias}[${result.highlightedText}] ➝ ${result.text}${baseTranslation}`;
-                default:
-                    itemText.innerHTML = displayText;
-                    break;
-            }
-        } else {
-            itemText.innerHTML = displayText;
-        }
+        itemText.innerHTML = displayText.replace(tagword, `<b>${tagword}</b>`);
 
         const splitTypes = [ResultType.wildcardFile, ResultType.yamlWildcard]
         if (splitTypes.includes(result.type) && itemText.innerHTML.includes("/")) {
@@ -762,29 +781,38 @@ function addResultsToList(textArea, results, tagword, resetList) {
         }
 
         // Add wiki link if the setting is enabled and a supported tag set loaded
-        if (TAC_CFG.showWikiLinks
-            && (result.type === ResultType.tag)
-            && (tagFileName.toLowerCase().startsWith("danbooru") || tagFileName.toLowerCase().startsWith("e621"))) {
+        if (
+            TAC_CFG.showWikiLinks &&
+            result.type === ResultType.tag &&
+            IS_DAN_OR_E621_TAG_FILE
+        ) {
             let wikiLink = document.createElement("a");
             wikiLink.classList.add("acWikiLink");
             wikiLink.innerText = "?";
-            wikiLink.title = "Open external wiki page for this tag"
+            wikiLink.title = "Open external wiki page for this tag";
 
             let linkPart = displayText;
             // Only use alias result if it is one
-            if (displayText.includes("➝"))
-                linkPart = displayText.split(" ➝ ")[1];
+            if (displayText.includes("➝")) linkPart = displayText.split(" ➝ ")[1];
 
             // Remove any trailing translations
             if (linkPart.includes("[")) {
-                linkPart = linkPart.split("[")[0]
+                linkPart = linkPart.split("[")[0];
             }
 
             linkPart = encodeURIComponent(linkPart);
 
             // Set link based on selected file
             let tagFileNameLower = tagFileName.toLowerCase();
-            if (tagFileNameLower.startsWith("danbooru")) {
+            if (tagFileNameLower.startsWith("danbooru_e621_merged")) {
+                // Use danbooru for categories 0-5, e621 for 6+
+                // Based on the merged categories from https://github.com/DraconicDragon/dbr-e621-lists-archive/tree/main/tag-lists/danbooru_e621_merged
+                // Danbooru is also the fallback if result.category is not set
+                wikiLink.href =
+                    result.category && result.category >= 6
+                        ? `https://e621.net/wiki_pages/${linkPart}`
+                        : `https://danbooru.donmai.us/wiki_pages/${linkPart}`;
+            } else if (tagFileNameLower.startsWith("danbooru")) {
                 wikiLink.href = `https://danbooru.donmai.us/wiki_pages/${linkPart}`;
             } else if (tagFileNameLower.startsWith("e621")) {
                 wikiLink.href = `https://e621.net/wiki_pages/${linkPart}`;
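Reviewer note: for the merged danbooru/e621 list, the wiki target is picked per result category. Reduced to a small function:

```js
function wikiUrlFor(linkPart, category) {
    // Categories 6+ come from the e621 side of the merged list;
    // danbooru is the fallback when category is unset.
    return category && category >= 6
        ? `https://e621.net/wiki_pages/${linkPart}`
        : `https://danbooru.donmai.us/wiki_pages/${linkPart}`;
}

console.log(wikiUrlFor(encodeURIComponent("long_hair"), 0)); // danbooru wiki
console.log(wikiUrlFor(encodeURIComponent("feral"), 7));     // e621 wiki
```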
@@ -849,13 +877,25 @@ function addResultsToList(textArea, results, tagword, resetList) {
         // Add small ✨ marker to indicate usage sorting
         if (result.usageBias) {
             flexDiv.querySelector(".acMetaText").classList.add("biased");
-            flexDiv.title = "✨ Frequent tag. Ctrl/Cmd + click to reset usage count."
+            flexDiv.title = "✨ Frequent tag. Ctrl/Cmd + click to reset usage count.";
         }
 
+        // Add 🔁 to indicate if tag was used before
+        if (IS_DAN_OR_E621_TAG_FILE && tagCount[result.text]) {
+            // Fix PR#313#issuecomment-2592551794
+            if (!(result.text === tagword && tagCount[result.text] === 1)) {
+                const textNode = flexDiv.querySelector(".acMetaText");
+                const span = document.createElement("span");
+                textNode.insertBefore(span, textNode.firstChild);
+                span.classList.add("used");
+                span.title = "🔁 The prompt already contains this tag";
+            }
+        }
+
         // Check if it's a negative prompt
         let isNegative = textAreaId.includes("n");
 
-        // Add listener
+        // Add click listener
         li.addEventListener("click", (e) => {
             if (e.ctrlKey || e.metaKey) {
                 resetUseCount(result.text, result.type, !isNegative, isNegative);
@@ -864,6 +904,38 @@ function addResultsToList(textArea, results, tagword, resetList) {
                 insertTextAtCursor(textArea, result, tagword);
             }
         });
+        // Add delayed hover listener for extra network previews
+        if (
+            TAC_CFG.showExtraNetworkPreviews &&
+            [
+                ResultType.embedding,
+                ResultType.hypernetwork,
+                ResultType.lora,
+                ResultType.lyco,
+            ].includes(result.type)
+        ) {
+            li.addEventListener("mouseover", async () => {
+                const me = this;
+                let hoverTimeout;
+
+                hoverTimeout = setTimeout(async () => {
+                    // If the tag we hover over is already selected, do nothing
+                    if (selectedTag && selectedTag === i) return;
+
+                    oldSelectedTag = selectedTag;
+                    selectedTag = i;
+
+                    // Update selection without scrolling to the item (since we would
+                    // immediately trigger the next scroll as the items move under the cursor)
+                    updateSelectionStyle(textArea, selectedTag, oldSelectedTag, false);
+                }, 400);
+                // Reset delay timer if we leave the item
+                me.addEventListener("mouseout", () => {
+                    clearTimeout(hoverTimeout);
+                });
+            });
+        }
 
         // Add element to list
         resultsList.appendChild(li);
     }
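Reviewer note: this is a delayed-hover pattern, arm a timer on mouseover and disarm it on mouseout so the preview only triggers once the cursor settles. In generic form (the `{ once: true }` option here is my addition, so the mouseout handler doesn't accumulate across repeated hovers as it can in the hunk above):

```js
function addDelayedHover(element, delayMs, onHover) {
    element.addEventListener("mouseover", () => {
        const hoverTimeout = setTimeout(onHover, delayMs);
        // Disarm the pending preview if the cursor leaves before the delay elapses
        element.addEventListener("mouseout", () => clearTimeout(hoverTimeout), { once: true });
    });
}

// Usage matching the hunk above (names assumed from context):
// addDelayedHover(li, 400, () => updateSelectionStyle(textArea, i, oldSelectedTag, false));
```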
@@ -876,7 +948,7 @@ function addResultsToList(textArea, results, tagword, resetList) {
     }
 }
 
-async function updateSelectionStyle(textArea, newIndex, oldIndex) {
+async function updateSelectionStyle(textArea, newIndex, oldIndex, scroll = true) {
     let textAreaId = getTextAreaIdentifier(textArea);
     let resultDiv = gradioApp().querySelector('.autocompleteResults' + textAreaId);
     let resultsList = resultDiv.querySelector('ul');
@@ -891,8 +963,8 @@ async function updateSelectionStyle(textArea, newIndex, oldIndex) {
         let selected = items[newIndex];
         selected.classList.add('selected');
 
-        // Set scrolltop to selected item
-        resultDiv.scrollTop = selected.offsetTop - resultDiv.offsetTop;
+        // Set scrolltop to selected item
+        if (scroll) resultDiv.scrollTop = selected.offsetTop - resultDiv.offsetTop;
     }
 
     // Show preview if enabled and the selected type supports it
@@ -1076,11 +1148,29 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
     // Match tags with RegEx to get the last edited one
     // We also match for the weighting format (e.g. "tag:1.0") here, and combine the two to get the full tag word set
     let weightedTags = [...prompt.matchAll(WEIGHT_REGEX)]
-        .map(match => match[1]);
-    let tags = prompt.match(TAG_REGEX())
+        .map(match => match[1])
+        .sort((a, b) => a.length - b.length);
+    let tags = [...prompt.match(TAG_REGEX())].sort((a, b) => a.length - b.length);
 
     if (weightedTags !== null && tags !== null) {
-        tags = tags.filter(tag => !weightedTags.some(weighted => tag.includes(weighted) && !tag.startsWith("<[") && !tag.startsWith("$(")))
-            .concat(weightedTags);
+        // Create a working copy of the normal tags
+        let workingTags = [...tags];
+
+        // For each weighted tag
+        for (const weightedTag of weightedTags) {
+            // Find first matching tag and remove it from working set
+            const matchIndex = workingTags.findIndex(tag =>
+                tag === weightedTag && !tag.startsWith("<[") && !tag.startsWith("$(")
+            );
+
+            if (matchIndex !== -1) {
+                // Remove the matched tag from the working set
+                workingTags.splice(matchIndex, 1);
+            }
+        }
+
+        // Combine filtered normal tags with weighted tags
+        tags = workingTags.concat(weightedTags);
     }
 
     // Guard for no tags
@@ -1161,7 +1251,6 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
     else
         fil = (x) => baseFilter(x);
 
-    /*
     // Add final results
     allTags.filter(fil).forEach(t => {
         let result = new AutocompleteResult(t[0].trim(), ResultType.tag)
@@ -1170,78 +1259,25 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
         result.aliases = t[3];
         results.push(result);
     });
-    */
 
-    let tagIndexSet = new Set()
-    let extraIndexSet = new Set();
-    // Here we store the values instead of the index, as the same translation can apply to different keys
-    // For searching, only the first result is relevant (assuming it is also the best match)
-    let translatedValueSet = new Set();
     // Add extras
     if (TAC_CFG.extra.extraFile) {
         let extraResults = [];
 
-        let tagOut = [];
-        let extraOut = [];
-        // Base text search
-        TacFuzzy.assignResults(TacFuzzy.search(tacHaystacks.tag, tagword),"base", "tag", tagIndexSet, tagOut)
-        TacFuzzy.assignResults(TacFuzzy.search(tacHaystacks.extra, tagword), "base", "extra", extraIndexSet, extraOut)
-        // Alias search
-        if (TAC_CFG.alias.searchByAlias) {
-            TacFuzzy.assignResults(TacFuzzy.search(tacHaystacks.tagAlias, tagword), "alias", "tag", tagIndexSet, tagOut)
-            TacFuzzy.assignResults(TacFuzzy.search(tacHaystacks.extraAlias, tagword), "alias", "extra", extraIndexSet, extraOut)
-        }
-        // Translation search
-        if (TAC_CFG.translation.searchByTranslation) {
-            // Translations need special treatment as they can belong to both tags and extras and need lookup based on their keys.
-            // We also use unicode search here, slower but needed for non-latin translations.
-            TacFuzzy.search(tacHaystacks.translationValues, tagword, true).forEach(pair => {
-                const idx = pair[0];
-                const orderIdx = pair[1];
-                const translationKey = tacHaystacks.translationKeys[idx];
-
-                // Placeholder to make sure we never access an index of null if no matching key was found
-                const notFound = [null, null, null, null];
-
-                // Check if the translation belongs to a tag or its alias. Only search alias if no base text found.
-                const translatedTagBase = allTags.find(t => t[0] === translationKey);
-                const translatedTagAlias = !translatedTagBase
-                    ? allTags.find(t => t[3]?.split(",").some(a => a === translationKey))
-                    : null;
-                const translatedTag = translatedTagBase || translatedTagAlias || notFound; // Combined result for easier checks later
-                // Check if the translation belongs to an extra or its alias. Only search alias if no base text found.
-                const translatedExtraBase = extras.find(e => e[0] === translationKey);
-                const translatedExtraAlias = !translatedExtraBase
-                    ? extras.find(e => e[3]?.split(",").some(a => a === translationKey))
-                    : null;
-                const translatedExtra = translatedExtraBase || translatedExtraAlias || notFound; // Combined result for easier checks later
-
-                // For translations, we can sadly only exit early after making sure we don't have a duplicate (which is most of the work).
-                // This is a side effect of translations mapping to multiple keys for the search direction, eg. different aliases.
-                if (translatedValueSet.has(translatedTag[0] || translatedExtra[0])) return;
-
-                const resultType = translatedTag[0] ? ResultType.tag : ResultType.extra;
-                const result = new AutocompleteResult(translatedTag[0] || translatedExtra[0], resultType);
-                result.highlightedText = TacFuzzy.toStr(orderIdx);
-
-                result.matchSource = (translatedTagBase || translatedExtraBase) ? "translatedBase" : "translatedAlias";
-                result.category = translatedTag[1] || translatedExtra[1] || 0;
-
-                if (translatedTag[0])
-                    result.count = translatedTag[2] || 0;
-                else if (translatedExtra[0])
-                    result.meta = translatedExtra[2] || "Custom tag";
-
-                result.aliases = translatedTag[3] || translatedExtra[3] || "";
-
-                if (translatedTag[0]) {
-                    tagOut.push(result);
-                } else if (translatedExtra[0]) {
-                    extraOut.push(result);
-                }
-                translatedValueSet.add(translatedTag[0] || translatedExtra[0]);
+        extras.filter(fil).forEach(e => {
+            let result = new AutocompleteResult(e[0].trim(), ResultType.extra)
+            result.category = e[1] || 0; // If no category is given, use 0 as the default
+            result.meta = e[2] || "Custom tag";
+            result.aliases = e[3] || "";
+            extraResults.push(result);
+        });
-        }
 
-        // Append results for each set
-        results = results.concat([...extraOut]).concat([...tagOut]);
+        if (TAC_CFG.extra.addMode === "Insert before") {
+            results = extraResults.concat(results);
+        } else {
+            results = results.concat(extraResults);
+        }
     }
 
     // Guard for empty results
@@ -1263,7 +1299,7 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
         const name = r.type === ResultType.chant ? r.aliases : r.text;
         // Add to alias list or tag list depending on if the name includes the tagword
         // (the same criteria is used in the filter in calculateUsageBias)
-        if (aliasTypes.includes(r.type) && r.matchSource === "alias") {
+        if (aliasTypes.includes(r.type) && !name.includes(tagword)) {
            aliasNames.push(name);
         } else {
            tagNames.push(name);
@@ -1479,6 +1515,12 @@ function addAutocompleteToArea(area) {
         if (!e.inputType && !tacSelfTrigger) return;
         tacSelfTrigger = false;
 
+        // Block hide while we are composing (IME), so enter doesn't close the results
+        if (e.isComposing) {
+            hideBlocked = true;
+            setTimeout(() => { hideBlocked = false; }, 100);
+        }
+
         debounce(autocomplete(area, area.value), TAC_CFG.delayTime);
         checkKeywordInsertionUndo(area, e);
     });
@@ -1541,9 +1583,16 @@ async function setup() {
     gradioApp().querySelector("#refresh_tac_refreshTempFiles")?.addEventListener("click", refreshTacTempFiles);
 
     // Also add listener for external network refresh button (plus triggering python code)
-    ["#img2img_extra_refresh", "#txt2img_extra_refresh"].forEach(e => {
-        gradioApp().querySelector(e)?.addEventListener("click", ()=>{
-            refreshTacTempFiles(true);
+    let alreadyAdded = new Set();
+    ["#img2img_extra_refresh", "#txt2img_extra_refresh", ".extra-network-control--refresh"].forEach(e => {
+        const elems = gradioApp().querySelectorAll(e);
+        elems.forEach(elem => {
+            if (!elem || alreadyAdded.has(elem)) return;
+
+            alreadyAdded.add(elem);
+            elem.addEventListener("click", ()=>{
+                refreshTacTempFiles(true);
+            });
         });
     })
@@ -1597,7 +1646,7 @@ async function setup() {
     } else {
         acStyle.appendChild(document.createTextNode(css));
     }
-    gradioApp().appendChild(acStyle);
+    document.head.appendChild(acStyle);
 
     // Callback
     await processQueue(QUEUE_AFTER_SETUP, null);
@@ -20,9 +20,27 @@ except ImportError:
 TAGS_PATH = Path(scripts.basedir()).joinpath("tags").absolute()
 
 # The path to the folder containing the wildcards and embeddings
-WILDCARD_PATH = FILE_DIR.joinpath("scripts/wildcards").absolute()
+try: # SD.Next
+    WILDCARD_PATH = Path(shared.opts.wildcards_dir).absolute()
+except Exception: # A1111
+    WILDCARD_PATH = FILE_DIR.joinpath("scripts/wildcards").absolute()
 EMB_PATH = Path(shared.cmd_opts.embeddings_dir).absolute()
-HYP_PATH = Path(shared.cmd_opts.hypernetwork_dir).absolute()
+
+# Forge Classic detection
+try:
+    from modules_forge.forge_version import version as forge_version
+    IS_FORGE_CLASSIC = forge_version == "classic"
+except ImportError:
+    IS_FORGE_CLASSIC = False
+
+# Forge Classic skips it
+if not IS_FORGE_CLASSIC:
+    try:
+        HYP_PATH = Path(shared.cmd_opts.hypernetwork_dir).absolute()
+    except AttributeError:
+        HYP_PATH = None
+else:
+    HYP_PATH = None
 
 try:
     LORA_PATH = Path(shared.cmd_opts.lora_dir).absolute()
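Reviewer note: the pattern throughout this hunk is capability detection, try the platform-specific option or import first and fall back cleanly, instead of branching on webui names. A minimal sketch of the wildcard-path part (`opts` stands in for `shared.opts`):

```python
from pathlib import Path

def resolve_wildcard_path(opts, file_dir: Path) -> Path:
    try:  # SD.Next exposes the wildcard dir as an option
        return Path(opts.wildcards_dir).absolute()
    except Exception:  # A1111: wildcards live inside the extension folder
        return file_dir.joinpath("scripts/wildcards").absolute()
```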
@@ -1,19 +1,20 @@
 # This helper script scans folders for wildcards and embeddings and writes them
 # to a temporary file to expose it to the javascript side
 
-import sys
 import glob
+import importlib
 import json
 import sqlite3
+import sys
 import urllib.parse
 from asyncio import sleep
 from pathlib import Path
 
 import gradio as gr
 import yaml
 from fastapi import FastAPI
-from fastapi.responses import Response, FileResponse, JSONResponse
-from modules import script_callbacks, sd_hijack, shared, hashes
+from fastapi.responses import FileResponse, JSONResponse, Response
+from modules import hashes, script_callbacks, sd_hijack, sd_models, shared
 from pydantic import BaseModel
 
 from scripts.model_keyword_support import (get_lora_simple_hash,
@@ -25,7 +26,7 @@ try:
     try:
         from scripts import tag_frequency_db as tdb
     except ModuleNotFoundError:
-        from inspect import getframeinfo, currentframe
+        from inspect import currentframe, getframeinfo
         filename = getframeinfo(currentframe()).filename
         parent = Path(filename).resolve().parent
         sys.path.append(str(parent))
@@ -41,9 +42,32 @@ except (ImportError, ValueError, sqlite3.Error) as e:
     print(f"Tag Autocomplete: Tag frequency database error - \"{e}\"")
     db = None
 
+def get_embed_db(sd_model=None):
+    """Returns the embedding database, if available."""
+    try:
+        return sd_hijack.model_hijack.embedding_db
+    except Exception:
+        try: # sd next with diffusers backend
+            sdnext_model = sd_model if sd_model is not None else shared.sd_model
+            return sdnext_model.embedding_db
+        except Exception:
+            try: # forge webui
+                forge_model = sd_model if sd_model is not None else sd_models.model_data.get_sd_model()
+                if type(forge_model).__name__ == "FakeInitialModel":
+                    return None
+                else:
+                    processer = getattr(forge_model, "text_processing_engine", getattr(forge_model, "text_processing_engine_l"))
+                    return processer.embeddings
+            except Exception:
+                return None
+
 # Attempt to get embedding load function, using the same call as api.
 try:
-    load_textual_inversion_embeddings = sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings
+    embed_db = get_embed_db()
+    if embed_db is not None:
+        load_textual_inversion_embeddings = embed_db.load_textual_inversion_embeddings
+    else:
+        load_textual_inversion_embeddings = lambda *args, **kwargs: None
 except Exception as e: # Not supported.
     load_textual_inversion_embeddings = lambda *args, **kwargs: None
     print("Tag Autocomplete: Cannot reload embeddings instantly:", e)
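Reviewer note: `get_embed_db` is a three-level fallback chain (A1111 hijack, SD.Next diffusers backend, Forge). The same shape can be expressed as a loop over candidate accessors; a generic sketch, not the extension's actual code:

```python
def first_available(*accessors):
    """Return the first accessor result that exists and is not None."""
    for accessor in accessors:
        try:
            result = accessor()
            if result is not None:
                return result
        except Exception:
            continue
    return None

# Hypothetical usage mirroring the chain above:
# embed_db = first_available(
#     lambda: sd_hijack.model_hijack.embedding_db,  # A1111
#     lambda: shared.sd_model.embedding_db,         # SD.Next (diffusers)
# )
```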
@@ -51,8 +75,8 @@ except Exception as e: # Not supported.
 # Sorting functions for extra networks / embeddings stuff
 sort_criteria = {
     "Name": lambda path, name, subpath: name.lower() if subpath else path.stem.lower(),
-    "Date Modified (newest first)": lambda path, name, subpath: path.stat().st_mtime,
-    "Date Modified (oldest first)": lambda path, name, subpath: path.stat().st_mtime
+    "Date Modified (newest first)": lambda path, name, subpath: path.stat().st_mtime if path.exists() else name.lower(),
+    "Date Modified (oldest first)": lambda path, name, subpath: path.stat().st_mtime if path.exists() else name.lower()
 }
 
 def sort_models(model_list, sort_method = None, name_has_subpath = False):
@@ -74,9 +98,9 @@ def sort_models(model_list, sort_method = None, name_has_subpath = False):
     # During merging on the JS side we need to re-sort anyway, so here only the sort criteria are calculated.
     # The list itself doesn't need to get sorted at this point.
     if len(model_list[0]) > 2:
-        results = [f'{name},"{sorter(path, name, name_has_subpath)}",{meta}' for path, name, meta in model_list]
+        results = [f'"{name}","{sorter(path, name, name_has_subpath)}",{meta}' for path, name, meta in model_list]
     else:
-        results = [f'{name},"{sorter(path, name, name_has_subpath)}"' for path, name in model_list]
+        results = [f'"{name}","{sorter(path, name, name_has_subpath)}"' for path, name in model_list]
     return results
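Reviewer note: the quoting change matters because these strings are parsed as CSV-like rows on the JS side; an unquoted model name containing a comma would split into extra columns. For example:

```python
name, sort_key, meta = "my,model", "1700000000.0", "v1"
print(f'{name},"{sort_key}",{meta}')    # my,model,"1700000000.0",v1   -> name splits into two fields
print(f'"{name}","{sort_key}",{meta}')  # "my,model","1700000000.0",v1 -> name stays one quoted field
```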
@@ -110,7 +134,11 @@ def is_umi_format(data):
     """Returns True if the YAML file is in UMI format."""
     issue_found = False
     for item in data:
-        if not (data[item] and 'Tags' in data[item] and isinstance(data[item]['Tags'], list)):
+        try:
+            if not (data[item] and 'Tags' in data[item] and isinstance(data[item]['Tags'], list)):
+                issue_found = True
+                break
+        except:
             issue_found = True
             break
     return not issue_found
@@ -132,9 +160,12 @@ def parse_dynamic_prompt_format(yaml_wildcards, data, path):
         elif not (isinstance(value, list) and all(isinstance(v, str) for v in value)):
             del d[key]
 
-    recurse_dict(data)
-    # Add to yaml_wildcards
-    yaml_wildcards[path.name] = data
+    try:
+        recurse_dict(data)
+        # Add to yaml_wildcards
+        yaml_wildcards[path.name] = data
+    except:
+        return
 
 
 def get_yaml_wildcards():
@@ -159,9 +190,13 @@ def get_yaml_wildcards():
                     parse_dynamic_prompt_format(yaml_wildcards, data, path)
             else:
                 print('No data found in ' + path.name)
-        except (yaml.YAMLError, UnicodeDecodeError) as e:
+        except (yaml.YAMLError, UnicodeDecodeError, AttributeError, TypeError) as e:
             # YAML file not in wildcard format or couldn't be read
             print(f'Issue in parsing YAML file {path.name}: {e}')
             continue
+        except Exception as e:
+            # Something else went wrong, just skip
+            continue
 
     # Sort by count
     umi_sorted = sorted(umi_tags.items(), key=lambda item: item[1], reverse=True)
@@ -190,35 +225,45 @@ def get_embeddings(sd_model):
     results = []
 
     try:
-        # The sd_model embedding_db reference only exists in sd.next with diffusers backend
-        try:
-            loaded_sdnext = sd_model.embedding_db.word_embeddings
-            skipped_sdnext = sd_model.embedding_db.skipped_embeddings
-        except (NameError, AttributeError):
-            loaded_sdnext = {}
-            skipped_sdnext = {}
+        embed_db = get_embed_db(sd_model)
+        # Re-register callback if needed
+        global load_textual_inversion_embeddings
+        if embed_db is not None and load_textual_inversion_embeddings != embed_db.load_textual_inversion_embeddings:
+            load_textual_inversion_embeddings = embed_db.load_textual_inversion_embeddings
 
         # Get embedding dict from sd_hijack to separate v1/v2 embeddings
-        loaded = sd_hijack.model_hijack.embedding_db.word_embeddings
-        skipped = sd_hijack.model_hijack.embedding_db.skipped_embeddings
-        loaded = loaded | loaded_sdnext
-        skipped = skipped | skipped_sdnext
+        loaded = embed_db.word_embeddings
+        skipped = embed_db.skipped_embeddings
 
         # Add embeddings to the correct list
         for key, emb in (skipped | loaded).items():
-            if emb.filename is None:
-                continue
-
-            if emb.shape is None:
-                emb_unknown.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), ""))
-            elif emb.shape == V1_SHAPE:
-                emb_v1.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "v1"))
-            elif emb.shape == V2_SHAPE:
-                emb_v2.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "v2"))
-            elif emb.shape == VXL_SHAPE:
-                emb_vXL.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "vXL"))
-            else:
-                emb_unknown.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), ""))
+            filename = getattr(emb, "filename", None)
+
+            if filename is None:
+                if emb.shape is None:
+                    emb_unknown.append((Path(key), key, ""))
+                elif emb.shape == V1_SHAPE:
+                    emb_v1.append((Path(key), key, "v1"))
+                elif emb.shape == V2_SHAPE:
+                    emb_v2.append((Path(key), key, "v2"))
+                elif emb.shape == VXL_SHAPE:
+                    emb_vXL.append((Path(key), key, "vXL"))
+                else:
+                    emb_unknown.append((Path(key), key, ""))
+
+            else:
+                if emb.filename is None:
+                    continue
+
+                if emb.shape is None:
+                    emb_unknown.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), ""))
+                elif emb.shape == V1_SHAPE:
+                    emb_v1.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "v1"))
+                elif emb.shape == V2_SHAPE:
+                    emb_v2.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "v2"))
+                elif emb.shape == VXL_SHAPE:
+                    emb_vXL.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "vXL"))
+                else:
+                    emb_unknown.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), ""))
 
         results = sort_models(emb_v1) + sort_models(emb_v2) + sort_models(emb_vXL) + sort_models(emb_unknown)
     except AttributeError:
@@ -289,7 +334,7 @@ try:
     import sys
     from modules import extensions
     sys.path.append(Path(extensions.extensions_builtin_dir).joinpath("Lora").as_posix())
-    import lora # pyright: ignore [reportMissingImports]
+    import lora  # pyright: ignore [reportMissingImports]
 
     def _get_lora():
         return [
@@ -430,8 +475,11 @@ def refresh_embeddings(force: bool, *args, **kwargs):
     # Fix for SD.Next infinite refresh loop due to gradio not updating after model load on demand.
     # This will just skip embedding loading if no model is loaded yet (or there really are no embeddings).
     # Try catch is just for safety incase sd_hijack access fails for some reason.
-    loaded = sd_hijack.model_hijack.embedding_db.word_embeddings
-    skipped = sd_hijack.model_hijack.embedding_db.skipped_embeddings
+    embed_db = get_embed_db()
+    if embed_db is None:
+        return
+    loaded = embed_db.word_embeddings
+    skipped = embed_db.skipped_embeddings
     if len((loaded | skipped)) > 0:
         load_textual_inversion_embeddings(force_reload=force)
         get_embeddings(None)
@@ -444,7 +492,8 @@ def refresh_temp_files(*args, **kwargs):
     if skip_wildcard_refresh:
         WILDCARD_EXT_PATHS = find_ext_wildcard_paths()
     write_temp_files(skip_wildcard_refresh)
-    refresh_embeddings(force=True)
+    force_embed_refresh = getattr(shared.opts, "tac_forceRefreshEmbeddings", False)
+    refresh_embeddings(force=force_embed_refresh)
 
 def write_style_names(*args, **kwargs):
     styles = get_style_names()
@@ -454,7 +503,14 @@ def write_style_names(*args, **kwargs):
 def write_temp_files(skip_wildcard_refresh = False):
     # Write wildcards to wc.txt if found
     if WILDCARD_PATH.exists() and not skip_wildcard_refresh:
-        wildcards = [WILDCARD_PATH.relative_to(FILE_DIR).as_posix()] + get_wildcards()
+        try:
+            # Attempt to create a relative path, but fall back to an absolute path if not possible
+            relative_wildcard_path = WILDCARD_PATH.relative_to(FILE_DIR).as_posix()
+        except ValueError:
+            # If the paths are not relative, use the absolute path
+            relative_wildcard_path = WILDCARD_PATH.as_posix()
+
+        wildcards = [relative_wildcard_path] + get_wildcards()
         if wildcards:
             write_to_temp_file('wc.txt', wildcards)
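Reviewer note: `Path.relative_to` raises `ValueError` when the wildcard directory is not under the webui folder (for example, on another drive), which is exactly the case the new try/except covers:

```python
from pathlib import Path

def portable_path(target: Path, base: Path) -> str:
    try:
        return target.relative_to(base).as_posix()
    except ValueError:  # target is outside base, e.g. a different drive
        return target.as_posix()

print(portable_path(Path("/webui/scripts/wildcards"), Path("/webui")))  # scripts/wildcards
print(portable_path(Path("/data/wildcards"), Path("/webui")))           # /data/wildcards
```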
@@ -466,7 +522,7 @@ def write_temp_files(skip_wildcard_refresh = False):
     # Write yaml extension wildcards to umi_tags.txt and wc_yaml.json if found
     get_yaml_wildcards()
 
-    if HYP_PATH.exists():
+    if HYP_PATH is not None and HYP_PATH.exists():
         hypernets = get_hypernetworks()
         if hypernets:
             write_to_temp_file('hyp.txt', hypernets)
@@ -541,6 +597,7 @@ def on_ui_settings():
     "tac_wildcardExclusionList": shared.OptionInfo("", "Wildcard folder exclusion list").info("Add folder names that shouldn't be searched for wildcards, separated by comma.").needs_restart(),
     "tac_skipWildcardRefresh": shared.OptionInfo(False, "Don't re-scan for wildcard files when pressing the extra networks refresh button").info("Useful to prevent hanging if you use a very large wildcard collection."),
     "tac_useEmbeddings": shared.OptionInfo(True, "Search for embeddings"),
+    "tac_forceRefreshEmbeddings": shared.OptionInfo(False, "Force refresh embeddings when pressing the extra networks refresh button").info("Turn this on if you have issues with new embeddings not registering correctly in TAC. Warning: Seems to cause reloading issues in gradio for some users."),
     "tac_includeEmbeddingsInNormalResults": shared.OptionInfo(False, "Include embeddings in normal tag results").info("The 'JumpTo...' keybinds (End & Home key by default) will select the first non-embedding result of their direction on the first press for quick navigation in longer lists."),
     "tac_useHypernetworks": shared.OptionInfo(True, "Search for hypernetworks"),
     "tac_useLoras": shared.OptionInfo(True, "Search for Loras"),
@@ -559,6 +616,7 @@ def on_ui_settings():
     "tac_frequencyIncludeAlias": shared.OptionInfo(False, "Frequency sorting matches aliases for frequent tags").info("Tag frequency will be increased for the main tag even if an alias is used for completion. This option can be used to override the default behavior of alias results being ignored for frequency sorting."),
     # Insertion related settings
     "tac_replaceUnderscores": shared.OptionInfo(True, "Replace underscores with spaces on insertion"),
+    "tac_undersocreReplacementExclusionList": shared.OptionInfo("0_0,(o)_(o),+_+,+_-,._.,<o>_<o>,<|>_<|>,=_=,>_<,3_3,6_9,>_o,@_@,^_^,o_o,u_u,x_x,|_|,||_||", "Underscore replacement exclusion list").info("Add tags that shouldn't have underscores replaced with spaces, separated by comma."),
     "tac_escapeParentheses": shared.OptionInfo(True, "Escape parentheses on insertion"),
     "tac_appendComma": shared.OptionInfo(True, "Append comma on tag autocompletion"),
     "tac_appendSpace": shared.OptionInfo(True, "Append space on tag autocompletion").info("will append after comma if the above is enabled"),
@@ -635,6 +693,23 @@ def on_ui_settings():
         "9": ["#df3647", "#8e1c2b"],
         "10": ["#c98f2b", "#7b470e"],
         "11": ["#e87ebe", "#a83583"]
+    },
+    "danbooru_e621_merged": {
+        "-1": ["red", "maroon"],
+        "0": ["lightblue", "dodgerblue"],
+        "1": ["indianred", "firebrick"],
+        "3": ["violet", "darkorchid"],
+        "4": ["lightgreen", "darkgreen"],
+        "5": ["orange", "darkorange"],
+        "6": ["red", "maroon"],
+        "7": ["lightblue", "dodgerblue"],
+        "8": ["gold", "goldenrod"],
+        "9": ["gold", "goldenrod"],
+        "10": ["violet", "darkorchid"],
+        "11": ["lightgreen", "darkgreen"],
+        "12": ["tomato", "darksalmon"],
+        "14": ["whitesmoke", "black"],
+        "15": ["seagreen", "darkseagreen"]
     }
 }\
 """
@@ -673,7 +748,7 @@ def api_tac(_: gr.Blocks, app: FastAPI):
         return Response(status_code=404)
 
     try:
-        json_candidates = glob.glob(base_path.as_posix() + f"/**/{filename}.json", recursive=True)
+        json_candidates = glob.glob(base_path.as_posix() + f"/**/{glob.escape(filename)}.json", recursive=True)
         if json_candidates is not None and len(json_candidates) > 0 and Path(json_candidates[0]).is_file():
             return FileResponse(json_candidates[0])
     except Exception as e:
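Reviewer note: this and the two analogous changes further down guard against model filenames containing glob metacharacters. Without escaping, brackets in a name are treated as a character class rather than literal text:

```python
import glob

filename = "style[v2]"
print(f"/**/{filename}.json")               # /**/style[v2].json   -> "[v2]" acts as a character class
print(f"/**/{glob.escape(filename)}.json")  # /**/style[[]v2].json -> matches the literal filename
```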
@@ -684,7 +759,7 @@ def api_tac(_: gr.Blocks, app: FastAPI):
         return Response(status_code=404)
 
     try:
-        img_glob = glob.glob(base_path.as_posix() + f"/**/{filename}.*", recursive=True)
+        img_glob = glob.glob(base_path.as_posix() + f"/**/{glob.escape(filename)}.*", recursive=True)
         img_candidates = [img for img in img_glob if Path(img).suffix in [".png", ".jpg", ".jpeg", ".webp", ".gif"] and Path(img).is_file()]
         if img_candidates is not None and len(img_candidates) > 0:
             if blob:
@@ -696,6 +771,7 @@ def api_tac(_: gr.Blocks, app: FastAPI):
 
     @app.post("/tacapi/v1/refresh-temp-files")
     async def api_refresh_temp_files():
+        await sleep(0) # might help with refresh blocking gradio
         refresh_temp_files()
 
     @app.post("/tacapi/v1/refresh-embeddings")
@@ -712,7 +788,7 @@ def api_tac(_: gr.Blocks, app: FastAPI):
 
     @app.get("/tacapi/v1/lora-cached-hash/{lora_name}")
     async def get_lora_cached_hash(lora_name: str):
-        path_glob = glob.glob(LORA_PATH.as_posix() + f"/**/{lora_name}.*", recursive=True)
+        path_glob = glob.glob(LORA_PATH.as_posix() + f"/**/{glob.escape(lora_name)}.*", recursive=True)
         paths = [lora for lora in path_glob if Path(lora).suffix in [".safetensors", ".ckpt", ".pt"] and Path(lora).is_file()]
         if paths is not None and len(paths) > 0:
             path = paths[0]
@@ -831,5 +907,5 @@ def api_tac(_: gr.Blocks, app: FastAPI):
     @app.get("/tacapi/v1/get-all-use-counts")
     async def get_all_tag_counts():
         return db_request(lambda: db.get_all_tags(), get=True)
 
 
 script_callbacks.on_app_started(api_tac)
238328  tags/danbooru.csv (diff suppressed because it is too large)
221787  tags/danbooru_e621_merged.csv (new file; diff suppressed because one or more lines are too long)
126978  tags/derpibooru.csv (diff suppressed because it is too large)
200358  tags/e621.csv (diff suppressed because one or more lines are too long)
160178  tags/noob_characters-chants.json (new file; diff suppressed because it is too large)