Mirror of https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git (synced 2026-01-27 03:29:55 +00:00)
Compare commits
39 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 7fdad1bf62 |  |
|  | a91a098243 |  |
|  | c663abcbcb |  |
|  | bec222f2b3 |  |
|  | d4db6a7907 |  |
|  | 52593e6ac8 |  |
|  | 849e346924 |  |
|  | 25b285bea3 |  |
|  | 984a7e772a |  |
|  | 964b4fcff3 |  |
|  | 54641ddbfc |  |
|  | c048684909 |  |
|  | da9acfea2a |  |
|  | 552c6517b8 |  |
|  | f626eb3467 |  |
|  | 2ba513bedc |  |
|  | 89d36da47e |  |
|  | 5f2f746310 |  |
|  | 454c13ef6d |  |
|  | 6deefda279 |  |
|  | b57042edd0 |  |
|  | ceba61163e |  |
|  | 16201605d0 |  |
|  | 0c3397aee6 |  |
|  | 4f582f4528 |  |
|  | d2b5142d7d |  |
|  | f11abe60c2 |  |
|  | 16bf9d9a51 |  |
|  | bdd8cf68c7 |  |
|  | 63a0d2e73e |  |
|  | 34ba08d804 |  |
|  | f1a437ff48 |  |
|  | 97cbada882 |  |
|  | 860a4034bb |  |
|  | 255d7420fd |  |
|  | 6b34d8ccd1 |  |
|  | b35ee10f8e |  |
|  | fc8540589a |  |
|  | 3d1ca6893a |  |
@@ -142,6 +142,9 @@ Count in the extra file is optional, since there isn't always a post count for c

The extra files can also be used to add new or custom tags not included in the main set, provided `onlyAliasExtraFile` is false.

If an extra tag doesn't match any existing tag, it is added to the list as a new tag instead. For this, the line needs to include the post count and alias columns even if they are empty, so it could take the form `tag,type,,`.
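For example (hypothetical entries, not taken from the shipped files), an extra file adding one bare custom tag and one tag with a count and aliases could look like this:

```csv
my_custom_tag,0,,
my_other_tag,0,1234,"other_tag,some_alias"
```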

##### WARNING

Do not use e621.csv or danbooru.csv as an extra file. Alias comparison scales quadratically with the tag count, so the combination of danbooru + e621 needs roughly 10,000,000,000 (yes, ten billion) lookups and usually takes multiple minutes to load.

## CSV tag data

The script expects a CSV file with tags saved in the following way:

```csv
```
29 javascript/_result.js Normal file

@@ -0,0 +1,29 @@
// Result data type for cleaner use of optional completion result properties

// Type enum
const ResultType = Object.freeze({
    "tag": 1,
    "embedding": 2,
    "wildcardTag": 3,
    "wildcardFile": 4,
    "yamlWildcard": 5
});

// Class to hold result data and annotations to make it clearer to use
class AutocompleteResult {
    // Main properties
    text = "";
    type = ResultType.tag;

    // Additional info, only used in some cases
    category = null;
    count = null;
    aliases = null;
    meta = null;

    // Constructor
    constructor(text, type) {
        this.text = text;
        this.type = type;
    }
}
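As a usage sketch (the values are made up for illustration), the completion code constructs one of these per match and only fills the optional fields it knows about:

```js
// Hypothetical example: a plain tag result with category, count and aliases attached
let r = new AutocompleteResult("example_tag", ResultType.tag);
r.category = "0";            // tag category, used for coloring
r.count = 12345;             // post count shown in the list
r.aliases = "example_alias"; // comma-separated alias string

// A wildcard file result, by contrast, only sets meta
let w = new AutocompleteResult("my_wildcards", ResultType.wildcardFile);
w.meta = "Wildcard file";
```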
@@ -35,8 +35,8 @@ function getTextAreas() {
    } else { // Otherwise, we have to find the text areas by their adjacent labels
        let base = gradioApp().querySelector(entry.base);

        // Safety check
        if (!base) continue;
        // Safety check
        if (!base) continue;

        let allTextAreas = [...base.querySelectorAll("textarea")];

96 javascript/_utils.js Normal file

@@ -0,0 +1,96 @@
// Utility functions for tag autocomplete

// Parse the CSV file into a 2D array. Doesn't use regex, so it is very lightweight.
function parseCSV(str) {
    var arr = [];
    var quote = false; // 'true' means we're inside a quoted field

    // Iterate over each character, keep track of current row and column (of the returned array)
    for (var row = 0, col = 0, c = 0; c < str.length; c++) {
        var cc = str[c], nc = str[c + 1]; // Current character, next character
        arr[row] = arr[row] || []; // Create a new row if necessary
        arr[row][col] = arr[row][col] || ''; // Create a new column (start with empty string) if necessary

        // If the current character is a quotation mark, and we're inside a
        // quoted field, and the next character is also a quotation mark,
        // add a quotation mark to the current column and skip the next character
        if (cc == '"' && quote && nc == '"') { arr[row][col] += cc; ++c; continue; }

        // If it's just one quotation mark, begin/end quoted field
        if (cc == '"') { quote = !quote; continue; }

        // If it's a comma and we're not in a quoted field, move on to the next column
        if (cc == ',' && !quote) { ++col; continue; }

        // If it's a newline (CRLF) and we're not in a quoted field, skip the next character
        // and move on to the next row and move to column 0 of that new row
        if (cc == '\r' && nc == '\n' && !quote) { ++row; col = 0; ++c; continue; }

        // If it's a newline (LF or CR) and we're not in a quoted field,
        // move on to the next row and move to column 0 of that new row
        if (cc == '\n' && !quote) { ++row; col = 0; continue; }
        if (cc == '\r' && !quote) { ++row; col = 0; continue; }

        // Otherwise, append the current character to the current column
        arr[row][col] += cc;
    }
    return arr;
}

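A quick sketch of the behaviour (the rows are hypothetical, but follow the tag, category, count, aliases layout used elsewhere in this PR) — quoted fields keep their embedded commas:

```js
let rows = parseCSV('example_tag,0,1234,"alias_one,alias_two"\nother_tag,1,56,');
// rows[0] -> ["example_tag", "0", "1234", "alias_one,alias_two"]
// rows[1] -> ["other_tag", "1", "56"]  (a trailing empty column is simply not created)
```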
// Load file
async function readFile(filePath, json = false) {
    let response = await fetch(`file=${filePath}`);

    if (response.status != 200) {
        console.error(`Error loading file "${filePath}": ` + response.status, response.statusText);
        return null;
    }

    if (json)
        return await response.json();
    else
        return await response.text();
}

// Load CSV
async function loadCSV(path) {
    let text = await readFile(path);
    return parseCSV(text);
}

// Debounce function to prevent spamming the autocomplete function
var dbTimeOut;
const debounce = (func, wait = 300) => {
    return function (...args) {
        if (dbTimeOut) {
            clearTimeout(dbTimeOut);
        }

        dbTimeOut = setTimeout(() => {
            func.apply(this, args);
        }, wait);
    }
}

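A hypothetical wiring example, assuming `textArea` is one of the prompt textareas; note that `dbTimeOut` is a single module-level timer, so every function wrapped by this helper shares the same debounce window:

```js
// Only the last input event within 100 ms actually triggers completion
const debouncedAutocomplete = debounce(() => autocomplete(textArea, textArea.value), 100);
textArea.addEventListener("input", debouncedAutocomplete);
```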
// Difference function to fix duplicates not being seen as changes in normal filter
function difference(a, b) {
    if (a.length == 0) {
        return b;
    }
    if (b.length == 0) {
        return a;
    }

    return [...b.reduce((acc, v) => acc.set(v, (acc.get(v) || 0) - 1),
        a.reduce((acc, v) => acc.set(v, (acc.get(v) || 0) + 1), new Map())
    )].reduce((acc, [v, count]) => acc.concat(Array(Math.abs(count)).fill(v)), []);
}

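In effect this is a multiset difference (symmetric, via the absolute count), so adding a second copy of an already-present tag still registers as a change; for example:

```js
difference(["1girl", "smile", "smile"], ["1girl", "smile"]); // -> ["smile"]
difference(["1girl", "smile"], ["1girl", "smile"]);          // -> []
```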
function escapeRegExp(string) {
    return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
}
function escapeHTML(unsafeText) {
    let div = document.createElement('div');
    div.textContent = unsafeText;
    return div.innerHTML;
}

@@ -7,7 +7,9 @@ const styleColors = {
    "--results-bg-odd": ["#111827", "#f9fafb"],
    "--results-hover": ["#1f2937", "#f5f6f8"],
    "--results-selected": ["#374151", "#e5e7eb"],
    "--post-count-color": ["#6b6f7b", "#a2a9b4"]
    "--meta-text-color": ["#6b6f7b", "#a2a9b4"],
    "--embedding-v1-color": ["lightsteelblue", "#2b5797"],
    "--embedding-v2-color": ["skyblue", "#2d89ef"],
}
const browserVars = {
    "--results-overflow-y": {
@@ -21,7 +23,9 @@ const autocompleteCSS = `
    background-color: transparent;
    min-width: fit-content;
    align-self: center;
    margin: 0 5px;
}
#quicksettings [id^=setting_tac] > label > span {
    margin-bottom: 0px;
}
[id^=refresh_tac] {
    max-width: 2.5em;
@@ -59,80 +63,35 @@ const autocompleteCSS = `
    overflow: hidden;
    white-space: nowrap;
}
.acPostCount {
.acMetaText {
    position: relative;
    text-align: end;
    padding: 0 0 0 15px;
    flex-grow: 1;
    color: var(--post-count-color);
    color: var(--meta-text-color);
}
.acWikiLink {
    padding: 0.5rem;
    margin: -0.5rem 0 -0.5rem -0.5rem;
}
.acWikiLink:hover {
    text-decoration: underline;
}
.acListItem.acEmbeddingV1 {
    color: var(--embedding-v1-color);
}
.acListItem.acEmbeddingV2 {
    color: var(--embedding-v2-color);
}
`;

// Parse the CSV file into a 2D array. Doesn't use regex, so it is very lightweight.
function parseCSV(str) {
    var arr = [];
    var quote = false; // 'true' means we're inside a quoted field

    // Iterate over each character, keep track of current row and column (of the returned array)
    for (var row = 0, col = 0, c = 0; c < str.length; c++) {
        var cc = str[c], nc = str[c + 1]; // Current character, next character
        arr[row] = arr[row] || []; // Create a new row if necessary
        arr[row][col] = arr[row][col] || ''; // Create a new column (start with empty string) if necessary

        // If the current character is a quotation mark, and we're inside a
        // quoted field, and the next character is also a quotation mark,
        // add a quotation mark to the current column and skip the next character
        if (cc == '"' && quote && nc == '"') { arr[row][col] += cc; ++c; continue; }

        // If it's just one quotation mark, begin/end quoted field
        if (cc == '"') { quote = !quote; continue; }

        // If it's a comma and we're not in a quoted field, move on to the next column
        if (cc == ',' && !quote) { ++col; continue; }

        // If it's a newline (CRLF) and we're not in a quoted field, skip the next character
        // and move on to the next row and move to column 0 of that new row
        if (cc == '\r' && nc == '\n' && !quote) { ++row; col = 0; ++c; continue; }

        // If it's a newline (LF or CR) and we're not in a quoted field,
        // move on to the next row and move to column 0 of that new row
        if (cc == '\n' && !quote) { ++row; col = 0; continue; }
        if (cc == '\r' && !quote) { ++row; col = 0; continue; }

        // Otherwise, append the current character to the current column
        arr[row][col] += cc;
    }
    return arr;
}

// Load file
async function readFile(filePath, json = false) {
    let response = await fetch(`file=${filePath}`);

    if (response.status != 200) {
        console.error(`Error loading file "${filePath}": ` + response.status, response.statusText);
        return null;
    }

    if (json)
        return await response.json();
    else
        return await response.text();
}

// Load CSV
async function loadCSV(path) {
    let text = await readFile(path);
    return parseCSV(text);
}

var tagBasePath = "";
var allTags = [];
var translations = new Map();

async function loadTags(c) {
    // Load main tags and aliases
    if (allTags.length === 0) {
    if (allTags.length === 0 && c.tagFile && c.tagFile !== "None") {
        try {
            allTags = await loadCSV(`${tagBasePath}/${c.tagFile}?${new Date().getTime()}`);
        } catch (e) {
@@ -206,7 +165,9 @@ async function syncOptions() {
            txt2img: opts["tac_activeIn.txt2img"],
            img2img: opts["tac_activeIn.img2img"],
            negativePrompts: opts["tac_activeIn.negativePrompts"],
            thirdParty: opts["tac_activeIn.thirdParty"]
            thirdParty: opts["tac_activeIn.thirdParty"],
            modelList: opts["tac_activeIn.modelList"],
            modelListMode: opts["tac_activeIn.modelListMode"]
        },
        // Results related settings
        maxResults: opts["tac_maxResults"],
@@ -215,6 +176,7 @@ async function syncOptions() {
        delayTime: opts["tac_delayTime"],
        useWildcards: opts["tac_useWildcards"],
        useEmbeddings: opts["tac_useEmbeddings"],
        showWikiLinks: opts["tac_showWikiLinks"],
        // Insertion related settings
        replaceUnderscores: opts["tac_replaceUnderscores"],
        escapeParentheses: opts["tac_escapeParentheses"],
@@ -266,34 +228,6 @@ async function syncOptions() {
    CFG = newCFG;
}

// Debounce function to prevent spamming the autocomplete function
var dbTimeOut;
const debounce = (func, wait = 300) => {
    return function (...args) {
        if (dbTimeOut) {
            clearTimeout(dbTimeOut);
        }

        dbTimeOut = setTimeout(() => {
            func.apply(this, args);
        }, wait);
    }
}

// Difference function to fix duplicates not being seen as changes in normal filter
function difference(a, b) {
    if (a.length == 0) {
        return b;
    }
    if (b.length == 0) {
        return a;
    }

    return [...b.reduce((acc, v) => acc.set(v, (acc.get(v) || 0) - 1),
        a.reduce((acc, v) => acc.set(v, (acc.get(v) || 0) + 1), new Map())
    )].reduce((acc, [v, count]) => acc.concat(Array(Math.abs(count)).fill(v)), []);
}

// Create the result list div and necessary styling
function createResultsDiv(textArea) {
    let resultsDiv = document.createElement("div");
@@ -332,34 +266,54 @@ function hideResults(textArea) {
    selectedTag = null;
}

function escapeRegExp(string) {
    return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
}
function escapeHTML(unsafeText) {
    let div = document.createElement('div');
    div.textContent = unsafeText;
    return div.innerHTML;
var currentModelHash = "";
var currentModelName = "";
// Function to check activation criteria
function isEnabled() {
    if (CFG.activeIn.global) {
        let modelList = CFG.activeIn.modelList
            .split(",")
            .map(x => x.trim())
            .filter(x => x.length > 0);

        let shortHash = currentModelHash.substring(0, 10);
        if (CFG.activeIn.modelListMode.toLowerCase() === "blacklist") {
            // If the current model is in the blacklist, disable
            return modelList.filter(x => x === currentModelName || x === currentModelHash || x === shortHash).length === 0;
        } else {
            // If the current model is in the whitelist, enable.
            // An empty whitelist is ignored.
            return modelList.length === 0 || modelList.filter(x => x === currentModelName || x === currentModelHash || x === shortHash).length > 0;
        }
    } else {
        return false;
    }
}

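A hypothetical blacklist scenario (the setting values below are made up, and `tac_active` is assumed to be on) to illustrate the check:

```js
// Suppose the user configured:
CFG.activeIn.modelListMode = "Blacklist";
CFG.activeIn.modelList = "anything-v4.5.safetensors, 6569e224";

// With the listed checkpoint loaded, completion is disabled:
currentModelName = "anything-v4.5.safetensors";
isEnabled(); // -> false

// With any other checkpoint loaded, it stays enabled:
currentModelName = "some-other-model.ckpt";
isEnabled(); // -> true
```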
const WEIGHT_REGEX = /[([]([^,()[\]:| ]+)(?::(?:\d+(?:\.\d+)?|\.\d+))?[)\]]/g;
const TAG_REGEX = /([^\s,|]+)/g
const TAG_REGEX = /(<[^\t\n\r,>]+>?|[^\s,|<>]+|<)/g
const WC_REGEX = /\b__([^, ]+)__([^, ]*)\b/g;
const UMI_PROMPT_REGEX = /<[^\s]*?\[[^,<>]*[\]|]?>?/gi;
const UMI_TAG_REGEX = /(?:\[|\||--)([^<>\[\]\-|]+)/gi;
let hideBlocked = false;

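To make the patterns concrete, here is what they capture on a few hypothetical prompt fragments:

```js
// WEIGHT_REGEX pulls the tag out of a weighted group like "(tag:1.2)" or "[tag]"
[..."(blue_hair:1.2), [smile]".matchAll(WEIGHT_REGEX)].map(m => m[1]);   // -> ["blue_hair", "smile"]

// WC_REGEX splits a wildcard token into file name and partial search word
[..."__colors__blu".matchAll(WC_REGEX)].map(m => [m[1], m[2]]);          // -> [["colors", "blu"]]

// UMI_TAG_REGEX extracts the tag names from a UMI-style sub-prompt
[..."<[nature|city][--winter]>".matchAll(UMI_TAG_REGEX)].map(m => m[1]); // -> ["nature", "city", "winter"]
```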
// On click, insert the tag into the prompt textbox with respect to the cursor position
function insertTextAtCursor(textArea, result, tagword) {
    let text = result[0];
    let tagType = result[1];
    let text = result.text;
    let tagType = result.type;

    let cursorPos = textArea.selectionStart;
    var sanitizedText = text

    // Replace differently depending on if it's a tag or wildcard
    if (tagType === "wildcardFile") {
    if (tagType === ResultType.wildcardFile) {
        sanitizedText = "__" + text.replace("Wildcards: ", "") + "__";
    } else if (tagType === "wildcardTag") {
    } else if (tagType === ResultType.wildcardTag) {
        sanitizedText = text.replace(/^.*?: /g, "");
    } else if (tagType === "embedding") {
        sanitizedText = `<${text.replace(/^.*?: /g, "")}>`;
    } else if (tagType === ResultType.yamlWildcard && !yamlWildcards.includes(text)) {
        sanitizedText = text.replaceAll("_", " "); // Replace underscores only if the yaml tag is not using them
    } else if (tagType === ResultType.embedding) {
        sanitizedText = `${text.replace(/^.*?: /g, "")}`;
    } else {
        sanitizedText = CFG.replaceUnderscores ? text.replaceAll("_", " ") : text;
    }
@@ -382,7 +336,7 @@ function insertTextAtCursor(textArea, result, tagword) {
    let afterInsertCursorPos = editStart + match.index + sanitizedText.length;

    var optionalComma = "";
    if (CFG.appendComma && tagType !== "wildcardFile") {
    if (CFG.appendComma && ![ResultType.wildcardFile, ResultType.yamlWildcard].includes(tagType)) {
        optionalComma = surrounding.match(new RegExp(`${escapeRegExp(tagword)}[,:]`, "i")) !== null ? "" : ", ";
    }

@@ -409,8 +363,22 @@ function insertTextAtCursor(textArea, result, tagword) {
    }
    previousTags = tags;

    // If it was a yaml wildcard, also update the umiPreviousTags
    if (tagType === ResultType.yamlWildcard && originalTagword.length > 0) {
        let umiSubPrompts = [...newPrompt.matchAll(UMI_PROMPT_REGEX)];

        let umiTags = [];
        umiSubPrompts.forEach(umiSubPrompt => {
            umiTags = umiTags.concat([...umiSubPrompt[0].matchAll(UMI_TAG_REGEX)].map(x => x[1].toLowerCase()));
        });

        umiPreviousTags = umiTags;

        hideResults(textArea);
    }

    // Hide results after inserting
    if (tagType === "wildcardFile") {
    if (tagType === ResultType.wildcardFile) {
        // If it's a wildcard, we want to keep the results open so the user can select another wildcard
        hideBlocked = true;
        autocomplete(textArea, prompt, sanitizedText);
@@ -449,17 +417,16 @@ function addResultsToList(textArea, results, tagword, resetList) {

        let itemText = document.createElement("div");
        itemText.classList.add("acListItem");
        flexDiv.appendChild(itemText);

        let displayText = "";
        // If the tag matches the tagword, we don't need to display the alias
        if (result[3] && !result[0].includes(tagword)) { // Alias
            let splitAliases = result[3].split(",");
        if (result.aliases && !result.text.includes(tagword)) { // Alias
            let splitAliases = result.aliases.split(",");
            let bestAlias = splitAliases.find(a => a.toLowerCase().includes(tagword));

            // search in translations if no alias matches
            if (!bestAlias) {
                let tagOrAlias = pair => pair[0] === result[0] || result[3].split(",").includes(pair[0]);
                let tagOrAlias = pair => pair[0] === result.text || splitAliases.includes(pair[0]);
                var tArray = [...translations];
                if (tArray) {
                    var translationKey = [...translations].find(pair => tagOrAlias(pair) && pair[1].includes(tagword));
@@ -471,41 +438,70 @@ function addResultsToList(textArea, results, tagword, resetList) {
            displayText = escapeHTML(bestAlias);

            // Append translation for alias if it exists and is not what the user typed
            if (translations.has(bestAlias) && translations.get(bestAlias) !== bestAlias && bestAlias !== result[0])
            if (translations.has(bestAlias) && translations.get(bestAlias) !== bestAlias && bestAlias !== result.text)
                displayText += `[${translations.get(bestAlias)}]`;

            if (!CFG.alias.onlyShowAlias && result[0] !== bestAlias)
                displayText += " ➝ " + result[0];
            if (!CFG.alias.onlyShowAlias && result.text !== bestAlias)
                displayText += " ➝ " + result.text;
        } else { // No alias
            displayText = escapeHTML(result[0]);
            displayText = escapeHTML(result.text);
        }

        // Append translation for result if it exists
        if (translations.has(result[0]))
            displayText += `[${translations.get(result[0])}]`;
        if (translations.has(result.text))
            displayText += `[${translations.get(result.text)}]`;

        // Print search term bolded in result
        itemText.innerHTML = displayText.replace(tagword, `<b>${tagword}</b>`);

        // Add wiki link if the setting is enabled and a supported tag set loaded
        if (CFG.showWikiLinks
            && (result.type === ResultType.tag)
            && (tagFileName.toLowerCase().startsWith("danbooru") || tagFileName.toLowerCase().startsWith("e621"))) {
            let wikiLink = document.createElement("a");
            wikiLink.classList.add("acWikiLink");
            wikiLink.innerText = "?";

            let linkPart = displayText;
            // Only use alias result if it is one
            if (displayText.includes("➝"))
                linkPart = displayText.split(" ➝ ")[1];

            // Set link based on selected file
            let tagFileNameLower = tagFileName.toLowerCase();
            if (tagFileNameLower.startsWith("danbooru")) {
                wikiLink.href = `https://danbooru.donmai.us/wiki_pages/${linkPart}`;
            } else if (tagFileNameLower.startsWith("e621")) {
                wikiLink.href = `https://e621.net/wiki_pages/${linkPart}`;
            }

            wikiLink.target = "_blank";
            flexDiv.appendChild(wikiLink);
        }

        flexDiv.appendChild(itemText);

        // Add post count & color if it's a tag
        // Wildcards & Embeds have no tag type
        if (!result[1].startsWith("wildcard") && result[1] !== "embedding") {
            // Set the color of the tag
            let tagType = result[1];
            let colorGroup = tagColors[tagFileName];
            // Default to danbooru scheme if no matching one is found
            if (!colorGroup)
                colorGroup = tagColors["danbooru"];
        // Wildcards & Embeds have no tag category
        if (![ResultType.wildcardFile, ResultType.wildcardTag, ResultType.embedding].includes(result.type)) {
            if (result.category) {
                // Set the color of the tag
                let cat = result.category;
                let colorGroup = tagColors[tagFileName];
                // Default to danbooru scheme if no matching one is found
                if (!colorGroup)
                    colorGroup = tagColors["danbooru"];

                // Set tag type to invalid if not found
                if (!colorGroup[tagType])
                    tagType = "-1";
                // Set tag type to invalid if not found
                if (!colorGroup[cat])
                    cat = "-1";

                itemText.style = `color: ${colorGroup[tagType][mode]};`;
                flexDiv.style = `color: ${colorGroup[cat][mode]};`;
            }

            // Post count
            if (result[2] && !isNaN(result[2])) {
                let postCount = result[2];
            if (result.count && !isNaN(result.count)) {
                let postCount = result.count;
                let formatter;

                // Danbooru formats numbers with a padded fraction for 1M or 1k, but not for 10/100k
@@ -518,9 +514,23 @@ function addResultsToList(textArea, results, tagword, resetList) {

                let countDiv = document.createElement("div");
                countDiv.textContent = formattedCount;
                countDiv.classList.add("acPostCount");
                countDiv.classList.add("acMetaText");
                flexDiv.appendChild(countDiv);
            }
        } else if (result.meta) { // Check if it is an embedding we have version info for
            let metaDiv = document.createElement("div");
            metaDiv.textContent = result.meta;
            metaDiv.classList.add("acMetaText");

            // Add version info classes if it is an embedding
            if (result.type === ResultType.embedding) {
                if (result.meta.startsWith("v1"))
                    itemText.classList.add("acEmbeddingV1");
                else if (result.meta.startsWith("v2"))
                    itemText.classList.add("acEmbeddingV2");
            }

            flexDiv.appendChild(metaDiv);
        }

        // Add listener
@@ -555,17 +565,22 @@ function updateSelectionStyle(textArea, newIndex, oldIndex) {

var wildcardFiles = [];
var wildcardExtFiles = [];
var yamlWildcards = [];
var umiPreviousTags = [];
var embeddings = [];
var results = [];
var tagword = "";
var originalTagword = "";
var resultCount = 0;
async function autocomplete(textArea, prompt, fixedTag = null) {
    // Return if the function is deactivated in the UI
    if (!CFG.activeIn.global) return;
    if (!isEnabled()) return;

    // Guard for empty prompt
    if (prompt.length === 0) {
        hideResults(textArea);
        previousTags = [];
        tagword = "";
        return;
    }

@@ -575,11 +590,19 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
    let weightedTags = [...prompt.matchAll(WEIGHT_REGEX)]
        .map(match => match[1]);
    let tags = prompt.match(TAG_REGEX)
    if (weightedTags !== null) {
        tags = tags.filter(tag => !weightedTags.some(weighted => tag.includes(weighted)))
    if (weightedTags !== null && tags !== null) {
        tags = tags.filter(tag => !weightedTags.some(weighted => tag.includes(weighted) && !tag.startsWith("<[")))
            .concat(weightedTags);
    }

    // Guard for no tags
    if (!tags || tags.length === 0) {
        previousTags = [];
        tagword = "";
        hideResults(textArea);
        return;
    }

    let tagCountChange = tags.length - previousTags.length;
    let diff = difference(tags, previousTags);
    previousTags = tags;
@@ -601,11 +624,12 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
        tagword = fixedTag;
    }

    results = [];
    tagword = tagword.toLowerCase().replace(/[\n\r]/g, "");

    if (CFG.useWildcards && [...tagword.matchAll(/\b__([^, ]+)__([^, ]*)\b/g)].length > 0) {
    if (CFG.useWildcards && [...tagword.matchAll(WC_REGEX)].length > 0) {
        // Show wildcards from a file with that name
        wcMatch = [...tagword.matchAll(/\b__([^, ]+)__([^, ]*)\b/g)]
        wcMatch = [...tagword.matchAll(WC_REGEX)]
        let wcFile = wcMatch[0][1];
        let wcWord = wcMatch[0][2];

@@ -620,8 +644,13 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
        let wildcards = (await readFile(`${wcPair[0]}/${wcPair[1]}.txt?${new Date().getTime()}`)).split("\n")
            .filter(x => x.trim().length > 0 && !x.startsWith('#')); // Remove empty lines and comments

        results = wildcards.filter(x => (wcWord !== null && wcWord.length > 0) ? x.toLowerCase().includes(wcWord) : x) // Filter by tagword
            .map(x => [wcFile + ": " + x.trim(), "wildcardTag"]); // Mark as wildcard

        let tempResults = wildcards.filter(x => (wcWord !== null && wcWord.length > 0) ? x.toLowerCase().includes(wcWord) : x) // Filter by tagword
        tempResults.forEach(t => {
            let result = new AutocompleteResult(t.trim(), ResultType.wildcardTag);
            result.meta = wcFile;
            results.push(result);
        });
    } else if (CFG.useWildcards && (tagword.startsWith("__") && !tagword.endsWith("__") || tagword === "__")) {
        // Show available wildcard files
        let tempResults = [];
@@ -631,12 +660,191 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
        } else {
            tempResults = wildcardFiles.concat(wildcardExtFiles);
        }
        results = tempResults.map(x => ["Wildcards: " + x[1].trim(), "wildcardFile"]); // Mark as wildcard

        // Add final results
        tempResults.forEach(wcFile => {
            let result = new AutocompleteResult(wcFile[1].trim(), ResultType.wildcardFile);
            result.meta = "Wildcard file";
            results.push(result);
        })
    } else if (CFG.useWildcards && [...tagword.matchAll(UMI_PROMPT_REGEX)].length > 0) {
        // We are in a UMI yaml tag definition, parse further
        let umiSubPrompts = [...prompt.matchAll(UMI_PROMPT_REGEX)];

        let umiTags = [];
        let umiTagsWithOperators = []

        const insertAt = (str,char,pos) => str.slice(0,pos) + char + str.slice(pos);

        umiSubPrompts.forEach(umiSubPrompt => {
            umiTags = umiTags.concat([...umiSubPrompt[0].matchAll(UMI_TAG_REGEX)].map(x => x[1].toLowerCase()));

            const start = umiSubPrompt.index;
            const end = umiSubPrompt.index + umiSubPrompt[0].length;
            if (textArea.selectionStart >= start && textArea.selectionStart <= end) {
                umiTagsWithOperators = insertAt(umiSubPrompt[0], '###', textArea.selectionStart - start);
            }
        });

        const promptSplitToTags = umiTagsWithOperators.replace(']###[', '][').split("][");

        const clean = (str) => str
            .replaceAll('>', '')
            .replaceAll('<', '')
            .replaceAll('[', '')
            .replaceAll(']', '')
            .trim();

        const matches = promptSplitToTags.reduce((acc, curr) => {
            isOptional = curr.includes("|");
            isNegative = curr.startsWith("--");
            let out;
            if (isOptional) {
                out = {
                    hasCursor: curr.includes("###"),
                    tags: clean(curr).split('|').map(x => ({
                        hasCursor: x.includes("###"),
                        isNegative: x.startsWith("--"),
                        tag: clean(x).replaceAll("###", '').replaceAll("--", '')
                    }))
                };
                acc.optional.push(out);
                acc.all.push(...out.tags.map(x => x.tag));
            } else if (isNegative) {
                out = {
                    hasCursor: curr.includes("###"),
                    tags: clean(curr).replaceAll("###", '').split('|'),
                };
                out.tags = out.tags.map(x => x.startsWith("--") ? x.substring(2) : x);
                acc.negative.push(out);
                acc.all.push(...out.tags);
            } else {
                out = {
                    hasCursor: curr.includes("###"),
                    tags: clean(curr).replaceAll("###", '').split('|'),
                };
                acc.positive.push(out);
                acc.all.push(...out.tags);
            }
            return acc;
        }, { positive: [], negative: [], optional: [], all: [] });

        //console.log({ matches })

        const filteredWildcards = (tagword) => {
            const wildcards = yamlWildcards.filter(x => {
                let tags = x[1];
                const matchesNeg =
                    matches.negative.length === 0
                    || matches.negative.every(x =>
                        x.hasCursor
                        || x.tags.every(t => !tags[t])
                    );
                if (!matchesNeg) return false;
                const matchesPos =
                    matches.positive.length === 0
                    || matches.positive.every(x =>
                        x.hasCursor
                        || x.tags.every(t => tags[t])
                    );
                if (!matchesPos) return false;
                const matchesOpt =
                    matches.optional.length === 0
                    || matches.optional.some(x =>
                        x.tags.some(t =>
                            t.hasCursor
                            || t.isNegative
                                ? !tags[t.tag]
                                : tags[t.tag]
                        ));
                if (!matchesOpt) return false;
                return true;
            }).reduce((acc, val) => {
                Object.keys(val[1]).forEach(tag => acc[tag] = acc[tag] + 1 || 1);
                return acc;
            }, {});

            return Object.entries(wildcards)
                .sort((a, b) => b[1] - a[1])
                .filter(x =>
                    x[0] === tagword
                    || !matches.all.includes(x[0])
                );
        }

        if (umiTags.length > 0) {
            // Get difference for subprompt
            let tagCountChange = umiTags.length - umiPreviousTags.length;
            let diff = difference(umiTags, umiPreviousTags);
            umiPreviousTags = umiTags;

            // Show all condition
            let showAll = tagword.endsWith("[") || tagword.endsWith("[--") || tagword.endsWith("|");

            // Exit early if the user closed the bracket manually
            if ((!diff || diff.length === 0 || (diff.length === 1 && tagCountChange < 0)) && !showAll) {
                if (!hideBlocked) hideResults(textArea);
                return;
            }

            let umiTagword = diff[0] || '';
            let tempResults = [];
            if (umiTagword && umiTagword.length > 0) {
                umiTagword = umiTagword.toLowerCase().replace(/[\n\r]/g, "");
                originalTagword = tagword;
                tagword = umiTagword;
                let filteredWildcardsSorted = filteredWildcards(umiTagword);
                let searchRegex = new RegExp(`(^|[^a-zA-Z])${escapeRegExp(umiTagword)}`, 'i')
                let baseFilter = x => x[0].toLowerCase().search(searchRegex) > -1;
                let spaceIncludeFilter = x => x[0].toLowerCase().replaceAll(" ", "_").search(searchRegex) > -1;
                tempResults = filteredWildcardsSorted.filter(x => baseFilter(x) || spaceIncludeFilter(x)) // Filter by tagword

                // Add final results
                tempResults.forEach(t => {
                    let result = new AutocompleteResult(t[0].trim(), ResultType.yamlWildcard)
                    result.count = t[1];
                    results.push(result);
                });
            } else if (showAll) {
                let filteredWildcardsSorted = filteredWildcards("");

                // Add final results
                filteredWildcardsSorted.forEach(t => {
                    let result = new AutocompleteResult(t[0].trim(), ResultType.yamlWildcard)
                    result.count = t[1];
                    results.push(result);
                });

                originalTagword = tagword;
                tagword = "";
            }
        } else {
            let filteredWildcardsSorted = filteredWildcards("");

            // Add final results
            filteredWildcardsSorted.forEach(t => {
                let result = new AutocompleteResult(t[0].trim(), ResultType.yamlWildcard)
                result.count = t[1];
                results.push(result);
            });

            originalTagword = tagword;
            tagword = "";
        }
    } else if (CFG.useEmbeddings && tagword.match(/<[^,> ]*>?/g)) {
        // Show embeddings
        let tempResults = [];
        if (tagword !== "<") {
            tempResults = embeddings.filter(x => x.toLowerCase().includes(tagword.replace("<", ""))) // Filter by tagword
            let searchTerm = tagword.replace("<", "")
            let versionString;
            if (searchTerm.startsWith("v1") || searchTerm.startsWith("v2")) {
                versionString = searchTerm.slice(0, 2);
                searchTerm = searchTerm.slice(2);
            }
            if (versionString)
                tempResults = embeddings.filter(x => x[0].toLowerCase().includes(searchTerm) && x[1] && x[1] === versionString); // Filter by tagword
            else
                tempResults = embeddings.filter(x => x[0].toLowerCase().includes(searchTerm)); // Filter by tagword
        } else {
            tempResults = embeddings;
        }
@@ -649,8 +857,21 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
        } else {
            searchRegex = new RegExp(`(^|[^a-zA-Z])${escapeRegExp(tagword)}`, 'i');
        }
        genericResults = allTags.filter(x => x[0].toLowerCase().search(searchRegex) > -1).slice(0, CFG.maxResults);
        results = genericResults.concat(tempResults.map(x => ["Embeddings: " + x.trim(), "embedding"])); // Mark as embedding
        let genericResults = allTags.filter(x => x[0].toLowerCase().search(searchRegex) > -1).slice(0, CFG.maxResults);

        // Add final results
        tempResults.forEach(t => {
            let result = new AutocompleteResult(t[0].trim(), ResultType.embedding)
            result.meta = t[1] + " Embedding";
            results.push(result);
        });
        genericResults.forEach(g => {
            let result = new AutocompleteResult(g[0].trim(), ResultType.tag)
            result.category = g[1];
            result.count = g[2];
            result.aliases = g[3];
            results.push(result);
        });
    } else {
        // Create escaped search regex with support for * as a start placeholder
        let searchRegex;
@@ -662,13 +883,13 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
        }
        // If onlyShowAlias is enabled, we don't need to include normal results
        if (CFG.alias.onlyShowAlias) {
            results = allTags.filter(x => x[3] && x[3].toLowerCase().search(searchRegex) >- 1);
            results = allTags.filter(x => x[3] && x[3].toLowerCase().search(searchRegex) > -1);
        } else {
            // Else both normal tags and aliases/translations are included depending on the config
            let baseFilter = (x) => x[0].toLowerCase().search(searchRegex) >- 1;
            let aliasFilter = (x) => x[3] && x[3].toLowerCase().search(searchRegex) >- 1;
            let translationFilter = (x) => (translations.has(x[0]) && translations.get(x[0]).toLowerCase().search(searchRegex) >- 1)
                || x[3] && x[3].split(",").some(y => translations.has(y) && translations.get(y).toLowerCase().search(searchRegex) >- 1);
            let baseFilter = (x) => x[0].toLowerCase().search(searchRegex) > -1;
            let aliasFilter = (x) => x[3] && x[3].toLowerCase().search(searchRegex) > -1;
            let translationFilter = (x) => (translations.has(x[0]) && translations.get(x[0]).toLowerCase().search(searchRegex) > -1)
                || x[3] && x[3].split(",").some(y => translations.has(y) && translations.get(y).toLowerCase().search(searchRegex) > -1);

            let fil;
            if (CFG.alias.searchByAlias && CFG.translation.searchByTranslation)
@@ -680,7 +901,14 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
            else
                fil = (x) => baseFilter(x);

            results = allTags.filter(fil);
            // Add final results
            allTags.filter(fil).forEach(t => {
                let result = new AutocompleteResult(t[0].trim(), ResultType.tag)
                result.category = t[1];
                result.count = t[2];
                result.aliases = t[3];
                results.push(result);
            });
        }
        // Slice if the user has set a max result count
        if (!CFG.showAllResults) {
@@ -690,6 +918,7 @@ async function autocomplete(textArea, prompt, fixedTag = null) {

    // Guard for empty results
    if (!results.length) {
        //console.log('No results found for "' + tagword + '"');
        hideResults(textArea);
        return;
    }
@@ -700,8 +929,8 @@ async function autocomplete(textArea, prompt, fixedTag = null) {

var oldSelectedTag = null;
function navigateInList(textArea, event) {
    // Return if the function is deactivated in the UI
    if (!CFG.activeIn.global) return;
    // Return if the function is deactivated in the UI or the current model is excluded due to white/blacklist settings
    if (!isEnabled()) return;

    validKeys = ["ArrowUp", "ArrowDown", "PageUp", "PageDown", "Home", "End", "Enter", "Tab", "Escape"];

@@ -822,12 +1051,31 @@ async function setup() {
            console.error("Error loading wildcards: " + e);
        }
    }
    // Load yaml wildcards
    if (yamlWildcards.length === 0) {
        try {
            let yamlTags = (await readFile(`${tagBasePath}/temp/wcet.txt?${new Date().getTime()}`)).split("\n");
            // Split into tag, count pairs
            yamlWildcards = yamlTags.map(x => x
                .trim()
                .split(","))
                .map(([i, ...rest]) => [
                    i,
                    rest.reduce((a, b) => {
                        a[b.toLowerCase()] = true;
                        return a;
                    }, {}),
                ]);
        } catch (e) {
            console.error("Error loading yaml wildcards: " + e);
        }
    }
    // Load embeddings
    if (embeddings.length === 0) {
        try {
            embeddings = (await readFile(`${tagBasePath}/temp/emb.txt?${new Date().getTime()}`)).split("\n")
                .filter(x => x.trim().length > 0) // Remove empty lines
                .map(x => x.replace(".bin", "").replace(".pt", "").replace(".png", "")); // Remove file extensions
                .map(x => x.trim().split(",")); // Split into name, version type pairs
        } catch (e) {
            console.error("Error loading embeddings.txt: " + e);
        }
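To illustrate the wcet.txt mapping above with a hypothetical line (the Python helper in this PR writes each entry as an index followed by that entry's tags):

```js
let line = "0,beach,ocean,sand";
let mapped = [line.trim().split(",")].map(([i, ...rest]) => [
    i,
    rest.reduce((a, b) => { a[b.toLowerCase()] = true; return a; }, {}),
]);
// mapped -> [["0", { beach: true, ocean: true, sand: true }]]
```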
@@ -837,7 +1085,7 @@ async function setup() {
    let textAreas = getTextAreas();

    // Add event listener to apply settings button so we can mirror the changes to our internal config
    let applySettingsButton = gradioApp().querySelector("#tab_settings > div > .gr-button-primary");
    let applySettingsButton = gradioApp().querySelector("#tab_settings #settings_submit") || gradioApp().querySelector("#tab_settings > div > .gr-button-primary");
    applySettingsButton.addEventListener("click", () => {
        // Wait 500ms to make sure the settings have been applied to the webui opts object
        setTimeout(async () => {
@@ -855,6 +1103,28 @@ async function setup() {
        });
    });

    // Add change listener to model dropdown to react to model changes
    let modelDropdown = gradioApp().querySelector("#setting_sd_model_checkpoint select");
    currentModelName = modelDropdown.value;
    modelDropdown.addEventListener("change", () => {
        setTimeout(() => {
            currentModelName = modelDropdown.value;
        }, 100);
    });
    // Add mutation observer for the model hash text to also allow hash-based blacklist again
    let modelHashText = gradioApp().querySelector("#sd_checkpoint_hash");
    if (modelHashText) {
        currentModelHash = modelHashText.title
        let modelHashObserver = new MutationObserver((mutationList, observer) => {
            for (const mutation of mutationList) {
                if (mutation.type === "attributes" && mutation.attributeName === "title") {
                    currentModelHash = mutation.target.title;
                }
            }
        });
        modelHashObserver.observe(modelHashText, { attributes: true });
    }

    // Not found, we're on a page without prompt textareas
    if (textAreas.every(v => v === null || v === undefined)) return;
    // Already added or unnecessary to add
@@ -936,4 +1206,4 @@ onUiUpdate(async () => {
        await syncOptions();
        // Rest of setup
        setup();
    });
});

@@ -3,7 +3,10 @@

import gradio as gr
from pathlib import Path
from modules import scripts, script_callbacks, shared
from modules import scripts, script_callbacks, shared, sd_hijack
import yaml
import time
import threading

# Webui root path
FILE_DIR = Path().absolute()
@@ -53,9 +56,86 @@ def get_ext_wildcards():
    return wildcard_files


def get_embeddings():
    """Returns a list of all embeddings"""
    return [str(e.relative_to(EMB_PATH)) for e in EMB_PATH.glob("**/*") if e.suffix in {".bin", ".pt", ".png"}]
def get_ext_wildcard_tags():
    """Returns a list of all tags found in extension YAML files found under a Tags: key."""
    wildcard_tags = {} # { tag: count }
    yaml_files = []
    for path in WILDCARD_EXT_PATHS:
        yaml_files.extend(p for p in path.rglob("*.yml"))
        yaml_files.extend(p for p in path.rglob("*.yaml"))
    count = 0
    for path in yaml_files:
        try:
            with open(path, encoding="utf8") as file:
                data = yaml.safe_load(file)
                for item in data:
                    wildcard_tags[count] = ','.join(data[item]['Tags'])
                    count += 1
        except yaml.YAMLError as exc:
            print(exc)
    # Sort by count
    sorted_tags = sorted(wildcard_tags.items(), key=lambda item: item[1], reverse=True)
    output = []
    for tag, count in sorted_tags:
        output.append(f"{tag},{count}")
    return output


def get_embeddings(sd_model):
    """Write a list of all embeddings with their version"""

    # Version constants
    V1_SHAPE = 768
    V2_SHAPE = 1024
    emb_v1 = []
    emb_v2 = []
    results = []

    try:
        # Get embedding dict from sd_hijack to separate v1/v2 embeddings
        emb_type_a = sd_hijack.model_hijack.embedding_db.word_embeddings
        emb_type_b = sd_hijack.model_hijack.embedding_db.skipped_embeddings
        # Get the shape of the first item in the dict
        emb_a_shape = -1
        emb_b_shape = -1
        if (len(emb_type_a) > 0):
            emb_a_shape = next(iter(emb_type_a.items()))[1].shape
        if (len(emb_type_b) > 0):
            emb_b_shape = next(iter(emb_type_b.items()))[1].shape

        # Add embeddings to the correct list
        if (emb_a_shape == V1_SHAPE):
            emb_v1 = list(emb_type_a.keys())
        elif (emb_a_shape == V2_SHAPE):
            emb_v2 = list(emb_type_a.keys())

        if (emb_b_shape == V1_SHAPE):
            emb_v1 = list(emb_type_b.keys())
        elif (emb_b_shape == V2_SHAPE):
            emb_v2 = list(emb_type_b.keys())

        # Get shape of current model
        #vec = sd_model.cond_stage_model.encode_embedding_init_text(",", 1)
        #model_shape = vec.shape[1]
        # Show relevant entries at the top
        #if (model_shape == V1_SHAPE):
        #    results = [e + ",v1" for e in emb_v1] + [e + ",v2" for e in emb_v2]
        #elif (model_shape == V2_SHAPE):
        #    results = [e + ",v2" for e in emb_v2] + [e + ",v1" for e in emb_v1]
        #else:
        #    raise AttributeError # Fallback to old method
        results = sorted([e + ",v1" for e in emb_v1] + [e + ",v2" for e in emb_v2], key=lambda x: x.lower())
    except AttributeError:
        print("tag_autocomplete_helper: Old webui version or unrecognized model shape, using fallback for embedding completion.")
        # Get a list of all embeddings in the folder
        all_embeds = [str(e.relative_to(EMB_PATH)) for e in EMB_PATH.rglob("*") if e.suffix in {".bin", ".pt", ".png",'.webp', '.jxl', '.avif'}]
        # Remove files with a size of 0
        all_embeds = [e for e in all_embeds if EMB_PATH.joinpath(e).stat().st_size > 0]
        # Remove file extensions
        all_embeds = [e[:e.rfind('.')] for e in all_embeds]
        results = [e + "," for e in all_embeds]

    write_to_temp_file('emb.txt', results)


def write_tag_base_path():
@@ -97,7 +177,10 @@ if not TEMP_PATH.exists():
# even if no wildcards or embeddings are found
write_to_temp_file('wc.txt', [])
write_to_temp_file('wce.txt', [])
write_to_temp_file('emb.txt', [])
write_to_temp_file('wcet.txt', [])
# Only reload embeddings if the file doesn't exist, since they are already re-written on model load
if not TEMP_PATH.joinpath("emb.txt").exists():
    write_to_temp_file('emb.txt', [])

# Write wildcards to wc.txt if found
if WILDCARD_PATH.exists():
@@ -110,24 +193,30 @@ if WILDCARD_EXT_PATHS is not None:
    wildcards_ext = get_ext_wildcards()
    if wildcards_ext:
        write_to_temp_file('wce.txt', wildcards_ext)
    # Write yaml extension wildcards to wcet.txt if found
    wildcards_yaml_ext = get_ext_wildcard_tags()
    if wildcards_yaml_ext:
        write_to_temp_file('wcet.txt', wildcards_yaml_ext)

# Write embeddings to emb.txt if found
if EMB_PATH.exists():
    embeddings = get_embeddings()
    if embeddings:
        write_to_temp_file('emb.txt', embeddings)
# Get embeddings after the model loaded callback
script_callbacks.on_model_loaded(get_embeddings)


# Register autocomplete options
def on_ui_settings():
    TAC_SECTION = ("tac", "Tag Autocomplete")
    # Main tag file
    shared.opts.add_option("tac_tagFile", shared.OptionInfo("danbooru.csv", "Tag filename", gr.Dropdown, lambda: {"choices": csv_files}, refresh=update_tag_files, section=TAC_SECTION))
    shared.opts.add_option("tac_tagFile", shared.OptionInfo("danbooru.csv", "Tag filename", gr.Dropdown, lambda: {"choices": csv_files_withnone}, refresh=update_tag_files, section=TAC_SECTION))
    # Active in settings
    shared.opts.add_option("tac_active", shared.OptionInfo(True, "Enable Tag Autocompletion", section=TAC_SECTION))
    shared.opts.add_option("tac_activeIn.txt2img", shared.OptionInfo(True, "Active in txt2img (Requires restart)", section=TAC_SECTION))
    shared.opts.add_option("tac_activeIn.img2img", shared.OptionInfo(True, "Active in img2img (Requires restart)", section=TAC_SECTION))
    shared.opts.add_option("tac_activeIn.negativePrompts", shared.OptionInfo(True, "Active in negative prompts (Requires restart)", section=TAC_SECTION))
    shared.opts.add_option("tac_activeIn.thirdParty", shared.OptionInfo(True, "Active in third party textboxes [Dataset Tag Editor] (Requires restart)", section=TAC_SECTION))
    shared.opts.add_option("tac_activeIn.modelList", shared.OptionInfo("", "List of model names (with file extension) or their hashes to use as black/whitelist, separated by commas.", section=TAC_SECTION))
    shared.opts.add_option("tac_activeIn.modelListMode", shared.OptionInfo("Blacklist", "Mode to use for model list", gr.Dropdown, lambda: {"choices": ["Blacklist","Whitelist"]}, section=TAC_SECTION))
    # Results related settings
    shared.opts.add_option("tac_maxResults", shared.OptionInfo(5, "Maximum results", section=TAC_SECTION))
    shared.opts.add_option("tac_showAllResults", shared.OptionInfo(False, "Show all results", section=TAC_SECTION))
@@ -135,6 +224,7 @@ def on_ui_settings():
    shared.opts.add_option("tac_delayTime", shared.OptionInfo(100, "Time in ms to wait before triggering completion again (Requires restart)", section=TAC_SECTION))
    shared.opts.add_option("tac_useWildcards", shared.OptionInfo(True, "Search for wildcards", section=TAC_SECTION))
    shared.opts.add_option("tac_useEmbeddings", shared.OptionInfo(True, "Search for embeddings", section=TAC_SECTION))
    shared.opts.add_option("tac_showWikiLinks", shared.OptionInfo(False, "Show '?' next to tags, linking to its Danbooru or e621 wiki page (Warning: This is an external site and very likely contains NSFW examples!)", section=TAC_SECTION))
    # Insertion related settings
    shared.opts.add_option("tac_replaceUnderscores", shared.OptionInfo(True, "Replace underscores with spaces on insertion", section=TAC_SECTION))
    shared.opts.add_option("tac_escapeParentheses", shared.OptionInfo(True, "Escape parentheses on insertion", section=TAC_SECTION))
@@ -147,7 +237,7 @@ def on_ui_settings():
    shared.opts.add_option("tac_translation.oldFormat", shared.OptionInfo(False, "Translation file uses old 3-column translation format instead of the new 2-column one", section=TAC_SECTION))
    shared.opts.add_option("tac_translation.searchByTranslation", shared.OptionInfo(True, "Search by translation", section=TAC_SECTION))
    # Extra file settings
    shared.opts.add_option("tac_extra.extraFile", shared.OptionInfo("None", "Extra filename", gr.Dropdown, lambda: {"choices": csv_files_withnone}, refresh=update_tag_files, section=TAC_SECTION))
    shared.opts.add_option("tac_extra.extraFile", shared.OptionInfo("None", "Extra filename (do not use e621.csv here!)", gr.Dropdown, lambda: {"choices": csv_files_withnone}, refresh=update_tag_files, section=TAC_SECTION))
    shared.opts.add_option("tac_extra.onlyAliasExtraFile", shared.OptionInfo(False, "Extra file in alias only format", section=TAC_SECTION))

script_callbacks.on_ui_settings(on_ui_settings)
script_callbacks.on_ui_settings(on_ui_settings)