Mirror of https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git, synced 2026-01-27 03:29:55 +00:00
Compare commits

66 commits, listed from 76bd983ba3 to 6deefda279.
.gitignore (vendored)
@@ -1 +1,2 @@
tags/temp/
__pycache__/
README.md

@@ -14,6 +14,7 @@ Since some Stable Diffusion models were trained using this information, for example
You can install it using the inbuilt available extensions list, clone the files manually as described [below](#installation), or use a pre-packaged version from [Releases](https://github.com/DominikDoom/a1111-sd-webui-tagcomplete/releases).

## Common Problems & Known Issues:
- Depending on your browser settings, sometimes an old version of the script can get cached. Try `CTRL+F5` to force-reload the site without cache if e.g. a new feature doesn't appear for you after an update.
- If `replaceUnderscores` is active, the script will currently only partially replace edited tags containing multiple words in brackets.
  For example, when editing `atago (azur lane)`, it would be replaced with e.g. `taihou (azur lane), lane)`, since the script currently doesn't see the second part of the bracket as belonging to the same tag. So in those cases you should delete the old tag beforehand.
@@ -41,8 +42,8 @@ git clone "https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git" extens

Or create a folder there manually and place the `javascript`, `scripts` and `tags` folders in it.

### In the root folder (old)
Copy the `javascript`, `scripts` and `tags` folders into your web UI installation root. It will run automatically the next time the web UI is started.
### In the root folder (legacy)
This installation method is for old webui versions pre-extension system; it will not work on current versions!

---
README (Chinese translation)

@@ -39,8 +39,8 @@ git clone "https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git" extens

Or manually create a folder there and place the `javascript`, `scripts` and `tags` folders inside it.

### In the root folder (old method)
Simply copy the `javascript`, `scripts` and `tags` folders into your web UI installation root. It will start automatically the next time the web UI is launched.
### In the root folder (outdated method)
This installation method is for old webui versions from before the extension system was added; it will not work on current versions.

---
In both configurations, the tags folder contains `colors.json` and the tag data the script uses for autocompletion.
javascript/__globals.js (new file)
@@ -0,0 +1,51 @@
// Core components
var CFG = null;
var tagBasePath = "";

// Tag completion data loaded from files
var allTags = [];
var translations = new Map();
var extras = [];
// Same for tag-likes
var wildcardFiles = [];
var wildcardExtFiles = [];
var yamlWildcards = [];
var embeddings = [];
var hypernetworks = [];
var loras = [];

// Selected model info for black/whitelisting
var currentModelHash = "";
var currentModelName = "";

// Current results
var results = [];
var resultCount = 0;

// Relevant for parsing
var previousTags = [];
var tagword = "";
var originalTagword = "";
let hideBlocked = false;

// Tag selection for keyboard navigation
var selectedTag = null;
var oldSelectedTag = null;

// UMI
var umiPreviousTags = [];

/// Extendability system:
/// Provides "queues" for other files of the script (or really any js)
/// to add functions to be called at certain points in the script.
/// Similar to a callback system, but primitive.

// Queues
const QUEUE_AFTER_INSERT = [];
const QUEUE_AFTER_SETUP = [];
const QUEUE_FILE_LOAD = [];
const QUEUE_AFTER_CONFIG_CHANGE = [];
const QUEUE_SANITIZE = [];

// List of parsers to try
const PARSERS = [];
javascript/_baseParser.js (new file)
@@ -0,0 +1,21 @@
class FunctionNotOverriddenError extends Error {
    constructor(message = "", ...args) {
        super(message, ...args);
        this.message = message + " is an abstract base function and must be overwritten.";
    }
}

class BaseTagParser {
    triggerCondition = null;

    constructor(triggerCondition) {
        if (new.target === BaseTagParser) {
            throw new TypeError("Cannot construct abstract BaseTagParser directly");
        }
        this.triggerCondition = triggerCondition;
    }

    parse() {
        throw new FunctionNotOverriddenError("parse()");
    }
}
javascript/_result.js (new file)
@@ -0,0 +1,32 @@
// Result data type for cleaner use of optional completion result properties

// Type enum
const ResultType = Object.freeze({
    "tag": 1,
    "extra": 2,
    "embedding": 3,
    "wildcardTag": 4,
    "wildcardFile": 5,
    "yamlWildcard": 6,
    "hypernetwork": 7,
    "lora": 8
});

// Class to hold result data and annotations to make it clearer to use
class AutocompleteResult {
    // Main properties
    text = "";
    type = ResultType.tag;

    // Additional info, only used in some cases
    category = null;
    count = null;
    aliases = null;
    meta = null;

    // Constructor
    constructor(text, type) {
        this.text = text;
        this.type = type;
    }
}
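To make the relationship between these base pieces concrete, here is a minimal sketch of what an extension file looks like. The parser name, trigger prefix, and result are invented for illustration; the real extension files below follow this exact pattern.

// Hypothetical parser: triggers when the current tag word starts with "$"
const EXAMPLE_TRIGGER = () => tagword.startsWith("$");

class ExampleParser extends BaseTagParser {
    parse() {
        // Return a single dummy completion for whatever follows the "$"
        let result = new AutocompleteResult(tagword.substring(1), ResultType.tag);
        result.meta = "Example";
        return [result];
    }
}

// Register the parser so processParsers() (defined in _utils.js below) can find it
PARSERS.push(new ExampleParser(EXAMPLE_TRIGGER));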
javascript/_utils.js (new file)
@@ -0,0 +1,130 @@
// Utility functions for tag autocomplete

// Parse the CSV file into a 2D array. Doesn't use regex, so it is very lightweight.
function parseCSV(str) {
    var arr = [];
    var quote = false; // 'true' means we're inside a quoted field

    // Iterate over each character, keep track of current row and column (of the returned array)
    for (var row = 0, col = 0, c = 0; c < str.length; c++) {
        var cc = str[c], nc = str[c + 1]; // Current character, next character
        arr[row] = arr[row] || []; // Create a new row if necessary
        arr[row][col] = arr[row][col] || ''; // Create a new column (start with empty string) if necessary

        // If the current character is a quotation mark, and we're inside a
        // quoted field, and the next character is also a quotation mark,
        // add a quotation mark to the current column and skip the next character
        if (cc == '"' && quote && nc == '"') { arr[row][col] += cc; ++c; continue; }

        // If it's just one quotation mark, begin/end quoted field
        if (cc == '"') { quote = !quote; continue; }

        // If it's a comma and we're not in a quoted field, move on to the next column
        if (cc == ',' && !quote) { ++col; continue; }

        // If it's a newline (CRLF) and we're not in a quoted field, skip the next character
        // and move on to the next row and move to column 0 of that new row
        if (cc == '\r' && nc == '\n' && !quote) { ++row; col = 0; ++c; continue; }

        // If it's a newline (LF or CR) and we're not in a quoted field,
        // move on to the next row and move to column 0 of that new row
        if (cc == '\n' && !quote) { ++row; col = 0; continue; }
        if (cc == '\r' && !quote) { ++row; col = 0; continue; }

        // Otherwise, append the current character to the current column
        arr[row][col] += cc;
    }
    return arr;
}

// Load file
async function readFile(filePath, json = false, cache = false) {
    if (!cache)
        filePath += `?${new Date().getTime()}`;

    let response = await fetch(`file=${filePath}`);

    if (response.status != 200) {
        console.error(`Error loading file "${filePath}": ` + response.status, response.statusText);
        return null;
    }

    if (json)
        return await response.json();
    else
        return await response.text();
}

// Load CSV
async function loadCSV(path) {
    let text = await readFile(path);
    return parseCSV(text);
}

// Debounce function to prevent spamming the autocomplete function
var dbTimeOut;
const debounce = (func, wait = 300) => {
    return function (...args) {
        if (dbTimeOut) {
            clearTimeout(dbTimeOut);
        }

        dbTimeOut = setTimeout(() => {
            func.apply(this, args);
        }, wait);
    }
}

// Difference function to fix duplicates not being seen as changes in normal filter
function difference(a, b) {
    if (a.length == 0) {
        return b;
    }
    if (b.length == 0) {
        return a;
    }

    return [...b.reduce((acc, v) => acc.set(v, (acc.get(v) || 0) - 1),
        a.reduce((acc, v) => acc.set(v, (acc.get(v) || 0) + 1), new Map())
    )].reduce((acc, [v, count]) => acc.concat(Array(Math.abs(count)).fill(v)), []);
}

function escapeRegExp(string) {
    return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
}

function escapeHTML(unsafeText) {
    let div = document.createElement('div');
    div.textContent = unsafeText;
    return div.innerHTML;
}

// Queue calling function to process global queues
async function processQueue(queue, context, ...args) {
    for (let i = 0; i < queue.length; i++) {
        await queue[i].call(context, ...args);
    }
}

// The same but with return values
async function processQueueReturn(queue, context, ...args) {
    let queueReturns = [];
    for (let i = 0; i < queue.length; i++) {
        let returnValue = await queue[i].call(context, ...args);
        if (returnValue)
            queueReturns.push(returnValue);
    }
    return queueReturns;
}

// Specific to tag completion parsers
async function processParsers(textArea, prompt) {
    // Get all parsers that have a successful trigger condition
    let matchingParsers = PARSERS.filter(parser => parser.triggerCondition());
    // Guard condition
    if (matchingParsers.length === 0) {
        return null;
    }

    let parseFunctions = matchingParsers.map(parser => parser.parse);
    // Process them and return the results
    return await processQueueReturn(parseFunctions, null, textArea, prompt);
}
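A few worked examples of the less obvious helpers above (the inputs are made up):

// parseCSV handles quoted fields and doubled quotation marks
parseCSV('tag,"a,b"\n"say ""hi""",2');
// -> [["tag", "a,b"], ['say "hi"', "2"]]

// difference() is multiset-aware, so duplicate-count changes are detected
difference(["a", "b", "b"], ["a", "b"]); // -> ["b"]
difference(["a"], ["a"]);                // -> []

// debounce() delays a call until input has paused for `wait` milliseconds
const onInput = debounce(text => console.log("complete:", text), 100);
onInput("1gi"); onInput("1girl"); // only the second call actually runs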
58
javascript/ext_embeddings.js
Normal file
58
javascript/ext_embeddings.js
Normal file
@@ -0,0 +1,58 @@
|
||||
const EMB_REGEX = /<(?!l:|h:)[^,> ]*>?/g;
|
||||
const EMB_TRIGGER = () => CFG.useEmbeddings && tagword.match(EMB_REGEX);
|
||||
|
||||
class EmbeddingParser extends BaseTagParser {
|
||||
parse() {
|
||||
// Show embeddings
|
||||
let tempResults = [];
|
||||
if (tagword !== "<" && tagword !== "<e:") {
|
||||
let searchTerm = tagword.replace("<e:", "").replace("<", "");
|
||||
let versionString;
|
||||
if (searchTerm.startsWith("v1") || searchTerm.startsWith("v2")) {
|
||||
versionString = searchTerm.slice(0, 2);
|
||||
searchTerm = searchTerm.slice(2);
|
||||
}
|
||||
if (versionString)
|
||||
tempResults = embeddings.filter(x => x[0].toLowerCase().includes(searchTerm) && x[1] && x[1] === versionString); // Filter by tagword
|
||||
else
|
||||
tempResults = embeddings.filter(x => x[0].toLowerCase().includes(searchTerm)); // Filter by tagword
|
||||
} else {
|
||||
tempResults = embeddings;
|
||||
}
|
||||
|
||||
// Add final results
|
||||
let finalResults = [];
|
||||
tempResults.forEach(t => {
|
||||
let result = new AutocompleteResult(t[0].trim(), ResultType.embedding)
|
||||
result.meta = t[1] + " Embedding";
|
||||
finalResults.push(result);
|
||||
});
|
||||
|
||||
return finalResults;
|
||||
}
|
||||
}
|
||||
|
||||
async function load() {
|
||||
if (embeddings.length === 0) {
|
||||
try {
|
||||
embeddings = (await readFile(`${tagBasePath}/temp/emb.txt`)).split("\n")
|
||||
.filter(x => x.trim().length > 0) // Remove empty lines
|
||||
.map(x => x.trim().split(",")); // Split into name, version type pairs
|
||||
} catch (e) {
|
||||
console.error("Error loading embeddings.txt: " + e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function sanitize(tagType, text) {
|
||||
if (tagType === ResultType.embedding) {
|
||||
return text.replace(/^.*?: /g, "");
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
PARSERS.push(new EmbeddingParser(EMB_TRIGGER));
|
||||
|
||||
// Add our utility functions to their respective queues
|
||||
QUEUE_FILE_LOAD.push(load);
|
||||
QUEUE_SANITIZE.push(sanitize);
|
||||
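A worked example of the version-prefix handling above, with made-up emb.txt entries `EasyNegative,v1` and `negEmbed,v2`: typing `<e:v2neg` splits the search term into version `v2` and name `neg`, so `negEmbed` is suggested while `EasyNegative` is filtered out despite containing `neg`, because its version is `v1`.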
javascript/ext_hypernets.js (new file)
@@ -0,0 +1,50 @@
const HYP_REGEX = /<(?!e:|l:)[^,> ]*>?/g;
const HYP_TRIGGER = () => CFG.useHypernetworks && tagword.match(HYP_REGEX);

class HypernetParser extends BaseTagParser {
    parse() {
        // Show hypernetworks
        let tempResults = [];
        if (tagword !== "<" && tagword !== "<h:") {
            let searchTerm = tagword.replace("<h:", "").replace("<", "");
            tempResults = hypernetworks.filter(x => x.toLowerCase().includes(searchTerm)); // Filter by tagword
        } else {
            tempResults = hypernetworks;
        }

        // Add final results
        let finalResults = [];
        tempResults.forEach(t => {
            let result = new AutocompleteResult(t.trim(), ResultType.hypernetwork);
            result.meta = "Hypernetwork";
            finalResults.push(result);
        });

        return finalResults;
    }
}

async function load() {
    if (hypernetworks.length === 0) {
        try {
            hypernetworks = (await readFile(`${tagBasePath}/temp/hyp.txt`)).split("\n")
                .filter(x => x.trim().length > 0) // Remove empty lines
                .map(x => x.trim()); // Remove carriage returns and padding if it exists
        } catch (e) {
            console.error("Error loading hypernetworks.txt: " + e);
        }
    }
}

function sanitize(tagType, text) {
    if (tagType === ResultType.hypernetwork) {
        return `<hypernet:${text}:${CFG.extraNetworksDefaultMultiplier}>`;
    }
    return null;
}

PARSERS.push(new HypernetParser(HYP_TRIGGER));

// Add our utility functions to their respective queues
QUEUE_FILE_LOAD.push(load);
QUEUE_SANITIZE.push(sanitize);
javascript/ext_loras.js (new file)
@@ -0,0 +1,50 @@
const LORA_REGEX = /<(?!e:|h:)[^,> ]*>?/g;
const LORA_TRIGGER = () => CFG.useLoras && tagword.match(LORA_REGEX);

class LoraParser extends BaseTagParser {
    parse() {
        // Show lora
        let tempResults = [];
        if (tagword !== "<" && tagword !== "<l:") {
            let searchTerm = tagword.replace("<l:", "").replace("<", "");
            tempResults = loras.filter(x => x.toLowerCase().includes(searchTerm)); // Filter by tagword
        } else {
            tempResults = loras;
        }

        // Add final results
        let finalResults = [];
        tempResults.forEach(t => {
            let result = new AutocompleteResult(t.trim(), ResultType.lora);
            result.meta = "Lora";
            finalResults.push(result);
        });

        return finalResults;
    }
}

async function load() {
    if (loras.length === 0) {
        try {
            loras = (await readFile(`${tagBasePath}/temp/lora.txt`)).split("\n")
                .filter(x => x.trim().length > 0) // Remove empty lines
                .map(x => x.trim()); // Remove carriage returns and padding if it exists
        } catch (e) {
            console.error("Error loading lora.txt: " + e);
        }
    }
}

function sanitize(tagType, text) {
    if (tagType === ResultType.lora) {
        return `<lora:${text}:${CFG.extraNetworksDefaultMultiplier}>`;
    }
    return null;
}

PARSERS.push(new LoraParser(LORA_TRIGGER));

// Add our utility functions to their respective queues
QUEUE_FILE_LOAD.push(load);
QUEUE_SANITIZE.push(sanitize);
javascript/ext_umi.js (new file)
@@ -0,0 +1,240 @@
const UMI_PROMPT_REGEX = /<[^\s]*?\[[^,<>]*[\]|]?>?/gi;
const UMI_TAG_REGEX = /(?:\[|\||--)([^<>\[\]\-|]+)/gi;

const UMI_TRIGGER = () => CFG.useWildcards && [...tagword.matchAll(UMI_PROMPT_REGEX)].length > 0;

class UmiParser extends BaseTagParser {
    parse(textArea, prompt) {
        // We are in a UMI yaml tag definition, parse further
        let umiSubPrompts = [...prompt.matchAll(UMI_PROMPT_REGEX)];

        let umiTags = [];
        let umiTagsWithOperators = [];

        const insertAt = (str, char, pos) => str.slice(0, pos) + char + str.slice(pos);

        umiSubPrompts.forEach(umiSubPrompt => {
            umiTags = umiTags.concat([...umiSubPrompt[0].matchAll(UMI_TAG_REGEX)].map(x => x[1].toLowerCase()));

            const start = umiSubPrompt.index;
            const end = umiSubPrompt.index + umiSubPrompt[0].length;
            if (textArea.selectionStart >= start && textArea.selectionStart <= end) {
                umiTagsWithOperators = insertAt(umiSubPrompt[0], '###', textArea.selectionStart - start);
            }
        });

        // Safety check, since UMI parsing sometimes seems to trigger outside of a UMI subprompt and thus fails
        if (umiTagsWithOperators.length === 0) {
            return null;
        }

        const promptSplitToTags = umiTagsWithOperators.replace(']###[', '][').split("][");

        const clean = (str) => str
            .replaceAll('>', '')
            .replaceAll('<', '')
            .replaceAll('[', '')
            .replaceAll(']', '')
            .trim();

        const matches = promptSplitToTags.reduce((acc, curr) => {
            let isOptional = curr.includes("|");
            let isNegative = curr.startsWith("--");
            let out;
            if (isOptional) {
                out = {
                    hasCursor: curr.includes("###"),
                    tags: clean(curr).split('|').map(x => ({
                        hasCursor: x.includes("###"),
                        isNegative: x.startsWith("--"),
                        tag: clean(x).replaceAll("###", '').replaceAll("--", '')
                    }))
                };
                acc.optional.push(out);
                acc.all.push(...out.tags.map(x => x.tag));
            } else if (isNegative) {
                out = {
                    hasCursor: curr.includes("###"),
                    tags: clean(curr).replaceAll("###", '').split('|'),
                };
                out.tags = out.tags.map(x => x.startsWith("--") ? x.substring(2) : x);
                acc.negative.push(out);
                acc.all.push(...out.tags);
            } else {
                out = {
                    hasCursor: curr.includes("###"),
                    tags: clean(curr).replaceAll("###", '').split('|'),
                };
                acc.positive.push(out);
                acc.all.push(...out.tags);
            }
            return acc;
        }, { positive: [], negative: [], optional: [], all: [] });

        //console.log({ matches })

        const filteredWildcards = (tagword) => {
            const wildcards = yamlWildcards.filter(x => {
                let tags = x[1];
                const matchesNeg =
                    matches.negative.length === 0
                    || matches.negative.every(x =>
                        x.hasCursor
                        || x.tags.every(t => !tags[t])
                    );
                if (!matchesNeg) return false;
                const matchesPos =
                    matches.positive.length === 0
                    || matches.positive.every(x =>
                        x.hasCursor
                        || x.tags.every(t => tags[t])
                    );
                if (!matchesPos) return false;
                const matchesOpt =
                    matches.optional.length === 0
                    || matches.optional.some(x =>
                        x.tags.some(t =>
                            t.hasCursor
                            || t.isNegative
                                ? !tags[t.tag]
                                : tags[t.tag]
                        ));
                if (!matchesOpt) return false;
                return true;
            }).reduce((acc, val) => {
                Object.keys(val[1]).forEach(tag => acc[tag] = acc[tag] + 1 || 1);
                return acc;
            }, {});

            return Object.entries(wildcards)
                .sort((a, b) => b[1] - a[1])
                .filter(x =>
                    x[0] === tagword
                    || !matches.all.includes(x[0])
                );
        }

        if (umiTags.length > 0) {
            // Get difference for subprompt
            let tagCountChange = umiTags.length - umiPreviousTags.length;
            let diff = difference(umiTags, umiPreviousTags);
            umiPreviousTags = umiTags;

            // Show-all condition
            let showAll = tagword.endsWith("[") || tagword.endsWith("[--") || tagword.endsWith("|");

            // Exit early if the user closed the bracket manually
            if ((!diff || diff.length === 0 || (diff.length === 1 && tagCountChange < 0)) && !showAll) {
                if (!hideBlocked) hideResults(textArea);
                return;
            }

            let umiTagword = diff[0] || '';
            let tempResults = [];
            if (umiTagword && umiTagword.length > 0) {
                umiTagword = umiTagword.toLowerCase().replace(/[\n\r]/g, "");
                originalTagword = tagword;
                tagword = umiTagword;
                let filteredWildcardsSorted = filteredWildcards(umiTagword);
                let searchRegex = new RegExp(`(^|[^a-zA-Z])${escapeRegExp(umiTagword)}`, 'i');
                let baseFilter = x => x[0].toLowerCase().search(searchRegex) > -1;
                let spaceIncludeFilter = x => x[0].toLowerCase().replaceAll(" ", "_").search(searchRegex) > -1;
                tempResults = filteredWildcardsSorted.filter(x => baseFilter(x) || spaceIncludeFilter(x)); // Filter by tagword

                // Add final results
                let finalResults = [];
                tempResults.forEach(t => {
                    let result = new AutocompleteResult(t[0].trim(), ResultType.yamlWildcard);
                    result.count = t[1];
                    finalResults.push(result);
                });

                return finalResults;
            } else if (showAll) {
                let filteredWildcardsSorted = filteredWildcards("");

                // Add final results
                let finalResults = [];
                filteredWildcardsSorted.forEach(t => {
                    let result = new AutocompleteResult(t[0].trim(), ResultType.yamlWildcard);
                    result.count = t[1];
                    finalResults.push(result);
                });

                originalTagword = tagword;
                tagword = "";
                return finalResults;
            }
        } else {
            let filteredWildcardsSorted = filteredWildcards("");

            // Add final results
            let finalResults = [];
            filteredWildcardsSorted.forEach(t => {
                let result = new AutocompleteResult(t[0].trim(), ResultType.yamlWildcard);
                result.count = t[1];
                finalResults.push(result);
            });

            originalTagword = tagword;
            tagword = "";
            return finalResults;
        }
    }
}

function updateUmiTags(tagType, sanitizedText, newPrompt, textArea) {
    // If it was a yaml wildcard, also update the umiPreviousTags
    if (tagType === ResultType.yamlWildcard && originalTagword.length > 0) {
        let umiSubPrompts = [...newPrompt.matchAll(UMI_PROMPT_REGEX)];

        let umiTags = [];
        umiSubPrompts.forEach(umiSubPrompt => {
            umiTags = umiTags.concat([...umiSubPrompt[0].matchAll(UMI_TAG_REGEX)].map(x => x[1].toLowerCase()));
        });

        umiPreviousTags = umiTags;

        hideResults(textArea);

        return true;
    }
    return false;
}

async function load() {
    if (yamlWildcards.length === 0) {
        try {
            let yamlTags = (await readFile(`${tagBasePath}/temp/wcet.txt`)).split("\n");
            // Split into tag, count pairs
            yamlWildcards = yamlTags.map(x => x
                .trim()
                .split(","))
                .map(([i, ...rest]) => [
                    i,
                    rest.reduce((a, b) => {
                        a[b.toLowerCase()] = true;
                        return a;
                    }, {}),
                ]);
        } catch (e) {
            console.error("Error loading yaml wildcards: " + e);
        }
    }
}

function sanitize(tagType, text) {
    // Replace underscores only if the yaml tag is not using them
    if (tagType === ResultType.yamlWildcard && !yamlWildcards.includes(text)) {
        return text.replaceAll("_", " ");
    }
    return null;
}

// Add UMI parser
PARSERS.push(new UmiParser(UMI_TRIGGER));

// Add our utility functions to their respective queues
QUEUE_FILE_LOAD.push(load);
QUEUE_SANITIZE.push(sanitize);
QUEUE_AFTER_INSERT.push(updateUmiTags);
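As a quick, made-up illustration of what UMI_TAG_REGEX extracts (tags are captured after each `[`, `|`, or `--` operator):

const sub = "<file[sfw][red|--blue][--monochrome]>"; // hypothetical UMI subprompt
[...sub.matchAll(UMI_TAG_REGEX)].map(m => m[1]);
// -> ["sfw", "red", "blue", "monochrome"]

The parse() above then sorts such captures into positive, negative, and optional groups based on the surrounding operators.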
javascript/ext_wildcards.js (new file)
@@ -0,0 +1,123 @@
// Regex
const WC_REGEX = /\b__([^,]+)__([^, ]*)\b/g;

// Trigger conditions
const WC_TRIGGER = () => CFG.useWildcards && [...tagword.matchAll(WC_REGEX)].length > 0;
const WC_FILE_TRIGGER = () => CFG.useWildcards && (tagword.startsWith("__") && !tagword.endsWith("__") || tagword === "__");

class WildcardParser extends BaseTagParser {
    async parse() {
        // Show wildcards from a file with that name
        let wcMatch = [...tagword.matchAll(WC_REGEX)];
        let wcFile = wcMatch[0][1];
        let wcWord = wcMatch[0][2];

        // Look in normal wildcard files
        let wcFound = wildcardFiles.find(x => x[1].toLowerCase() === wcFile);
        // Use found wildcard file or look in external wildcard files
        let wcPair = wcFound || wildcardExtFiles.find(x => x[1].toLowerCase() === wcFile);

        let wildcards = (await readFile(`${wcPair[0]}/${wcPair[1]}.txt`)).split("\n")
            .filter(x => x.trim().length > 0 && !x.startsWith('#')); // Remove empty lines and comments

        let finalResults = [];
        let tempResults = wildcards.filter(x => (wcWord !== null && wcWord.length > 0) ? x.toLowerCase().includes(wcWord) : x); // Filter by tagword
        tempResults.forEach(t => {
            let result = new AutocompleteResult(t.trim(), ResultType.wildcardTag);
            result.meta = wcFile;
            finalResults.push(result);
        });

        return finalResults;
    }
}

class WildcardFileParser extends BaseTagParser {
    parse() {
        // Show available wildcard files
        let tempResults = [];
        if (tagword !== "__") {
            let lmb = (x) => x[1].toLowerCase().includes(tagword.replace("__", ""));
            tempResults = wildcardFiles.filter(lmb).concat(wildcardExtFiles.filter(lmb)); // Filter by tagword
        } else {
            tempResults = wildcardFiles.concat(wildcardExtFiles);
        }

        let finalResults = [];
        // Get final results
        tempResults.forEach(wcFile => {
            let result = new AutocompleteResult(wcFile[1].trim(), ResultType.wildcardFile);
            result.meta = "Wildcard file";
            finalResults.push(result);
        });

        return finalResults;
    }
}

async function load() {
    if (wildcardFiles.length === 0 && wildcardExtFiles.length === 0) {
        try {
            let wcFileArr = (await readFile(`${tagBasePath}/temp/wc.txt`)).split("\n");
            let wcBasePath = wcFileArr[0].trim(); // First line should be the base path
            wildcardFiles = wcFileArr.slice(1)
                .filter(x => x.trim().length > 0) // Remove empty lines
                .map(x => [wcBasePath, x.trim().replace(".txt", "")]); // Remove file extension & newlines

            // To support multiple sources, we need to separate them using the provided "-----" strings
            let wcExtFileArr = (await readFile(`${tagBasePath}/temp/wce.txt`)).split("\n");
            let splitIndices = [];
            for (let index = 0; index < wcExtFileArr.length; index++) {
                if (wcExtFileArr[index].trim() === "-----") {
                    splitIndices.push(index);
                }
            }
            // For each group, add them to the wildcardFiles array with the base path as the first element
            for (let i = 0; i < splitIndices.length; i++) {
                let start = splitIndices[i - 1] || 0;
                if (i > 0) start++; // Skip the "-----" line
                let end = splitIndices[i];

                let wcExtFile = wcExtFileArr.slice(start, end);
                let base = wcExtFile[0].trim() + "/";
                wcExtFile = wcExtFile.slice(1)
                    .filter(x => x.trim().length > 0) // Remove empty lines
                    .map(x => x.trim().replace(base, "").replace(".txt", "")); // Remove file extension & newlines

                wcExtFile = wcExtFile.map(x => [base, x]);
                wildcardExtFiles.push(...wcExtFile);
            }
        } catch (e) {
            console.error("Error loading wildcards: " + e);
        }
    }
}

function sanitize(tagType, text) {
    if (tagType === ResultType.wildcardFile) {
        return `__${text}__`;
    } else if (tagType === ResultType.wildcardTag) {
        return text.replace(/^.*?: /g, "");
    }
    return null;
}

function keepOpenIfWildcard(tagType, sanitizedText, newPrompt, textArea) {
    // If it's a wildcard, we want to keep the results open so the user can select another wildcard
    if (tagType === ResultType.wildcardFile) {
        hideBlocked = true;
        autocomplete(textArea, newPrompt, sanitizedText);
        setTimeout(() => { hideBlocked = false; }, 100);
        return true;
    }
    return false;
}

// Register the parsers
PARSERS.push(new WildcardParser(WC_TRIGGER));
PARSERS.push(new WildcardFileParser(WC_FILE_TRIGGER));

// Add our utility functions to their respective queues
QUEUE_FILE_LOAD.push(load);
QUEUE_SANITIZE.push(sanitize);
QUEUE_AFTER_INSERT.push(keepOpenIfWildcard);
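Based on this load() logic, the two temp files are expected to look roughly like the following (the paths are hypothetical). wc.txt starts with the base path and then lists files relative to it; wce.txt holds one group per external source, each starting with its base path, listing full file paths, and closed by a "-----" separator line:

wc.txt:
    scripts/wildcards
    artists.txt
    colors.txt

wce.txt:
    extensions/my-wildcards/wildcards
    extensions/my-wildcards/wildcards/artists.txt
    extensions/my-wildcards/wildcards/poses.txt
    -----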
File diff suppressed because it is too large.
scripts/tag_autocomplete_helper.py

@@ -3,8 +3,10 @@
import gradio as gr
from pathlib import Path
from modules import scripts, script_callbacks, shared
from modules import scripts, script_callbacks, shared, sd_hijack
import yaml
import time
import threading

# Webui root path
FILE_DIR = Path().absolute()

@@ -18,7 +20,12 @@ TAGS_PATH = Path(scripts.basedir()).joinpath('tags')
# The path to the folder containing the wildcards and embeddings
WILDCARD_PATH = FILE_DIR.joinpath('scripts/wildcards')
EMB_PATH = Path(shared.cmd_opts.embeddings_dir)
HYP_PATH = Path(shared.cmd_opts.hypernetwork_dir)

try:
    LORA_PATH = Path(shared.cmd_opts.lora_dir)
except AttributeError:
    LORA_PATH = None

def find_ext_wildcard_paths():
    """Returns the path to the extension wildcards folder"""

@@ -67,8 +74,11 @@ def get_ext_wildcard_tags():
        with open(path, encoding="utf8") as file:
            data = yaml.safe_load(file)
            for item in data:
                wildcard_tags[count] = ','.join(data[item]['Tags'])
                count += 1
                if data[item] and 'Tags' in data[item]:
                    wildcard_tags[count] = ','.join(data[item]['Tags'])
                    count += 1
                else:
                    print('Issue with tags found in ' + path.name + ' at item ' + item)
    except yaml.YAMLError as exc:
        print(exc)
    # Sort by count

@@ -78,9 +88,78 @@
        output.append(f"{tag},{count}")
    return output

def get_embeddings():
    """Returns a list of all embeddings"""
    return [str(e.relative_to(EMB_PATH)) for e in EMB_PATH.glob("**/*") if e.suffix in {".bin", ".pt", ".png"}]

def get_embeddings(sd_model):
    """Write a list of all embeddings with their version"""

    # Version constants
    V1_SHAPE = 768
    V2_SHAPE = 1024
    emb_v1 = []
    emb_v2 = []
    results = []

    try:
        # Get embedding dict from sd_hijack to separate v1/v2 embeddings
        emb_type_a = sd_hijack.model_hijack.embedding_db.word_embeddings
        emb_type_b = sd_hijack.model_hijack.embedding_db.skipped_embeddings
        # Get the shape of the first item in the dict
        emb_a_shape = -1
        emb_b_shape = -1
        if (len(emb_type_a) > 0):
            emb_a_shape = next(iter(emb_type_a.items()))[1].shape
        if (len(emb_type_b) > 0):
            emb_b_shape = next(iter(emb_type_b.items()))[1].shape

        # Add embeddings to the correct list
        if (emb_a_shape == V1_SHAPE):
            emb_v1 = list(emb_type_a.keys())
        elif (emb_a_shape == V2_SHAPE):
            emb_v2 = list(emb_type_a.keys())

        if (emb_b_shape == V1_SHAPE):
            emb_v1 = list(emb_type_b.keys())
        elif (emb_b_shape == V2_SHAPE):
            emb_v2 = list(emb_type_b.keys())

        # Get shape of current model
        #vec = sd_model.cond_stage_model.encode_embedding_init_text(",", 1)
        #model_shape = vec.shape[1]
        # Show relevant entries at the top
        #if (model_shape == V1_SHAPE):
        #    results = [e + ",v1" for e in emb_v1] + [e + ",v2" for e in emb_v2]
        #elif (model_shape == V2_SHAPE):
        #    results = [e + ",v2" for e in emb_v2] + [e + ",v1" for e in emb_v1]
        #else:
        #    raise AttributeError # Fallback to old method
        results = sorted([e + ",v1" for e in emb_v1] + [e + ",v2" for e in emb_v2], key=lambda x: x.lower())
    except AttributeError:
        print("tag_autocomplete_helper: Old webui version or unrecognized model shape, using fallback for embedding completion.")
        # Get a list of all embeddings in the folder
        all_embeds = [str(e.relative_to(EMB_PATH)) for e in EMB_PATH.rglob("*") if e.suffix in {".bin", ".pt", ".png", ".webp", ".jxl", ".avif"}]
        # Remove files with a size of 0
        all_embeds = [e for e in all_embeds if EMB_PATH.joinpath(e).stat().st_size > 0]
        # Remove file extensions
        all_embeds = [e[:e.rfind('.')] for e in all_embeds]
        results = [e + "," for e in all_embeds]

    write_to_temp_file('emb.txt', results)

def get_hypernetworks():
    """Write a list of all hypernetworks"""

    # Get a list of all hypernetworks in the folder
    all_hypernetworks = [str(h.name) for h in HYP_PATH.rglob("*") if h.suffix in {".pt"}]
    # Remove file extensions
    return [h[:h.rfind('.')] for h in all_hypernetworks]

def get_lora():
    """Write a list of all lora"""

    # Get a list of all lora in the folder
    all_lora = [str(l.name) for l in LORA_PATH.rglob("*") if l.suffix in {".safetensors", ".ckpt", ".pt"}]
    # Remove file extensions
    return [l[:l.rfind('.')] for l in all_lora]


def write_tag_base_path():

@@ -123,7 +202,11 @@ if not TEMP_PATH.exists():
write_to_temp_file('wc.txt', [])
write_to_temp_file('wce.txt', [])
write_to_temp_file('wcet.txt', [])
write_to_temp_file('emb.txt', [])
write_to_temp_file('hyp.txt', [])
write_to_temp_file('lora.txt', [])
# Only reload embeddings if the file doesn't exist, since they are already re-written on model load
if not TEMP_PATH.joinpath("emb.txt").exists():
    write_to_temp_file('emb.txt', [])

# Write wildcards to wc.txt if found
if WILDCARD_PATH.exists():

@@ -143,9 +226,18 @@ if WILDCARD_EXT_PATHS is not None:
# Write embeddings to emb.txt if found
if EMB_PATH.exists():
    embeddings = get_embeddings()
    if embeddings:
        write_to_temp_file('emb.txt', embeddings)
    # Get embeddings after the model loaded callback
    script_callbacks.on_model_loaded(get_embeddings)

if HYP_PATH.exists():
    hypernets = get_hypernetworks()
    if hypernets:
        write_to_temp_file('hyp.txt', hypernets)

if LORA_PATH is not None and LORA_PATH.exists():
    lora = get_lora()
    if lora:
        write_to_temp_file('lora.txt', lora)

# Register autocomplete options
def on_ui_settings():

@@ -158,6 +250,8 @@
    shared.opts.add_option("tac_activeIn.img2img", shared.OptionInfo(True, "Active in img2img (Requires restart)", section=TAC_SECTION))
    shared.opts.add_option("tac_activeIn.negativePrompts", shared.OptionInfo(True, "Active in negative prompts (Requires restart)", section=TAC_SECTION))
    shared.opts.add_option("tac_activeIn.thirdParty", shared.OptionInfo(True, "Active in third party textboxes [Dataset Tag Editor] (Requires restart)", section=TAC_SECTION))
    shared.opts.add_option("tac_activeIn.modelList", shared.OptionInfo("", "List of model names (with file extension) or their hashes to use as black/whitelist, separated by commas.", section=TAC_SECTION))
    shared.opts.add_option("tac_activeIn.modelListMode", shared.OptionInfo("Blacklist", "Mode to use for model list", gr.Dropdown, lambda: {"choices": ["Blacklist","Whitelist"]}, section=TAC_SECTION))
    # Results related settings
    shared.opts.add_option("tac_maxResults", shared.OptionInfo(5, "Maximum results", section=TAC_SECTION))
    shared.opts.add_option("tac_showAllResults", shared.OptionInfo(False, "Show all results", section=TAC_SECTION))

@@ -165,6 +259,9 @@
    shared.opts.add_option("tac_delayTime", shared.OptionInfo(100, "Time in ms to wait before triggering completion again (Requires restart)", section=TAC_SECTION))
    shared.opts.add_option("tac_useWildcards", shared.OptionInfo(True, "Search for wildcards", section=TAC_SECTION))
    shared.opts.add_option("tac_useEmbeddings", shared.OptionInfo(True, "Search for embeddings", section=TAC_SECTION))
    shared.opts.add_option("tac_useHypernetworks", shared.OptionInfo(True, "Search for hypernetworks", section=TAC_SECTION))
    shared.opts.add_option("tac_useLoras", shared.OptionInfo(True, "Search for Loras", section=TAC_SECTION))
    shared.opts.add_option("tac_showWikiLinks", shared.OptionInfo(False, "Show '?' next to tags, linking to its Danbooru or e621 wiki page (Warning: This is an external site and very likely contains NSFW examples!)", section=TAC_SECTION))
    # Insertion related settings
    shared.opts.add_option("tac_replaceUnderscores", shared.OptionInfo(True, "Replace underscores with spaces on insertion", section=TAC_SECTION))
    shared.opts.add_option("tac_escapeParentheses", shared.OptionInfo(True, "Escape parentheses on insertion", section=TAC_SECTION))

@@ -177,7 +274,7 @@
    shared.opts.add_option("tac_translation.oldFormat", shared.OptionInfo(False, "Translation file uses old 3-column translation format instead of the new 2-column one", section=TAC_SECTION))
    shared.opts.add_option("tac_translation.searchByTranslation", shared.OptionInfo(True, "Search by translation", section=TAC_SECTION))
    # Extra file settings
    shared.opts.add_option("tac_extra.extraFile", shared.OptionInfo("None", "Extra filename (do not use e621.csv here!)", gr.Dropdown, lambda: {"choices": csv_files_withnone}, refresh=update_tag_files, section=TAC_SECTION))
    shared.opts.add_option("tac_extra.onlyAliasExtraFile", shared.OptionInfo(False, "Extra file in alias only format", section=TAC_SECTION))
    shared.opts.add_option("tac_extra.extraFile", shared.OptionInfo("extra-quality-tags.csv", "Extra filename (for small sets of custom tags)", gr.Dropdown, lambda: {"choices": csv_files_withnone}, refresh=update_tag_files, section=TAC_SECTION))
    shared.opts.add_option("tac_extra.addMode", shared.OptionInfo("Insert before", "Mode to add the extra tags to the main tag list", gr.Dropdown, lambda: {"choices": ["Insert before","Insert after"]}, section=TAC_SECTION))

script_callbacks.on_ui_settings(on_ui_settings)
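With these changes, emb.txt ends up holding one `name,version` line per embedding (e.g. `EasyNegative,v1`, a made-up entry; in the fallback path the version suffix is empty, giving `name,`), which matches the `split(",")` format the EmbeddingParser in javascript/ext_embeddings.js expects.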
tags/Extra-quality-tags.csv (new file)
@@ -0,0 +1,6 @@
masterpiece,5,Quality tag,
best_quality,5,Quality tag,
high_quality,5,Quality tag,
normal_quality,5,Quality tag,
low_quality,5,Quality tag,
worst_quality,5,Quality tag,