Mirror of https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git (synced 2026-01-26 19:19:57 +00:00)
Compare commits
59 Commits
SHA1: 08d3436f3b, afa13306ef, a63ce64f4e, d37e37acfa, 7778142520, cde90c13c4, 231b121fe0, c659ed2155, 0a4c17cada, 6e65811d4a,
03673c060e, 1c11c4ad5a, f840586b6b, 886704e351, 41626d22c3, 5ef346cde3, edf76d9df2, 837dc39811, f1870b7e87, fcacf7dd66,
82f819f336, effda54526, 58804796f0, 668ca800b8, a7233a594f, 5ebe22ddfc, 44c5450b28, 5fd48f53de, 446ac14e7f, b3e71e840d,
998514bebb, 94ec8884c3, 1a9157fe6e, 5911248ab9, 1c693c0263, cb54b66eda, 92a937ad01, ba9dce8d90, 2622e1b596, b03b1a0211,
3e33169a3a, d8d991531a, f626b9453d, 5067afeee9, 018c6c8198, 2846d79b7d, 783a847978, 44effca702, 475ef59197, 3953260485,
46d07d703a, bd1dbe92c2, 66fa745d6f, 37b5dca66e, 5db035cc3a, 90cf3147fd, 4d4f23e551, 80b47c61bb, 57821aae6a
@@ -486,6 +486,7 @@ Example with Chinese translation:
## List of translations
- [🇨🇳 Chinese tags](https://github.com/DominikDoom/a1111-sd-webui-tagcomplete/discussions/23) by @HalfMAI, using machine translation and manual correction for the most common tags (uses legacy format)
- [🇨🇳 Chinese tags](https://github.com/sgmklp/tag-for-autocompletion-with-translation) by @sgmklp, smaller set of manual translations based on https://github.com/zcyzcy88/TagTable
- [🇯🇵 Japanese tags](https://github.com/DominikDoom/a1111-sd-webui-tagcomplete/discussions/265) by @applemango, both machine and human translations available

> ### 🫵 I need your help!
> Translations are a community effort. If you have translated a tag file or want to create one, please open a Pull Request or Issue so your link can be added here.

@@ -410,8 +410,9 @@ https://www.w3.org/TR/uievents-key/#named-key-attribute-value


## List of translations
- [🇨🇳 Chinese tags](https://github.com/DominikDoom/a1111-sd-webui-tagcomplete/discussions/23) by @HalfMAI, the most common tags corrected via machine translation and manual fixes (uses the legacy format)
- [🇨🇳 Chinese tags](https://github.com/sgmklp/tag-for-autocompletion-with-translation) by @sgmklp, a smaller set of manual translations based on [this table](https://github.com/zcyzcy88/TagTable)
- [🇨🇳 Chinese translation](https://github.com/DominikDoom/a1111-sd-webui-tagcomplete/discussions/23) by @HalfMAI, the most common tags corrected via machine translation and manual fixes (uses the legacy format)
- [🇨🇳 Chinese translation](https://github.com/sgmklp/tag-for-autocompletion-with-translation) by @sgmklp, a smaller set of manual translations based on [this table](https://github.com/zcyzcy88/TagTable)
- [🇯🇵 Japanese translation](https://github.com/DominikDoom/a1111-sd-webui-tagcomplete/discussions/265) by @applemango, both machine and human translations are available

> ### 🫵 I need your help!
> Translations are a community effort. If you have translated a tag file, or would like to create one, please open a Pull Request or Issue so your work can be added here.

@@ -13,6 +13,12 @@
You can download or copy the files following [the steps below](#installation), or use the prepackaged files from the [Releases](https://github.com/DominikDoom/a1111-sd-webui-tagcomplete/releases).

## FAQ & Known issues:
- Many Chinese users have reported that the JavaScript files of this and other extensions get blocked.
A common culprit is the IDM / Internet Download Manager browser add-on, which appears to block local file requests for security reasons.
If you have IDM installed, make sure to disable the following add-on while using the webui:



- When the `replaceUnderscores` option is enabled, the script only replaces part of a tag if that tag consists of multiple words. For example, editing `atago (azur lane)` by replacing `atago` with `taihou` through autocompletion results in `taihou (azur lane), lane)`, because the script does not treat the trailing part as belonging to the same tag.

## Demo & Screenshots

@@ -2,6 +2,7 @@
var TAC_CFG = null;
var tagBasePath = "";
var modelKeywordPath = "";
var tacSelfTrigger = false;

// Tag completion data loaded from files
var allTags = [];
@@ -18,6 +19,7 @@ var loras = [];
var lycos = [];
var modelKeywordDict = new Map();
var chants = [];
var styleNames = [];

// Selected model info for black/whitelisting
var currentModelHash = "";
@@ -36,6 +38,7 @@ let hideBlocked = false;
// Tag selection for keyboard navigation
var selectedTag = null;
var oldSelectedTag = null;
var resultCountBeforeNormalTags = 0;

// Lora keyword undo/redo history
var textBeforeKeywordInsertion = "";
@@ -12,7 +12,8 @@ const ResultType = Object.freeze({
"hypernetwork": 8,
"lora": 9,
"lyco": 10,
"chant": 11
"chant": 11,
"styleName": 12
});

// Class to hold result data and annotations to make it clearer to use
@@ -27,6 +28,7 @@ class AutocompleteResult {
aliases = null;
meta = null;
hash = null;
sortKey = null;

// Constructor
constructor(text, type) {
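Not part of the diff, just an illustrative sketch of how the new `styleName` result type and the new `sortKey` field are meant to be used together (the values here are made up):

```js
// Hypothetical values for illustration only
let styleResult = new AutocompleteResult("Cinematic Lighting", ResultType.styleName);
styleResult.meta = "Style";                  // shown in the result list
styleResult.sortKey = "cinematic lighting";  // consumed by getSortFunction() further down
```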
@@ -79,6 +79,13 @@ const thirdParty = {
"[id^=script_img2img_adetailer_ad_prompt] textarea",
"[id^=script_img2img_adetailer_ad_negative_prompt] textarea"
]
},
"deepdanbooru-object-recognition": {
"base": "#tab_deepdanboru_object_recg_tab",
"hasIds": false,
"selectors": [
"Found tags",
]
}
}

@@ -187,4 +194,4 @@ function getTextAreaIdentifier(textArea) {
break;
}
return modifier;
}
}
@@ -1,13 +1,14 @@
// Utility functions for tag autocomplete

// Parse the CSV file into a 2D array. Doesn't use regex, so it is very lightweight.
// We are ignoring newlines in quoted fields since we expect one-line entries and parsing would break for unclosed quotes otherwise
function parseCSV(str) {
var arr = [];
var quote = false; // 'true' means we're inside a quoted field
const arr = [];
let quote = false; // 'true' means we're inside a quoted field

// Iterate over each character, keep track of current row and column (of the returned array)
for (var row = 0, col = 0, c = 0; c < str.length; c++) {
var cc = str[c], nc = str[c + 1]; // Current character, next character
for (let row = 0, col = 0, c = 0; c < str.length; c++) {
let cc = str[c], nc = str[c+1]; // Current character, next character
arr[row] = arr[row] || []; // Create a new row if necessary
arr[row][col] = arr[row][col] || ''; // Create a new column (start with empty string) if necessary

@@ -22,14 +23,12 @@ function parseCSV(str) {
// If it's a comma and we're not in a quoted field, move on to the next column
if (cc == ',' && !quote) { ++col; continue; }

// If it's a newline (CRLF) and we're not in a quoted field, skip the next character
// and move on to the next row and move to column 0 of that new row
if (cc == '\r' && nc == '\n' && !quote) { ++row; col = 0; ++c; continue; }
// If it's a newline (CRLF), skip the next character and move on to the next row and move to column 0 of that new row
if (cc == '\r' && nc == '\n') { ++row; col = 0; ++c; quote = false; continue; }

// If it's a newline (LF or CR) and we're not in a quoted field,
// move on to the next row and move to column 0 of that new row
if (cc == '\n' && !quote) { ++row; col = 0; continue; }
if (cc == '\r' && !quote) { ++row; col = 0; continue; }
// If it's a newline (LF or CR) move on to the next row and move to column 0 of that new row
if (cc == '\n') { ++row; col = 0; quote = false; continue; }
if (cc == '\r') { ++row; col = 0; quote = false; continue; }

// Otherwise, append the current character to the current column
arr[row][col] += cc;
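As a rough, self-contained sketch of the quote handling described above (the escaped-quote branch sits outside this hunk, so its behavior here is an assumption), a single temp-file line would be split like this:

```js
// Simplified single-line variant of the parser above; illustration only.
function parseCsvLine(line) {
    const fields = [];
    let current = "";
    let quote = false;
    for (let c = 0; c < line.length; c++) {
        const cc = line[c], nc = line[c + 1];
        if (cc === '"' && quote && nc === '"') { current += cc; c++; continue; } // Escaped quote (assumed)
        if (cc === '"') { quote = !quote; continue; }                            // Enter/leave quoted field
        if (cc === ',' && !quote) { fields.push(current); current = ""; continue; }
        current += cc;
    }
    fields.push(current);
    return fields;
}

// A lora.txt-style entry: name, quoted sort key, hash (values are made up)
console.log(parseCsvLine('my_lora,"2023-10-01 12:00",abcd1234'));
// -> [ 'my_lora', '2023-10-01 12:00', 'abcd1234' ]
```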
@@ -41,7 +40,7 @@
async function readFile(filePath, json = false, cache = false) {
if (!cache)
filePath += `?${new Date().getTime()}`;


let response = await fetch(`file=${filePath}`);

if (response.status != 200) {
@@ -81,6 +80,17 @@ async function fetchAPI(url, json = true, cache = false) {
return await response.text();
}

async function postAPI(url, body) {
let response = await fetch(url, { method: "POST", body: body });

if (response.status != 200) {
console.error(`Error posting to API endpoint "${url}": ` + response.status, response.statusText);
return null;
}

return await response.json();
}

// Extra network preview thumbnails
async function getExtraNetworkPreviewURL(filename, type) {
const previewJSON = await fetchAPI(`tacapi/v1/thumb-preview/${filename}?type=${type}`, true, true);
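A hedged usage sketch for the two helpers above; the endpoint names are taken from elsewhere in this diff, and whether they respond depends on the Python side being loaded:

```js
// Illustration only; fetchAPI and postAPI are defined above.
async function exampleApiCalls() {
    // GET with JSON parsing and caching enabled
    const preview = await fetchAPI("tacapi/v1/thumb-preview/my_lora?type=lora", true, true);

    // POST returning parsed JSON, or null on any non-200 status
    const refreshed = await postAPI("tacapi/v1/refresh-temp-files", null);

    return { preview, refreshed };
}
```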
@@ -98,6 +108,29 @@ async function getExtraNetworkPreviewURL(filename, type) {
}
}

lastStyleRefresh = 0;
// Refresh style file if needed
async function refreshStyleNamesIfChanged() {
// Only refresh once per second
currentTimestamp = new Date().getTime();
if (currentTimestamp - lastStyleRefresh < 1000) return;
lastStyleRefresh = currentTimestamp;

const response = await fetch(`tacapi/v1/refresh-styles-if-changed?${new Date().getTime()}`)
if (response.status === 304) {
// Not modified
} else if (response.status === 200) {
// Reload
QUEUE_FILE_LOAD.forEach(async fn => {
if (fn.toString().includes("styleNames"))
await fn.call(null, true);
})
} else {
// Error
console.error(`Error refreshing styles.txt: ` + response.status, response.statusText);
}
}

// Debounce function to prevent spamming the autocomplete function
var dbTimeOut;
const debounce = (func, wait = 300) => {
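The once-per-second guard above is a plain timestamp throttle; a generic version of the same pattern (not part of the extension) looks like this:

```js
// Generic throttle-by-timestamp sketch; illustration only.
function makeThrottled(fn, interval = 1000) {
    let last = 0;
    return async (...args) => {
        const now = new Date().getTime();
        if (now - last < interval) return; // Skip calls inside the interval
        last = now;
        return await fn(...args);
    };
}

// e.g. const throttledRefresh = makeThrottled(refreshStyleNamesIfChanged, 1000);
```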
@@ -146,7 +179,7 @@ function flatten(obj, roots = [], sep = ".") {
|
||||
{}
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
|
||||
// Sliding window function to get possible combination groups of an array
|
||||
function toNgrams(inputArray, size) {
|
||||
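The hunk cuts off before the body of `toNgrams`; assuming the sliding-window behavior the comment describes, a call would look roughly like:

```js
// Assumed behavior, since the implementation is not shown in this hunk
toNgrams(["atago", "azur", "lane"], 2);
// -> [ ["atago", "azur"], ["azur", "lane"] ]
```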
@@ -200,6 +233,42 @@ function observeElement(element, property, callback, delay = 0) {
}
}

// Sort functions
function getSortFunction() {
let criterion = TAC_CFG.modelSortOrder || "Name";

const textSort = (a, b, reverse = false) => {
const textHolderA = a.type === ResultType.chant ? a.aliases : a.text;
const textHolderB = b.type === ResultType.chant ? b.aliases : b.text;

const aKey = a.sortKey || textHolderA;
const bKey = b.sortKey || textHolderB;
return reverse ? bKey.localeCompare(aKey) : aKey.localeCompare(bKey);
}
const numericSort = (a, b, reverse = false) => {
const noKey = reverse ? "-1" : Number.MAX_SAFE_INTEGER;
let aParsed = parseFloat(a.sortKey || noKey);
let bParsed = parseFloat(b.sortKey || noKey);

if (aParsed === bParsed) {
return textSort(a, b, false);
}

return reverse ? bParsed - aParsed : aParsed - bParsed;
}

return (a, b) => {
switch (criterion) {
case "Date Modified (newest first)":
return numericSort(a, b, true);
case "Date Modified (oldest first)":
return numericSort(a, b, false);
default:
return textSort(a, b);
}
}
}

// Queue calling function to process global queues
async function processQueue(queue, context, ...args) {
for (let i = 0; i < queue.length; i++) {
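An illustrative (not authoritative) use of `getSortFunction()`: with the sort order set to newest-first, the numeric `sortKey` values (here made-up mtime stamps) decide the order, and `textSort` only breaks ties:

```js
// Hypothetical setup for illustration only
TAC_CFG = { modelSortOrder: "Date Modified (newest first)" };

const older = new AutocompleteResult("old_lora", ResultType.lora);
older.sortKey = "1680000000";
const newer = new AutocompleteResult("new_lora", ResultType.lora);
newer.sortKey = "1700000000";

console.log([older, newer].sort(getSortFunction()).map(r => r.text));
// -> [ "new_lora", "old_lora" ]
```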
@@ -41,7 +41,7 @@ async function load() {

function sanitize(tagType, text) {
if (tagType === ResultType.chant) {
return text.replace(/^.*?: /g, "");
return text;
}
return null;
}
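For clarity, the behavioral difference in the hunk above on a hypothetical chant entry (the text is made up):

```js
const chantText = "Quality: masterpiece, best quality";
console.log(chantText.replace(/^.*?: /g, "")); // old behavior -> "masterpiece, best quality"
console.log(chantText);                        // new behavior -> "Quality: masterpiece, best quality"
```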
@@ -1,5 +1,5 @@
|
||||
const EMB_REGEX = /<(?!l:|h:|c:)[^,> ]*>?/g;
|
||||
const EMB_TRIGGER = () => TAC_CFG.useEmbeddings && tagword.match(EMB_REGEX);
|
||||
const EMB_TRIGGER = () => TAC_CFG.useEmbeddings && (tagword.match(EMB_REGEX) || TAC_CFG.includeEmbeddingsInNormalResults);
|
||||
|
||||
class EmbeddingParser extends BaseTagParser {
|
||||
parse() {
|
||||
@@ -11,12 +11,15 @@ class EmbeddingParser extends BaseTagParser {
|
||||
if (searchTerm.startsWith("v1") || searchTerm.startsWith("v2")) {
|
||||
versionString = searchTerm.slice(0, 2);
|
||||
searchTerm = searchTerm.slice(2);
|
||||
} else if (searchTerm.startsWith("vxl")) {
|
||||
versionString = searchTerm.slice(0, 3);
|
||||
searchTerm = searchTerm.slice(3);
|
||||
}
|
||||
|
||||
let filterCondition = x => x[0].toLowerCase().includes(searchTerm) || x[0].toLowerCase().replaceAll(" ", "_").includes(searchTerm);
|
||||
|
||||
if (versionString)
|
||||
tempResults = embeddings.filter(x => filterCondition(x) && x[1] && x[1] === versionString); // Filter by tagword
|
||||
tempResults = embeddings.filter(x => filterCondition(x) && x[2] && x[2].toLowerCase() === versionString.toLowerCase()); // Filter by tagword
|
||||
else
|
||||
tempResults = embeddings.filter(x => filterCondition(x)); // Filter by tagword
|
||||
} else {
|
||||
@@ -27,7 +30,8 @@ class EmbeddingParser extends BaseTagParser {
|
||||
let finalResults = [];
|
||||
tempResults.forEach(t => {
|
||||
let result = new AutocompleteResult(t[0].trim(), ResultType.embedding)
|
||||
result.meta = t[1] + " Embedding";
|
||||
result.sortKey = t[1];
|
||||
result.meta = t[2] + " Embedding";
|
||||
finalResults.push(result);
|
||||
});
|
||||
|
||||
@@ -38,9 +42,9 @@ class EmbeddingParser extends BaseTagParser {
|
||||
async function load() {
|
||||
if (embeddings.length === 0) {
|
||||
try {
|
||||
embeddings = (await readFile(`${tagBasePath}/temp/emb.txt`)).split("\n")
|
||||
.filter(x => x.trim().length > 0) // Remove empty lines
|
||||
.map(x => x.trim().split(",")); // Split into name, version type pairs
|
||||
embeddings = (await loadCSV(`${tagBasePath}/temp/emb.txt`))
|
||||
.filter(x => x[0]?.trim().length > 0) // Remove empty lines
|
||||
.map(x => [x[0].trim(), x[1], x[2]]); // Return name, sortKey, hash tuples
|
||||
} catch (e) {
|
||||
console.error("Error loading embeddings.txt: " + e);
|
||||
}
|
||||
@@ -49,7 +53,7 @@ async function load() {
|
||||
|
||||
function sanitize(tagType, text) {
|
||||
if (tagType === ResultType.embedding) {
|
||||
return text.replace(/^.*?: /g, "");
|
||||
return text;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ class HypernetParser extends BaseTagParser {
|
||||
if (tagword !== "<" && tagword !== "<h:" && tagword !== "<hypernet:") {
|
||||
let searchTerm = tagword.replace("<hypernet:", "").replace("<h:", "").replace("<", "");
|
||||
let filterCondition = x => x.toLowerCase().includes(searchTerm) || x.toLowerCase().replaceAll(" ", "_").includes(searchTerm);
|
||||
tempResults = hypernetworks.filter(x => filterCondition(x)); // Filter by tagword
|
||||
tempResults = hypernetworks.filter(x => filterCondition(x[0])); // Filter by tagword
|
||||
} else {
|
||||
tempResults = hypernetworks;
|
||||
}
|
||||
@@ -16,8 +16,9 @@ class HypernetParser extends BaseTagParser {
|
||||
// Add final results
|
||||
let finalResults = [];
|
||||
tempResults.forEach(t => {
|
||||
let result = new AutocompleteResult(t.trim(), ResultType.hypernetwork)
|
||||
let result = new AutocompleteResult(t[0].trim(), ResultType.hypernetwork)
|
||||
result.meta = "Hypernetwork";
|
||||
result.sortKey = t[1];
|
||||
finalResults.push(result);
|
||||
});
|
||||
|
||||
@@ -28,9 +29,9 @@ class HypernetParser extends BaseTagParser {
|
||||
async function load() {
|
||||
if (hypernetworks.length === 0) {
|
||||
try {
|
||||
hypernetworks = (await readFile(`${tagBasePath}/temp/hyp.txt`)).split("\n")
|
||||
.filter(x => x.trim().length > 0) //Remove empty lines
|
||||
.map(x => x.trim()); // Remove carriage returns and padding if it exists
|
||||
hypernetworks = (await loadCSV(`${tagBasePath}/temp/hyp.txt`))
|
||||
.filter(x => x[0]?.trim().length > 0) //Remove empty lines
|
||||
.map(x => [x[0]?.trim(), x[1]]); // Remove carriage returns and padding if it exists
|
||||
} catch (e) {
|
||||
console.error("Error loading hypernetworks.txt: " + e);
|
||||
}
|
||||
|
||||
@@ -23,7 +23,8 @@ class LoraParser extends BaseTagParser {
|
||||
|
||||
let result = new AutocompleteResult(name, ResultType.lora)
|
||||
result.meta = "Lora";
|
||||
result.hash = t[1];
|
||||
result.sortKey = t[1];
|
||||
result.hash = t[2];
|
||||
finalResults.push(result);
|
||||
});
|
||||
|
||||
@@ -36,7 +37,7 @@ async function load() {
|
||||
try {
|
||||
loras = (await loadCSV(`${tagBasePath}/temp/lora.txt`))
|
||||
.filter(x => x[0]?.trim().length > 0) // Remove empty lines
|
||||
.map(x => [x[0]?.trim(), x[1]]); // Trim filenames and return the name, hash pairs
|
||||
.map(x => [x[0]?.trim(), x[1], x[2]]); // Trim filenames and return the name, sortKey, hash pairs
|
||||
} catch (e) {
|
||||
console.error("Error loading lora.txt: " + e);
|
||||
}
|
||||
|
||||
@@ -5,8 +5,8 @@ class LycoParser extends BaseTagParser {
|
||||
parse() {
|
||||
// Show lyco
|
||||
let tempResults = [];
|
||||
if (tagword !== "<" && tagword !== "<l:" && tagword !== "<lyco:") {
|
||||
let searchTerm = tagword.replace("<lyco:", "").replace("<l:", "").replace("<", "");
|
||||
if (tagword !== "<" && tagword !== "<l:" && tagword !== "<lyco:" && tagword !== "<lora:") {
|
||||
let searchTerm = tagword.replace("<lyco:", "").replace("<lora:", "").replace("<l:", "").replace("<", "");
|
||||
let filterCondition = x => x.toLowerCase().includes(searchTerm) || x.toLowerCase().replaceAll(" ", "_").includes(searchTerm);
|
||||
tempResults = lycos.filter(x => filterCondition(x[0])); // Filter by tagword
|
||||
} else {
|
||||
@@ -23,7 +23,8 @@ class LycoParser extends BaseTagParser {
|
||||
|
||||
let result = new AutocompleteResult(name, ResultType.lyco)
|
||||
result.meta = "Lyco";
|
||||
result.hash = t[1];
|
||||
result.sortKey = t[1];
|
||||
result.hash = t[2];
|
||||
finalResults.push(result);
|
||||
});
|
||||
|
||||
@@ -36,7 +37,7 @@ async function load() {
|
||||
try {
|
||||
lycos = (await loadCSV(`${tagBasePath}/temp/lyco.txt`))
|
||||
.filter(x => x[0]?.trim().length > 0) // Remove empty lines
|
||||
.map(x => [x[0]?.trim(), x[1]]); // Trim filenames and return the name, hash pairs
|
||||
.map(x => [x[0]?.trim(), x[1], x[2]]); // Trim filenames and return the name, sortKey, hash pairs
|
||||
} catch (e) {
|
||||
console.error("Error loading lyco.txt: " + e);
|
||||
}
|
||||
@@ -51,7 +52,8 @@ async function sanitize(tagType, text) {
|
||||
multiplier = info["preferred weight"];
|
||||
}
|
||||
|
||||
return `<lyco:${text}:${multiplier}>`;
|
||||
let prefix = TAC_CFG.useLoraPrefixForLycos ? "lora" : "lyco";
|
||||
return `<${prefix}:${text}:${multiplier}>`;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ async function load() {
|
||||
// Add to the dict
|
||||
csv_lines.forEach(parts => {
|
||||
const hash = parts[0];
|
||||
const keywords = parts[1].replaceAll("| ", ", ").replaceAll("|", ", ").trim();
|
||||
const keywords = parts[1]?.replaceAll("| ", ", ")?.replaceAll("|", ", ")?.trim();
|
||||
const lastSepIndex = parts[2]?.lastIndexOf("/") + 1 || parts[2]?.lastIndexOf("\\") + 1 || 0;
|
||||
const name = parts[2]?.substring(lastSepIndex).trim() || "none"
|
||||
|
||||
|
||||
javascript/ext_styles.js (new file, 67 lines)
@@ -0,0 +1,67 @@
const STYLE_REGEX = /(\$(\d*)\(?)[^$|\[\],\s]*\)?/;
const STYLE_TRIGGER = () => TAC_CFG.useStyleVars && tagword.match(STYLE_REGEX);

var lastStyleVarIndex = "";

class StyleParser extends BaseTagParser {
    async parse() {
        // Refresh if needed
        await refreshStyleNamesIfChanged();

        // Show styles
        let tempResults = [];
        let matchGroups = tagword.match(STYLE_REGEX);

        // Save index to insert again later or clear last one
        lastStyleVarIndex = matchGroups[2] ? matchGroups[2] : "";

        if (tagword !== matchGroups[1]) {
            let searchTerm = tagword.replace(matchGroups[1], "");

            let filterCondition = x => x[0].toLowerCase().includes(searchTerm) || x[0].toLowerCase().replaceAll(" ", "_").includes(searchTerm);
            tempResults = styleNames.filter(x => filterCondition(x)); // Filter by tagword
        } else {
            tempResults = styleNames;
        }

        // Add final results
        let finalResults = [];
        tempResults.forEach(t => {
            let result = new AutocompleteResult(t[0].trim(), ResultType.styleName)
            result.meta = "Style";
            finalResults.push(result);
        });

        return finalResults;
    }
}

async function load(force = false) {
    if (styleNames.length === 0 || force) {
        try {
            styleNames = (await loadCSV(`${tagBasePath}/temp/styles.txt`))
                .filter(x => x[0]?.trim().length > 0) // Remove empty lines
                .filter(x => x[0] !== "None") // Remove "None" style
                .map(x => [x[0].trim()]); // Trim name
        } catch (e) {
            console.error("Error loading styles.txt: " + e);
        }
    }
}

function sanitize(tagType, text) {
    if (tagType === ResultType.styleName) {
        if (text.includes(" ")) {
            return `$${lastStyleVarIndex}(${text})`;
        } else {
            return `$${lastStyleVarIndex}${text}`;
        }
    }
    return null;
}

PARSERS.push(new StyleParser(STYLE_TRIGGER));

// Add our utility functions to their respective queues
QUEUE_FILE_LOAD.push(load);
QUEUE_SANITIZE.push(sanitize);
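A quick illustration of the insertion format produced by `sanitize()` above (the style names are made up): a multi-word style is wrapped in `$N(...)`, a single word is appended directly after the index.

```js
lastStyleVarIndex = "2";
console.log(sanitize(ResultType.styleName, "Cinematic Lighting")); // -> "$2(Cinematic Lighting)"
console.log(sanitize(ResultType.styleName, "Cinematic"));          // -> "$2Cinematic"
```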
@@ -7,7 +7,7 @@ class UmiParser extends BaseTagParser {
|
||||
parse(textArea, prompt) {
|
||||
// We are in a UMI yaml tag definition, parse further
|
||||
let umiSubPrompts = [...prompt.matchAll(UMI_PROMPT_REGEX)];
|
||||
|
||||
|
||||
let umiTags = [];
|
||||
let umiTagsWithOperators = []
|
||||
|
||||
@@ -15,7 +15,7 @@ class UmiParser extends BaseTagParser {
|
||||
|
||||
umiSubPrompts.forEach(umiSubPrompt => {
|
||||
umiTags = umiTags.concat([...umiSubPrompt[0].matchAll(UMI_TAG_REGEX)].map(x => x[1].toLowerCase()));
|
||||
|
||||
|
||||
const start = umiSubPrompt.index;
|
||||
const end = umiSubPrompt.index + umiSubPrompt[0].length;
|
||||
if (textArea.selectionStart >= start && textArea.selectionStart <= end) {
|
||||
@@ -113,7 +113,7 @@ class UmiParser extends BaseTagParser {
|
||||
|| !matches.all.includes(x[0])
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
if (umiTags.length > 0) {
|
||||
// Get difference for subprompt
|
||||
let tagCountChange = umiTags.length - umiPreviousTags.length;
|
||||
@@ -129,7 +129,7 @@ class UmiParser extends BaseTagParser {
|
||||
return;
|
||||
}
|
||||
|
||||
let umiTagword = diff[0] || '';
|
||||
let umiTagword = tagCountChange < 0 ? '' : diff[0] || '';
|
||||
let tempResults = [];
|
||||
if (umiTagword && umiTagword.length > 0) {
|
||||
umiTagword = umiTagword.toLowerCase().replace(/[\n\r]/g, "");
|
||||
@@ -149,10 +149,11 @@ class UmiParser extends BaseTagParser {
|
||||
finalResults.push(result);
|
||||
});
|
||||
|
||||
finalResults = finalResults.sort((a, b) => b.count - a.count);
|
||||
return finalResults;
|
||||
} else if (showAll) {
|
||||
let filteredWildcardsSorted = filteredWildcards("");
|
||||
|
||||
|
||||
// Add final results
|
||||
let finalResults = [];
|
||||
filteredWildcardsSorted.forEach(t => {
|
||||
@@ -160,14 +161,16 @@ class UmiParser extends BaseTagParser {
|
||||
result.count = t[1];
|
||||
finalResults.push(result);
|
||||
});
|
||||
|
||||
|
||||
originalTagword = tagword;
|
||||
tagword = "";
|
||||
|
||||
finalResults = finalResults.sort((a, b) => b.count - a.count);
|
||||
return finalResults;
|
||||
}
|
||||
} else {
|
||||
let filteredWildcardsSorted = filteredWildcards("");
|
||||
|
||||
|
||||
// Add final results
|
||||
let finalResults = [];
|
||||
filteredWildcardsSorted.forEach(t => {
|
||||
@@ -178,12 +181,14 @@ class UmiParser extends BaseTagParser {
|
||||
|
||||
originalTagword = tagword;
|
||||
tagword = "";
|
||||
|
||||
finalResults = finalResults.sort((a, b) => b.count - a.count);
|
||||
return finalResults;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function updateUmiTags( tagType, sanitizedText, newPrompt, textArea) {
|
||||
function updateUmiTags(tagType, sanitizedText, newPrompt, textArea) {
|
||||
// If it was a umi wildcard, also update the umiPreviousTags
|
||||
if (tagType === ResultType.umiWildcard && originalTagword.length > 0) {
|
||||
let umiSubPrompts = [...newPrompt.matchAll(UMI_PROMPT_REGEX)];
|
||||
|
||||
@@ -19,7 +19,7 @@ class WildcardParser extends BaseTagParser {
|
||||
let wcPairs = wcFound || wildcardExtFiles.filter(x => x[1].toLowerCase() === wcFile);
|
||||
|
||||
if (!wcPairs) return [];
|
||||
|
||||
|
||||
let wildcards = [];
|
||||
for (let i = 0; i < wcPairs.length; i++) {
|
||||
const basePath = wcPairs[i][0];
|
||||
@@ -85,13 +85,14 @@ class WildcardFileParser extends BaseTagParser {
|
||||
} else {
|
||||
result = new AutocompleteResult(wcFile[1].trim(), ResultType.wildcardFile);
|
||||
result.meta = "Wildcard file";
|
||||
result.sortKey = wcFile[2].trim();
|
||||
}
|
||||
|
||||
|
||||
finalResults.push(result);
|
||||
alreadyAdded.set(wcFile[1], true);
|
||||
});
|
||||
|
||||
finalResults.sort((a, b) => a.text.localeCompare(b.text));
|
||||
finalResults.sort(getSortFunction());
|
||||
|
||||
return finalResults;
|
||||
}
|
||||
@@ -100,17 +101,19 @@ class WildcardFileParser extends BaseTagParser {
|
||||
async function load() {
|
||||
if (wildcardFiles.length === 0 && wildcardExtFiles.length === 0) {
|
||||
try {
|
||||
let wcFileArr = (await readFile(`${tagBasePath}/temp/wc.txt`)).split("\n");
|
||||
let wcBasePath = wcFileArr[0].trim(); // First line should be the base path
|
||||
wildcardFiles = wcFileArr.slice(1)
|
||||
.filter(x => x.trim().length > 0) // Remove empty lines
|
||||
.map(x => [wcBasePath, x.trim().replace(".txt", "")]); // Remove file extension & newlines
|
||||
let wcFileArr = await loadCSV(`${tagBasePath}/temp/wc.txt`);
|
||||
if (wcFileArr && wcFileArr.length > 0) {
|
||||
let wcBasePath = wcFileArr[0][0].trim(); // First line should be the base path
|
||||
wildcardFiles = wcFileArr.slice(1)
|
||||
.filter(x => x[0]?.trim().length > 0) //Remove empty lines
|
||||
.map(x => [wcBasePath, x[0]?.trim().replace(".txt", ""), x[1]]); // Remove file extension & newlines
|
||||
}
|
||||
|
||||
// To support multiple sources, we need to separate them using the provided "-----" strings
|
||||
let wcExtFileArr = (await readFile(`${tagBasePath}/temp/wce.txt`)).split("\n");
|
||||
let wcExtFileArr = await loadCSV(`${tagBasePath}/temp/wce.txt`);
|
||||
let splitIndices = [];
|
||||
for (let index = 0; index < wcExtFileArr.length; index++) {
|
||||
if (wcExtFileArr[index].trim() === "-----") {
|
||||
if (wcExtFileArr[index][0].trim() === "-----") {
|
||||
splitIndices.push(index);
|
||||
}
|
||||
}
|
||||
@@ -121,18 +124,18 @@ async function load() {
|
||||
let end = splitIndices[i];
|
||||
|
||||
let wcExtFile = wcExtFileArr.slice(start, end);
|
||||
let base = wcExtFile[0].trim() + "/";
|
||||
wcExtFile = wcExtFile.slice(1)
|
||||
.filter(x => x.trim().length > 0) // Remove empty lines
|
||||
.map(x => x.trim().replace(base, "").replace(".txt", "")); // Remove file extension & newlines;
|
||||
|
||||
wcExtFile = wcExtFile.map(x => [base, x]);
|
||||
wildcardExtFiles.push(...wcExtFile);
|
||||
if (wcExtFile && wcExtFile.length > 0) {
|
||||
let base = wcExtFile[0][0].trim() + "/";
|
||||
wcExtFile = wcExtFile.slice(1)
|
||||
.filter(x => x[0]?.trim().length > 0) //Remove empty lines
|
||||
.map(x => [base, x[0]?.trim().replace(base, "").replace(".txt", ""), x[1]]);
|
||||
wildcardExtFiles.push(...wcExtFile);
|
||||
}
|
||||
}
|
||||
|
||||
// Load the yaml wildcard json file and append it as a wildcard file, appending each key as a path component until we reach the end
|
||||
yamlWildcards = await readFile(`${tagBasePath}/temp/wc_yaml.json`, true);
|
||||
|
||||
|
||||
// Append each key as a path component until we reach a leaf
|
||||
Object.keys(yamlWildcards).forEach(file => {
|
||||
const flattened = flatten(yamlWildcards[file], [], "/");
|
||||
@@ -150,7 +153,7 @@ function sanitize(tagType, text) {
|
||||
if (tagType === ResultType.wildcardFile || tagType === ResultType.yamlWildcard) {
|
||||
return `__${text}__`;
|
||||
} else if (tagType === ResultType.wildcardTag) {
|
||||
return text.replace(/^.*?: /g, "");
|
||||
return text;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
const styleColors = {
|
||||
"--results-neutral-text": ["#e0e0e0","black"],
|
||||
"--results-bg": ["#0b0f19", "#ffffff"],
|
||||
"--results-border-color": ["#4b5563", "#e5e7eb"],
|
||||
"--results-border-width": ["1px", "1.5px"],
|
||||
@@ -35,6 +36,7 @@ const autocompleteCSS = `
|
||||
.autocompleteResults {
|
||||
background-color: var(--results-bg) !important;
|
||||
border: var(--results-border-width) solid var(--results-border-color) !important;
|
||||
color: var(--results-neutral-text) !important;
|
||||
border-radius: 12px !important;
|
||||
height: fit-content;
|
||||
flex-basis: fit-content;
|
||||
@@ -211,11 +213,15 @@ async function syncOptions() {
|
||||
useWildcards: opts["tac_useWildcards"],
|
||||
sortWildcardResults: opts["tac_sortWildcardResults"],
|
||||
useEmbeddings: opts["tac_useEmbeddings"],
|
||||
includeEmbeddingsInNormalResults: opts["tac_includeEmbeddingsInNormalResults"],
|
||||
useHypernetworks: opts["tac_useHypernetworks"],
|
||||
useLoras: opts["tac_useLoras"],
|
||||
useLycos: opts["tac_useLycos"],
|
||||
useLoraPrefixForLycos: opts["tac_useLoraPrefixForLycos"],
|
||||
showWikiLinks: opts["tac_showWikiLinks"],
|
||||
showExtraNetworkPreviews: opts["tac_showExtraNetworkPreviews"],
|
||||
modelSortOrder: opts["tac_modelSortOrder"],
|
||||
useStyleVars: opts["tac_useStyleVars"],
|
||||
// Insertion related settings
|
||||
replaceUnderscores: opts["tac_replaceUnderscores"],
|
||||
escapeParentheses: opts["tac_escapeParentheses"],
|
||||
@@ -224,6 +230,7 @@ async function syncOptions() {
|
||||
alwaysSpaceAtEnd: opts["tac_alwaysSpaceAtEnd"],
|
||||
wildcardCompletionMode: opts["tac_wildcardCompletionMode"],
|
||||
modelKeywordCompletion: opts["tac_modelKeywordCompletion"],
|
||||
modelKeywordLocation: opts["tac_modelKeywordLocation"],
|
||||
// Alias settings
|
||||
alias: {
|
||||
searchByAlias: opts["tac_alias.searchByAlias"],
|
||||
@@ -267,6 +274,17 @@ async function syncOptions() {
|
||||
await loadTags(newCFG);
|
||||
}
|
||||
|
||||
// Refresh temp files if model sort order changed
|
||||
// Contrary to the other loads, this one shouldn't happen on a first time load
|
||||
if (TAC_CFG && newCFG.modelSortOrder !== TAC_CFG.modelSortOrder) {
|
||||
const dropdown = gradioApp().querySelector("#setting_tac_modelSortOrder");
|
||||
dropdown.style.opacity = 0.5;
|
||||
dropdown.style.pointerEvents = "none";
|
||||
await refreshTacTempFiles(true);
|
||||
dropdown.style.opacity = null;
|
||||
dropdown.style.pointerEvents = null;
|
||||
}
|
||||
|
||||
// Update CSS if maxResults changed
|
||||
if (TAC_CFG && newCFG.maxResults !== TAC_CFG.maxResults) {
|
||||
gradioApp().querySelectorAll(".autocompleteResults").forEach(r => {
|
||||
@@ -330,7 +348,7 @@ function showResults(textArea) {
|
||||
if (TAC_CFG.slidingPopup) {
|
||||
let caretPosition = getCaretCoordinates(textArea, textArea.selectionEnd).left;
|
||||
let offset = Math.min(textArea.offsetLeft - textArea.scrollLeft + caretPosition, textArea.offsetWidth - parentDiv.offsetWidth);
|
||||
|
||||
|
||||
parentDiv.style.left = `${offset}px`;
|
||||
} else {
|
||||
if (parentDiv.style.left)
|
||||
@@ -346,9 +364,9 @@ function showResults(textArea) {
|
||||
function hideResults(textArea) {
|
||||
let textAreaId = getTextAreaIdentifier(textArea);
|
||||
let resultsDiv = gradioApp().querySelector('.autocompleteParent' + textAreaId);
|
||||
|
||||
|
||||
if (!resultsDiv) return;
|
||||
|
||||
|
||||
resultsDiv.style.display = "none";
|
||||
selectedTag = null;
|
||||
}
|
||||
@@ -358,12 +376,12 @@ function isEnabled() {
|
||||
if (TAC_CFG.activeIn.global) {
|
||||
// Skip check if the current model was not correctly detected, since it could wrongly disable the script otherwise
|
||||
if (!currentModelName || !currentModelHash) return true;
|
||||
|
||||
|
||||
let modelList = TAC_CFG.activeIn.modelList
|
||||
.split(",")
|
||||
.map(x => x.trim())
|
||||
.filter(x => x.length > 0);
|
||||
|
||||
|
||||
let shortHash = currentModelHash.substring(0, 10);
|
||||
let modelNameWithoutHash = currentModelName.replace(/\[.*\]$/g, "").trim();
|
||||
if (TAC_CFG.activeIn.modelListMode.toLowerCase() === "blacklist") {
|
||||
@@ -382,9 +400,10 @@ function isEnabled() {
|
||||
const WEIGHT_REGEX = /[([]([^()[\]:|]+)(?::(?:\d+(?:\.\d+)?|\.\d+))?[)\]]/g;
|
||||
const POINTY_REGEX = /<[^\s,<](?:[^\t\n\r,<>]*>|[^\t\n\r,> ]*)/g;
|
||||
const COMPLETED_WILDCARD_REGEX = /__[^\s,_][^\t\n\r,_]*[^\s,_]__[^\s,_]*/g;
|
||||
const NORMAL_TAG_REGEX = /[^\s,|<>)\]]+|</g;
|
||||
const STYLE_VAR_REGEX = /\$\(?[^$|\[\],\s]*\)?/g;
|
||||
const NORMAL_TAG_REGEX = /[^\s,|<>\[\]:]+_\([^\s,|<>\[\]:]*\)?|[^\s,|<>():\[\]]+|</g;
|
||||
const RUBY_TAG_REGEX = /[\w\d<][\w\d' \-?!/$%]{2,}>?/g;
|
||||
const TAG_REGEX = new RegExp(`${POINTY_REGEX.source}|${COMPLETED_WILDCARD_REGEX.source}|${NORMAL_TAG_REGEX.source}`, "g");
|
||||
const TAG_REGEX = new RegExp(`${POINTY_REGEX.source}|${COMPLETED_WILDCARD_REGEX.source}|${STYLE_VAR_REGEX.source}|${NORMAL_TAG_REGEX.source}`, "g");
|
||||
|
||||
// On click, insert the tag into the prompt textbox with respect to the cursor position
|
||||
async function insertTextAtCursor(textArea, result, tagword, tabCompletedWithoutChoice = false) {
|
||||
@@ -475,6 +494,10 @@ async function insertTextAtCursor(textArea, result, tagword, tabCompletedWithout
|
||||
optionalSeparator = TAC_CFG.extraNetworksSeparator || " ";
|
||||
}
|
||||
|
||||
// Escape $ signs since they are special chars for the replace function
|
||||
// We need four since we're also escaping them in replaceAll in the first place
|
||||
sanitizedText = sanitizedText.replaceAll("$", "$$$$");
|
||||
|
||||
// Replace partial tag word with new text, add comma if needed
|
||||
let insert = surrounding.replace(match, sanitizedText + optionalSeparator);
|
||||
|
||||
@@ -493,11 +516,19 @@ async function insertTextAtCursor(textArea, result, tagword, tabCompletedWithout
|
||||
keywords = info["activation text"];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (!keywords && modelKeywordPath.length > 0 && result.hash && result.hash !== "NOFILE" && result.hash.length > 0) {
|
||||
let nameDict = modelKeywordDict.get(result.hash);
|
||||
let names = [result.text + ".safetensors", result.text + ".pt", result.text + ".ckpt"];
|
||||
|
||||
// No match, try to find a sha256 match from the cache file
|
||||
if (!nameDict) {
|
||||
const sha256 = await fetchAPI(`/tacapi/v1/lora-cached-hash/${result.text}`)
|
||||
if (sha256) {
|
||||
nameDict = modelKeywordDict.get(sha256);
|
||||
}
|
||||
}
|
||||
|
||||
if (nameDict) {
|
||||
let found = false;
|
||||
names.forEach(name => {
|
||||
@@ -506,7 +537,7 @@ async function insertTextAtCursor(textArea, result, tagword, tabCompletedWithout
|
||||
keywords = nameDict.get(name);
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
if (!found)
|
||||
keywords = nameDict.get("none");
|
||||
}
|
||||
@@ -514,22 +545,33 @@ async function insertTextAtCursor(textArea, result, tagword, tabCompletedWithout
|
||||
|
||||
if (keywords && keywords.length > 0) {
|
||||
textBeforeKeywordInsertion = newPrompt;
|
||||
|
||||
newPrompt = `${keywords}, ${newPrompt}`; // Insert keywords
|
||||
|
||||
|
||||
if (TAC_CFG.modelKeywordLocation === "Start of prompt")
|
||||
newPrompt = `${keywords}, ${newPrompt}`; // Insert keywords
|
||||
else if (TAC_CFG.modelKeywordLocation === "End of prompt")
|
||||
newPrompt = `${newPrompt}, ${keywords}`; // Insert keywords
|
||||
else {
|
||||
let keywordStart = prompt[editStart - 1] === " " ? editStart - 1 : editStart;
|
||||
newPrompt = prompt.substring(0, keywordStart) + `, ${keywords} ${insert}` + prompt.substring(editEnd);
|
||||
}
|
||||
|
||||
|
||||
textAfterKeywordInsertion = newPrompt;
|
||||
keywordInsertionUndone = false;
|
||||
setTimeout(() => lastEditWasKeywordInsertion = true, 200)
|
||||
|
||||
|
||||
keywordsLength = keywords.length + 2; // +2 for the comma and space
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Insert into prompt textbox and reposition cursor
|
||||
textArea.value = newPrompt;
|
||||
textArea.selectionStart = afterInsertCursorPos + optionalSeparator.length + keywordsLength;
|
||||
textArea.selectionEnd = textArea.selectionStart
|
||||
|
||||
// Set self trigger flag to show wildcard contents after the filename was inserted
|
||||
if ([ResultType.wildcardFile, ResultType.yamlWildcard, ResultType.umiWildcard].includes(result.type))
|
||||
tacSelfTrigger = true;
|
||||
// Since we've modified a Gradio Textbox component manually, we need to simulate an `input` DOM event to ensure it's propagated back to python.
|
||||
// Uses a built-in method from the webui's ui.js which also already accounts for event target
|
||||
updateInput(textArea);
|
||||
@@ -656,7 +698,9 @@ function addResultsToList(textArea, results, tagword, resetList) {
|
||||
if (linkPart.includes("[")) {
|
||||
linkPart = linkPart.split("[")[0]
|
||||
}
|
||||
|
||||
|
||||
linkPart = encodeURIComponent(linkPart);
|
||||
|
||||
// Set link based on selected file
|
||||
let tagFileNameLower = tagFileName.toLowerCase();
|
||||
if (tagFileNameLower.startsWith("danbooru")) {
|
||||
@@ -664,7 +708,7 @@ function addResultsToList(textArea, results, tagword, resetList) {
|
||||
} else if (tagFileNameLower.startsWith("e621")) {
|
||||
wikiLink.href = `https://e621.net/wiki_pages/${linkPart}`;
|
||||
}
|
||||
|
||||
|
||||
wikiLink.target = "_blank";
|
||||
flexDiv.appendChild(wikiLink);
|
||||
}
|
||||
@@ -717,7 +761,7 @@ function addResultsToList(textArea, results, tagword, resetList) {
|
||||
else if (result.meta.startsWith("v2"))
|
||||
itemText.classList.add("acEmbeddingV2");
|
||||
}
|
||||
|
||||
|
||||
flexDiv.appendChild(metaDiv);
|
||||
}
|
||||
|
||||
@@ -782,7 +826,7 @@ async function updateSelectionStyle(textArea, newIndex, oldIndex) {
|
||||
}
|
||||
|
||||
let img = previewDiv.querySelector("img");
|
||||
|
||||
|
||||
let url = await getExtraNetworkPreviewURL(selectedFilename, shorthandType);
|
||||
if (url) {
|
||||
img.src = url;
|
||||
@@ -808,7 +852,7 @@ function updateRuby(textArea, prompt) {
|
||||
ruby.setAttribute("class", `acRuby${typeClass} notranslate`);
|
||||
textArea.parentNode.appendChild(ruby);
|
||||
}
|
||||
|
||||
|
||||
ruby.innerText = prompt;
|
||||
|
||||
let bracketEscapedPrompt = prompt.replaceAll("\\(", "$").replaceAll("\\)", "%");
|
||||
@@ -826,9 +870,9 @@ function updateRuby(textArea, prompt) {
|
||||
.replaceAll(" ", "_")
|
||||
.replaceAll("\\(", "(")
|
||||
.replaceAll("\\)", ")");
|
||||
|
||||
|
||||
const translation = translations?.get(tag) || translations?.get(unsanitizedTag);
|
||||
|
||||
|
||||
let escapedTag = escapeRegExp(tag);
|
||||
return { tag, escapedTag, translation };
|
||||
}
|
||||
@@ -844,14 +888,14 @@ function updateRuby(textArea, prompt) {
|
||||
// First try to find direct matches
|
||||
[...rubyTags].forEach(tag => {
|
||||
let tuple = prepareTag(tag);
|
||||
|
||||
|
||||
if (tuple.translation) {
|
||||
html = replaceOccurences(html, tuple);
|
||||
} else {
|
||||
let subTags = tuple.tag.split(" ").filter(x => x.trim().length > 0);
|
||||
// Return if there is only one word
|
||||
if (subTags.length === 1) return;
|
||||
|
||||
|
||||
let subHtml = tag.replaceAll("$", "\\(").replaceAll("%", "\\)");
|
||||
|
||||
let translateNgram = (windows) => {
|
||||
@@ -866,14 +910,14 @@ function updateRuby(textArea, prompt) {
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
// Perform n-gram sliding window search
|
||||
translateNgram(toNgrams(subTags, 3));
|
||||
translateNgram(toNgrams(subTags, 2));
|
||||
translateNgram(toNgrams(subTags, 1));
|
||||
|
||||
let escapedTag = escapeRegExp(tuple.tag);
|
||||
|
||||
|
||||
let searchRegex = new RegExp(`(?<!<ruby>)(?:\\b)${escapedTag}(?:\\b|$|(?=[,|: \\t\\n\\r]))(?!<rt>)`, "g");
|
||||
html = html.replaceAll(searchRegex, subHtml);
|
||||
}
|
||||
@@ -910,6 +954,7 @@ function checkKeywordInsertionUndo(textArea, event) {
|
||||
if (lastEditWasKeywordInsertion && !keywordInsertionUndone) {
|
||||
keywordInsertionUndone = true;
|
||||
textArea.value = textBeforeKeywordInsertion;
|
||||
tacSelfTrigger = true;
|
||||
updateInput(textArea);
|
||||
}
|
||||
break;
|
||||
@@ -917,6 +962,7 @@ function checkKeywordInsertionUndo(textArea, event) {
|
||||
if (lastEditWasKeywordInsertion && keywordInsertionUndone) {
|
||||
keywordInsertionUndone = false;
|
||||
textArea.value = textAfterKeywordInsertion;
|
||||
tacSelfTrigger = true;
|
||||
updateInput(textArea);
|
||||
}
|
||||
case undefined:
|
||||
@@ -951,7 +997,7 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
|
||||
.map(match => match[1]);
|
||||
let tags = prompt.match(TAG_REGEX)
|
||||
if (weightedTags !== null && tags !== null) {
|
||||
tags = tags.filter(tag => !weightedTags.some(weighted => tag.includes(weighted) && !tag.startsWith("<[")))
|
||||
tags = tags.filter(tag => !weightedTags.some(weighted => tag.includes(weighted) && !tag.startsWith("<[") && !tag.startsWith("$(")))
|
||||
.concat(weightedTags);
|
||||
}
|
||||
|
||||
@@ -985,46 +1031,46 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
|
||||
}
|
||||
|
||||
results = [];
|
||||
resultCountBeforeNormalTags = 0;
|
||||
tagword = tagword.toLowerCase().replace(/[\n\r]/g, "");
|
||||
|
||||
// Process all parsers
|
||||
let resultCandidates = await processParsers(textArea, prompt);
|
||||
let resultCandidates = (await processParsers(textArea, prompt))?.filter(x => x.length > 0);
|
||||
// If one ore more result candidates match, use their results
|
||||
if (resultCandidates && resultCandidates.length > 0) {
|
||||
// Flatten our candidate(s)
|
||||
results = resultCandidates.flat();
|
||||
// If there was more than one candidate, sort the results by text to mix them
|
||||
// instead of having them added in the order of the parsers
|
||||
let shouldSort = resultCandidates.length > 1;
|
||||
if (shouldSort) {
|
||||
results = results.sort((a, b) => {
|
||||
let sortByA = a.type === ResultType.chant ? a.aliases : a.text;
|
||||
let sortByB = b.type === ResultType.chant ? b.aliases : b.text;
|
||||
return sortByA.localeCompare(sortByB);
|
||||
});
|
||||
// Sort results, but not if it's umi tags since they are sorted by count
|
||||
if (!(resultCandidates.length === 1 && results[0].type === ResultType.umiWildcard))
|
||||
results = results.sort(getSortFunction());
|
||||
|
||||
// Since some tags are kaomoji, we have to add the normal results in some cases
|
||||
if (tagword.startsWith("<") || tagword.startsWith("*<")) {
|
||||
// Create escaped search regex with support for * as a start placeholder
|
||||
let searchRegex;
|
||||
if (tagword.startsWith("*")) {
|
||||
tagword = tagword.slice(1);
|
||||
searchRegex = new RegExp(`${escapeRegExp(tagword)}`, 'i');
|
||||
} else {
|
||||
searchRegex = new RegExp(`(^|[^a-zA-Z])${escapeRegExp(tagword)}`, 'i');
|
||||
}
|
||||
let genericResults = allTags.filter(x => x[0].toLowerCase().search(searchRegex) > -1).slice(0, TAC_CFG.maxResults);
|
||||
|
||||
genericResults.forEach(g => {
|
||||
let result = new AutocompleteResult(g[0].trim(), ResultType.tag)
|
||||
result.category = g[1];
|
||||
result.count = g[2];
|
||||
result.aliases = g[3];
|
||||
results.push(result);
|
||||
});
|
||||
// Since some tags are kaomoji, we have to add the normal results in some cases
|
||||
if (tagword.startsWith("<") || tagword.startsWith("*<")) {
|
||||
// Create escaped search regex with support for * as a start placeholder
|
||||
let searchRegex;
|
||||
if (tagword.startsWith("*")) {
|
||||
tagword = tagword.slice(1);
|
||||
searchRegex = new RegExp(`${escapeRegExp(tagword)}`, 'i');
|
||||
} else {
|
||||
searchRegex = new RegExp(`(^|[^a-zA-Z])${escapeRegExp(tagword)}`, 'i');
|
||||
}
|
||||
let genericResults = allTags.filter(x => x[0].toLowerCase().search(searchRegex) > -1).slice(0, TAC_CFG.maxResults);
|
||||
|
||||
genericResults.forEach(g => {
|
||||
let result = new AutocompleteResult(g[0].trim(), ResultType.tag)
|
||||
result.category = g[1];
|
||||
result.count = g[2];
|
||||
result.aliases = g[3];
|
||||
results.push(result);
|
||||
});
|
||||
}
|
||||
} else { // Else search the normal tag list
|
||||
}
|
||||
// Else search the normal tag list
|
||||
if (!resultCandidates || resultCandidates.length === 0
|
||||
|| (TAC_CFG.includeEmbeddingsInNormalResults && !(tagword.startsWith("<") || tagword.startsWith("*<")))
|
||||
) {
|
||||
resultCountBeforeNormalTags = results.length;
|
||||
|
||||
// Create escaped search regex with support for * as a start placeholder
|
||||
let searchRegex;
|
||||
if (tagword.startsWith("*")) {
|
||||
@@ -1039,7 +1085,7 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
|
||||
let aliasFilter = (x) => x[3] && x[3].toLowerCase().search(searchRegex) > -1;
|
||||
let translationFilter = (x) => (translations.has(x[0]) && translations.get(x[0]).toLowerCase().search(searchRegex) > -1)
|
||||
|| x[3] && x[3].split(",").some(y => translations.has(y) && translations.get(y).toLowerCase().search(searchRegex) > -1);
|
||||
|
||||
|
||||
let fil;
|
||||
if (TAC_CFG.alias.searchByAlias && TAC_CFG.translation.searchByTranslation)
|
||||
fil = (x) => baseFilter(x) || aliasFilter(x) || translationFilter(x);
|
||||
@@ -1077,10 +1123,10 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
|
||||
results = results.concat(extraResults);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Slice if the user has set a max result count
|
||||
if (!TAC_CFG.showAllResults) {
|
||||
results = results.slice(0, TAC_CFG.maxResults);
|
||||
results = results.slice(0, TAC_CFG.maxResults + resultCountBeforeNormalTags);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1098,7 +1144,7 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
|
||||
function navigateInList(textArea, event) {
|
||||
// Return if the function is deactivated in the UI or the current model is excluded due to white/blacklist settings
|
||||
if (!isEnabled()) return;
|
||||
|
||||
|
||||
let keys = TAC_CFG.keymap;
|
||||
|
||||
// Close window if Home or End is pressed while not a keybinding, since it would break completion on leaving the original tag
|
||||
@@ -1148,10 +1194,25 @@ function navigateInList(textArea, event) {
|
||||
}
|
||||
break;
|
||||
case keys["JumpToStart"]:
|
||||
selectedTag = 0;
|
||||
if (TAC_CFG.includeEmbeddingsInNormalResults &&
|
||||
selectedTag > resultCountBeforeNormalTags &&
|
||||
resultCountBeforeNormalTags > 0
|
||||
) {
|
||||
selectedTag = resultCountBeforeNormalTags;
|
||||
} else {
|
||||
selectedTag = 0;
|
||||
}
|
||||
break;
|
||||
case keys["JumpToEnd"]:
|
||||
selectedTag = resultCount - 1;
|
||||
// Jump to the end of the list, or the end of embeddings if they are included in the normal results
|
||||
if (TAC_CFG.includeEmbeddingsInNormalResults &&
|
||||
selectedTag < resultCountBeforeNormalTags &&
|
||||
resultCountBeforeNormalTags > 0
|
||||
) {
|
||||
selectedTag = Math.min(resultCountBeforeNormalTags, resultCount - 1);
|
||||
} else {
|
||||
selectedTag = resultCount - 1;
|
||||
}
|
||||
break;
|
||||
case keys["ChooseSelected"]:
|
||||
if (selectedTag !== null) {
|
||||
@@ -1188,8 +1249,8 @@ function navigateInList(textArea, event) {
|
||||
event.stopPropagation();
|
||||
}
|
||||
|
||||
async function refreshTacTempFiles() {
|
||||
setTimeout(async () => {
|
||||
async function refreshTacTempFiles(api = false) {
|
||||
const reload = async () => {
|
||||
wildcardFiles = [];
|
||||
wildcardExtFiles = [];
|
||||
umiWildcards = [];
|
||||
@@ -1201,7 +1262,23 @@ async function refreshTacTempFiles() {
|
||||
await processQueue(QUEUE_FILE_LOAD, null);
|
||||
|
||||
console.log("TAC: Refreshed temp files");
|
||||
}, 2000);
|
||||
}
|
||||
|
||||
if (api) {
|
||||
await postAPI("tacapi/v1/refresh-temp-files", null);
|
||||
await reload();
|
||||
} else {
|
||||
setTimeout(async () => {
|
||||
await reload();
|
||||
}, 2000);
|
||||
}
|
||||
}
|
||||
|
||||
async function refreshEmbeddings() {
|
||||
await postAPI("tacapi/v1/refresh-embeddings", null);
|
||||
embeddings = [];
|
||||
await processQueue(QUEUE_FILE_LOAD, null);
|
||||
console.log("TAC: Refreshed embeddings");
|
||||
}
|
||||
|
||||
function addAutocompleteToArea(area) {
|
||||
@@ -1224,8 +1301,13 @@ function addAutocompleteToArea(area) {
|
||||
|
||||
// Add autocomplete event listener
|
||||
area.addEventListener('input', (e) => {
|
||||
debounce(autocomplete(area, area.value), TAC_CFG.delayTime);
|
||||
updateRuby(area, area.value);
|
||||
|
||||
// Cancel autocomplete itself if the event has no inputType (e.g. because it was triggered by the updateInput() function)
|
||||
if (!e.inputType && !tacSelfTrigger) return;
|
||||
tacSelfTrigger = false;
|
||||
|
||||
debounce(autocomplete(area, area.value), TAC_CFG.delayTime);
|
||||
checkKeywordInsertionUndo(area, e);
|
||||
});
|
||||
// Add focusout event listener
|
||||
@@ -1286,6 +1368,13 @@ async function setup() {
|
||||
// Listener for internal temp files refresh button
|
||||
gradioApp().querySelector("#refresh_tac_refreshTempFiles")?.addEventListener("click", refreshTacTempFiles);
|
||||
|
||||
// Also add listener for external network refresh button (plus triggering python code)
|
||||
["#img2img_extra_refresh", "#txt2img_extra_refresh"].forEach(e => {
|
||||
gradioApp().querySelector(e)?.addEventListener("click", ()=>{
|
||||
refreshTacTempFiles(true);
|
||||
});
|
||||
})
|
||||
|
||||
// Add mutation observer for the model hash text to also allow hash-based blacklist again
|
||||
let modelHashText = gradioApp().querySelector("#sd_checkpoint_hash");
|
||||
updateModelName();
|
||||
@@ -1296,6 +1385,7 @@ async function setup() {
|
||||
if (mutation.type === "attributes" && mutation.attributeName === "title") {
|
||||
currentModelHash = mutation.target.title;
|
||||
updateModelName();
|
||||
refreshEmbeddings();
|
||||
}
|
||||
}
|
||||
});
|
||||
@@ -1320,7 +1410,7 @@ async function setup() {
|
||||
let mode = (document.querySelector(".dark") || gradioApp().querySelector(".dark")) ? 0 : 1;
|
||||
// Check if we are on webkit
|
||||
let browser = navigator.userAgent.toLowerCase().indexOf('firefox') > -1 ? "firefox" : "other";
|
||||
|
||||
|
||||
let css = autocompleteCSS;
|
||||
// Replace vars with actual values (can't use actual css vars because of the way we inject the css)
|
||||
Object.keys(styleColors).forEach((key) => {
|
||||
@@ -1329,7 +1419,7 @@ async function setup() {
|
||||
Object.keys(browserVars).forEach((key) => {
|
||||
css = css.replaceAll(`var(${key})`, browserVars[key][browser]);
|
||||
})
|
||||
|
||||
|
||||
if (acStyle.styleSheet) {
|
||||
acStyle.styleSheet.cssText = css;
|
||||
} else {
|
||||
|
||||
@@ -16,6 +16,8 @@ hash_dict = {}
|
||||
|
||||
|
||||
def load_hash_cache():
|
||||
if not known_hashes_file.exists():
|
||||
known_hashes_file.touch()
|
||||
with open(known_hashes_file, "r", encoding="utf-8") as file:
|
||||
reader = csv.reader(
|
||||
file.readlines(), delimiter=",", quotechar='"', skipinitialspace=True
|
||||
@@ -28,6 +30,8 @@ def load_hash_cache():
|
||||
def update_hash_cache():
|
||||
global file_needs_update
|
||||
if file_needs_update:
|
||||
if not known_hashes_file.exists():
|
||||
known_hashes_file.touch()
|
||||
with open(known_hashes_file, "w", encoding="utf-8", newline='') as file:
|
||||
writer = csv.writer(file)
|
||||
for name, (hash, mtime) in hash_dict.items():
|
||||
|
||||
@@ -6,31 +6,34 @@ try:
|
||||
from modules.paths import extensions_dir, script_path
|
||||
|
||||
# Webui root path
|
||||
FILE_DIR = Path(script_path)
|
||||
FILE_DIR = Path(script_path).absolute()
|
||||
|
||||
# The extension base path
|
||||
EXT_PATH = Path(extensions_dir)
|
||||
EXT_PATH = Path(extensions_dir).absolute()
|
||||
except ImportError:
|
||||
# Webui root path
|
||||
FILE_DIR = Path().absolute()
|
||||
# The extension base path
|
||||
EXT_PATH = FILE_DIR.joinpath("extensions")
|
||||
EXT_PATH = FILE_DIR.joinpath("extensions").absolute()
|
||||
|
||||
# Tags base path
|
||||
TAGS_PATH = Path(scripts.basedir()).joinpath("tags")
|
||||
TAGS_PATH = Path(scripts.basedir()).joinpath("tags").absolute()
|
||||
|
||||
# The path to the folder containing the wildcards and embeddings
|
||||
WILDCARD_PATH = FILE_DIR.joinpath("scripts/wildcards")
|
||||
EMB_PATH = Path(shared.cmd_opts.embeddings_dir)
|
||||
HYP_PATH = Path(shared.cmd_opts.hypernetwork_dir)
|
||||
WILDCARD_PATH = FILE_DIR.joinpath("scripts/wildcards").absolute()
|
||||
EMB_PATH = Path(shared.cmd_opts.embeddings_dir).absolute()
|
||||
HYP_PATH = Path(shared.cmd_opts.hypernetwork_dir).absolute()
|
||||
|
||||
try:
|
||||
LORA_PATH = Path(shared.cmd_opts.lora_dir)
|
||||
LORA_PATH = Path(shared.cmd_opts.lora_dir).absolute()
|
||||
except AttributeError:
|
||||
LORA_PATH = None
|
||||
|
||||
try:
|
||||
LYCO_PATH = Path(shared.cmd_opts.lyco_dir)
|
||||
try:
|
||||
LYCO_PATH = Path(shared.cmd_opts.lyco_dir_backcompat).absolute()
|
||||
except:
|
||||
LYCO_PATH = Path(shared.cmd_opts.lyco_dir).absolute() # attempt original non-backcompat path
|
||||
except AttributeError:
|
||||
LYCO_PATH = None
|
||||
|
||||
@@ -49,7 +52,7 @@ def find_ext_wildcard_paths():
|
||||
getattr(shared.cmd_opts, "wildcards_dir", None), # Cmd arg from the wildcard extension
|
||||
getattr(opts, "wildcard_dir", None), # Custom path from sd-dynamic-prompts
|
||||
]
|
||||
for path in [Path(p) for p in custom_paths if p is not None]:
|
||||
for path in [Path(p).absolute() for p in custom_paths if p is not None]:
|
||||
if path.exists():
|
||||
found.append(path)
|
||||
|
||||
@@ -61,8 +64,8 @@ WILDCARD_EXT_PATHS = find_ext_wildcard_paths()
|
||||
|
||||
# The path to the temporary files
|
||||
# In the webui root, on windows it exists by default, on linux it doesn't
|
||||
STATIC_TEMP_PATH = FILE_DIR.joinpath("tmp")
|
||||
TEMP_PATH = TAGS_PATH.joinpath("temp") # Extension specific temp files
|
||||
STATIC_TEMP_PATH = FILE_DIR.joinpath("tmp").absolute()
|
||||
TEMP_PATH = TAGS_PATH.joinpath("temp").absolute() # Extension specific temp files
|
||||
|
||||
# Make sure these folders exist
|
||||
if not TEMP_PATH.exists():
|
||||
|
||||
@@ -9,8 +9,8 @@ from pathlib import Path
|
||||
import gradio as gr
|
||||
import yaml
|
||||
from fastapi import FastAPI
|
||||
from fastapi.responses import FileResponse, JSONResponse
|
||||
from modules import script_callbacks, sd_hijack, shared
|
||||
from fastapi.responses import Response, FileResponse, JSONResponse
|
||||
from modules import script_callbacks, sd_hijack, shared, hashes
|
||||
|
||||
from scripts.model_keyword_support import (get_lora_simple_hash,
|
||||
load_hash_cache, update_hash_cache,
|
||||
@@ -24,21 +24,60 @@ except Exception as e: # Not supported.
    load_textual_inversion_embeddings = lambda *args, **kwargs: None
    print("Tag Autocomplete: Cannot reload embeddings instantly:", e)

# Sorting functions for extra networks / embeddings stuff
sort_criteria = {
    "Name": lambda path, name, subpath: name.lower() if subpath else path.stem.lower(),
    "Date Modified (newest first)": lambda path, name, subpath: path.stat().st_mtime,
    "Date Modified (oldest first)": lambda path, name, subpath: path.stat().st_mtime
}

def sort_models(model_list, sort_method = None, name_has_subpath = False):
    """Sorts models according to the setting.

    Input: list of (full_path, display_name, {hash}) models.
    Returns models in the format of name, sort key, meta.
    Meta is optional and can be a hash, version string or other required info.
    """
    if len(model_list) == 0:
        return model_list

    if sort_method is None:
        sort_method = getattr(shared.opts, "tac_modelSortOrder", "Name")

    # Get sorting method from dictionary
    sorter = sort_criteria.get(sort_method, sort_criteria["Name"])

    # During merging on the JS side we need to re-sort anyway, so here only the sort criteria are calculated.
    # The list itself doesn't need to get sorted at this point.
    if len(model_list[0]) > 2:
        results = [f'{name},"{sorter(path, name, name_has_subpath)}",{meta}' for path, name, meta in model_list]
    else:
        results = [f'{name},"{sorter(path, name, name_has_subpath)}"' for path, name in model_list]
    return results

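Illustrative note, not part of the commit: given the format strings above, sort_models only computes a per-entry sort key and joins it into a CSV-style line. A minimal sketch with hypothetical model names:

# Sketch only; the example paths, names and hash below are made up.
from pathlib import Path
models = [
    (Path("models/Lora/styles/inkStyle.safetensors"), "styles/inkStyle", "aabbccdd"),
    (Path("models/Lora/charA.safetensors"), "charA", ""),
]
# With the default "Name" criterion and name_has_subpath=True the key is name.lower(),
# so sort_models(models, name_has_subpath=True) would yield lines such as:
#   styles/inkStyle,"styles/inkstyle",aabbccdd
#   charA,"chara",
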
def get_wildcards():
    """Returns a list of all wildcards. Works on nested folders."""
    wildcard_files = list(WILDCARD_PATH.rglob("*.txt"))
    resolved = [w.relative_to(WILDCARD_PATH).as_posix(
    ) for w in wildcard_files if w.name != "put wildcards here.txt"]
    return resolved
    resolved = [(w, w.relative_to(WILDCARD_PATH).as_posix())
                for w in wildcard_files
                if w.name != "put wildcards here.txt"
                and w.is_file()]
    return sort_models(resolved, name_has_subpath=True)


def get_ext_wildcards():
    """Returns a list of all extension wildcards. Works on nested folders."""
    wildcard_files = []

    excluded_folder_names = [s.strip() for s in getattr(shared.opts, "tac_wildcardExclusionList", "").split(",")]
    for path in WILDCARD_EXT_PATHS:
        wildcard_files.append(path.as_posix())
        wildcard_files.extend(p.relative_to(path).as_posix() for p in path.rglob("*.txt") if p.name != "put wildcards here.txt")
        resolved = [(w, w.relative_to(path).as_posix())
                    for w in path.rglob("*.txt")
                    if w.name != "put wildcards here.txt"
                    and not any(excluded in w.parts for excluded in excluded_folder_names)
                    and w.is_file()]
        wildcard_files.extend(sort_models(resolved, name_has_subpath=True))
        wildcard_files.append("-----")

    return wildcard_files
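Illustrative note, not part of the commit: get_ext_wildcards builds the listing as the wildcard base path, followed by one name,"sort key" line per file from sort_models, followed by a "-----" separator for each extension path. A hypothetical result:

# Sketch only; the paths and file names are made up.
#   /stable-diffusion-webui/extensions/sd-dynamic-prompts/wildcards
#   colors,"colors"
#   hair/long,"hair/long"
#   -----
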
@@ -52,11 +91,13 @@ def is_umi_format(data):
            break
    return not issue_found

def parse_umi_format(umi_tags, count, data):
count = 0
def parse_umi_format(umi_tags, data):
    global count
    for item in data:
        umi_tags[count] = ','.join(data[item]['Tags'])
        count += 1


def parse_dynamic_prompt_format(yaml_wildcards, data, path):
    # Recurse subkeys, delete those without string lists as values
@@ -66,7 +107,7 @@ def parse_dynamic_prompt_format(yaml_wildcards, data, path):
                recurse_dict(value)
            elif not (isinstance(value, list) and all(isinstance(v, str) for v in value)):
                del d[key]


    recurse_dict(data)
    # Add to yaml_wildcards
    yaml_wildcards[path.name] = data
@@ -76,13 +117,12 @@ def get_yaml_wildcards():
    """Returns a list of all tags found in extension YAML files found under a Tags: key."""
    yaml_files = []
    for path in WILDCARD_EXT_PATHS:
        yaml_files.extend(p for p in path.rglob("*.yml"))
        yaml_files.extend(p for p in path.rglob("*.yaml"))
        yaml_files.extend(p for p in path.rglob("*.yml") if p.is_file())
        yaml_files.extend(p for p in path.rglob("*.yaml") if p.is_file())

    yaml_wildcards = {}

    umi_tags = {} # { tag: count }
    count = 0

    for path in yaml_files:
        try:
@@ -90,21 +130,21 @@ def get_yaml_wildcards():
                data = yaml.safe_load(file)
                if (data):
                    if (is_umi_format(data)):
                        parse_umi_format(umi_tags, count, data)
                        parse_umi_format(umi_tags, data)
                    else:
                        parse_dynamic_prompt_format(yaml_wildcards, data, path)
                else:
                    print('No data found in ' + path.name)
        except yaml.YAMLError:
            print('Issue in parsing YAML file ' + path.name)
        except (yaml.YAMLError, UnicodeDecodeError) as e:
            print(f'Issue in parsing YAML file {path.name}: {e}')
            continue


    # Sort by count
    umi_sorted = sorted(umi_tags.items(), key=lambda item: item[1], reverse=True)
    umi_output = []
    for tag, count in umi_sorted:
        umi_output.append(f"{tag},{count}")


    if (len(umi_output) > 0):
        write_to_temp_file('umi_tags.txt', umi_output)

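Illustrative note, not part of the commit: a minimal sketch of what the UMI branch above works with, using a hypothetical YAML payload as yaml.safe_load would return it:

# Sketch only; the preset names and tags are made up.
data = {
    "PresetA": {"Tags": ["1girl", "solo"]},
    "PresetB": {"Tags": ["1girl", "outdoors"]},
}
umi_tags = {}
# parse_umi_format(umi_tags, data) joins each preset's tag list under a running index:
#   {0: "1girl,solo", 1: "1girl,outdoors"}
# get_yaml_wildcards then sorts these entries and writes them to umi_tags.txt as "{tag},{count}" lines.
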
@@ -118,48 +158,34 @@ def get_embeddings(sd_model):
    # Version constants
    V1_SHAPE = 768
    V2_SHAPE = 1024
    VXL_SHAPE = 2048
    emb_v1 = []
    emb_v2 = []
    emb_vXL = []
    results = []

    try:
        # Get embedding dict from sd_hijack to separate v1/v2 embeddings
        emb_type_a = sd_hijack.model_hijack.embedding_db.word_embeddings
        emb_type_b = sd_hijack.model_hijack.embedding_db.skipped_embeddings
        # Get the shape of the first item in the dict
        emb_a_shape = -1
        emb_b_shape = -1
        if (len(emb_type_a) > 0):
            emb_a_shape = next(iter(emb_type_a.items()))[1].shape
        if (len(emb_type_b) > 0):
            emb_b_shape = next(iter(emb_type_b.items()))[1].shape
        loaded = sd_hijack.model_hijack.embedding_db.word_embeddings
        skipped = sd_hijack.model_hijack.embedding_db.skipped_embeddings

        # Add embeddings to the correct list
        if (emb_a_shape == V1_SHAPE):
            emb_v1 = list(emb_type_a.keys())
        elif (emb_a_shape == V2_SHAPE):
            emb_v2 = list(emb_type_a.keys())
        for key, emb in (loaded | skipped).items():
            if emb.filename is None or emb.shape is None:
                continue

        if (emb_b_shape == V1_SHAPE):
            emb_v1 = list(emb_type_b.keys())
        elif (emb_b_shape == V2_SHAPE):
            emb_v2 = list(emb_type_b.keys())
            if emb.shape == V1_SHAPE:
                emb_v1.append((Path(emb.filename), key, "v1"))
            elif emb.shape == V2_SHAPE:
                emb_v2.append((Path(emb.filename), key, "v2"))
            elif emb.shape == VXL_SHAPE:
                emb_vXL.append((Path(emb.filename), key, "vXL"))

        # Get shape of current model
        #vec = sd_model.cond_stage_model.encode_embedding_init_text(",", 1)
        #model_shape = vec.shape[1]
        # Show relevant entries at the top
        #if (model_shape == V1_SHAPE):
        #    results = [e + ",v1" for e in emb_v1] + [e + ",v2" for e in emb_v2]
        #elif (model_shape == V2_SHAPE):
        #    results = [e + ",v2" for e in emb_v2] + [e + ",v1" for e in emb_v1]
        #else:
        #    raise AttributeError # Fallback to old method
        results = sorted([e + ",v1" for e in emb_v1] + [e + ",v2" for e in emb_v2], key=lambda x: x.lower())
        results = sort_models(emb_v1) + sort_models(emb_v2) + sort_models(emb_vXL)
    except AttributeError:
        print("tag_autocomplete_helper: Old webui version or unrecognized model shape, using fallback for embedding completion.")
        # Get a list of all embeddings in the folder
        all_embeds = [str(e.relative_to(EMB_PATH)) for e in EMB_PATH.rglob("*") if e.suffix in {".bin", ".pt", ".png",'.webp', '.jxl', '.avif'}]
        all_embeds = [str(e.relative_to(EMB_PATH)) for e in EMB_PATH.rglob("*") if e.suffix in {".bin", ".pt", ".png",'.webp', '.jxl', '.avif'} and e.is_file()]
        # Remove files with a size of 0
        all_embeds = [e for e in all_embeds if EMB_PATH.joinpath(e).stat().st_size > 0]
        # Remove file extensions
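Illustrative note, not part of the commit: with the shape constants above, each detected embedding becomes a (path, name, version) tuple, so the entries produced by sort_models would look roughly like this (the embedding names are hypothetical):

# Sketch only.
#   EasyNegative,"easynegative",v1
#   myXLEmbed,"myxlembed",vXL
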
@@ -173,53 +199,121 @@ def get_hypernetworks():

    # Get a list of all hypernetworks in the folder
    hyp_paths = [Path(h) for h in glob.glob(HYP_PATH.joinpath("**/*").as_posix(), recursive=True)]
    all_hypernetworks = [str(h.name) for h in hyp_paths if h.suffix in {".pt"}]
    # Remove file extensions
    return sorted([h[:h.rfind('.')] for h in all_hypernetworks], key=lambda x: x.lower())
    all_hypernetworks = [(h, h.stem) for h in hyp_paths if h.suffix in {".pt"} and h.is_file()]
    return sort_models(all_hypernetworks)

model_keyword_installed = write_model_keyword_path()


def _get_lora():
    """
    Write a list of all lora.
    Fallback method for when the built-in Lora.networks module is not available.
    """
    # Get a list of all lora in the folder
    lora_paths = [
        Path(l)
        for l in glob.glob(LORA_PATH.joinpath("**/*").as_posix(), recursive=True)
    ]
    # Get hashes
    valid_loras = [
        lf
        for lf in lora_paths
        if lf.suffix in {".safetensors", ".ckpt", ".pt"} and lf.is_file()
    ]

    return valid_loras


def _get_lyco():
    """
    Write a list of all LyCORIS/LOHA from https://github.com/KohakuBlueleaf/a1111-sd-webui-lycoris
    Fallback method for when the built-in Lora.networks module is not available.
    """
    # Get a list of all LyCORIS in the folder
    lyco_paths = [
        Path(ly)
        for ly in glob.glob(LYCO_PATH.joinpath("**/*").as_posix(), recursive=True)
    ]

    # Get hashes
    valid_lycos = [
        lyf
        for lyf in lyco_paths
        if lyf.suffix in {".safetensors", ".ckpt", ".pt"} and lyf.is_file()
    ]
    return valid_lycos


# Attempt to use the built-in Lora.networks Lora/LyCORIS models lists.
try:
    import sys
    from modules import extensions
    sys.path.append(Path(extensions.extensions_builtin_dir).joinpath("Lora").as_posix())
    import lora # pyright: ignore [reportMissingImports]

    def _get_lora():
        return [
            Path(model.filename).absolute()
            for model in lora.available_loras.values()
            if Path(model.filename).absolute().is_relative_to(LORA_PATH)
        ]

    def _get_lyco():
        return [
            Path(model.filename).absolute()
            for model in lora.available_loras.values()
            if Path(model.filename).absolute().is_relative_to(LYCO_PATH)
        ]

except Exception as e:
    pass
    # no need to report
    # print(f'Exception setting-up performant fetchers: {e}')


def get_lora():
    """Write a list of all lora"""
    global model_keyword_installed

    # Get a list of all lora in the folder
    lora_paths = [Path(l) for l in glob.glob(LORA_PATH.joinpath("**/*").as_posix(), recursive=True)]
    # Get hashes
    valid_loras = [lf for lf in lora_paths if lf.suffix in {".safetensors", ".ckpt", ".pt"}]
    hashes = {}
    valid_loras = _get_lora()
    loras_with_hash = []
    for l in valid_loras:
        if not l.exists() or not l.is_file():
            continue
        name = l.relative_to(LORA_PATH).as_posix()
        if model_keyword_installed:
            hashes[name] = get_lora_simple_hash(l)
            hash = get_lora_simple_hash(l)
        else:
            hashes[name] = ""
            hash = ""
        loras_with_hash.append((l, name, hash))
    # Sort
    sorted_loras = dict(sorted(hashes.items()))
    # Add hashes and return
    return [f"\"{name}\",{hash}" for name, hash in sorted_loras.items()]
    return sort_models(loras_with_hash)


def get_lyco():
    """Write a list of all LyCORIS/LOHA from https://github.com/KohakuBlueleaf/a1111-sd-webui-lycoris"""

    # Get a list of all LyCORIS in the folder
    lyco_paths = [Path(ly) for ly in glob.glob(LYCO_PATH.joinpath("**/*").as_posix(), recursive=True)]

    # Get hashes
    valid_lycos = [lyf for lyf in lyco_paths if lyf.suffix in {".safetensors", ".ckpt", ".pt"}]
    hashes = {}
    valid_lycos = _get_lyco()
    lycos_with_hash = []
    for ly in valid_lycos:
        if not ly.exists() or not ly.is_file():
            continue
        name = ly.relative_to(LYCO_PATH).as_posix()
        if model_keyword_installed:
            hashes[name] = get_lora_simple_hash(ly)
            hash = get_lora_simple_hash(ly)
        else:
            hashes[name] = ""

            hash = ""
        lycos_with_hash.append((ly, name, hash))
    # Sort
    sorted_lycos = dict(sorted(hashes.items()))
    # Add hashes and return
    return [f"\"{name}\",{hash}" for name, hash in sorted_lycos.items()]
    return sort_models(lycos_with_hash)

def get_style_names():
    try:
        style_names: list[str] = shared.prompt_styles.styles.keys()
        style_names = sorted(style_names, key=len, reverse=True)
        return style_names
    except Exception:
        return None

def write_tag_base_path():
    """Writes the tag base path to a fixed location temporary file"""
@@ -235,19 +329,19 @@ def write_to_temp_file(name, data):

csv_files = []
csv_files_withnone = []
def update_tag_files():
def update_tag_files(*args, **kwargs):
    """Returns a list of all potential tag files"""
    global csv_files, csv_files_withnone
    files = [str(t.relative_to(TAGS_PATH)) for t in TAGS_PATH.glob("*.csv")]
    files = [str(t.relative_to(TAGS_PATH)) for t in TAGS_PATH.glob("*.csv") if t.is_file()]
    csv_files = files
    csv_files_withnone = ["None"] + files

json_files = []
json_files_withnone = []
def update_json_files():
def update_json_files(*args, **kwargs):
    """Returns a list of all potential json files"""
    global json_files, json_files_withnone
    files = [str(j.relative_to(TAGS_PATH)) for j in TAGS_PATH.glob("*.json")]
    files = [str(j.relative_to(TAGS_PATH)) for j in TAGS_PATH.glob("*.json") if j.is_file()]
    json_files = files
    json_files_withnone = ["None"] + files

@@ -274,6 +368,7 @@ write_to_temp_file('umi_tags.txt', [])
write_to_temp_file('hyp.txt', [])
write_to_temp_file('lora.txt', [])
write_to_temp_file('lyco.txt', [])
write_to_temp_file('styles.txt', [])
# Only reload embeddings if the file doesn't exist, since they are already re-written on model load
if not TEMP_PATH.joinpath("emb.txt").exists():
    write_to_temp_file('emb.txt', [])
@@ -283,22 +378,41 @@ if EMB_PATH.exists():
    # Get embeddings after the model loaded callback
    script_callbacks.on_model_loaded(get_embeddings)

def refresh_temp_files():
    global WILDCARD_EXT_PATHS
    WILDCARD_EXT_PATHS = find_ext_wildcard_paths()
    load_textual_inversion_embeddings(force_reload = True) # Instant embedding reload.
    write_temp_files()
    get_embeddings(shared.sd_model)
def refresh_embeddings(force: bool, *args, **kwargs):
    try:
        # Fix for SD.Next infinite refresh loop due to gradio not updating after model load on demand.
        # This will just skip embedding loading if no model is loaded yet (or there really are no embeddings).
        # Try catch is just for safety in case sd_hijack access fails for some reason.
        loaded = sd_hijack.model_hijack.embedding_db.word_embeddings
        skipped = sd_hijack.model_hijack.embedding_db.skipped_embeddings
        if len((loaded | skipped)) > 0:
            load_textual_inversion_embeddings(force_reload=force)
            get_embeddings(None)
    except Exception:
        pass

def write_temp_files():
def refresh_temp_files(*args, **kwargs):
    global WILDCARD_EXT_PATHS
    skip_wildcard_refresh = getattr(shared.opts, "tac_skipWildcardRefresh", False)
    if skip_wildcard_refresh:
        WILDCARD_EXT_PATHS = find_ext_wildcard_paths()
    write_temp_files(skip_wildcard_refresh)
    refresh_embeddings(force=True)

def write_style_names(*args, **kwargs):
    styles = get_style_names()
    if styles:
        write_to_temp_file('styles.txt', styles)

def write_temp_files(skip_wildcard_refresh = False):
    # Write wildcards to wc.txt if found
    if WILDCARD_PATH.exists():
    if WILDCARD_PATH.exists() and not skip_wildcard_refresh:
        wildcards = [WILDCARD_PATH.relative_to(FILE_DIR).as_posix()] + get_wildcards()
        if wildcards:
            write_to_temp_file('wc.txt', wildcards)

    # Write extension wildcards to wce.txt if found
    if WILDCARD_EXT_PATHS is not None:
    if WILDCARD_EXT_PATHS is not None and not skip_wildcard_refresh:
        wildcards_ext = get_ext_wildcards()
        if wildcards_ext:
            write_to_temp_file('wce.txt', wildcards_ext)
@@ -318,7 +432,7 @@ def write_temp_files():
        lora = get_lora()
        if lora:
            write_to_temp_file('lora.txt', lora)


    lyco_exists = LYCO_PATH is not None and LYCO_PATH.exists()
    if lyco_exists and not (lora_exists and LYCO_PATH.samefile(LORA_PATH)):
        lyco = get_lyco()
@@ -330,6 +444,8 @@ def write_temp_files():
    if model_keyword_installed:
        update_hash_cache()

    if shared.prompt_styles is not None:
        write_style_names()

write_temp_files()

@@ -368,12 +484,18 @@ def on_ui_settings():
        "tac_delayTime": shared.OptionInfo(100, "Time in ms to wait before triggering completion again").needs_restart(),
        "tac_useWildcards": shared.OptionInfo(True, "Search for wildcards"),
        "tac_sortWildcardResults": shared.OptionInfo(True, "Sort wildcard file contents alphabetically").info("If your wildcard files have a specific custom order, disable this to keep it"),
        "tac_wildcardExclusionList": shared.OptionInfo("", "Wildcard folder exclusion list").info("Add folder names that shouldn't be searched for wildcards, separated by comma.").needs_restart(),
        "tac_skipWildcardRefresh": shared.OptionInfo(False, "Don't re-scan for wildcard files when pressing the extra networks refresh button").info("Useful to prevent hanging if you use a very large wildcard collection."),
        "tac_useEmbeddings": shared.OptionInfo(True, "Search for embeddings"),
        "tac_includeEmbeddingsInNormalResults": shared.OptionInfo(False, "Include embeddings in normal tag results").info("The 'JumpTo...' keybinds (End & Home key by default) will select the first non-embedding result of their direction on the first press for quick navigation in longer lists."),
        "tac_useHypernetworks": shared.OptionInfo(True, "Search for hypernetworks"),
        "tac_useLoras": shared.OptionInfo(True, "Search for Loras"),
        "tac_useLycos": shared.OptionInfo(True, "Search for LyCORIS/LoHa"),
        "tac_useLoraPrefixForLycos": shared.OptionInfo(True, "Use the '<lora:' prefix instead of '<lyco:' for models in the LyCORIS folder").info("The lyco prefix is included for backwards compatibility and not used anymore by default. Disable this if you are on an old webui version without built-in lyco support."),
        "tac_showWikiLinks": shared.OptionInfo(False, "Show '?' next to tags, linking to its Danbooru or e621 wiki page").info("Warning: This is an external site and very likely contains NSFW examples!"),
        "tac_showExtraNetworkPreviews": shared.OptionInfo(True, "Show preview thumbnails for extra networks if available"),
        "tac_modelSortOrder": shared.OptionInfo("Name", "Model sort order", gr.Dropdown, lambda: {"choices": list(sort_criteria.keys())}).info("Order for extra network models and wildcards in dropdown"),
        "tac_useStyleVars": shared.OptionInfo(False, "Search for webui style names").info("Suggests style names from the webui dropdown with '$'. Currently requires a secondary extension like <a href=\"https://github.com/SirVeggie/extension-style-vars\" target=\"_blank\">style-vars</a> to actually apply the styles before generating."),
        # Insertion related settings
        "tac_replaceUnderscores": shared.OptionInfo(True, "Replace underscores with spaces on insertion"),
        "tac_escapeParentheses": shared.OptionInfo(True, "Escape parentheses on insertion"),
@@ -381,6 +503,7 @@ def on_ui_settings():
        "tac_appendSpace": shared.OptionInfo(True, "Append space on tag autocompletion").info("will append after comma if the above is enabled"),
        "tac_alwaysSpaceAtEnd": shared.OptionInfo(True, "Always append space if inserting at the end of the textbox").info("takes precedence over the regular space setting for that position"),
        "tac_modelKeywordCompletion": shared.OptionInfo("Never", "Try to add known trigger words for LORA/LyCO models", gr.Dropdown, lambda: {"choices": ["Never","Only user list","Always"]}).info("Will use & prefer the native activation keywords settable in the extra networks UI. Other functionality requires the <a href=\"https://github.com/mix1009/model-keyword\" target=\"_blank\">model-keyword</a> extension to be installed, but will work with it disabled.").needs_restart(),
        "tac_modelKeywordLocation": shared.OptionInfo("Start of prompt", "Where to insert the trigger keyword", gr.Dropdown, lambda: {"choices": ["Start of prompt","End of prompt","Before LORA/LyCO"]}).info("Only relevant if the above option is enabled"),
        "tac_wildcardCompletionMode": shared.OptionInfo("To next folder level", "How to complete nested wildcard paths", gr.Dropdown, lambda: {"choices": ["To next folder level","To first difference","Always fully"]}).info("e.g. \"hair/colours/light/...\""),
        # Alias settings
        "tac_alias.searchByAlias": shared.OptionInfo(True, "Search by alias"),
@@ -451,28 +574,43 @@ def on_ui_settings():
    shared.opts.add_option("tac_colormap", shared.OptionInfo(colorDefault, colorLabel, gr.Textbox, section=TAC_SECTION))

    shared.opts.add_option("tac_refreshTempFiles", shared.OptionInfo("Refresh TAC temp files", "Refresh internal temp files", gr.HTML, {}, refresh=refresh_temp_files, section=TAC_SECTION))


script_callbacks.on_ui_settings(on_ui_settings)

def get_style_mtime():
    try:
        style_file = getattr(shared, "styles_filename", "styles.csv")
        # Check in case a list is returned
        if isinstance(style_file, list):
            style_file = style_file[0]

        style_file = Path(FILE_DIR).joinpath(style_file)
        if Path.exists(style_file):
            return style_file.stat().st_mtime
    except Exception:
        return None

last_style_mtime = get_style_mtime()

def api_tac(_: gr.Blocks, app: FastAPI):
    async def get_json_info(base_path: Path, filename: str = None):
        if base_path is None or (not base_path.exists()):
            return JSONResponse({}, status_code=404)

            return Response(status_code=404)

        try:
            json_candidates = glob.glob(base_path.as_posix() + f"/**/{filename}.json", recursive=True)
            if json_candidates is not None and len(json_candidates) > 0:
            if json_candidates is not None and len(json_candidates) > 0 and Path(json_candidates[0]).is_file():
                return FileResponse(json_candidates[0])
        except Exception as e:
            return JSONResponse({"error": e}, status_code=500)


    async def get_preview_thumbnail(base_path: Path, filename: str = None, blob: bool = False):
        if base_path is None or (not base_path.exists()):
            return JSONResponse({}, status_code=404)

            return Response(status_code=404)

        try:
            img_glob = glob.glob(base_path.as_posix() + f"/**/{filename}.*", recursive=True)
            img_candidates = [img for img in img_glob if Path(img).suffix in [".png", ".jpg", ".jpeg", ".webp", ".gif"]]
            img_candidates = [img for img in img_glob if Path(img).suffix in [".png", ".jpg", ".jpeg", ".webp", ".gif"] and Path(img).is_file()]
            if img_candidates is not None and len(img_candidates) > 0:
                if blob:
                    return FileResponse(img_candidates[0])
@@ -481,14 +619,34 @@ def api_tac(_: gr.Blocks, app: FastAPI):
        except Exception as e:
            return JSONResponse({"error": e}, status_code=500)

    @app.post("/tacapi/v1/refresh-temp-files")
    async def api_refresh_temp_files():
        refresh_temp_files()

    @app.post("/tacapi/v1/refresh-embeddings")
    async def api_refresh_embeddings():
        refresh_embeddings(force=False)

    @app.get("/tacapi/v1/lora-info/{lora_name}")
    async def get_lora_info(lora_name):
        return await get_json_info(LORA_PATH, lora_name)


    @app.get("/tacapi/v1/lyco-info/{lyco_name}")
    async def get_lyco_info(lyco_name):
        return await get_json_info(LYCO_PATH, lyco_name)


    @app.get("/tacapi/v1/lora-cached-hash/{lora_name}")
    async def get_lora_cached_hash(lora_name: str):
        path_glob = glob.glob(LORA_PATH.as_posix() + f"/**/{lora_name}.*", recursive=True)
        paths = [lora for lora in path_glob if Path(lora).suffix in [".safetensors", ".ckpt", ".pt"] and Path(lora).is_file()]
        if paths is not None and len(paths) > 0:
            path = paths[0]
            hash = hashes.sha256_from_cache(path, f"lora/{lora_name}", path.endswith(".safetensors"))
            if hash is not None:
                return hash

        return None

    def get_path_for_type(type):
        if type == "lora":
            return LORA_PATH
@@ -504,28 +662,42 @@ def api_tac(_: gr.Blocks, app: FastAPI):
    @app.get("/tacapi/v1/thumb-preview/{filename}")
    async def get_thumb_preview(filename, type):
        return await get_preview_thumbnail(get_path_for_type(type), filename, False)


    @app.get("/tacapi/v1/thumb-preview-blob/{filename}")
    async def get_thumb_preview_blob(filename, type):
        return await get_preview_thumbnail(get_path_for_type(type), filename, True)


    @app.get("/tacapi/v1/wildcard-contents")
    async def get_wildcard_contents(basepath: str, filename: str):
        if basepath is None or basepath == "":
            return JSONResponse({}, status_code=404)
            return Response(status_code=404)

        base = Path(basepath)
        if base is None or (not base.exists()):
            return JSONResponse({}, status_code=404)

            return Response(status_code=404)

        try:
            wildcard_path = base.joinpath(filename)
            if wildcard_path.exists():
            if wildcard_path.exists() and wildcard_path.is_file():
                return FileResponse(wildcard_path)
            else:
                return JSONResponse({}, status_code=404)
                return Response(status_code=404)
        except Exception as e:
            return JSONResponse({"error": e}, status_code=500)

    @app.get("/tacapi/v1/refresh-styles-if-changed")
    async def refresh_styles_if_changed():
        global last_style_mtime

        mtime = get_style_mtime()
        if mtime is not None and mtime > last_style_mtime:
            last_style_mtime = mtime
            # Update temp file
            if shared.prompt_styles is not None:
                write_style_names()

            return Response(status_code=200) # Success
        else:
            return Response(status_code=304) # Not modified

script_callbacks.on_app_started(api_tac)

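Illustrative note, not part of the commit: a minimal sketch of how a client could call the endpoints registered above once the webui is running. The base URL and the model name are assumptions, not from the diff.

# Sketch only; requires the requests package and a running webui on 127.0.0.1:7860.
import requests

base = "http://127.0.0.1:7860"
requests.post(f"{base}/tacapi/v1/refresh-temp-files")  # rebuild the temp file lists
thumb = requests.get(f"{base}/tacapi/v1/thumb-preview/someLora", params={"type": "lora"})
styles = requests.get(f"{base}/tacapi/v1/refresh-styles-if-changed")  # 200 if styles changed, 304 otherwise
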
tags/e621_sfw.csv: new file, 22419 lines (file diff suppressed because one or more lines are too long)