Mirror of https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git
Synced 2026-01-26 19:19:57 +00:00

Compare commits: 114 commits
Commits in this comparison (short SHAs):

119a3ad51f, c820a22149, eb1e1820f9, ef59cff651, a454383c43, bec567fe26, d4041096c9, 0903259ddf, f3e64b1fa5, 312cec5d71,
b71e6339bd, 7ddbc3c0b2, 4c2ef8f770, 97c5e4f53c, 1d8d9f64b5, 7437850600, 829a4a7b89, 22472ac8ad, 5f77fa26d3, f810b2dd8f,
08d3436f3b, afa13306ef, 95200e82e1, a63ce64f4e, a966be7546, d37e37acfa, 342fbc9041, d496569c9a, 7778142520, cde90c13c4,
231b121fe0, c659ed2155, 0a4c17cada, 6e65811d4a, 03673c060e, 1c11c4ad5a, 30c9593d3d, f840586b6b, 886704e351, 41626d22c3,
57076060df, 5ef346cde3, edf76d9df2, 837dc39811, f1870b7e87, 20b6635a2a, 1fe8f26670, e82e958c3e, 2dd48eab79, 4df90f5c95,
a156214a48, 15478e73b5, fcacf7dd66, 82f819f336, effda54526, 434301738a, 58804796f0, 668ca800b8, a7233a594f, 4fba7baa69,
5ebe22ddfc, 44c5450b28, 5fd48f53de, 7128efc4f4, bd0ddfbb24, 3108daf0e8, 446ac14e7f, 363895494b, 04551a8132, ffc0e378d3,
440f109f1f, 80fb247dbe, b3e71e840d, 998514bebb, d7e98200a8, ac790c8ede, 22365ec8d6, 030a83aa4d, 460d32a4ed, 581bf1e6a4,
74ea5493e5, 94ec8884c3, 6cf9acd6ab, 109a8a155e, 3caa1b51ed, b44c36425a, 1e81403180, 0f487a5c5c, 2baa12fea3, 1a9157fe6e,
67eeb5fbf6, 5911248ab9, 1c693c0263, 11ffed8afc, cb54b66eda, 92a937ad01, ba9dce8d90, 2622e1b596, b03b1a0211, 3e33169a3a,
d8d991531a, f626b9453d, 5067afeee9, 018c6c8198, 2846d79b7d, 783a847978, 44effca702, 475ef59197, 3953260485, 0a8e7d7d84,
46d07d703a, bd1dbe92c2, 66fa745d6f, 37b5dca66e
.gitignore (vendored), 1 change
@@ -1,2 +1,3 @@
 tags/temp/
 __pycache__/
+tags/tag_frequency.db
@@ -20,6 +20,10 @@ Booru style tag autocompletion for the AUTOMATIC1111 Stable Diffusion WebUI
 </div>
 <br/>
 
+#### ⚠️ Notice:
+I am currently looking for feedback on a new feature I'm working on and want to release soon.<br/>
+Please check [the announcement post](https://github.com/DominikDoom/a1111-sd-webui-tagcomplete/discussions/270) for more info if you are interested to help.
+
 # 📄 Description
 
 Tag Autocomplete is an extension for the popular [AUTOMATIC1111 web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) for Stable Diffusion.
@@ -486,6 +490,7 @@ Example with Chinese translation:
 ## List of translations
 - [🇨🇳 Chinese tags](https://github.com/DominikDoom/a1111-sd-webui-tagcomplete/discussions/23) by @HalfMAI, using machine translation and manual correction for the most common tags (uses legacy format)
 - [🇨🇳 Chinese tags](https://github.com/sgmklp/tag-for-autocompletion-with-translation) by @sgmklp, smaller set of manual translations based on https://github.com/zcyzcy88/TagTable
+- [🇯🇵 Japanese tags](https://github.com/DominikDoom/a1111-sd-webui-tagcomplete/discussions/265) by @applemango, both machine and human translations available
 
 > ### 🫵 I need your help!
 > Translations are a community effort. If you have translated a tag file or want to create one, please open a Pull Request or Issue so your link can be added here.
@@ -410,8 +410,9 @@ https://www.w3.org/TR/uievents-key/#named-key-attribute-value
 ![keyboard hotkeys](https://dominikdoom.github.io/a1111-sd-webui-tagcomplete/hotkeys.png)
 
 ## 翻訳リスト
-- [🇨🇳 Chinese tags](https://github.com/DominikDoom/a1111-sd-webui-tagcomplete/discussions/23) by @HalfMAI, 最も一般的なタグを機械翻訳と手作業で修正(レガシーフォーマットを使用)
-- [🇨🇳 Chinese tags](https://github.com/sgmklp/tag-for-autocompletion-with-translation) by @sgmklp, [こちら](https://github.com/zcyzcy88/TagTable)をベースにして、より小さくした手動での翻訳セット。
+- [🇨🇳 中国語訳](https://github.com/DominikDoom/a1111-sd-webui-tagcomplete/discussions/23) by @HalfMAI, 最も一般的なタグを機械翻訳と手作業で修正(レガシーフォーマットを使用)
+- [🇨🇳 中国語訳](https://github.com/sgmklp/tag-for-autocompletion-with-translation) by @sgmklp, [こちら](https://github.com/zcyzcy88/TagTable)をベースにして、より小さくした手動での翻訳セット。
+- [🇯🇵 日本語訳](https://github.com/DominikDoom/a1111-sd-webui-tagcomplete/discussions/265) by @applemango, 機械翻訳と人力翻訳の両方が利用可能。
 
 > ### 🫵 あなたの助けが必要です!
 > 翻訳はコミュニティの努力により支えられています。もしあなたがタグファイルを翻訳したことがある場合、または作成したい場合は、あなたの成果をここに追加できるように、Pull RequestまたはIssueを開いてください。
@@ -13,6 +13,12 @@
 你可以按照[以下方法](#installation)下载或拷贝文件,也可以使用[Releases](https://github.com/DominikDoom/a1111-sd-webui-tagcomplete/releases)中打包好的文件。
 
 ## 常见问题 & 已知缺陷:
+- 很多中国用户都报告过此扩展名和其他扩展名的 JavaScript 文件被阻止的问题。
+  常见的罪魁祸首是 IDM / Internet Download Manager 浏览器插件,它似乎出于安全目的阻止了本地文件请求。
+  如果您安装了 IDM,请确保在使用 webui 时禁用以下插件:
+
+  ![IDMSettings](https://user-images.githubusercontent.com/34448969/232188990-ca0a26dd-9cb6-4a41-9b8e-a465e74fca79.png)
+
 - 当`replaceUnderscores`选项开启时, 脚本只会替换Tag的一部分如果Tag包含多个单词,比如将`atago (azur lane)`修改`atago`为`taihou`并使用自动补全时.会得到 `taihou (azur lane), lane)`的结果, 因为脚本没有把后面的部分认为成同一个Tag。
 
 ## 演示与截图
@@ -2,6 +2,7 @@
 var TAC_CFG = null;
 var tagBasePath = "";
 var modelKeywordPath = "";
+var tacSelfTrigger = false;
 
 // Tag completion data loaded from files
 var allTags = [];
@@ -18,6 +19,7 @@ var loras = [];
 var lycos = [];
 var modelKeywordDict = new Map();
 var chants = [];
+var styleNames = [];
 
 // Selected model info for black/whitelisting
 var currentModelHash = "";
@@ -12,7 +12,8 @@ const ResultType = Object.freeze({
     "hypernetwork": 8,
     "lora": 9,
     "lyco": 10,
-    "chant": 11
+    "chant": 11,
+    "styleName": 12
 });
 
 // Class to hold result data and annotations to make it clearer to use
@@ -23,10 +24,12 @@ class AutocompleteResult {
 
     // Additional info, only used in some cases
     category = null;
-    count = null;
+    count = Number.MAX_SAFE_INTEGER;
+    usageBias = null;
     aliases = null;
     meta = null;
     hash = null;
+    sortKey = null;
 
     // Constructor
    constructor(text, type) {
@@ -79,6 +79,13 @@ const thirdParty = {
             "[id^=script_img2img_adetailer_ad_prompt] textarea",
             "[id^=script_img2img_adetailer_ad_negative_prompt] textarea"
         ]
-    }
+    },
+    "deepdanbooru-object-recognition": {
+        "base": "#tab_deepdanboru_object_recg_tab",
+        "hasIds": false,
+        "selectors": [
+            "Found tags",
+        ]
+    }
 }
@@ -187,4 +194,4 @@ function getTextAreaIdentifier(textArea) {
             break;
     }
     return modifier;
-}
\ No newline at end of file
+}
@@ -1,13 +1,14 @@
 // Utility functions for tag autocomplete
 
 // Parse the CSV file into a 2D array. Doesn't use regex, so it is very lightweight.
+// We are ignoring newlines in quote fields since we expect one-line entries and parsing would break for unclosed quotes otherwise
 function parseCSV(str) {
-    var arr = [];
-    var quote = false; // 'true' means we're inside a quoted field
+    const arr = [];
+    let quote = false; // 'true' means we're inside a quoted field
 
     // Iterate over each character, keep track of current row and column (of the returned array)
-    for (var row = 0, col = 0, c = 0; c < str.length; c++) {
-        var cc = str[c], nc = str[c + 1]; // Current character, next character
+    for (let row = 0, col = 0, c = 0; c < str.length; c++) {
+        let cc = str[c], nc = str[c+1]; // Current character, next character
         arr[row] = arr[row] || []; // Create a new row if necessary
         arr[row][col] = arr[row][col] || ''; // Create a new column (start with empty string) if necessary
@@ -22,14 +23,12 @@ function parseCSV(str) {
         // If it's a comma and we're not in a quoted field, move on to the next column
         if (cc == ',' && !quote) { ++col; continue; }
 
-        // If it's a newline (CRLF) and we're not in a quoted field, skip the next character
-        // and move on to the next row and move to column 0 of that new row
-        if (cc == '\r' && nc == '\n' && !quote) { ++row; col = 0; ++c; continue; }
+        // If it's a newline (CRLF), skip the next character and move on to the next row and move to column 0 of that new row
+        if (cc == '\r' && nc == '\n') { ++row; col = 0; ++c; quote = false; continue; }
 
-        // If it's a newline (LF or CR) and we're not in a quoted field,
-        // move on to the next row and move to column 0 of that new row
-        if (cc == '\n' && !quote) { ++row; col = 0; continue; }
-        if (cc == '\r' && !quote) { ++row; col = 0; continue; }
+        // If it's a newline (LF or CR) move on to the next row and move to column 0 of that new row
+        if (cc == '\n') { ++row; col = 0; quote = false; continue; }
+        if (cc == '\r') { ++row; col = 0; quote = false; continue; }
 
         // Otherwise, append the current character to the current column
         arr[row][col] += cc;
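To illustrate the patched behavior, a minimal usage sketch (input data is made up, not from the repository): a bare CR or LF now always ends the row, even if a quote was left unclosed earlier on the line.

// Hypothetical input: a quoted field containing a comma, then a line with an unclosed quote
const rows = parseCSV('"tag, with comma",2\r\n"unclosed,3\nnext_tag,4');
// rows[0] -> ['tag, with comma', '2']  (the quoted comma does not split the field)
// rows[1] -> ['unclosed,3']            (the row still ends at \n despite the open quote)
// rows[2] -> ['next_tag', '4']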
@@ -81,6 +80,32 @@ async function fetchAPI(url, json = true, cache = false) {
     return await response.text();
 }
 
+async function postAPI(url, body = null) {
+    let response = await fetch(url, {
+        method: "POST",
+        headers: {'Content-Type': 'application/json'},
+        body: body
+    });
+
+    if (response.status != 200) {
+        console.error(`Error posting to API endpoint "${url}": ` + response.status, response.statusText);
+        return null;
+    }
+
+    return await response.json();
+}
+
+async function putAPI(url, body = null) {
+    let response = await fetch(url, { method: "PUT", body: body });
+
+    if (response.status != 200) {
+        console.error(`Error putting to API endpoint "${url}": ` + response.status, response.statusText);
+        return null;
+    }
+
+    return await response.json();
+}
+
 // Extra network preview thumbnails
 async function getExtraNetworkPreviewURL(filename, type) {
     const previewJSON = await fetchAPI(`tacapi/v1/thumb-preview/${filename}?type=${type}`, true, true);
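A quick usage sketch for the helpers added above (the payload values are illustrative): postAPI passes `body` through to fetch unchanged, so JSON payloads must be stringified by the caller, as getUseCounts does further down.

// Hypothetical call against one of the extension's own endpoints
const body = JSON.stringify({ tagNames: ["1girl"], tagTypes: [1], neg: false });
const data = await postAPI("tacapi/v1/get-use-count-list", body);
if (data) console.log(data["result"]); // null is returned on any non-200 status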
@@ -98,6 +123,29 @@ async function getExtraNetworkPreviewURL(filename, type) {
     }
 }
 
+lastStyleRefresh = 0;
+// Refresh style file if needed
+async function refreshStyleNamesIfChanged() {
+    // Only refresh once per second
+    currentTimestamp = new Date().getTime();
+    if (currentTimestamp - lastStyleRefresh < 1000) return;
+    lastStyleRefresh = currentTimestamp;
+
+    const response = await fetch(`tacapi/v1/refresh-styles-if-changed?${new Date().getTime()}`)
+    if (response.status === 304) {
+        // Not modified
+    } else if (response.status === 200) {
+        // Reload
+        QUEUE_FILE_LOAD.forEach(async fn => {
+            if (fn.toString().includes("styleNames"))
+                await fn.call(null, true);
+        })
+    } else {
+        // Error
+        console.error(`Error refreshing styles.txt: ` + response.status, response.statusText);
+    }
+}
+
 // Debounce function to prevent spamming the autocomplete function
 var dbTimeOut;
 const debounce = (func, wait = 300) => {
@@ -147,6 +195,104 @@ function flatten(obj, roots = [], sep = ".") {
     );
 }
 
+// Calculate biased tag score based on post count and frequent usage
+function calculateUsageBias(result, count, uses) {
+    // Check setting conditions
+    if (uses < TAC_CFG.frequencyMinCount) {
+        uses = 0;
+    } else if (uses != 0) {
+        result.usageBias = true;
+    }
+
+    switch (TAC_CFG.frequencyFunction) {
+        case "Logarithmic (weak)":
+            return Math.log(1 + count) + Math.log(1 + uses);
+        case "Logarithmic (strong)":
+            return Math.log(1 + count) + 2 * Math.log(1 + uses);
+        case "Usage first":
+            return uses;
+        default:
+            return count;
+    }
+}
+// Beautify return type for easier parsing
+function mapUseCountArray(useCounts, posAndNeg = false) {
+    return useCounts.map(useCount => {
+        if (posAndNeg) {
+            return {
+                "name": useCount[0],
+                "type": useCount[1],
+                "count": useCount[2],
+                "negCount": useCount[3],
+                "lastUseDate": useCount[4]
+            }
+        }
+        return {
+            "name": useCount[0],
+            "type": useCount[1],
+            "count": useCount[2],
+            "lastUseDate": useCount[3]
+        }
+    });
+}
+// Call API endpoint to increase bias of tag in the database
+function increaseUseCount(tagName, type, negative = false) {
+    postAPI(`tacapi/v1/increase-use-count?tagname=${tagName}&ttype=${type}&neg=${negative}`);
+}
+// Get use count of tag from the database
+async function getUseCount(tagName, type, negative = false) {
+    return (await fetchAPI(`tacapi/v1/get-use-count?tagname=${tagName}&ttype=${type}&neg=${negative}`, true, false))["result"];
+}
+async function getUseCounts(tagNames, types, negative = false) {
+    // While semantically weird, we have to use POST here for the body, as urls are limited in length
+    const body = JSON.stringify({"tagNames": tagNames, "tagTypes": types, "neg": negative});
+    const rawArray = (await postAPI(`tacapi/v1/get-use-count-list`, body))["result"]
+    return mapUseCountArray(rawArray);
+}
+async function getAllUseCounts() {
+    const rawArray = (await fetchAPI(`tacapi/v1/get-all-use-counts`))["result"];
+    return mapUseCountArray(rawArray, true);
+}
+async function resetUseCount(tagName, type, resetPosCount, resetNegCount) {
+    await putAPI(`tacapi/v1/reset-use-count?tagname=${tagName}&ttype=${type}&pos=${resetPosCount}&neg=${resetNegCount}`);
+}
+
+function createTagUsageTable(tagCounts) {
+    // Create table
+    let tagTable = document.createElement("table");
+    tagTable.innerHTML =
+        `<thead>
+            <tr>
+                <td>Name</td>
+                <td>Type</td>
+                <td>Count(+)</td>
+                <td>Count(-)</td>
+                <td>Last used</td>
+            </tr>
+        </thead>`;
+    tagTable.id = "tac_tagUsageTable"
+
+    tagCounts.forEach(t => {
+        let tr = document.createElement("tr");
+
+        // Fill values
+        let values = [t.name, t.type-1, t.count, t.negCount, t.lastUseDate]
+        values.forEach(v => {
+            let td = document.createElement("td");
+            td.innerText = v;
+            tr.append(td);
+        });
+        // Add delete/reset button
+        let delButton = document.createElement("button");
+        delButton.innerText = "🗑️";
+        delButton.title = "Reset count";
+        tr.append(delButton);
+
+        tagTable.append(tr)
+    });
+
+    return tagTable;
+}
+
 // Sliding window function to get possible combination groups of an array
 function toNgrams(inputArray, size) {
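A worked example of the frequency functions added above, with made-up numbers and assuming TAC_CFG.frequencyMinCount = 3:

// Tag with 50000 posts, used 10 times by this user:
// "Logarithmic (weak)"   -> Math.log(50001) + Math.log(11)     ≈ 10.82 + 2.40 = 13.22
// "Logarithmic (strong)" -> Math.log(50001) + 2 * Math.log(11) ≈ 10.82 + 4.80 = 15.62
// "Usage first"          -> 10 (post count is ignored entirely)
// default                -> 50000 (plain post-count ordering)
// With only 2 uses (below frequencyMinCount), uses is reset to 0 and no usageBias flag is set.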
@@ -156,7 +302,11 @@ function toNgrams(inputArray, size) {
     );
 }
 
-function escapeRegExp(string) {
+function escapeRegExp(string, wildcardMatching = false) {
+    if (wildcardMatching) {
+        // Escape all characters except asterisks and ?, which should be treated separately as placeholders.
+        return string.replace(/[-[\]{}()+.,\\^$|#\s]/g, '\\$&').replace(/\*/g, '.*').replace(/\?/g, '.');
+    }
     return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
 }
 function escapeHTML(unsafeText) {
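What the new wildcard mode produces, traced on illustrative inputs:

escapeRegExp("atago_(azur_lane)*", true); // -> 'atago_\(azur_lane\).*'
new RegExp(escapeRegExp("1girl?", true), "i").test("1girls"); // true, '?' matches exactly one character
escapeRegExp("1+1"); // -> '1\+1' (default mode escapes every special character literally)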
@@ -200,6 +350,49 @@ function observeElement(element, property, callback, delay = 0) {
     }
 }
 
+// Sort functions
+function getSortFunction() {
+    let criterion = TAC_CFG.modelSortOrder || "Name";
+
+    const textSort = (a, b, reverse = false) => {
+        // Assign keys so next sort is faster
+        if (!a.sortKey) {
+            a.sortKey = a.type === ResultType.chant
+                ? a.aliases
+                : a.text;
+        }
+        if (!b.sortKey) {
+            b.sortKey = b.type === ResultType.chant
+                ? b.aliases
+                : b.text;
+        }
+
+        return reverse ? b.sortKey.localeCompare(a.sortKey) : a.sortKey.localeCompare(b.sortKey);
+    }
+    const numericSort = (a, b, reverse = false) => {
+        const noKey = reverse ? "-1" : Number.MAX_SAFE_INTEGER;
+        let aParsed = parseFloat(a.sortKey || noKey);
+        let bParsed = parseFloat(b.sortKey || noKey);
+
+        if (aParsed === bParsed) {
+            return textSort(a, b, false);
+        }
+
+        return reverse ? bParsed - aParsed : aParsed - bParsed;
+    }
+
+    return (a, b) => {
+        switch (criterion) {
+            case "Date Modified (newest first)":
+                return numericSort(a, b, true);
+            case "Date Modified (oldest first)":
+                return numericSort(a, b, false);
+            default:
+                return textSort(a, b);
+        }
+    }
+}
+
 // Queue calling function to process global queues
 async function processQueue(queue, context, ...args) {
     for (let i = 0; i < queue.length; i++) {
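A sketch of how the comparator is meant to be used. The result objects and sortKey values here are hypothetical; per the parser diffs below, sortKey is filled from the second CSV column of the temp files (an mtime-like number for models):

const a = new AutocompleteResult("new_lora", ResultType.lora); a.sortKey = "1705000000";
const b = new AutocompleteResult("old_lora", ResultType.lora); b.sortKey = "1600000000";
// With TAC_CFG.modelSortOrder = "Date Modified (newest first)":
[b, a].sort(getSortFunction()); // -> [a, b]; missing keys parse as -1 and sink, ties fall back to name order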
@@ -7,7 +7,10 @@ class ChantParser extends BaseTagParser {
         let tempResults = [];
         if (tagword !== "<" && tagword !== "<c:") {
             let searchTerm = tagword.replace("<chant:", "").replace("<c:", "").replace("<", "");
-            let filterCondition = x => x.terms.toLowerCase().includes(searchTerm) || x.name.toLowerCase().includes(searchTerm);
+            let filterCondition = x => {
+                let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
+                return regex.test(x.terms.toLowerCase()) || regex.test(x.name.toLowerCase());
+            };
             tempResults = chants.filter(x => filterCondition(x)); // Filter by tagword
         } else {
             tempResults = chants;
@@ -41,7 +44,7 @@ async function load() {
 
 function sanitize(tagType, text) {
     if (tagType === ResultType.chant) {
-        return text.replace(/^.*?: /g, "");
+        return text;
     }
     return null;
 }
@@ -51,4 +54,4 @@ PARSERS.push(new ChantParser(CHANT_TRIGGER));
 // Add our utility functions to their respective queues
 QUEUE_FILE_LOAD.push(load);
 QUEUE_SANITIZE.push(sanitize);
-QUEUE_AFTER_CONFIG_CHANGE.push(load);
\ No newline at end of file
+QUEUE_AFTER_CONFIG_CHANGE.push(load);
@@ -11,12 +11,18 @@ class EmbeddingParser extends BaseTagParser {
             if (searchTerm.startsWith("v1") || searchTerm.startsWith("v2")) {
                 versionString = searchTerm.slice(0, 2);
                 searchTerm = searchTerm.slice(2);
+            } else if (searchTerm.startsWith("vxl")) {
+                versionString = searchTerm.slice(0, 3);
+                searchTerm = searchTerm.slice(3);
             }
 
-            let filterCondition = x => x[0].toLowerCase().includes(searchTerm) || x[0].toLowerCase().replaceAll(" ", "_").includes(searchTerm);
+            let filterCondition = x => {
+                let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
+                return regex.test(x[0].toLowerCase()) || regex.test(x[0].toLowerCase().replaceAll(" ", "_"));
+            };
 
             if (versionString)
-                tempResults = embeddings.filter(x => filterCondition(x) && x[1] && x[1] === versionString); // Filter by tagword
+                tempResults = embeddings.filter(x => filterCondition(x) && x[2] && x[2].toLowerCase() === versionString.toLowerCase()); // Filter by tagword
             else
                 tempResults = embeddings.filter(x => filterCondition(x)); // Filter by tagword
         } else {
@@ -26,8 +32,13 @@ class EmbeddingParser extends BaseTagParser {
         // Add final results
         let finalResults = [];
         tempResults.forEach(t => {
-            let result = new AutocompleteResult(t[0].trim(), ResultType.embedding)
-            result.meta = t[1] + " Embedding";
+            let lastDot = t[0].lastIndexOf(".") > -1 ? t[0].lastIndexOf(".") : t[0].length;
+            let lastSlash = t[0].lastIndexOf("/") > -1 ? t[0].lastIndexOf("/") : -1;
+            let name = t[0].trim().substring(lastSlash + 1, lastDot);
+
+            let result = new AutocompleteResult(name, ResultType.embedding)
+            result.sortKey = t[1];
+            result.meta = t[2] + " Embedding";
             finalResults.push(result);
         });
 
@@ -38,9 +49,9 @@ class EmbeddingParser extends BaseTagParser {
 async function load() {
     if (embeddings.length === 0) {
         try {
-            embeddings = (await readFile(`${tagBasePath}/temp/emb.txt`)).split("\n")
-                .filter(x => x.trim().length > 0) // Remove empty lines
-                .map(x => x.trim().split(",")); // Split into name, version type pairs
+            embeddings = (await loadCSV(`${tagBasePath}/temp/emb.txt`))
+                .filter(x => x[0]?.trim().length > 0) // Remove empty lines
+                .map(x => [x[0].trim(), x[1], x[2]]); // Return name, sortKey, hash tuples
         } catch (e) {
             console.error("Error loading embeddings.txt: " + e);
         }
@@ -49,7 +60,7 @@ async function load() {
 
 function sanitize(tagType, text) {
     if (tagType === ResultType.embedding) {
-        return text.replace(/^.*?: /g, "");
+        return text;
     }
     return null;
 }
@@ -58,4 +69,4 @@ PARSERS.push(new EmbeddingParser(EMB_TRIGGER));
 
 // Add our utility functions to their respective queues
 QUEUE_FILE_LOAD.push(load);
-QUEUE_SANITIZE.push(sanitize);
\ No newline at end of file
+QUEUE_SANITIZE.push(sanitize);
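The new name extraction in the embedding parser, traced on a made-up entry from emb.txt:

const t0 = "subfolder/my_embedding.pt"; // hypothetical first CSV column
const lastDot = t0.lastIndexOf(".") > -1 ? t0.lastIndexOf(".") : t0.length;  // 22
const lastSlash = t0.lastIndexOf("/") > -1 ? t0.lastIndexOf("/") : -1;       // 9
t0.trim().substring(lastSlash + 1, lastDot); // "my_embedding": folder prefix and extension stripped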
@@ -7,8 +7,11 @@ class HypernetParser extends BaseTagParser {
         let tempResults = [];
         if (tagword !== "<" && tagword !== "<h:" && tagword !== "<hypernet:") {
             let searchTerm = tagword.replace("<hypernet:", "").replace("<h:", "").replace("<", "");
-            let filterCondition = x => x.toLowerCase().includes(searchTerm) || x.toLowerCase().replaceAll(" ", "_").includes(searchTerm);
-            tempResults = hypernetworks.filter(x => filterCondition(x)); // Filter by tagword
+            let filterCondition = x => {
+                let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
+                return regex.test(x.toLowerCase()) || regex.test(x.toLowerCase().replaceAll(" ", "_"));
+            };
+            tempResults = hypernetworks.filter(x => filterCondition(x[0])); // Filter by tagword
         } else {
             tempResults = hypernetworks;
         }
@@ -16,8 +19,9 @@ class HypernetParser extends BaseTagParser {
         // Add final results
         let finalResults = [];
         tempResults.forEach(t => {
-            let result = new AutocompleteResult(t.trim(), ResultType.hypernetwork)
+            let result = new AutocompleteResult(t[0].trim(), ResultType.hypernetwork)
             result.meta = "Hypernetwork";
+            result.sortKey = t[1];
             finalResults.push(result);
         });
 
@@ -28,9 +32,9 @@ class HypernetParser extends BaseTagParser {
 async function load() {
     if (hypernetworks.length === 0) {
         try {
-            hypernetworks = (await readFile(`${tagBasePath}/temp/hyp.txt`)).split("\n")
-                .filter(x => x.trim().length > 0) //Remove empty lines
-                .map(x => x.trim()); // Remove carriage returns and padding if it exists
+            hypernetworks = (await loadCSV(`${tagBasePath}/temp/hyp.txt`))
+                .filter(x => x[0]?.trim().length > 0) //Remove empty lines
+                .map(x => [x[0]?.trim(), x[1]]); // Remove carriage returns and padding if it exists
         } catch (e) {
             console.error("Error loading hypernetworks.txt: " + e);
         }
@@ -48,4 +52,4 @@ PARSERS.push(new HypernetParser(HYP_TRIGGER));
 
 // Add our utility functions to their respective queues
 QUEUE_FILE_LOAD.push(load);
-QUEUE_SANITIZE.push(sanitize);
\ No newline at end of file
+QUEUE_SANITIZE.push(sanitize);
@@ -7,7 +7,10 @@ class LoraParser extends BaseTagParser {
         let tempResults = [];
         if (tagword !== "<" && tagword !== "<l:" && tagword !== "<lora:") {
             let searchTerm = tagword.replace("<lora:", "").replace("<l:", "").replace("<", "");
-            let filterCondition = x => x.toLowerCase().includes(searchTerm) || x.toLowerCase().replaceAll(" ", "_").includes(searchTerm);
+            let filterCondition = x => {
+                let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
+                return regex.test(x.toLowerCase()) || regex.test(x.toLowerCase().replaceAll(" ", "_"));
+            };
             tempResults = loras.filter(x => filterCondition(x[0])); // Filter by tagword
         } else {
             tempResults = loras;
@@ -23,7 +26,8 @@ class LoraParser extends BaseTagParser {
 
             let result = new AutocompleteResult(name, ResultType.lora)
             result.meta = "Lora";
-            result.hash = t[1];
+            result.sortKey = t[1];
+            result.hash = t[2];
             finalResults.push(result);
         });
 
@@ -36,7 +40,7 @@ async function load() {
         try {
             loras = (await loadCSV(`${tagBasePath}/temp/lora.txt`))
                 .filter(x => x[0]?.trim().length > 0) // Remove empty lines
-                .map(x => [x[0]?.trim(), x[1]]); // Trim filenames and return the name, hash pairs
+                .map(x => [x[0]?.trim(), x[1], x[2]]); // Trim filenames and return the name, sortKey, hash pairs
         } catch (e) {
             console.error("Error loading lora.txt: " + e);
         }
@@ -60,4 +64,4 @@ PARSERS.push(new LoraParser(LORA_TRIGGER));
 
 // Add our utility functions to their respective queues
 QUEUE_FILE_LOAD.push(load);
-QUEUE_SANITIZE.push(sanitize);
\ No newline at end of file
+QUEUE_SANITIZE.push(sanitize);
@@ -5,9 +5,12 @@ class LycoParser extends BaseTagParser {
     parse() {
         // Show lyco
         let tempResults = [];
-        if (tagword !== "<" && tagword !== "<l:" && tagword !== "<lyco:") {
-            let searchTerm = tagword.replace("<lyco:", "").replace("<l:", "").replace("<", "");
-            let filterCondition = x => x.toLowerCase().includes(searchTerm) || x.toLowerCase().replaceAll(" ", "_").includes(searchTerm);
+        if (tagword !== "<" && tagword !== "<l:" && tagword !== "<lyco:" && tagword !== "<lora:") {
+            let searchTerm = tagword.replace("<lyco:", "").replace("<lora:", "").replace("<l:", "").replace("<", "");
+            let filterCondition = x => {
+                let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
+                return regex.test(x.toLowerCase()) || regex.test(x.toLowerCase().replaceAll(" ", "_"));
+            };
             tempResults = lycos.filter(x => filterCondition(x[0])); // Filter by tagword
         } else {
             tempResults = lycos;
@@ -23,7 +26,8 @@ class LycoParser extends BaseTagParser {
 
             let result = new AutocompleteResult(name, ResultType.lyco)
             result.meta = "Lyco";
-            result.hash = t[1];
+            result.sortKey = t[1];
+            result.hash = t[2];
             finalResults.push(result);
         });
 
@@ -36,7 +40,7 @@ async function load() {
         try {
             lycos = (await loadCSV(`${tagBasePath}/temp/lyco.txt`))
                 .filter(x => x[0]?.trim().length > 0) // Remove empty lines
-                .map(x => [x[0]?.trim(), x[1]]); // Trim filenames and return the name, hash pairs
+                .map(x => [x[0]?.trim(), x[1], x[2]]); // Trim filenames and return the name, sortKey, hash pairs
         } catch (e) {
             console.error("Error loading lyco.txt: " + e);
         }
@@ -51,7 +55,8 @@ async function sanitize(tagType, text) {
             multiplier = info["preferred weight"];
         }
 
-        return `<lyco:${text}:${multiplier}>`;
+        let prefix = TAC_CFG.useLoraPrefixForLycos ? "lora" : "lyco";
+        return `<${prefix}:${text}:${multiplier}>`;
     }
     return null;
 }
@@ -60,4 +65,4 @@ PARSERS.push(new LycoParser(LYCO_TRIGGER));
 
 // Add our utility functions to their respective queues
 QUEUE_FILE_LOAD.push(load);
-QUEUE_SANITIZE.push(sanitize);
\ No newline at end of file
+QUEUE_SANITIZE.push(sanitize);
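Effect of the new useLoraPrefixForLycos option in sanitize(), with a made-up model name and weight; the motivation is presumably webui versions that load LyCORIS models through the built-in lora handler:

// TAC_CFG.useLoraPrefixForLycos = false -> inserts "<lyco:myLyco:0.8>"
// TAC_CFG.useLoraPrefixForLycos = true  -> inserts "<lora:myLyco:0.8>"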
@@ -20,7 +20,7 @@ async function load() {
         // Add to the dict
         csv_lines.forEach(parts => {
             const hash = parts[0];
-            const keywords = parts[1].replaceAll("| ", ", ").replaceAll("|", ", ").trim();
+            const keywords = parts[1]?.replaceAll("| ", ", ")?.replaceAll("|", ", ")?.trim();
             const lastSepIndex = parts[2]?.lastIndexOf("/") + 1 || parts[2]?.lastIndexOf("\\") + 1 || 0;
             const name = parts[2]?.substring(lastSepIndex).trim() || "none"
javascript/ext_styles.js (new file, 70 lines)

@@ -0,0 +1,70 @@
+const STYLE_REGEX = /(\$(\d*)\(?)[^$|\[\],\s]*\)?/;
+const STYLE_TRIGGER = () => TAC_CFG.useStyleVars && tagword.match(STYLE_REGEX);
+
+var lastStyleVarIndex = "";
+
+class StyleParser extends BaseTagParser {
+    async parse() {
+        // Refresh if needed
+        await refreshStyleNamesIfChanged();
+
+        // Show styles
+        let tempResults = [];
+        let matchGroups = tagword.match(STYLE_REGEX);
+
+        // Save index to insert again later or clear last one
+        lastStyleVarIndex = matchGroups[2] ? matchGroups[2] : "";
+
+        if (tagword !== matchGroups[1]) {
+            let searchTerm = tagword.replace(matchGroups[1], "");
+
+            let filterCondition = x => {
+                let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
+                return regex.test(x[0].toLowerCase()) || regex.test(x[0].toLowerCase().replaceAll(" ", "_"));
+            };
+            tempResults = styleNames.filter(x => filterCondition(x)); // Filter by tagword
+        } else {
+            tempResults = styleNames;
+        }
+
+        // Add final results
+        let finalResults = [];
+        tempResults.forEach(t => {
+            let result = new AutocompleteResult(t[0].trim(), ResultType.styleName)
+            result.meta = "Style";
+            finalResults.push(result);
+        });
+
+        return finalResults;
+    }
+}
+
+async function load(force = false) {
+    if (styleNames.length === 0 || force) {
+        try {
+            styleNames = (await loadCSV(`${tagBasePath}/temp/styles.txt`))
+                .filter(x => x[0]?.trim().length > 0) // Remove empty lines
+                .filter(x => x[0] !== "None") // Remove "None" style
+                .map(x => [x[0].trim()]); // Trim name
+        } catch (e) {
+            console.error("Error loading styles.txt: " + e);
+        }
+    }
+}
+
+function sanitize(tagType, text) {
+    if (tagType === ResultType.styleName) {
+        if (text.includes(" ")) {
+            return `$${lastStyleVarIndex}(${text})`;
+        } else {
+            return`$${lastStyleVarIndex}${text}`
+        }
+    }
+    return null;
+}
+
+PARSERS.push(new StyleParser(STYLE_TRIGGER));
+
+// Add our utility functions to their respective queues
+QUEUE_FILE_LOAD.push(load);
+QUEUE_SANITIZE.push(sanitize);
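How STYLE_REGEX and the saved index interact, sketched on made-up inputs:

let m = "$2(cine".match(STYLE_REGEX); // m[1] = "$2(", m[2] = "2"
// lastStyleVarIndex becomes "2"; a multi-word style like "cinematic lighting" is then
// re-inserted by sanitize() as "$2(cinematic lighting)", a single word as "$2word".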
@@ -129,7 +129,7 @@ class UmiParser extends BaseTagParser {
             return;
         }
 
-        let umiTagword = diff[0] || '';
+        let umiTagword = tagCountChange < 0 ? '' : diff[0] || '';
         let tempResults = [];
         if (umiTagword && umiTagword.length > 0) {
             umiTagword = umiTagword.toLowerCase().replace(/[\n\r]/g, "");
@@ -149,6 +149,7 @@ class UmiParser extends BaseTagParser {
                 finalResults.push(result);
             });
 
+            finalResults = finalResults.sort((a, b) => b.count - a.count);
             return finalResults;
         } else if (showAll) {
             let filteredWildcardsSorted = filteredWildcards("");
@@ -163,6 +164,8 @@ class UmiParser extends BaseTagParser {
 
             originalTagword = tagword;
             tagword = "";
+
+            finalResults = finalResults.sort((a, b) => b.count - a.count);
             return finalResults;
         }
     } else {
@@ -178,12 +181,14 @@ class UmiParser extends BaseTagParser {
 
         originalTagword = tagword;
         tagword = "";
+
+        finalResults = finalResults.sort((a, b) => b.count - a.count);
         return finalResults;
     }
 }
 }
 
-function updateUmiTags( tagType, sanitizedText, newPrompt, textArea) {
+function updateUmiTags(tagType, sanitizedText, newPrompt, textArea) {
     // If it was a umi wildcard, also update the umiPreviousTags
     if (tagType === ResultType.umiWildcard && originalTagword.length > 0) {
         let umiSubPrompts = [...newPrompt.matchAll(UMI_PROMPT_REGEX)];
@@ -85,13 +85,14 @@ class WildcardFileParser extends BaseTagParser {
             } else {
                 result = new AutocompleteResult(wcFile[1].trim(), ResultType.wildcardFile);
                 result.meta = "Wildcard file";
+                result.sortKey = wcFile[2].trim();
             }
 
             finalResults.push(result);
             alreadyAdded.set(wcFile[1], true);
         });
 
-        finalResults.sort((a, b) => a.text.localeCompare(b.text));
+        finalResults.sort(getSortFunction());
 
         return finalResults;
     }
@@ -100,17 +101,19 @@ class WildcardFileParser extends BaseTagParser {
 async function load() {
     if (wildcardFiles.length === 0 && wildcardExtFiles.length === 0) {
         try {
-            let wcFileArr = (await readFile(`${tagBasePath}/temp/wc.txt`)).split("\n");
-            let wcBasePath = wcFileArr[0].trim(); // First line should be the base path
-            wildcardFiles = wcFileArr.slice(1)
-                .filter(x => x.trim().length > 0) // Remove empty lines
-                .map(x => [wcBasePath, x.trim().replace(".txt", "")]); // Remove file extension & newlines
+            let wcFileArr = await loadCSV(`${tagBasePath}/temp/wc.txt`);
+            if (wcFileArr && wcFileArr.length > 0) {
+                let wcBasePath = wcFileArr[0][0].trim(); // First line should be the base path
+                wildcardFiles = wcFileArr.slice(1)
+                    .filter(x => x[0]?.trim().length > 0) //Remove empty lines
+                    .map(x => [wcBasePath, x[0]?.trim().replace(".txt", ""), x[1]]); // Remove file extension & newlines
+            }
 
             // To support multiple sources, we need to separate them using the provided "-----" strings
-            let wcExtFileArr = (await readFile(`${tagBasePath}/temp/wce.txt`)).split("\n");
+            let wcExtFileArr = await loadCSV(`${tagBasePath}/temp/wce.txt`);
             let splitIndices = [];
             for (let index = 0; index < wcExtFileArr.length; index++) {
-                if (wcExtFileArr[index].trim() === "-----") {
+                if (wcExtFileArr[index][0].trim() === "-----") {
                     splitIndices.push(index);
                 }
             }
@@ -121,13 +124,13 @@ async function load() {
                 let end = splitIndices[i];
 
                 let wcExtFile = wcExtFileArr.slice(start, end);
-                let base = wcExtFile[0].trim() + "/";
-                wcExtFile = wcExtFile.slice(1)
-                    .filter(x => x.trim().length > 0) // Remove empty lines
-                    .map(x => x.trim().replace(base, "").replace(".txt", "")); // Remove file extension & newlines;
-
-                wcExtFile = wcExtFile.map(x => [base, x]);
-                wildcardExtFiles.push(...wcExtFile);
+                if (wcExtFile && wcExtFile.length > 0) {
+                    let base = wcExtFile[0][0].trim() + "/";
+                    wcExtFile = wcExtFile.slice(1)
+                        .filter(x => x[0]?.trim().length > 0) //Remove empty lines
+                        .map(x => [base, x[0]?.trim().replace(base, "").replace(".txt", ""), x[1]]);
+                    wildcardExtFiles.push(...wcExtFile);
+                }
             }
 
             // Load the yaml wildcard json file and append it as a wildcard file, appending each key as a path component until we reach the end
@@ -150,7 +153,7 @@ function sanitize(tagType, text) {
     if (tagType === ResultType.wildcardFile || tagType === ResultType.yamlWildcard) {
         return `__${text}__`;
     } else if (tagType === ResultType.wildcardTag) {
-        return text.replace(/^.*?: /g, "");
+        return text;
     }
     return null;
 }
@@ -1,4 +1,5 @@
 const styleColors = {
+    "--results-neutral-text": ["#e0e0e0","black"],
     "--results-bg": ["#0b0f19", "#ffffff"],
     "--results-border-color": ["#4b5563", "#e5e7eb"],
     "--results-border-width": ["1px", "1.5px"],
@@ -35,6 +36,7 @@ const autocompleteCSS = `
     .autocompleteResults {
         background-color: var(--results-bg) !important;
         border: var(--results-border-width) solid var(--results-border-color) !important;
+        color: var(--results-neutral-text) !important;
         border-radius: 12px !important;
         height: fit-content;
         flex-basis: fit-content;
@@ -84,6 +86,10 @@ const autocompleteCSS = `
         white-space: nowrap;
         color: var(--meta-text-color);
     }
+    .acMetaText.biased::before {
+        content: "✨";
+        margin-right: 2px;
+    }
     .acWikiLink {
         padding: 0.5rem;
         margin: -0.5rem 0 -0.5rem -0.5rem;
@@ -214,9 +220,18 @@ async function syncOptions() {
         includeEmbeddingsInNormalResults: opts["tac_includeEmbeddingsInNormalResults"],
         useHypernetworks: opts["tac_useHypernetworks"],
         useLoras: opts["tac_useLoras"],
         useLycos: opts["tac_useLycos"],
+        useLoraPrefixForLycos: opts["tac_useLoraPrefixForLycos"],
         showWikiLinks: opts["tac_showWikiLinks"],
         showExtraNetworkPreviews: opts["tac_showExtraNetworkPreviews"],
+        modelSortOrder: opts["tac_modelSortOrder"],
+        frequencySort: opts["tac_frequencySort"],
+        frequencyFunction: opts["tac_frequencyFunction"],
+        frequencyMinCount: opts["tac_frequencyMinCount"],
+        frequencyMaxAge: opts["tac_frequencyMaxAge"],
+        frequencyRecommendCap: opts["tac_frequencyRecommendCap"],
+        frequencyIncludeAlias: opts["tac_frequencyIncludeAlias"],
+        useStyleVars: opts["tac_useStyleVars"],
         // Insertion related settings
         replaceUnderscores: opts["tac_replaceUnderscores"],
         escapeParentheses: opts["tac_escapeParentheses"],
@@ -269,6 +284,17 @@ async function syncOptions() {
         await loadTags(newCFG);
     }
 
+    // Refresh temp files if model sort order changed
+    // Contrary to the other loads, this one shouldn't happen on a first time load
+    if (TAC_CFG && newCFG.modelSortOrder !== TAC_CFG.modelSortOrder) {
+        const dropdown = gradioApp().querySelector("#setting_tac_modelSortOrder");
+        dropdown.style.opacity = 0.5;
+        dropdown.style.pointerEvents = "none";
+        await refreshTacTempFiles(true);
+        dropdown.style.opacity = null;
+        dropdown.style.pointerEvents = null;
+    }
+
     // Update CSS if maxResults changed
     if (TAC_CFG && newCFG.maxResults !== TAC_CFG.maxResults) {
         gradioApp().querySelectorAll(".autocompleteResults").forEach(r => {
@@ -384,9 +410,10 @@ function isEnabled() {
 const WEIGHT_REGEX = /[([]([^()[\]:|]+)(?::(?:\d+(?:\.\d+)?|\.\d+))?[)\]]/g;
 const POINTY_REGEX = /<[^\s,<](?:[^\t\n\r,<>]*>|[^\t\n\r,> ]*)/g;
 const COMPLETED_WILDCARD_REGEX = /__[^\s,_][^\t\n\r,_]*[^\s,_]__[^\s,_]*/g;
-const NORMAL_TAG_REGEX = /[^\s,|<>)\]]+|</g;
+const STYLE_VAR_REGEX = /\$\(?[^$|\[\],\s]*\)?/g;
+const NORMAL_TAG_REGEX = /[^\s,|<>\[\]:]+_\([^\s,|<>\[\]:]*\)?|[^\s,|<>():\[\]]+|</g;
 const RUBY_TAG_REGEX = /[\w\d<][\w\d' \-?!/$%]{2,}>?/g;
-const TAG_REGEX = new RegExp(`${POINTY_REGEX.source}|${COMPLETED_WILDCARD_REGEX.source}|${NORMAL_TAG_REGEX.source}`, "g");
+const TAG_REGEX = new RegExp(`${POINTY_REGEX.source}|${COMPLETED_WILDCARD_REGEX.source}|${STYLE_VAR_REGEX.source}|${NORMAL_TAG_REGEX.source}`, "g");
 
 // On click, insert the tag into the prompt textbox with respect to the cursor position
 async function insertTextAtCursor(textArea, result, tagword, tabCompletedWithoutChoice = false) {
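What the updated tokenizer yields on an illustrative prompt fragment:

"masterpiece, $2(cinema, atago_(azur_lane)".match(TAG_REGEX);
// -> ["masterpiece", "$2(cinema", "atago_(azur_lane)"]
// Style variables survive as a single token, and name_(series) tags
// now keep their closing parenthesis instead of being cut at ")".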
@@ -449,6 +476,37 @@ async function insertTextAtCursor(textArea, result, tagword, tabCompletedWithout
         }
     }
 
+    // Frequency db update
+    if (TAC_CFG.frequencySort) {
+        let name = null;
+
+        switch (tagType) {
+            case ResultType.wildcardFile:
+            case ResultType.yamlWildcard:
+                // We only want to update the frequency for a full wildcard, not partial paths
+                if (sanitizedText.endsWith("__"))
+                    name = text
+                break;
+            case ResultType.chant:
+                // Chants use a slightly different format
+                name = result.aliases;
+                break;
+            default:
+                name = text;
+                break;
+        }
+
+        if (name && name.length > 0) {
+            // Check if it's a negative prompt
+            let textAreaId = getTextAreaIdentifier(textArea);
+            let isNegative = textAreaId.includes("n");
+            // Sanitize name for API call
+            name = encodeURIComponent(name)
+            // Call API & update db
+            increaseUseCount(name, tagType, isNegative)
+        }
+    }
+
     var prompt = textArea.value;
 
     // Edit prompt text
@@ -477,6 +535,10 @@ async function insertTextAtCursor(textArea, result, tagword, tabCompletedWithout
         optionalSeparator = TAC_CFG.extraNetworksSeparator || " ";
     }
 
+    // Escape $ signs since they are special chars for the replace function
+    // We need four since we're also escaping them in replaceAll in the first place
+    sanitizedText = sanitizedText.replaceAll("$", "$$$$");
+
     // Replace partial tag word with new text, add comma if needed
     let insert = surrounding.replace(match, sanitizedText + optionalSeparator);
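Why four dollar signs are needed, traced on an illustrative value: in a String.replace replacement string, "$$" collapses to one literal "$", and that collapse happens once in the replaceAll above and once again in the subsequent replace.

const sanitized = "$2(foo)".replaceAll("$", "$$$$"); // -> "$$2(foo)"
"before MATCH after".replace("MATCH", sanitized);    // -> "before $2(foo) after"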
@@ -500,6 +562,14 @@ async function insertTextAtCursor(textArea, result, tagword, tabCompletedWithout
         let nameDict = modelKeywordDict.get(result.hash);
         let names = [result.text + ".safetensors", result.text + ".pt", result.text + ".ckpt"];
 
+        // No match, try to find a sha256 match from the cache file
+        if (!nameDict) {
+            const sha256 = await fetchAPI(`/tacapi/v1/lora-cached-hash/${result.text}`)
+            if (sha256) {
+                nameDict = modelKeywordDict.get(sha256);
+            }
+        }
+
         if (nameDict) {
             let found = false;
             names.forEach(name => {
@@ -540,8 +610,13 @@ async function insertTextAtCursor(textArea, result, tagword, tabCompletedWithout
     textArea.selectionStart = afterInsertCursorPos + optionalSeparator.length + keywordsLength;
     textArea.selectionEnd = textArea.selectionStart
 
+    // Set self trigger flag to show wildcard contents after the filename was inserted
+    if ([ResultType.wildcardFile, ResultType.yamlWildcard, ResultType.umiWildcard].includes(result.type))
+        tacSelfTrigger = true;
+
     // Since we've modified a Gradio Textbox component manually, we need to simulate an `input` DOM event to ensure it's propagated back to python.
     // Uses a built-in method from the webui's ui.js which also already accounts for event target
-    if (tagType === ResultType.wildcardTag || tagType === ResultType.wildcardFile || tagType === ResultType.yamlWildcard)
-        tacSelfTrigger = true;
     updateInput(textArea);
 
     // Update previous tags with the edited prompt to prevent re-searching the same term
@@ -656,6 +731,7 @@ function addResultsToList(textArea, results, tagword, resetList) {
             let wikiLink = document.createElement("a");
             wikiLink.classList.add("acWikiLink");
             wikiLink.innerText = "?";
+            wikiLink.title = "Open external wiki page for this tag"
 
             let linkPart = displayText;
             // Only use alias result if it is one
@@ -667,6 +743,8 @@ function addResultsToList(textArea, results, tagword, resetList) {
                 linkPart = linkPart.split("[")[0]
             }
 
+            linkPart = encodeURIComponent(linkPart);
+
             // Set link based on selected file
             let tagFileNameLower = tagFileName.toLowerCase();
             if (tagFileNameLower.startsWith("danbooru")) {
@@ -699,7 +777,7 @@ function addResultsToList(textArea, results, tagword, resetList) {
         }
 
         // Post count
-        if (result.count && !isNaN(result.count)) {
+        if (result.count && !isNaN(result.count) && result.count !== Number.MAX_SAFE_INTEGER) {
            let postCount = result.count;
            let formatter;
@@ -731,8 +809,24 @@ function addResultsToList(textArea, results, tagword, resetList) {
             flexDiv.appendChild(metaDiv);
         }
 
+        // Add small ✨ marker to indicate usage sorting
+        if (result.usageBias) {
+            flexDiv.querySelector(".acMetaText").classList.add("biased");
+            flexDiv.title = "✨ Frequent tag. Ctrl/Cmd + click to reset usage count."
+        }
+
+        // Check if it's a negative prompt
+        let isNegative = textAreaId.includes("n");
+
         // Add listener
-        li.addEventListener("click", function () { insertTextAtCursor(textArea, result, tagword); });
+        li.addEventListener("click", (e) => {
+            if (e.ctrlKey || e.metaKey) {
+                resetUseCount(result.text, result.type, !isNegative, isNegative);
+                flexDiv.querySelector(".acMetaText").classList.remove("biased");
+            } else {
+                insertTextAtCursor(textArea, result, tagword);
+            }
+        });
         // Add element to list
         resultsList.appendChild(li);
     }
@@ -920,6 +1014,7 @@ function checkKeywordInsertionUndo(textArea, event) {
             if (lastEditWasKeywordInsertion && !keywordInsertionUndone) {
                 keywordInsertionUndone = true;
                 textArea.value = textBeforeKeywordInsertion;
+                tacSelfTrigger = true;
                 updateInput(textArea);
             }
             break;
@@ -927,6 +1022,7 @@ function checkKeywordInsertionUndo(textArea, event) {
             if (lastEditWasKeywordInsertion && keywordInsertionUndone) {
                 keywordInsertionUndone = false;
                 textArea.value = textAfterKeywordInsertion;
+                tacSelfTrigger = true;
                 updateInput(textArea);
             }
         case undefined:
@@ -961,7 +1057,7 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
         .map(match => match[1]);
     let tags = prompt.match(TAG_REGEX)
     if (weightedTags !== null && tags !== null) {
-        tags = tags.filter(tag => !weightedTags.some(weighted => tag.includes(weighted) && !tag.startsWith("<[")))
+        tags = tags.filter(tag => !weightedTags.some(weighted => tag.includes(weighted) && !tag.startsWith("<[") && !tag.startsWith("$(")))
             .concat(weightedTags);
     }
@@ -998,48 +1094,24 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
     resultCountBeforeNormalTags = 0;
     tagword = tagword.toLowerCase().replace(/[\n\r]/g, "");
 
+    // Needed for slicing check later
+    let normalTags = false;
+
     // Process all parsers
-    let resultCandidates = await processParsers(textArea, prompt);
+    let resultCandidates = (await processParsers(textArea, prompt))?.filter(x => x.length > 0);
     // If one ore more result candidates match, use their results
     if (resultCandidates && resultCandidates.length > 0) {
         // Flatten our candidate(s)
         results = resultCandidates.flat();
-        // If there was more than one candidate, sort the results by text to mix them
-        // instead of having them added in the order of the parsers
-        let shouldSort = resultCandidates.length > 1;
-        if (shouldSort) {
-            results = results.sort((a, b) => {
-                let sortByA = a.type === ResultType.chant ? a.aliases : a.text;
-                let sortByB = b.type === ResultType.chant ? b.aliases : b.text;
-                return sortByA.localeCompare(sortByB);
-            });
-
-            // Since some tags are kaomoji, we have to add the normal results in some cases
-            if (tagword.startsWith("<") || tagword.startsWith("*<")) {
-                // Create escaped search regex with support for * as a start placeholder
-                let searchRegex;
-                if (tagword.startsWith("*")) {
-                    tagword = tagword.slice(1);
-                    searchRegex = new RegExp(`${escapeRegExp(tagword)}`, 'i');
-                } else {
-                    searchRegex = new RegExp(`(^|[^a-zA-Z])${escapeRegExp(tagword)}`, 'i');
-                }
-                let genericResults = allTags.filter(x => x[0].toLowerCase().search(searchRegex) > -1).slice(0, TAC_CFG.maxResults);
-
-                genericResults.forEach(g => {
-                    let result = new AutocompleteResult(g[0].trim(), ResultType.tag)
-                    result.category = g[1];
-                    result.count = g[2];
-                    result.aliases = g[3];
-                    results.push(result);
-                });
-            }
-        }
+        // Sort results, but not if it's umi tags since they are sorted by count
+        if (!(resultCandidates.length === 1 && results[0].type === ResultType.umiWildcard))
+            results = results.sort(getSortFunction());
     }
     // Else search the normal tag list
-    if (!resultCandidates || resultCandidates.length === 0) {
+    if (!resultCandidates || resultCandidates.length === 0
+        || (TAC_CFG.includeEmbeddingsInNormalResults && !(tagword.startsWith("<") || tagword.startsWith("*<")))
+    ) {
+        normalTags = true;
         resultCountBeforeNormalTags = results.length;
 
         // Create escaped search regex with support for * as a start placeholder
@@ -1094,11 +1166,6 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
             results = results.concat(extraResults);
         }
     }
-
-    // Slice if the user has set a max result count
-    if (!TAC_CFG.showAllResults) {
-        results = results.slice(0, TAC_CFG.maxResults + resultCountBeforeNormalTags);
-    }
 
     // Guard for empty results
@@ -1108,6 +1175,57 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
         return;
     }
 
+    // Sort again with frequency / usage count if enabled
+    if (TAC_CFG.frequencySort) {
+        // Split our results into a list of names and types
+        let tagNames = [];
+        let aliasNames = [];
+        let types = [];
+        // Limit to 2k for performance reasons
+        const aliasTypes = [ResultType.tag, ResultType.extra];
+        results.slice(0,2000).forEach(r => {
+            const name = r.type === ResultType.chant ? r.aliases : r.text;
+            // Add to alias list or tag list depending on if the name includes the tagword
+            // (the same criteria is used in the filter in calculateUsageBias)
+            if (aliasTypes.includes(r.type) && !name.includes(tagword)) {
+                aliasNames.push(name);
+            } else {
+                tagNames.push(name);
+            }
+            types.push(r.type);
+        });
+
+        // Check if it's a negative prompt
+        let textAreaId = getTextAreaIdentifier(textArea);
+        let isNegative = textAreaId.includes("n");
+
+        // Request use counts from the DB
+        const names = TAC_CFG.frequencyIncludeAlias ? tagNames.concat(aliasNames) : tagNames;
+        const counts = await getUseCounts(names, types, isNegative);
+
+        // Pre-calculate weights to prevent duplicate work
+        const resultBiasMap = new Map();
+        results.forEach(result => {
+            const name = result.type === ResultType.chant ? result.aliases : result.text;
+            const type = result.type;
+            // Find matching pair from DB results
+            const useStats = counts.find(c => c.name === name && c.type === type);
+            const uses = useStats?.count || 0;
+            // Calculate & set weight
+            const weight = calculateUsageBias(result, result.count, uses)
+            resultBiasMap.set(result, weight);
+        });
+        // Actual sorting with the pre-calculated weights
+        results = results.sort((a, b) => {
+            return resultBiasMap.get(b) - resultBiasMap.get(a);
+        });
+    }
+
+    // Slice if the user has set a max result count and we are not in a extra networks / wildcard list
+    if (!TAC_CFG.showAllResults && normalTags) {
+        results = results.slice(0, TAC_CFG.maxResults + resultCountBeforeNormalTags);
+    }
+
     addResultsToList(textArea, results, tagword, true);
     showResults(textArea);
 }
@@ -1130,12 +1248,17 @@ function navigateInList(textArea, event) {
 
     if (!validKeys.includes(event.key)) return;
     if (!isVisible(textArea)) return
-    // Return if ctrl key is pressed to not interfere with weight editing shortcut
-    if (event.ctrlKey || event.altKey || event.shiftKey || event.metaKey) return;
+    // Add modifier keys to base as text+.
+    let modKey = "";
+    if (event.ctrlKey) modKey += "Ctrl+";
+    if (event.altKey) modKey += "Alt+";
+    if (event.shiftKey) modKey += "Shift+";
+    if (event.metaKey) modKey += "Meta+";
+    modKey += event.key;
 
     oldSelectedTag = selectedTag;
 
-    switch (event.key) {
+    switch (modKey) {
         case keys["MoveUp"]:
             if (selectedTag === null) {
                 selectedTag = resultCount - 1;
@@ -1206,6 +1329,8 @@ function navigateInList(textArea, event) {
         case keys["Close"]:
             hideResults(textArea);
             break;
+        default:
+            if (event.ctrlKey || event.altKey || event.shiftKey || event.metaKey) return;
     }
     let moveKeys = [keys["MoveUp"], keys["MoveDown"], keys["JumpUp"], keys["JumpDown"], keys["JumpToStart"], keys["JumpToEnd"]];
     if (selectedTag === resultCount - 1 && moveKeys.includes(event.key)) {
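The lookup key built by the new modifier handling, sketched for a hypothetical keydown event:

// event = { ctrlKey: true, shiftKey: true, altKey: false, metaKey: false, key: "ArrowUp" }
// modKey -> "Ctrl+Shift+ArrowUp"
// Unhandled modifier combos now bail out in the default case instead of up front,
// so modifier-based hotkey bindings like keys["MoveUp"] = "Ctrl+ArrowUp" become possible.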
@@ -1220,8 +1345,8 @@ function navigateInList(textArea, event) {
         event.stopPropagation();
 }
 
-async function refreshTacTempFiles() {
-    setTimeout(async () => {
+async function refreshTacTempFiles(api = false) {
+    const reload = async () => {
         wildcardFiles = [];
         wildcardExtFiles = [];
         umiWildcards = [];
@@ -1233,7 +1358,23 @@ async function refreshTacTempFiles() {
         await processQueue(QUEUE_FILE_LOAD, null);
 
         console.log("TAC: Refreshed temp files");
-    }, 2000);
+    }
+
+    if (api) {
+        await postAPI("tacapi/v1/refresh-temp-files");
+        await reload();
+    } else {
+        setTimeout(async () => {
+            await reload();
+        }, 2000);
+    }
+}
+
+async function refreshEmbeddings() {
+    await postAPI("tacapi/v1/refresh-embeddings", null);
+    embeddings = [];
+    await processQueue(QUEUE_FILE_LOAD, null);
+    console.log("TAC: Refreshed embeddings");
 }
 
 function addAutocompleteToArea(area) {
@@ -1256,8 +1397,13 @@ function addAutocompleteToArea(area) {
 
     // Add autocomplete event listener
     area.addEventListener('input', (e) => {
-        debounce(autocomplete(area, area.value), TAC_CFG.delayTime);
-        updateRuby(area, area.value);
+        // Cancel autocomplete itself if the event has no inputType (e.g. because it was triggered by the updateInput() function)
+        if (!e.inputType && !tacSelfTrigger) return;
+        tacSelfTrigger = false;
+
+        debounce(autocomplete(area, area.value), TAC_CFG.delayTime);
         checkKeywordInsertionUndo(area, e);
     });
     // Add focusout event listener
@@ -1318,6 +1464,13 @@ async function setup() {
     // Listener for internal temp files refresh button
     gradioApp().querySelector("#refresh_tac_refreshTempFiles")?.addEventListener("click", refreshTacTempFiles);
 
+    // Also add listener for external network refresh button (plus triggering python code)
+    ["#img2img_extra_refresh", "#txt2img_extra_refresh"].forEach(e => {
+        gradioApp().querySelector(e)?.addEventListener("click", ()=>{
+            refreshTacTempFiles(true);
+        });
+    })
+
     // Add mutation observer for the model hash text to also allow hash-based blacklist again
     let modelHashText = gradioApp().querySelector("#sd_checkpoint_hash");
     updateModelName();
@@ -1328,6 +1481,7 @@ async function setup() {
             if (mutation.type === "attributes" && mutation.attributeName === "title") {
                 currentModelHash = mutation.target.title;
                 updateModelName();
+                refreshEmbeddings();
             }
         }
     });
@@ -16,6 +16,8 @@ hash_dict = {}


def load_hash_cache():
    if not known_hashes_file.exists():
        known_hashes_file.touch()
    with open(known_hashes_file, "r", encoding="utf-8") as file:
        reader = csv.reader(
            file.readlines(), delimiter=",", quotechar='"', skipinitialspace=True
@@ -28,6 +30,8 @@ def load_hash_cache():
def update_hash_cache():
    global file_needs_update
    if file_needs_update:
        if not known_hashes_file.exists():
            known_hashes_file.touch()
        with open(known_hashes_file, "w", encoding="utf-8", newline='') as file:
            writer = csv.writer(file)
            for name, (hash, mtime) in hash_dict.items():
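For context, the cache these two functions maintain is a simple `name,hash,mtime` CSV. A self-contained sketch of the same round-trip with made-up names and values (the real file location and contents come from the extension, not from this example):

```python
import csv
from pathlib import Path

cache = Path("known_hashes.txt")  # hypothetical location for illustration
cache.touch(exist_ok=True)        # same defensive touch as in the diff

hash_dict = {"example.safetensors": ("abcd1234", 1700000000.0)}

# Write: one row per model file
with open(cache, "w", encoding="utf-8", newline="") as file:
    writer = csv.writer(file)
    for name, (hash, mtime) in hash_dict.items():
        writer.writerow([name, hash, mtime])

# Read back into the same dict shape
with open(cache, "r", encoding="utf-8") as file:
    reader = csv.reader(file, delimiter=",", quotechar='"', skipinitialspace=True)
    restored = {name: (hash, float(mtime)) for name, hash, mtime in reader}
```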
@@ -6,31 +6,34 @@ try:
    from modules.paths import extensions_dir, script_path

    # Webui root path
    FILE_DIR = Path(script_path)
    FILE_DIR = Path(script_path).absolute()

    # The extension base path
    EXT_PATH = Path(extensions_dir)
    EXT_PATH = Path(extensions_dir).absolute()
except ImportError:
    # Webui root path
    FILE_DIR = Path().absolute()
    # The extension base path
    EXT_PATH = FILE_DIR.joinpath("extensions")
    EXT_PATH = FILE_DIR.joinpath("extensions").absolute()

# Tags base path
TAGS_PATH = Path(scripts.basedir()).joinpath("tags")
TAGS_PATH = Path(scripts.basedir()).joinpath("tags").absolute()

# The path to the folder containing the wildcards and embeddings
WILDCARD_PATH = FILE_DIR.joinpath("scripts/wildcards")
EMB_PATH = Path(shared.cmd_opts.embeddings_dir)
HYP_PATH = Path(shared.cmd_opts.hypernetwork_dir)
WILDCARD_PATH = FILE_DIR.joinpath("scripts/wildcards").absolute()
EMB_PATH = Path(shared.cmd_opts.embeddings_dir).absolute()
HYP_PATH = Path(shared.cmd_opts.hypernetwork_dir).absolute()

try:
    LORA_PATH = Path(shared.cmd_opts.lora_dir)
    LORA_PATH = Path(shared.cmd_opts.lora_dir).absolute()
except AttributeError:
    LORA_PATH = None

try:
    LYCO_PATH = Path(shared.cmd_opts.lyco_dir)
    try:
        LYCO_PATH = Path(shared.cmd_opts.lyco_dir_backcompat).absolute()
    except:
        LYCO_PATH = Path(shared.cmd_opts.lyco_dir).absolute()  # attempt original non-backcompat path
except AttributeError:
    LYCO_PATH = None

@@ -49,7 +52,7 @@ def find_ext_wildcard_paths():
        getattr(shared.cmd_opts, "wildcards_dir", None),  # Cmd arg from the wildcard extension
        getattr(opts, "wildcard_dir", None),  # Custom path from sd-dynamic-prompts
    ]
    for path in [Path(p) for p in custom_paths if p is not None]:
    for path in [Path(p).absolute() for p in custom_paths if p is not None]:
        if path.exists():
            found.append(path)

@@ -61,8 +64,8 @@ WILDCARD_EXT_PATHS = find_ext_wildcard_paths()

# The path to the temporary files
# In the webui root, on windows it exists by default, on linux it doesn't
STATIC_TEMP_PATH = FILE_DIR.joinpath("tmp")
TEMP_PATH = TAGS_PATH.joinpath("temp")  # Extension specific temp files
STATIC_TEMP_PATH = FILE_DIR.joinpath("tmp").absolute()
TEMP_PATH = TAGS_PATH.joinpath("temp").absolute()  # Extension specific temp files

# Make sure these folders exist
if not TEMP_PATH.exists():
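The point of the blanket `.absolute()` calls is that `pathlib` compares paths lexically: a relative and an absolute spelling of the same folder do not match, which matters for the `is_relative_to()` filtering used further down for Lora/LyCORIS models. A small demonstration with invented paths:

```python
from pathlib import Path

lora_dir = Path("models/Lora")                          # relative, as a cmd_opt might arrive
model = Path.cwd() / "models/Lora/example.safetensors"  # absolute

print(model.is_relative_to(lora_dir))             # False: mixed relative/absolute forms
print(model.is_relative_to(lora_dir.absolute()))  # True once both are absolute
```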
@@ -2,21 +2,37 @@
# to a temporary file to expose it to the javascript side

import glob
import importlib
import json
import sqlite3
import urllib.parse
from pathlib import Path

import gradio as gr
import yaml
from fastapi import FastAPI
from fastapi.responses import FileResponse, JSONResponse
from modules import script_callbacks, sd_hijack, shared
from fastapi.responses import Response, FileResponse, JSONResponse
from modules import script_callbacks, sd_hijack, shared, hashes
from pydantic import BaseModel

from scripts.model_keyword_support import (get_lora_simple_hash,
                                           load_hash_cache, update_hash_cache,
                                           write_model_keyword_path)
from scripts.shared_paths import *

try:
    import scripts.tag_frequency_db as tdb

    # Ensure the db dependency is reloaded on script reload
    importlib.reload(tdb)

    db = tdb.TagFrequencyDb()
    if int(db.version) != int(tdb.db_ver):
        raise ValueError("Database version mismatch")
except (ImportError, ValueError, sqlite3.Error) as e:
    print(f"Tag Autocomplete: Tag frequency database error - \"{e}\"")
    db = None

# Attempt to get embedding load function, using the same call as api.
try:
    load_textual_inversion_embeddings = sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings
@@ -24,21 +40,60 @@ except Exception as e: # Not supported.
    load_textual_inversion_embeddings = lambda *args, **kwargs: None
    print("Tag Autocomplete: Cannot reload embeddings instantly:", e)

# Sorting functions for extra networks / embeddings stuff
sort_criteria = {
    "Name": lambda path, name, subpath: name.lower() if subpath else path.stem.lower(),
    "Date Modified (newest first)": lambda path, name, subpath: path.stat().st_mtime,
    "Date Modified (oldest first)": lambda path, name, subpath: path.stat().st_mtime
}

def sort_models(model_list, sort_method = None, name_has_subpath = False):
    """Sorts models according to the setting.

    Input: list of (full_path, display_name, {hash}) models.
    Returns models in the format of name, sort key, meta.
    Meta is optional and can be a hash, version string or other required info.
    """
    if len(model_list) == 0:
        return model_list

    if sort_method is None:
        sort_method = getattr(shared.opts, "tac_modelSortOrder", "Name")

    # Get sorting method from dictionary
    sorter = sort_criteria.get(sort_method, sort_criteria["Name"])

    # During merging on the JS side we need to re-sort anyway, so here only the sort criteria are calculated.
    # The list itself doesn't need to get sorted at this point.
    if len(model_list[0]) > 2:
        results = [f'{name},"{sorter(path, name, name_has_subpath)}",{meta}' for path, name, meta in model_list]
    else:
        results = [f'{name},"{sorter(path, name, name_has_subpath)}"' for path, name in model_list]
    return results
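A usage sketch for `sort_models()` as defined above (the file names are invented, and this assumes the function is in scope). With the default "Name" order the sort key is just the display name, so no `stat()` call is needed on the made-up paths, and each entry comes back as the `name,"sortkey",meta` string the temp files store:

```python
from pathlib import Path

models = [
    (Path("/data/Lora/style/ink.safetensors"), "style/ink.safetensors", "abcd1234"),
    (Path("/data/Lora/chars/alice.safetensors"), "chars/alice.safetensors", "ef567890"),
]

print(sort_models(models, sort_method="Name", name_has_subpath=True))
# ['style/ink.safetensors,"style/ink.safetensors",abcd1234',
#  'chars/alice.safetensors,"chars/alice.safetensors",ef567890']
```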
def get_wildcards():
    """Returns a list of all wildcards. Works on nested folders."""
    wildcard_files = list(WILDCARD_PATH.rglob("*.txt"))
    resolved = [w.relative_to(WILDCARD_PATH).as_posix(
    ) for w in wildcard_files if w.name != "put wildcards here.txt"]
    return resolved
    resolved = [(w, w.relative_to(WILDCARD_PATH).as_posix())
                for w in wildcard_files
                if w.name != "put wildcards here.txt"
                and w.is_file()]
    return sort_models(resolved, name_has_subpath=True)


def get_ext_wildcards():
    """Returns a list of all extension wildcards. Works on nested folders."""
    wildcard_files = []

    excluded_folder_names = [s.strip() for s in getattr(shared.opts, "tac_wildcardExclusionList", "").split(",")]
    for path in WILDCARD_EXT_PATHS:
        wildcard_files.append(path.as_posix())
        wildcard_files.extend(p.relative_to(path).as_posix() for p in path.rglob("*.txt") if p.name != "put wildcards here.txt")
        resolved = [(w, w.relative_to(path).as_posix())
                    for w in path.rglob("*.txt")
                    if w.name != "put wildcards here.txt"
                    and not any(excluded in w.parts for excluded in excluded_folder_names)
                    and w.is_file()]
        wildcard_files.extend(sort_models(resolved, name_has_subpath=True))
        wildcard_files.append("-----")

    return wildcard_files
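The exclusion list above is a simple membership test against every path component, so excluding a folder name also excludes everything nested below it. A self-contained illustration with invented paths:

```python
from pathlib import Path

excluded_folder_names = ["nsfw", "wip"]
candidates = [Path("people/artists.txt"), Path("wip/drafts/ideas.txt")]

kept = [p for p in candidates
        if not any(excluded in p.parts for excluded in excluded_folder_names)]
print([p.as_posix() for p in kept])  # ['people/artists.txt']
```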
@@ -52,7 +107,9 @@ def is_umi_format(data):
            break
    return not issue_found

def parse_umi_format(umi_tags, count, data):
count = 0
def parse_umi_format(umi_tags, data):
    global count
    for item in data:
        umi_tags[count] = ','.join(data[item]['Tags'])
        count += 1
@@ -76,13 +133,12 @@ def get_yaml_wildcards():
    """Returns a list of all tags found in extension YAML files found under a Tags: key."""
    yaml_files = []
    for path in WILDCARD_EXT_PATHS:
        yaml_files.extend(p for p in path.rglob("*.yml"))
        yaml_files.extend(p for p in path.rglob("*.yaml"))
        yaml_files.extend(p for p in path.rglob("*.yml") if p.is_file())
        yaml_files.extend(p for p in path.rglob("*.yaml") if p.is_file())

    yaml_wildcards = {}

    umi_tags = {} # { tag: count }
    count = 0

    for path in yaml_files:
        try:
@@ -90,13 +146,13 @@
            data = yaml.safe_load(file)
            if (data):
                if (is_umi_format(data)):
                    parse_umi_format(umi_tags, count, data)
                    parse_umi_format(umi_tags, data)
                else:
                    parse_dynamic_prompt_format(yaml_wildcards, data, path)
            else:
                print('No data found in ' + path.name)
        except yaml.YAMLError:
            print('Issue in parsing YAML file ' + path.name)
        except (yaml.YAMLError, UnicodeDecodeError) as e:
            print(f'Issue in parsing YAML file {path.name}: {e}')
            continue

    # Sort by count
@@ -118,48 +174,49 @@ def get_embeddings(sd_model):
    # Version constants
    V1_SHAPE = 768
    V2_SHAPE = 1024
    VXL_SHAPE = 2048
    emb_v1 = []
    emb_v2 = []
    emb_vXL = []
    emb_unknown = []
    results = []

    try:
        # The sd_model embedding_db reference only exists in sd.next with diffusers backend
        try:
            loaded_sdnext = sd_model.embedding_db.word_embeddings
            skipped_sdnext = sd_model.embedding_db.skipped_embeddings
        except (NameError, AttributeError):
            loaded_sdnext = {}
            skipped_sdnext = {}

        # Get embedding dict from sd_hijack to separate v1/v2 embeddings
        emb_type_a = sd_hijack.model_hijack.embedding_db.word_embeddings
        emb_type_b = sd_hijack.model_hijack.embedding_db.skipped_embeddings
        # Get the shape of the first item in the dict
        emb_a_shape = -1
        emb_b_shape = -1
        if (len(emb_type_a) > 0):
            emb_a_shape = next(iter(emb_type_a.items()))[1].shape
        if (len(emb_type_b) > 0):
            emb_b_shape = next(iter(emb_type_b.items()))[1].shape
        loaded = sd_hijack.model_hijack.embedding_db.word_embeddings
        skipped = sd_hijack.model_hijack.embedding_db.skipped_embeddings
        loaded = loaded | loaded_sdnext
        skipped = skipped | skipped_sdnext

        # Add embeddings to the correct list
        if (emb_a_shape == V1_SHAPE):
            emb_v1 = list(emb_type_a.keys())
        elif (emb_a_shape == V2_SHAPE):
            emb_v2 = list(emb_type_a.keys())
        for key, emb in (loaded | skipped).items():
            if emb.filename is None:
                continue

        if (emb_b_shape == V1_SHAPE):
            emb_v1 = list(emb_type_b.keys())
        elif (emb_b_shape == V2_SHAPE):
            emb_v2 = list(emb_type_b.keys())
            if emb.shape is None:
                emb_unknown.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), ""))
            elif emb.shape == V1_SHAPE:
                emb_v1.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "v1"))
            elif emb.shape == V2_SHAPE:
                emb_v2.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "v2"))
            elif emb.shape == VXL_SHAPE:
                emb_vXL.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "vXL"))
            else:
                emb_unknown.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), ""))

        # Get shape of current model
        #vec = sd_model.cond_stage_model.encode_embedding_init_text(",", 1)
        #model_shape = vec.shape[1]
        # Show relevant entries at the top
        #if (model_shape == V1_SHAPE):
        #    results = [e + ",v1" for e in emb_v1] + [e + ",v2" for e in emb_v2]
        #elif (model_shape == V2_SHAPE):
        #    results = [e + ",v2" for e in emb_v2] + [e + ",v1" for e in emb_v1]
        #else:
        #    raise AttributeError # Fallback to old method
        results = sorted([e + ",v1" for e in emb_v1] + [e + ",v2" for e in emb_v2], key=lambda x: x.lower())
        results = sort_models(emb_v1) + sort_models(emb_v2) + sort_models(emb_vXL) + sort_models(emb_unknown)
    except AttributeError:
        print("tag_autocomplete_helper: Old webui version or unrecognized model shape, using fallback for embedding completion.")
        # Get a list of all embeddings in the folder
        all_embeds = [str(e.relative_to(EMB_PATH)) for e in EMB_PATH.rglob("*") if e.suffix in {".bin", ".pt", ".png",'.webp', '.jxl', '.avif'}]
        all_embeds = [str(e.relative_to(EMB_PATH)) for e in EMB_PATH.rglob("*") if e.suffix in {".bin", ".pt", ".png",'.webp', '.jxl', '.avif'} and e.is_file()]
        # Remove files with a size of 0
        all_embeds = [e for e in all_embeds if EMB_PATH.joinpath(e).stat().st_size > 0]
        # Remove file extensions
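The shape constants boil down to a lookup from embedding width to a version tag, with `None` or unrecognized widths landing in the "unknown" bucket. The same classification, condensed into a sketch:

```python
V1_SHAPE, V2_SHAPE, VXL_SHAPE = 768, 1024, 2048
version_by_shape = {V1_SHAPE: "v1", V2_SHAPE: "v2", VXL_SHAPE: "vXL"}

def classify(shape):
    # Anything unmapped (including None) gets the empty "unknown" tag
    return version_by_shape.get(shape, "")

assert classify(768) == "v1"
assert classify(2048) == "vXL"
assert classify(None) == ""
```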
@@ -173,53 +230,129 @@ def get_hypernetworks():

    # Get a list of all hypernetworks in the folder
    hyp_paths = [Path(h) for h in glob.glob(HYP_PATH.joinpath("**/*").as_posix(), recursive=True)]
    all_hypernetworks = [str(h.name) for h in hyp_paths if h.suffix in {".pt"}]
    # Remove file extensions
    return sorted([h[:h.rfind('.')] for h in all_hypernetworks], key=lambda x: x.lower())
    all_hypernetworks = [(h, h.stem) for h in hyp_paths if h.suffix in {".pt"} and h.is_file()]
    return sort_models(all_hypernetworks)

model_keyword_installed = write_model_keyword_path()


def _get_lora():
    """
    Write a list of all lora.
    Fallback method for when the built-in Lora.networks module is not available.
    """
    # Get a list of all lora in the folder
    lora_paths = [
        Path(l)
        for l in glob.glob(LORA_PATH.joinpath("**/*").as_posix(), recursive=True)
    ]
    # Get hashes
    valid_loras = [
        lf
        for lf in lora_paths
        if lf.suffix in {".safetensors", ".ckpt", ".pt"} and lf.is_file()
    ]

    return valid_loras


def _get_lyco():
    """
    Write a list of all LyCORIS/LOHA from https://github.com/KohakuBlueleaf/a1111-sd-webui-lycoris
    Fallback method for when the built-in Lora.networks module is not available.
    """
    # Get a list of all LyCORIS in the folder
    lyco_paths = [
        Path(ly)
        for ly in glob.glob(LYCO_PATH.joinpath("**/*").as_posix(), recursive=True)
    ]

    # Get hashes
    valid_lycos = [
        lyf
        for lyf in lyco_paths
        if lyf.suffix in {".safetensors", ".ckpt", ".pt"} and lyf.is_file()
    ]
    return valid_lycos


# Attempt to use the built-in Lora.networks Lora/LyCORIS models lists.
try:
    import sys
    from modules import extensions
    sys.path.append(Path(extensions.extensions_builtin_dir).joinpath("Lora").as_posix())
    import lora # pyright: ignore [reportMissingImports]

    def _get_lora():
        return [
            Path(model.filename).absolute()
            for model in lora.available_loras.values()
            if Path(model.filename).absolute().is_relative_to(LORA_PATH)
        ]

    def _get_lyco():
        return [
            Path(model.filename).absolute()
            for model in lora.available_loras.values()
            if Path(model.filename).absolute().is_relative_to(LYCO_PATH)
        ]

except Exception as e:
    pass
    # no need to report
    # print(f'Exception setting-up performant fetchers: {e}')


def is_visible(p: Path) -> bool:
    if getattr(shared.opts, "extra_networks_hidden_models", "When searched") != "Never":
        return True
    for part in p.parts:
        if part.startswith('.'):
            return False
    return True
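The structure above is a fallback-then-override pattern: the filesystem-walking `_get_lora`/`_get_lyco` are defined first, then shadowed by faster list-based versions only if the built-in Lora module imports cleanly. The pattern in isolation, with a placeholder module name standing in for the optional dependency:

```python
import importlib

def _get_items():
    return ["slow", "filesystem", "scan"]  # fallback implementation

try:
    fast_backend = importlib.import_module("some_optional_backend")  # hypothetical module

    def _get_items():  # shadows the fallback only on successful import
        return fast_backend.cached_items()
except Exception:
    pass  # keep the fallback; nothing to report
```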
def get_lora():
    """Write a list of all lora"""
    global model_keyword_installed

    # Get a list of all lora in the folder
    lora_paths = [Path(l) for l in glob.glob(LORA_PATH.joinpath("**/*").as_posix(), recursive=True)]
    # Get hashes
    valid_loras = [lf for lf in lora_paths if lf.suffix in {".safetensors", ".ckpt", ".pt"}]
    hashes = {}
    valid_loras = _get_lora()
    loras_with_hash = []
    for l in valid_loras:
        if not l.exists() or not l.is_file() or not is_visible(l):
            continue
        name = l.relative_to(LORA_PATH).as_posix()
        if model_keyword_installed:
            hashes[name] = get_lora_simple_hash(l)
            hash = get_lora_simple_hash(l)
        else:
            hashes[name] = ""
            hash = ""
        loras_with_hash.append((l, name, hash))
    # Sort
    sorted_loras = dict(sorted(hashes.items()))
    # Add hashes and return
    return [f"\"{name}\",{hash}" for name, hash in sorted_loras.items()]
    return sort_models(loras_with_hash)


def get_lyco():
    """Write a list of all LyCORIS/LOHA from https://github.com/KohakuBlueleaf/a1111-sd-webui-lycoris"""

    # Get a list of all LyCORIS in the folder
    lyco_paths = [Path(ly) for ly in glob.glob(LYCO_PATH.joinpath("**/*").as_posix(), recursive=True)]

    # Get hashes
    valid_lycos = [lyf for lyf in lyco_paths if lyf.suffix in {".safetensors", ".ckpt", ".pt"}]
    hashes = {}
    valid_lycos = _get_lyco()
    lycos_with_hash = []
    for ly in valid_lycos:
        if not ly.exists() or not ly.is_file() or not is_visible(ly):
            continue
        name = ly.relative_to(LYCO_PATH).as_posix()
        if model_keyword_installed:
            hashes[name] = get_lora_simple_hash(ly)
            hash = get_lora_simple_hash(ly)
        else:
            hashes[name] = ""

            hash = ""
        lycos_with_hash.append((ly, name, hash))
    # Sort
    sorted_lycos = dict(sorted(hashes.items()))
    # Add hashes and return
    return [f"\"{name}\",{hash}" for name, hash in sorted_lycos.items()]
    return sort_models(lycos_with_hash)

def get_style_names():
    try:
        style_names: list[str] = shared.prompt_styles.styles.keys()
        style_names = sorted(style_names, key=len, reverse=True)
        return style_names
    except Exception:
        return None
def write_tag_base_path():
    """Writes the tag base path to a fixed location temporary file"""
@@ -235,19 +368,19 @@ def write_to_temp_file(name, data):

csv_files = []
csv_files_withnone = []
def update_tag_files():
def update_tag_files(*args, **kwargs):
    """Returns a list of all potential tag files"""
    global csv_files, csv_files_withnone
    files = [str(t.relative_to(TAGS_PATH)) for t in TAGS_PATH.glob("*.csv")]
    files = [str(t.relative_to(TAGS_PATH)) for t in TAGS_PATH.glob("*.csv") if t.is_file()]
    csv_files = files
    csv_files_withnone = ["None"] + files

json_files = []
json_files_withnone = []
def update_json_files():
def update_json_files(*args, **kwargs):
    """Returns a list of all potential json files"""
    global json_files, json_files_withnone
    files = [str(j.relative_to(TAGS_PATH)) for j in TAGS_PATH.glob("*.json")]
    files = [str(j.relative_to(TAGS_PATH)) for j in TAGS_PATH.glob("*.json") if j.is_file()]
    json_files = files
    json_files_withnone = ["None"] + files
@@ -274,6 +407,7 @@ write_to_temp_file('umi_tags.txt', [])
write_to_temp_file('hyp.txt', [])
write_to_temp_file('lora.txt', [])
write_to_temp_file('lyco.txt', [])
write_to_temp_file('styles.txt', [])
# Only reload embeddings if the file doesn't exist, since they are already re-written on model load
if not TEMP_PATH.joinpath("emb.txt").exists():
    write_to_temp_file('emb.txt', [])
@@ -283,22 +417,41 @@ if EMB_PATH.exists():
    # Get embeddings after the model loaded callback
    script_callbacks.on_model_loaded(get_embeddings)

def refresh_temp_files():
    global WILDCARD_EXT_PATHS
    WILDCARD_EXT_PATHS = find_ext_wildcard_paths()
    load_textual_inversion_embeddings(force_reload = True) # Instant embedding reload.
    write_temp_files()
    get_embeddings(shared.sd_model)
def refresh_embeddings(force: bool, *args, **kwargs):
    try:
        # Fix for SD.Next infinite refresh loop due to gradio not updating after model load on demand.
        # This will just skip embedding loading if no model is loaded yet (or there really are no embeddings).
        # Try catch is just for safety in case sd_hijack access fails for some reason.
        loaded = sd_hijack.model_hijack.embedding_db.word_embeddings
        skipped = sd_hijack.model_hijack.embedding_db.skipped_embeddings
        if len((loaded | skipped)) > 0:
            load_textual_inversion_embeddings(force_reload=force)
            get_embeddings(None)
    except Exception:
        pass

def write_temp_files():
def refresh_temp_files(*args, **kwargs):
    global WILDCARD_EXT_PATHS
    skip_wildcard_refresh = getattr(shared.opts, "tac_skipWildcardRefresh", False)
    if skip_wildcard_refresh:
        WILDCARD_EXT_PATHS = find_ext_wildcard_paths()
    write_temp_files(skip_wildcard_refresh)
    refresh_embeddings(force=True)

def write_style_names(*args, **kwargs):
    styles = get_style_names()
    if styles:
        write_to_temp_file('styles.txt', styles)

def write_temp_files(skip_wildcard_refresh = False):
    # Write wildcards to wc.txt if found
    if WILDCARD_PATH.exists():
    if WILDCARD_PATH.exists() and not skip_wildcard_refresh:
        wildcards = [WILDCARD_PATH.relative_to(FILE_DIR).as_posix()] + get_wildcards()
        if wildcards:
            write_to_temp_file('wc.txt', wildcards)

    # Write extension wildcards to wce.txt if found
    if WILDCARD_EXT_PATHS is not None:
    if WILDCARD_EXT_PATHS is not None and not skip_wildcard_refresh:
        wildcards_ext = get_ext_wildcards()
        if wildcards_ext:
            write_to_temp_file('wce.txt', wildcards_ext)
@@ -330,6 +483,8 @@ def write_temp_files():
    if model_keyword_installed:
        update_hash_cache()

    if shared.prompt_styles is not None:
        write_style_names()

write_temp_files()
@@ -349,6 +504,13 @@ def on_ui_settings():
        return self
    shared.OptionInfo.needs_restart = needs_restart

    # Dictionary of function options and their explanations
    frequency_sort_functions = {
        "Logarithmic (weak)": "Will respect the base order and slightly prefer often used tags",
        "Logarithmic (strong)": "Same as Logarithmic (weak), but with a stronger bias",
        "Usage first": "Will list used tags by frequency before all others",
    }

    tac_options = {
        # Main tag file
        "tac_tagFile": shared.OptionInfo("danbooru.csv", "Tag filename", gr.Dropdown, lambda: {"choices": csv_files_withnone}, refresh=update_tag_files),
@@ -368,13 +530,25 @@
        "tac_delayTime": shared.OptionInfo(100, "Time in ms to wait before triggering completion again").needs_restart(),
        "tac_useWildcards": shared.OptionInfo(True, "Search for wildcards"),
        "tac_sortWildcardResults": shared.OptionInfo(True, "Sort wildcard file contents alphabetically").info("If your wildcard files have a specific custom order, disable this to keep it"),
        "tac_wildcardExclusionList": shared.OptionInfo("", "Wildcard folder exclusion list").info("Add folder names that shouldn't be searched for wildcards, separated by comma.").needs_restart(),
        "tac_skipWildcardRefresh": shared.OptionInfo(False, "Don't re-scan for wildcard files when pressing the extra networks refresh button").info("Useful to prevent hanging if you use a very large wildcard collection."),
        "tac_useEmbeddings": shared.OptionInfo(True, "Search for embeddings"),
        "tac_includeEmbeddingsInNormalResults": shared.OptionInfo(False, "Include embeddings in normal tag results").info("The 'JumpTo...' keybinds (End & Home key by default) will select the first non-embedding result of their direction on the first press for quick navigation in longer lists."),
        "tac_useHypernetworks": shared.OptionInfo(True, "Search for hypernetworks"),
        "tac_useLoras": shared.OptionInfo(True, "Search for Loras"),
        "tac_useLycos": shared.OptionInfo(True, "Search for LyCORIS/LoHa"),
        "tac_useLoraPrefixForLycos": shared.OptionInfo(True, "Use the '<lora:' prefix instead of '<lyco:' for models in the LyCORIS folder").info("The lyco prefix is included for backwards compatibility and not used anymore by default. Disable this if you are on an old webui version without built-in lyco support."),
        "tac_showWikiLinks": shared.OptionInfo(False, "Show '?' next to tags, linking to its Danbooru or e621 wiki page").info("Warning: This is an external site and very likely contains NSFW examples!"),
        "tac_showExtraNetworkPreviews": shared.OptionInfo(True, "Show preview thumbnails for extra networks if available"),
        "tac_modelSortOrder": shared.OptionInfo("Name", "Model sort order", gr.Dropdown, lambda: {"choices": list(sort_criteria.keys())}).info("Order for extra network models and wildcards in dropdown"),
        "tac_useStyleVars": shared.OptionInfo(False, "Search for webui style names").info("Suggests style names from the webui dropdown with '$'. Currently requires a secondary extension like <a href=\"https://github.com/SirVeggie/extension-style-vars\" target=\"_blank\">style-vars</a> to actually apply the styles before generating."),
        # Frequency sorting settings
        "tac_frequencySort": shared.OptionInfo(True, "Locally record tag usage and sort frequent tags higher").info("Will also work for extra networks, keeping the specified base order"),
        "tac_frequencyFunction": shared.OptionInfo("Logarithmic (weak)", "Function to use for frequency sorting", gr.Dropdown, lambda: {"choices": list(frequency_sort_functions.keys())}).info("; ".join([f'<b>{key}</b>: {val}' for key, val in frequency_sort_functions.items()])),
        "tac_frequencyMinCount": shared.OptionInfo(3, "Minimum number of uses for a tag to be considered frequent").info("Tags with less uses than this will not be sorted higher, even if the sorting function would normally result in a higher position."),
        "tac_frequencyMaxAge": shared.OptionInfo(30, "Maximum days since last use for a tag to be considered frequent").info("Similar to the above, tags that haven't been used in this many days will not be sorted higher. Set to 0 to disable."),
        "tac_frequencyRecommendCap": shared.OptionInfo(10, "Maximum number of recommended tags").info("Limits the maximum number of recommended tags to not drown out normal results. Set to 0 to disable."),
        "tac_frequencyIncludeAlias": shared.OptionInfo(False, "Frequency sorting matches aliases for frequent tags").info("Tag frequency will be increased for the main tag even if an alias is used for completion. This option can be used to override the default behavior of alias results being ignored for frequency sorting."),
        # Insertion related settings
        "tac_replaceUnderscores": shared.OptionInfo(True, "Replace underscores with spaces on insertion"),
        "tac_escapeParentheses": shared.OptionInfo(True, "Escape parentheses on insertion"),
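The actual frequency-sorting math runs on the JavaScript side and is not part of this diff, but the option names suggest a log-dampened bias. As a rough illustration only (the formula and weights below are guesses, not the shipped implementation), a "logarithmic" blend might look like:

```python
import math

def biased_key(base_rank, use_count, strength=1.0):
    # Lower key sorts first: frequently used tags float up without
    # completely overturning the base order.
    return base_rank - strength * math.log1p(use_count)

tags = [("1girl", 0, 2), ("solo", 1, 40)]  # (name, base_rank, use_count)
print(sorted(tags, key=lambda t: biased_key(t[1], t[2])))
```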
@@ -439,6 +613,20 @@ def on_ui_settings():
            "6": ["red", "maroon"],
            "7": ["whitesmoke", "black"],
            "8": ["seagreen", "darkseagreen"]
        },
        "derpibooru": {
            "-1": ["red", "maroon"],
            "0": ["#60d160", "#3d9d3d"],
            "1": ["#fff956", "#918e2e"],
            "3": ["#fd9961", "#a14c2e"],
            "4": ["#cf5bbe", "#6c1e6c"],
            "5": ["#3c8ad9", "#1e5e93"],
            "6": ["#a6a6a6", "#555555"],
            "7": ["#47abc1", "#1f6c7c"],
            "8": ["#7871d0", "#392f7d"],
            "9": ["#df3647", "#8e1c2b"],
            "10": ["#c98f2b", "#7b470e"],
            "11": ["#e87ebe", "#a83583"]
        }
    }\
    """
@@ -456,25 +644,40 @@ def on_ui_settings():

script_callbacks.on_ui_settings(on_ui_settings)

def get_style_mtime():
    try:
        style_file = getattr(shared, "styles_filename", "styles.csv")
        # Check in case a list is returned
        if isinstance(style_file, list):
            style_file = style_file[0]

        style_file = Path(FILE_DIR).joinpath(style_file)
        if Path.exists(style_file):
            return style_file.stat().st_mtime
    except Exception:
        return None

last_style_mtime = get_style_mtime()

def api_tac(_: gr.Blocks, app: FastAPI):
    async def get_json_info(base_path: Path, filename: str = None):
        if base_path is None or (not base_path.exists()):
            return JSONResponse({}, status_code=404)
            return Response(status_code=404)

        try:
            json_candidates = glob.glob(base_path.as_posix() + f"/**/{filename}.json", recursive=True)
            if json_candidates is not None and len(json_candidates) > 0:
            if json_candidates is not None and len(json_candidates) > 0 and Path(json_candidates[0]).is_file():
                return FileResponse(json_candidates[0])
        except Exception as e:
            return JSONResponse({"error": e}, status_code=500)

    async def get_preview_thumbnail(base_path: Path, filename: str = None, blob: bool = False):
        if base_path is None or (not base_path.exists()):
            return JSONResponse({}, status_code=404)
            return Response(status_code=404)

        try:
            img_glob = glob.glob(base_path.as_posix() + f"/**/{filename}.*", recursive=True)
            img_candidates = [img for img in img_glob if Path(img).suffix in [".png", ".jpg", ".jpeg", ".webp", ".gif"]]
            img_candidates = [img for img in img_glob if Path(img).suffix in [".png", ".jpg", ".jpeg", ".webp", ".gif"] and Path(img).is_file()]
            if img_candidates is not None and len(img_candidates) > 0:
                if blob:
                    return FileResponse(img_candidates[0])
@@ -483,6 +686,14 @@ def api_tac(_: gr.Blocks, app: FastAPI):
        except Exception as e:
            return JSONResponse({"error": e}, status_code=500)

    @app.post("/tacapi/v1/refresh-temp-files")
    async def api_refresh_temp_files():
        refresh_temp_files()

    @app.post("/tacapi/v1/refresh-embeddings")
    async def api_refresh_embeddings():
        refresh_embeddings(force=False)

    @app.get("/tacapi/v1/lora-info/{lora_name}")
    async def get_lora_info(lora_name):
        return await get_json_info(LORA_PATH, lora_name)
@@ -491,6 +702,18 @@ def api_tac(_: gr.Blocks, app: FastAPI):
    async def get_lyco_info(lyco_name):
        return await get_json_info(LYCO_PATH, lyco_name)

    @app.get("/tacapi/v1/lora-cached-hash/{lora_name}")
    async def get_lora_cached_hash(lora_name: str):
        path_glob = glob.glob(LORA_PATH.as_posix() + f"/**/{lora_name}.*", recursive=True)
        paths = [lora for lora in path_glob if Path(lora).suffix in [".safetensors", ".ckpt", ".pt"] and Path(lora).is_file()]
        if paths is not None and len(paths) > 0:
            path = paths[0]
            hash = hashes.sha256_from_cache(path, f"lora/{lora_name}", path.endswith(".safetensors"))
            if hash is not None:
                return hash

        return None
    def get_path_for_type(type):
        if type == "lora":
            return LORA_PATH
@@ -514,20 +737,88 @@ def api_tac(_: gr.Blocks, app: FastAPI):
    @app.get("/tacapi/v1/wildcard-contents")
    async def get_wildcard_contents(basepath: str, filename: str):
        if basepath is None or basepath == "":
            return JSONResponse({}, status_code=404)
            return Response(status_code=404)

        base = Path(basepath)
        if base is None or (not base.exists()):
            return JSONResponse({}, status_code=404)
            return Response(status_code=404)

        try:
            wildcard_path = base.joinpath(filename)
            if wildcard_path.exists():
            if wildcard_path.exists() and wildcard_path.is_file():
                return FileResponse(wildcard_path)
            else:
                return JSONResponse({}, status_code=404)
                return Response(status_code=404)
        except Exception as e:
            return JSONResponse({"error": e}, status_code=500)

    @app.get("/tacapi/v1/refresh-styles-if-changed")
    async def refresh_styles_if_changed():
        global last_style_mtime

        mtime = get_style_mtime()
        if mtime is not None and mtime > last_style_mtime:
            last_style_mtime = mtime
            # Update temp file
            if shared.prompt_styles is not None:
                write_style_names()

            return Response(status_code=200)  # Success
        else:
            return Response(status_code=304)  # Not modified
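From the client's perspective this endpoint behaves like a conditional fetch: 200 means the styles file changed and `styles.txt` was rewritten, 304 means the cached list is still valid. A minimal polling sketch, assuming the default webui address and the `requests` package (both assumptions):

```python
import requests

resp = requests.get("http://127.0.0.1:7860/tacapi/v1/refresh-styles-if-changed")
if resp.status_code == 200:
    print("styles changed, reload styles.txt")
elif resp.status_code == 304:
    print("styles unchanged, keep the cached list")
```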
    def db_request(func, get = False):
        if db is not None:
            try:
                if get:
                    ret = func()
                    if isinstance(ret, list):
                        ret = [{"name": t[0], "type": t[1], "count": t[2], "lastUseDate": t[3]} for t in ret]
                    return JSONResponse({"result": ret})
                else:
                    func()
            except sqlite3.Error as e:
                return JSONResponse({"error": e.__cause__}, status_code=500)
        else:
            return JSONResponse({"error": "Database not initialized"}, status_code=500)

    @app.post("/tacapi/v1/increase-use-count")
    async def increase_use_count(tagname: str, ttype: int, neg: bool):
        db_request(lambda: db.increase_tag_count(tagname, ttype, neg))

    @app.get("/tacapi/v1/get-use-count")
    async def get_use_count(tagname: str, ttype: int, neg: bool):
        return db_request(lambda: db.get_tag_count(tagname, ttype, neg), get=True)

    # Small dataholder class
    class UseCountListRequest(BaseModel):
        tagNames: list[str]
        tagTypes: list[int]
        neg: bool = False

    # Semantically weird to use post here, but it's required for the body on js side
    @app.post("/tacapi/v1/get-use-count-list")
    async def get_use_count_list(body: UseCountListRequest):
        # If a date limit is set > 0, pass it to the db
        date_limit = getattr(shared.opts, "tac_frequencyMaxAge", 30)
        date_limit = date_limit if date_limit > 0 else None

        count_list = list(db.get_tag_counts(body.tagNames, body.tagTypes, body.neg, date_limit))

        # If a limit is set, return at max the top n results by count
        if count_list and len(count_list):
            limit = int(min(getattr(shared.opts, "tac_frequencyRecommendCap", 10), len(count_list)))
            # Sort by count and return the top n
            if limit > 0:
                count_list = sorted(count_list, key=lambda x: x[2], reverse=True)[:limit]

        return db_request(lambda: count_list, get=True)

    @app.put("/tacapi/v1/reset-use-count")
    async def reset_use_count(tagname: str, ttype: int, pos: bool, neg: bool):
        db_request(lambda: db.reset_tag_count(tagname, ttype, pos, neg))

    @app.get("/tacapi/v1/get-all-use-counts")
    async def get_all_tag_counts():
        return db_request(lambda: db.get_all_tags(), get=True)

script_callbacks.on_app_started(api_tac)
scripts/tag_frequency_db.py (new file, 189 lines)
@@ -0,0 +1,189 @@
import sqlite3
from contextlib import contextmanager

from scripts.shared_paths import TAGS_PATH

db_file = TAGS_PATH.joinpath("tag_frequency.db")
timeout = 30
db_ver = 1


@contextmanager
def transaction(db=db_file):
    """Context manager for database transactions.
    Ensures that the connection is properly closed after the transaction.
    """
    conn = None  # defined up front so the finally block can't hit an unbound name if connect() fails
    try:
        conn = sqlite3.connect(db, timeout=timeout)

        conn.isolation_level = None
        cursor = conn.cursor()
        cursor.execute("BEGIN")
        yield cursor
        cursor.execute("COMMIT")
    except sqlite3.Error as e:
        print("Tag Autocomplete: Frequency database error:", e)
    finally:
        if conn:
            conn.close()
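A usage sketch for the `transaction()` helper above, run against a throwaway database path instead of the real `tag_frequency.db`:

```python
with transaction("demo.db") as cursor:
    cursor.execute("CREATE TABLE IF NOT EXISTS kv (k TEXT PRIMARY KEY, v TEXT)")
    cursor.execute("INSERT OR REPLACE INTO kv VALUES (?, ?)", ("hello", "world"))

with transaction("demo.db") as cursor:
    cursor.execute("SELECT v FROM kv WHERE k = ?", ("hello",))
    print(cursor.fetchone())  # ('world',)
```

Each `with` block gets its own connection, an explicit `BEGIN`/`COMMIT`, and a guaranteed close, which is what makes the helper safe to call from several request handlers.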
class TagFrequencyDb:
    """Class containing creation and interaction methods for the tag frequency database"""

    def __init__(self) -> None:
        self.version = self.__check()

    def __check(self):
        if not db_file.exists():
            print("Tag Autocomplete: Creating frequency database")
            with transaction() as cursor:
                self.__create_db(cursor)
                self.__update_db_data(cursor, "version", db_ver)
            print("Tag Autocomplete: Database successfully created")

        return self.__get_version()

    def __create_db(self, cursor: sqlite3.Cursor):
        cursor.execute(
            """
            CREATE TABLE IF NOT EXISTS db_data (
                key TEXT PRIMARY KEY,
                value TEXT
            )
            """
        )

        cursor.execute(
            """
            CREATE TABLE IF NOT EXISTS tag_frequency (
                name TEXT NOT NULL,
                type INT NOT NULL,
                count_pos INT,
                count_neg INT,
                last_used TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                PRIMARY KEY (name, type)
            )
            """
        )

    def __update_db_data(self, cursor: sqlite3.Cursor, key, value):
        cursor.execute(
            """
            INSERT OR REPLACE
            INTO db_data (key, value)
            VALUES (?, ?)
            """,
            (key, value),
        )

    def __get_version(self):
        with transaction() as cursor:
            cursor.execute(
                """
                SELECT value
                FROM db_data
                WHERE key = 'version'
                """
            )
            db_version = cursor.fetchone()

        return db_version[0] if db_version else 0

    def get_all_tags(self):
        with transaction() as cursor:
            cursor.execute(
                f"""
                SELECT name, type, count_pos, count_neg, last_used
                FROM tag_frequency
                WHERE count_pos > 0 OR count_neg > 0
                ORDER BY count_pos + count_neg DESC
                """
            )
            tags = cursor.fetchall()

        return tags
    def get_tag_count(self, tag, ttype, negative=False):
        count_str = "count_neg" if negative else "count_pos"
        with transaction() as cursor:
            cursor.execute(
                f"""
                SELECT {count_str}, last_used
                FROM tag_frequency
                WHERE name = ? AND type = ?
                """,
                (tag, ttype),
            )
            tag_count = cursor.fetchone()

        if tag_count:
            return tag_count[0], tag_count[1]
        else:
            return 0, None

    def get_tag_counts(self, tags: list[str], ttypes: list[str], negative=False, date_limit=None):
        count_str = "count_neg" if negative else "count_pos"
        with transaction() as cursor:
            for tag, ttype in zip(tags, ttypes):
                if date_limit is not None:
                    cursor.execute(
                        f"""
                        SELECT {count_str}, last_used
                        FROM tag_frequency
                        WHERE name = ? AND type = ?
                        AND last_used > datetime('now', '-' || ? || ' days')
                        """,
                        (tag, ttype, date_limit),
                    )
                else:
                    cursor.execute(
                        f"""
                        SELECT {count_str}, last_used
                        FROM tag_frequency
                        WHERE name = ? AND type = ?
                        """,
                        (tag, ttype),
                    )
                tag_count = cursor.fetchone()
                if tag_count:
                    yield (tag, ttype, tag_count[0], tag_count[1])
                else:
                    yield (tag, ttype, 0, None)

    def increase_tag_count(self, tag, ttype, negative=False):
        pos_count = self.get_tag_count(tag, ttype, False)[0]
        neg_count = self.get_tag_count(tag, ttype, True)[0]

        if negative:
            neg_count += 1
        else:
            pos_count += 1

        with transaction() as cursor:
            cursor.execute(
                f"""
                INSERT OR REPLACE
                INTO tag_frequency (name, type, count_pos, count_neg)
                VALUES (?, ?, ?, ?)
                """,
                (tag, ttype, pos_count, neg_count),
            )

    def reset_tag_count(self, tag, ttype, positive=True, negative=False):
        if positive and negative:
            set_str = "count_pos = 0, count_neg = 0"
        elif positive:
            set_str = "count_pos = 0"
        elif negative:
            set_str = "count_neg = 0"

        with transaction() as cursor:
            cursor.execute(
                f"""
                UPDATE tag_frequency
                SET {set_str}
                WHERE name = ? AND type = ?
                """,
                (tag, ttype),
            )
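The increase/reset methods are read-modify-write upserts on the `(name, type)` primary key. The core pattern, reduced to a self-contained in-memory demo (schema trimmed to the relevant columns):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE tag_frequency ("
    "name TEXT, type INT, count_pos INT, count_neg INT, "
    "PRIMARY KEY (name, type))"
)

def bump(name, ttype):
    # Read the current count, then INSERT OR REPLACE the whole row,
    # mirroring increase_tag_count() above.
    row = conn.execute(
        "SELECT count_pos FROM tag_frequency WHERE name = ? AND type = ?",
        (name, ttype),
    ).fetchone()
    count = (row[0] if row else 0) + 1
    conn.execute(
        "INSERT OR REPLACE INTO tag_frequency VALUES (?, ?, ?, 0)",
        (name, ttype, count),
    )

bump("1girl", 0)
bump("1girl", 0)
print(conn.execute("SELECT count_pos FROM tag_frequency").fetchone())  # (2,)
```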
tags/EnglishDictionary.csv (new file, 113301 lines): diff suppressed because it is too large
tags/danbooru.csv (168662 lines): diff suppressed because it is too large
tags/derpibooru.csv (new file, 95091 lines): diff suppressed because it is too large
tags/e621_sfw.csv (new file, 22419 lines): diff suppressed because one or more lines are too long