Compare commits

..

24 Commits
3.1.0 ... 3.3.0

Author SHA1 Message Date
DominikDoom
8766965a30 Credit original author 2025-05-08 12:43:40 +02:00
Disty0
34e68e1628 Fix SDNext ModernUI by following the cursor (#327) 2025-05-05 20:44:51 +02:00
DominikDoom
41d185b616 Improve IME consistency
Might help with #326
2025-05-01 13:48:32 +02:00
DominikDoom
e0baa58ace Fix style appending to wrong node on forge classic
Fixes #323
2025-04-16 11:23:12 +02:00
DominikDoom
c1ef12d887 Fix weighted tags preventing normal tag completion
caused by filter applying to every tag instead of just one to one
Fixes #324
2025-04-15 21:56:16 +02:00
Serick
4fc122de4b Added support for Forge classic (#322)
Fixes issues due to removal of hypernetworks in Forge classic
2025-04-15 09:35:54 +02:00
re-unknown
c341ccccb6 Add TIPO configuration for tag prompt in third-party selectors (#319) 2025-03-23 14:26:34 +01:00
akoyaki ayagi
bda8701734 Add a character core tags list file for chant function (#317)
Alternative chant list ("<c:" or "<chant:" prefix) for 26k characters and their tag descriptions. Allows greater likeness even if the model doesn't know the character well.
2025-03-08 10:45:40 +01:00
undefined
63fca457a7 Indicate repeated tag (#313)
Shows 🔁 to mark a tag that has already been used in the prompt
2025-01-16 09:29:33 +01:00
DominikDoom
38700d4743 Formatting 2025-01-04 19:35:14 +01:00
DominikDoom
bb492ba059 Add default color config & wiki link fix for merged tag list 2025-01-04 19:33:29 +01:00
Drac
40ad070a02 Add danbooru_e621_merged.csv (#312)
Post count threshold for this file is 25
2025-01-04 19:12:57 +01:00
DominikDoom
209b1dd76b End of 2024 tag list update
Danbooru and e621 tag lists as of 2024-12-22 (no Derpibooru for now, sorry).
Both cut off at a post count of 25, slightly improved consistency & new aliases included.
Thanks a lot to @DraconicDragon for the up-to-date tag list at https://github.com/DraconicDragon/dbr-e621-lists-archive
2025-01-03 14:03:26 +01:00
DominikDoom
196fa19bfc Fix derpibooru tags containing merge conflict markers
Thanks to @heftig for noticing this, as discussed in #293
2024-12-08 18:23:21 +01:00
DominikDoom
6ffeeafc49 Update danbooru tags (2024-11-9)
Thanks to @yamosin.
Closes #309

Note: This changes the cutoff type from top 100k to post count > 30, which adds ~21k rows
2024-11-09 15:35:59 +01:00
DominikDoom
08b7c58ea7 More catches for fixing #308 2024-11-02 15:52:10 +01:00
DominikDoom
6be91449f3 Try-catch in umi format check
Possible fix for #308
2024-11-02 13:51:51 +01:00
david419kr
b515c15e01 Underscore replacement exclusion list feature (#306) 2024-10-30 17:45:32 +01:00
DominikDoom
827b99c961 Make embedding refresh non-force by default
Added option for force-refreshing embeddings to restore old behavior
Fixes #301
2024-09-04 22:58:55 +02:00
DominikDoom
49ec047af8 Fix extra network tab refresh listener 2024-08-15 11:52:49 +02:00
DominikDoom
f94da07ed1 Fix ref 2024-08-11 14:56:58 +02:00
DominikDoom
e2cfe7341b Re-register embed load callback after model load if needed 2024-08-11 14:55:35 +02:00
DominikDoom
ce51ec52a2 Fix for forge type detection, sorting fallback if filename is missing 2024-08-11 14:26:37 +02:00
DominikDoom
f64d728ac6 Partial embedding fixes for webui forge
Resolves some symptoms of #297, but doesn't fix the underlying cause
2024-08-11 14:08:31 +02:00
9 changed files with 682979 additions and 265006 deletions

View File

@@ -86,6 +86,13 @@ const thirdParty = {
"selectors": [
"Found tags",
]
},
"TIPO": {
"base": "#tab_txt2img",
"hasIds": false,
"selectors": [
"Tag Prompt"
]
}
}

View File

@@ -31,7 +31,7 @@ const autocompleteCSS = `
position: absolute;
z-index: 999;
max-width: calc(100% - 1.5rem);
margin: 5px 0 0 0;
flex-direction: column; /* Ensure children stack vertically */
}
.autocompleteResults {
background-color: var(--results-bg) !important;
@@ -44,6 +44,7 @@ const autocompleteCSS = `
overflow-y: var(--results-overflow-y);
overflow-x: hidden;
word-break: break-word;
margin-top: 10px; /* Margin to create space below the cursor */
}
.sideInfo {
display: none;
@@ -90,6 +91,10 @@ const autocompleteCSS = `
content: "✨";
margin-right: 2px;
}
.acMetaText span.used::after {
content: "🔁";
margin-right: 2px;
}
.acWikiLink {
padding: 0.5rem;
margin: -0.5rem 0 -0.5rem -0.5rem;
@@ -234,6 +239,7 @@ async function syncOptions() {
useStyleVars: opts["tac_useStyleVars"],
// Insertion related settings
replaceUnderscores: opts["tac_replaceUnderscores"],
replaceUnderscoresExclusionList: opts["tac_undersocreReplacementExclusionList"],
escapeParentheses: opts["tac_escapeParentheses"],
appendComma: opts["tac_appendComma"],
appendSpace: opts["tac_appendSpace"],
@@ -357,10 +363,13 @@ function showResults(textArea) {
parentDiv.style.display = "flex";
if (TAC_CFG.slidingPopup) {
let caretPosition = getCaretCoordinates(textArea, textArea.selectionEnd).left;
let offset = Math.min(textArea.offsetLeft - textArea.scrollLeft + caretPosition, textArea.offsetWidth - parentDiv.offsetWidth);
let caretPosition = getCaretCoordinates(textArea, textArea.selectionEnd);
// Top cursor offset fix for SDNext modern UI, based on code by https://github.com/Nyx01
let offsetTop = textArea.offsetTop + caretPosition.top - textArea.scrollTop + 10; // Adjust this value for desired distance below cursor
let offsetLeft = Math.min(textArea.offsetLeft - textArea.scrollLeft + caretPosition.left, textArea.offsetWidth - parentDiv.offsetWidth);
parentDiv.style.left = `${offset}px`;
parentDiv.style.top = `${offsetTop}px`; // Position below the cursor
parentDiv.style.left = `${offsetLeft}px`;
} else {
if (parentDiv.style.left)
parentDiv.style.removeProperty("left");
@@ -430,8 +439,12 @@ async function insertTextAtCursor(textArea, result, tagword, tabCompletedWithout
if (sanitizeResults && sanitizeResults.length > 0) {
sanitizedText = sanitizeResults[0];
} else {
sanitizedText = TAC_CFG.replaceUnderscores ? text.replaceAll("_", " ") : text;
const excluded_tags = TAC_CFG.replaceUnderscoresExclusionList?.split(',').map(s => s.trim()) || [];
if (TAC_CFG.replaceUnderscores && !excluded_tags.includes(sanitizedText)) {
sanitizedText = text.replaceAll("_", " ")
} else {
sanitizedText = text;
}
if (TAC_CFG.escapeParentheses && tagType === ResultType.tag) {
sanitizedText = sanitizedText
.replaceAll("(", "\\(")
@@ -621,12 +634,30 @@ async function insertTextAtCursor(textArea, result, tagword, tabCompletedWithout
updateInput(textArea);
// Update previous tags with the edited prompt to prevent re-searching the same term
let weightedTags = [...newPrompt.matchAll(WEIGHT_REGEX)]
.map(match => match[1]);
let tags = newPrompt.match(TAG_REGEX())
if (weightedTags !== null) {
tags = tags.filter(tag => !weightedTags.some(weighted => tag.includes(weighted)))
.concat(weightedTags);
let weightedTags = [...prompt.matchAll(WEIGHT_REGEX)]
.map(match => match[1])
.sort((a, b) => a.length - b.length);
let tags = [...prompt.match(TAG_REGEX())].sort((a, b) => a.length - b.length);
if (weightedTags !== null && tags !== null) {
// Create a working copy of the normal tags
let workingTags = [...tags];
// For each weighted tag
for (const weightedTag of weightedTags) {
// Find first matching tag and remove it from working set
const matchIndex = workingTags.findIndex(tag =>
tag === weightedTag && !tag.startsWith("<[") && !tag.startsWith("$(")
);
if (matchIndex !== -1) {
// Remove the matched tag from the working set
workingTags.splice(matchIndex, 1);
}
}
// Combine filtered normal tags with weighted tags
tags = workingTags.concat(weightedTags);
}
previousTags = tags;
@@ -661,6 +692,30 @@ function addResultsToList(textArea, results, tagword, resetList) {
let tagColors = TAC_CFG.colorMap;
let mode = (document.querySelector(".dark") || gradioApp().querySelector(".dark")) ? 0 : 1;
let nextLength = Math.min(results.length, resultCount + TAC_CFG.resultStepLength);
const IS_DAN_OR_E621_TAG_FILE = (tagFileName.toLowerCase().startsWith("danbooru") || tagFileName.toLowerCase().startsWith("e621"));
const tagCount = {};
// Indicate if tag was used before
if (IS_DAN_OR_E621_TAG_FILE) {
const prompt = textArea.value.trim();
const tags = prompt.replaceAll('\n', ',').split(',').map(tag => tag.trim()).filter(tag => tag);
const unsanitizedTags = tags.map(tag => {
const weightedTags = [...tag.matchAll(WEIGHT_REGEX)].flat();
if (weightedTags.length === 2) {
return weightedTags[1];
} else {
// normal tags
return tag;
}
}).map(tag => tag.replaceAll(" ", "_").replaceAll("\\(", "(").replaceAll("\\)", ")"));
// Split tags by `,` and count each tag's occurrences
for (const tag of unsanitizedTags) {
tagCount[tag] = tagCount[tag] ? tagCount[tag] + 1 : 1;
}
}
for (let i = resultCount; i < nextLength; i++) {
let result = results[i];
@@ -726,29 +781,38 @@ function addResultsToList(textArea, results, tagword, resetList) {
}
// Add wiki link if the setting is enabled and a supported tag set loaded
if (TAC_CFG.showWikiLinks
&& (result.type === ResultType.tag)
&& (tagFileName.toLowerCase().startsWith("danbooru") || tagFileName.toLowerCase().startsWith("e621"))) {
if (
TAC_CFG.showWikiLinks &&
result.type === ResultType.tag &&
IS_DAN_OR_E621_TAG_FILE
) {
let wikiLink = document.createElement("a");
wikiLink.classList.add("acWikiLink");
wikiLink.innerText = "?";
wikiLink.title = "Open external wiki page for this tag"
wikiLink.title = "Open external wiki page for this tag";
let linkPart = displayText;
// Only use alias result if it is one
if (displayText.includes("➝"))
linkPart = displayText.split(" ➝ ")[1];
if (displayText.includes("➝")) linkPart = displayText.split(" ➝ ")[1];
// Remove any trailing translations
if (linkPart.includes("[")) {
linkPart = linkPart.split("[")[0]
linkPart = linkPart.split("[")[0];
}
linkPart = encodeURIComponent(linkPart);
// Set link based on selected file
let tagFileNameLower = tagFileName.toLowerCase();
if (tagFileNameLower.startsWith("danbooru")) {
if (tagFileNameLower.startsWith("danbooru_e621_merged")) {
// Use danbooru for categories 0-5, e621 for 6+
// Based on the merged categories from https://github.com/DraconicDragon/dbr-e621-lists-archive/tree/main/tag-lists/danbooru_e621_merged
// Danbooru is also the fallback if result.category is not set
wikiLink.href =
result.category && result.category >= 6
? `https://e621.net/wiki_pages/${linkPart}`
: `https://danbooru.donmai.us/wiki_pages/${linkPart}`;
} else if (tagFileNameLower.startsWith("danbooru")) {
wikiLink.href = `https://danbooru.donmai.us/wiki_pages/${linkPart}`;
} else if (tagFileNameLower.startsWith("e621")) {
wikiLink.href = `https://e621.net/wiki_pages/${linkPart}`;
@@ -813,7 +877,19 @@ function addResultsToList(textArea, results, tagword, resetList) {
// Add small ✨ marker to indicate usage sorting
if (result.usageBias) {
flexDiv.querySelector(".acMetaText").classList.add("biased");
flexDiv.title = "✨ Frequent tag. Ctrl/Cmd + click to reset usage count."
flexDiv.title = "✨ Frequent tag. Ctrl/Cmd + click to reset usage count.";
}
// Add 🔁 to indicate if tag was used before
if (IS_DAN_OR_E621_TAG_FILE && tagCount[result.text]) {
// Fix PR#313#issuecomment-2592551794
if (!(result.text === tagword && tagCount[result.text] === 1)) {
const textNode = flexDiv.querySelector(".acMetaText");
const span = document.createElement("span");
textNode.insertBefore(span, textNode.firstChild);
span.classList.add("used");
span.title = "🔁 The prompt already contains this tag";
}
}
// Check if it's a negative prompt
@@ -1072,11 +1148,29 @@ async function autocomplete(textArea, prompt, fixedTag = null) {
// Match tags with RegEx to get the last edited one
// We also match for the weighting format (e.g. "tag:1.0") here, and combine the two to get the full tag word set
let weightedTags = [...prompt.matchAll(WEIGHT_REGEX)]
.map(match => match[1]);
let tags = prompt.match(TAG_REGEX())
.map(match => match[1])
.sort((a, b) => a.length - b.length);
let tags = [...prompt.match(TAG_REGEX())].sort((a, b) => a.length - b.length);
if (weightedTags !== null && tags !== null) {
tags = tags.filter(tag => !weightedTags.some(weighted => tag.includes(weighted) && !tag.startsWith("<[") && !tag.startsWith("$(")))
.concat(weightedTags);
// Create a working copy of the normal tags
let workingTags = [...tags];
// For each weighted tag
for (const weightedTag of weightedTags) {
// Find first matching tag and remove it from working set
const matchIndex = workingTags.findIndex(tag =>
tag === weightedTag && !tag.startsWith("<[") && !tag.startsWith("$(")
);
if (matchIndex !== -1) {
// Remove the matched tag from the working set
workingTags.splice(matchIndex, 1);
}
}
// Combine filtered normal tags with weighted tags
tags = workingTags.concat(weightedTags);
}
// Guard for no tags
@@ -1421,6 +1515,12 @@ function addAutocompleteToArea(area) {
if (!e.inputType && !tacSelfTrigger) return;
tacSelfTrigger = false;
// Block hide while we are composing (IME), so enter doesn't close the results
if (e.isComposing) {
hideBlocked = true;
setTimeout(() => { hideBlocked = false; }, 100);
}
debounce(autocomplete(area, area.value), TAC_CFG.delayTime);
checkKeywordInsertionUndo(area, e);
});
@@ -1483,9 +1583,16 @@ async function setup() {
gradioApp().querySelector("#refresh_tac_refreshTempFiles")?.addEventListener("click", refreshTacTempFiles);
// Also add listener for external network refresh button (plus triggering python code)
["#img2img_extra_refresh", "#txt2img_extra_refresh"].forEach(e => {
gradioApp().querySelector(e)?.addEventListener("click", ()=>{
refreshTacTempFiles(true);
let alreadyAdded = new Set();
["#img2img_extra_refresh", "#txt2img_extra_refresh", ".extra-network-control--refresh"].forEach(e => {
const elems = gradioApp().querySelectorAll(e);
elems.forEach(elem => {
if (!elem || alreadyAdded.has(elem)) return;
alreadyAdded.add(elem);
elem.addEventListener("click", ()=>{
refreshTacTempFiles(true);
});
});
})
@@ -1539,7 +1646,7 @@ async function setup() {
} else {
acStyle.appendChild(document.createTextNode(css));
}
gradioApp().appendChild(acStyle);
document.head.appendChild(acStyle);
// Callback
await processQueue(QUEUE_AFTER_SETUP, null);

View File

@@ -20,9 +20,27 @@ except ImportError:
TAGS_PATH = Path(scripts.basedir()).joinpath("tags").absolute()
# The path to the folder containing the wildcards and embeddings
WILDCARD_PATH = FILE_DIR.joinpath("scripts/wildcards").absolute()
try: # SD.Next
WILDCARD_PATH = Path(shared.opts.wildcards_dir).absolute()
except Exception: # A1111
WILDCARD_PATH = FILE_DIR.joinpath("scripts/wildcards").absolute()
EMB_PATH = Path(shared.cmd_opts.embeddings_dir).absolute()
HYP_PATH = Path(shared.cmd_opts.hypernetwork_dir).absolute()
# Forge Classic detection
try:
from modules_forge.forge_version import version as forge_version
IS_FORGE_CLASSIC = forge_version == "classic"
except ImportError:
IS_FORGE_CLASSIC = False
# Forge Classic skips it
if not IS_FORGE_CLASSIC:
try:
HYP_PATH = Path(shared.cmd_opts.hypernetwork_dir).absolute()
except AttributeError:
HYP_PATH = None
else:
HYP_PATH = None
try:
LORA_PATH = Path(shared.cmd_opts.lora_dir).absolute()

View File

@@ -1,19 +1,20 @@
# This helper script scans folders for wildcards and embeddings and writes them
# to a temporary file to expose it to the javascript side
import sys
import glob
import importlib
import json
import sqlite3
import sys
import urllib.parse
from asyncio import sleep
from pathlib import Path
import gradio as gr
import yaml
from fastapi import FastAPI
from fastapi.responses import Response, FileResponse, JSONResponse
from modules import script_callbacks, sd_hijack, shared, hashes
from fastapi.responses import FileResponse, JSONResponse, Response
from modules import hashes, script_callbacks, sd_hijack, sd_models, shared
from pydantic import BaseModel
from scripts.model_keyword_support import (get_lora_simple_hash,
@@ -25,7 +26,7 @@ try:
try:
from scripts import tag_frequency_db as tdb
except ModuleNotFoundError:
from inspect import getframeinfo, currentframe
from inspect import currentframe, getframeinfo
filename = getframeinfo(currentframe()).filename
parent = Path(filename).resolve().parent
sys.path.append(str(parent))
@@ -41,9 +42,32 @@ except (ImportError, ValueError, sqlite3.Error) as e:
print(f"Tag Autocomplete: Tag frequency database error - \"{e}\"")
db = None
def get_embed_db(sd_model=None):
"""Returns the embedding database, if available."""
try:
return sd_hijack.model_hijack.embedding_db
except Exception:
try: # sd next with diffusers backend
sdnext_model = sd_model if sd_model is not None else shared.sd_model
return sdnext_model.embedding_db
except Exception:
try: # forge webui
forge_model = sd_model if sd_model is not None else sd_models.model_data.get_sd_model()
if type(forge_model).__name__ == "FakeInitialModel":
return None
else:
processer = getattr(forge_model, "text_processing_engine", getattr(forge_model, "text_processing_engine_l"))
return processer.embeddings
except Exception:
return None
# Attempt to get embedding load function, using the same call as api.
try:
load_textual_inversion_embeddings = sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings
embed_db = get_embed_db()
if embed_db is not None:
load_textual_inversion_embeddings = embed_db.load_textual_inversion_embeddings
else:
load_textual_inversion_embeddings = lambda *args, **kwargs: None
except Exception as e: # Not supported.
load_textual_inversion_embeddings = lambda *args, **kwargs: None
print("Tag Autocomplete: Cannot reload embeddings instantly:", e)
@@ -51,8 +75,8 @@ except Exception as e: # Not supported.
# Sorting functions for extra networks / embeddings stuff
sort_criteria = {
"Name": lambda path, name, subpath: name.lower() if subpath else path.stem.lower(),
"Date Modified (newest first)": lambda path, name, subpath: path.stat().st_mtime,
"Date Modified (oldest first)": lambda path, name, subpath: path.stat().st_mtime
"Date Modified (newest first)": lambda path, name, subpath: path.stat().st_mtime if path.exists() else name.lower(),
"Date Modified (oldest first)": lambda path, name, subpath: path.stat().st_mtime if path.exists() else name.lower()
}
def sort_models(model_list, sort_method = None, name_has_subpath = False):
@@ -110,7 +134,11 @@ def is_umi_format(data):
"""Returns True if the YAML file is in UMI format."""
issue_found = False
for item in data:
if not (data[item] and 'Tags' in data[item] and isinstance(data[item]['Tags'], list)):
try:
if not (data[item] and 'Tags' in data[item] and isinstance(data[item]['Tags'], list)):
issue_found = True
break
except:
issue_found = True
break
return not issue_found
@@ -132,9 +160,12 @@ def parse_dynamic_prompt_format(yaml_wildcards, data, path):
elif not (isinstance(value, list) and all(isinstance(v, str) for v in value)):
del d[key]
recurse_dict(data)
# Add to yaml_wildcards
yaml_wildcards[path.name] = data
try:
recurse_dict(data)
# Add to yaml_wildcards
yaml_wildcards[path.name] = data
except:
return
def get_yaml_wildcards():
@@ -159,9 +190,13 @@ def get_yaml_wildcards():
parse_dynamic_prompt_format(yaml_wildcards, data, path)
else:
print('No data found in ' + path.name)
except (yaml.YAMLError, UnicodeDecodeError) as e:
except (yaml.YAMLError, UnicodeDecodeError, AttributeError, TypeError) as e:
# YAML file not in wildcard format or couldn't be read
print(f'Issue in parsing YAML file {path.name}: {e}')
continue
except Exception as e:
# Something else went wrong, just skip
continue
# Sort by count
umi_sorted = sorted(umi_tags.items(), key=lambda item: item[1], reverse=True)
@@ -190,35 +225,45 @@ def get_embeddings(sd_model):
results = []
try:
# The sd_model embedding_db reference only exists in sd.next with diffusers backend
try:
loaded_sdnext = sd_model.embedding_db.word_embeddings
skipped_sdnext = sd_model.embedding_db.skipped_embeddings
except (NameError, AttributeError):
loaded_sdnext = {}
skipped_sdnext = {}
embed_db = get_embed_db(sd_model)
# Re-register callback if needed
global load_textual_inversion_embeddings
if embed_db is not None and load_textual_inversion_embeddings != embed_db.load_textual_inversion_embeddings:
load_textual_inversion_embeddings = embed_db.load_textual_inversion_embeddings
# Get embedding dict from sd_hijack to separate v1/v2 embeddings
loaded = sd_hijack.model_hijack.embedding_db.word_embeddings
skipped = sd_hijack.model_hijack.embedding_db.skipped_embeddings
loaded = loaded | loaded_sdnext
skipped = skipped | skipped_sdnext
loaded = embed_db.word_embeddings
skipped = embed_db.skipped_embeddings
# Add embeddings to the correct list
for key, emb in (skipped | loaded).items():
if emb.filename is None:
continue
if emb.shape is None:
emb_unknown.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), ""))
elif emb.shape == V1_SHAPE:
emb_v1.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "v1"))
elif emb.shape == V2_SHAPE:
emb_v2.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "v2"))
elif emb.shape == VXL_SHAPE:
emb_vXL.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "vXL"))
filename = getattr(emb, "filename", None)
if filename is None:
if emb.shape is None:
emb_unknown.append((Path(key), key, ""))
elif emb.shape == V1_SHAPE:
emb_v1.append((Path(key), key, "v1"))
elif emb.shape == V2_SHAPE:
emb_v2.append((Path(key), key, "v2"))
elif emb.shape == VXL_SHAPE:
emb_vXL.append((Path(key), key, "vXL"))
else:
emb_unknown.append((Path(key), key, ""))
else:
emb_unknown.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), ""))
if emb.filename is None:
continue
if emb.shape is None:
emb_unknown.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), ""))
elif emb.shape == V1_SHAPE:
emb_v1.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "v1"))
elif emb.shape == V2_SHAPE:
emb_v2.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "v2"))
elif emb.shape == VXL_SHAPE:
emb_vXL.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "vXL"))
else:
emb_unknown.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), ""))
results = sort_models(emb_v1) + sort_models(emb_v2) + sort_models(emb_vXL) + sort_models(emb_unknown)
except AttributeError:
@@ -289,7 +334,7 @@ try:
import sys
from modules import extensions
sys.path.append(Path(extensions.extensions_builtin_dir).joinpath("Lora").as_posix())
import lora # pyright: ignore [reportMissingImports]
import lora # pyright: ignore [reportMissingImports]
def _get_lora():
return [
@@ -430,8 +475,11 @@ def refresh_embeddings(force: bool, *args, **kwargs):
# Fix for SD.Next infinite refresh loop due to gradio not updating after model load on demand.
# This will just skip embedding loading if no model is loaded yet (or there really are no embeddings).
# Try catch is just for safety incase sd_hijack access fails for some reason.
loaded = sd_hijack.model_hijack.embedding_db.word_embeddings
skipped = sd_hijack.model_hijack.embedding_db.skipped_embeddings
embed_db = get_embed_db()
if embed_db is None:
return
loaded = embed_db.word_embeddings
skipped = embed_db.skipped_embeddings
if len((loaded | skipped)) > 0:
load_textual_inversion_embeddings(force_reload=force)
get_embeddings(None)
@@ -444,7 +492,8 @@ def refresh_temp_files(*args, **kwargs):
if skip_wildcard_refresh:
WILDCARD_EXT_PATHS = find_ext_wildcard_paths()
write_temp_files(skip_wildcard_refresh)
refresh_embeddings(force=True)
force_embed_refresh = getattr(shared.opts, "tac_forceRefreshEmbeddings", False)
refresh_embeddings(force=force_embed_refresh)
def write_style_names(*args, **kwargs):
styles = get_style_names()
@@ -454,7 +503,14 @@ def write_style_names(*args, **kwargs):
def write_temp_files(skip_wildcard_refresh = False):
# Write wildcards to wc.txt if found
if WILDCARD_PATH.exists() and not skip_wildcard_refresh:
wildcards = [WILDCARD_PATH.relative_to(FILE_DIR).as_posix()] + get_wildcards()
try:
# Attempt to create a relative path, but fall back to an absolute path if not possible
relative_wildcard_path = WILDCARD_PATH.relative_to(FILE_DIR).as_posix()
except ValueError:
# If the paths are not relative, use the absolute path
relative_wildcard_path = WILDCARD_PATH.as_posix()
wildcards = [relative_wildcard_path] + get_wildcards()
if wildcards:
write_to_temp_file('wc.txt', wildcards)
@@ -466,7 +522,7 @@ def write_temp_files(skip_wildcard_refresh = False):
# Write yaml extension wildcards to umi_tags.txt and wc_yaml.json if found
get_yaml_wildcards()
if HYP_PATH.exists():
if HYP_PATH is not None and HYP_PATH.exists():
hypernets = get_hypernetworks()
if hypernets:
write_to_temp_file('hyp.txt', hypernets)
@@ -541,6 +597,7 @@ def on_ui_settings():
"tac_wildcardExclusionList": shared.OptionInfo("", "Wildcard folder exclusion list").info("Add folder names that shouldn't be searched for wildcards, separated by comma.").needs_restart(),
"tac_skipWildcardRefresh": shared.OptionInfo(False, "Don't re-scan for wildcard files when pressing the extra networks refresh button").info("Useful to prevent hanging if you use a very large wildcard collection."),
"tac_useEmbeddings": shared.OptionInfo(True, "Search for embeddings"),
"tac_forceRefreshEmbeddings": shared.OptionInfo(False, "Force refresh embeddings when pressing the extra networks refresh button").info("Turn this on if you have issues with new embeddings not registering correctly in TAC. Warning: Seems to cause reloading issues in gradio for some users."),
"tac_includeEmbeddingsInNormalResults": shared.OptionInfo(False, "Include embeddings in normal tag results").info("The 'JumpTo...' keybinds (End & Home key by default) will select the first non-embedding result of their direction on the first press for quick navigation in longer lists."),
"tac_useHypernetworks": shared.OptionInfo(True, "Search for hypernetworks"),
"tac_useLoras": shared.OptionInfo(True, "Search for Loras"),
@@ -559,6 +616,7 @@ def on_ui_settings():
"tac_frequencyIncludeAlias": shared.OptionInfo(False, "Frequency sorting matches aliases for frequent tags").info("Tag frequency will be increased for the main tag even if an alias is used for completion. This option can be used to override the default behavior of alias results being ignored for frequency sorting."),
# Insertion related settings
"tac_replaceUnderscores": shared.OptionInfo(True, "Replace underscores with spaces on insertion"),
"tac_undersocreReplacementExclusionList": shared.OptionInfo("0_0,(o)_(o),+_+,+_-,._.,<o>_<o>,<|>_<|>,=_=,>_<,3_3,6_9,>_o,@_@,^_^,o_o,u_u,x_x,|_|,||_||", "Underscore replacement exclusion list").info("Add tags that shouldn't have underscores replaced with spaces, separated by comma."),
"tac_escapeParentheses": shared.OptionInfo(True, "Escape parentheses on insertion"),
"tac_appendComma": shared.OptionInfo(True, "Append comma on tag autocompletion"),
"tac_appendSpace": shared.OptionInfo(True, "Append space on tag autocompletion").info("will append after comma if the above is enabled"),
@@ -635,6 +693,23 @@ def on_ui_settings():
"9": ["#df3647", "#8e1c2b"],
"10": ["#c98f2b", "#7b470e"],
"11": ["#e87ebe", "#a83583"]
},
"danbooru_e621_merged": {
"-1": ["red", "maroon"],
"0": ["lightblue", "dodgerblue"],
"1": ["indianred", "firebrick"],
"3": ["violet", "darkorchid"],
"4": ["lightgreen", "darkgreen"],
"5": ["orange", "darkorange"],
"6": ["red", "maroon"],
"7": ["lightblue", "dodgerblue"],
"8": ["gold", "goldenrod"],
"9": ["gold", "goldenrod"],
"10": ["violet", "darkorchid"],
"11": ["lightgreen", "darkgreen"],
"12": ["tomato", "darksalmon"],
"14": ["whitesmoke", "black"],
"15": ["seagreen", "darkseagreen"]
}
}\
"""
@@ -696,6 +771,7 @@ def api_tac(_: gr.Blocks, app: FastAPI):
@app.post("/tacapi/v1/refresh-temp-files")
async def api_refresh_temp_files():
await sleep(0) # might help with refresh blocking gradio
refresh_temp_files()
@app.post("/tacapi/v1/refresh-embeddings")
@@ -831,5 +907,5 @@ def api_tac(_: gr.Blocks, app: FastAPI):
@app.get("/tacapi/v1/get-all-use-counts")
async def get_all_tag_counts():
return db_request(lambda: db.get_all_tags(), get=True)
script_callbacks.on_app_started(api_tac)

File diff suppressed because it is too large Load Diff

221787
tags/danbooru_e621_merged.csv Normal file

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

200358
tags/e621.csv

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff