Mirror of https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git (synced 2026-01-27 03:29:55 +00:00)
Compare commits
13 Commits
The following 13 commits are compared:

eb1e1820f9
bec567fe26
d4041096c9
0903259ddf
f3e64b1fa5
312cec5d71
b71e6339bd
7ddbc3c0b2
97c5e4f53c
1d8d9f64b5
829a4a7b89
22472ac8ad
5f77fa26d3
@@ -20,6 +20,10 @@ Booru style tag autocompletion for the AUTOMATIC1111 Stable Diffusion WebUI
 </div>
 <br/>
 
+#### ⚠️ Notice:
+I am currently looking for feedback on a new feature I'm working on and want to release soon.<br/>
+Please check [the announcement post](https://github.com/DominikDoom/a1111-sd-webui-tagcomplete/discussions/270) for more info if you are interested to help.
+
 # 📄 Description
 
 Tag Autocomplete is an extension for the popular [AUTOMATIC1111 web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) for Stable Diffusion.
@@ -189,7 +189,11 @@ function toNgrams(inputArray, size) {
     );
 }
 
-function escapeRegExp(string) {
+function escapeRegExp(string, wildcardMatching = false) {
+    if (wildcardMatching) {
+        // Escape all characters except asterisks and ?, which should be treated separately as placeholders.
+        return string.replace(/[-[\]{}()+.,\\^$|#\s]/g, '\\$&').replace(/\*/g, '.*').replace(/\?/g, '.');
+    }
     return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
 }
 function escapeHTML(unsafeText) {
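To make the new wildcard mode concrete, here is a minimal standalone sketch; the function body is copied from the hunk above, while the sample inputs are invented:

```js
// escapeRegExp as introduced above; the sample calls are illustrative only.
function escapeRegExp(string, wildcardMatching = false) {
    if (wildcardMatching) {
        // Escape everything except * and ?, which become regex placeholders.
        return string.replace(/[-[\]{}()+.,\\^$|#\s]/g, '\\$&').replace(/\*/g, '.*').replace(/\?/g, '.');
    }
    return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
}

console.log(escapeRegExp("1girl (solo)"));      // "1girl \(solo\)", fully literal
console.log(escapeRegExp("blue_*_eyes", true)); // "blue_.*_eyes", * matches any run
console.log(new RegExp(escapeRegExp("h?res", true), "i").test("HIRES")); // true, ? matches one char
```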
@@ -7,7 +7,10 @@ class ChantParser extends BaseTagParser {
         let tempResults = [];
         if (tagword !== "<" && tagword !== "<c:") {
             let searchTerm = tagword.replace("<chant:", "").replace("<c:", "").replace("<", "");
-            let filterCondition = x => x.terms.toLowerCase().includes(searchTerm) || x.name.toLowerCase().includes(searchTerm);
+            let filterCondition = x => {
+                let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
+                return regex.test(x.terms.toLowerCase()) || regex.test(x.name.toLowerCase());
+            };
             tempResults = chants.filter(x => filterCondition(x)); // Filter by tagword
         } else {
             tempResults = chants;
@@ -51,4 +54,4 @@ PARSERS.push(new ChantParser(CHANT_TRIGGER));
 // Add our utility functions to their respective queues
 QUEUE_FILE_LOAD.push(load);
 QUEUE_SANITIZE.push(sanitize);
-QUEUE_AFTER_CONFIG_CHANGE.push(load);
+QUEUE_AFTER_CONFIG_CHANGE.push(load);
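The same substitution (build one case-insensitive wildcard regex from the search term, then test each searchable field) repeats in every parser file below. A self-contained sketch with made-up chant data:

```js
// Stand-ins for the real chant list and helper; the data is invented.
function escapeRegExp(s, wildcardMatching = false) {
    if (wildcardMatching)
        return s.replace(/[-[\]{}()+.,\\^$|#\s]/g, '\\$&').replace(/\*/g, '.*').replace(/\?/g, '.');
    return s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
}

const chants = [
    { name: "Quality boost", terms: "masterpiece, best quality" },
    { name: "Portrait", terms: "close-up, detailed face" },
];
const searchTerm = "qual*boost";
const regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
const matches = chants.filter(x => regex.test(x.terms.toLowerCase()) || regex.test(x.name.toLowerCase()));
console.log(matches.map(x => x.name)); // ["Quality boost"]
```

Compared with the old includes() check, a term like "qual*boost" now matches even though that literal substring never occurs.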
@@ -16,7 +16,10 @@ class EmbeddingParser extends BaseTagParser {
             searchTerm = searchTerm.slice(3);
         }
 
-        let filterCondition = x => x[0].toLowerCase().includes(searchTerm) || x[0].toLowerCase().replaceAll(" ", "_").includes(searchTerm);
+        let filterCondition = x => {
+            let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
+            return regex.test(x[0].toLowerCase()) || regex.test(x[0].toLowerCase().replaceAll(" ", "_"));
+        };
 
         if (versionString)
             tempResults = embeddings.filter(x => filterCondition(x) && x[2] && x[2].toLowerCase() === versionString.toLowerCase()); // Filter by tagword
@@ -29,7 +32,11 @@ class EmbeddingParser extends BaseTagParser {
         // Add final results
         let finalResults = [];
         tempResults.forEach(t => {
-            let result = new AutocompleteResult(t[0].trim(), ResultType.embedding)
+            let lastDot = t[0].lastIndexOf(".") > -1 ? t[0].lastIndexOf(".") : t[0].length;
+            let lastSlash = t[0].lastIndexOf("/") > -1 ? t[0].lastIndexOf("/") : -1;
+            let name = t[0].trim().substring(lastSlash + 1, lastDot);
+
+            let result = new AutocompleteResult(name, ResultType.embedding)
             result.sortKey = t[1];
             result.meta = t[2] + " Embedding";
             finalResults.push(result);
@@ -62,4 +69,4 @@ PARSERS.push(new EmbeddingParser(EMB_TRIGGER));
 
 // Add our utility functions to their respective queues
 QUEUE_FILE_LOAD.push(load);
-QUEUE_SANITIZE.push(sanitize);
+QUEUE_SANITIZE.push(sanitize);
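The second hunk changes what gets displayed: instead of the raw stored path, the parser now cuts off the directory prefix and file extension. A standalone sketch (the helper name and sample paths are invented):

```js
// Mirrors the name extraction from the hunk above; t0 stands in for t[0].
function embeddingDisplayName(t0) {
    let lastDot = t0.lastIndexOf(".") > -1 ? t0.lastIndexOf(".") : t0.length;
    let lastSlash = t0.lastIndexOf("/") > -1 ? t0.lastIndexOf("/") : -1;
    return t0.trim().substring(lastSlash + 1, lastDot);
}

console.log(embeddingDisplayName("negatives/easynegative.safetensors")); // "easynegative"
console.log(embeddingDisplayName("bad_prompt")); // "bad_prompt", nothing to strip
```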
@@ -7,7 +7,10 @@ class HypernetParser extends BaseTagParser {
         let tempResults = [];
         if (tagword !== "<" && tagword !== "<h:" && tagword !== "<hypernet:") {
             let searchTerm = tagword.replace("<hypernet:", "").replace("<h:", "").replace("<", "");
-            let filterCondition = x => x.toLowerCase().includes(searchTerm) || x.toLowerCase().replaceAll(" ", "_").includes(searchTerm);
+            let filterCondition = x => {
+                let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
+                return regex.test(x.toLowerCase()) || regex.test(x.toLowerCase().replaceAll(" ", "_"));
+            };
             tempResults = hypernetworks.filter(x => filterCondition(x[0])); // Filter by tagword
         } else {
             tempResults = hypernetworks;
@@ -49,4 +52,4 @@ PARSERS.push(new HypernetParser(HYP_TRIGGER));
 
 // Add our utility functions to their respective queues
 QUEUE_FILE_LOAD.push(load);
-QUEUE_SANITIZE.push(sanitize);
+QUEUE_SANITIZE.push(sanitize);
@@ -7,7 +7,10 @@ class LoraParser extends BaseTagParser {
         let tempResults = [];
         if (tagword !== "<" && tagword !== "<l:" && tagword !== "<lora:") {
             let searchTerm = tagword.replace("<lora:", "").replace("<l:", "").replace("<", "");
-            let filterCondition = x => x.toLowerCase().includes(searchTerm) || x.toLowerCase().replaceAll(" ", "_").includes(searchTerm);
+            let filterCondition = x => {
+                let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
+                return regex.test(x.toLowerCase()) || regex.test(x.toLowerCase().replaceAll(" ", "_"));
+            };
             tempResults = loras.filter(x => filterCondition(x[0])); // Filter by tagword
         } else {
             tempResults = loras;
@@ -61,4 +64,4 @@ PARSERS.push(new LoraParser(LORA_TRIGGER));
 
 // Add our utility functions to their respective queues
 QUEUE_FILE_LOAD.push(load);
-QUEUE_SANITIZE.push(sanitize);
+QUEUE_SANITIZE.push(sanitize);
@@ -7,7 +7,10 @@ class LycoParser extends BaseTagParser {
         let tempResults = [];
         if (tagword !== "<" && tagword !== "<l:" && tagword !== "<lyco:" && tagword !== "<lora:") {
             let searchTerm = tagword.replace("<lyco:", "").replace("<lora:", "").replace("<l:", "").replace("<", "");
-            let filterCondition = x => x.toLowerCase().includes(searchTerm) || x.toLowerCase().replaceAll(" ", "_").includes(searchTerm);
+            let filterCondition = x => {
+                let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
+                return regex.test(x.toLowerCase()) || regex.test(x.toLowerCase().replaceAll(" ", "_"));
+            };
             tempResults = lycos.filter(x => filterCondition(x[0])); // Filter by tagword
         } else {
             tempResults = lycos;
@@ -62,4 +65,4 @@ PARSERS.push(new LycoParser(LYCO_TRIGGER));
 
 // Add our utility functions to their respective queues
 QUEUE_FILE_LOAD.push(load);
-QUEUE_SANITIZE.push(sanitize);
+QUEUE_SANITIZE.push(sanitize);
@@ -18,7 +18,10 @@ class StyleParser extends BaseTagParser {
         if (tagword !== matchGroups[1]) {
             let searchTerm = tagword.replace(matchGroups[1], "");
 
-            let filterCondition = x => x[0].toLowerCase().includes(searchTerm) || x[0].toLowerCase().replaceAll(" ", "_").includes(searchTerm);
+            let filterCondition = x => {
+                let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
+                return regex.test(x[0].toLowerCase()) || regex.test(x[0].toLowerCase().replaceAll(" ", "_"));
+            };
             tempResults = styleNames.filter(x => filterCondition(x)); // Filter by tagword
         } else {
             tempResults = styleNames;
@@ -64,4 +67,4 @@ PARSERS.push(new StyleParser(STYLE_TRIGGER));
 
 // Add our utility functions to their respective queues
 QUEUE_FILE_LOAD.push(load);
-QUEUE_SANITIZE.push(sanitize);
+QUEUE_SANITIZE.push(sanitize);
@@ -216,7 +216,7 @@ async function syncOptions() {
         includeEmbeddingsInNormalResults: opts["tac_includeEmbeddingsInNormalResults"],
         useHypernetworks: opts["tac_useHypernetworks"],
         useLoras: opts["tac_useLoras"],
-        useLycos: opts["tac_useLycos"],
+        useLycos: opts["tac_useLycos"],
         useLoraPrefixForLycos: opts["tac_useLoraPrefixForLycos"],
         showWikiLinks: opts["tac_showWikiLinks"],
         showExtraNetworkPreviews: opts["tac_showExtraNetworkPreviews"],
@@ -1159,12 +1159,17 @@ function navigateInList(textArea, event) {
 
     if (!validKeys.includes(event.key)) return;
    if (!isVisible(textArea)) return
-    // Return if ctrl key is pressed to not interfere with weight editing shortcut
-    if (event.ctrlKey || event.altKey || event.shiftKey || event.metaKey) return;
+    // Add modifier keys to base as text+.
+    let modKey = "";
+    if (event.ctrlKey) modKey += "Ctrl+";
+    if (event.altKey) modKey += "Alt+";
+    if (event.shiftKey) modKey += "Shift+";
+    if (event.metaKey) modKey += "Meta+";
+    modKey += event.key;
 
     oldSelectedTag = selectedTag;
 
-    switch (event.key) {
+    switch (modKey) {
        case keys["MoveUp"]:
            if (selectedTag === null) {
                selectedTag = resultCount - 1;
@@ -1235,6 +1240,8 @@ function navigateInList(textArea, event) {
         case keys["Close"]:
             hideResults(textArea);
             break;
+        default:
+            if (event.ctrlKey || event.altKey || event.shiftKey || event.metaKey) return;
     }
     let moveKeys = [keys["MoveUp"], keys["MoveDown"], keys["JumpUp"], keys["JumpDown"], keys["JumpToStart"], keys["JumpToEnd"]];
     if (selectedTag === resultCount - 1 && moveKeys.includes(event.key)) {
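The navigation rework folds modifier keys into the switch value, so configured hotkeys can include modifiers instead of being rejected outright; modified presses that match no binding still bail out in the new default case. A hedged sketch of the composition logic (the event objects are stand-ins for real KeyboardEvents):

```js
// Builds the composite key name the way the new switch expects it.
function toModKey(event) {
    let modKey = "";
    if (event.ctrlKey) modKey += "Ctrl+";
    if (event.altKey) modKey += "Alt+";
    if (event.shiftKey) modKey += "Shift+";
    if (event.metaKey) modKey += "Meta+";
    return modKey + event.key;
}

console.log(toModKey({ key: "ArrowUp" }));                            // "ArrowUp"
console.log(toModKey({ ctrlKey: true, key: "ArrowUp" }));             // "Ctrl+ArrowUp"
console.log(toModKey({ shiftKey: true, metaKey: true, key: "Tab" })); // "Shift+Meta+Tab"
```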
@@ -162,26 +162,41 @@ def get_embeddings(sd_model):
     emb_v1 = []
     emb_v2 = []
     emb_vXL = []
+    emb_unknown = []
     results = []
 
     try:
+        # The sd_model embedding_db reference only exists in sd.next with diffusers backend
+        try:
+            loaded_sdnext = sd_model.embedding_db.word_embeddings
+            skipped_sdnext = sd_model.embedding_db.skipped_embeddings
+        except (NameError, AttributeError):
+            loaded_sdnext = {}
+            skipped_sdnext = {}
+
         # Get embedding dict from sd_hijack to separate v1/v2 embeddings
         loaded = sd_hijack.model_hijack.embedding_db.word_embeddings
         skipped = sd_hijack.model_hijack.embedding_db.skipped_embeddings
+        loaded = loaded | loaded_sdnext
+        skipped = skipped | skipped_sdnext
 
         # Add embeddings to the correct list
         for key, emb in (loaded | skipped).items():
-            if emb.filename is None or emb.shape is None:
+            if emb.filename is None:
                 continue
 
-            if emb.shape == V1_SHAPE:
-                emb_v1.append((Path(emb.filename), key, "v1"))
+            if emb.shape is None:
+                emb_unknown.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), ""))
+            elif emb.shape == V1_SHAPE:
+                emb_v1.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "v1"))
             elif emb.shape == V2_SHAPE:
-                emb_v2.append((Path(emb.filename), key, "v2"))
+                emb_v2.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "v2"))
             elif emb.shape == VXL_SHAPE:
-                emb_vXL.append((Path(emb.filename), key, "vXL"))
+                emb_vXL.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "vXL"))
+            else:
+                emb_unknown.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), ""))
 
-        results = sort_models(emb_v1) + sort_models(emb_v2) + sort_models(emb_vXL)
+        results = sort_models(emb_v1) + sort_models(emb_v2) + sort_models(emb_vXL) + sort_models(emb_unknown)
     except AttributeError:
         print("tag_autocomplete_helper: Old webui version or unrecognized model shape, using fallback for embedding completion.")
         # Get a list of all embeddings in the folder
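The net effect of this hunk is twofold: embeddings are now labeled by their path relative to the embeddings folder (POSIX separators, extension kept) rather than by their internal key, and embeddings with an unknown shape go into the new emb_unknown bucket instead of being dropped. Since the examples in this document use JavaScript, here is a rough JS rendition of the relative-path labeling (folder and file names are invented; the real code uses Python's Path.relative_to):

```js
// Illustrative only: approximates Path(emb.filename).relative_to(EMB_PATH).as_posix()
// for the simple case of a file under EMB_PATH.
const EMB_PATH = "/webui/embeddings"; // invented location
function relativeKey(filename) {
    const posix = filename.replace(/\\/g, "/"); // normalize Windows separators
    return posix.startsWith(EMB_PATH + "/") ? posix.slice(EMB_PATH.length + 1) : posix;
}

console.log(relativeKey("/webui/embeddings/negatives/easynegative.pt")); // "negatives/easynegative.pt"
```

That folder-qualified, extension-bearing label is exactly what the EmbeddingParser hunk earlier trims back down for display.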
@@ -272,13 +287,21 @@ except Exception as e:
     # print(f'Exception setting-up performant fetchers: {e}')
 
 
+def is_visible(p: Path) -> bool:
+    if getattr(shared.opts, "extra_networks_hidden_models", "When searched") != "Never":
+        return True
+    for part in p.parts:
+        if part.startswith('.'):
+            return False
+    return True
+
 def get_lora():
     """Write a list of all lora"""
     # Get hashes
     valid_loras = _get_lora()
     loras_with_hash = []
     for l in valid_loras:
-        if not l.exists() or not l.is_file():
+        if not l.exists() or not l.is_file() or not is_visible(l):
             continue
         name = l.relative_to(LORA_PATH).as_posix()
         if model_keyword_installed:
@@ -296,7 +319,7 @@ def get_lyco():
     valid_lycos = _get_lyco()
     lycos_with_hash = []
     for ly in valid_lycos:
-        if not ly.exists() or not ly.is_file():
+        if not ly.exists() or not ly.is_file() or not is_visible(ly):
             continue
         name = ly.relative_to(LYCO_PATH).as_posix()
         if model_keyword_installed:
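The new is_visible helper only filters when the webui option extra_networks_hidden_models is set to "Never"; in that case, anything under a dot-directory is excluded from the lora and lyco lists. For consistency with the other examples, a JavaScript rendition of the same check (option value and paths are stand-ins):

```js
// JS rendition of the Python is_visible above; illustrative only.
function isVisible(relPath, hiddenModelsOption = "When searched") {
    // If the webui may show hidden models at all, don't filter here.
    if (hiddenModelsOption !== "Never") return true;
    // Otherwise hide anything that lives inside a dot-directory.
    return relPath.split("/").every(part => !part.startsWith("."));
}

console.log(isVisible(".archive/old_style.safetensors", "Never")); // false
console.log(isVisible("characters/alice.safetensors", "Never"));   // true
```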
@@ -560,6 +583,20 @@ def on_ui_settings():
         "6": ["red", "maroon"],
         "7": ["whitesmoke", "black"],
         "8": ["seagreen", "darkseagreen"]
+    },
+    "derpibooru": {
+        "-1": ["red", "maroon"],
+        "0": ["#60d160", "#3d9d3d"],
+        "1": ["#fff956", "#918e2e"],
+        "3": ["#fd9961", "#a14c2e"],
+        "4": ["#cf5bbe", "#6c1e6c"],
+        "5": ["#3c8ad9", "#1e5e93"],
+        "6": ["#a6a6a6", "#555555"],
+        "7": ["#47abc1", "#1f6c7c"],
+        "8": ["#7871d0", "#392f7d"],
+        "9": ["#df3647", "#8e1c2b"],
+        "10": ["#c98f2b", "#7b470e"],
+        "11": ["#e87ebe", "#a83583"]
     }
 }\
 """
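Each entry in this settings block maps a source's tag category number to a color pair; judging from pairs like ["whitesmoke", "black"], the first value plausibly targets dark mode and the second light mode, though this diff does not confirm the order. A hypothetical lookup over that structure:

```js
// Hypothetical consumer of the color map shown above; not the extension's code.
const TAG_COLORS = {
    derpibooru: {
        "-1": ["red", "maroon"],
        "0": ["#60d160", "#3d9d3d"],
        "1": ["#fff956", "#918e2e"],
    },
};

// Assumed pair order: [darkModeColor, lightModeColor].
function colorFor(source, category, darkMode) {
    const pair = TAG_COLORS[source] && TAG_COLORS[source][String(category)];
    return pair ? pair[darkMode ? 0 : 1] : undefined;
}

console.log(colorFor("derpibooru", 0, true));  // "#60d160"
console.log(colorFor("derpibooru", 0, false)); // "#3d9d3d"
```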
File diffs suppressed because they are too large to display:

tags/EnglishDictionary.csv: 113301 lines (new file)
tags/danbooru.csv: 168662 lines
tags/derpibooru.csv: 95091 lines (new file)