Mirror of https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git, synced 2026-01-26 19:19:57 +00:00
Compare commits
4 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | b28497764f |  |
|  | 0d9d5f1e44 |  |
|  | de3380818e |  |
|  | acb85d7bb1 |  |
```diff
@@ -29,9 +29,9 @@ class LoraParser extends BaseTagParser {
 async function load() {
     if (loras.length === 0) {
         try {
-            loras = (await readFile(`${tagBasePath}/temp/lora.txt`)).split("\n")
-                .filter(x => x.trim().length > 0) // Remove empty lines
-                .map(x => x.trim().split(",")); // Remove carriage returns and padding if it exists, split into name, hash pairs
+            loras = (await loadCSV(`${tagBasePath}/temp/lora.txt`))
+                .filter(x => x[0]?.trim().length > 0) // Remove empty lines
+                .map(x => [x[0]?.trim(), x[1]]); // Trim filenames and return the name, hash pairs
         } catch (e) {
             console.error("Error loading lora.txt: " + e);
         }
```
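The motivation behind swapping `readFile(...).split("\n")` for `loadCSV` is that model filenames can themselves contain commas, which a plain `split(",")` cuts apart. A minimal Python sketch of the difference (the name and hash below are invented; the temp file itself is produced by the Python side shown further down):

```python
import csv
import io

# Hypothetical line from temp/lora.txt; name and hash are made-up examples.
line = '"style, painterly v2",a1b2c3d4'

# Old approach: split on every comma -> the name is cut in half.
naive = line.split(",")
print(naive)  # ['"style', ' painterly v2"', 'a1b2c3d4']

# New approach: parse as CSV so the quoted name survives its embedded comma.
row = next(csv.reader(io.StringIO(line), delimiter=",", quotechar='"', skipinitialspace=True))
print(row)    # ['style, painterly v2', 'a1b2c3d4']
name, hash_ = row[0].strip(), row[1]
```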
```diff
@@ -29,9 +29,9 @@ class LycoParser extends BaseTagParser {
 async function load() {
     if (lycos.length === 0) {
         try {
-            lycos = (await readFile(`${tagBasePath}/temp/lyco.txt`)).split("\n")
-                .filter(x => x.trim().length > 0) // Remove empty lines
-                .map(x => x.trim().split(",")); // Remove carriage returns and padding if it exists, split into name, hash pairs
+            lycos = (await loadCSV(`${tagBasePath}/temp/lyco.txt`))
+                .filter(x => x[0]?.trim().length > 0) // Remove empty lines
+                .map(x => [x[0]?.trim(), x[1]]); // Trim filenames and return the name, hash pairs
         } catch (e) {
             console.error("Error loading lyco.txt: " + e);
         }
```
```diff
@@ -5,21 +5,20 @@ async function load() {
 
     if (modelKeywordPath.length > 0 && modelKeywordDict.size === 0) {
         try {
-            let lines = [];
+            let csv_lines = [];
             // Only add default keywords if wanted by the user
             if (TAC_CFG.modelKeywordCompletion !== "Only user list")
-                lines = (await readFile(`${modelKeywordPath}/lora-keyword.txt`)).split("\n");
+                csv_lines = (await loadCSV(`${modelKeywordPath}/lora-keyword.txt`));
             // Add custom user keywords if the file exists
             if (customFileExists)
-                lines = lines.concat((await readFile(`${modelKeywordPath}/lora-keyword-user.txt`)).split("\n"));
+                csv_lines = csv_lines.concat((await loadCSV(`${modelKeywordPath}/lora-keyword-user.txt`)));
 
-            if (lines.length === 0) return;
+            if (csv_lines.length === 0) return;
 
-            lines = lines.filter(x => x.trim().length > 0 && x.trim()[0] !== "#") // Remove empty lines and comments
+            csv_lines = csv_lines.filter(x => x[0].trim().length > 0 && x[0].trim()[0] !== "#") // Remove empty lines and comments
 
             // Add to the dict
-            lines.forEach(line => {
-                const parts = line.split(",");
+            csv_lines.forEach(parts => {
                 const hash = parts[0];
                 const keywords = parts[1].replaceAll("| ", ", ").replaceAll("|", ", ").trim();
                 const lastSepIndex = parts[2]?.lastIndexOf("/") + 1 || parts[2]?.lastIndexOf("\\") + 1 || 0;
```
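For the model-keyword list, each parsed row appears to hold a hash, a `|`-separated keyword string, and an optional path whose basename is used for per-file matching. A rough Python sketch of that per-row handling, assuming the column layout `(hash, keywords, path)` and using made-up values:

```python
# Hypothetical row as the CSV loader might return it; values are invented.
parts = ["a1b2c3d4", "keyword one| keyword two|keyword three", "models/Lora/subdir/my_lora.safetensors"]

hash_ = parts[0]
# Mirror of the JS replaceAll chain: turn "|"-separated keywords into ", "-separated.
keywords = parts[1].replace("| ", ", ").replace("|", ", ").strip()

# Mirror of lastSepIndex: keep only the basename of the optional third column.
path = parts[2] if len(parts) > 2 else None
if path:
    last_sep = max(path.rfind("/"), path.rfind("\\")) + 1
    filename = path[last_sep:]
else:
    filename = None

print(hash_, keywords, filename)
# a1b2c3d4 keyword one, keyword two, keyword three my_lora.safetensors
```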
```diff
@@ -449,12 +449,18 @@ async function insertTextAtCursor(textArea, result, tagword, tabCompletedWithout
     if (result.hash && result.hash !== "NOFILE" && result.hash.length > 0) {
         let keywords = null;
         let nameDict = modelKeywordDict.get(result.hash);
-        let name = result.text + ".safetensors";
+        let names = [result.text + ".safetensors", result.text + ".pt", result.text + ".ckpt"];
 
         if (nameDict) {
-            if (nameDict.has(name))
-                keywords = nameDict.get(name);
-            else
+            let found = false;
+            names.forEach(name => {
+                if (!found && nameDict.has(name)) {
+                    found = true;
+                    keywords = nameDict.get(name);
+                }
+            });
+
+            if (!found)
                 keywords = nameDict.get("none");
         }
```
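On the insertion side, the lookup now tries several candidate filenames instead of only `.safetensors`, falling back to the `"none"` entry. The same fallback idea as a small Python sketch with an invented keyword dictionary:

```python
# Hypothetical per-hash dictionary: filename -> keywords (values are made up).
name_dict = {
    "cool_lora.pt": "trigger word",
    "none": "generic trigger",
}

base = "cool_lora"
candidates = [base + ext for ext in (".safetensors", ".pt", ".ckpt")]

# Take the first candidate that has an entry, otherwise fall back to "none".
keywords = next((name_dict[n] for n in candidates if n in name_dict), name_dict.get("none"))
print(keywords)  # trigger word
```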
```diff
@@ -1,5 +1,6 @@
 # This file provides support for the model-keyword extension to add known lora keywords on completion
 
+import csv
 import hashlib
 from pathlib import Path
```
```diff
@@ -7,8 +8,6 @@ from scripts.shared_paths import EXT_PATH, STATIC_TEMP_PATH, TEMP_PATH
 
 # Set up our hash cache
 known_hashes_file = TEMP_PATH.joinpath("known_lora_hashes.txt")
-if not TEMP_PATH.exists():
-    TEMP_PATH.mkdir()
 known_hashes_file.touch()
 file_needs_update = False
 
```
```diff
@@ -18,8 +17,11 @@ hash_dict = {}
 
 def load_hash_cache():
     with open(known_hashes_file, "r", encoding="utf-8") as file:
-        for line in file:
-            name, hash, mtime = line.replace("\n", "").split(",")
+        reader = csv.reader(
+            file.readlines(), delimiter=",", quotechar='"', skipinitialspace=True
+        )
+        for line in reader:
+            name, hash, mtime = line
             hash_dict[name] = (hash, mtime)
 
```
```diff
@@ -28,7 +30,7 @@ def update_hash_cache():
     if file_needs_update:
         with open(known_hashes_file, "w", encoding="utf-8") as file:
             for name, (hash, mtime) in hash_dict.items():
-                file.write(f"{name},{hash},{mtime}\n")
+                file.write(f'"{name}",{hash},{mtime}\n')
 
 
 # Copy of the fast inaccurate hash function from the extension
```
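Taken together, the two hash-cache changes write the model name quoted and read it back through the `csv` module, so a filename containing a comma no longer corrupts the three-column cache. A minimal round-trip sketch with invented name, hash, and mtime values:

```python
import csv
import io

# Write one cache entry the way update_hash_cache now formats it.
name, hash_, mtime = "detail, tweaker", "deadbeef", "1700000000.0"
buf = io.StringIO()
buf.write(f'"{name}",{hash_},{mtime}\n')

# Read it back the way load_hash_cache now parses it.
buf.seek(0)
reader = csv.reader(buf.readlines(), delimiter=",", quotechar='"', skipinitialspace=True)
for row in reader:
    parsed_name, parsed_hash, parsed_mtime = row
    assert (parsed_name, parsed_hash, parsed_mtime) == (name, hash_, mtime)
```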
```diff
@@ -13,13 +13,13 @@ except ImportError:
 # Webui root path
 FILE_DIR = Path().absolute()
 # The extension base path
-EXT_PATH = FILE_DIR.joinpath('extensions')
+EXT_PATH = FILE_DIR.joinpath("extensions")
 
 # Tags base path
-TAGS_PATH = Path(scripts.basedir()).joinpath('tags')
+TAGS_PATH = Path(scripts.basedir()).joinpath("tags")
 
 # The path to the folder containing the wildcards and embeddings
-WILDCARD_PATH = FILE_DIR.joinpath('scripts/wildcards')
+WILDCARD_PATH = FILE_DIR.joinpath("scripts/wildcards")
 EMB_PATH = Path(shared.cmd_opts.embeddings_dir)
 HYP_PATH = Path(shared.cmd_opts.hypernetwork_dir)
```
```diff
@@ -27,15 +27,16 @@ try:
     LORA_PATH = Path(shared.cmd_opts.lora_dir)
 except AttributeError:
     LORA_PATH = None
 
 try:
     LYCO_PATH = Path(shared.cmd_opts.lyco_dir)
 except AttributeError:
     LYCO_PATH = None
 
 
 def find_ext_wildcard_paths():
     """Returns the path to the extension wildcards folder"""
-    found = list(EXT_PATH.glob('*/wildcards/'))
+    found = list(EXT_PATH.glob("*/wildcards/"))
     return found
```
```diff
@@ -43,5 +44,12 @@ def find_ext_wildcard_paths():
 WILDCARD_EXT_PATHS = find_ext_wildcard_paths()
 
 # The path to the temporary files
-STATIC_TEMP_PATH = FILE_DIR.joinpath('tmp') # In the webui root, on windows it exists by default, on linux it doesn't
-TEMP_PATH = TAGS_PATH.joinpath('temp') # Extension specific temp files
+# In the webui root, on windows it exists by default, on linux it doesn't
+STATIC_TEMP_PATH = FILE_DIR.joinpath("tmp")
+TEMP_PATH = TAGS_PATH.joinpath("temp") # Extension specific temp files
+
+# Make sure these folders exist
+if not TEMP_PATH.exists():
+    TEMP_PATH.mkdir()
+if not STATIC_TEMP_PATH.exists():
+    STATIC_TEMP_PATH.mkdir()
```
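Creating `STATIC_TEMP_PATH` and `TEMP_PATH` here lets downstream scripts, such as the hash cache above, assume the folders exist. An equivalent and slightly more compact `pathlib` variant, sketched with stand-in paths:

```python
from pathlib import Path

# Hypothetical stand-ins for the real paths defined above.
STATIC_TEMP_PATH = Path("tmp")
TEMP_PATH = Path("tags") / "temp"

# parents=True / exist_ok=True makes creation idempotent and also
# covers the case where the parent folder is missing.
for folder in (TEMP_PATH, STATIC_TEMP_PATH):
    folder.mkdir(parents=True, exist_ok=True)
```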
```diff
@@ -151,7 +151,7 @@ def get_lora():
     # Sort
     sorted_loras = dict(sorted(hashes.items()))
     # Add hashes and return
-    return [f"{name},{hash}" for name, hash in sorted_loras.items()]
+    return [f"\"{name}\",{hash}" for name, hash in sorted_loras.items()]
 
 
 def get_lyco():
```
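Only the name column is quoted here while the hash stays bare; CSV readers accept rows that mix quoted and unquoted fields, so the front-end `loadCSV` change above still sees two clean columns. A short Python check with invented name/hash pairs:

```python
import csv

# Invented name/hash pairs standing in for sorted_loras.items().
sorted_loras = {"a, b mix": "11111111", "plain_lora": "22222222"}

lines = [f'"{name}",{hash_}' for name, hash_ in sorted_loras.items()]
print(lines)  # ['"a, b mix",11111111', '"plain_lora",22222222']

# A csv reader recovers the original names even when only one field is quoted.
for row in csv.reader(lines, delimiter=",", quotechar='"', skipinitialspace=True):
    print(row)  # ['a, b mix', '11111111'] then ['plain_lora', '22222222']
```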
```diff
@@ -170,7 +170,7 @@ def get_lyco():
     # Sort
     sorted_lycos = dict(sorted(hashes.items()))
     # Add hashes and return
-    return [f"{name},{hash}" for name, hash in sorted_lycos.items()]
+    return [f"\"{name}\",{hash}" for name, hash in sorted_lycos.items()]
 
 
 def write_tag_base_path():
```