diff --git a/javascript/ext_loras.js b/javascript/ext_loras.js
index eb72644..b035630 100644
--- a/javascript/ext_loras.js
+++ b/javascript/ext_loras.js
@@ -29,9 +29,9 @@ class LoraParser extends BaseTagParser {
 async function load() {
     if (loras.length === 0) {
         try {
-            loras = (await readFile(`${tagBasePath}/temp/lora.txt`)).split("\n")
-                .filter(x => x.trim().length > 0) // Remove empty lines
-                .map(x => x.trim().split(",")); // Remove carriage returns and padding if it exists, split into name, hash pairs
+            loras = (await loadCSV(`${tagBasePath}/temp/lora.txt`))
+                .filter(x => x[0].trim().length > 0) // Remove empty lines
+                .map(x => [x[0].trim(), x[1]]); // Remove carriage returns and padding if it exists, split into name, hash pairs
         } catch (e) {
             console.error("Error loading lora.txt: " + e);
         }
diff --git a/javascript/ext_lycos.js b/javascript/ext_lycos.js
index 5effb9d..b2da91c 100644
--- a/javascript/ext_lycos.js
+++ b/javascript/ext_lycos.js
@@ -29,9 +29,9 @@ class LycoParser extends BaseTagParser {
 async function load() {
     if (lycos.length === 0) {
         try {
-            lycos = (await readFile(`${tagBasePath}/temp/lyco.txt`)).split("\n")
-                .filter(x => x.trim().length > 0) // Remove empty lines
-                .map(x => x.trim().split(",")); // Remove carriage returns and padding if it exists, split into name, hash pairs
+            lycos = (await loadCSV(`${tagBasePath}/temp/lyco.txt`))
+                .filter(x => x[0].trim().length > 0) // Remove empty lines
+                .map(x => [x[0].trim(), x[1]]); // Remove carriage returns and padding if it exists, split into name, hash pairs
         } catch (e) {
             console.error("Error loading lyco.txt: " + e);
         }
diff --git a/javascript/ext_modelKeyword.js b/javascript/ext_modelKeyword.js
index 698466a..d21576e 100644
--- a/javascript/ext_modelKeyword.js
+++ b/javascript/ext_modelKeyword.js
@@ -5,21 +5,21 @@ async function load() {
     if (modelKeywordPath.length > 0 && modelKeywordDict.size === 0) {
         try {
-            let lines = [];
+            let csv_lines = [];
 
             // Only add default keywords if wanted by the user
             if (TAC_CFG.modelKeywordCompletion !== "Only user list")
-                lines = (await readFile(`${modelKeywordPath}/lora-keyword.txt`)).split("\n");
+                csv_lines = (await loadCSV(`${modelKeywordPath}/lora-keyword.txt`));
             // Add custom user keywords if the file exists
             if (customFileExists)
-                lines = lines.concat((await readFile(`${modelKeywordPath}/lora-keyword-user.txt`)).split("\n"));
+                csv_lines = csv_lines.concat((await loadCSV(`${modelKeywordPath}/lora-keyword-user.txt`)));
 
-            if (lines.length === 0) return;
+            if (csv_lines.length === 0) return;
+
+            csv_lines = csv_lines.filter(x => x[0].trim().length > 0 && x[0].trim()[0] !== "#") // Remove empty lines and comments
+            console.log(csv_lines)
 
-            lines = lines.filter(x => x.trim().length > 0 && x.trim()[0] !== "#") // Remove empty lines and comments
-
             // Add to the dict
-            lines.forEach(line => {
-                const parts = line.split(",");
+            csv_lines.forEach(parts => {
                 const hash = parts[0];
                 const keywords = parts[1].replaceAll("| ", ", ").replaceAll("|", ", ").trim();
                 const lastSepIndex = parts[2]?.lastIndexOf("/") + 1 || parts[2]?.lastIndexOf("\\") + 1 || 0;
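Note: the `loadCSV` helper the callers above now use is not part of this diff; it presumably lives in the extension's shared JavaScript utilities. As an illustration only, a minimal quote-aware parser satisfying the contract these callers rely on (one array of fields per line, double-quoted fields kept intact across commas) could look like the sketch below. `readFile` is the existing helper the old code used; escaped quotes inside fields are not handled here.

```javascript
// Hypothetical sketch only -- the real loadCSV is defined elsewhere in the
// extension. It returns one array of fields per line, honoring double quotes
// so lora names containing commas stay a single field.
async function loadCSV(path) {
    const text = await readFile(path);
    return text.split("\n").map(line => {
        const fields = [];
        let current = "";
        let inQuotes = false;
        for (const char of line.replace(/\r/g, "")) {
            if (char === '"') {
                inQuotes = !inQuotes; // toggle quoted state, drop the quote itself
            } else if (char === "," && !inQuotes) {
                fields.push(current); // an unquoted comma ends the field
                current = "";
            } else {
                current += char;
            }
        }
        fields.push(current);
        return fields;
    });
}
```

The quote handling is the point of the change: with the old `split(",")`, a name like `foo,bar` split into two fields and corrupted the name/hash pairing.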
"r", encoding="utf-8") as file: - for line in file: - name, hash, mtime = line.replace("\n", "").split(",") + reader = csv.reader( + file.readlines(), delimiter=",", quotechar='"', skipinitialspace=True + ) + for line in reader: + name, hash, mtime = line hash_dict[name] = (hash, mtime) @@ -26,7 +30,7 @@ def update_hash_cache(): if file_needs_update: with open(known_hashes_file, "w", encoding="utf-8") as file: for name, (hash, mtime) in hash_dict.items(): - file.write(f"{name},{hash},{mtime}\n") + file.write(f'"{name}",{hash},{mtime}\n') # Copy of the fast inaccurate hash function from the extension diff --git a/scripts/shared_paths.py b/scripts/shared_paths.py index 1ef7a53..a3d51a5 100644 --- a/scripts/shared_paths.py +++ b/scripts/shared_paths.py @@ -13,13 +13,13 @@ except ImportError: # Webui root path FILE_DIR = Path().absolute() # The extension base path - EXT_PATH = FILE_DIR.joinpath('extensions') + EXT_PATH = FILE_DIR.joinpath("extensions") # Tags base path -TAGS_PATH = Path(scripts.basedir()).joinpath('tags') +TAGS_PATH = Path(scripts.basedir()).joinpath("tags") # The path to the folder containing the wildcards and embeddings -WILDCARD_PATH = FILE_DIR.joinpath('scripts/wildcards') +WILDCARD_PATH = FILE_DIR.joinpath("scripts/wildcards") EMB_PATH = Path(shared.cmd_opts.embeddings_dir) HYP_PATH = Path(shared.cmd_opts.hypernetwork_dir) @@ -27,15 +27,16 @@ try: LORA_PATH = Path(shared.cmd_opts.lora_dir) except AttributeError: LORA_PATH = None - + try: LYCO_PATH = Path(shared.cmd_opts.lyco_dir) except AttributeError: LYCO_PATH = None + def find_ext_wildcard_paths(): """Returns the path to the extension wildcards folder""" - found = list(EXT_PATH.glob('*/wildcards/')) + found = list(EXT_PATH.glob("*/wildcards/")) return found @@ -43,11 +44,12 @@ def find_ext_wildcard_paths(): WILDCARD_EXT_PATHS = find_ext_wildcard_paths() # The path to the temporary files -STATIC_TEMP_PATH = FILE_DIR.joinpath('tmp') # In the webui root, on windows it exists by default, on linux it doesn't -TEMP_PATH = TAGS_PATH.joinpath('temp') # Extension specific temp files +# In the webui root, on windows it exists by default, on linux it doesn't +STATIC_TEMP_PATH = FILE_DIR.joinpath("tmp") +TEMP_PATH = TAGS_PATH.joinpath("temp") # Extension specific temp files # Make sure these folders exist if not TEMP_PATH.exists(): TEMP_PATH.mkdir() if not STATIC_TEMP_PATH.exists(): - STATIC_TEMP_PATH.mkdir() \ No newline at end of file + STATIC_TEMP_PATH.mkdir() diff --git a/scripts/tag_autocomplete_helper.py b/scripts/tag_autocomplete_helper.py index 628c8a4..5635aad 100644 --- a/scripts/tag_autocomplete_helper.py +++ b/scripts/tag_autocomplete_helper.py @@ -151,7 +151,7 @@ def get_lora(): # Sort sorted_loras = dict(sorted(hashes.items())) # Add hashes and return - return [f"{name},{hash}" for name, hash in sorted_loras.items()] + return [f"\"{name}\",{hash}" for name, hash in sorted_loras.items()] def get_lyco(): @@ -170,7 +170,7 @@ def get_lyco(): # Sort sorted_lycos = dict(sorted(hashes.items())) # Add hashes and return - return [f"{name},{hash}" for name, hash in sorted_lycos.items()] + return [f"\"{name}\",{hash}" for name, hash in sorted_lycos.items()] def write_tag_base_path():