Mirror of https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git
Synced 2026-01-27 11:39:55 +00:00

Compare commits: refactor-c ... feature-fu (10 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 4ec1bc3424 | |
| | cfe026f366 | |
| | c594156d30 | |
| | 2126a5d217 | |
| | a4a656ae23 | |
| | c59ffed049 | |
| | 625298bed6 | |
| | 8c0c308a6f | |
| | fa4a3bc036 | |
| | a8f175925f | |
@@ -1,204 +1,74 @@

// Create our TAC namespace
var TAC = TAC || {};

/**
 * @typedef {Object} TAC.CFG
 * @property {string} tagFile - Tag filename
 * @property {{ global: boolean, txt2img: boolean, img2img: boolean, negativePrompts: boolean, thirdParty: boolean, modelList: string, modelListMode: "Blacklist"|"Whitelist" }} activeIn - Settings for which parts of the UI the tag completion is active in.
 * @property {boolean} slidingPopup - Move completion popup together with text cursor
 * @property {number} maxResults - Maximum results
 * @property {boolean} showAllResults - Show all results
 * @property {number} resultStepLength - How many results to load at once
 * @property {number} delayTime - Time in ms to wait before triggering completion again
 * @property {boolean} useWildcards - Search for wildcards
 * @property {boolean} sortWildcardResults - Sort wildcard file contents alphabetically
 * @property {boolean} useEmbeddings - Search for embeddings
 * @property {boolean} includeEmbeddingsInNormalResults - Include embeddings in normal tag results
 * @property {boolean} useHypernetworks - Search for hypernetworks
 * @property {boolean} useLoras - Search for Loras
 * @property {boolean} useLycos - Search for LyCORIS/LoHa
 * @property {boolean} useLoraPrefixForLycos - Use the '<lora:' prefix instead of '<lyco:' for models in the LyCORIS folder
 * @property {boolean} showWikiLinks - Show '?' next to tags, linking to its Danbooru or e621 wiki page
 * @property {boolean} showExtraNetworkPreviews - Show preview thumbnails for extra networks if available
 * @property {string} modelSortOrder - Model sort order
 * @property {boolean} frequencySort - Locally record tag usage and sort frequent tags higher
 * @property {string} frequencyFunction - Function to use for frequency sorting
 * @property {number} frequencyMinCount - Minimum number of uses for a tag to be considered frequent
 * @property {number} frequencyMaxAge - Maximum days since last use for a tag to be considered frequent
 * @property {number} frequencyRecommendCap - Maximum number of recommended tags
 * @property {boolean} frequencyIncludeAlias - Frequency sorting matches aliases for frequent tags
 * @property {boolean} useStyleVars - Search for webui style names
 * @property {boolean} replaceUnderscores - Replace underscores with spaces on insertion
 * @property {string} replaceUnderscoresExclusionList - Underscore replacement exclusion list
 * @property {boolean} escapeParentheses - Escape parentheses on insertion
 * @property {boolean} appendComma - Append comma on tag autocompletion
 * @property {boolean} appendSpace - Append space on tag autocompletion
 * @property {boolean} alwaysSpaceAtEnd - Always append space if inserting at the end of the textbox
 * @property {string} wildcardCompletionMode - How to complete nested wildcard paths
 * @property {string} modelKeywordCompletion - Try to add known trigger words for LORA/LyCO models
 * @property {string} modelKeywordLocation - Where to insert the trigger keyword
 * @property {string} wcWrap - Wrapper characters for wildcard tags.
 * @property {{ searchByAlias: boolean, onlyShowAlias: boolean }} alias - Alias-related settings.
 * @property {{ translationFile: string, oldFormat: boolean, searchByTranslation: boolean, liveTranslation: boolean }} translation - Translation-related settings.
 * @property {{ extraFile: string, addMode: "Insert before"|"Insert after" }} extra - Extra file-related settings.
 * @property {string} chantFile - Chant filename
 * @property {number} extraNetworksDefaultMultiplier - Default multiplier for extra networks.
 * @property {string} extraNetworksSeparator - Separator used for extra networks.
 * @property {{ MoveUp: string, MoveDown: string, JumpUp: string, JumpDown: string, JumpToStart: string, JumpToEnd: string, ChooseSelected: string, ChooseFirstOrSelected: string, Close: string }} keymap - Custom key mappings for tag completion.
 * @property {{ [filename: string]: { [category: string]: string[] } }} colorMap - Color mapping for tag categories.
 */
/** @type {TAC.CFG} */
TAC.CFG = {
    // Main tag file
    tagFile: "",
    // Active in settings
    activeIn: {
        global: true,
        txt2img: true,
        img2img: true,
        negativePrompts: true,
        thirdParty: true,
        modelList: "",
        modelListMode: "Blacklist",
    },
    // Results related settings
    slidingPopup: true,
    maxResults: 8,
    showAllResults: false,
    resultStepLength: 500,
    delayTime: 100,
    useWildcards: true,
    sortWildcardResults: true,
    useEmbeddings: true,
    includeEmbeddingsInNormalResults: true,
    useHypernetworks: true,
    useLoras: true,
    useLycos: true,
    useLoraPrefixForLycos: true,
    showWikiLinks: false,
    showExtraNetworkPreviews: true,
    modelSortOrder: "Name",
    frequencySort: true,
    frequencyFunction: "Logarithmic (weak)",
    frequencyMinCount: 3,
    frequencyMaxAge: 30,
    frequencyRecommendCap: 10,
    frequencyIncludeAlias: false,
    useStyleVars: false,
    // Insertion related settings
    replaceUnderscores: true,
    replaceUnderscoresExclusionList: "0_0,(o)_(o),+_+,+_-,._.,<o>_<o>,<|>_<|>,=_=,>_<,3_3,6_9,>_o,@_@,^_^,o_o,u_u,x_x,|_|,||_||",
    escapeParentheses: true,
    appendComma: true,
    appendSpace: true,
    alwaysSpaceAtEnd: true,
    wildcardCompletionMode: "To next folder level",
    modelKeywordCompletion: "Never",
    modelKeywordLocation: "Start of prompt",
    wcWrap: "__", // to support custom wrapper chars set by dp_parser
    // Alias settings
    alias: {
        searchByAlias: true,
        onlyShowAlias: false,
    },
    // Translation settings
    translation: {
        translationFile: "None",
        oldFormat: false,
        searchByTranslation: true,
        liveTranslation: false,
    },
    // Extra file settings
    extra: {
        extraFile: "extra-quality-tags.csv",
        addMode: "Insert before",
    },
    // Chant file settings
    chantFile: "demo-chants.json",
    // Settings not from tac but still used by the script
    extraNetworksDefaultMultiplier: 1.0,
    extraNetworksSeparator: ", ",
    // Custom mapping settings
    keymap: {
        MoveUp: "ArrowUp",
        MoveDown: "ArrowDown",
        JumpUp: "PageUp",
        JumpDown: "PageDown",
        JumpToStart: "Home",
        JumpToEnd: "End",
        ChooseSelected: "Enter",
        ChooseFirstOrSelected: "Tab",
        Close: "Escape",
    },
    colorMap: {
        filename: { category: ["light", "dark"] },
    },
};
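A short usage sketch of the config object: the rest of the script reads these values straight off the namespace once the real webui settings are merged in. The helper name below is hypothetical; only the config keys come from the typedef above.

// Hypothetical helper illustrating how the activeIn settings gate completion.
function tacShouldComplete(tabName, isNegative) {
    const active = TAC.CFG.activeIn;
    if (!active.global) return false; // master switch
    if (isNegative && !active.negativePrompts) return false;
    if (tabName === "txt2img") return active.txt2img;
    if (tabName === "img2img") return active.img2img;
    return active.thirdParty; // everything else counts as third party
}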
TAC.Globals = new (function () {
    // Core components
    this.tagBasePath = "";
    this.modelKeywordPath = "";
    this.selfTrigger = false;

    // Tag completion data loaded from files
    this.allTags = [];
    this.translations = new Map();
    this.extras = [];
    // Same for tag-likes
    this.wildcardFiles = [];
    this.wildcardExtFiles = [];
    this.yamlWildcards = [];
    this.umiWildcards = [];
    this.embeddings = [];
    this.hypernetworks = [];
    this.loras = [];
    this.lycos = [];
    this.modelKeywordDict = new Map();
    this.chants = [];
    this.styleNames = [];

    // Selected model info for black/whitelisting
    this.currentModelHash = "";
    this.currentModelName = "";

    // Current results
    this.results = [];
    this.resultCount = 0;

    // Relevant for parsing
    this.previousTags = [];
    this.tagword = "";
    this.originalTagword = "";
    this.hideBlocked = false;

    // Tag selection for keyboard navigation
    this.selectedTag = null;
    this.oldSelectedTag = null;
    this.resultCountBeforeNormalTags = 0;

    // Lora keyword undo/redo history
    this.textBeforeKeywordInsertion = "";
    this.textAfterKeywordInsertion = "";
    this.lastEditWasKeywordInsertion = false;
    this.keywordInsertionUndone = false;

    // UMI
    this.umiPreviousTags = [];
})();
/// Extendability system:
/// Provides "queues" for other files of the script (or really any js)
/// to add functions to be called at certain points in the script.
/// Similar to a callback system, but primitive.
TAC.Ext = new (function () {
    // Queues
    this.QUEUE_AFTER_INSERT = [];
    this.QUEUE_AFTER_SETUP = [];
    this.QUEUE_FILE_LOAD = [];
    this.QUEUE_AFTER_CONFIG_CHANGE = [];
    this.QUEUE_SANITIZE = [];

    // List of parsers to try
    this.PARSERS = [];
})();

// Core components
var TAC_CFG = null;
var tagBasePath = "";
var modelKeywordPath = "";
var tacSelfTrigger = false;

// Tag completion data loaded from files
var allTags = [];
var translations = new Map();
var extras = [];
// Same for tag-likes
var wildcardFiles = [];
var wildcardExtFiles = [];
var yamlWildcards = [];
var umiWildcards = [];
var embeddings = [];
var hypernetworks = [];
var loras = [];
var lycos = [];
var modelKeywordDict = new Map();
var chants = [];
var styleNames = [];

// uFuzzy haystacks
var tacHaystacks = {
    "tag": [],
    "extra": [],
    "tagAlias": [],
    "extraAlias": [],
    "translationKeys": [],
    "translationValues": []
};

// Selected model info for black/whitelisting
var currentModelHash = "";
var currentModelName = "";

// Current results
var results = [];
var resultCount = 0;

// Relevant for parsing
var previousTags = [];
var tagword = "";
var originalTagword = "";
let hideBlocked = false;

// Tag selection for keyboard navigation
var selectedTag = null;
var oldSelectedTag = null;
var resultCountBeforeNormalTags = 0;

// Lora keyword undo/redo history
var textBeforeKeywordInsertion = "";
var textAfterKeywordInsertion = "";
var lastEditWasKeywordInsertion = false;
var keywordInsertionUndone = false;

// UMI
var umiPreviousTags = [];

// Queues
const QUEUE_AFTER_INSERT = [];
const QUEUE_AFTER_SETUP = [];
const QUEUE_FILE_LOAD = [];
const QUEUE_AFTER_CONFIG_CHANGE = [];
const QUEUE_SANITIZE = [];

// List of parsers to try
const PARSERS = [];
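For orientation, extension files hook into these queues by pushing plain functions, as the chant, embedding, hypernetwork and lora extensions further down do. A minimal sketch against the new namespace (the setup handler body is illustrative; the sanitizer contract of returning text or null is taken from the extensions below):

// Runs once the main script finishes its setup pass.
TAC.Ext.QUEUE_AFTER_SETUP.push(() => {
    console.log("TAC setup done, custom hooks active");
});

// Sanitizers receive a result type and the raw text; returning null
// passes the text on to the next sanitizer in the queue.
TAC.Ext.QUEUE_SANITIZE.push((tagType, text) => null);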
@@ -1,21 +1,21 @@

TAC.FunctionNotOverriddenError = class FunctionNotOverriddenError extends Error {
    constructor(message = "", ...args) {
        super(message, ...args);
        this.message = message + " is an abstract base function and must be overwritten.";
    }
}

TAC.BaseTagParser = class BaseTagParser {
    triggerCondition = null;

    constructor (triggerCondition) {
        if (new.target === TAC.BaseTagParser) {
            throw new TypeError("Cannot construct abstract BaseCompletionParser directly");
        }
        this.triggerCondition = triggerCondition;
    }

    parse() {
        throw new TAC.FunctionNotOverriddenError("parse()");
    }
}

class FunctionNotOverriddenError extends Error {
    constructor(message = "", ...args) {
        super(message, ...args);
        this.message = message + " is an abstract base function and must be overwritten.";
    }
}

class BaseTagParser {
    triggerCondition = null;

    constructor (triggerCondition) {
        if (new.target === BaseTagParser) {
            throw new TypeError("Cannot construct abstract BaseCompletionParser directly");
        }
        this.triggerCondition = triggerCondition;
    }

    parse() {
        throw new FunctionNotOverriddenError("parse()");
    }
}
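To illustrate the contract: a concrete parser subclasses TAC.BaseTagParser, overrides parse() to return TAC.AutocompleteResult objects, and is registered together with a trigger condition. The parser below is a made-up example; the real ones (ChantParser, LoraParser, ...) follow the same shape later in this compare.

// Hypothetical minimal parser for demonstration only.
const DEMO_TRIGGER = () => TAC.Globals.tagword.startsWith("$");

class DemoParser extends TAC.BaseTagParser {
    parse() {
        let result = new TAC.AutocompleteResult("demo_tag", TAC.ResultType.tag);
        result.meta = "Demo";
        return [result];
    }
}

TAC.Ext.PARSERS.push(new DemoParser(DEMO_TRIGGER));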
@@ -1,146 +1,145 @@

// From https://github.com/component/textarea-caret-position

TAC.getCaretCoordinates = class CaretUtils {
    // We'll copy the properties below into the mirror div.
    // Note that some browsers, such as Firefox, do not concatenate properties
    // into their shorthand (e.g. padding-top, padding-bottom etc. -> padding),
    // so we have to list every single property explicitly.
    static #properties = [
        "direction", // RTL support
        "boxSizing",
        "width", // on Chrome and IE, exclude the scrollbar, so the mirror div wraps exactly as the textarea does
        "height",
        "overflowX",
        "overflowY", // copy the scrollbar for IE

        "borderTopWidth",
        "borderRightWidth",
        "borderBottomWidth",
        "borderLeftWidth",
        "borderStyle",

        "paddingTop",
        "paddingRight",
        "paddingBottom",
        "paddingLeft",

        // https://developer.mozilla.org/en-US/docs/Web/CSS/font
        "fontStyle",
        "fontVariant",
        "fontWeight",
        "fontStretch",
        "fontSize",
        "fontSizeAdjust",
        "lineHeight",
        "fontFamily",

        "textAlign",
        "textTransform",
        "textIndent",
        "textDecoration", // might not make a difference, but better be safe

        "letterSpacing",
        "wordSpacing",

        "tabSize",
        "MozTabSize",
    ];

    static #isBrowser = typeof window !== "undefined";
    static #isFirefox = this.#isBrowser && window.mozInnerScreenX != null;

    static getCaretCoordinates(element, position, options) {
        if (!CaretUtils.#isBrowser) {
            throw new Error(
                "textarea-caret-position#getCaretCoordinates should only be called in a browser"
            );
        }

        var debug = (options && options.debug) || false;
        if (debug) {
            var el = document.querySelector("#input-textarea-caret-position-mirror-div");
            if (el) el.parentNode.removeChild(el);
        }

        // The mirror div will replicate the textarea's style
        var div = document.createElement("div");
        div.id = "input-textarea-caret-position-mirror-div";
        document.body.appendChild(div);

        var style = div.style;
        var computed = window.getComputedStyle
            ? window.getComputedStyle(element)
            : element.currentStyle; // currentStyle for IE < 9
        var isInput = element.nodeName === "INPUT";

        // Default textarea styles
        style.whiteSpace = "pre-wrap";
        if (!isInput) style.wordWrap = "break-word"; // only for textarea-s

        // Position off-screen
        style.position = "absolute"; // required to return coordinates properly
        if (!debug) style.visibility = "hidden"; // not 'display: none' because we want rendering

        // Transfer the element's properties to the div
        CaretUtils.#properties.forEach(function (prop) {
            if (isInput && prop === "lineHeight") {
                // Special case for <input>s because text is rendered centered and line height may be != height
                if (computed.boxSizing === "border-box") {
                    var height = parseInt(computed.height);
                    var outerHeight =
                        parseInt(computed.paddingTop) +
                        parseInt(computed.paddingBottom) +
                        parseInt(computed.borderTopWidth) +
                        parseInt(computed.borderBottomWidth);
                    var targetHeight = outerHeight + parseInt(computed.lineHeight);
                    if (height > targetHeight) {
                        style.lineHeight = height - outerHeight + "px";
                    } else if (height === targetHeight) {
                        style.lineHeight = computed.lineHeight;
                    } else {
                        style.lineHeight = 0;
                    }
                } else {
                    style.lineHeight = computed.height;
                }
            } else {
                style[prop] = computed[prop];
            }
        });

        if (CaretUtils.#isFirefox) {
            // Firefox lies about the overflow property for textareas: https://bugzilla.mozilla.org/show_bug.cgi?id=984275
            if (element.scrollHeight > parseInt(computed.height)) style.overflowY = "scroll";
        } else {
            style.overflow = "hidden"; // for Chrome to not render a scrollbar; IE keeps overflowY = 'scroll'
        }

        div.textContent = element.value.substring(0, position);
        // The second special handling for input type="text" vs textarea:
        // spaces need to be replaced with non-breaking spaces - http://stackoverflow.com/a/13402035/1269037
        if (isInput) div.textContent = div.textContent.replace(/\s/g, "\u00a0");

        var span = document.createElement("span");
        // Wrapping must be replicated *exactly*, including when a long word gets
        // onto the next line, with whitespace at the end of the line before (#7).
        // The *only* reliable way to do that is to copy the *entire* rest of the
        // textarea's content into the <span> created at the caret position.
        // For inputs, just '.' would be enough, but no need to bother.
        span.textContent = element.value.substring(position) || "."; // || because a completely empty faux span doesn't render at all
        div.appendChild(span);

        var coordinates = {
            top: span.offsetTop + parseInt(computed["borderTopWidth"]),
            left: span.offsetLeft + parseInt(computed["borderLeftWidth"]),
            height: parseInt(computed["lineHeight"]),
        };

        if (debug) {
            span.style.backgroundColor = "#aaa";
        } else {
            document.body.removeChild(div);
        }

        return coordinates;
    }
}

// We'll copy the properties below into the mirror div.
// Note that some browsers, such as Firefox, do not concatenate properties
// into their shorthand (e.g. padding-top, padding-bottom etc. -> padding),
// so we have to list every single property explicitly.
var properties = [
    'direction', // RTL support
    'boxSizing',
    'width', // on Chrome and IE, exclude the scrollbar, so the mirror div wraps exactly as the textarea does
    'height',
    'overflowX',
    'overflowY', // copy the scrollbar for IE

    'borderTopWidth',
    'borderRightWidth',
    'borderBottomWidth',
    'borderLeftWidth',
    'borderStyle',

    'paddingTop',
    'paddingRight',
    'paddingBottom',
    'paddingLeft',

    // https://developer.mozilla.org/en-US/docs/Web/CSS/font
    'fontStyle',
    'fontVariant',
    'fontWeight',
    'fontStretch',
    'fontSize',
    'fontSizeAdjust',
    'lineHeight',
    'fontFamily',

    'textAlign',
    'textTransform',
    'textIndent',
    'textDecoration', // might not make a difference, but better be safe

    'letterSpacing',
    'wordSpacing',

    'tabSize',
    'MozTabSize'
];

var isBrowser = (typeof window !== 'undefined');
var isFirefox = (isBrowser && window.mozInnerScreenX != null);

function getCaretCoordinates(element, position, options) {
    if (!isBrowser) {
        throw new Error('textarea-caret-position#getCaretCoordinates should only be called in a browser');
    }

    var debug = options && options.debug || false;
    if (debug) {
        var el = document.querySelector('#input-textarea-caret-position-mirror-div');
        if (el) el.parentNode.removeChild(el);
    }

    // The mirror div will replicate the textarea's style
    var div = document.createElement('div');
    div.id = 'input-textarea-caret-position-mirror-div';
    document.body.appendChild(div);

    var style = div.style;
    var computed = window.getComputedStyle ? window.getComputedStyle(element) : element.currentStyle; // currentStyle for IE < 9
    var isInput = element.nodeName === 'INPUT';

    // Default textarea styles
    style.whiteSpace = 'pre-wrap';
    if (!isInput)
        style.wordWrap = 'break-word'; // only for textarea-s

    // Position off-screen
    style.position = 'absolute'; // required to return coordinates properly
    if (!debug)
        style.visibility = 'hidden'; // not 'display: none' because we want rendering

    // Transfer the element's properties to the div
    properties.forEach(function (prop) {
        if (isInput && prop === 'lineHeight') {
            // Special case for <input>s because text is rendered centered and line height may be != height
            if (computed.boxSizing === "border-box") {
                var height = parseInt(computed.height);
                var outerHeight =
                    parseInt(computed.paddingTop) +
                    parseInt(computed.paddingBottom) +
                    parseInt(computed.borderTopWidth) +
                    parseInt(computed.borderBottomWidth);
                var targetHeight = outerHeight + parseInt(computed.lineHeight);
                if (height > targetHeight) {
                    style.lineHeight = height - outerHeight + "px";
                } else if (height === targetHeight) {
                    style.lineHeight = computed.lineHeight;
                } else {
                    style.lineHeight = 0;
                }
            } else {
                style.lineHeight = computed.height;
            }
        } else {
            style[prop] = computed[prop];
        }
    });

    if (isFirefox) {
        // Firefox lies about the overflow property for textareas: https://bugzilla.mozilla.org/show_bug.cgi?id=984275
        if (element.scrollHeight > parseInt(computed.height))
            style.overflowY = 'scroll';
    } else {
        style.overflow = 'hidden'; // for Chrome to not render a scrollbar; IE keeps overflowY = 'scroll'
    }

    div.textContent = element.value.substring(0, position);
    // The second special handling for input type="text" vs textarea:
    // spaces need to be replaced with non-breaking spaces - http://stackoverflow.com/a/13402035/1269037
    if (isInput)
        div.textContent = div.textContent.replace(/\s/g, '\u00a0');

    var span = document.createElement('span');
    // Wrapping must be replicated *exactly*, including when a long word gets
    // onto the next line, with whitespace at the end of the line before (#7).
    // The *only* reliable way to do that is to copy the *entire* rest of the
    // textarea's content into the <span> created at the caret position.
    // For inputs, just '.' would be enough, but no need to bother.
    span.textContent = element.value.substring(position) || '.'; // || because a completely empty faux span doesn't render at all
    div.appendChild(span);

    var coordinates = {
        top: span.offsetTop + parseInt(computed['borderTopWidth']),
        left: span.offsetLeft + parseInt(computed['borderLeftWidth']),
        height: parseInt(computed['lineHeight'])
    };

    if (debug) {
        span.style.backgroundColor = '#aaa';
    } else {
        document.body.removeChild(div);
    }

    return coordinates;
}
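Usage stays the same apart from the namespace wrapper: callers pass an element and a cursor position and get back coordinates relative to the element's content box. A sketch (the popup element and selector are illustrative; note the wrapped class exposes the function as a static method):

const textArea = gradioApp().querySelector("#txt2img_prompt > label > textarea");
const caret = TAC.getCaretCoordinates.getCaretCoordinates(textArea, textArea.selectionEnd);

// Place a hypothetical popup just below the caret line.
const popup = document.createElement("div");
popup.style.position = "absolute";
popup.style.left = `${textArea.offsetLeft + caret.left}px`;
popup.style.top = `${textArea.offsetTop + caret.top + caret.height}px`;
document.body.appendChild(popup);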
@@ -1,7 +1,7 @@

// Result data type for cleaner use of optional completion result properties

// Type enum
TAC.ResultType = Object.freeze({
const ResultType = Object.freeze({
    "tag": 1,
    "extra": 2,
    "embedding": 3,

@@ -17,10 +17,10 @@ TAC.ResultType = Object.freeze({
});

// Class to hold result data and annotations to make it clearer to use
TAC.AutocompleteResult = class AutocompleteResult {
class AutocompleteResult {
    // Main properties
    text = "";
    type = TAC.ResultType.tag;
    type = ResultType.tag;

@@ -30,6 +30,9 @@ TAC.AutocompleteResult = class AutocompleteResult {
    meta = null;
    hash = null;
    sortKey = null;
    // uFuzzy specific
    highlightedText = null;
    matchSource = null;

    // Constructor
    constructor(text, type) {
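For reference, this is how the parsers in this compare populate the optional fields; the values below are illustrative, the field names are the ones declared on the class:

let result = new TAC.AutocompleteResult("my_embedding", TAC.ResultType.embedding);
result.meta = "v1 Embedding"; // shown next to the entry in the popup
result.sortKey = "0";         // consulted by custom sort orders
result.hash = null;           // only set for types that have one, e.g. lora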
@@ -1,218 +1,197 @@

// Utility functions to select text areas the script should work on,
// including third party options.
// Supported third party options so far:
// - Dataset Tag Editor

TAC.TextAreas = new (function () {
    // Core text area selectors
    const core = [
        "#txt2img_prompt > label > textarea",
        "#img2img_prompt > label > textarea",
        "#txt2img_neg_prompt > label > textarea",
        "#img2img_neg_prompt > label > textarea",
        ".prompt > label > textarea",
        "#txt2img_edit_style_prompt > label > textarea",
        "#txt2img_edit_style_neg_prompt > label > textarea",
        "#img2img_edit_style_prompt > label > textarea",
        "#img2img_edit_style_neg_prompt > label > textarea",
    ];

    // Third party text area selectors
    const thirdParty = {
        "dataset-tag-editor": {
            base: "#tab_dataset_tag_editor_interface",
            hasIds: false,
            selectors: [
                "Caption of Selected Image",
                "Interrogate Result",
                "Edit Caption",
                "Edit Tags",
            ],
        },
        "image browser": {
            base: "#tab_image_browser",
            hasIds: false,
            selectors: ["Filename keyword search", "EXIF keyword search"],
        },
        tab_tagger: {
            base: "#tab_tagger",
            hasIds: false,
            selectors: ["Additional tags (split by comma)", "Exclude tags (split by comma)"],
        },
        "tiled-diffusion-t2i": {
            base: "#txt2img_script_container",
            hasIds: true,
            onDemand: true,
            selectors: [
                "[id^=MD-t2i][id$=prompt] textarea",
                "[id^=MD-t2i][id$=prompt] input[type='text']",
            ],
        },
        "tiled-diffusion-i2i": {
            base: "#img2img_script_container",
            hasIds: true,
            onDemand: true,
            selectors: [
                "[id^=MD-i2i][id$=prompt] textarea",
                "[id^=MD-i2i][id$=prompt] input[type='text']",
            ],
        },
        "adetailer-t2i": {
            base: "#txt2img_script_container",
            hasIds: true,
            onDemand: true,
            selectors: [
                "[id^=script_txt2img_adetailer_ad_prompt] textarea",
                "[id^=script_txt2img_adetailer_ad_negative_prompt] textarea",
            ],
        },
        "adetailer-i2i": {
            base: "#img2img_script_container",
            hasIds: true,
            onDemand: true,
            selectors: [
                "[id^=script_img2img_adetailer_ad_prompt] textarea",
                "[id^=script_img2img_adetailer_ad_negative_prompt] textarea",
            ],
        },
        "deepdanbooru-object-recognition": {
            base: "#tab_deepdanboru_object_recg_tab",
            hasIds: false,
            selectors: ["Found tags"],
        },
        TIPO: {
            base: "#tab_txt2img",
            hasIds: false,
            selectors: ["Tag Prompt"],
        },
    };

    this.getTextAreas = function () {
        // First get all core text areas
        let textAreas = [...gradioApp().querySelectorAll(core.join(", "))];

        for (const [key, entry] of Object.entries(thirdParty)) {
            if (entry.hasIds) {
                // If the entry has proper ids, we can just select them
                textAreas = textAreas.concat([
                    ...gradioApp().querySelectorAll(entry.selectors.join(", ")),
                ]);
            } else {
                // Otherwise, we have to find the text areas by their adjacent labels
                let base = gradioApp().querySelector(entry.base);

                // Safety check
                if (!base) continue;

                let allTextAreas = [...base.querySelectorAll("textarea, input[type='text']")];

                // Filter the text areas where the adjacent label matches one of the selectors
                let matchingTextAreas = allTextAreas.filter((ta) =>
                    [...ta.parentElement.childNodes].some((x) =>
                        entry.selectors.includes(x.innerText)
                    )
                );
                textAreas = textAreas.concat(matchingTextAreas);
            }
        }

        return textAreas;
    };

    this.addOnDemandObservers = function (setupFunction) {
        for (const [key, entry] of Object.entries(thirdParty)) {
            if (!entry.onDemand) continue;

            let base = gradioApp().querySelector(entry.base);
            if (!base) continue;

            let accordions = [...base?.querySelectorAll(".gradio-accordion")];
            if (!accordions) continue;

            accordions.forEach((acc) => {
                let accObserver = new MutationObserver((mutationList, observer) => {
                    for (const mutation of mutationList) {
                        if (mutation.type === "childList") {
                            let newChildren = mutation.addedNodes;
                            if (!newChildren) {
                                accObserver.disconnect();
                                continue;
                            }

                            newChildren.forEach((child) => {
                                if (
                                    child.classList.contains("gradio-accordion") ||
                                    child.querySelector(".gradio-accordion")
                                ) {
                                    let newAccordions = [
                                        ...child.querySelectorAll(".gradio-accordion"),
                                    ];
                                    newAccordions.forEach((nAcc) =>
                                        accObserver.observe(nAcc, { childList: true })
                                    );
                                }
                            });

                            if (entry.hasIds) {
                                // If the entry has proper ids, we can just select them
                                [
                                    ...gradioApp().querySelectorAll(entry.selectors.join(", ")),
                                ].forEach((x) => setupFunction(x));
                            } else {
                                // Otherwise, we have to find the text areas by their adjacent labels
                                let base = gradioApp().querySelector(entry.base);

                                // Safety check
                                if (!base) continue;

                                let allTextAreas = [
                                    ...base.querySelectorAll("textarea, input[type='text']"),
                                ];

                                // Filter the text areas where the adjacent label matches one of the selectors
                                let matchingTextAreas = allTextAreas.filter((ta) =>
                                    [...ta.parentElement.childNodes].some((x) =>
                                        entry.selectors.includes(x.innerText)
                                    )
                                );
                                matchingTextAreas.forEach((x) => setupFunction(x));
                            }
                        }
                    }
                });
                accObserver.observe(acc, { childList: true });
            });
        }
    };

    const thirdPartyIdSet = new Set();
    // Get the identifier for the text area to differentiate between positive and negative
    this.getTextAreaIdentifier = function (textArea) {
        let txt2img_p = gradioApp().querySelector("#txt2img_prompt > label > textarea");
        let txt2img_n = gradioApp().querySelector("#txt2img_neg_prompt > label > textarea");
        let img2img_p = gradioApp().querySelector("#img2img_prompt > label > textarea");
        let img2img_n = gradioApp().querySelector("#img2img_neg_prompt > label > textarea");

        let modifier = "";
        switch (textArea) {
            case txt2img_p:
                modifier = ".txt2img.p";
                break;
            case txt2img_n:
                modifier = ".txt2img.n";
                break;
            case img2img_p:
                modifier = ".img2img.p";
                break;
            case img2img_n:
                modifier = ".img2img.n";
                break;
            default:
                // If the text area is not a core text area, it must be a third party text area
                // Add it to the set of third party text areas and get its index as a unique identifier
                if (!thirdPartyIdSet.has(textArea)) thirdPartyIdSet.add(textArea);

                modifier = `.thirdParty.ta${[...thirdPartyIdSet].indexOf(textArea)}`;
                break;
        }
        return modifier;
    }
})();

// Core text area selectors
const core = [
    "#txt2img_prompt > label > textarea",
    "#img2img_prompt > label > textarea",
    "#txt2img_neg_prompt > label > textarea",
    "#img2img_neg_prompt > label > textarea",
    ".prompt > label > textarea",
    "#txt2img_edit_style_prompt > label > textarea",
    "#txt2img_edit_style_neg_prompt > label > textarea",
    "#img2img_edit_style_prompt > label > textarea",
    "#img2img_edit_style_neg_prompt > label > textarea"
];

// Third party text area selectors
const thirdParty = {
    "dataset-tag-editor": {
        "base": "#tab_dataset_tag_editor_interface",
        "hasIds": false,
        "selectors": [
            "Caption of Selected Image",
            "Interrogate Result",
            "Edit Caption",
            "Edit Tags"
        ]
    },
    "image browser": {
        "base": "#tab_image_browser",
        "hasIds": false,
        "selectors": [
            "Filename keyword search",
            "EXIF keyword search"
        ]
    },
    "tab_tagger": {
        "base": "#tab_tagger",
        "hasIds": false,
        "selectors": [
            "Additional tags (split by comma)",
            "Exclude tags (split by comma)"
        ]
    },
    "tiled-diffusion-t2i": {
        "base": "#txt2img_script_container",
        "hasIds": true,
        "onDemand": true,
        "selectors": [
            "[id^=MD-t2i][id$=prompt] textarea",
            "[id^=MD-t2i][id$=prompt] input[type='text']"
        ]
    },
    "tiled-diffusion-i2i": {
        "base": "#img2img_script_container",
        "hasIds": true,
        "onDemand": true,
        "selectors": [
            "[id^=MD-i2i][id$=prompt] textarea",
            "[id^=MD-i2i][id$=prompt] input[type='text']"
        ]
    },
    "adetailer-t2i": {
        "base": "#txt2img_script_container",
        "hasIds": true,
        "onDemand": true,
        "selectors": [
            "[id^=script_txt2img_adetailer_ad_prompt] textarea",
            "[id^=script_txt2img_adetailer_ad_negative_prompt] textarea"
        ]
    },
    "adetailer-i2i": {
        "base": "#img2img_script_container",
        "hasIds": true,
        "onDemand": true,
        "selectors": [
            "[id^=script_img2img_adetailer_ad_prompt] textarea",
            "[id^=script_img2img_adetailer_ad_negative_prompt] textarea"
        ]
    },
    "deepdanbooru-object-recognition": {
        "base": "#tab_deepdanboru_object_recg_tab",
        "hasIds": false,
        "selectors": [
            "Found tags",
        ]
    }
}

function getTextAreas() {
    // First get all core text areas
    let textAreas = [...gradioApp().querySelectorAll(core.join(", "))];

    for (const [key, entry] of Object.entries(thirdParty)) {
        if (entry.hasIds) { // If the entry has proper ids, we can just select them
            textAreas = textAreas.concat([...gradioApp().querySelectorAll(entry.selectors.join(", "))]);
        } else { // Otherwise, we have to find the text areas by their adjacent labels
            let base = gradioApp().querySelector(entry.base);

            // Safety check
            if (!base) continue;

            let allTextAreas = [...base.querySelectorAll("textarea, input[type='text']")];

            // Filter the text areas where the adjacent label matches one of the selectors
            let matchingTextAreas = allTextAreas.filter(ta => [...ta.parentElement.childNodes].some(x => entry.selectors.includes(x.innerText)));
            textAreas = textAreas.concat(matchingTextAreas);
        }
    }

    return textAreas;
}

function addOnDemandObservers(setupFunction) {
    for (const [key, entry] of Object.entries(thirdParty)) {
        if (!entry.onDemand) continue;

        let base = gradioApp().querySelector(entry.base);
        if (!base) continue;

        let accordions = [...base?.querySelectorAll(".gradio-accordion")];
        if (!accordions) continue;

        accordions.forEach(acc => {
            let accObserver = new MutationObserver((mutationList, observer) => {
                for (const mutation of mutationList) {
                    if (mutation.type === "childList") {
                        let newChildren = mutation.addedNodes;
                        if (!newChildren) {
                            accObserver.disconnect();
                            continue;
                        }

                        newChildren.forEach(child => {
                            if (child.classList.contains("gradio-accordion") || child.querySelector(".gradio-accordion")) {
                                let newAccordions = [...child.querySelectorAll(".gradio-accordion")];
                                newAccordions.forEach(nAcc => accObserver.observe(nAcc, { childList: true }));
                            }
                        });

                        if (entry.hasIds) { // If the entry has proper ids, we can just select them
                            [...gradioApp().querySelectorAll(entry.selectors.join(", "))].forEach(x => setupFunction(x));
                        } else { // Otherwise, we have to find the text areas by their adjacent labels
                            let base = gradioApp().querySelector(entry.base);

                            // Safety check
                            if (!base) continue;

                            let allTextAreas = [...base.querySelectorAll("textarea, input[type='text']")];

                            // Filter the text areas where the adjacent label matches one of the selectors
                            let matchingTextAreas = allTextAreas.filter(ta => [...ta.parentElement.childNodes].some(x => entry.selectors.includes(x.innerText)));
                            matchingTextAreas.forEach(x => setupFunction(x));
                        }
                    }
                }
            });
            accObserver.observe(acc, { childList: true });
        });
    }
}

const thirdPartyIdSet = new Set();
// Get the identifier for the text area to differentiate between positive and negative
function getTextAreaIdentifier(textArea) {
    let txt2img_p = gradioApp().querySelector('#txt2img_prompt > label > textarea');
    let txt2img_n = gradioApp().querySelector('#txt2img_neg_prompt > label > textarea');
    let img2img_p = gradioApp().querySelector('#img2img_prompt > label > textarea');
    let img2img_n = gradioApp().querySelector('#img2img_neg_prompt > label > textarea');

    let modifier = "";
    switch (textArea) {
        case txt2img_p:
            modifier = ".txt2img.p";
            break;
        case txt2img_n:
            modifier = ".txt2img.n";
            break;
        case img2img_p:
            modifier = ".img2img.p";
            break;
        case img2img_n:
            modifier = ".img2img.n";
            break;
        default:
            // If the text area is not a core text area, it must be a third party text area
            // Add it to the set of third party text areas and get its index as a unique identifier
            if (!thirdPartyIdSet.has(textArea))
                thirdPartyIdSet.add(textArea);

            modifier = `.thirdParty.ta${[...thirdPartyIdSet].indexOf(textArea)}`;
            break;
    }
    return modifier;
}
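The thirdParty map encodes the lookup strategy: entries with hasIds are selected directly by CSS selector, the rest are matched through the visible label text next to the input. A hypothetical entry for another extension's tab, added inside this file, would follow the same shape:

"my-extension": {
    base: "#tab_my_extension",    // scopes the query to that tab
    hasIds: false,                // so "selectors" are label texts, not CSS
    selectors: ["Prompt keywords"],
},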
javascript/_uFuzzy.js (new file, 250 lines)
File diff suppressed because one or more lines are too long.
@@ -1,66 +1,57 @@

(function ChantExtension() {
    const CHANT_REGEX = /<(?!e:|h:|l:)[^,> ]*>?/g;
    const CHANT_TRIGGER = () =>
        TAC.CFG.chantFile && TAC.CFG.chantFile !== "None" && TAC.Globals.tagword.match(CHANT_REGEX);

    class ChantParser extends TAC.BaseTagParser {
        parse() {
            // Show Chant
            let tempResults = [];
            if (TAC.Globals.tagword !== "<" && TAC.Globals.tagword !== "<c:") {
                let searchTerm = TAC.Globals.tagword
                    .replace("<chant:", "")
                    .replace("<c:", "")
                    .replace("<", "");
                let filterCondition = (x) => {
                    let regex = new RegExp(TAC.Utils.escapeRegExp(searchTerm, true), "i");
                    return regex.test(x.terms.toLowerCase()) || regex.test(x.name.toLowerCase());
                };
                tempResults = TAC.Globals.chants.filter((x) => filterCondition(x)); // Filter by tagword
            } else {
                tempResults = TAC.Globals.chants;
            }

            // Add final results
            let finalResults = [];
            tempResults.forEach((t) => {
                let result = new TAC.AutocompleteResult(t.content.trim(), TAC.ResultType.chant);
                result.meta = "Chant";
                result.aliases = t.name;
                result.category = t.color;
                finalResults.push(result);
            });

            return finalResults;
        }
    }

    async function load() {
        if (TAC.CFG.chantFile && TAC.CFG.chantFile !== "None") {
            try {
                TAC.Globals.chants = await TAC.Utils.readFile(
                    `${TAC.Globals.tagBasePath}/${TAC.CFG.chantFile}?`,
                    true
                );
            } catch (e) {
                console.error("Error loading chants.json: " + e);
            }
        } else {
            TAC.Globals.chants = [];
        }
    }

    function sanitize(tagType, text) {
        if (tagType === TAC.ResultType.chant) {
            return text;
        }
        return null;
    }

    TAC.Ext.PARSERS.push(new ChantParser(CHANT_TRIGGER));

    // Add our utility functions to their respective queues
    TAC.Ext.QUEUE_FILE_LOAD.push(load);
    TAC.Ext.QUEUE_SANITIZE.push(sanitize);
    TAC.Ext.QUEUE_AFTER_CONFIG_CHANGE.push(load);
})();

const CHANT_REGEX = /<(?!e:|h:|l:)[^,> ]*>?/g;
const CHANT_TRIGGER = () => TAC_CFG.chantFile && TAC_CFG.chantFile !== "None" && tagword.match(CHANT_REGEX);

class ChantParser extends BaseTagParser {
    parse() {
        // Show Chant
        let tempResults = [];
        if (tagword !== "<" && tagword !== "<c:") {
            let searchTerm = tagword.replace("<chant:", "").replace("<c:", "").replace("<", "");
            let filterCondition = x => {
                let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
                return regex.test(x.terms.toLowerCase()) || regex.test(x.name.toLowerCase());
            };
            tempResults = chants.filter(x => filterCondition(x)); // Filter by tagword
        } else {
            tempResults = chants;
        }

        // Add final results
        let finalResults = [];
        tempResults.forEach(t => {
            let result = new AutocompleteResult(t.content.trim(), ResultType.chant)
            result.meta = "Chant";
            result.aliases = t.name;
            result.category = t.color;
            finalResults.push(result);
        });

        return finalResults;
    }
}

async function load() {
    if (TAC_CFG.chantFile && TAC_CFG.chantFile !== "None") {
        try {
            chants = await readFile(`${tagBasePath}/${TAC_CFG.chantFile}?`, true);
        } catch (e) {
            console.error("Error loading chants.json: " + e);
        }
    } else {
        chants = [];
    }
}

function sanitize(tagType, text) {
    if (tagType === ResultType.chant) {
        return text;
    }
    return null;
}

PARSERS.push(new ChantParser(CHANT_TRIGGER));

// Add our utility functions to their respective queues
QUEUE_FILE_LOAD.push(load);
QUEUE_SANITIZE.push(sanitize);
QUEUE_AFTER_CONFIG_CHANGE.push(load);
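The chant file schema itself is not part of this compare, but the fields the parser reads (name, terms, content, color) suggest entries of roughly this shape; the values below are made up:

// Assumed shape of one entry in demo-chants.json.
const exampleChant = {
    name: "HighQuality",                                   // shown as alias
    terms: "quality,masterpiece",                          // matched against the search term
    content: "masterpiece, best quality, highly detailed", // inserted into the prompt
    color: 5,                                              // category used for coloring
};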
@@ -1,85 +1,72 @@

(function EmbeddingExtension() {
    const EMB_REGEX = /<(?!l:|h:|c:)[^,> ]*>?/g;
    const EMB_TRIGGER = () =>
        TAC.CFG.useEmbeddings &&
        (TAC.Globals.tagword.match(EMB_REGEX) || TAC.CFG.includeEmbeddingsInNormalResults);

    class EmbeddingParser extends TAC.BaseTagParser {
        parse() {
            // Show embeddings
            let tempResults = [];
            if (TAC.Globals.tagword !== "<" && TAC.Globals.tagword !== "<e:") {
                let searchTerm = TAC.Globals.tagword.replace("<e:", "").replace("<", "");
                let versionString;
                if (searchTerm.startsWith("v1") || searchTerm.startsWith("v2")) {
                    versionString = searchTerm.slice(0, 2);
                    searchTerm = searchTerm.slice(2);
                } else if (searchTerm.startsWith("vxl")) {
                    versionString = searchTerm.slice(0, 3);
                    searchTerm = searchTerm.slice(3);
                }

                let filterCondition = (x) => {
                    let regex = new RegExp(TAC.Utils.escapeRegExp(searchTerm, true), "i");
                    return (
                        regex.test(x[0].toLowerCase()) ||
                        regex.test(x[0].toLowerCase().replaceAll(" ", "_"))
                    );
                };

                if (versionString)
                    tempResults = TAC.Globals.embeddings.filter(
                        (x) =>
                            filterCondition(x) &&
                            x[2] &&
                            x[2].toLowerCase() === versionString.toLowerCase()
                    ); // Filter by tagword
                else tempResults = TAC.Globals.embeddings.filter((x) => filterCondition(x)); // Filter by tagword
            } else {
                tempResults = TAC.Globals.embeddings;
            }

            // Add final results
            let finalResults = [];
            tempResults.forEach((t) => {
                let lastDot = t[0].lastIndexOf(".") > -1 ? t[0].lastIndexOf(".") : t[0].length;
                let lastSlash = t[0].lastIndexOf("/") > -1 ? t[0].lastIndexOf("/") : -1;
                let name = t[0].trim().substring(lastSlash + 1, lastDot);

                let result = new TAC.AutocompleteResult(name, TAC.ResultType.embedding);
                result.sortKey = t[1];
                result.meta = t[2] + " Embedding";
                finalResults.push(result);
            });

            return finalResults;
        }
    }

    async function load() {
        if (TAC.Globals.embeddings.length === 0) {
            try {
                TAC.Globals.embeddings = (
                    await TAC.Utils.loadCSV(`${TAC.Globals.tagBasePath}/temp/emb.txt`)
                )
                    .filter((x) => x[0]?.trim().length > 0) // Remove empty lines
                    .map((x) => [x[0].trim(), x[1], x[2]]); // Return name, sortKey, hash tuples
            } catch (e) {
                console.error("Error loading embeddings.txt: " + e);
            }
        }
    }

    function sanitize(tagType, text) {
        if (tagType === TAC.ResultType.embedding) {
            return text;
        }
        return null;
    }

    TAC.Ext.PARSERS.push(new EmbeddingParser(EMB_TRIGGER));

    // Add our utility functions to their respective queues
    TAC.Ext.QUEUE_FILE_LOAD.push(load);
    TAC.Ext.QUEUE_SANITIZE.push(sanitize);
})();

const EMB_REGEX = /<(?!l:|h:|c:)[^,> ]*>?/g;
const EMB_TRIGGER = () => TAC_CFG.useEmbeddings && (tagword.match(EMB_REGEX) || TAC_CFG.includeEmbeddingsInNormalResults);

class EmbeddingParser extends BaseTagParser {
    parse() {
        // Show embeddings
        let tempResults = [];
        if (tagword !== "<" && tagword !== "<e:") {
            let searchTerm = tagword.replace("<e:", "").replace("<", "");
            let versionString;
            if (searchTerm.startsWith("v1") || searchTerm.startsWith("v2")) {
                versionString = searchTerm.slice(0, 2);
                searchTerm = searchTerm.slice(2);
            } else if (searchTerm.startsWith("vxl")) {
                versionString = searchTerm.slice(0, 3);
                searchTerm = searchTerm.slice(3);
            }

            let filterCondition = x => {
                let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
                return regex.test(x[0].toLowerCase()) || regex.test(x[0].toLowerCase().replaceAll(" ", "_"));
            };

            if (versionString)
                tempResults = embeddings.filter(x => filterCondition(x) && x[2] && x[2].toLowerCase() === versionString.toLowerCase()); // Filter by tagword
            else
                tempResults = embeddings.filter(x => filterCondition(x)); // Filter by tagword
        } else {
            tempResults = embeddings;
        }

        // Add final results
        let finalResults = [];
        tempResults.forEach(t => {
            let lastDot = t[0].lastIndexOf(".") > -1 ? t[0].lastIndexOf(".") : t[0].length;
            let lastSlash = t[0].lastIndexOf("/") > -1 ? t[0].lastIndexOf("/") : -1;
            let name = t[0].trim().substring(lastSlash + 1, lastDot);

            let result = new AutocompleteResult(name, ResultType.embedding)
            result.sortKey = t[1];
            result.meta = t[2] + " Embedding";
            finalResults.push(result);
        });

        return finalResults;
    }
}

async function load() {
    if (embeddings.length === 0) {
        try {
            embeddings = (await loadCSV(`${tagBasePath}/temp/emb.txt`))
                .filter(x => x[0]?.trim().length > 0) // Remove empty lines
                .map(x => [x[0].trim(), x[1], x[2]]); // Return name, sortKey, hash tuples
        } catch (e) {
            console.error("Error loading embeddings.txt: " + e);
        }
    }
}

function sanitize(tagType, text) {
    if (tagType === ResultType.embedding) {
        return text;
    }
    return null;
}

PARSERS.push(new EmbeddingParser(EMB_TRIGGER));

// Add our utility functions to their respective queues
QUEUE_FILE_LOAD.push(load);
QUEUE_SANITIZE.push(sanitize);
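Worth noting from the parser above: a leading v1/v2/vxl in the search term is split off and compared against the third CSV field, so "<e:v1flo" only matches v1 embeddings whose name matches "flo". A self-contained sketch of that check (the sample tuple follows the loader's comment and is an assumption):

const sample = ["flower_style", "0", "v1"]; // name, sortKey, version field
const versionString = "v1";
const searchTerm = "flo";
const matches =
    new RegExp(searchTerm, "i").test(sample[0].toLowerCase()) &&
    sample[2].toLowerCase() === versionString.toLowerCase();
console.log(matches); // true -> listed as "v1 Embedding"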
@@ -1,69 +1,55 @@

(function HypernetExtension() {
    const HYP_REGEX = /<(?!e:|l:|c:)[^,> ]*>?/g;
    const HYP_TRIGGER = () => TAC.CFG.useHypernetworks && TAC.Globals.tagword.match(HYP_REGEX);

    class HypernetParser extends TAC.BaseTagParser {
        parse() {
            // Show hypernetworks
            let tempResults = [];
            if (
                TAC.Globals.tagword !== "<" &&
                TAC.Globals.tagword !== "<h:" &&
                TAC.Globals.tagword !== "<hypernet:"
            ) {
                let searchTerm = TAC.Globals.tagword
                    .replace("<hypernet:", "")
                    .replace("<h:", "")
                    .replace("<", "");
                let filterCondition = (x) => {
                    let regex = new RegExp(TAC.Utils.escapeRegExp(searchTerm, true), "i");
                    return (
                        regex.test(x.toLowerCase()) ||
                        regex.test(x.toLowerCase().replaceAll(" ", "_"))
                    );
                };
                tempResults = TAC.Globals.hypernetworks.filter((x) => filterCondition(x[0])); // Filter by tagword
            } else {
                tempResults = TAC.Globals.hypernetworks;
            }

            // Add final results
            let finalResults = [];
            tempResults.forEach((t) => {
                let result = new TAC.AutocompleteResult(t[0].trim(), TAC.ResultType.hypernetwork);
                result.meta = "Hypernetwork";
                result.sortKey = t[1];
                finalResults.push(result);
            });

            return finalResults;
        }
    }

    async function load() {
        if (TAC.Globals.hypernetworks.length === 0) {
            try {
                TAC.Globals.hypernetworks = (
                    await TAC.Utils.loadCSV(`${TAC.Globals.tagBasePath}/temp/hyp.txt`)
                )
                    .filter((x) => x[0]?.trim().length > 0) //Remove empty lines
                    .map((x) => [x[0]?.trim(), x[1]]); // Remove carriage returns and padding if it exists
            } catch (e) {
                console.error("Error loading hypernetworks.txt: " + e);
            }
        }
    }

    function sanitize(tagType, text) {
        if (tagType === TAC.ResultType.hypernetwork) {
            return `<hypernet:${text}:${TAC.CFG.extraNetworksDefaultMultiplier}>`;
        }
        return null;
    }

    TAC.Ext.PARSERS.push(new HypernetParser(HYP_TRIGGER));

    // Add our utility functions to their respective queues
    TAC.Ext.QUEUE_FILE_LOAD.push(load);
    TAC.Ext.QUEUE_SANITIZE.push(sanitize);
})();

const HYP_REGEX = /<(?!e:|l:|c:)[^,> ]*>?/g;
const HYP_TRIGGER = () => TAC_CFG.useHypernetworks && tagword.match(HYP_REGEX);

class HypernetParser extends BaseTagParser {
    parse() {
        // Show hypernetworks
        let tempResults = [];
        if (tagword !== "<" && tagword !== "<h:" && tagword !== "<hypernet:") {
            let searchTerm = tagword.replace("<hypernet:", "").replace("<h:", "").replace("<", "");
            let filterCondition = x => {
                let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
                return regex.test(x.toLowerCase()) || regex.test(x.toLowerCase().replaceAll(" ", "_"));
            };
            tempResults = hypernetworks.filter(x => filterCondition(x[0])); // Filter by tagword
        } else {
            tempResults = hypernetworks;
        }

        // Add final results
        let finalResults = [];
        tempResults.forEach(t => {
            let result = new AutocompleteResult(t[0].trim(), ResultType.hypernetwork)
            result.meta = "Hypernetwork";
            result.sortKey = t[1];
            finalResults.push(result);
        });

        return finalResults;
    }
}

async function load() {
    if (hypernetworks.length === 0) {
        try {
            hypernetworks = (await loadCSV(`${tagBasePath}/temp/hyp.txt`))
                .filter(x => x[0]?.trim().length > 0) //Remove empty lines
                .map(x => [x[0]?.trim(), x[1]]); // Remove carriage returns and padding if it exists
        } catch (e) {
            console.error("Error loading hypernetworks.txt: " + e);
        }
    }
}

function sanitize(tagType, text) {
    if (tagType === ResultType.hypernetwork) {
        return `<hypernet:${text}:${TAC_CFG.extraNetworksDefaultMultiplier}>`;
    }
    return null;
}

PARSERS.push(new HypernetParser(HYP_TRIGGER));

// Add our utility functions to their respective queues
QUEUE_FILE_LOAD.push(load);
QUEUE_SANITIZE.push(sanitize);
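The sanitizer's output format is visible in the template literal above; with the default multiplier it produces, for example:

const text = "myHypernet";
const multiplier = TAC.CFG.extraNetworksDefaultMultiplier; // 1.0 stringifies to "1"
console.log(`<hypernet:${text}:${multiplier}>`); // "<hypernet:myHypernet:1>"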
@@ -1,81 +1,73 @@
|
||||
(function LoraExtension() {
|
||||
const LORA_REGEX = /<(?!e:|h:|c:)[^,> ]*>?/g;
|
||||
const LORA_TRIGGER = () => TAC.CFG.useLoras && TAC.Globals.tagword.match(LORA_REGEX);
|
||||
const LORA_REGEX = /<(?!e:|h:|c:)[^,> ]*>?/g;
|
||||
const LORA_TRIGGER = () => TAC_CFG.useLoras && tagword.match(LORA_REGEX);
|
||||
|
||||
class LoraParser extends TAC.BaseTagParser {
|
||||
parse() {
|
||||
// Show lora
|
||||
let tempResults = [];
|
||||
if (
|
||||
TAC.Globals.tagword !== "<" &&
|
||||
TAC.Globals.tagword !== "<l:" &&
|
||||
TAC.Globals.tagword !== "<lora:"
|
||||
) {
|
||||
let searchTerm = TAC.Globals.tagword
|
||||
.replace("<lora:", "")
|
||||
.replace("<l:", "")
|
||||
.replace("<", "");
|
||||
let filterCondition = (x) => {
|
||||
let regex = new RegExp(TAC.Utils.escapeRegExp(searchTerm, true), "i");
|
||||
return (
|
||||
regex.test(x.toLowerCase()) ||
|
||||
regex.test(x.toLowerCase().replaceAll(" ", "_"))
|
||||
);
|
||||
};
|
||||
tempResults = TAC.Globals.loras.filter((x) => filterCondition(x[0])); // Filter by tagword
|
||||
} else {
|
||||
tempResults = TAC.Globals.loras;
|
||||
}
|
||||
    class LoraParser extends BaseTagParser {
        parse() {
            // Show lora
            let tempResults = [];
            let searchTerm = tagword;
            if (tagword !== "<" && tagword !== "<l:" && tagword !== "<lora:") {
                searchTerm = tagword.replace("<lora:", "").replace("<l:", "").replace("<", "");
                // Fuzzy matching supersedes the old regex-based filter
                let filterCondition = (x) => TacFuzzy.check(x, searchTerm);
                tempResults = loras.filter(x => filterCondition(x[0])); // Filter by tagword
            } else {
                searchTerm = null;
                tempResults = loras;
            }

            // Add final results
            let finalResults = [];
            tempResults.forEach(t => {
                const text = t[0].trim();
                let lastDot = text.lastIndexOf(".") > -1 ? text.lastIndexOf(".") : text.length;
                let lastSlash = text.lastIndexOf("/") > -1 ? text.lastIndexOf("/") : -1;
                let name = text.substring(lastSlash + 1, lastDot);

                let result = new AutocompleteResult(name, ResultType.lora);
                result.highlightedText = TacFuzzy.manualHighlight(name, searchTerm);
                result.matchSource = "base";
                result.meta = "Lora";
                result.sortKey = t[1];
                result.hash = t[2];
                finalResults.push(result);
            });

            return finalResults;
        }
    }

    async function load() {
        if (loras.length === 0) {
            try {
                loras = (await loadCSV(`${tagBasePath}/temp/lora.txt`))
                    .filter(x => x[0]?.trim().length > 0) // Remove empty lines
                    .map(x => [x[0]?.trim(), x[1], x[2]]); // Trim filenames and return the name, sortKey, hash pairs
            } catch (e) {
                console.error("Error loading lora.txt: " + e);
            }
        }
    }

    async function sanitize(tagType, text) {
        if (tagType === ResultType.lora) {
            let multiplier = TAC_CFG.extraNetworksDefaultMultiplier;
            let info = await fetchTacAPI(`tacapi/v1/lora-info/${text}`);
            if (info && info["preferred weight"]) {
                multiplier = info["preferred weight"];
            }

            return `<lora:${text}:${multiplier}>`;
        }
        return null;
    }

    PARSERS.push(new LoraParser(LORA_TRIGGER));

    // Add our utility functions to their respective queues
    QUEUE_FILE_LOAD.push(load);
    QUEUE_SANITIZE.push(sanitize);
})();
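// A minimal sketch of how the sanitize step above resolves a completed Lora
// result into prompt text. The helper name `buildLoraTag`, the injected
// `fetchInfo` callback, and the 1.0 fallback are illustrative assumptions,
// not part of the extension.
async function buildLoraTag(name, fetchInfo, defaultMultiplier = 1.0) {
    let multiplier = defaultMultiplier;
    const info = await fetchInfo(name); // stands in for fetchTacAPI(`tacapi/v1/lora-info/${name}`)
    if (info && info["preferred weight"]) {
        multiplier = info["preferred weight"];
    }
    return `<lora:${name}:${multiplier}>`;
}
// buildLoraTag("myStyle", async () => ({ "preferred weight": 0.8 })) resolves to "<lora:myStyle:0.8>".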
@@ -1,84 +1,68 @@

(function LycoExtension() {
    const LYCO_REGEX = /<(?!e:|h:|c:)[^,> ]*>?/g;
    const LYCO_TRIGGER = () => TAC_CFG.useLycos && tagword.match(LYCO_REGEX);

    class LycoParser extends BaseTagParser {
        parse() {
            // Show lyco
            let tempResults = [];
            if (tagword !== "<" && tagword !== "<l:" && tagword !== "<lyco:" && tagword !== "<lora:") {
                let searchTerm = tagword.replace("<lyco:", "").replace("<lora:", "").replace("<l:", "").replace("<", "");
                let filterCondition = x => {
                    let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
                    return regex.test(x.toLowerCase()) || regex.test(x.toLowerCase().replaceAll(" ", "_"));
                };
                tempResults = lycos.filter(x => filterCondition(x[0])); // Filter by tagword
            } else {
                tempResults = lycos;
            }

            // Add final results
            let finalResults = [];
            tempResults.forEach(t => {
                const text = t[0].trim();
                let lastDot = text.lastIndexOf(".") > -1 ? text.lastIndexOf(".") : text.length;
                let lastSlash = text.lastIndexOf("/") > -1 ? text.lastIndexOf("/") : -1;
                let name = text.substring(lastSlash + 1, lastDot);

                let result = new AutocompleteResult(name, ResultType.lyco);
                result.meta = "Lyco";
                result.sortKey = t[1];
                result.hash = t[2];
                finalResults.push(result);
            });

            return finalResults;
        }
    }

    async function load() {
        if (lycos.length === 0) {
            try {
                lycos = (await loadCSV(`${tagBasePath}/temp/lyco.txt`))
                    .filter(x => x[0]?.trim().length > 0) // Remove empty lines
                    .map(x => [x[0]?.trim(), x[1], x[2]]); // Trim filenames and return the name, sortKey, hash pairs
            } catch (e) {
                console.error("Error loading lyco.txt: " + e);
            }
        }
    }

    async function sanitize(tagType, text) {
        if (tagType === ResultType.lyco) {
            let multiplier = TAC_CFG.extraNetworksDefaultMultiplier;
            let info = await fetchTacAPI(`tacapi/v1/lyco-info/${text}`);
            if (info && info["preferred weight"]) {
                multiplier = info["preferred weight"];
            }

            let prefix = TAC_CFG.useLoraPrefixForLycos ? "lora" : "lyco";
            return `<${prefix}:${text}:${multiplier}>`;
        }
        return null;
    }

    PARSERS.push(new LycoParser(LYCO_TRIGGER));

    // Add our utility functions to their respective queues
    QUEUE_FILE_LOAD.push(load);
    QUEUE_SANITIZE.push(sanitize);
})();
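// A small sketch of the prefix branch above: LyCORIS results can be inserted
// with either prefix, depending on the useLoraPrefixForLycos setting. The
// standalone helper below is illustrative, not part of the extension.
function lycoPrefix(useLoraPrefixForLycos) {
    return useLoraPrefixForLycos ? "lora" : "lyco";
}
// lycoPrefix(true)  -> "<lora:...>" style insertion
// lycoPrefix(false) -> "<lyco:...>" style insertion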
@@ -1,56 +1,42 @@

(function ModelKeywordExtension() {
    async function load() {
        let modelKeywordParts = (await readFile(`tmp/modelKeywordPath.txt`)).split(",");
        modelKeywordPath = modelKeywordParts[0];
        let customFileExists = modelKeywordParts[1] === "True";

        if (modelKeywordPath.length > 0 && modelKeywordDict.size === 0) {
            try {
                let csv_lines = [];
                // Only add default keywords if wanted by the user
                if (TAC_CFG.modelKeywordCompletion !== "Only user list")
                    csv_lines = await loadCSV(`${modelKeywordPath}/lora-keyword.txt`);
                // Add custom user keywords if the file exists
                if (customFileExists)
                    csv_lines = csv_lines.concat(await loadCSV(`${modelKeywordPath}/lora-keyword-user.txt`));

                if (csv_lines.length === 0) return;

                csv_lines = csv_lines.filter(x => x[0].trim().length > 0 && x[0].trim()[0] !== "#"); // Remove empty lines and comments

                // Add to the dict
                csv_lines.forEach(parts => {
                    const hash = parts[0];
                    const keywords = parts[1]?.replaceAll("| ", ", ")?.replaceAll("|", ", ")?.trim();
                    const lastSepIndex = parts[2]?.lastIndexOf("/") + 1 || parts[2]?.lastIndexOf("\\") + 1 || 0;
                    const name = parts[2]?.substring(lastSepIndex).trim() || "none";

                    if (modelKeywordDict.has(hash) && name !== "none") {
                        // Add a new name key if the hash already exists
                        modelKeywordDict.get(hash).set(name, keywords);
                    } else {
                        // Create new hash entry
                        let map = new Map().set(name, keywords);
                        modelKeywordDict.set(hash, map);
                    }
                });
            } catch (e) {
                console.error("Error loading model-keywords list: " + e);
            }
        }
    }

    QUEUE_FILE_LOAD.push(load);
})();
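// The keyword list is keyed by model hash first and filename second, so one
// hash can map to several files. A minimal lookup sketch; the hash, filenames
// and keywords below are invented for illustration.
const exampleDict = new Map([
    ["a1b2c3d4", new Map([
        ["styleA.safetensors", "style a, crisp lines"],
        ["none", "style a"],
    ])],
]);

function keywordsFor(dict, hash, name) {
    const byName = dict.get(hash);
    if (!byName) return null;
    // Prefer the exact filename, fall back to the hash-only "none" entry
    return byName.get(name) ?? byName.get("none") ?? null;
}
// keywordsFor(exampleDict, "a1b2c3d4", "styleA.safetensors") -> "style a, crisp lines"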
@@ -1,77 +1,70 @@

(function StyleExtension() {
    const STYLE_REGEX = /(\$(\d*)\(?)[^$|\[\],\s]*\)?/;
    const STYLE_TRIGGER = () => TAC_CFG.useStyleVars && tagword.match(STYLE_REGEX);

    var lastStyleVarIndex = "";

    class StyleParser extends BaseTagParser {
        async parse() {
            // Refresh if needed
            await refreshStyleNamesIfChanged();

            // Show styles
            let tempResults = [];
            let matchGroups = tagword.match(STYLE_REGEX);

            // Save index to insert again later, or clear the last one
            lastStyleVarIndex = matchGroups[2] ? matchGroups[2] : "";

            if (tagword !== matchGroups[1]) {
                let searchTerm = tagword.replace(matchGroups[1], "");

                let filterCondition = x => {
                    let regex = new RegExp(escapeRegExp(searchTerm, true), 'i');
                    return regex.test(x[0].toLowerCase()) || regex.test(x[0].toLowerCase().replaceAll(" ", "_"));
                };
                tempResults = styleNames.filter(x => filterCondition(x)); // Filter by tagword
            } else {
                tempResults = styleNames;
            }

            // Add final results
            let finalResults = [];
            tempResults.forEach(t => {
                let result = new AutocompleteResult(t[0].trim(), ResultType.styleName);
                result.meta = "Style";
                finalResults.push(result);
            });

            return finalResults;
        }
    }

    async function load(force = false) {
        if (styleNames.length === 0 || force) {
            try {
                styleNames = (await loadCSV(`${tagBasePath}/temp/styles.txt`))
                    .filter(x => x[0]?.trim().length > 0) // Remove empty lines
                    .filter(x => x[0] !== "None") // Remove "None" style
                    .map(x => [x[0].trim()]); // Trim name
            } catch (e) {
                console.error("Error loading styles.txt: " + e);
            }
        }
    }

    function sanitize(tagType, text) {
        if (tagType === ResultType.styleName) {
            if (text.includes(" ")) {
                return `$${lastStyleVarIndex}(${text})`;
            } else {
                return `$${lastStyleVarIndex}${text}`;
            }
        }
        return null;
    }

    PARSERS.push(new StyleParser(STYLE_TRIGGER));

    // Add our utility functions to their respective queues
    QUEUE_FILE_LOAD.push(load);
    QUEUE_SANITIZE.push(sanitize);
})();
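// A short sketch of the two insertion shapes produced by the style sanitizer
// above; names with spaces get the parenthesized form so the variable's end
// stays unambiguous. The index "2" is an arbitrary example value.
function insertStyleVar(index, name) {
    return name.includes(" ") ? `$${index}(${name})` : `$${index}${name}`;
}
// insertStyleVar("2", "oil painting") -> "$2(oil painting)"
// insertStyleVar("", "sketchy")       -> "$sketchy"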
@@ -1,279 +1,245 @@

(function UmiExtension() {
    const UMI_PROMPT_REGEX = /<[^\s]*?\[[^,<>]*[\]|]?>?/gi;
    const UMI_TAG_REGEX = /(?:\[|\||--)([^<>\[\]\-|]+)/gi;

    const UMI_TRIGGER = () => TAC_CFG.useWildcards && [...tagword.matchAll(UMI_PROMPT_REGEX)].length > 0;

    class UmiParser extends BaseTagParser {
        parse(textArea, prompt) {
            // We are in a UMI yaml tag definition, parse further
            let umiSubPrompts = [...prompt.matchAll(UMI_PROMPT_REGEX)];

            let umiTags = [];
            let umiTagsWithOperators = [];

            const insertAt = (str, char, pos) => str.slice(0, pos) + char + str.slice(pos);

            umiSubPrompts.forEach(umiSubPrompt => {
                umiTags = umiTags.concat([...umiSubPrompt[0].matchAll(UMI_TAG_REGEX)].map(x => x[1].toLowerCase()));

                const start = umiSubPrompt.index;
                const end = umiSubPrompt.index + umiSubPrompt[0].length;
                if (textArea.selectionStart >= start && textArea.selectionStart <= end) {
                    umiTagsWithOperators = insertAt(umiSubPrompt[0], '###', textArea.selectionStart - start);
                }
            });

            // Safety check since UMI parsing sometimes seems to trigger outside of an UMI subprompt and thus fails
            if (umiTagsWithOperators.length === 0) {
                return null;
            }

            const promptSplitToTags = umiTagsWithOperators.replace(']###[', '][').split("][");

            const clean = (str) => str
                .replaceAll('>', '')
                .replaceAll('<', '')
                .replaceAll('[', '')
                .replaceAll(']', '')
                .trim();

            const matches = promptSplitToTags.reduce((acc, curr) => {
                let isOptional = curr.includes("|");
                let isNegative = curr.startsWith("--");
                let out;
                if (isOptional) {
                    out = {
                        hasCursor: curr.includes("###"),
                        tags: clean(curr).split('|').map(x => ({
                            hasCursor: x.includes("###"),
                            isNegative: x.startsWith("--"),
                            tag: clean(x).replaceAll("###", '').replaceAll("--", '')
                        }))
                    };
                    acc.optional.push(out);
                    acc.all.push(...out.tags.map(x => x.tag));
                } else if (isNegative) {
                    out = {
                        hasCursor: curr.includes("###"),
                        tags: clean(curr).replaceAll("###", '').split('|'),
                    };
                    out.tags = out.tags.map(x => x.startsWith("--") ? x.substring(2) : x);
                    acc.negative.push(out);
                    acc.all.push(...out.tags);
                } else {
                    out = {
                        hasCursor: curr.includes("###"),
                        tags: clean(curr).replaceAll("###", '').split('|'),
                    };
                    acc.positive.push(out);
                    acc.all.push(...out.tags);
                }
                return acc;
            }, { positive: [], negative: [], optional: [], all: [] });

            const filteredWildcards = (tagword) => {
                const wildcards = umiWildcards.filter(x => {
                    let tags = x[1];
                    const matchesNeg =
                        matches.negative.length === 0
                        || matches.negative.every(x =>
                            x.hasCursor
                            || x.tags.every(t => !tags[t])
                        );
                    if (!matchesNeg) return false;
                    const matchesPos =
                        matches.positive.length === 0
                        || matches.positive.every(x =>
                            x.hasCursor
                            || x.tags.every(t => tags[t])
                        );
                    if (!matchesPos) return false;
                    const matchesOpt =
                        matches.optional.length === 0
                        || matches.optional.some(x =>
                            x.tags.some(t =>
                                t.hasCursor
                                || t.isNegative
                                    ? !tags[t.tag]
                                    : tags[t.tag]
                            ));
                    if (!matchesOpt) return false;
                    return true;
                }).reduce((acc, val) => {
                    Object.keys(val[1]).forEach(tag => acc[tag] = acc[tag] + 1 || 1);
                    return acc;
                }, {});

                return Object.entries(wildcards)
                    .sort((a, b) => b[1] - a[1])
                    .filter(x =>
                        x[0] === tagword
                        || !matches.all.includes(x[0])
                    );
            };

            if (umiTags.length > 0) {
                // Get difference for subprompt
                let tagCountChange = umiTags.length - umiPreviousTags.length;
                let diff = difference(umiTags, umiPreviousTags);
                umiPreviousTags = umiTags;

                // Show all condition
                let showAll = tagword.endsWith("[") || tagword.endsWith("[--") || tagword.endsWith("|");

                // Exit early if the user closed the bracket manually
                if ((!diff || diff.length === 0 || (diff.length === 1 && tagCountChange < 0)) && !showAll) {
                    if (!hideBlocked) hideResults(textArea);
                    return;
                }

                let umiTagword = tagCountChange < 0 ? '' : diff[0] || '';
                let tempResults = [];
                if (umiTagword && umiTagword.length > 0) {
                    umiTagword = umiTagword.toLowerCase().replace(/[\n\r]/g, "");
                    originalTagword = tagword;
                    tagword = umiTagword;
                    let filteredWildcardsSorted = filteredWildcards(umiTagword);
                    let searchRegex = new RegExp(`(^|[^a-zA-Z])${escapeRegExp(umiTagword)}`, 'i');
                    let baseFilter = x => x[0].toLowerCase().search(searchRegex) > -1;
                    let spaceIncludeFilter = x => x[0].toLowerCase().replaceAll(" ", "_").search(searchRegex) > -1;
                    tempResults = filteredWildcardsSorted.filter(x => baseFilter(x) || spaceIncludeFilter(x)); // Filter by tagword

                    // Add final results
                    let finalResults = [];
                    tempResults.forEach(t => {
                        let result = new AutocompleteResult(t[0].trim(), ResultType.umiWildcard);
                        result.count = t[1];
                        finalResults.push(result);
                    });

                    finalResults = finalResults.sort((a, b) => b.count - a.count);
                    return finalResults;
                } else if (showAll) {
                    let filteredWildcardsSorted = filteredWildcards("");

                    // Add final results
                    let finalResults = [];
                    filteredWildcardsSorted.forEach(t => {
                        let result = new AutocompleteResult(t[0].trim(), ResultType.umiWildcard);
                        result.count = t[1];
                        finalResults.push(result);
                    });

                    originalTagword = tagword;
                    tagword = "";

                    finalResults = finalResults.sort((a, b) => b.count - a.count);
                    return finalResults;
                }
            } else {
                let filteredWildcardsSorted = filteredWildcards("");

                // Add final results
                let finalResults = [];
                filteredWildcardsSorted.forEach(t => {
                    let result = new AutocompleteResult(t[0].trim(), ResultType.umiWildcard);
                    result.count = t[1];
                    finalResults.push(result);
                });

                originalTagword = tagword;
                tagword = "";

                finalResults = finalResults.sort((a, b) => b.count - a.count);
                return finalResults;
            }
        }
    }
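    // To make the reducer above concrete: a hand-worked sketch of the `matches`
    // shape for the hypothetical subprompt "<[sci-fi###][--horror][a|b]>"
    // (### marks the cursor; none of these tags come from a real UMI file).
    const matchesExample = {
        positive: [{ hasCursor: true, tags: ["sci-fi"] }],
        negative: [{ hasCursor: false, tags: ["horror"] }],
        optional: [{ hasCursor: false, tags: [
            { hasCursor: false, isNegative: false, tag: "a" },
            { hasCursor: false, isNegative: false, tag: "b" },
        ] }],
        all: ["sci-fi", "horror", "a", "b"],
    };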
    function updateUmiTags(tagType, sanitizedText, newPrompt, textArea) {
        // If it was a umi wildcard, also update the umiPreviousTags
        if (tagType === ResultType.umiWildcard && originalTagword.length > 0) {
            let umiSubPrompts = [...newPrompt.matchAll(UMI_PROMPT_REGEX)];

            let umiTags = [];
            umiSubPrompts.forEach(umiSubPrompt => {
                umiTags = umiTags.concat([...umiSubPrompt[0].matchAll(UMI_TAG_REGEX)].map(x => x[1].toLowerCase()));
            });

            umiPreviousTags = umiTags;

            hideResults(textArea);

            return true;
        }
        return false;
    }

    async function load() {
        if (umiWildcards.length === 0) {
            try {
                let umiTags = (await readFile(`${tagBasePath}/temp/umi_tags.txt`)).split("\n");
                // Split into tag, count pairs
                umiWildcards = umiTags
                    .map(x => x.trim().split(","))
                    .map(([i, ...rest]) => [
                        i,
                        rest.reduce((a, b) => {
                            a[b.toLowerCase()] = true;
                            return a;
                        }, {}),
                    ]);
            } catch (e) {
                console.error("Error loading umi wildcards: " + e);
            }
        }
    }

    function sanitize(tagType, text) {
        // Replace underscores only if the umi tag is not using them
        if (tagType === ResultType.umiWildcard && !umiWildcards.includes(text)) {
            return text.replaceAll("_", " ");
        }
        return null;
    }

    // Add UMI parser
    PARSERS.push(new UmiParser(UMI_TRIGGER));

    // Add our utility functions to their respective queues
    QUEUE_FILE_LOAD.push(load);
    QUEUE_SANITIZE.push(sanitize);
    QUEUE_AFTER_INSERT.push(updateUmiTags);
})();
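// A quick sketch of what the UMI trigger reacts to; the prompts below are
// illustrative only.
const UMI_PROMPT_REGEX_DEMO = /<[^\s]*?\[[^,<>]*[\]|]?>?/gi;
["<umi[fantasy]>", "<umi[sci-fi|", "plain prompt, no umi"].forEach(p => {
    const hits = [...p.matchAll(UMI_PROMPT_REGEX_DEMO)].length;
    console.log(p, "->", hits > 0 ? "triggers" : "no trigger");
});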
@@ -1,232 +1,178 @@

(function WildcardExtension() {
    // Regex
    const WC_REGEX = new RegExp(/__([^,]+)__([^, ]*)/g);

    // Trigger conditions
    const WC_TRIGGER = () => TAC_CFG.useWildcards && [...tagword.matchAll(new RegExp(WC_REGEX.source.replaceAll("__", escapeRegExp(TAC_CFG.wcWrap)), "g"))].length > 0;
    const WC_FILE_TRIGGER = () => TAC_CFG.useWildcards && (tagword.startsWith(TAC_CFG.wcWrap) && !tagword.endsWith(TAC_CFG.wcWrap) || tagword === TAC_CFG.wcWrap);

    class WildcardParser extends BaseTagParser {
        async parse() {
            // Show wildcards from a file with that name
            let wcMatch = [...tagword.matchAll(new RegExp(WC_REGEX.source.replaceAll("__", escapeRegExp(TAC_CFG.wcWrap)), "g"))];
            let wcFile = wcMatch[0][1];
            let wcWord = wcMatch[0][2];

            // Look in normal wildcard files
            let wcFound = wildcardFiles.filter(x => x[1].toLowerCase() === wcFile);
            if (wcFound.length === 0) wcFound = null;
            // Use found wildcard file or look in external wildcard files
            let wcPairs = wcFound || wildcardExtFiles.filter(x => x[1].toLowerCase() === wcFile);

            if (!wcPairs) return [];

            let wildcards = [];
            for (let i = 0; i < wcPairs.length; i++) {
                const basePath = wcPairs[i][0];
                const fileName = wcPairs[i][1];
                if (!basePath || !fileName) return;

                // YAML wildcards are already loaded as json, so we can get the values directly.
                // basePath is the name of the file in this case, and fileName the key
                if (basePath.endsWith(".yaml")) {
                    const getDescendantProp = (obj, desc) => {
                        const arr = desc.split("/");
                        while (arr.length) {
                            obj = obj[arr.shift()];
                        }
                        return obj;
                    };
                    wildcards = wildcards.concat(getDescendantProp(yamlWildcards[basePath], fileName));
                } else {
                    const fileContent = (await fetchTacAPI(`tacapi/v1/wildcard-contents?basepath=${basePath}&filename=${fileName}.txt`, false))
                        .split("\n")
                        .filter(x => x.trim().length > 0 && !x.startsWith('#')); // Remove empty lines and comments
                    wildcards = wildcards.concat(fileContent);
                }
            }

            if (TAC_CFG.sortWildcardResults)
                wildcards.sort((a, b) => a.localeCompare(b));

            let finalResults = [];
            let tempResults = wildcards.filter(x => (wcWord !== null && wcWord.length > 0) ? x.toLowerCase().includes(wcWord) : x); // Filter by tagword
            tempResults.forEach(t => {
                let result = new AutocompleteResult(t.trim(), ResultType.wildcardTag);
                result.meta = wcFile;
                finalResults.push(result);
            });

            return finalResults;
        }
    }

    class WildcardFileParser extends BaseTagParser {
        parse() {
            // Show available wildcard files
            let tempResults = [];
            if (tagword !== TAC_CFG.wcWrap) {
                let lmb = (x) => x[1].toLowerCase().includes(tagword.replace(TAC_CFG.wcWrap, ""));
                tempResults = wildcardFiles.filter(lmb).concat(wildcardExtFiles.filter(lmb)); // Filter by tagword
            } else {
                tempResults = wildcardFiles.concat(wildcardExtFiles);
            }

            let finalResults = [];
            const alreadyAdded = new Map();
            // Get final results
            tempResults.forEach(wcFile => {
                // Skip duplicate entries in case multiple files have the same name or yaml category
                if (alreadyAdded.has(wcFile[1])) return;

                let result = null;
                if (wcFile[0].endsWith(".yaml")) {
                    result = new AutocompleteResult(wcFile[1].trim(), ResultType.yamlWildcard);
                    result.meta = "YAML wildcard collection";
                } else {
                    result = new AutocompleteResult(wcFile[1].trim(), ResultType.wildcardFile);
                    result.meta = "Wildcard file";
                    result.sortKey = wcFile[2].trim();
                }

                finalResults.push(result);
                alreadyAdded.set(wcFile[1], true);
            });

            finalResults.sort(getSortFunction());

            return finalResults;
        }
    }
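    // The YAML branch above walks nested keys with slash-separated paths; a
    // small standalone sketch with invented data.
    const getDescendantPropDemo = (obj, desc) => {
        const arr = desc.split("/");
        while (arr.length) {
            obj = obj[arr.shift()];
        }
        return obj;
    };
    const collection = { clothing: { tops: ["shirt", "jacket"] } };
    console.log(getDescendantPropDemo(collection, "clothing/tops")); // ["shirt", "jacket"]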
    async function load() {
        if (wildcardFiles.length === 0 && wildcardExtFiles.length === 0) {
            try {
                let wcFileArr = await loadCSV(`${tagBasePath}/temp/wc.txt`);
                if (wcFileArr && wcFileArr.length > 0) {
                    let wcBasePath = wcFileArr[0][0].trim(); // First line should be the base path
                    wildcardFiles = wcFileArr.slice(1)
                        .filter(x => x[0]?.trim().length > 0) // Remove empty lines
                        .map(x => [wcBasePath, x[0]?.trim().replace(".txt", ""), x[1]]); // Remove file extension & newlines
                }

                // To support multiple sources, we need to separate them using the provided "-----" strings
                let wcExtFileArr = await loadCSV(`${tagBasePath}/temp/wce.txt`);
                let splitIndices = [];
                for (let index = 0; index < wcExtFileArr.length; index++) {
                    if (wcExtFileArr[index][0].trim() === "-----") {
                        splitIndices.push(index);
                    }
                }
                // For each group, add them to the wildcardFiles array with the base path as the first element
                for (let i = 0; i < splitIndices.length; i++) {
                    let start = splitIndices[i - 1] || 0;
                    if (i > 0) start++; // Skip the "-----" line
                    let end = splitIndices[i];

                    let wcExtFile = wcExtFileArr.slice(start, end);
                    if (wcExtFile && wcExtFile.length > 0) {
                        let base = wcExtFile[0][0].trim() + "/";
                        wcExtFile = wcExtFile.slice(1)
                            .filter(x => x[0]?.trim().length > 0) // Remove empty lines
                            .map(x => [base, x[0]?.trim().replace(base, "").replace(".txt", ""), x[1]]);
                        wildcardExtFiles.push(...wcExtFile);
                    }
                }

                // Load the yaml wildcard json file and append it as a wildcard file, appending each key as a path component until we reach the end
                yamlWildcards = await readFile(`${tagBasePath}/temp/wc_yaml.json`, true);

                // Append each key as a path component until we reach a leaf
                Object.keys(yamlWildcards).forEach(file => {
                    const flattened = flatten(yamlWildcards[file], [], "/");
                    Object.keys(flattened).forEach(key => {
                        wildcardExtFiles.push([file, key]);
                    });
                });
            } catch (e) {
                console.error("Error loading wildcards: " + e);
            }
        }
    }

    function sanitize(tagType, text) {
        if (tagType === ResultType.wildcardFile || tagType === ResultType.yamlWildcard) {
            return `${TAC_CFG.wcWrap}${text}${TAC_CFG.wcWrap}`;
        } else if (tagType === ResultType.wildcardTag) {
            return text;
        }
        return null;
    }

    function keepOpenIfWildcard(tagType, sanitizedText, newPrompt, textArea) {
        // If it's a wildcard, we want to keep the results open so the user can select another wildcard
        if (tagType === ResultType.wildcardFile || tagType === ResultType.yamlWildcard) {
            hideBlocked = true;
            setTimeout(() => { hideBlocked = false; }, 450);
            return true;
        }
        return false;
    }

    // Register the parsers
    PARSERS.push(new WildcardParser(WC_TRIGGER));
    PARSERS.push(new WildcardFileParser(WC_FILE_TRIGGER));

    // Add our utility functions to their respective queues
    QUEUE_FILE_LOAD.push(load);
    QUEUE_SANITIZE.push(sanitize);
    QUEUE_AFTER_INSERT.push(keepOpenIfWildcard);
})();
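// The wce.txt grouping in load() is easiest to see on sample data; the rows
// below are invented, with each source group closed by a "-----" sentinel.
const rows = [["ext1/wildcards"], ["colors.txt"], ["-----"],
              ["ext2/wildcards"], ["animals.txt"], ["-----"]];
const splitIndices = [];
rows.forEach((r, i) => { if (r[0].trim() === "-----") splitIndices.push(i); });
for (let i = 0; i < splitIndices.length; i++) {
    let start = splitIndices[i - 1] || 0;
    if (i > 0) start++; // skip the "-----" line itself
    const group = rows.slice(start, splitIndices[i]);
    console.log(group[0][0], "->", group.slice(1).map(r => r[0]));
}
// Logs "ext1/wildcards -> ['colors.txt']" then "ext2/wildcards -> ['animals.txt']"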
File diff suppressed because it is too large
@@ -20,27 +20,9 @@ except ImportError:

TAGS_PATH = Path(scripts.basedir()).joinpath("tags").absolute()

# The path to the folder containing the wildcards and embeddings
WILDCARD_PATH = FILE_DIR.joinpath("scripts/wildcards").absolute()
EMB_PATH = Path(shared.cmd_opts.embeddings_dir).absolute()
HYP_PATH = Path(shared.cmd_opts.hypernetwork_dir).absolute()

try:
    LORA_PATH = Path(shared.cmd_opts.lora_dir).absolute()
@@ -1,20 +1,19 @@

# This helper script scans folders for wildcards and embeddings and writes them
# to a temporary file to expose it to the javascript side

import glob
import importlib
import json
import sqlite3
import sys
import urllib.parse
from asyncio import sleep
from pathlib import Path

import gradio as gr
import yaml
from fastapi import FastAPI
from fastapi.responses import Response, FileResponse, JSONResponse
from modules import script_callbacks, sd_hijack, shared, hashes
from pydantic import BaseModel

from scripts.model_keyword_support import (get_lora_simple_hash,
@@ -26,7 +25,7 @@ try:
    try:
        from scripts import tag_frequency_db as tdb
    except ModuleNotFoundError:
        from inspect import getframeinfo, currentframe
        filename = getframeinfo(currentframe()).filename
        parent = Path(filename).resolve().parent
        sys.path.append(str(parent))
@@ -42,32 +41,9 @@ except (ImportError, ValueError, sqlite3.Error) as e:
    print(f"Tag Autocomplete: Tag frequency database error - \"{e}\"")
    db = None

# Attempt to get embedding load function, using the same call as api.
try:
    load_textual_inversion_embeddings = sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings
except Exception as e: # Not supported.
    load_textual_inversion_embeddings = lambda *args, **kwargs: None
    print("Tag Autocomplete: Cannot reload embeddings instantly:", e)
@@ -75,8 +51,8 @@ except Exception as e: # Not supported.

# Sorting functions for extra networks / embeddings stuff
sort_criteria = {
    "Name": lambda path, name, subpath: name.lower() if subpath else path.stem.lower(),
    "Date Modified (newest first)": lambda path, name, subpath: path.stat().st_mtime,
    "Date Modified (oldest first)": lambda path, name, subpath: path.stat().st_mtime
}

def sort_models(model_list, sort_method = None, name_has_subpath = False):
@@ -98,9 +74,9 @@ def sort_models(model_list, sort_method = None, name_has_subpath = False):
    # During merging on the JS side we need to re-sort anyway, so here only the sort criteria are calculated.
    # The list itself doesn't need to get sorted at this point.
    if len(model_list[0]) > 2:
        results = [f'{name},"{sorter(path, name, name_has_subpath)}",{meta}' for path, name, meta in model_list]
    else:
        results = [f'{name},"{sorter(path, name, name_has_subpath)}"' for path, name in model_list]
    return results
@@ -134,11 +110,7 @@ def is_umi_format(data):
    """Returns True if the YAML file is in UMI format."""
    issue_found = False
    for item in data:
        if not (data[item] and 'Tags' in data[item] and isinstance(data[item]['Tags'], list)):
            issue_found = True
            break
    return not issue_found
@@ -160,12 +132,9 @@ def parse_dynamic_prompt_format(yaml_wildcards, data, path):
        elif not (isinstance(value, list) and all(isinstance(v, str) for v in value)):
            del d[key]

    recurse_dict(data)
    # Add to yaml_wildcards
    yaml_wildcards[path.name] = data


def get_yaml_wildcards():
@@ -190,13 +159,9 @@ def get_yaml_wildcards():
                parse_dynamic_prompt_format(yaml_wildcards, data, path)
            else:
                print('No data found in ' + path.name)
        except (yaml.YAMLError, UnicodeDecodeError) as e:
            print(f'Issue in parsing YAML file {path.name}: {e}')
            continue

    # Sort by count
    umi_sorted = sorted(umi_tags.items(), key=lambda item: item[1], reverse=True)
@@ -225,45 +190,35 @@ def get_embeddings(sd_model):
     results = []
 
     try:
-        embed_db = get_embed_db(sd_model)
-        # Re-register callback if needed
-        global load_textual_inversion_embeddings
-        if embed_db is not None and load_textual_inversion_embeddings != embed_db.load_textual_inversion_embeddings:
-            load_textual_inversion_embeddings = embed_db.load_textual_inversion_embeddings
-        # The sd_model embedding_db reference only exists in sd.next with diffusers backend
-        try:
-            loaded_sdnext = sd_model.embedding_db.word_embeddings
-            skipped_sdnext = sd_model.embedding_db.skipped_embeddings
-        except (NameError, AttributeError):
-            loaded_sdnext = {}
-            skipped_sdnext = {}
-
-        loaded = embed_db.word_embeddings
-        skipped = embed_db.skipped_embeddings
-        loaded = loaded | loaded_sdnext
-        skipped = skipped | skipped_sdnext
+        # Get embedding dict from sd_hijack to separate v1/v2 embeddings
+        loaded = sd_hijack.model_hijack.embedding_db.word_embeddings
+        skipped = sd_hijack.model_hijack.embedding_db.skipped_embeddings
 
         # Add embeddings to the correct list
         for key, emb in (skipped | loaded).items():
-            filename = getattr(emb, "filename", None)
-
-            if filename is None:
-                if emb.shape is None:
-                    emb_unknown.append((Path(key), key, ""))
-                elif emb.shape == V1_SHAPE:
-                    emb_v1.append((Path(key), key, "v1"))
-                elif emb.shape == V2_SHAPE:
-                    emb_v2.append((Path(key), key, "v2"))
-                elif emb.shape == VXL_SHAPE:
-                    emb_vXL.append((Path(key), key, "vXL"))
-                else:
-                    emb_unknown.append((Path(key), key, ""))
-
-            else:
-                if emb.filename is None:
-                    continue
+            if emb.filename is None:
+                continue
 
-                if emb.shape is None:
-                    emb_unknown.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), ""))
-                elif emb.shape == V1_SHAPE:
-                    emb_v1.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "v1"))
-                elif emb.shape == V2_SHAPE:
-                    emb_v2.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "v2"))
-                elif emb.shape == VXL_SHAPE:
-                    emb_vXL.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "vXL"))
-                else:
-                    emb_unknown.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), ""))
+            if emb.shape is None:
+                emb_unknown.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), ""))
+            elif emb.shape == V1_SHAPE:
+                emb_v1.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "v1"))
+            elif emb.shape == V2_SHAPE:
+                emb_v2.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "v2"))
+            elif emb.shape == VXL_SHAPE:
+                emb_vXL.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), "vXL"))
+            else:
+                emb_unknown.append((Path(emb.filename), Path(emb.filename).relative_to(EMB_PATH).as_posix(), ""))
 
         results = sort_models(emb_v1) + sort_models(emb_v2) + sort_models(emb_vXL) + sort_models(emb_unknown)
     except AttributeError:
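Note: the v1/v2/XL split keys off the embedding vector width. A self-contained sketch of the same bucketing; the width constants here are assumptions for illustration only, the script defines its own V1_SHAPE, V2_SHAPE and VXL_SHAPE:

from pathlib import Path

V1_SHAPE, V2_SHAPE, VXL_SHAPE = 768, 1024, 2048  # assumed widths, illustration only

def classify_embedding(filename, shape, emb_root):
    # Return (path, name relative to the embeddings folder, version tag).
    rel = Path(filename).relative_to(emb_root).as_posix()
    if shape == V1_SHAPE:
        return (Path(filename), rel, "v1")
    elif shape == V2_SHAPE:
        return (Path(filename), rel, "v2")
    elif shape == VXL_SHAPE:
        return (Path(filename), rel, "vXL")
    return (Path(filename), rel, "")  # unknown architecture

print(classify_embedding("/embeds/style.pt", 768, "/embeds"))  # (..., 'style.pt', 'v1')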
@@ -334,7 +289,7 @@ try:
     import sys
     from modules import extensions
     sys.path.append(Path(extensions.extensions_builtin_dir).joinpath("Lora").as_posix())
-    import lora # pyright: ignore [reportMissingImports]
+    import lora # pyright: ignore [reportMissingImports]
 
     def _get_lora():
         return [
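Note: this import works because the built-in Lora extension's folder was appended to sys.path first, so `import lora` resolves by folder name alone. Outside the webui the same pattern looks like this (the directory name is a hypothetical stand-in):

import sys
from pathlib import Path

# Hypothetical folder containing a lora.py module we want to import by name.
module_dir = Path("extensions-builtin") / "Lora"
sys.path.append(module_dir.as_posix())

# After the append this would resolve against that folder:
# import lora  # pyright: ignore [reportMissingImports]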
@@ -475,11 +430,8 @@ def refresh_embeddings(force: bool, *args, **kwargs):
     # Fix for SD.Next infinite refresh loop due to gradio not updating after model load on demand.
     # This will just skip embedding loading if no model is loaded yet (or there really are no embeddings).
     # Try catch is just for safety incase sd_hijack access fails for some reason.
-    embed_db = get_embed_db()
-    if embed_db is None:
-        return
-    loaded = embed_db.word_embeddings
-    skipped = embed_db.skipped_embeddings
+    loaded = sd_hijack.model_hijack.embedding_db.word_embeddings
+    skipped = sd_hijack.model_hijack.embedding_db.skipped_embeddings
     if len((loaded | skipped)) > 0:
         load_textual_inversion_embeddings(force_reload=force)
         get_embeddings(None)
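Note: the removed lines implemented an early-return guard so that a missing embedding database skips the reload instead of failing on attribute access. The shape of that guard, reduced to a runnable sketch (FakeEmbedDb is a stand-in for the webui object):

class FakeEmbedDb:
    # Stand-in for the webui's embedding database.
    word_embeddings = {"style": object()}
    skipped_embeddings = {}
    def load_textual_inversion_embeddings(self, force_reload=False):
        print(f"reloading (force={force_reload})")

def refresh_embeddings(force, get_embed_db):
    embed_db = get_embed_db()
    if embed_db is None:
        return  # no model loaded yet, skip quietly
    if len(embed_db.word_embeddings | embed_db.skipped_embeddings) > 0:
        embed_db.load_textual_inversion_embeddings(force_reload=force)

refresh_embeddings(True, lambda: None)           # no db: does nothing
refresh_embeddings(True, lambda: FakeEmbedDb())  # prints: reloading (force=True)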
@@ -492,8 +444,7 @@ def refresh_temp_files(*args, **kwargs):
     if not skip_wildcard_refresh:
         WILDCARD_EXT_PATHS = find_ext_wildcard_paths()
     write_temp_files(skip_wildcard_refresh)
-    force_embed_refresh = getattr(shared.opts, "tac_forceRefreshEmbeddings", False)
-    refresh_embeddings(force=force_embed_refresh)
+    refresh_embeddings(force=True)
 
 def write_style_names(*args, **kwargs):
     styles = get_style_names()
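Note: reading the toggle through getattr, as the removed line does, keeps the call working on installs where the option was never registered. A minimal illustration (SimpleNamespace stands in for shared.opts):

from types import SimpleNamespace

opts = SimpleNamespace()  # pretend shared.opts lacks the setting entirely

# Falls back to False when the attribute does not exist.
force_embed_refresh = getattr(opts, "tac_forceRefreshEmbeddings", False)
print(force_embed_refresh)  # False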
@@ -503,14 +454,7 @@ def write_style_names(*args, **kwargs):
 def write_temp_files(skip_wildcard_refresh = False):
     # Write wildcards to wc.txt if found
     if WILDCARD_PATH.exists() and not skip_wildcard_refresh:
-        try:
-            # Attempt to create a relative path, but fall back to an absolute path if not possible
-            relative_wildcard_path = WILDCARD_PATH.relative_to(FILE_DIR).as_posix()
-        except ValueError:
-            # If the paths are not relative, use the absolute path
-            relative_wildcard_path = WILDCARD_PATH.as_posix()
-
-        wildcards = [relative_wildcard_path] + get_wildcards()
+        wildcards = [WILDCARD_PATH.relative_to(FILE_DIR).as_posix()] + get_wildcards()
         if wildcards:
             write_to_temp_file('wc.txt', wildcards)
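Note: pathlib raises ValueError when the wildcard folder is not located under the script directory (for example, on another drive), which is exactly what the removed try/except handled. In isolation (paths invented):

from pathlib import Path

wildcard_path = Path("D:/wildcards")        # hypothetical folder on another drive
file_dir = Path("C:/webui/extensions/tac")  # hypothetical script directory

try:
    rel = wildcard_path.relative_to(file_dir).as_posix()
except ValueError:
    # Not under the script dir, fall back to the absolute path.
    rel = wildcard_path.as_posix()

print(rel)  # D:/wildcards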
@@ -522,7 +466,7 @@ def write_temp_files(skip_wildcard_refresh = False):
     # Write yaml extension wildcards to umi_tags.txt and wc_yaml.json if found
     get_yaml_wildcards()
 
-    if HYP_PATH is not None and HYP_PATH.exists():
+    if HYP_PATH.exists():
         hypernets = get_hypernetworks()
         if hypernets:
             write_to_temp_file('hyp.txt', hypernets)
@@ -597,7 +541,6 @@ def on_ui_settings():
     "tac_wildcardExclusionList": shared.OptionInfo("", "Wildcard folder exclusion list").info("Add folder names that shouldn't be searched for wildcards, separated by comma.").needs_restart(),
     "tac_skipWildcardRefresh": shared.OptionInfo(False, "Don't re-scan for wildcard files when pressing the extra networks refresh button").info("Useful to prevent hanging if you use a very large wildcard collection."),
     "tac_useEmbeddings": shared.OptionInfo(True, "Search for embeddings"),
-    "tac_forceRefreshEmbeddings": shared.OptionInfo(False, "Force refresh embeddings when pressing the extra networks refresh button").info("Turn this on if you have issues with new embeddings not registering correctly in TAC. Warning: Seems to cause reloading issues in gradio for some users."),
     "tac_includeEmbeddingsInNormalResults": shared.OptionInfo(False, "Include embeddings in normal tag results").info("The 'JumpTo...' keybinds (End & Home key by default) will select the first non-embedding result of their direction on the first press for quick navigation in longer lists."),
     "tac_useHypernetworks": shared.OptionInfo(True, "Search for hypernetworks"),
     "tac_useLoras": shared.OptionInfo(True, "Search for Loras"),
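Note: each entry above pairs an option key with a shared.OptionInfo that the webui renders on its settings page. A hedged sketch of registering one such option from an extension (the key and section are invented; this only runs inside the webui, where the modules package exists):

from modules import script_callbacks, shared  # available inside the webui only

def on_ui_settings():
    # Hypothetical extra toggle, registered the same way as the options above.
    shared.opts.add_option(
        "tac_myNewToggle",
        shared.OptionInfo(False, "My new TAC toggle", section=("tac", "Tag Autocomplete")),
    )

script_callbacks.on_ui_settings(on_ui_settings)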
@@ -616,7 +559,6 @@ def on_ui_settings():
     "tac_frequencyIncludeAlias": shared.OptionInfo(False, "Frequency sorting matches aliases for frequent tags").info("Tag frequency will be increased for the main tag even if an alias is used for completion. This option can be used to override the default behavior of alias results being ignored for frequency sorting."),
     # Insertion related settings
     "tac_replaceUnderscores": shared.OptionInfo(True, "Replace underscores with spaces on insertion"),
-    "tac_undersocreReplacementExclusionList": shared.OptionInfo("0_0,(o)_(o),+_+,+_-,._.,<o>_<o>,<|>_<|>,=_=,>_<,3_3,6_9,>_o,@_@,^_^,o_o,u_u,x_x,|_|,||_||", "Underscore replacement exclusion list").info("Add tags that shouldn't have underscores replaced with spaces, separated by comma."),
     "tac_escapeParentheses": shared.OptionInfo(True, "Escape parentheses on insertion"),
     "tac_appendComma": shared.OptionInfo(True, "Append comma on tag autocompletion"),
     "tac_appendSpace": shared.OptionInfo(True, "Append space on tag autocompletion").info("will append after comma if the above is enabled"),
@@ -693,23 +635,6 @@ def on_ui_settings():
         "9": ["#df3647", "#8e1c2b"],
         "10": ["#c98f2b", "#7b470e"],
         "11": ["#e87ebe", "#a83583"]
     },
-    "danbooru_e621_merged": {
-        "-1": ["red", "maroon"],
-        "0": ["lightblue", "dodgerblue"],
-        "1": ["indianred", "firebrick"],
-        "3": ["violet", "darkorchid"],
-        "4": ["lightgreen", "darkgreen"],
-        "5": ["orange", "darkorange"],
-        "6": ["red", "maroon"],
-        "7": ["lightblue", "dodgerblue"],
-        "8": ["gold", "goldenrod"],
-        "9": ["gold", "goldenrod"],
-        "10": ["violet", "darkorchid"],
-        "11": ["lightgreen", "darkgreen"],
-        "12": ["tomato", "darksalmon"],
-        "14": ["whitesmoke", "black"],
-        "15": ["seagreen", "darkseagreen"]
-    }
     }\
     """
@@ -748,7 +673,7 @@ def api_tac(_: gr.Blocks, app: FastAPI):
         return Response(status_code=404)
 
     try:
-        json_candidates = glob.glob(base_path.as_posix() + f"/**/{glob.escape(filename)}.json", recursive=True)
+        json_candidates = glob.glob(base_path.as_posix() + f"/**/{filename}.json", recursive=True)
         if json_candidates is not None and len(json_candidates) > 0 and Path(json_candidates[0]).is_file():
             return FileResponse(json_candidates[0])
     except Exception as e:
@@ -759,7 +684,7 @@ def api_tac(_: gr.Blocks, app: FastAPI):
         return Response(status_code=404)
 
     try:
-        img_glob = glob.glob(base_path.as_posix() + f"/**/{glob.escape(filename)}.*", recursive=True)
+        img_glob = glob.glob(base_path.as_posix() + f"/**/{filename}.*", recursive=True)
         img_candidates = [img for img in img_glob if Path(img).suffix in [".png", ".jpg", ".jpeg", ".webp", ".gif"] and Path(img).is_file()]
         if img_candidates is not None and len(img_candidates) > 0:
             if blob:
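Note: both endpoints above drop glob.escape around the interpolated name. Escaping matters because model and embedding names routinely contain square brackets, which glob otherwise treats as character classes:

import glob

name = "style[v2]"            # hypothetical filename stem
print(glob.escape(name))      # style[[]v2]  ('[' neutralized)

# Unescaped, the pattern 'style[v2].*' also matches 'stylev.pt' and 'style2.pt';
# escaped, it matches only files literally named 'style[v2].*'.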
@@ -771,7 +696,6 @@ def api_tac(_: gr.Blocks, app: FastAPI):
 
     @app.post("/tacapi/v1/refresh-temp-files")
     async def api_refresh_temp_files():
-        await sleep(0) # might help with refresh blocking gradio
         refresh_temp_files()
 
     @app.post("/tacapi/v1/refresh-embeddings")
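Note: the removed `await sleep(0)` is the usual trick for yielding to the event loop once before a blocking call, giving the server a chance to make progress first. In miniature:

import asyncio

def heavy_sync_refresh():
    print("refreshing...")  # placeholder for refresh_temp_files()

async def refresh():
    await asyncio.sleep(0)  # yield control once before the blocking work
    heavy_sync_refresh()

asyncio.run(refresh())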
@@ -788,7 +712,7 @@ def api_tac(_: gr.Blocks, app: FastAPI):
 
     @app.get("/tacapi/v1/lora-cached-hash/{lora_name}")
     async def get_lora_cached_hash(lora_name: str):
-        path_glob = glob.glob(LORA_PATH.as_posix() + f"/**/{glob.escape(lora_name)}.*", recursive=True)
+        path_glob = glob.glob(LORA_PATH.as_posix() + f"/**/{lora_name}.*", recursive=True)
         paths = [lora for lora in path_glob if Path(lora).suffix in [".safetensors", ".ckpt", ".pt"] and Path(lora).is_file()]
         if paths is not None and len(paths) > 0:
             path = paths[0]
@@ -907,5 +831,5 @@ def api_tac(_: gr.Blocks, app: FastAPI):
     @app.get("/tacapi/v1/get-all-use-counts")
     async def get_all_tag_counts():
         return db_request(lambda: db.get_all_tags(), get=True)
 
 
 script_callbacks.on_app_started(api_tac)
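Note: once script_callbacks.on_app_started has run, the routes registered above are served by the webui's own HTTP server. A hypothetical client call (base URL assumes a default local install):

import requests

BASE = "http://127.0.0.1:7860"  # assumed default webui address

# Trigger a temp-file refresh, then fetch the recorded tag use counts.
requests.post(f"{BASE}/tacapi/v1/refresh-temp-files").raise_for_status()
counts = requests.get(f"{BASE}/tacapi/v1/get-all-use-counts").json()
print(counts)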
238326  tags/danbooru.csv    (diff suppressed: file too large)
125642  tags/derpibooru.csv  (diff suppressed: file too large)
200392  tags/e621.csv        (diff suppressed: file too large)