Fix global var access for coqui

This commit is contained in:
SillyLossy
2023-07-26 22:27:15 +03:00
parent ec7cf2f1df
commit 30a2424e8f
2 changed files with 41 additions and 18 deletions

View File

@@ -167,6 +167,28 @@ cd SillyTavern-extras
| `--sd-remote-ssl` | Use SSL for the remote SD backend<br>Default: **False** | | `--sd-remote-ssl` | Use SSL for the remote SD backend<br>Default: **False** |
| `--sd-remote-auth` | Specify the `username:password` for the remote SD backend (if required) | | `--sd-remote-auth` | Specify the `username:password` for the remote SD backend (if required) |
## Coqui TTS
### Running on Mac M1
#### ImportError: symbol not found
If you're getting the following error when running the coqui-tts module on an M1 Mac:
```
ImportError: dlopen(/Users/user/.../lib/python3.11/site-packages/MeCab/_MeCab.cpython-311-darwin.so, 0x0002): symbol not found in flat namespace '__ZN5MeCab11createModelEPKc'
```
Do the following:
1. Install homebrew: https://brew.sh/
2. Build and install the `mecab` package
```
brew install --build-from-source mecab
ARCHFLAGS='-arch arm64' pip install --no-binary :all: --compile --use-pep517 --no-cache-dir --force mecab-python3
```
## ChromaDB ## ChromaDB
ChromaDB is a blazing fast and open source database that is used for long-term memory when chatting with characters. It can be run in-memory or on a local server on your LAN. ChromaDB is a blazing fast and open source database that is used for long-term memory when chatting with characters. It can be run in-memory or on a local server on your LAN.

View File

@@ -54,7 +54,7 @@ def load_model(_model, _gpu, _progress):
status = None status = None
_model_directory, _file = os.path.split(_model) _model_directory, _file = os.path.split(_model)
if _model_directory == "": #make it assign vars correctly if no filename provided if _model_directory == "": #make it assign vars correctly if no filename provided
_model_directory = _file _model_directory = _file
@@ -63,10 +63,10 @@ def load_model(_model, _gpu, _progress):
if _model is None: if _model is None:
status = "ERROR: Invalid model name or path." status = "ERROR: Invalid model name or path."
else: else:
try: try:
if _gpu == True: #Reclaim memory if _gpu == True: #Reclaim memory
del tts del tts
try: try:
import gc import gc
gc.collect() gc.collect()
torch.cuda.empty_cache() torch.cuda.empty_cache()
@@ -120,7 +120,7 @@ def load_model(_model, _gpu, _progress):
print("Continuing with other parts of the code...") print("Continuing with other parts of the code...")
else: else:
pass pass
type = model_type(_config_path) type = model_type(_config_path)
print("Type: ", type) print("Type: ", type)
@@ -128,7 +128,7 @@ def load_model(_model, _gpu, _progress):
status = "Unknown error occurred" status = "Unknown error occurred"
if type is None: if type is None:
type = "Unknown" type = "Unknown"
return status return status
def is_multi_speaker_model(): def is_multi_speaker_model():
@@ -136,15 +136,16 @@ def is_multi_speaker_model():
global type global type
global spkdirectory global spkdirectory
global multspeakjson global multspeakjson
global tts
if tts is None: if tts is None:
multspeak = "None" multspeak = "None"
return multspeak return multspeak
try: try:
if type == "bark": if type == "bark":
_target_directory = ModelManager().output_prefix _target_directory = ModelManager().output_prefix
# Convert _target_directory to a string and remove the trailing backslash if present # Convert _target_directory to a string and remove the trailing backslash if present
_target_directory_str = str(_target_directory) _target_directory_str = str(_target_directory)
if _target_directory_str.endswith("\\"): if _target_directory_str.endswith("\\"):
@@ -179,6 +180,7 @@ def is_multi_speaker_model():
def is_multi_lang_model(): def is_multi_lang_model():
global multlang global multlang
global tts
if tts is None: if tts is None:
multlang = "None" multlang = "None"
return multlang return multlang
@@ -194,7 +196,7 @@ def is_multi_lang_model():
except Exception as e: except Exception as e:
print("Error:", e) print("Error:", e)
multlang = "None" multlang = "None"
return multlang return multlang
def get_coqui_models(): #DROPDOWN MODELS def get_coqui_models(): #DROPDOWN MODELS
@@ -281,7 +283,7 @@ def get_coqui_download_models(): #Avail voices list
json_data = json.dumps(formatted_list, indent=4) json_data = json.dumps(formatted_list, indent=4)
return json_data return json_data
def coqui_modeldownload(_modeldownload): #Avail voices function def coqui_modeldownload(_modeldownload): #Avail voices function
print(_modeldownload) print(_modeldownload)
try: try:
@@ -317,32 +319,32 @@ def coqui_tts(text, speaker_id, mspker_id, style_wav, language_id):
print("mspker_id: ", mspker_id) print("mspker_id: ", mspker_id)
print("language_id: ", language_id) print("language_id: ", language_id)
try: #see if values passed in URL try: #see if values passed in URL
if language_id is not None: if language_id is not None:
float(language_id) float(language_id)
multlang = float(language_id) multlang = float(language_id)
else: else:
pass pass
except ValueError: except ValueError:
pass pass
try: try:
if mspker_id is not None: if mspker_id is not None:
float(mspker_id) float(mspker_id)
multspeak = float(mspker_id) multspeak = float(mspker_id)
else: else:
pass pass
except ValueError: except ValueError:
pass pass
if loadedModel != speaker_id: if loadedModel != speaker_id:
print("MODEL NOT LOADED!!! Loading... ", loadedModel, speaker_id) print("MODEL NOT LOADED!!! Loading... ", loadedModel, speaker_id)
load_model(speaker_id, True, True) #use GPU and progress bar? load_model(speaker_id, True, True) #use GPU and progress bar?
audio_buffer = io.BytesIO() audio_buffer = io.BytesIO()
@@ -381,8 +383,7 @@ def coqui_tts(text, speaker_id, mspker_id, style_wav, language_id):
audio_buffer.seek(0) audio_buffer.seek(0)
response = send_file(audio_buffer, mimetype="audio/wav") response = send_file(audio_buffer, mimetype="audio/wav")
#reset for next dynamic tts #reset for next dynamic tts
multlang = None multlang = None
multspeak = None multspeak = None
return response return response