Mirror of https://github.com/SillyTavern/SillyTavern-Extras.git
Clean up and prevent double load
@@ -666,10 +666,7 @@ def result_feed():
 def load_model():
     # Accessing the URL parameters
     _model = request.args.get('_model')
 
     _gpu = False if args.coqui_gpu else True
-    print(_gpu)
-
     _progress = request.args.get('_progress')
     return coqui.load_model(_model, _gpu, _progress)
-
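This first hunk removes a leftover debug print from the route handler in the Extras server script (filename not shown in this excerpt) that simply forwards the query parameters to coqui.load_model(). A minimal sketch of exercising that handler from a client; the port and route path below are placeholders, since neither appears in this excerpt:

    # Illustrative only: URL and model identifier are placeholders; check the
    # server script for the actual route registered for load_model() and the port.
    import requests

    params = {
        "_model": "tts_models--en--vctk--vits/model_file.pth",  # hypothetical model identifier
        "_progress": "false",
    }
    resp = requests.get("http://localhost:5100/api/placeholder-coqui/load", params=params)
    print(resp.text)  # the status string returned by coqui.load_model()

Note that _gpu is not taken from the query string; it is derived server-side from the coqui_gpu launch argument.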
tts_coqui.py (25 lines changed)
@@ -24,6 +24,7 @@ multspeak = "None"
 loadedModel = "None"
 spkdirectory = ""
 multspeakjson = ""
+status = ""
 _gpu = False
 is_coqui_available = os.environ.get("COQUI_STUDIO_TOKEN")
 
@@ -58,10 +59,9 @@ def load_model(_model, _gpu, _progress):
     global loadedModel
     global multlang
     global multspeak
+    global status
-    status = None
-
 
-    print("GPU is set to: ", _gpu)
+    #print("GPU is set to: ", _gpu)
 
     _model_directory, _file = os.path.split(_model)
 
@@ -94,8 +94,22 @@ def load_model(_model, _gpu, _progress):
 
     _config_path = os.path.join(_target_directory, _modified_speaker_id, "config.json")
 
+
+    #prevent multiple loading
+    if status == "Loading":
+        status = "Loading"
+        print(status)
+        return status
+
+    #prevent multiple loading
+    if os.path.join(_modified_speaker_id, _file) == loadedModel:
+        status = "Already Loaded"
+        print(status)
+        return status
+
     if model_type(_config_path) == "tortoise":
         print("Loading Tortoise...")
+        status = "Loading"
         _loadtortoisemodel = _model_directory.replace("--", "/")
         tts = TTS(_loadtortoisemodel, gpu=_gpu)
         loadedModel = _model
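These added lines are the "prevent double load" part of the commit: the module-level status and loadedModel variables act as a guard, so a request that arrives while a load is still running gets "Loading" back, and a request for the model that is already resident gets "Already Loaded" without constructing a new TTS object (this is also why the earlier hunk stops resetting status = None at the top of the function). A standalone sketch of the same pattern, with hypothetical names rather than the repository's code:

    # Illustrative sketch of the status-guard pattern; expensive_load() is a placeholder.
    status = ""
    loaded_model = "None"

    def load(model_id):
        global status, loaded_model
        if status == "Loading":            # a previous load is still in progress
            return "Loading"
        if model_id == loaded_model:       # requested model is already resident
            return "Already Loaded"
        status = "Loading"
        try:
            expensive_load(model_id)       # stands in for the real TTS(...) construction
            loaded_model = model_id
            status = "Loaded"
        except Exception:
            status = "Unknown error occurred"
        return status

Because the guard is only a module variable, it stops repeated requests but is not thread-safe; wrapping the check-and-set in a threading.Lock would be needed if the server handles these requests concurrently.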
@@ -110,7 +124,7 @@ def load_model(_model, _gpu, _progress):
     if model_type(_config_path) not in _loadertypes:
         try:
             print("Loading ", model_type(_config_path))
-            print("Load Line:", _model_path, _progress, _gpu)
+            #print("Load Line:", _model_path, _progress, _gpu)
             tts = TTS(model_path=_model_path, config_path=_config_path, progress_bar=_progress, gpu=_gpu)
             status = "Loaded"
             loadedModel = _model
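The load call left unchanged here uses the Coqui TTS Python API with an explicit model/config pair. A minimal usage sketch with placeholder paths, assuming a standard Coqui TTS installation:

    # Minimal sketch; both paths are placeholders for wherever the model was downloaded.
    from TTS.api import TTS

    tts = TTS(
        model_path="models/placeholder/model_file.pth",
        config_path="models/placeholder/config.json",
        progress_bar=False,
        gpu=False,
    )
    tts.tts_to_file(text="Hello from Coqui TTS.", file_path="out.wav")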
@@ -121,7 +135,8 @@ def load_model(_model, _gpu, _progress):
             pass
 
     type = model_type(_config_path)
-    print("Type: ", type)
+    #print("Type: ", type)
+    #print("Status", status)
 
     if status is None:
         status = "Unknown error occurred"
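With these changes load_model() always reports one of a small set of strings: "Loading", "Already Loaded", "Loaded", or "Unknown error occurred". A short illustrative sketch of how a caller might branch on that result (the model path is a placeholder):

    # Illustrative only: interprets the status strings returned by tts_coqui.load_model().
    import tts_coqui as coqui  # assumes the SillyTavern-Extras directory is on the path

    result = coqui.load_model("models/placeholder/model_file.pth", _gpu=False, _progress=False)
    if result in ("Loaded", "Already Loaded"):
        print("model ready")
    elif result == "Loading":
        print("a previous load is still in progress")
    else:
        print("load failed:", result)  # e.g. "Unknown error occurred"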