Model: Prompt users to install extras if dependencies don't exist

Examples: tokenizers, lm-format-enforcer (lmfe), outlines.

Signed-off-by: kingbri <bdashore3@proton.me>
This commit is contained in:
kingbri
2024-03-22 22:13:55 -04:00
parent f952b81ccf
commit 1755f284cf
2 changed files with 19 additions and 4 deletions

View File

@@ -71,8 +71,9 @@ class ExLlamaV2Grammar:
logger.error(
"Skipping JSON schema parsing because "
"lm-format-enforcer is not installed.\n"
"Please run the following command: "
"pip install lm-format-enforcer"
"Please run the following command in your environment "
"to install extra packages:\n"
"pip install -U .[extras]"
)
return
@@ -113,7 +114,9 @@ class ExLlamaV2Grammar:
except ImportError:
logger.error(
"Skipping EBNF parsing because Outlines is not installed.\n"
"Please run the following command: pip install outlines"
"Please run the following command in your environment "
"to install extra packages:\n"
"pip install -U .[extras]"
)
return

View File

@@ -399,7 +399,19 @@ class ExllamaV2Container:
ExLlamaV2Tokenizer.extended_id_to_piece = {}
ExLlamaV2Tokenizer.extended_piece_to_id = {}
self.tokenizer = ExLlamaV2Tokenizer(self.config)
try:
self.tokenizer = ExLlamaV2Tokenizer(self.config)
except AssertionError as exc:
if "HF tokenizer" in str(exc):
raise ImportError(
"Could not create ExllamaV2's tokenizer for this model "
"because tokenizers is not installed.\n"
"Please run the following command in your environment "
"to install extra packages:\n"
"pip install -U .[extras]"
) from exc
else:
raise exc
# Calculate autosplit reserve for all GPUs
gpu_count = torch.cuda.device_count()