Mirror of https://github.com/theroyallab/tabbyAPI.git, synced 2026-03-15 00:07:28 +00:00
Dependencies: Require tokenizers
This is used for some models and isn't too big in size (compared to other huggingface dependencies), so include it by default. Signed-off-by: kingbri <bdashore3@proton.me>
This commit is contained in:
@@ -399,19 +399,7 @@ class ExllamaV2Container:
         ExLlamaV2Tokenizer.extended_id_to_piece = {}
         ExLlamaV2Tokenizer.extended_piece_to_id = {}

-        try:
-            self.tokenizer = ExLlamaV2Tokenizer(self.config)
-        except AssertionError as exc:
-            if "HF tokenizer" in str(exc):
-                raise ImportError(
-                    "Could not create ExllamaV2's tokenizer for this model "
-                    "because tokenizers is not installed.\n"
-                    "Please run the following command in your environment "
-                    "to install extra packages:\n"
-                    "pip install -U .[extras]"
-                ) from exc
-            else:
-                raise exc
+        self.tokenizer = ExLlamaV2Tokenizer(self.config)

         # Calculate autosplit reserve for all GPUs
         gpu_count = torch.cuda.device_count()
||||
@@ -25,6 +25,7 @@ dependencies = [
     "loguru",
     "sse-starlette",
     "packaging",
+    "tokenizers",
 ]

 [project.urls]
||||
@@ -32,7 +33,6 @@ dependencies = [

 [project.optional-dependencies]
 extras = [
-    "tokenizers",
     "outlines",
     "lm-format-enforcer",
 ]
Reference in New Issue
Block a user