mirror of
https://github.com/theroyallab/tabbyAPI.git
synced 2026-04-27 09:41:54 +00:00
feat: workflows for formatting/linting (#35)
* add github workflows for pylint and yapf * yapf * docstrings for auth * fix auth.py * fix generators.py * fix gen_logging.py * fix main.py * fix model.py * fix templating.py * fix utils.py * update formatting.sh to include subdirs for pylint * fix model_test.py * fix wheel_test.py * rename utils to utils_oai * fix OAI/utils_oai.py * fix completion.py * fix token.py * fix lora.py * fix common.py * add pylintrc and fix model.py * finish up pylint * fix attribute error * main.py formatting * add formatting batch script * Main: Remove unnecessary global Linter suggestion. Signed-off-by: kingbri <bdashore3@proton.me> * switch to ruff * Formatting + Linting: Add ruff.toml Signed-off-by: kingbri <bdashore3@proton.me> * Formatting + Linting: Switch scripts to use ruff Also remove the file and recent file change functions from both scripts. Signed-off-by: kingbri <bdashore3@proton.me> * Tree: Format and lint Signed-off-by: kingbri <bdashore3@proton.me> * Scripts + Workflows: Format Signed-off-by: kingbri <bdashore3@proton.me> * Tree: Remove pylint flags We use ruff now Signed-off-by: kingbri <bdashore3@proton.me> * Tree: Format Signed-off-by: kingbri <bdashore3@proton.me> * Formatting: Line length is 88 Use the same value as Black. Signed-off-by: kingbri <bdashore3@proton.me> * Tree: Format Update to new line length rules. Signed-off-by: kingbri <bdashore3@proton.me> --------- Authored-by: AlpinDale <52078762+AlpinDale@users.noreply.github.com> Co-authored-by: kingbri <bdashore3@proton.me>
This commit is contained in:
114
OAI/utils_oai.py
Normal file
114
OAI/utils_oai.py
Normal file
@@ -0,0 +1,114 @@
|
||||
""" Utility functions for the OpenAI server. """
|
||||
import pathlib
|
||||
from typing import Optional
|
||||
|
||||
from OAI.types.chat_completion import (
|
||||
ChatCompletionMessage,
|
||||
ChatCompletionRespChoice,
|
||||
ChatCompletionStreamChunk,
|
||||
ChatCompletionResponse,
|
||||
ChatCompletionStreamChoice,
|
||||
)
|
||||
from OAI.types.completion import CompletionResponse, CompletionRespChoice
|
||||
from OAI.types.common import UsageStats
|
||||
from OAI.types.lora import LoraList, LoraCard
|
||||
from OAI.types.model import ModelList, ModelCard
|
||||
|
||||
from utils import unwrap
|
||||
|
||||
|
||||
def create_completion_response(
    text: str,
    prompt_tokens: int,
    completion_tokens: int,
    model_name: Optional[str],
):
    """Build a CompletionResponse wrapping the provided generated text.

    Token counts are folded into a UsageStats record; a missing model
    name falls back to the empty string.
    """
    usage = UsageStats(
        prompt_tokens=prompt_tokens,
        completion_tokens=completion_tokens,
        total_tokens=prompt_tokens + completion_tokens,
    )

    return CompletionResponse(
        choices=[CompletionRespChoice(finish_reason="Generated", text=text)],
        model=unwrap(model_name, ""),
        usage=usage,
    )
|
||||
|
||||
|
||||
def create_chat_completion_response(
    text: str,
    prompt_tokens: int,
    completion_tokens: int,
    model_name: Optional[str],
):
    """Build a ChatCompletionResponse for the provided assistant text.

    The text becomes a single assistant message inside one choice; token
    counts are summarized in a UsageStats record.
    """
    reply = ChatCompletionMessage(role="assistant", content=text)
    usage = UsageStats(
        prompt_tokens=prompt_tokens,
        completion_tokens=completion_tokens,
        total_tokens=prompt_tokens + completion_tokens,
    )

    return ChatCompletionResponse(
        choices=[ChatCompletionRespChoice(finish_reason="Generated", message=reply)],
        model=unwrap(model_name, ""),
        usage=usage,
    )
|
||||
|
||||
|
||||
def create_chat_completion_stream_chunk(
    const_id: str,
    text: Optional[str] = None,
    model_name: Optional[str] = None,
    finish_reason: Optional[str] = None,
):
    """Build one streaming chunk of a chat completion.

    A terminating chunk (finish_reason set) carries an empty delta;
    otherwise the delta is an assistant message with the new text.
    """
    # Final chunks send no content, only the finish reason.
    delta = {} if finish_reason else ChatCompletionMessage(
        role="assistant", content=text
    )

    # finish_reason is legitimately None for intermediate chunks
    stream_choice = ChatCompletionStreamChoice(
        finish_reason=finish_reason, delta=delta
    )

    return ChatCompletionStreamChunk(
        id=const_id,
        choices=[stream_choice],
        model=unwrap(model_name, ""),
    )
|
||||
|
||||
|
||||
def get_model_list(model_path: pathlib.Path, draft_model_path: Optional[str] = None):
    """Get the list of models from the provided path.

    Args:
        model_path: Directory whose immediate subdirectories are models.
        draft_model_path: Optional draft-models directory, excluded from
            the listing when given.

    Returns:
        A ModelList with one ModelCard per model directory found.
    """
    # Resolve to a pathlib.Path for equality comparison against iterdir()
    # entries. Bound to a separate local so the str-annotated parameter
    # is not rebound to a different type.
    draft_dir = pathlib.Path(draft_model_path).resolve() if draft_model_path else None

    model_card_list = ModelList()
    for path in model_path.iterdir():
        # Skip plain files and the draft models directory itself
        if path.is_dir() and path != draft_dir:
            model_card_list.data.append(ModelCard(id=path.name))

    return model_card_list
|
||||
|
||||
|
||||
def get_lora_list(lora_path: pathlib.Path):
    """Get the list of Lora cards from the provided path.

    Args:
        lora_path: Directory whose immediate subdirectories are loras.

    Returns:
        A LoraList with one LoraCard per lora directory found.
    """
    lora_list = LoraList()
    # Only directories are loras; plain files are ignored
    lora_list.data.extend(
        LoraCard(id=path.name) for path in lora_path.iterdir() if path.is_dir()
    )

    return lora_list
|
||||
Reference in New Issue
Block a user