Mirror of https://github.com/theroyallab/tabbyAPI.git, synced 2026-04-20 22:38:55 +00:00
API: Split functions into their own files
Previously, the generation functions were bundled with the request functions, which made the overall code structure and API hard to read. Split these up and clean up many of the methods that were previously overlooked in the API itself.

Signed-off-by: kingbri <bdashore3@proton.me>
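For orientation, here is a minimal sketch (not part of this commit) of how a router endpoint could call the split-out chat completion helpers. The route path, the data.stream check, the placeholder model_path, and the plain StreamingResponse SSE framing are assumptions for illustration only; the actual router wiring may differ.

# Hypothetical router wiring, assuming the helpers from this commit are importable.
import pathlib

from fastapi import APIRouter, Request
from fastapi.responses import StreamingResponse

from endpoints.OAI.types.chat_completion import ChatCompletionRequest
from endpoints.OAI.utils.chat_completion import (
    format_prompt_with_template,
    generate_chat_completion,
    stream_generate_chat_completion,
)

router = APIRouter()


@router.post("/v1/chat/completions")  # assumed route path
async def chat_completion_endpoint(request: Request, data: ChatCompletionRequest):
    # Render the chat messages into a single prompt with the model's template
    prompt = format_prompt_with_template(data)

    # Placeholder: a real router would resolve this from the loaded model
    model_path = pathlib.Path("models/example-model")

    if data.stream:
        async def event_stream():
            # Wrap each JSON chunk in minimal SSE framing; the real server may
            # use a dedicated SSE response class instead.
            async for chunk in stream_generate_chat_completion(
                prompt, request, data, model_path
            ):
                yield f"data: {chunk}\n\n"

        return StreamingResponse(event_stream(), media_type="text/event-stream")

    # Non-streaming path: run generation in a threadpool and return the response
    return await generate_chat_completion(prompt, request, data, model_path)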
endpoints/OAI/utils/chat_completion.py (Normal file, 199 lines added)
@@ -0,0 +1,199 @@
"""Chat completion utilities for OAI server."""
import pathlib
from typing import Optional
from uuid import uuid4

from fastapi import HTTPException, Request
from fastapi.concurrency import run_in_threadpool
from jinja2 import TemplateError
from loguru import logger

from common import model
from common.generators import release_semaphore
from common.templating import get_prompt_from_template
from common.utils import get_generator_error, handle_request_error, unwrap
from endpoints.OAI.types.chat_completion import (
    ChatCompletionLogprobs,
    ChatCompletionLogprob,
    ChatCompletionMessage,
    ChatCompletionRequest,
    ChatCompletionRespChoice,
    ChatCompletionStreamChunk,
    ChatCompletionResponse,
    ChatCompletionStreamChoice,
)
from endpoints.OAI.types.common import UsageStats


def _create_response(generation: dict, model_name: Optional[str]):
    """Create a chat completion response from the provided text."""

    message = ChatCompletionMessage(
        role="assistant", content=unwrap(generation.get("text"), "")
    )

    logprob_response = None

    token_probs = unwrap(generation.get("token_probs"), {})
    if token_probs:
        logprobs = unwrap(generation.get("logprobs"), [])

        collected_token_probs = []
        for index, token in enumerate(token_probs.keys()):
            top_logprobs = [
                ChatCompletionLogprob(token=token, logprob=logprob)
                for token, logprob in logprobs[index].items()
            ]

            collected_token_probs.append(
                ChatCompletionLogprob(
                    token=token,
                    logprob=token_probs[token],
                    top_logprobs=top_logprobs,
                )
            )

        logprob_response = ChatCompletionLogprobs(content=collected_token_probs)

    choice = ChatCompletionRespChoice(
        finish_reason="Generated", message=message, logprobs=logprob_response
    )

    prompt_tokens = unwrap(generation.get("prompt_tokens"), 0)
    completion_tokens = unwrap(generation.get("completion_tokens"), 0)

    response = ChatCompletionResponse(
        choices=[choice],
        model=unwrap(model_name, ""),
        usage=UsageStats(
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            total_tokens=prompt_tokens + completion_tokens,
        ),
    )

    return response


def _create_stream_chunk(
    const_id: str,
    generation: Optional[dict] = None,
    model_name: Optional[str] = None,
    finish_reason: Optional[str] = None,
):
    """Create a chat completion stream chunk from the provided text."""

    logprob_response = None

    if finish_reason:
        message = {}
    else:
        message = ChatCompletionMessage(
            role="assistant", content=unwrap(generation.get("text"), "")
        )

        token_probs = unwrap(generation.get("token_probs"), {})
        if token_probs:
            logprobs = unwrap(generation.get("logprobs"), {})
            top_logprobs = [
                ChatCompletionLogprob(token=token, logprob=logprob)
                for token, logprob in logprobs.items()
            ]

            generated_token = next(iter(token_probs))
            token_prob_response = ChatCompletionLogprob(
                token=generated_token,
                logprob=token_probs[generated_token],
                top_logprobs=top_logprobs,
            )

            logprob_response = ChatCompletionLogprobs(content=[token_prob_response])

    # The finish reason can be None
    choice = ChatCompletionStreamChoice(
        finish_reason=finish_reason, delta=message, logprobs=logprob_response
    )

    chunk = ChatCompletionStreamChunk(
        id=const_id, choices=[choice], model=unwrap(model_name, "")
    )

    return chunk


def format_prompt_with_template(data: ChatCompletionRequest):
    try:
        special_tokens_dict = model.container.get_special_tokens(
            unwrap(data.add_bos_token, True),
            unwrap(data.ban_eos_token, False),
        )

        return get_prompt_from_template(
            data.messages,
            model.container.prompt_template,
            data.add_generation_prompt,
            special_tokens_dict,
        )
    except KeyError as exc:
        raise HTTPException(
            400,
            "Could not find a Conversation from prompt template "
            f"'{model.container.prompt_template.name}'. "
            "Check your spelling?",
        ) from exc
    except TemplateError as exc:
        raise HTTPException(
            400,
            f"TemplateError: {str(exc)}",
        ) from exc


async def stream_generate_chat_completion(
    prompt: str, request: Request, data: ChatCompletionRequest, model_path: pathlib.Path
):
    """Generator for the generation process."""
    try:
        const_id = f"chatcmpl-{uuid4().hex}"

        new_generation = model.container.generate_gen(prompt, **data.to_gen_params())
        for generation in new_generation:
            # Get out if the request gets disconnected
            if await request.is_disconnected():
                release_semaphore()
                logger.error("Chat completion generation cancelled by user.")
                return

            response = _create_stream_chunk(const_id, generation, model_path.name)

            yield response.model_dump_json()

        # Yield a finish response on successful generation
        finish_response = _create_stream_chunk(const_id, finish_reason="stop")

        yield finish_response.model_dump_json()
    except Exception:
        yield get_generator_error(
            "Chat completion aborted. Please check the server console."
        )


async def generate_chat_completion(
    prompt: str, request: Request, data: ChatCompletionRequest, model_path: pathlib.Path
):
    try:
        generation = await run_in_threadpool(
            model.container.generate,
            prompt,
            **data.to_gen_params(),
        )
        response = _create_response(generation, model_path.name)

        return response
    except Exception as exc:
        error_message = handle_request_error(
            "Chat completion aborted. Maybe the model was unloaded? "
            "Please check the server console."
        ).error.message

        # Server error if there's a generation exception
        raise HTTPException(503, error_message) from exc
@@ -1,17 +1,15 @@
-""" Utility functions for the OpenAI server. """
+"""Completion utilities for OAI server."""
+import pathlib
+from fastapi import HTTPException, Request
+from fastapi.concurrency import run_in_threadpool
+from loguru import logger
 from typing import Optional
 
-from common.utils import unwrap
-from endpoints.OAI.types.chat_completion import (
-    ChatCompletionLogprobs,
-    ChatCompletionLogprob,
-    ChatCompletionMessage,
-    ChatCompletionRespChoice,
-    ChatCompletionStreamChunk,
-    ChatCompletionResponse,
-    ChatCompletionStreamChoice,
-)
+from common import model
+from common.generators import release_semaphore
+from common.utils import get_generator_error, handle_request_error, unwrap
 from endpoints.OAI.types.completion import (
+    CompletionRequest,
     CompletionResponse,
     CompletionRespChoice,
     CompletionLogProbs,
@@ -19,7 +17,7 @@ from endpoints.OAI.types.completion import (
 from endpoints.OAI.types.common import UsageStats
 
 
-def create_completion_response(generation: dict, model_name: Optional[str]):
+def _create_response(generation: dict, model_name: Optional[str]):
     """Create a completion response from the provided text."""
 
     logprob_response = None
@@ -58,97 +56,49 @@ def create_completion_response(generation: dict, model_name: Optional[str]):
     return response
 
 
-def create_chat_completion_response(generation: dict, model_name: Optional[str]):
-    """Create a chat completion response from the provided text."""
-
-    message = ChatCompletionMessage(
-        role="assistant", content=unwrap(generation.get("text"), "")
-    )
-
-    logprob_response = None
-
-    token_probs = unwrap(generation.get("token_probs"), {})
-    if token_probs:
-        logprobs = unwrap(generation.get("logprobs"), [])
-
-        collected_token_probs = []
-        for index, token in enumerate(token_probs.keys()):
-            top_logprobs = [
-                ChatCompletionLogprob(token=token, logprob=logprob)
-                for token, logprob in logprobs[index].items()
-            ]
-
-            collected_token_probs.append(
-                ChatCompletionLogprob(
-                    token=token,
-                    logprob=token_probs[token],
-                    top_logprobs=top_logprobs,
-                )
-            )
-
-        logprob_response = ChatCompletionLogprobs(content=collected_token_probs)
-
-    choice = ChatCompletionRespChoice(
-        finish_reason="Generated", message=message, logprobs=logprob_response
-    )
-
-    prompt_tokens = unwrap(generation.get("prompt_tokens"), 0)
-    completion_tokens = unwrap(generation.get("completion_tokens"), 0)
-
-    response = ChatCompletionResponse(
-        choices=[choice],
-        model=unwrap(model_name, ""),
-        usage=UsageStats(
-            prompt_tokens=prompt_tokens,
-            completion_tokens=completion_tokens,
-            total_tokens=prompt_tokens + completion_tokens,
-        ),
-    )
-
-    return response
-
-
-def create_chat_completion_stream_chunk(
-    const_id: str,
-    generation: Optional[dict] = None,
-    model_name: Optional[str] = None,
-    finish_reason: Optional[str] = None,
+async def stream_generate_completion(
+    request: Request, data: CompletionRequest, model_path: pathlib.Path
 ):
-    """Create a chat completion stream chunk from the provided text."""
+    """Streaming generation for completions."""
 
-    logprob_response = None
-
-    if finish_reason:
-        message = {}
-    else:
-        message = ChatCompletionMessage(
-            role="assistant", content=unwrap(generation.get("text"), "")
-        )
-
-        token_probs = unwrap(generation.get("token_probs"), {})
-        if token_probs:
-            logprobs = unwrap(generation.get("logprobs"), {})
-            top_logprobs = [
-                ChatCompletionLogprob(token=token, logprob=logprob)
-                for token, logprob in logprobs.items()
-            ]
-
-            generated_token = next(iter(token_probs))
-            token_prob_response = ChatCompletionLogprob(
-                token=generated_token,
-                logprob=token_probs[generated_token],
-                top_logprobs=top_logprobs,
-            )
-
-            logprob_response = ChatCompletionLogprobs(content=[token_prob_response])
-
-    # The finish reason can be None
-    choice = ChatCompletionStreamChoice(
-        finish_reason=finish_reason, delta=message, logprobs=logprob_response
-    )
-
-    chunk = ChatCompletionStreamChunk(
-        id=const_id, choices=[choice], model=unwrap(model_name, "")
-    )
-
-    return chunk
+    try:
+        new_generation = model.container.generate_gen(
+            data.prompt, **data.to_gen_params()
+        )
+        for generation in new_generation:
+            # Get out if the request gets disconnected
+            if await request.is_disconnected():
+                release_semaphore()
+                logger.error("Completion generation cancelled by user.")
+                return
+
+            response = _create_response(generation, model_path.name)
+
+            yield response.model_dump_json()
+
+        # Yield a finish response on successful generation
+        yield "[DONE]"
+    except Exception:
+        yield get_generator_error(
+            "Completion aborted. Please check the server console."
+        )
+
+
+async def generate_completion(data: CompletionRequest, model_path: pathlib.Path):
+    """Non-streaming generate for completions"""
+
+    try:
+        generation = await run_in_threadpool(
+            model.container.generate, data.prompt, **data.to_gen_params()
+        )
+
+        response = _create_response(generation, model_path.name)
+        return response
+    except Exception as exc:
+        error_message = handle_request_error(
+            "Completion aborted. Maybe the model was unloaded? "
+            "Please check the server console."
+        ).error.message
+
+        # Server error if there's a generation exception
+        raise HTTPException(503, error_message) from exc
@@ -1,7 +1,19 @@
 import pathlib
+from asyncio import CancelledError
+from fastapi import Request
+from loguru import logger
 from typing import Optional
 
-from endpoints.OAI.types.model import ModelCard, ModelList
+from common import model
+from common.generators import release_semaphore
+from common.utils import get_generator_error
+
+from endpoints.OAI.types.model import (
+    ModelCard,
+    ModelList,
+    ModelLoadRequest,
+    ModelLoadResponse,
+)
 
 
 def get_model_list(model_path: pathlib.Path, draft_model_path: Optional[str] = None):
@@ -20,3 +32,55 @@ def get_model_list(model_path: pathlib.Path, draft_model_path: Optional[str] = None):
         model_card_list.data.append(model_card)  # pylint: disable=no-member
 
     return model_card_list
+
+
+async def stream_model_load(
+    request: Request,
+    data: ModelLoadRequest,
+    model_path: pathlib.Path,
+    draft_model_path: str,
+):
+    """Request generation wrapper for the loading process."""
+
+    # Set the draft model path if it exists
+    load_data = data.model_dump()
+    if draft_model_path:
+        load_data["draft"]["draft_model_dir"] = draft_model_path
+
+    load_status = model.load_model_gen(model_path, **load_data)
+    try:
+        async for module, modules, model_type in load_status:
+            if await request.is_disconnected():
+                release_semaphore()
+                logger.error(
+                    "Model load cancelled by user. "
+                    "Please make sure to run unload to free up resources."
+                )
+                return
+
+            if module != 0:
+                response = ModelLoadResponse(
+                    model_type=model_type,
+                    module=module,
+                    modules=modules,
+                    status="processing",
+                )
+
+                yield response.model_dump_json()
+
+            if module == modules:
+                response = ModelLoadResponse(
+                    model_type=model_type,
+                    module=module,
+                    modules=modules,
+                    status="finished",
+                )
+
+                yield response.model_dump_json()
+    except CancelledError:
+        logger.error(
+            "Model load cancelled by user. "
+            "Please make sure to run unload to free up resources."
+        )
+    except Exception as exc:
+        yield get_generator_error(str(exc))