From 7ab7ffd5629ce0a4a5c563e9b495e25a3211018c Mon Sep 17 00:00:00 2001
From: DocShotgun <126566557+DocShotgun@users.noreply.github.com>
Date: Sun, 26 May 2024 15:48:18 -0700
Subject: [PATCH] Tree: Format

---
 common/args.py               | 2 +-
 endpoints/OAI/types/model.py | 4 +---
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/common/args.py b/common/args.py
index 09c0620..14508a7 100644
--- a/common/args.py
+++ b/common/args.py
@@ -85,7 +85,7 @@ def add_model_args(parser: argparse.ArgumentParser):
     model_group.add_argument(
         "--cache-size",
         type=int,
-        help="The size of the prompt cache (in number of tokens) to allocate"
+        help="The size of the prompt cache (in number of tokens) to allocate",
     )
     model_group.add_argument(
         "--rope-scale", type=float, help="Sets rope_scale or compress_pos_emb"
diff --git a/endpoints/OAI/types/model.py b/endpoints/OAI/types/model.py
index 9fdbf92..1a98c17 100644
--- a/endpoints/OAI/types/model.py
+++ b/endpoints/OAI/types/model.py
@@ -74,9 +74,7 @@ class ModelLoadRequest(BaseModel):
         examples=[4096],
     )
     cache_size: Optional[int] = Field(
-        description=(
-            "Number in tokens, must be greater than or equal to max_seq_len"
-        ),
+        description=("Number in tokens, must be greater than or equal to max_seq_len"),
         default=None,
         examples=[4096],
     )