mirror of
https://github.com/theroyallab/tabbyAPI.git
synced 2026-03-14 15:57:27 +00:00
OAI: Fix model parameter placement
Accidentally edited the Model Card parameters instead of the model load request ones. Signed-off-by: kingbri <bdashore3@proton.me>
This commit is contained in:
@@ -6,7 +6,7 @@ from gen_logging import LogConfig
|
||||
class ModelCardParameters(BaseModel):
|
||||
max_seq_len: Optional[int] = 4096
|
||||
rope_scale: Optional[float] = 1.0
|
||||
rope_alpha: Optional[float] = None
|
||||
rope_alpha: Optional[float] = 1.0
|
||||
prompt_template: Optional[str] = None
|
||||
cache_mode: Optional[str] = "FP16"
|
||||
draft: Optional['ModelCard'] = None
|
||||
@@ -35,7 +35,7 @@ class ModelLoadRequest(BaseModel):
|
||||
gpu_split_auto: Optional[bool] = True
|
||||
gpu_split: Optional[List[float]] = Field(default_factory=list)
|
||||
rope_scale: Optional[float] = 1.0
|
||||
rope_alpha: Optional[float] = 1.0
|
||||
rope_alpha: Optional[float] = None
|
||||
no_flash_attention: Optional[bool] = False
|
||||
# low_mem: Optional[bool] = False
|
||||
cache_mode: Optional[str] = "FP16"
|
||||
|
||||
Reference in New Issue
Block a user