Builds successfully

This commit is contained in:
Iwan Kawrakow
2025-08-13 14:31:48 +03:00
parent 6b6d25bfbf
commit 949b686412
10 changed files with 3148 additions and 1863 deletions

View File

@@ -70,50 +70,52 @@ extern "C" {
typedef int32_t llama_seq_id;
// Tokenizer / vocabulary family of a model.
// NOTE: the diff rendering had duplicated enumerators 0-4 (old + new hunk lines),
// which is an enumerator-redefinition error in C; the list below keeps one copy of each.
enum llama_vocab_type {
    LLAMA_VOCAB_TYPE_NONE   = 0, // For models without vocab
    LLAMA_VOCAB_TYPE_SPM    = 1, // LLaMA tokenizer based on byte-level BPE with byte fallback
    LLAMA_VOCAB_TYPE_BPE    = 2, // GPT-2 tokenizer based on byte-level BPE
    LLAMA_VOCAB_TYPE_WPM    = 3, // BERT tokenizer based on WordPiece
    LLAMA_VOCAB_TYPE_UGM    = 4, // T5 tokenizer based on Unigram
    LLAMA_VOCAB_TYPE_RWKV   = 5, // RWKV tokenizer based on greedy tokenization
    LLAMA_VOCAB_TYPE_PLAMO2 = 6, // PLaMo-2 tokenizer based on Aho-Corasick with dynamic programming
};
// pre-tokenization types
// Pre-tokenization scheme applied before the main tokenizer.
// NOTE(review): these values are serialized in GGUF metadata and are ABI-relevant —
// do NOT renumber. Values 23-27 are intentionally skipped here, and entries >= 36
// deliberately diverge from upstream llama.cpp numbering (see per-line notes below);
// presumably this fork resynced at 28 — confirm against the GGUF conversion scripts.
enum llama_vocab_pre_type {
    LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0,
    LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1,
    LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM = 2,
    LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3,
    LLAMA_VOCAB_PRE_TYPE_FALCON = 4,
    LLAMA_VOCAB_PRE_TYPE_MPT = 5,
    LLAMA_VOCAB_PRE_TYPE_STARCODER = 6,
    LLAMA_VOCAB_PRE_TYPE_GPT2 = 7,
    LLAMA_VOCAB_PRE_TYPE_REFACT = 8,
    LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9,
    LLAMA_VOCAB_PRE_TYPE_STABLELM2 = 10,
    LLAMA_VOCAB_PRE_TYPE_QWEN2 = 11,
    LLAMA_VOCAB_PRE_TYPE_OLMO = 12,
    LLAMA_VOCAB_PRE_TYPE_DBRX = 13,
    LLAMA_VOCAB_PRE_TYPE_SMAUG = 14,
    LLAMA_VOCAB_PRE_TYPE_PORO = 15,
    LLAMA_VOCAB_PRE_TYPE_CHATGLM3 = 16,
    LLAMA_VOCAB_PRE_TYPE_CHATGLM4 = 17,
    LLAMA_VOCAB_PRE_TYPE_VIKING = 18,
    LLAMA_VOCAB_PRE_TYPE_JAIS = 19,
    LLAMA_VOCAB_PRE_TYPE_TEKKEN = 20,
    LLAMA_VOCAB_PRE_TYPE_SMOLLM = 21,
    LLAMA_VOCAB_PRE_TYPE_CODESHELL = 22,
    // values 23-27 skipped (not used in this header)
    LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28, //llama.cpp lists this as 28
    LLAMA_VOCAB_PRE_TYPE_GPT4O = 29,
    LLAMA_VOCAB_PRE_TYPE_SUPERBPE = 30,
    LLAMA_VOCAB_PRE_TYPE_TRILLION = 31,
    LLAMA_VOCAB_PRE_TYPE_BAILINGMOE = 32,
    LLAMA_VOCAB_PRE_TYPE_LLAMA4 = 33,
    LLAMA_VOCAB_PRE_TYPE_FALCON_3 = 34,
    LLAMA_VOCAB_PRE_TYPE_FALCON_E = 35,
    LLAMA_VOCAB_PRE_TYPE_SEED_CODER = 36, //llama.cpp lists this as 35
    LLAMA_VOCAB_PRE_TYPE_HUNYUAN = 37, //llama.cpp lists this as 36
    LLAMA_VOCAB_PRE_TYPE_KIMI_K2 = 38, //llama.cpp lists this as 37
};
// (removed) a commented-out, byte-for-byte duplicate of the live llama_vocab_pre_type
// enum above was kept here; deleted as dead code — version control preserves history.
// note: these values should be synchronized with ggml_rope
// TODO: maybe move this enum to ggml.h (ggml_rope_type)