@@ -4196,6 +4196,9 @@ class SmolLM3Model(LlamaModel):
             chat_template = tokenizer.chat_template.replace("[:]", "")
             self.gguf_writer.add_chat_template(chat_template)
 
+@Model.register("SeedOssForCausalLM")
+class SeedOssModel(Model):
+    model_arch = gguf.MODEL_ARCH.SEED_OSS
 
 @Model.register("Dots1ForCausalLM")
 class Dots1Model(Qwen2MoeModel):

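For context, convert_hf_to_gguf.py dispatches on the checkpoint's `architectures` entry through the `Model.register` decorator, which is why these three lines are the entire converter-side change: they route `SeedOssForCausalLM` checkpoints to `gguf.MODEL_ARCH.SEED_OSS` and inherit all tensor handling from the base `Model`. A minimal sketch of that registry pattern (simplified, illustrative names; not the actual converter code):

# Minimal sketch of the decorator-based registry used by the converter
# (simplified; the real Model class carries much more state).
from typing import Callable, Type

class Model:
    _registry: dict[str, Type["Model"]] = {}
    model_arch = "unknown"

    @classmethod
    def register(cls, *names: str) -> Callable[[Type["Model"]], Type["Model"]]:
        def wrapper(sub: Type["Model"]) -> Type["Model"]:
            for name in names:
                cls._registry[name] = sub
            return sub
        return wrapper

    @classmethod
    def from_architecture(cls, name: str) -> Type["Model"]:
        # a KeyError here is what "unknown architecture" failures look like
        return cls._registry[name]

@Model.register("SeedOssForCausalLM")
class SeedOssModel(Model):
    model_arch = "seed_oss"

assert Model.from_architecture("SeedOssForCausalLM") is SeedOssModel
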
@@ -250,6 +250,7 @@ class MODEL_ARCH(IntEnum):
     BAILINGMOE2 = auto()
     MINIMAXM2 = auto()
     SMOLLM3 = auto()
+    SEED_OSS = auto()
 
 class MODEL_TENSOR(IntEnum):
     TOKEN_EMBD = auto()

@@ -398,6 +399,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
     MODEL_ARCH.BAILINGMOE2: "bailingmoe2",
     MODEL_ARCH.MINIMAXM2: "minimax-m2",
     MODEL_ARCH.SMOLLM3: "smollm3",
+    MODEL_ARCH.SEED_OSS: "seed_oss",
 }
 
 TENSOR_NAMES: dict[MODEL_TENSOR, str] = {

@@ -1362,6 +1364,20 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.FFN_DOWN,
         MODEL_TENSOR.FFN_UP,
     ],
+    MODEL_ARCH.SEED_OSS: [
+        MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.ATTN_NORM,
+        MODEL_TENSOR.ATTN_Q,
+        MODEL_TENSOR.ATTN_K,
+        MODEL_TENSOR.ATTN_V,
+        MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.ATTN_POST_NORM,
+        MODEL_TENSOR.FFN_GATE,
+        MODEL_TENSOR.FFN_DOWN,
+        MODEL_TENSOR.FFN_UP,
+        MODEL_TENSOR.OUTPUT_NORM,
+        MODEL_TENSOR.OUTPUT,
+    ],
     # TODO
 }

@@ -1537,78 +1553,90 @@ class ExpertGatingFuncType(IntEnum):
 # from llama_ftype in llama.h
 # ALL VALUES SHOULD BE THE SAME HERE AS THEY ARE OVER THERE.
 class LlamaFileType(IntEnum):
-    ALL_F32 = 0
-    MOSTLY_F16 = 1  # except 1d tensors
-    MOSTLY_Q4_0 = 2  # except 1d tensors
-    MOSTLY_Q4_1 = 3  # except 1d tensors
-    MOSTLY_Q4_1_SOME_F16 = 4  # tok_embeddings.weight and output.weight are F16
-    MOSTLY_Q8_0 = 7  # except 1d tensors
-    MOSTLY_Q5_0 = 8  # except 1d tensors
-    MOSTLY_Q5_1 = 9  # except 1d tensors
-    MOSTLY_Q2_K = 10  # except 1d tensors
-    MOSTLY_Q3_K = 11  # except 1d tensors
-    MOSTLY_Q4_K = 12  # except 1d tensors
-    MOSTLY_Q5_K = 13  # except 1d tensors
-    MOSTLY_Q6_K = 14  # except 1d tensors
-    MOSTLY_IQ2_XXS = 15  # except 1d tensors
-    MOSTLY_IQ2_XS = 16  # except 1d tensors
-    MOSTLY_IQ3_XXS = 17  # except 1d tensors
-    MOSTLY_IQ1_S = 18  # except 1d tensors
-    MOSTLY_IQ4_NL = 19  # except 1d tensors
-    MOSTLY_IQ3_S = 20  # except 1d tensors
-    MOSTLY_IQ2_S = 21  # except 1d tensors
-    MOSTLY_IQ4_XS = 22  # except 1d tensors
-    MOSTLY_IQ1_M = 23  # except 1d tensors
-    MOSTLY_BF16 = 24  # except 1d tensors
-    MOSTLY_MXFP4 = 25  # except 1d tensors
-    MOSTLY_Q4_0_4_4 = 26  # except 1d tensors
-    MOSTLY_Q4_0_4_8 = 27  # except 1d tensors
-    MOSTLY_Q4_0_8_8 = 28  # except 1d tensors
-    MOSTLY_Q6_0 = 127  # except 1d tensors
-    MOSTLY_IQ1_BN = 128  # except 1d tensors
-    MOSTLY_IQ2_BN = 129  # except 1d tensors
-    MOSTLY_IQ2_K = 130  # except 1d tensors
-    MOSTLY_IQ3_K = 131  # except 1d tensors
-    MOSTLY_IQ4_K = 132  # except 1d tensors
-    MOSTLY_IQ5_K = 133  # except 1d tensors
-    MOSTLY_IQ6_K = 134  # except 1d tensors
-    MOSTLY_IQ4_KS = 137  # except 1d tensors
-    MOSTLY_IQ2_KS = 138  # except 1d tensors
-    MOSTLY_IQ4_KSS = 139  # except 1d tensors
-    MOSTLY_Q8_KV = 140  # except 1d tensors
-    MOSTLY_IQ5_KS = 141  # except 1d tensors
-    MOSTLY_IQ2_KT = 142  # except 1d tensors
-    MOSTLY_IQ3_KT = 143  # except 1d tensors
-    MOSTLY_IQ4_KT = 144  # except 1d tensors
-    MOSTLY_Q4_0_R8 = 202  # except 1d tensors
-    MOSTLY_Q8_0_R8 = 207  # except 1d tensors
-    MOSTLY_Q5_0_R4 = 208  # except 1d tensors
-    MOSTLY_Q2_K_R4 = 210  # except 1d tensors
-    MOSTLY_Q3_K_R4 = 211  # except 1d tensors
-    MOSTLY_Q4_K_R4 = 212  # except 1d tensors
-    MOSTLY_Q5_K_R4 = 213  # except 1d tensors
-    MOSTLY_Q6_K_R4 = 214  # except 1d tensors
-    MOSTLY_IQ2_XXS_R4 = 215  # except 1d tensors
-    MOSTLY_IQ2_XS_R4 = 216  # except 1d tensors
-    MOSTLY_IQ3_XXS_R4 = 217  # except 1d tensors
-    MOSTLY_IQ1_S_R4 = 218  # except 1d tensors
-    MOSTLY_IQ4_NL_R4 = 219  # except 1d tensors
-    MOSTLY_IQ3_S_R4 = 220  # except 1d tensors
-    MOSTLY_IQ2_S_R4 = 221  # except 1d tensors
-    MOSTLY_IQ4_XS_R8 = 222  # except 1d tensors
-    MOSTLY_IQ1_M_R4 = 223  # except 1d tensors
-    MOSTLY_BF16_R16 = 224  # except 1d tensors
-    MOSTLY_Q6_0_R4 = 227  # except 1d tensors
-    MOSTLY_IQ2_BN_R4 = 329  # except 1d tensors
-    MOSTLY_IQ2_K_R4 = 330  # except 1d tensors
-    MOSTLY_IQ3_K_R4 = 331  # except 1d tensors
-    MOSTLY_IQ4_K_R4 = 332  # except 1d tensors
-    MOSTLY_IQ5_K_R4 = 333  # except 1d tensors
-    MOSTLY_IQ4_KS_R4 = 337  # except 1d tensors
-    MOSTLY_IQ5_KS_R4 = 341  # except 1d tensors
-    MOSTLY_Q8_KV_R8 = 398  # except 1d tensors
-    MOSTLY_Q8_K_R8 = 399  # except 1d tensors
+    ALL_F32 = 0
+    MOSTLY_F16 = 1  # except 1d tensors
+    MOSTLY_Q4_0 = 2  # except 1d tensors
+    MOSTLY_Q4_1 = 3  # except 1d tensors
+    MOSTLY_Q8_0 = 7  # except 1d tensors
+    MOSTLY_Q5_0 = 8  # except 1d tensors
+    MOSTLY_Q5_1 = 9  # except 1d tensors
+    MOSTLY_Q2_K = 10  # except 1d tensors
+    MOSTLY_Q3_K_S = 11  # except 1d tensors
+    MOSTLY_Q3_K_M = 12  # except 1d tensors
+    MOSTLY_Q3_K_L = 13  # except 1d tensors
+    MOSTLY_Q4_K_S = 14  # except 1d tensors
+    MOSTLY_Q4_K_M = 15  # except 1d tensors
+    MOSTLY_Q5_K_S = 16  # except 1d tensors
+    MOSTLY_Q5_K_M = 17  # except 1d tensors
+    MOSTLY_Q6_K = 18  # except 1d tensors
+    MOSTLY_IQ2_XXS = 19  # except 1d tensors
+    MOSTLY_IQ2_XS = 20  # except 1d tensors
+    MOSTLY_Q2_K_S = 21  # except 1d tensors
+    MOSTLY_IQ3_XS = 22  # except 1d tensors
+    MOSTLY_IQ3_XXS = 23  # except 1d tensors
+    MOSTLY_IQ1_S = 24  # except 1d tensors
+    MOSTLY_IQ4_NL = 25  # except 1d tensors
+    MOSTLY_IQ3_S = 26  # except 1d tensors
+    MOSTLY_IQ3_M = 27  # except 1d tensors
+    MOSTLY_IQ2_S = 28  # except 1d tensors
+    MOSTLY_IQ2_M = 29  # except 1d tensors
+    MOSTLY_IQ4_XS = 30  # except 1d tensors
+    MOSTLY_IQ1_M = 31  # except 1d tensors
+    MOSTLY_BF16 = 32  # except 1d tensors
+    MOSTLY_Q4_0_4_4 = 33  # except 1d tensors
+    MOSTLY_Q4_0_4_8 = 34  # except 1d tensors
+    MOSTLY_Q4_0_8_8 = 35  # except 1d tensors
+    MOSTLY_MXFP4 = 38  # except 1d tensors, 38 to be compatible with mainline
+
+    MOSTLY_Q6_0 = 135  # except 1d tensors
+    MOSTLY_IQ1_BN = 136  # except 1d tensors
+    MOSTLY_IQ2_BN = 137  # except 1d tensors
+    MOSTLY_IQ2_K = 138  # except 1d tensors
+    MOSTLY_IQ3_K = 139  # except 1d tensors
+    MOSTLY_IQ4_K = 140  # except 1d tensors
+    MOSTLY_IQ5_K = 141  # except 1d tensors
+    MOSTLY_IQ6_K = 142  # except 1d tensors
+    MOSTLY_IQ4_KS = 145  # except 1d tensors
+    MOSTLY_IQ3_KL = 146  # except 1d tensors
+    MOSTLY_IQ2_KS = 147  # except 1d tensors
+    MOSTLY_IQ4_KSS = 148  # except 1d tensors
+    MOSTLY_Q8_KV = 149  # except 1d tensors
+    MOSTLY_IQ5_KS = 150  # except 1d tensors
+    MOSTLY_IQ2_KT = 151  # except 1d tensors
+    MOSTLY_IQ3_KT = 152  # except 1d tensors
+    MOSTLY_IQ4_KT = 153  # except 1d tensors
+    MOSTLY_IQ3_KS = 154  # except 1d tensors
+    MOSTLY_IQ2_KL = 155  # except 1d tensors
+    MOSTLY_IQ1_KT = 156  # except 1d tensors
+
+    MOSTLY_Q4_0_R8 = 202  # except 1d tensors
+    MOSTLY_Q8_0_R8 = 207  # except 1d tensors
+    MOSTLY_Q5_0_R4 = 208  # except 1d tensors
+    MOSTLY_Q2_K_R4 = 210  # except 1d tensors
+    MOSTLY_Q3_K_R4 = 211  # except 1d tensors
+    MOSTLY_Q4_K_R4 = 214  # except 1d tensors
+    MOSTLY_Q5_K_R4 = 216  # except 1d tensors
+    MOSTLY_Q6_K_R4 = 218  # except 1d tensors
+    MOSTLY_IQ2_XXS_R4 = 219  # except 1d tensors
+    MOSTLY_IQ2_XS_R4 = 220  # except 1d tensors
+    MOSTLY_IQ3_XXS_R4 = 223  # except 1d tensors
+    MOSTLY_IQ1_S_R4 = 224  # except 1d tensors
+    MOSTLY_IQ4_NL_R4 = 225  # except 1d tensors
+    MOSTLY_IQ3_S_R4 = 226  # except 1d tensors
+    MOSTLY_IQ2_M_R4 = 229  # except 1d tensors
+    MOSTLY_IQ4_XS_R8 = 230  # except 1d tensors
+    MOSTLY_IQ1_M_R4 = 231  # except 1d tensors
+    MOSTLY_Q6_0_R4 = 335  # except 1d tensors
+    MOSTLY_BF16_R16 = 232  # except 1d tensors
+    MOSTLY_IQ2_BN_R4 = 337  # except 1d tensors
+    MOSTLY_IQ2_K_R4 = 338  # except 1d tensors
+    MOSTLY_IQ3_K_R4 = 339  # except 1d tensors
+    MOSTLY_IQ4_K_R4 = 340  # except 1d tensors
+    MOSTLY_IQ5_K_R4 = 341  # except 1d tensors
+    MOSTLY_IQ4_KS_R4 = 345  # except 1d tensors
+    MOSTLY_IQ5_KS_R4 = 350  # except 1d tensors
+    MOSTLY_Q8_KV_R8 = 398  # except 1d tensors
+    MOSTLY_Q8_K_R8 = 399  # except 1d tensors
 
     GUESSED = 1024  # not specified in the model file

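The header comment is the load-bearing part of this hunk: `LlamaFileType` must mirror `llama_ftype` in llama.h, and the old gguf-py copy had drifted (for example, `MOSTLY_Q6_K` was 14 instead of 18, and the ik-specific `IQ*_K`/`*_KT` and repacked `*_R4`/`*_R8` types sat at different offsets), so the whole enum body is rewritten. One way to catch such drift automatically is to diff the two enums with a small script; the following is an illustrative sketch, not a tool that exists in the repo:

# Hypothetical drift check: parse LLAMA_FTYPE_* values out of llama.h and
# compare them with gguf-py's LlamaFileType. Illustrative only -- the path
# and exact header layout are assumptions.
import re

def ftype_values_from_header(header_text: str) -> dict[str, int]:
    # matches lines like: LLAMA_FTYPE_MOSTLY_Q4_K = 12, // except 1d tensors
    pat = re.compile(r"LLAMA_FTYPE_(\w+)\s*=\s*(\d+)")
    return {m.group(1): int(m.group(2)) for m in pat.finditer(header_text)}

def check(header_text: str, py_enum) -> list[str]:
    c_vals = ftype_values_from_header(header_text)
    mismatches = []
    for member in py_enum:
        c_val = c_vals.get(member.name)
        if c_val is not None and c_val != member.value:
            mismatches.append(f"{member.name}: llama.h={c_val} gguf-py={member.value}")
    return mismatches

# usage sketch: check(open("include/llama.h").read(), LlamaFileType)
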
@@ -70,6 +70,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_SMOLLM3, "smollm3" },
     { LLM_ARCH_MISTRAL3, "mistral3" },
     { LLM_ARCH_MIMO2, "mimo2" },
+    { LLM_ARCH_SEED_OSS, "seed_oss" },
     { LLM_ARCH_UNKNOWN, "(unknown)" },
 };

@@ -69,6 +69,7 @@ enum llm_arch {
     LLM_ARCH_SMOLLM3,
     LLM_ARCH_MISTRAL3,
     LLM_ARCH_MIMO2,
+    LLM_ARCH_SEED_OSS,
     LLM_ARCH_UNKNOWN,
 };

@@ -3506,6 +3506,102 @@ ggml_cgraph * llm_build_context::build_stablelm() {
     return gf;
 }
 
+ggml_cgraph * llm_build_context::build_seedoss() {
+    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+
+    const int64_t n_embd_head = hparams.n_embd_head_v;
+    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+    GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+    struct ggml_tensor * cur;
+    struct ggml_tensor * inpL;
+
+    inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
+
+    // inp_pos - contains the positions
+    struct ggml_tensor * inp_pos = build_inp_pos();
+
+    // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+    struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
+
+    const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
+
+    for (int il = 0; il < n_layer; ++il) {
+        struct ggml_tensor * inpSA = inpL;
+
+        cur = llm_build_norm(ctx0, inpL, hparams, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, cb, il);
+        cb(cur, "attn_norm", il);
+
+        // self-attention
+        {
+            auto [Qcur, Kcur, Vcur] = llm_build_mul_mat_qkv(gf, cur, model.layers[il].wq, model.layers[il].bq,
+                    model.layers[il].wk, model.layers[il].bk,
+                    model.layers[il].wv, model.layers[il].bv, 0.f, il);
+
+            Qcur = ggml_rope_ext(
+                ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
+                n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                ext_factor, attn_factor, beta_fast, beta_slow
+            );
+            cb(Qcur, "Qcur", il);
+
+            Kcur = ggml_rope_ext(
+                ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
+                n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                ext_factor, attn_factor, beta_fast, beta_slow
+            );
+            cb(Kcur, "Kcur", il);
+
+            cur = llm_build_kv(ctx0, lctx, kv_self, gf,
+                    model.layers[il].wo, model.layers[il].bo,
+                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il);
+        }
+
+        if (il == n_layer - 1) {
+            struct ggml_tensor * inp_out_ids = build_inp_out_ids();
+            cur   = ggml_get_rows(ctx0, cur, inp_out_ids);
+            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+        }
+
+        struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+        cb(ffn_inp, "ffn_inp", il);
+
+        // feed-forward network
+        cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_post_norm, NULL, LLM_NORM_RMS, cb, il);
+        cb(cur, "attn_post_norm", il);
+        cur = llm_build_ffn(ctx0, lctx, model.layers[il].attn_post_norm, ffn_inp,
+                model.layers[il].ffn_up,   NULL, NULL,
+                model.layers[il].ffn_gate, NULL, NULL,
+                model.layers[il].ffn_down, NULL, NULL,
+                NULL,
+                LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
+        cb(cur, "ffn_out", il);
+
+        cur = ggml_add(ctx0, cur, ffn_inp);
+        cb(cur, "ffn_out", il);
+
+        cur = lctx.cvec.apply_to(ctx0, cur, il);
+        cb(cur, "l_out", il);
+
+        // input for next layer
+        inpL = cur;
+    }
+
+    cur = inpL;
+
+    cur = llm_build_norm(ctx0, cur, hparams, model.output_norm, NULL, LLM_NORM_RMS, cb, -1);
+    cb(cur, "result_norm", -1);
+
+    // lm_head
+    cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
+    cb(cur, "result_output", -1);
+
+    ggml_build_forward_expand(gf, cur);
+
+    return gf;
+}
+
 ggml_cgraph * llm_build_context::build_qwen() {
     struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
 
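Structurally, build_seedoss is a standard pre-norm decoder stack: RMSNorm, RoPE'd grouped-query attention, residual add, then a post-attention RMSNorm feeding a SwiGLU (gate/up/down, `LLM_FFN_SILU`/`LLM_FFN_PAR`) FFN with a second residual, and a final RMSNorm plus lm_head. Note that `attn_post_norm` is also handed to `llm_build_ffn`, apparently following ik_llama.cpp's fused-norm FFN signature. A NumPy sketch of the intended per-layer dataflow, with toy dimensions, no RoPE/masking/KV cache, and plain multi-head attention standing in for GQA:

# NumPy sketch of one Seed-OSS-style decoder layer as wired in build_seedoss:
# pre-attention RMSNorm, attention, residual, post-attention RMSNorm,
# SwiGLU FFN, residual. Shapes are hypothetical toy sizes, not the model's.
import numpy as np

def rms_norm(x, w, eps=1e-6):
    return x / np.sqrt((x * x).mean(-1, keepdims=True) + eps) * w

def silu(x):
    return x / (1.0 + np.exp(-x))

def attention(x, wq, wk, wv, wo, n_head):
    T, C = x.shape
    hd = C // n_head
    q = (x @ wq).reshape(T, n_head, hd).transpose(1, 0, 2)
    k = (x @ wk).reshape(T, n_head, hd).transpose(1, 0, 2)
    v = (x @ wv).reshape(T, n_head, hd).transpose(1, 0, 2)
    att = q @ k.transpose(0, 2, 1) / np.sqrt(hd)           # kq_scale
    att = np.exp(att - att.max(-1, keepdims=True))
    att /= att.sum(-1, keepdims=True)
    return (att @ v).transpose(1, 0, 2).reshape(T, C) @ wo

def layer(x, p):
    # attention sub-block with residual (this is "ffn_inp" in the graph)
    h = x + attention(rms_norm(x, p["attn_norm"]),
                      p["wq"], p["wk"], p["wv"], p["wo"], n_head=4)
    # post-attention norm feeding the SwiGLU FFN, second residual
    g = rms_norm(h, p["attn_post_norm"])
    return h + (silu(g @ p["ffn_gate"]) * (g @ p["ffn_up"])) @ p["ffn_down"]

rng = np.random.default_rng(0)
T, C, F = 5, 32, 64   # tokens, model dim, FFN dim -- toy sizes
p = {"attn_norm": np.ones(C), "attn_post_norm": np.ones(C)}
for name, shape in [("wq", (C, C)), ("wk", (C, C)), ("wv", (C, C)), ("wo", (C, C)),
                    ("ffn_gate", (C, F)), ("ffn_up", (C, F)), ("ffn_down", (F, C))]:
    p[name] = rng.normal(0.0, 0.02, shape)
print(layer(rng.normal(size=(T, C)), p).shape)   # -> (5, 32)
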
@@ -9299,6 +9395,10 @@ ggml_cgraph * llm_build_context::llama_build_graph(
             {
                 result = llm.build_mimo2();
             } break;
+        case LLM_ARCH_SEED_OSS:
+            {
+                result = llm.build_seedoss();
+            } break;
         default:
             GGML_ABORT("fatal error");
     }

@@ -278,6 +278,8 @@ struct llm_build_context {
 
     ggml_cgraph * build_mimo2();
 
+    ggml_cgraph * build_seedoss();
+
     //
     static ggml_tensor * llm_build_lora_mm(llama_context & lctx, ggml_context * ctx0,
                                            ggml_tensor * w, ggml_tensor * cur);

@@ -1107,7 +1107,14 @@ void llm_load_hparams(
                 }
 
+            } break;
+        case LLM_ARCH_SEED_OSS:
+            {
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                switch (hparams.n_layer) {
+                    case 64: model.type = e_model::MODEL_36B; break;
+                    default: model.type = e_model::MODEL_UNKNOWN;
+                }
             } break;
         default: (void)0;
     }

@@ -139,6 +139,8 @@ struct create_tensors_helper : public create_tensors_helper_interface {
 
     bool create_mimo2_tensors(const LLM_TN & tn);
 
+    bool create_seedoss_tensors(const LLM_TN & tn);
+
     llama_model_loader & ml;
     llama_model & model;

@@ -981,6 +983,49 @@ bool create_tensors_helper::create_stablelm_tensors(const LLM_TN & tn) {
     return use_mmap_buffer;
 }
 
+bool create_tensors_helper::create_seedoss_tensors(const LLM_TN & tn) {
+    LOADING_PRELUDE
+
+    const int64_t n_qo_dim = n_head * n_embd_head_k;
+    const int64_t n_kv_dim = n_head_kv * n_embd_head_k;
+
+    model.tok_embd = create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
+
+    // output
+    {
+        model.output_norm = create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
+        model.output = create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
+        // if output is NULL, init from the input tok embed
+        if (model.output == NULL) {
+            model.output = create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
+        }
+    }
+
+    for (int i = 0; i < n_layer; ++i) {
+        ggml_context * ctx_layer = ctx_for_layer(i);
+        ggml_context * ctx_split = ctx_for_layer_split(i);
+
+        auto & layer = model.layers[i];
+
+        layer.attn_norm = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
+
+        layer.wq = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_qo_dim});
+        layer.wk = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_kv_dim});
+        layer.wv = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_kv_dim});
+        layer.wo = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_qo_dim, n_embd});
+
+        // optional bias tensors
+        layer.bq = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_qo_dim}, llama_model_loader::TENSOR_NOT_REQUIRED);
+        layer.bk = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_kv_dim}, llama_model_loader::TENSOR_NOT_REQUIRED);
+        layer.bv = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_kv_dim}, llama_model_loader::TENSOR_NOT_REQUIRED);
+
+        layer.attn_post_norm = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd});
+
+        create_std_ffn(i, tn, layer, n_ff, n_embd, ctx_split);
+    }
+    return use_mmap_buffer;
+}
+
 bool create_tensors_helper::create_qwen_tensors(const LLM_TN & tn) {
     LOADING_PRELUDE
     create_embd_output(tn, n_embd, n_vocab);

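The two derived widths spell out the grouped-query attention layout: `attn_q` and `attn_output` use `n_head * n_embd_head_k` columns, while `attn_k`/`attn_v` use the smaller `n_head_kv * n_embd_head_k`. A toy calculation with hypothetical numbers (not Seed-OSS's real hyperparameters):

# Toy GQA shape bookkeeping mirroring create_seedoss_tensors; the numbers
# here are hypothetical, not the actual Seed-OSS config.
n_embd, n_head, n_head_kv, n_embd_head_k = 1024, 16, 4, 64

n_qo_dim = n_head * n_embd_head_k       # 1024: full query/output width
n_kv_dim = n_head_kv * n_embd_head_k    # 256:  shared K/V width under GQA

shapes = {
    "attn_q.weight":      (n_embd, n_qo_dim),
    "attn_k.weight":      (n_embd, n_kv_dim),
    "attn_v.weight":      (n_embd, n_kv_dim),
    "attn_output.weight": (n_qo_dim, n_embd),
}
for name, shape in shapes.items():
    print(f"{name}: {shape}")
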
@@ -3058,6 +3103,8 @@ bool create_tensors_helper::create_tensors() {
             use_mmap_buffer = create_smollm3_tensors(tn); break;
         case LLM_ARCH_MIMO2:
             use_mmap_buffer = create_mimo2_tensors(tn); break;
+        case LLM_ARCH_SEED_OSS:
+            use_mmap_buffer = create_seedoss_tensors(tn); break;
         default:
             throw std::runtime_error("unknown architecture");
     }

@@ -1317,6 +1317,23 @@ static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NA
         { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" },
         },
     },
+    {
+        LLM_ARCH_SEED_OSS,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT, "output" },
+            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+            { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
+            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+        },
+    },
     {
         LLM_ARCH_UNKNOWN,
         {

@@ -224,6 +224,7 @@ enum llm_chat_template {
     LLM_CHAT_TEMPLATE_BAILING,
     LLM_CHAT_TEMPLATE_BAILING_THINK,
     LLM_CHAT_TEMPLATE_BAILING2,
+    LLM_CHAT_TEMPLATE_SEED_OSS,
     LLM_CHAT_TEMPLATE_UNKNOWN,
 };

@@ -269,6 +270,7 @@ static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
     { "bailing", LLM_CHAT_TEMPLATE_BAILING },
     { "bailing-think", LLM_CHAT_TEMPLATE_BAILING_THINK },
     { "bailing2", LLM_CHAT_TEMPLATE_BAILING2 },
+    { "seed_oss", LLM_CHAT_TEMPLATE_SEED_OSS },
 
 };

@@ -5047,6 +5049,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
         case LLM_ARCH_BAILINGMOE2:
         case LLM_ARCH_MINIMAX_M2:
         case LLM_ARCH_MIMO2:
+        case LLM_ARCH_SEED_OSS:
             return LLAMA_ROPE_TYPE_NEOX;
 
         case LLM_ARCH_QWEN2VL:

@@ -7004,6 +7007,8 @@ static llm_chat_template llama_chat_detect_template(const std::string & tmpl) {
         return LLM_CHAT_TEMPLATE_GROK_2;
     } else if (tmpl_contains("<|start|>") && tmpl_contains("<|channel|>")) {
         return LLM_CHAT_TEMPLATE_OPENAI_MOE;
+    } else if (tmpl_contains("<seed:bos>")) {
+        return LLM_CHAT_TEMPLATE_SEED_OSS;
     }
     return LLM_CHAT_TEMPLATE_UNKNOWN;
 }

@@ -7533,6 +7538,14 @@ static int32_t llama_chat_apply_template_internal(
         if (add_ass) {
             ss << "Assistant:";
         }
+    } else if (tmpl == LLM_CHAT_TEMPLATE_SEED_OSS) {
+        for (auto message: chat) {
+            std::string role(message->role);
+            ss << "<seed:bos>" << role << "\n" << (role == "assistant" ? trim(message->content) : message->content) << "<seed:eos>";
+        }
+        if (add_ass) {
+            ss << "<seed:bos>assistant\n";
+        }
     } else {
         // template not supported
         return -1;

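The formatter renders each turn as `<seed:bos>{role}\n{content}<seed:eos>` (trimming only assistant content) and, when `add_ass` is set, opens a new assistant turn. A Python mirror of that branch, convenient for eyeballing prompts (it reproduces the C++ logic above; it is not code from the repo):

# Python mirror of the LLM_CHAT_TEMPLATE_SEED_OSS branch; str.strip() stands
# in for the C++ trim() on assistant content.
def seed_oss_prompt(chat: list[dict[str, str]], add_ass: bool = True) -> str:
    out = []
    for msg in chat:
        content = msg["content"].strip() if msg["role"] == "assistant" else msg["content"]
        out.append(f"<seed:bos>{msg['role']}\n{content}<seed:eos>")
    if add_ass:
        out.append("<seed:bos>assistant\n")
    return "".join(out)

print(seed_oss_prompt([
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hi!"},
]))

For that two-message example this prints `<seed:bos>system\nYou are a helpful assistant.<seed:eos><seed:bos>user\nHi!<seed:eos><seed:bos>assistant\n`, i.e. the turns are concatenated with no separator beyond the role line's newline.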