gpt-oss: duplicate experts biases when necessary (#829)

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Author: Kawrakow (committed via GitHub)
Date: 2025-10-14 14:38:40 +03:00
Commit: ba9fefb73d (parent: 2b71974af9)
4 changed files with 47 additions and 18 deletions

@@ -1862,7 +1862,6 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
     std::vector<uint32_t> unique_ids;
     ggml_tensor * last_ids_tensor = nullptr;
     for (int i = 0; i < sched->n_splits; i++) {
 #if IK_PRINT_TIMING
         int64_t tim1 = ggml_time_us();
@@ -1872,7 +1871,6 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
         ggml_backend_t split_backend = sched->backends[split_backend_id];
         ggml_backend_t last_input_backend = nullptr;
         int cur_arg = 0;
         // copy the input tensors to the split backend
         for (int j = 0; j < split->n_inputs; j++) {
@@ -1900,7 +1898,6 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
                 if (sched->only_active_experts && split->graph.n_nodes > 0 &&
                     ggml_backend_buffer_get_usage(input->buffer) == GGML_BACKEND_BUFFER_USAGE_WEIGHTS &&
                     ggml_backend_buffer_is_host(input->buffer) &&
                     node->src[cur_arg] == input_cpy &&
                     (node->op == GGML_OP_MUL_MAT_ID || node->op == GGML_OP_MOE_FUSED_UP_GATE)) {
                     if (input_backend != last_input_backend) {
@@ -1922,7 +1919,7 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
                         }
                     }
-                    int n_expert = input->ne[2];
+                    int n_expert = node->src[0]->ne[2];
                     if (ids_tensor != last_ids_tensor) {
                         ids.resize(ggml_nbytes(ids_tensor) / sizeof(int32_t));
@@ -1943,12 +1940,15 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
                         last_ids_tensor = ids_tensor;
                     }
-                    const size_t expert_size = input->ne[2] > 1 ? input->nb[2] : input->nb[1];
+                    if (input->ne[2] > 1) {
                     auto copy_experts = [&](int32_t first_id, int32_t last_id) {
+                        const size_t expert_size = (node->op == GGML_OP_MUL_MAT_ID || node->op == GGML_OP_MOE_FUSED_UP_GATE) ? input->nb[2] : input->nb[1];
                         const size_t expert_offset = first_id * expert_size;
                         const size_t expert_size_copy = (last_id - first_id + 1) * expert_size;
                         const size_t padding = 512;
-                        const size_t padding_end = last_id < input->ne[2] - 1 ? std::min<size_t>(expert_size, padding) : 0;
+                        const size_t padding_end = last_id < n_expert - 1 ? std::min<size_t>(expert_size, padding) : 0;
                         ggml_backend_tensor_set_async(split_backend,
                             input_cpy,
@@ -1974,7 +1974,11 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
                         first_id = next_on_id(last_id);
                     }
                     if (node->op == GGML_OP_MOE_FUSED_UP_GATE) ++cur_arg;
+                    } else {
+                        auto copy_size = ggml_nbytes(input);
+                        ggml_backend_tensor_set_async(split_backend, input_cpy, input->data, 0, copy_size);
+                    }
                 } else
                     // try async copy, but if not possible, we can still use a sync copy without synchronizing the dst backend, since we handle the synchronization here with multiple copies and events
                     // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface
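The scheduler change above does two things: the expert count now comes from the weight operand (node->src[0]->ne[2]) instead of from the tensor being copied, and an input that is not a 3-D expert tensor (such as a 2-D per-expert bias) falls back to one plain full-tensor copy instead of the per-expert copy. The sketch below is a minimal, self-contained illustration of the offset/size arithmetic in copy_experts; tensor_dims and plan_expert_copy are made-up stand-ins for the real ggml types, the fp16 shape in main() is hypothetical, and the real code selects the per-expert stride from the node op rather than a boolean flag.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Stand-in for the few ggml_tensor fields the copy logic reads (illustration only).
    struct tensor_dims {
        int64_t ne[3]; // elements per dimension
        size_t  nb[3]; // strides in bytes
    };

    // Mirrors the arithmetic inside copy_experts above for one contiguous run of experts:
    // one expert is one nb[2] slice of a 3-D weight tensor, or one nb[1] row otherwise.
    static void plan_expert_copy(const tensor_dims & input, int n_expert,
                                 int32_t first_id, int32_t last_id, bool is_3d_expert_weight) {
        const size_t expert_size      = is_3d_expert_weight ? input.nb[2] : input.nb[1];
        const size_t expert_offset    = first_id * expert_size;
        const size_t expert_size_copy = (last_id - first_id + 1) * expert_size;
        const size_t padding          = 512;
        // no extra padding is needed after the very last expert of the tensor
        const size_t padding_end      = last_id < n_expert - 1 ? std::min<size_t>(expert_size, padding) : 0;
        std::printf("experts [%d, %d]: offset=%zu bytes, copy=%zu bytes, end padding=%zu bytes\n",
                    (int) first_id, (int) last_id, expert_offset, expert_size_copy, padding_end);
    }

    int main() {
        // Hypothetical fp16 expert weight tensor with shape {2880, 2880, 32}.
        const tensor_dims up = { {2880, 2880, 32},
                                 {2, 2 * 2880, 2 * 2880 * 2880} };
        const int n_expert = (int) up.ne[2];
        if (up.ne[2] > 1) {
            plan_expert_copy(up, n_expert, 3, 5, true); // only experts 3..5 are active
        } else {
            std::printf("2-D input: copy the whole tensor in one go\n");
        }
        return 0;
    }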

@@ -8151,12 +8151,16 @@ ggml_cgraph * llm_build_context::build_openai_moe() {
         cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_post_norm, nullptr, LLM_NORM_RMS, cb, il);
         cb(cur, "attn_post_norm", il);
+        bool use_dup_bias = cur->ne[1] < 32 && model.layers[il].ffn_up_exps_b_dup &&
+                                               model.layers[il].ffn_gate_exps_b_dup &&
+                                               model.layers[il].ffn_down_exps_b_dup;
         // MoE branch
         cur = llm_build_moe_ffn(ctx0, lctx, cur,
                 model.layers[il].ffn_gate_inp, model.layers[il].ffn_gate_inp_b,
-                model.layers[il].ffn_up_exps, model.layers[il].ffn_up_exps_b,
-                model.layers[il].ffn_gate_exps, model.layers[il].ffn_gate_exps_b,
-                model.layers[il].ffn_down_exps, model.layers[il].ffn_down_exps_b,
+                model.layers[il].ffn_up_exps, use_dup_bias ? model.layers[il].ffn_up_exps_b_dup : model.layers[il].ffn_up_exps_b,
+                model.layers[il].ffn_gate_exps, use_dup_bias ? model.layers[il].ffn_gate_exps_b_dup : model.layers[il].ffn_gate_exps_b,
+                model.layers[il].ffn_down_exps, use_dup_bias ? model.layers[il].ffn_down_exps_b_dup : model.layers[il].ffn_down_exps_b,
                 nullptr,
                 n_expert, n_expert_used,
                 LLM_FFN_SWIGLU_OAI_MOE, false,
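In the graph builder above, the duplicated biases are only substituted when all three duplicates exist and the batch is small (cur->ne[1] < 32), presumably the regime where the scheduler's only-active-experts copy is worthwhile. A minimal sketch of that selection follows; toy_tensor and use_dup_bias here are illustrative stand-ins, not the real ggml_tensor pointers or builder code.

    #include <cstdint>
    #include <cstdio>

    // Toy stand-in for a ggml_tensor pointer; only its presence matters here.
    struct toy_tensor { const char * name; };

    // Mirrors the use_dup_bias condition in build_openai_moe(): prefer the duplicated
    // expert biases only for small batches, and only if all three duplicates were created.
    static bool use_dup_bias(int64_t n_tokens, const toy_tensor * up_dup,
                             const toy_tensor * gate_dup, const toy_tensor * down_dup) {
        return n_tokens < 32 && up_dup && gate_dup && down_dup;
    }

    int main() {
        toy_tensor up{"ffn_up_exps_b_dup"}, gate{"ffn_gate_exps_b_dup"}, down{"ffn_down_exps_b_dup"};

        // Single-token decode: use the duplicates that live next to the expert weights.
        std::printf("n_tokens=1   -> %s\n", use_dup_bias(1, &up, &gate, &down) ? "dup biases" : "original biases");

        // Large prompt batch: fall back to the original bias tensors.
        std::printf("n_tokens=512 -> %s\n", use_dup_bias(512, &up, &gate, &down) ? "dup biases" : "original biases");
        return 0;
    }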

@@ -127,7 +127,8 @@ struct create_tensors_helper : public create_tensors_helper_interface {
     llama_model_loader & ml;
     llama_model & model;
-    ggml_tensor * create_tensor(ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, int flags = 0);
+    ggml_tensor * create_tensor(ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, int flags = 0,
+            ggml_context ** actual_ctx = nullptr);
     void create_default_embd_output(const LLM_TN & tn, int n_embd, int n_vocab, bool norm_bias);
     void create_embd_output(const LLM_TN & tn, int n_embd, int n_vocab, bool has_norm = true);
@@ -198,7 +199,8 @@ create_tensors_helper::create_tensors_helper(llama_model_loader & _ml, llama_mod
     }
 }
-ggml_tensor * create_tensors_helper::create_tensor(ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, int flags) {
+ggml_tensor * create_tensors_helper::create_tensor(ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne,
+        int flags, ggml_context ** actual_context) {
     if (ml.tensor_buft_overrides) {
         for (const auto * overrides = ml.tensor_buft_overrides; overrides->pattern != nullptr; ++overrides) {
             std::regex pattern(overrides->pattern);
@@ -209,6 +211,7 @@ ggml_tensor * create_tensors_helper::create_tensor(ggml_context * ctx, const std
             }
         }
     }
+    if (actual_context) *actual_context = ctx;
     return ml.create_tensor(ctx, name, ne, flags);
 }
@@ -2311,10 +2314,11 @@ bool create_tensors_helper::create_openai_moe_tensors(const LLM_TN & tn) {
         layer.attn_sinks = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_SINKS, "weight", i), {n_head}, 0);
+        ggml_context *ctx_ffn_gate, *ctx_ffn_up, *ctx_ffn_down;
         layer.ffn_gate_inp = create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), { n_embd, n_expert}, 0);
-        layer.ffn_gate_exps = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
-        layer.ffn_down_exps = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
-        layer.ffn_up_exps = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
+        layer.ffn_gate_exps = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0, &ctx_ffn_gate);
+        layer.ffn_down_exps = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0, &ctx_ffn_down);
+        layer.ffn_up_exps = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0, &ctx_ffn_up);
         // bias
         layer.bq = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_head * n_rot}, 0);
@@ -2322,10 +2326,24 @@ bool create_tensors_helper::create_openai_moe_tensors(const LLM_TN & tn) {
         layer.bv = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_head_kv * n_rot}, 0);
         layer.bo = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
+        ggml_context *ctx_ffn_gate_b, *ctx_ffn_up_b, *ctx_ffn_down_b;
         layer.ffn_gate_inp_b = create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "bias", i), {n_expert}, 0);
-        layer.ffn_gate_exps_b = create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_EXPS, "bias", i), {n_ff_exp, n_expert}, 0);
-        layer.ffn_down_exps_b = create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN_EXPS, "bias", i), { n_embd, n_expert}, 0);
-        layer.ffn_up_exps_b = create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP_EXPS, "bias", i), {n_ff_exp, n_expert}, 0);
+        layer.ffn_gate_exps_b = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "bias", i), {n_ff_exp, n_expert}, 0, &ctx_ffn_gate_b);
+        layer.ffn_down_exps_b = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "bias", i), { n_embd, n_expert}, 0, &ctx_ffn_down_b);
+        layer.ffn_up_exps_b = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "bias", i), {n_ff_exp, n_expert}, 0, &ctx_ffn_up_b);
+        if (ctx_ffn_gate_b != ctx_ffn_gate) {
+            layer.ffn_gate_exps_b_dup = create_tensor(ctx_ffn_gate, tn(LLM_TENSOR_FFN_GATE_EXPS, "bias", i), {n_ff_exp, n_expert},
+                    llama_model_loader::TENSOR_DUPLICATED);
+        }
+        if (ctx_ffn_up_b != ctx_ffn_up) {
+            layer.ffn_up_exps_b_dup = create_tensor(ctx_ffn_up, tn(LLM_TENSOR_FFN_UP_EXPS, "bias", i), {n_ff_exp, n_expert},
+                    llama_model_loader::TENSOR_DUPLICATED);
+        }
+        if (ctx_ffn_down_b != ctx_ffn_down) {
+            layer.ffn_down_exps_b_dup = create_tensor(ctx_ffn_down, tn(LLM_TENSOR_FFN_DOWN_EXPS, "bias", i), { n_embd, n_expert},
+                    llama_model_loader::TENSOR_DUPLICATED);
+        }
     }
     return use_mmap_buffer;
 }
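The loader change above is the core of the commit: create_tensor gains an optional out-parameter reporting the context a tensor actually landed in (which can differ from the requested one when a --override-tensor rule matches), and each expert bias gets a second, TENSOR_DUPLICATED copy created in the context of its matching expert weight whenever the two ended up apart. The sketch below imitates that control flow with toy types (toy_ctx, toy_tensor) and one hard-coded override standing in for ml.tensor_buft_overrides; it is an illustration under those assumptions, not the real loader API.

    #include <cstdio>
    #include <string>
    #include <vector>

    // Toy stand-ins so the sketch is self-contained; the real code uses ggml_context/ggml_tensor.
    struct toy_ctx    { const char * name; };
    struct toy_tensor { std::string name; toy_ctx * ctx; bool duplicated; };

    enum { TOY_TENSOR_DUPLICATED = 1 };

    // Shape of create_tensors_helper::create_tensor after this commit: the optional
    // out-parameter reports where the tensor was actually placed, because an override
    // rule may redirect it away from the requested context.
    static void create_tensor(std::vector<toy_tensor> & loaded, toy_ctx * requested, toy_ctx * cpu,
                              const std::string & name, int flags = 0, toy_ctx ** actual_ctx = nullptr) {
        // Hard-coded stand-in for a user override such as "keep the big expert weights on the CPU".
        toy_ctx * placed = (name.find("exps.weight") != std::string::npos) ? cpu : requested;
        if (actual_ctx) *actual_ctx = placed;
        loaded.push_back({name, placed, (flags & TOY_TENSOR_DUPLICATED) != 0});
    }

    int main() {
        std::vector<toy_tensor> loaded;
        toy_ctx split{"ctx_split(gpu)"}, cpu{"ctx(cpu)"};

        toy_ctx *ctx_ffn_up = nullptr, *ctx_ffn_up_b = nullptr;
        create_tensor(loaded, &split, &cpu, "blk.0.ffn_up_exps.weight", 0, &ctx_ffn_up);
        create_tensor(loaded, &split, &cpu, "blk.0.ffn_up_exps.bias",   0, &ctx_ffn_up_b);

        // As in create_openai_moe_tensors(): if the bias did not land next to its weight,
        // also load a duplicated copy of it into the weight's context.
        if (ctx_ffn_up_b != ctx_ffn_up) {
            create_tensor(loaded, ctx_ffn_up, &cpu, "blk.0.ffn_up_exps.bias", TOY_TENSOR_DUPLICATED);
        }

        for (const auto & t : loaded) {
            std::printf("%-28s -> %s%s\n", t.name.c_str(), t.ctx->name, t.duplicated ? " (duplicated)" : "");
        }
        return 0;
    }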

@@ -178,6 +178,9 @@ struct llama_layer {
     struct ggml_tensor * ffn_gate_exps_b = nullptr;
     struct ggml_tensor * ffn_down_exps_b = nullptr;
     struct ggml_tensor * ffn_up_exps_b = nullptr;
+    struct ggml_tensor * ffn_gate_exps_b_dup = nullptr;
+    struct ggml_tensor * ffn_down_exps_b_dup = nullptr;
+    struct ggml_tensor * ffn_up_exps_b_dup = nullptr;
     // ff shared expert (shexp)
     struct ggml_tensor * ffn_gate_inp_shexp = nullptr;