Mirror of https://github.com/ikawrakow/ik_llama.cpp.git, synced 2026-04-27 18:01:45 +00:00
Be able to set reduce op data type for split mode "graph" (#1087)
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
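This change adds a boolean context parameter, split_mode_f16, which controls the data type used for the reduce op under split mode "graph": judging from the hunks below, per-split results are cast to F16 before being combined when the flag is on (the default), and are left in their original type when it is off. A minimal sketch of setting the flag through the C API, assuming the usual ik_llama.cpp loader entry points and a placeholder model path:

#include "llama.h"

int main() {
    llama_backend_init();

    // Load a model; "model.gguf" is a placeholder path, and split mode "graph"
    // is assumed to be selected elsewhere (model params / CLI options).
    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_load_model_from_file("model.gguf", mparams);

    // Keep the split-mode reduce op at full precision by turning the new flag off.
    llama_context_params cparams = llama_context_default_params();
    cparams.split_mode_f16 = false;   // defaults to true (F16 reduce), see the hunks below
    llama_context * ctx = llama_new_context_with_model(model, cparams);

    // ... run inference ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}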
@@ -697,7 +697,7 @@ ggml_tensor * llm_build_context::llm_build_ffn(
 // GLM4 and GLM4_MOE seem to have numerical issues with half-precision accumulators
 ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
 }
-if (cur->ne[1] >= 32) {
+if (cur->ne[1] > 32 && lctx.cparams.split_mode_f16) {
 cur = ggml_cast(ctx, cur, GGML_TYPE_F16);
 }
 if (graph) {
@@ -1185,7 +1185,7 @@ llm_expert_gating_func_type gating_op,
 split_down_shexp->splits[id], split_down_b_shexp ? split_down_b_shexp->splits[id] : nullptr, nullptr,
 nullptr, type_op_shexp, LLM_FFN_PAR, cb, il);
 cb(shared_out, "ffn_shexp_out", il_cb);
-if (shared_out->ne[1] > 32) {
+if (shared_out->ne[1] > 32 && lctx.cparams.split_mode_f16) {
 shared_out = ggml_cast(ctx, shared_out, GGML_TYPE_F16);
 }
 results.push_back(shared_out);
@@ -1202,7 +1202,7 @@ llm_expert_gating_func_type gating_op,
 cb(cur, "ffn_shared_combined", il);
 }
 }
-if (routed_out->ne[1] > 32) {
+if (routed_out->ne[1] > 32 && lctx.cparams.split_mode_f16) {
 auto routed_out_f16 = ggml_cast(ctx, routed_out, GGML_TYPE_F16);
 cur = ggml_add(ctx, routed_out_f16, cur);
 } else {
@@ -1279,7 +1279,7 @@ llm_expert_gating_func_type gating_op,
 } else {
 cur = routed_out;
 }
-if (cur->ne[1] >= 32) {
+if (cur->ne[1] > 32 && lctx.cparams.split_mode_f16) {
 cur = ggml_cast(ctx, cur, GGML_TYPE_F16);
 cb(cur, "ffn_out_f16", il_cb);
 }
@@ -9513,7 +9513,7 @@ ggml_tensor * llm_build_context::build_std_attention(ggml_cgraph * gf, ggml_tens
 cur = ggml_add(ctx0, cur, bo->splits[id]);
 cb(cur, "kqv_wo_biased", il_cb);
 }
-if (cur->ne[1] >= 32) {
+if (cur->ne[1] > 32 && lctx.cparams.split_mode_f16) {
 cur = ggml_cast(ctx0, cur, GGML_TYPE_F16);
 }
 ggml_build_forward_expand(gf, cur);
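All five hunks above apply the same gate: the cast to GGML_TYPE_F16 now fires only when the tensor has more than 32 rows (ne[1], typically the token dimension) and the new cparams.split_mode_f16 flag is set; three of the call sites also tighten the threshold from >= 32 to > 32. A compact sketch of the repeated pattern, using a hypothetical helper name that is not part of the commit:

// maybe_cast_f16 is a hypothetical illustration of the gate used at each call site above.
static ggml_tensor * maybe_cast_f16(ggml_context * ctx, ggml_tensor * t, bool split_mode_f16) {
    // Cast only for tensors with more than 32 rows, and only when the user has
    // kept the F16 reduce type enabled.
    if (t->ne[1] > 32 && split_mode_f16) {
        t = ggml_cast(ctx, t, GGML_TYPE_F16);
    }
    return t;
}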
@@ -41,6 +41,7 @@ struct llama_cparams {
 bool graph_reuse;
 bool k_cache_hadamard;
 bool split_mode_graph_scheduling;
+bool split_mode_f16;
 int min_experts;
 float thresh_experts;
@@ -2265,7 +2265,6 @@ bool create_tensors_helper::create_cohere2_tensors(const LLM_TN & tn) {
 for (int i = 0; i < n_layer; ++i) {
 auto & layer = model.layers[i];
 ggml_context * ctx_split = ctx_for_layer_split(i);
-ggml_context * ctx_layer = ctx_for_layer(i);

 layer.attn_norm = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
@@ -4055,6 +4055,7 @@ struct llama_context_params llama_context_default_params() {
 /*.only_active_experts =*/ false,
 /*.k_cache_hadamard =*/ false,
 /*.split_mode_graph_scheduling =*/ false,
+/*.split_mode_f16 =*/ true,
 /*.abort_callback =*/ nullptr,
 /*.abort_callback_data =*/ nullptr,
 /*.offload_policy =*/ nullptr,
@@ -4344,6 +4345,7 @@ struct llama_context * llama_new_context_with_model(
 cparams.graph_reuse = params.graph_reuse;
 cparams.k_cache_hadamard = params.k_cache_hadamard;
 cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
+cparams.split_mode_f16 = params.split_mode_f16;
 cparams.min_experts = params.min_experts;
 cparams.thresh_experts = params.thresh_experts;
 cparams.cuda_params = params.cuda_params;
@@ -4433,6 +4435,7 @@ struct llama_context * llama_new_context_with_model(
 LLAMA_LOG_INFO("%s: graph_reuse = %d\n", __func__, cparams.graph_reuse);
 LLAMA_LOG_INFO("%s: k_cache_hadam = %d\n", __func__, cparams.k_cache_hadamard);
 LLAMA_LOG_INFO("%s: split_mode_graph_scheduling = %d\n", __func__, cparams.split_mode_graph_scheduling);
+LLAMA_LOG_INFO("%s: split_mode_f16= %d\n", __func__, cparams.split_mode_f16);
 LLAMA_LOG_INFO("%s: ser = %d, %g\n", __func__, cparams.min_experts, cparams.thresh_experts);
 LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
 LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);
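Given the format string added in the last hunk, context creation now reports the setting alongside the other split-mode options; the output would look roughly like this (value illustrative):

llama_new_context_with_model: split_mode_f16= 1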