Mirror of https://github.com/ikawrakow/ik_llama.cpp.git, synced 2026-01-26 17:20:01 +00:00
Graph parallel for Mimo-V2-Flash (#1105)
* WIP
* Cleanup
* Set max_gpu to 2 for Mimo2

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
@@ -1170,7 +1170,7 @@ endif()
     set(CUDA_CXX_FLAGS "")

     if (GGML_CUDA)
-        set(CUDA_FLAGS -use_fast_math -extended-lambda)
+        set(CUDA_FLAGS -use_fast_math -extended-lambda -lineinfo)

         if (GGML_FATAL_WARNINGS)
             list(APPEND CUDA_FLAGS -Werror all-warnings)
@@ -1394,13 +1394,6 @@ static ggml_tensor * llm_build_kqv(

     auto kq_size = k->ne[1]*q->ne[1]*q->ne[2]*sizeof(float)/(1024*1024);
     if (cparams.attn_max_batch == 0 || cparams.attn_max_batch >= kq_size || k->ne[2] != q->ne[2] || v->ne[2] != q->ne[2] || sinks) {
-        //if (n_swa > 0 && k->ne[1] > n_swa + q->ne[1]) {
-        //    auto nton = n_swa + q->ne[1];
-        //    auto first = k->ne[1] - nton;
-        //    k = ggml_view_3d(ctx, k, k->ne[0], nton, k->ne[2], k->nb[1], k->nb[2], k->nb[1]*first);
-        //    v = ggml_view_3d(ctx, v, v->ne[0], nton, v->ne[2], v->nb[1], v->nb[2], v->nb[1]*first);
-        //    kq_mask = ggml_view_3d(ctx, kq_mask, nton, kq_mask->ne[1], kq_mask->ne[2], kq_mask->nb[1], kq_mask->nb[2], kq_mask->nb[0]*first);
-        //}
         struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
         cb(kq, "kq", il);

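As a side note, here is a minimal standalone sketch of the size gate kept by this hunk; all concrete shape values below are assumptions for illustration, not taken from the patch or any particular model.

#include <cstdio>
#include <cstdint>

// Hedged sketch of the kq_size estimate in llm_build_kqv above: the K*Q
// matrix is n_kv x n_tokens per head, stored as f32, and the estimate is
// expressed in MiB. All concrete numbers are assumed example values.
int main() {
    int64_t n_kv     = 32768;  // assumed cached tokens (k->ne[1])
    int64_t n_tokens = 512;    // assumed batch size (q->ne[1])
    int64_t n_head   = 32;     // assumed number of heads (q->ne[2])
    int64_t kq_size  = n_kv*n_tokens*n_head*sizeof(float)/(1024*1024);

    int64_t attn_max_batch = 1024;  // assumed cparams.attn_max_batch; 0 means unlimited
    bool single_mul_mat = attn_max_batch == 0 || attn_max_batch >= kq_size;
    printf("kq_size = %lld MiB, single ggml_mul_mat path: %s\n",
           (long long)kq_size, single_mul_mat ? "yes" : "no");
    return 0;
}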
@@ -9433,7 +9426,6 @@ ggml_tensor * llm_build_context::build_std_attention(ggml_cgraph * gf, ggml_tens
     if (!model.layers[il].wqkv && !model.layers[il].wqk && cparams.flash_attn &&
         model.layers[il].wq->extra && model.layers[il].wk->extra && model.layers[il].wv->extra && model.layers[il].wo->extra) {
         if (kv_self.k_l[il]->extra && kv_self.v_l[il]->extra) {
-            //printf("%s: %s\n", __func__, ggml_op_name(input->op));
             ggml_split_tensor_t * attn_norm = the_attn_norm ? (ggml_split_tensor_t *)the_attn_norm->extra : nullptr;
             auto wq = (ggml_split_tensor_t *)model.layers[il].wq->extra;
             auto wk = (ggml_split_tensor_t *)model.layers[il].wk->extra;
@@ -9481,11 +9473,6 @@ ggml_tensor * llm_build_context::build_std_attention(ggml_cgraph * gf, ggml_tens
                     cur = llm_build_norm(ctx0, cur, lctx.model.hparams, attn_norm->splits[id], NULL, LLM_NORM_RMS, cb, il);
                 }
             }
-            //if (attn_norm) {
-            //    auto split_norm = attn_norm->splits[id];
-            //    cur = llm_build_norm(ctx0, cur, hparams, split_norm, NULL, is_norm ? LLM_NORM : LLM_NORM_RMS, cb, il);
-            //    cb(cur, "attn_norm", il_cb);
-            //}
             if (cur->type != GGML_TYPE_F32) {
                 cur = ggml_cast(ctx0, cur, GGML_TYPE_F32);
             }
@@ -9583,15 +9570,21 @@ ggml_tensor * llm_build_context::build_std_attention(ggml_cgraph * gf, ggml_tens
             cur = ggml_flash_attn_ext(ctx0, q, k, v, KQ_mask, KQ_scale, hparams.f_max_alibi_bias,
                     hparams.attn_soft_cap ? hparams.f_attn_logit_softcapping : 0.0f);
             cb(cur, "flash_attn", il_cb);
-            ggml_flash_attn_ext_add_sinks(cur, sinks);
+            if (model.layers[il].attn_sinks && model.layers[il].attn_sinks->extra) {
+                auto split = (ggml_split_tensor_t *)model.layers[il].attn_sinks->extra;
+                GGML_ASSERT(split->n_device == wq->n_device);
+                GGML_ASSERT(split->splits[id]);
+                ggml_flash_attn_ext_add_sinks(cur, split->splits[id]);
+            } else {
+                ggml_flash_attn_ext_add_sinks(cur, sinks);
+            }
             if (n_swa > 0) {
                 ((int32_t *)cur->op_params)[4] = n_swa;
             }

             // Some models produced NaNs/gibberish when FA is computed with f16 precision on CUDA
             if (use_f32_precision || model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX ||
-                (model.arch == LLM_ARCH_DEEPSEEK2 && q->ne[1] <= 8) || model.arch == LLM_ARCH_COHERE2 || model.arch == LLM_ARCH_GLM4 ||
-                model.arch == LLM_ARCH_GLM4_MOE) {
+                (model.arch == LLM_ARCH_DEEPSEEK2 && q->ne[1] <= 8) || model.arch == LLM_ARCH_COHERE2 || model.arch == LLM_ARCH_GLM4 ||
+                model.arch == LLM_ARCH_GLM4_MOE) {
                 ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32);
             }
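For orientation, a compile-only sketch of the per-device sink selection this hunk introduces; the split-tensor layout below is inferred from the fields the patch touches (n_device, splits) and is an assumption, not the actual ik_llama.cpp definition of ggml_split_tensor_t.

// Hedged sketch: choose the attention-sinks tensor for device `id` when the
// sinks were split across GPUs, otherwise fall back to the full tensor.
struct ggml_tensor;                               // opaque for this sketch
struct split_tensor_sketch {                      // assumed stand-in for ggml_split_tensor_t
    int           n_device;                       // number of devices the tensor is split over
    ggml_tensor * splits[8];                      // per-device slices
};

static ggml_tensor * pick_sinks(ggml_tensor * full_sinks,
                                const split_tensor_sketch * split, int id) {
    if (split && id >= 0 && id < split->n_device && split->splits[id]) {
        return split->splits[id];                 // per-device slice, as in the patch
    }
    return full_sinks;                            // unsplit fallback
}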
@@ -3043,48 +3043,51 @@ bool create_tensors_helper::create_tensors() {
         if (layer.attn_norm) {
             auto split = create_split(ggml_nrows(layer.attn_norm), -1, cur_splits, mem_used);
             prepare_split_tensors(-1, ctx_split, layer.attn_norm, layer.split_attn_norm, split, mem_used);
-            if (layer.attn_sinks) {
-                prepare_split_tensors(-1, ctx_split, layer.attn_sinks, layer.split_attn_sinks, split, mem_used);
-            }
         }
         if (layer.rope_freqs) {
             auto split = create_split(ggml_nrows(layer.rope_freqs), -1, cur_splits, mem_used);
             prepare_split_tensors(-1, ctx_split, layer.rope_freqs, layer.split_rope_freqs, split, mem_used);
         }
         if (layer.wo && layer.wq && layer.wk && layer.wv) {
-            // TODO: fix this logic. It only works whe K and V head size is the same
-            //printf("Layer %d: q = %ld x %ld, k = %ld x %ld, v = %ld x %ld, qo = %ld x %ld\n", il, layer.wq->ne[0], layer.wq->ne[1],
-            //        layer.wk->ne[0], layer.wk->ne[1], layer.wv->ne[0], layer.wv->ne[1], layer.wo->ne[0], layer.wo->ne[1]);
-            int attn_granularity = hparams.n_embd_head_v * gqa_ratio;
+            auto granularity_kq = hparams.n_embd_head_k * gqa_ratio;
+            auto granularity_vo = hparams.n_embd_head_v * gqa_ratio;
             if (ggml_is_quantized(layer.wo->type)) {
                 auto tt = ggml_internal_get_type_traits(layer.wo->type);
-                if (tt.blck_size > attn_granularity) attn_granularity = tt.blck_size;
+                if (tt.blck_size > granularity_vo) granularity_vo = tt.blck_size;
+                GGML_ASSERT(granularity_vo % hparams.n_embd_head_v == 0);
             }
-            GGML_ASSERT(attn_granularity % hparams.n_embd_head_v == 0);
-            auto split = create_split(layer.wo->ne[0], attn_granularity, cur_splits, mem_used);
-            //printf("Split:"); for (auto s : split) printf(" %d", s); printf("\n");
-            prepare_split_tensors(0, ctx_split, layer.wo, layer.split_wo, split, mem_used);
-            prepare_split_tensors(1, ctx_split, layer.wq, layer.split_wq, split, mem_used);
+            auto split_vo = create_split(layer.wo->ne[0], granularity_vo, cur_splits, mem_used);
+            auto split_kq = create_split(layer.wq->ne[1], granularity_kq, cur_splits, mem_used);
+            prepare_split_tensors(0, ctx_split, layer.wo, layer.split_wo, split_vo, mem_used);
+            prepare_split_tensors(1, ctx_split, layer.wq, layer.split_wq, split_kq, mem_used);
             if (layer.bo) {
-                prepare_split_tensors(-1, ctx_split, layer.bo, layer.split_bo, split, mem_used);
+                prepare_split_tensors(-1, ctx_split, layer.bo, layer.split_bo, split_vo, mem_used);
            }
            if (layer.bq) {
-                prepare_split_tensors(0, ctx_split, layer.bq, layer.split_bq, split, mem_used);
+                prepare_split_tensors(0, ctx_split, layer.bq, layer.split_bq, split_kq, mem_used);
            }
            if (layer.attn_q_norm) {
-                prepare_split_tensors(-1, ctx_split, layer.attn_q_norm, layer.split_q_norm, split, mem_used);
+                prepare_split_tensors(-1, ctx_split, layer.attn_q_norm, layer.split_q_norm, split_kq, mem_used);
            }
-            for (auto & s : split) s /= gqa_ratio;
-            prepare_split_tensors(1, ctx_split, layer.wk, layer.split_wk, split, mem_used);
-            prepare_split_tensors(1, ctx_split, layer.wv, layer.split_wv, split, mem_used);
+            if (layer.attn_sinks) {
+                auto split_sinks = split_kq;
+                for (auto & s : split_sinks) {
+                    s /= hparams.n_embd_head_k;
+                }
+                prepare_split_tensors(0, ctx_split, layer.attn_sinks, layer.split_sinks, split_sinks, mem_used);
+            }
+            for (auto & s : split_kq) s /= gqa_ratio;
+            for (auto & s : split_vo) s /= gqa_ratio;
+            prepare_split_tensors(1, ctx_split, layer.wk, layer.split_wk, split_kq, mem_used);
+            prepare_split_tensors(1, ctx_split, layer.wv, layer.split_wv, split_vo, mem_used);
            if (layer.bk) {
-                prepare_split_tensors(0, ctx_split, layer.bk, layer.split_bk, split, mem_used);
+                prepare_split_tensors(0, ctx_split, layer.bk, layer.split_bk, split_kq, mem_used);
            }
            if (layer.bv) {
-                prepare_split_tensors(0, ctx_split, layer.bv, layer.split_bv, split, mem_used);
+                prepare_split_tensors(0, ctx_split, layer.bv, layer.split_bv, split_vo, mem_used);
            }
            if (layer.attn_k_norm) {
-                prepare_split_tensors(-1, ctx_split, layer.attn_k_norm, layer.split_k_norm, split, mem_used);
+                prepare_split_tensors(-1, ctx_split, layer.attn_k_norm, layer.split_k_norm, split_kq, mem_used);
            }
        }
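To make the granularity arithmetic above concrete, here is a small standalone sketch; the head sizes, GQA ratio, and head count are assumed example values (chosen so that K and V head sizes differ, as the commit targets), not a real Mimo-V2 configuration.

#include <cstdio>
#include <vector>

// Hedged sketch of the split-granularity arithmetic in the hunk above.
// All numbers below are assumed example values, not a real model config.
int main() {
    int n_embd_head_k = 192;   // assumed K head size
    int n_embd_head_v = 128;   // assumed V head size (different from K)
    int gqa_ratio     = 4;     // assumed Q heads per KV head
    int n_head        = 16;    // assumed number of Q heads

    // Row-split granularities: Q/K-side splits stay aligned to whole GQA
    // groups of K-sized heads, V/O-side splits to groups of V-sized heads.
    int granularity_kq = n_embd_head_k * gqa_ratio;   // 768
    int granularity_vo = n_embd_head_v * gqa_ratio;   // 512

    // Pretend the Q-side and V/O-side attention dimensions are split evenly
    // over two GPUs (both shares are multiples of their granularity).
    std::vector<int> split_kq = { n_head/2 * n_embd_head_k, n_head/2 * n_embd_head_k };
    std::vector<int> split_vo = { n_head/2 * n_embd_head_v, n_head/2 * n_embd_head_v };

    // As in the patch, the K/V projections reuse the same per-device shares
    // divided by the GQA ratio (there are fewer KV heads than Q heads).
    for (auto & s : split_kq) s /= gqa_ratio;
    for (auto & s : split_vo) s /= gqa_ratio;

    printf("granularity_kq = %d, granularity_vo = %d\n", granularity_kq, granularity_vo);
    printf("per-GPU K rows: %d %d, per-GPU V rows: %d %d\n",
           split_kq[0], split_kq[1], split_vo[0], split_vo[1]);
    return 0;
}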
@@ -202,6 +202,7 @@ struct llama_layer {
     llama_split_tensor split_bkv;
     llama_split_tensor split_q_norm;
     llama_split_tensor split_k_norm;
+    llama_split_tensor split_sinks;

     // relative position bias
     struct ggml_tensor * attn_rel_b = nullptr;
@@ -1730,6 +1730,7 @@ static bool is_model_split_supported(const llama_model & model) {
         LLM_ARCH_GLM4_MOE,
         LLM_ARCH_MISTRAL3,
         LLM_ARCH_COHERE2,
+        LLM_ARCH_MIMO2,
     };
     auto it = k_supported.find(model.arch);
     return it != k_supported.end();
@@ -1760,6 +1761,13 @@ static bool llm_load_tensors(
             LLAMA_LOG_WARN(" => changing split mode to 'layer'\n");
             LLAMA_LOG_WARN("=======================================================\n\n");
             split_mode = LLAMA_SPLIT_MODE_LAYER;
-        }
+        } else {
+            if (model.arch == LLM_ARCH_MIMO2 && model.devices.size() > 2 && max_gpu != 2) {
+                LLAMA_LOG_WARN("\n================================================================\n");
+                LLAMA_LOG_WARN("Split mode 'graph' for Mimo2 does not work with more than 2 GPUs\n");
+                LLAMA_LOG_WARN(" => setting max_gpu to 2\n");
+                LLAMA_LOG_WARN("================================================================\n\n");
+            }
+        }
     }
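The clamp itself ("Set max_gpu to 2 for Mimo2" in the commit message) is not visible in this hunk; below is a minimal hedged sketch of what it presumably amounts to, using a hypothetical helper name that is not part of ik_llama.cpp.

// Hedged sketch, hypothetical helper (not in ik_llama.cpp): when split mode
// 'graph' is used for Mimo2 on more than 2 devices, restrict the split to 2.
static int clamp_max_gpu_for_mimo2(bool is_mimo2, int max_gpu, int n_devices) {
    if (is_mimo2 && n_devices > 2 && max_gpu != 2) {
        return 2;   // graph split for Mimo2 only works with up to 2 GPUs
    }
    return max_gpu;
}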