diff --git a/common/common.cpp b/common/common.cpp
index 674e975d..9bcf1ff9 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1106,8 +1106,8 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.fused_mmad = false;
         return true;
     }
-    if (arg == "-no-rcache" || arg == "--no-rope-cache") {
-        params.rope_cache = false;
+    if (arg == "-rcache" || arg == "--rope-cache") {
+        params.rope_cache = true;
         return true;
     }
     if (arg == "-ser" || arg == "--smart-expert-reduction") {
@@ -1918,7 +1918,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", "-ger, --grouped-expert-routing", "enable grouped expert routing (default: %s)", params.grouped_expert_routing ? "enabled" : "disabled" });
     options.push_back({ "*", "-no-fug, --no-fused-up-gate", "disaable fused up-gate (default: %s)", params.fused_up_gate ? "enabled" : "disabled" });
     options.push_back({ "*", "-no-mmad, --no-fused-mul-multiadd", "disaable fused mul-multi_add (default: %s)", params.fused_mmad? "enabled" : "disabled" });
-    options.push_back({ "*", "-no-rcache, --no-rope-cache", "disaable RoPE cache (default: %s)", params.rope_cache ? "enabled" : "disabled" });
+    options.push_back({ "*", "-rcache, --rope-cache", "enable RoPE cache (default: %s)", params.rope_cache ? "enabled" : "disabled" });
     options.push_back({ "*", "-ser, --smart-expert-reduction,","experts reduction (default: %d,%g)", params.min_experts, params.thresh_experts});
     options.push_back({ "*", "-mqkv, --merge-qkv,", "merge Q,K,V (default: %d)", params.merge_qkv});
     options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"
diff --git a/common/common.h b/common/common.h
index b202ad86..6eae6f12 100644
--- a/common/common.h
+++ b/common/common.h
@@ -249,7 +249,7 @@ struct gpt_params {
     bool fused_up_gate = true; // fused up*unary(gate) op
     bool fused_mmad = true; // fused mul+multi_add op
     bool grouped_expert_routing = false; // if to use grouped expert routing (BailingMoeV2 arch)
-    bool rope_cache = true; // if to use RoPE cache (for supported models)
+    bool rope_cache = false; // if to use RoPE cache (for supported models)

     int min_experts = -1;
     float thresh_experts = 0;
diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu
index b478d593..c59c8609 100644
--- a/ggml/src/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda.cu
@@ -3244,11 +3244,7 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
             ggml_cuda_op_rms_norm(ctx, dst);
             break;
         case GGML_OP_FUSED_RMS_NORM:
-            //if (i + 6 < cgraph->n_nodes) {
-            //    printf("=== Fused rms_norm(%s)\n", dst->name);
-            //    for (int j = 1; j <= 6; ++j) printf(" %s(%s)\n", ggml_op_name(cgraph->nodes[i+j]->op), cgraph->nodes[i+j]->name);
-            //}
-            if (ENABLE_FUSION && i + 4 < cgraph->n_nodes &&
+            if (false && ENABLE_FUSION && i + 4 < cgraph->n_nodes &&
                 cgraph->nodes[i+1]->op == GGML_OP_VIEW &&
                 cgraph->nodes[i+2]->op == GGML_OP_FUSED_RMS_NORM &&
                 cgraph->nodes[i+3]->op == GGML_OP_ROPE_FAST &&
@@ -3256,7 +3252,7 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
                 ggml_cuda_op_fused_rms_rope_fast(ctx, cgraph->nodes[i+3], cgraph->nodes[i+4])) {
                 i += 4;
             }
-            else if (ENABLE_FUSION && i + 4 < cgraph->n_nodes &&
+            else if (false && ENABLE_FUSION && i + 4 < cgraph->n_nodes &&
                 cgraph->nodes[i+1]->op == GGML_OP_ROPE_FAST &&
                 cgraph->nodes[i+2]->op == GGML_OP_RESHAPE &&
                 cgraph->nodes[i+3]->op == GGML_OP_FUSED_RMS_NORM &&
diff --git a/src/llama.cpp b/src/llama.cpp
index b2ffb0ec..5684fa7e 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -3833,7 +3833,7 @@ struct llama_context_params llama_context_default_params() {
         /*.grouped_expert_routing =*/ false,
         /*.fused_up_gate =*/ true,
         /*.fused_mmad =*/ true,
-        /*.rope_cache =*/ true,
+        /*.rope_cache =*/ false,
         /*.min_experts =*/ -1,
         /*.thtesh_experts =*/ 0.0f,
         /*.only_active_experts =*/ false,
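
Net effect of the patch: the RoPE cache becomes opt-in (default off, enabled with -rcache/--rope-cache, replacing the old -no-rcache/--no-rope-cache opt-out), and the two fused RMS-norm + ROPE_FAST paths in the CUDA backend are hard-disabled by short-circuiting their conditions with `false &&` while keeping the code compiled. Below is a minimal, self-contained sketch of the opt-in flag pattern the patch switches to; the demo_params struct and main() harness are illustrative stand-ins, not code from the patch.

// Minimal sketch of the opt-in flag pattern used in gpt_params_find_arg above.
// demo_params and main() are illustrative only, not part of the repository.
#include <cstdio>
#include <string>

struct demo_params {
    bool rope_cache = false; // matches the new default: off unless requested
};

static bool demo_find_arg(const std::string & arg, demo_params & params) {
    if (arg == "-rcache" || arg == "--rope-cache") {
        params.rope_cache = true; // opt-in: the flag turns the cache on
        return true;
    }
    return false; // not an argument this sketch knows about
}

int main(int argc, char ** argv) {
    demo_params params;
    for (int i = 1; i < argc; ++i) {
        demo_find_arg(argv[i], params);
    }
    std::printf("rope_cache: %s\n", params.rope_cache ? "enabled" : "disabled");
    return 0;
}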