mirror of
https://github.com/ikawrakow/ik_llama.cpp.git
synced 2026-02-26 08:04:09 +00:00
Fused delta-net (#1315)
* Revive fused delta-net
* Add command line argument for fused delta net
* Simplify/improve CUDA delta-net
* Add -fdn to llama-bench
* More CUDA fused delta net optimizations
* CPU optimizations
* Much faster fused delta-net on the CPU. It seems it is faster than the chunked implementation!
* Change meaning of fdn from bool flag to threshold value
* Use eps = 1e-6
* Give some nodes a name
This commit is contained in:
@@ -1531,6 +1531,11 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
|
||||
params.scheduler_async = true;
|
||||
return true;
|
||||
}
|
||||
if (arg == "-fdn" || arg == "--fused-delta-net") {
|
||||
CHECK_ARG
|
||||
params.fused_delta_net = std::stoi(argv[i]);
|
||||
return true;
|
||||
}
|
||||
if (arg == "-smf16" || arg == "--split-mode-f16") {
|
||||
params.reduce_type = "f16";
|
||||
//params.split_mode_f16 = true;
|
||||
@@ -2258,6 +2263,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
|
||||
options.push_back({ "*", "-grt, --graph-reduce-type", "Type for data exchange between GPUs (default: %s)", "f32"});
|
||||
options.push_back({ "*", "-smgs, --split-mode-graph-scheduling,", "Force Split Mode Graph Scheduling (default: %d)", params.split_mode_graph_scheduling});
|
||||
options.push_back({ "*", "-sas, --scheduler_async,", "Async evaluation of compute graphs: %d)", params.scheduler_async});
|
||||
options.push_back({ "*", "-fdn, --fused-delta-net N", "Use fused delta-net when batch size is <= N with recurrent models: %d)", params.fused_delta_net});
|
||||
options.push_back({ "*", "-vq, --validate-quants", "validate quantized data while loading the model (default: %d)", params.validate_quants});
|
||||
options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"
|
||||
"in conversation mode, this will be used as system prompt\n"
|
||||
@@ -3336,6 +3342,7 @@ struct llama_context_params common_context_params_to_llama(const gpt_params & pa
|
||||
cparams.split_mode_graph_scheduling = params.split_mode_graph_scheduling;
|
||||
//cparams.split_mode_f16 = params.split_mode_f16;
|
||||
cparams.scheduler_async = params.scheduler_async;
|
||||
cparams.fused_delta_net = params.fused_delta_net;
|
||||
cparams.min_experts = params.min_experts;
|
||||
cparams.thresh_experts = params.thresh_experts;
|
||||
cparams.only_active_experts = params.only_active_exps;
|
||||
@@ -4346,6 +4353,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
|
||||
//fprintf(stream, "split_mode_f16: %s # default: true\n", params.split_mode_f16 ? "true" : "false");
|
||||
fprintf(stream, "reduce_type: %s # default f16\n", params.reduce_type.c_str());
|
||||
fprintf(stream, "scheduler_async: %s # default: false\n", params.scheduler_async ? "true" : "false");
|
||||
fprintf(stream, "fused_delta_net: %d # default: 0\n", params.fused_delta_net );
|
||||
fprintf(stream, "ser: %d,%g # defaulr: -1,0\n", params.min_experts, params.thresh_experts);
|
||||
fprintf(stream, "temp: %f # default: 0.8\n", sparams.temp);
|
||||
|
||||
|
||||
Reference in New Issue
Block a user