From 573e23679dae6524d683288a34a9be102f53918f Mon Sep 17 00:00:00 2001
From: Kawrakow
Date: Thu, 22 Jan 2026 12:28:30 +0200
Subject: [PATCH] sweep_bench: set number of repetitions (#1176)

---
 common/common.cpp                    |  5 +++
 common/common.h                      | 59 ++++++++++++------------
 examples/sweep-bench/sweep-bench.cpp | 67 +++++++++++++++++-----------
 3 files changed, 77 insertions(+), 54 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 2e7d6312..3192fd37 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -786,6 +786,11 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.max_extra_alloc_MiB = std::stoi(argv[i]);
         return true;
     }
+    if (arg == "-nrep" || arg == "--n-repetitions") {
+        CHECK_ARG
+        params.nrep = std::stoi(argv[i]);
+        return true;
+    }
     if (arg == "--samplers") {
         CHECK_ARG
         const auto sampler_names = string_split(argv[i], ";");
diff --git a/common/common.h b/common/common.h
index b67d62d1..1de82a6c 100644
--- a/common/common.h
+++ b/common/common.h
@@ -145,38 +145,39 @@ struct gpt_params {
 
     int32_t n_threads = cpu_get_num_math();
     int32_t n_threads_draft = -1;
-    int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads)
+    int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads)
     int32_t n_threads_batch_draft = -1;
-    int32_t n_predict = -1; // new tokens to predict
-    int32_t n_ctx = 0; // context size
-    int32_t n_ctx_draft = 0; // context size for draft model
-    int32_t n_batch = 2048; // logical batch size for prompt processing (must be >=32 to use BLAS)
-    int32_t n_ubatch = 512; // physical batch size for prompt processing (must be >=32 to use BLAS)
-    int32_t n_keep = 0; // number of tokens to keep from initial prompt
-    int32_t n_draft = 16; // number of tokens to draft during speculative decoding
-    int32_t n_draft_min = 1; // minimum number of tokens to draft during speculative decoding
-    float p_draft_min = 0.8f; // minimum speculative decoding probability (greedy)
-    int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited)
-    int32_t n_parallel = 1; // number of parallel sequences to decode
-    int32_t n_sequences = 1; // number of sequences to decode
-    float p_split = 0.1f; // speculative decoding split probability
-    int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default)
-    int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default)
-    int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
-    int32_t max_gpu = 0; // max number of GPUs to use at a time for split mode "graph"
-    float tensor_split[128] = {0}; // how split tensors should be distributed across GPUs
-    int32_t grp_attn_n = 1; // group-attention factor
-    int32_t grp_attn_w = 512; // group-attention width
-    int32_t n_print = -1; // print token count every n tokens (-1 = disabled)
-    float rope_freq_base = 0.0f; // RoPE base frequency
-    float rope_freq_scale = 0.0f; // RoPE frequency scaling factor
-    float yarn_ext_factor = -1.0f; // YaRN extrapolation mix factor
+    int32_t n_predict = -1; // new tokens to predict
+    int32_t n_ctx = 0; // context size
+    int32_t n_ctx_draft = 0; // context size for draft model
+    int32_t n_batch = 2048; // logical batch size for prompt processing (must be >=32 to use BLAS)
+    int32_t n_ubatch = 512; // physical batch size for prompt processing (must be >=32 to use BLAS)
+    int32_t n_keep = 0; // number of tokens to keep from initial prompt
+    int32_t n_draft = 16; // number of tokens to draft during speculative decoding
+    int32_t n_draft_min = 1; // minimum number of tokens to draft during speculative decoding
+    float p_draft_min = 0.8f; // minimum speculative decoding probability (greedy)
+    int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited)
+    int32_t n_parallel = 1; // number of parallel sequences to decode
+    int32_t n_sequences = 1; // number of sequences to decode
+    float p_split = 0.1f; // speculative decoding split probability
+    int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default)
+    int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default)
+    int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
+    int32_t max_gpu = 0; // max number of GPUs to use at a time for split mode "graph"
+    float tensor_split[128] = {0}; // how split tensors should be distributed across GPUs
+    int32_t grp_attn_n = 1; // group-attention factor
+    int32_t grp_attn_w = 512; // group-attention width
+    int32_t n_print = -1; // print token count every n tokens (-1 = disabled)
+    float rope_freq_base = 0.0f; // RoPE base frequency
+    float rope_freq_scale = 0.0f; // RoPE frequency scaling factor
+    float yarn_ext_factor = -1.0f; // YaRN extrapolation mix factor
     float yarn_attn_factor = -1.0f; // YaRN magnitude scaling factor
-    float yarn_beta_fast = -1.0f; // YaRN low correction dim
+    float yarn_beta_fast = -1.0f; // YaRN low correction dim
     float yarn_beta_slow = -1.0f; // YaRN high correction dim
-    int32_t yarn_orig_ctx = 0; // YaRN original context length
-    float defrag_thold = -1.0f; // KV cache defragmentation threshold
-    int32_t max_extra_alloc_MiB = 256; // additional VRAM per GPU the scheduler may allocate for more efficient compute graph evaluation
+    int32_t yarn_orig_ctx = 0; // YaRN original context length
+    float defrag_thold = -1.0f; // KV cache defragmentation threshold
+    int32_t max_extra_alloc_MiB = 256; // extra VRAM per GPU the scheduler may allocate for more efficient compute graph evaluation
+    int32_t nrep = 1; // number of repetitions used in sweep bench
 
     ggml_backend_sched_eval_callback cb_eval = nullptr;
     void * cb_eval_user_data = nullptr;
diff --git a/examples/sweep-bench/sweep-bench.cpp b/examples/sweep-bench/sweep-bench.cpp
index 449a0b66..f77ca658 100644
--- a/examples/sweep-bench/sweep-bench.cpp
+++ b/examples/sweep-bench/sweep-bench.cpp
@@ -31,6 +31,7 @@ int main(int argc, char ** argv) {
         print_usage(argc, argv);
         return 1;
     }
+    if (params.nrep < 1) params.nrep = 1;
 
     // init LLM
 
@@ -135,49 +136,63 @@ int main(int argc, char ** argv) {
     common_batch_clear(batch);
     llama_kv_cache_clear(ctx);
 
+    int i_loop = 0;
+
     for (unsigned int n_kv = 0; n_kv < n_kv_max; n_kv += params.n_ubatch) {
         // clean up KV cache before generation
-        llama_kv_cache_seq_rm(ctx, 0, n_kv, -1);
+        //llama_kv_cache_seq_rm(ctx, 0, n_kv, -1);
+
+        int nrep = i_loop < 1 ? params.nrep : 1;
 
         // first measure token generation performance at this context size
         const auto t_tg_start = ggml_time_us();
 
-        for (unsigned int i = 0; i < tg; ++i) {
+        for (int irep = 0; irep < nrep; ++irep) {
+
+            llama_kv_cache_seq_rm(ctx, 0, n_kv, -1);
+
+            for (unsigned int i = 0; i < tg; ++i) {
+                common_batch_clear(batch);
+                common_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, true);
+
+                if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
+                    LOG_TEE("%s: llama_decode() failed\n", __func__);
+                    return 1;
+                }
+            }
+
+        }
+
+        const auto t_tg_end = ggml_time_us();
+
+        // measure prompt processing performance
+        const auto t_pp_start = ggml_time_us();
+
+        for (int irep = 0; irep < nrep; ++irep) {
+
+            // clean up KV cache after generation
+            llama_kv_cache_seq_rm(ctx, 0, n_kv, -1);
+
+            // prepare batch of pp size for prompt processing performance measurement
             common_batch_clear(batch);
-            common_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, true);
+
+            for (unsigned int i = 0; i < pp; ++i) {
+                common_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, false);
+            }
+            batch.logits[batch.n_tokens - 1] = true;
 
             if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
                 LOG_TEE("%s: llama_decode() failed\n", __func__);
                 return 1;
             }
-        }
 
-        const auto t_tg_end = ggml_time_us();
-
-        // clean up KV cache after generation
-        llama_kv_cache_seq_rm(ctx, 0, n_kv, -1);
-
-        // prepare batch of pp size for prompt processing performance measurement
-        common_batch_clear(batch);
-
-        for (unsigned int i = 0; i < pp; ++i) {
-            common_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, false);
-        }
-        batch.logits[batch.n_tokens - 1] = true;
-
-        // measure prompt processing performance
-        const auto t_pp_start = ggml_time_us();
-
-        if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
-            LOG_TEE("%s: llama_decode() failed\n", __func__);
-            return 1;
         }
 
         const auto t_pp_end = ggml_time_us();
 
         // calculate and print metrics
-        const float t_pp = (t_pp_end - t_pp_start) / 1000000.0f;
-        const float t_tg = (t_tg_end - t_tg_start) / 1000000.0f;
+        const float t_pp = (t_pp_end - t_pp_start) / 1000000.0f / nrep;
+        const float t_tg = (t_tg_end - t_tg_start) / 1000000.0f / nrep;
 
         const float speed_pp = pp / t_pp;
         const float speed_tg = tg / t_tg;
@@ -192,6 +207,8 @@ int main(int argc, char ** argv) {
         } else {
             LOG_TEE("|%6d | %6d | %6d | %8.3f | %8.2f | %8.3f | %8.2f |\n", pp, tg, n_kv, t_pp, speed_pp, t_tg, speed_tg);
         }
+
+        ++i_loop;
     }
 
     llama_batch_free(batch);
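
A minimal standalone C++ sketch (not taken from the patch) of the timing scheme the change introduces: only the first context-size step is run params.nrep times (the new -nrep / --n-repetitions option) and its elapsed time is divided by the number of repetitions, while every later step is measured once. run_workload() below is a hypothetical stand-in for the llama_decode() calls in sweep-bench.cpp.

// Sketch of the repetition-averaging scheme; not part of the patch itself.
#include <chrono>
#include <cstdio>

static void run_workload() {
    volatile double x = 0.0;                 // placeholder work instead of llama_decode()
    for (int i = 0; i < 1000000; ++i) x = x + 1.0 / (i + 1);
}

int main() {
    const int n_points = 4;                  // context-size steps of a real sweep
    const int n_rep    = 3;                  // value passed via -nrep / --n-repetitions

    for (int i_loop = 0; i_loop < n_points; ++i_loop) {
        // repeat only the first step, as in `i_loop < 1 ? params.nrep : 1`
        const int nrep = i_loop < 1 ? n_rep : 1;

        const auto t_start = std::chrono::steady_clock::now();
        for (int irep = 0; irep < nrep; ++irep) {
            run_workload();
        }
        const auto t_end = std::chrono::steady_clock::now();

        // report the per-repetition average, mirroring `t / 1000000.0f / nrep`
        const double t_s = std::chrono::duration<double>(t_end - t_start).count() / nrep;
        std::printf("step %d: %.3f s (averaged over %d repetition%s)\n",
                    i_loop, t_s, nrep, nrep > 1 ? "s" : "");
    }
    return 0;
}

Since nrep defaults to 1 in gpt_params and sweep-bench clamps values below 1 back to 1, the benchmark behaves exactly as before unless the new flag is passed.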