sweep_bench: set number of repetitions (#1176)

Author:       Kawrakow
Date:         2026-01-22 12:28:30 +02:00
Committed by: GitHub
Parent:       101fe54797
Commit:       573e23679d
3 changed files with 77 additions and 54 deletions

@@ -786,6 +786,11 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.max_extra_alloc_MiB = std::stoi(argv[i]);
         return true;
     }
+    if (arg == "-nrep" || arg == "--n-repetitions") {
+        CHECK_ARG
+        params.nrep = std::stoi(argv[i]);
+        return true;
+    }
     if (arg == "--samplers") {
         CHECK_ARG
         const auto sampler_names = string_split(argv[i], ";");
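
The new option follows the same pattern as the surrounding integer flags: CHECK_ARG advances the argument index to the option's value (bailing out if it is missing, as in upstream llama.cpp), and std::stoi stores the value in params.nrep. A hypothetical invocation of the sweep benchmark with four repetitions, assuming the example builds as llama-sweep-bench and using placeholder model/context flags that are not part of this commit, would look something like:

    ./llama-sweep-bench -m model.gguf -c 4096 -nrep 4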

@@ -145,38 +145,39 @@ struct gpt_params {
     int32_t n_threads = cpu_get_num_math();
     int32_t n_threads_draft = -1;
     int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads)
     int32_t n_threads_batch_draft = -1;
     int32_t n_predict = -1; // new tokens to predict
     int32_t n_ctx = 0; // context size
     int32_t n_ctx_draft = 0; // context size for draft model
     int32_t n_batch = 2048; // logical batch size for prompt processing (must be >=32 to use BLAS)
     int32_t n_ubatch = 512; // physical batch size for prompt processing (must be >=32 to use BLAS)
     int32_t n_keep = 0; // number of tokens to keep from initial prompt
     int32_t n_draft = 16; // number of tokens to draft during speculative decoding
     int32_t n_draft_min = 1; // minimum number of tokens to draft during speculative decoding
     float p_draft_min = 0.8f; // minimum speculative decoding probability (greedy)
     int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited)
     int32_t n_parallel = 1; // number of parallel sequences to decode
     int32_t n_sequences = 1; // number of sequences to decode
     float p_split = 0.1f; // speculative decoding split probability
     int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default)
     int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default)
     int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
     int32_t max_gpu = 0; // max number of GPUs to use at a time for split mode "graph"
     float tensor_split[128] = {0}; // how split tensors should be distributed across GPUs
     int32_t grp_attn_n = 1; // group-attention factor
     int32_t grp_attn_w = 512; // group-attention width
     int32_t n_print = -1; // print token count every n tokens (-1 = disabled)
     float rope_freq_base = 0.0f; // RoPE base frequency
     float rope_freq_scale = 0.0f; // RoPE frequency scaling factor
     float yarn_ext_factor = -1.0f; // YaRN extrapolation mix factor
     float yarn_attn_factor = -1.0f; // YaRN magnitude scaling factor
     float yarn_beta_fast = -1.0f; // YaRN low correction dim
     float yarn_beta_slow = -1.0f; // YaRN high correction dim
     int32_t yarn_orig_ctx = 0; // YaRN original context length
     float defrag_thold = -1.0f; // KV cache defragmentation threshold
-    int32_t max_extra_alloc_MiB = 256; // additional VRAM per GPU the scheduler may allocate for more efficient compute graph evaluation
+    int32_t max_extra_alloc_MiB = 256; // extra VRAM per GPU the scheduler may allocate for more efficient compute graph evaluation
+    int32_t nrep = 1; // number of repetitions used in sweep bench
 
     ggml_backend_sched_eval_callback cb_eval = nullptr;
     void * cb_eval_user_data = nullptr;

@@ -31,6 +31,7 @@ int main(int argc, char ** argv) {
         print_usage(argc, argv);
         return 1;
     }
+    if (params.nrep < 1) params.nrep = 1;
 
     // init LLM
@@ -135,49 +136,63 @@ int main(int argc, char ** argv) {
     common_batch_clear(batch);
     llama_kv_cache_clear(ctx);
 
+    int i_loop = 0;
     for (unsigned int n_kv = 0; n_kv < n_kv_max; n_kv += params.n_ubatch) {
         // clean up KV cache before generation
-        llama_kv_cache_seq_rm(ctx, 0, n_kv, -1);
+        //llama_kv_cache_seq_rm(ctx, 0, n_kv, -1);
+
+        int nrep = i_loop < 1 ? params.nrep : 1;
 
         // first measure token generation performance at this context size
         const auto t_tg_start = ggml_time_us();
 
-        for (unsigned int i = 0; i < tg; ++i) {
-            common_batch_clear(batch);
-            common_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, true);
-
-            if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
-                LOG_TEE("%s: llama_decode() failed\n", __func__);
-                return 1;
-            }
-        }
+        for (int irep = 0; irep < nrep; ++irep) {
+            llama_kv_cache_seq_rm(ctx, 0, n_kv, -1);
+            for (unsigned int i = 0; i < tg; ++i) {
+                common_batch_clear(batch);
+                common_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, true);
+
+                if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
+                    LOG_TEE("%s: llama_decode() failed\n", __func__);
+                    return 1;
+                }
+            }
+        }
 
         const auto t_tg_end = ggml_time_us();
 
-        // clean up KV cache after generation
-        llama_kv_cache_seq_rm(ctx, 0, n_kv, -1);
-
-        // prepare batch of pp size for prompt processing performance measurement
-        common_batch_clear(batch);
-
-        for (unsigned int i = 0; i < pp; ++i) {
-            common_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, false);
-        }
-        batch.logits[batch.n_tokens - 1] = true;
-
-        // measure prompt processing performance
-        const auto t_pp_start = ggml_time_us();
-
-        if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
-            LOG_TEE("%s: llama_decode() failed\n", __func__);
-            return 1;
-        }
+        // measure prompt processing performance
+        const auto t_pp_start = ggml_time_us();
+        for (int irep = 0; irep < nrep; ++irep) {
+            // clean up KV cache after generation
+            llama_kv_cache_seq_rm(ctx, 0, n_kv, -1);
+
+            // prepare batch of pp size for prompt processing performance measurement
+            common_batch_clear(batch);
+
+            for (unsigned int i = 0; i < pp; ++i) {
+                common_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, false);
+            }
+            batch.logits[batch.n_tokens - 1] = true;
+
+            if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
+                LOG_TEE("%s: llama_decode() failed\n", __func__);
+                return 1;
+            }
+        }
 
         const auto t_pp_end = ggml_time_us();
 
         // calculate and print metrics
-        const float t_pp = (t_pp_end - t_pp_start) / 1000000.0f;
-        const float t_tg = (t_tg_end - t_tg_start) / 1000000.0f;
+        const float t_pp = (t_pp_end - t_pp_start) / 1000000.0f / nrep;
+        const float t_tg = (t_tg_end - t_tg_start) / 1000000.0f / nrep;
 
         const float speed_pp = pp / t_pp;
         const float speed_tg = tg / t_tg;
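
A note on the repetition logic above: nrep equals params.nrep only while i_loop is still 0, so only the first n_kv step of the sweep is repeated; every later step runs a single repetition. The KV-cache cleanup (llama_kv_cache_seq_rm) now happens inside each repetition rather than once per step, and both elapsed times are divided by nrep, so the reported t_pp, t_tg, and speeds stay per-repetition figures. As a worked example with made-up numbers: with --n-repetitions 4, pp = 512, and 2.0 s spent in total on the four prompt-processing repetitions, t_pp = 2.0 / 4 = 0.5 s and speed_pp = 512 / 0.5 = 1024 t/s.
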
@@ -192,6 +207,8 @@ int main(int argc, char ** argv) {
         } else {
             LOG_TEE("|%6d | %6d | %6d | %8.3f | %8.2f | %8.3f | %8.2f |\n", pp, tg, n_kv, t_pp, speed_pp, t_tg, speed_tg);
         }
+        ++i_loop;
     }
 
     llama_batch_free(batch);
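
For readers who want the measurement pattern in isolation, below is a minimal, self-contained C++ sketch of the repeat-and-average idea the commit implements. The workload, token count, and std::chrono timing are stand-ins chosen for the illustration; the benchmark itself times llama_decode() via decode_helper() with ggml_time_us().

// Distilled sketch of the repeat-and-average pattern (illustration only,
// not the benchmark's actual code).
#include <chrono>
#include <cmath>
#include <cstdio>

int main() {
    const int nrep     = 4;    // plays the role of params.nrep / --n-repetitions
    const int n_tokens = 512;  // plays the role of the pp batch size

    volatile double sink = 0.0;  // keeps the dummy work from being optimized away

    const auto t_start = std::chrono::steady_clock::now();
    for (int irep = 0; irep < nrep; ++irep) {
        // dummy workload standing in for one prompt-processing pass
        for (int i = 0; i < n_tokens * 10000; ++i) {
            sink = sink + std::sqrt((double) i);
        }
    }
    const auto t_end = std::chrono::steady_clock::now();

    // total elapsed time divided by the number of repetitions, as in the commit
    const double t_total = std::chrono::duration<double>(t_end - t_start).count();
    const double t_pp    = t_total / nrep;

    std::printf("t_pp = %.4f s, speed = %.2f tokens/s\n", t_pp, n_tokens / t_pp);
    return 0;
}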