Add batch warmup to sweep-bench

Iwan Kawrakow
2025-05-04 11:21:19 +03:00
parent ce2b0292e1
commit a3975acd4c
3 changed files with 22 additions and 1 deletion

common/common.cpp

@@ -1457,6 +1457,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_params & params)
         params.warmup = false;
         return true;
     }
+    if (arg == "--warmup-batch" || arg == "-wb") {
+        params.batch_warmup = true;
+        return true;
+    }
     if (arg == "--output-format") {
         CHECK_ARG
         std::string value(argv[i]);

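With this change, sweep-bench accepts --warmup-batch (or -wb) alongside the existing options. For illustration, a minimal standalone sketch of the early-return dispatch pattern gpt_params_find_arg uses; the struct and function names below are hypothetical stand-ins, not the actual common.cpp code:

    #include <cstdio>
    #include <cstring>

    struct params_t {
        bool batch_warmup = false; // stand-in for gpt_params::batch_warmup
    };

    // Return true if the argument was recognized, mirroring the
    // early-return style of gpt_params_find_arg.
    static bool find_arg(const char * arg, params_t & params) {
        if (std::strcmp(arg, "--warmup-batch") == 0 || std::strcmp(arg, "-wb") == 0) {
            params.batch_warmup = true;
            return true;
        }
        return false;
    }

    int main(int argc, char ** argv) {
        params_t params;
        for (int i = 1; i < argc; ++i) {
            if (!find_arg(argv[i], params)) {
                std::fprintf(stderr, "unknown argument: %s\n", argv[i]);
                return 1;
            }
        }
        std::printf("batch_warmup = %d\n", params.batch_warmup);
        return 0;
    }
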
common/common.h

@@ -199,6 +199,7 @@ struct gpt_params {
     bool dump_kv_cache = false; // dump the KV cache contents for debugging purposes
     bool no_kv_offload = false; // disable KV offloading
     bool warmup        = true;  // warmup run
+    bool batch_warmup  = false; // batch warmup run
     bool check_tensors = false; // validate tensor data
     bool repack_tensors = false; // repack tensors if interleaved variant is available
     bool use_thp = false; // use transparent huge pages (linux only)

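Note that batch_warmup defaults to false, so existing invocations behave exactly as before; the extra warmup pass only runs when --warmup-batch / -wb is passed.
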
examples/sweep-bench/sweep-bench.cpp

@@ -107,7 +107,7 @@ int main(int argc, char ** argv) {
     llama_batch batch = llama_batch_init(n_kv_max, 0, 1);
 
     // warm up
-    {
+    if (params.warmup) {
         llama_batch_add(batch, bos, 0, { 0 }, false);
 
         if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
@@ -115,6 +115,22 @@ int main(int argc, char ** argv) {
             return 1;
         }
     }
+
+    if (params.batch_warmup) {
+        // clean up KV cache after generation
+        llama_kv_cache_seq_rm(ctx, 0, params.n_ubatch, -1);
+
+        // prepare batch of pp size for prompt processing performance measurement
+        llama_batch_clear(batch);
+        for (unsigned int i = 0; i < params.n_ubatch; ++i) {
+            llama_batch_add(batch, std::rand() % n_vocab, i, { 0 }, false);
+        }
+
+        if (!decode_helper(ctx, batch, ctx_params.n_ubatch)) {
+            LOG_TEE("%s: llama_decode() failed\n", __func__);
+            return 1;
+        }
+    }
 
     llama_batch_clear(batch);
     llama_kv_cache_clear(ctx);
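
The one-token warmup above only touches the single-row token-generation path, so the new block additionally decodes one full micro-batch of random tokens to warm up the prompt-processing kernels, as the committed comment notes. Below is a hedged sketch of that logic factored into a helper; it reuses the llama_batch_add/llama_batch_clear helpers from common.h like the diff does, while the helper name and the caller-supplied n_ubatch/n_vocab arguments are assumptions for illustration:

    #include "llama.h"
    #include "common.h"

    #include <cstdlib>

    // Decode one full micro-batch of random tokens so the prompt-processing
    // kernels run once before timing, then leave the cache and batch empty.
    static bool warmup_prompt_path(llama_context * ctx, llama_batch & batch,
                                   uint32_t n_ubatch, int32_t n_vocab) {
        llama_batch_clear(batch);
        for (uint32_t i = 0; i < n_ubatch; ++i) {
            // random token at position i, sequence 0, logits not requested,
            // matching the committed loop
            llama_batch_add(batch, std::rand() % n_vocab, i, { 0 }, false);
        }
        if (llama_decode(ctx, batch) != 0) {
            return false;
        }
        // discard the warmup tokens so timed runs start from an empty cache
        llama_kv_cache_clear(ctx);
        llama_batch_clear(batch);
        return true;
    }

The committed code goes through decode_helper rather than calling llama_decode directly, but with a chunk size of ctx_params.n_ubatch and a batch of exactly n_ubatch tokens the two amount to a single decode call.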