Server: rename functions and refactor code

rename functions

refactor update slots

rename params_base

rename timings

Author: firecoperana
Date:   2026-01-13 12:02:58 -06:00
Parent: cb1063f6cd
Commit: b43b22b68a
39 changed files with 609 additions and 595 deletions
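
The diff excerpt below is from one of the 39 changed files (by the look of decode_helper and the pp/tg loops, the batched-bench example); the same renames are applied mechanically across the tree. As a quick side-by-side reference, the mapping visible in this excerpt is sketched below, where tok, pos, and p0 are placeholder values and the signatures are otherwise unchanged at every call site shown:

    // Batch helpers move from the llama_ prefix to the common_ prefix:
    llama_batch_clear(batch);                         // before
    common_batch_clear(batch);                        // after

    llama_batch_add(batch, tok, pos, { 0 }, false);   // before
    common_batch_add(batch, tok, pos, { 0 }, false);  // after

    // KV-cache management moves to the llama_memory_ family:
    llama_kv_cache_clear(ctx);                        // before
    llama_memory_clear(ctx);                          // after

    llama_kv_cache_seq_rm(ctx, 0, p0, -1);            // before
    llama_memory_seq_rm(ctx, 0, p0, -1);              // after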


@@ -108,7 +108,7 @@ int main(int argc, char ** argv) {
     // warm up
     if (params.warmup) {
-        llama_batch_add(batch, bos, 0, { 0 }, false);
+        common_batch_add(batch, bos, 0, { 0 }, false);
         if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
             LOG_TEE("%s: llama_decode() failed\n", __func__);
@@ -117,13 +117,13 @@ int main(int argc, char ** argv) {
     }
     if (params.batch_warmup) {
         // clean up KV cache after generation
-        llama_kv_cache_seq_rm(ctx, 0, params.n_ubatch, -1);
+        llama_memory_seq_rm(ctx, 0, params.n_ubatch, -1);
         // prepare batch of pp size for prompt processing performance measurement
-        llama_batch_clear(batch);
+        common_batch_clear(batch);
         for (unsigned int i = 0; i < params.n_ubatch; ++i) {
-            llama_batch_add(batch, std::rand() % n_vocab, i, { 0 }, false);
+            common_batch_add(batch, std::rand() % n_vocab, i, { 0 }, false);
         }
         if (!decode_helper(ctx, batch, ctx_params.n_ubatch)) {
@@ -132,19 +132,19 @@ int main(int argc, char ** argv) {
         }
     }
-    llama_batch_clear(batch);
-    llama_kv_cache_clear(ctx);
+    common_batch_clear(batch);
+    llama_memory_clear(ctx);
     for (unsigned int n_kv = 0; n_kv < n_kv_max; n_kv += params.n_ubatch) {
         // clean up KV cache before generation
-        llama_kv_cache_seq_rm(ctx, 0, n_kv, -1);
+        llama_memory_seq_rm(ctx, 0, n_kv, -1);
         // first measure token generation performance at this context size
         const auto t_tg_start = ggml_time_us();
         for (unsigned int i = 0; i < tg; ++i) {
-            llama_batch_clear(batch);
-            llama_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, true);
+            common_batch_clear(batch);
+            common_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, true);
             if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
                 LOG_TEE("%s: llama_decode() failed\n", __func__);
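
A note on the cleanup calls in these hunks: assuming the renamed llama_memory_seq_rm keeps the semantics of the old llama_kv_cache_seq_rm (the last two arguments select a position range within the sequence, with -1 meaning open-ended), the call above trims the cache back to the measurement's starting context:

    // Assumed semantics, carried over from llama_kv_cache_seq_rm: remove the
    // cached entries of sequence 0 at positions [n_kv, end), i.e. discard the
    // tokens produced during the tg measurement while keeping the n_kv-token
    // context intact for the next step.
    llama_memory_seq_rm(ctx, 0, n_kv, -1);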
@@ -155,13 +155,13 @@ int main(int argc, char ** argv) {
         const auto t_tg_end = ggml_time_us();
         // clean up KV cache after generation
-        llama_kv_cache_seq_rm(ctx, 0, n_kv, -1);
+        llama_memory_seq_rm(ctx, 0, n_kv, -1);
         // prepare batch of pp size for prompt processing performance measurement
-        llama_batch_clear(batch);
+        common_batch_clear(batch);
         for (unsigned int i = 0; i < pp; ++i) {
-            llama_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, false);
+            common_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, false);
         }
         batch.logits[batch.n_tokens - 1] = true;
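
The excerpt ends just before the timing arithmetic, so for completeness, here is a sketch of how ggml_time_us() timestamps typically become the reported tokens-per-second figures. The names mirror the hunks above, but these exact lines are reconstructed from the surrounding pattern, not part of this commit's diff:

    // tg throughput: `tg` single-token decodes ran between the two timestamps.
    const double t_tg     = (t_tg_end - t_tg_start) / 1e6; // microseconds -> seconds
    const double speed_tg = tg / t_tg;                     // generated tokens/s

    // pp throughput would be computed the same way around a single decode of
    // the pp-sized batch prepared above: speed_pp = pp / elapsed_seconds.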