Server: refactor and rename functions (#1151)

* Server: rename functions and refactor code

  - rename functions
  - refactor update slots
  - rename params_base
  - rename timings

* change

* Revert kv cache name changes

* Revert 2

* fix test build error

---------

Co-authored-by: firecoperana <firecoperana>
Author:  firecoperana
Date:    2026-01-18 00:16:57 -06:00 (committed by GitHub)
Commit:  d71a3ec315
Parent:  7024fdbc72
Stats:   38 changed files with 532 additions and 528 deletions
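The rename mirrors the naming used in mainline llama.cpp, where the batch helpers moved from the llama_ prefix into the common library. As a reference for reading the hunks below, here is a minimal sketch of what the two renamed helpers do, modeled on upstream common/common.cpp; the exact definitions in this fork are an assumption:

#include <vector>
#include "llama.h"

// Reset the batch so it can be refilled before the next llama_decode call.
void common_batch_clear(struct llama_batch & batch) {
    batch.n_tokens = 0;
}

// Append one token: its id, its position, the sequence ids that share it,
// and whether logits should be computed at this position.
void common_batch_add(struct llama_batch & batch, llama_token id, llama_pos pos,
                      const std::vector<llama_seq_id> & seq_ids, bool logits) {
    batch.token   [batch.n_tokens] = id;
    batch.pos     [batch.n_tokens] = pos;
    batch.n_seq_id[batch.n_tokens] = seq_ids.size();
    for (size_t i = 0; i < seq_ids.size(); ++i) {
        batch.seq_id[batch.n_tokens][i] = seq_ids[i];
    }
    batch.logits  [batch.n_tokens] = logits;
    batch.n_tokens++;
}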

examples/perplexity/perplexity.cpp

@@ -869,7 +869,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
             hs_cur.seq_tokens[2].size() - hs_cur.common_prefix +
             hs_cur.seq_tokens[3].size() - hs_cur.common_prefix;
 
-        //GGML_ASSERT(hs_cur.common_prefix >= ::llama_tokenize(ctx, hs_cur.context, true).size());
+        //GGML_ASSERT(hs_cur.common_prefix >= ::common_tokenize(ctx, hs_cur.context, true).size());
 
         // Delete the selected random example from the prompt
         if (randomize_tasks) {
@@ -906,7 +906,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
         size_t i1 = i0;
         size_t i_logits = 0; // this tells us how many logits were needed before this point in the batch
 
-        llama_batch_clear(batch);
+        common_batch_clear(batch);
 
         // batch as much tasks as possible into the available context
         // each task has 4 unique sequence ids - one for each ending
@@ -922,7 +922,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
             }
 
             for (size_t i = 0; i < hs_cur.common_prefix; ++i) {
-                llama_batch_add(batch, hs_cur.seq_tokens[0][i], i, { s0 + 0, s0 + 1, s0 + 2, s0 + 3 }, false);
+                common_batch_add(batch, hs_cur.seq_tokens[0][i], i, { s0 + 0, s0 + 1, s0 + 2, s0 + 3 }, false);
             }
             batch.logits[batch.n_tokens - 1] = true; // we need logits for the last token of the common prefix
             n_logits += 1;
@@ -932,7 +932,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
                 // TODO: don't evaluate the last token of each sequence
                 for (size_t i = hs_cur.common_prefix; i < seq_tokens_size; ++i) {
                     const bool needs_logits = i < seq_tokens_size - 1;
-                    llama_batch_add(batch, hs_cur.seq_tokens[s][i], i, { s0 + s }, needs_logits);
+                    common_batch_add(batch, hs_cur.seq_tokens[s][i], i, { s0 + s }, needs_logits);
                     n_logits += needs_logits;
                 }
             }
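All of the hellaswag_score hunks touch one batching pattern: the common prefix is added once, tagged with all four ending sequences so its KV-cache entries are shared, and each ending then extends only its own sequence, requesting logits only at positions that are actually scored. A standalone sketch of that pattern using the renamed helpers; the token ids, sizes, and wrapper function name are placeholders:

// Sketch: one task batched as four sequences over a shared 3-token prefix.
static void batch_one_task(llama_batch & batch) {
    common_batch_clear(batch);

    std::vector<llama_token> prefix = { 11, 12, 13 };    // placeholder ids
    for (size_t i = 0; i < prefix.size(); ++i) {
        // one KV-cache entry per prefix token serves endings 0..3
        common_batch_add(batch, prefix[i], i, { 0, 1, 2, 3 }, false);
    }
    batch.logits[batch.n_tokens - 1] = true; // scores each ending's first token

    std::vector<llama_token> ending = { 21, 22, 23 };    // ending of sequence 0
    for (size_t i = 0; i < ending.size(); ++i) {
        const bool needs_logits = i < ending.size() - 1; // last token predicts nothing
        common_batch_add(batch, ending[i], prefix.size() + i, { 0 }, needs_logits);
    }
}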
@@ -1191,7 +1191,7 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
         size_t i1 = i0;
         size_t i_logits = 0;
 
-        llama_batch_clear(batch);
+        common_batch_clear(batch);
 
         while (n_cur + (int) data[i1].required_tokens <= n_ctx) {
             int n_logits = 0;
@@ -1201,7 +1201,7 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
             }
 
             for (size_t i = 0; i < data[i1].common_prefix; ++i) {
-                llama_batch_add(batch, data[i1].seq_tokens[0][i], i, { s0 + 0, s0 + 1 }, false);
+                common_batch_add(batch, data[i1].seq_tokens[0][i], i, { s0 + 0, s0 + 1 }, false);
             }
             batch.logits[batch.n_tokens - 1] = true;
             n_logits += 1;
@@ -1209,7 +1209,7 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
             for (int s = 0; s < 2; ++s) {
                 // TODO: end before the last token, no need to predict past the end of the sequences
                 for (size_t i = data[i1].common_prefix; i < data[i1].seq_tokens[s].size(); ++i) {
-                    llama_batch_add(batch, data[i1].seq_tokens[s][i], i, { s0 + s }, true);
+                    common_batch_add(batch, data[i1].seq_tokens[s][i], i, { s0 + s }, true);
                     n_logits += 1;
                 }
             }
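One difference against hellaswag_score: the Winogrande loop passes true for every ending token (the TODO notes it could stop one position early), and both scorers carry an i_logits counter because llama_decode emits one row of logits per position that requested them, so each task must know how many rows earlier tasks consumed. A hedged sketch of that lookup; the helper name and its arguments are hypothetical:

// Sketch: recover a task's first logits row from the flat decode output.
// llama_get_logits returns n_vocab floats per requested position, in batch
// order; i_logits is the number of rows consumed by earlier tasks.
static const float * task_logits(llama_context * ctx, size_t i_logits, int n_vocab) {
    return llama_get_logits(ctx) + i_logits * n_vocab;
}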
@@ -1547,7 +1547,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
         size_t i1 = i0;
         size_t i_logits = 0; // this tells us how many logits were needed before this point in the batch
 
-        llama_batch_clear(batch);
+        common_batch_clear(batch);
 
         // batch as much tasks as possible into the available context
         // each task has 4 unique sequence ids - one for each ending
@@ -1569,8 +1569,8 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
             for (int s = 0; s < num_answers; ++s) batch_indeces[s] = s0 + s;
 
             for (size_t i = 0; i < cur_task.common_prefix; ++i) {
-                //llama_batch_add(batch, cur_task.seq_tokens[0][i], i, { s0 + 0, s0 + 1, s0 + 2, s0 + 3}, false);
-                llama_batch_add(batch, cur_task.seq_tokens[0][i], i, batch_indeces, false);
+                //common_batch_add(batch, cur_task.seq_tokens[0][i], i, { s0 + 0, s0 + 1, s0 + 2, s0 + 3}, false);
+                common_batch_add(batch, cur_task.seq_tokens[0][i], i, batch_indeces, false);
             }
             batch.logits[batch.n_tokens - 1] = true; // we need logits for the last token of the common prefix
             n_logits += 1;
@@ -1580,7 +1580,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
                 // TODO: don't evaluate the last token of each sequence
                 for (size_t i = cur_task.common_prefix; i < seq_tokens_size; ++i) {
                     const bool needs_logits = i < seq_tokens_size - 1;
-                    llama_batch_add(batch, cur_task.seq_tokens[s][i], i, { s0 + s }, needs_logits);
+                    common_batch_add(batch, cur_task.seq_tokens[s][i], i, { s0 + s }, needs_logits);
                     n_logits += needs_logits;
                 }
             }
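multiple_choice_score differs from the other two scorers in that the number of answers varies per task, which is why the fixed four-id initializer list was replaced by the per-task batch_indeces vector (the diff's own spelling). A sketch of that idiom in isolation; the wrapper function and its arguments are placeholders:

// Sketch: share a task's common prefix across a variable number of answers.
static void add_common_prefix(llama_batch & batch, const std::vector<llama_token> & prefix,
                              int s0, int num_answers) {
    // sized per task, since tasks differ in answer count
    std::vector<llama_seq_id> batch_indeces(num_answers);
    for (int s = 0; s < num_answers; ++s) {
        batch_indeces[s] = s0 + s;
    }
    for (size_t i = 0; i < prefix.size(); ++i) {
        // each prefix token lands in every answer's sequence in one call
        common_batch_add(batch, prefix[i], i, batch_indeces, false);
    }
    batch.logits[batch.n_tokens - 1] = true; // scoring starts at the prefix's last logit
}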