Unify warmup to one token

Saood Karim
2025-02-09 16:05:16 -06:00
parent ca4e8e5346
commit 370274317b
3 changed files with 6 additions and 4 deletions
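
This change shrinks the warmup batch from two tokens (BOS followed by EOS) to exactly one: BOS when the model defines one, otherwise EOS. Three spots move in lockstep: the warmup batch built in llama_init_from_gpt_params(), the warmup runs in llama-bench, and the warmup detection in llama_build_graph(). A minimal sketch of the resulting warmup path follows, assuming the surrounding code matches the shape of the first hunk; the helper name warmup_one_token is illustrative, and encoder-decoder models additionally go through llama_encode() as the first hunk's context shows.

#include <vector>
#include "llama.h"

// Hypothetical helper mirroring the warmup logic after this commit.
static void warmup_one_token(llama_context * lctx, llama_model * model) {
    std::vector<llama_token> tmp;

    llama_token bos = llama_token_bos(model);
    llama_token eos = llama_token_eos(model);

    // Exactly one warmup token: prefer BOS, fall back to EOS for models
    // that do not define a BOS token (bos == -1).
    if (bos != -1) {
        tmp.push_back(bos);
    } else {
        tmp.push_back(eos);
    }

    // Run the single-token batch once to warm up backend buffers, then
    // drop it from the KV cache so real inference starts clean.
    llama_decode(lctx, llama_batch_get_one(tmp.data(), tmp.size(), 0, 0));
    llama_kv_cache_clear(lctx);
}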

@@ -2169,8 +2169,10 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
         if (bos != -1) {
             tmp.push_back(bos);
         }
-        tmp.push_back(eos);
-
+        else
+        {
+            tmp.push_back(eos);
+        }
         if (llama_model_has_encoder(model)) {
             llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size(), 0, 0));
             llama_token decoder_start_token_id = llama_model_decoder_start_token(model);

@@ -1586,7 +1586,7 @@ int main(int argc, char ** argv) {
         if (params.warmup) {
             if (t.n_prompt > 0) {
                 //test_prompt(ctx, std::min(t.n_batch, std::min(t.n_prompt, 32)), 0, t.n_batch, t.n_threads);
-                test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
+                test_prompt(ctx, 1, 0, t.n_batch, t.n_threads);
             }
             if (t.n_gen > 0) {
                 test_gen(ctx, 1, 0, t.n_threads);
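
With this change, llama-bench's prompt-phase warmup processes a single token instead of the full t.n_prompt, matching the one-token generation-phase warmup in test_gen and keeping warmup cost constant regardless of the benchmarked prompt length.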

@@ -14521,7 +14521,7 @@ static struct ggml_cgraph * llama_build_graph(
     const llama_vocab * vocab = llama_get_vocab(&lctx);
     llama_token bos = llama_token_bos_impl(*vocab);
     llama_token eos = llama_token_eos_impl(*vocab);
-    bool is_warming_up = (batch.n_tokens == 2 && batch.token[0] == bos && batch.token[1] == eos);
+    bool is_warming_up = (batch.n_tokens == 1 && batch.token[0] == bos);
     struct llm_build_context llm(lctx, batch, cb, worst_case, is_warming_up);
     llm.init();
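
The warmup detection in the graph builder is updated to match the new one-token batch: a single BOS token instead of the former BOS+EOS pair. One caveat follows from the first hunk: a model without a BOS token warms up with a lone EOS, which this predicate would not classify as a warmup batch.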