diff --git a/common/common.cpp b/common/common.cpp
index 6219f0ce..44678d7a 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -2169,8 +2169,10 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
         if (bos != -1) {
             tmp.push_back(bos);
         }
-        tmp.push_back(eos);
-
+        else
+        {
+            tmp.push_back(eos);
+        }
         if (llama_model_has_encoder(model)) {
             llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size(), 0, 0));
             llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index 41b93df5..95df06dc 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -1586,7 +1586,7 @@ int main(int argc, char ** argv) {
         if (params.warmup) {
             if (t.n_prompt > 0) {
                 //test_prompt(ctx, std::min(t.n_batch, std::min(t.n_prompt, 32)), 0, t.n_batch, t.n_threads);
-                test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
+                test_prompt(ctx, 1, 0, t.n_batch, t.n_threads);
             }
             if (t.n_gen > 0) {
                 test_gen(ctx, 1, 0, t.n_threads);
diff --git a/src/llama.cpp b/src/llama.cpp
index 2ee15c7a..b2553802 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -14521,7 +14521,7 @@ static struct ggml_cgraph * llama_build_graph(
     const llama_vocab * vocab = llama_get_vocab(&lctx);
     llama_token bos = llama_token_bos_impl(*vocab);
     llama_token eos = llama_token_eos_impl(*vocab);
-    bool is_warming_up = (batch.n_tokens == 2 && batch.token[0] == bos && batch.token[1] == eos);
+    bool is_warming_up = (batch.n_tokens == 1 && batch.token[0] == bos);
     struct llm_build_context llm(lctx, batch, cb, worst_case, is_warming_up);
     llm.init();