// ref: https://github.com/ggerganov/llama.cpp/issues/4952#issuecomment-1892864763

#include <cstdio>
#include <string>
#include <thread>

#include "llama.h"
#include "get-model.h"

// This creates a new context inside a pthread and then tries to exit cleanly.
int main(int argc, char ** argv) {
    auto * model_path = get_model_or_exit(argc, argv);

    std::thread([&model_path]() {
        llama_backend_init();
        auto * model = llama_model_load_from_file(model_path, llama_model_default_params());
        auto * ctx = llama_init_from_model(model, llama_context_default_params());
        llama_free(ctx);
        llama_free_model(model);
        llama_backend_free();
    }).join();

    return 0;
}
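
// ---------------------------------------------------------------------------
// Side note (not part of the original test): the lambda above skips error
// handling, which is fine for a smoke test of clean thread teardown. Below is
// a minimal sketch of the same create/free flow with explicit null checks.
// It assumes the upstream llama.cpp convention that both
// llama_model_load_from_file() and llama_init_from_model() return nullptr on
// failure; run_checked() is a hypothetical helper name introduced here.
// For running the test itself, get_model_or_exit() in upstream llama.cpp
// reads the model path from argv[1] or, failing that, from the
// LLAMACPP_TEST_MODELFILE environment variable (assumption based on
// upstream tests/get-model.cpp).

#include <cstdio>
#include <thread>

#include "llama.h"

// Hypothetical helper (sketch): same sequence as the test above, but each
// step is checked and resources are released in reverse order of creation.
static int run_checked(const char * model_path) {
    int rc = 1; // assume failure until the full sequence completes

    std::thread([&]() {
        llama_backend_init();

        llama_model * model = llama_model_load_from_file(model_path, llama_model_default_params());
        if (model == nullptr) {
            fprintf(stderr, "%s: failed to load model '%s'\n", __func__, model_path);
            llama_backend_free();
            return;
        }

        llama_context * ctx = llama_init_from_model(model, llama_context_default_params());
        if (ctx == nullptr) {
            fprintf(stderr, "%s: failed to create context\n", __func__);
            llama_free_model(model);
            llama_backend_free();
            return;
        }

        // tear down in reverse order of creation: context, model, backend
        llama_free(ctx);
        llama_free_model(model);
        llama_backend_free();

        rc = 0;
    }).join();

    return rc;
}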