mirror of
https://github.com/ikawrakow/ik_llama.cpp.git
synced 2026-01-26 17:20:01 +00:00
Port universal assisted decoding to llama-server (#699)
* port universal assisted decoding to server
* fix calls
* fix LOG_INFO
* fix llama_detokenize call
* use emplace_back
This commit is contained in:
@@ -13,10 +13,17 @@ struct llama_speculative_params {
     float p_min = 0.75f; // min probability required to accept a token in the draft
 };
 
-struct llama_speculative * llama_speculative_init(struct llama_context * ctx_dft);
+struct llama_speculative * llama_speculative_init(
+        struct llama_context * ctx_tgt,
+        struct llama_context * ctx_dft
+);
 
 void llama_speculative_free(struct llama_speculative * spec);
 
+void llama_speculative_add_replacement_tgt_dft(
+        struct llama_speculative * spec,
+        const char *source, const char *dest);
+
 bool llama_speculative_are_compatible(
         const struct llama_context * ctx_tgt,
         const struct llama_context * ctx_dft);
Reference in New Issue
Block a user