spec : add self speculative decoding, ngram and refactor (#1261)

* spec : add self speculative decoding and ngram-mod and refactor

common : use common_ prefix for common library functions

llama : use LLAMA_TOKEN_NULL

spec : add self speculative decoding (no draft model required) + refactor
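
(Background for the note above, not part of the change itself: draft-model-free speculation drafts tokens from the model's own output history. A minimal sketch of the core ngram idea follows; the function name and parameters are illustrative, not the actual helpers added by this commit.)

    // Hypothetical sketch of ngram self-drafting: look for the most recent
    // earlier occurrence of the last `n` generated tokens, and propose the
    // tokens that followed that occurrence as the draft.
    #include <cstdint>
    #include <vector>

    using llama_token  = int32_t;
    using llama_tokens = std::vector<llama_token>;

    static llama_tokens draft_from_ngram(const llama_tokens & history, size_t n, size_t n_draft) {
        llama_tokens draft;
        if (history.size() < n + 1) {
            return draft;
        }
        // the ngram to match is the tail of the history
        const size_t tail = history.size() - n;
        // scan backwards for the most recent earlier occurrence of that ngram
        for (size_t i = tail; i-- > 0; ) {
            bool match = true;
            for (size_t j = 0; j < n; ++j) {
                if (history[i + j] != history[tail + j]) { match = false; break; }
            }
            if (match) {
                // propose the continuation that followed the earlier occurrence
                for (size_t k = i + n; k < history.size() && draft.size() < n_draft; ++k) {
                    draft.push_back(history[k]);
                }
                break;
            }
        }
        return draft; // empty draft => fall back to normal decoding for this step
    }

The target model then verifies the drafted tokens in a single batch; only the longest accepted prefix is kept, so a bad draft costs little.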

spec : add ngram-mod

spec : various improvements to ngram-map + docs

spec : fix the check-rate logic of ngram-simple

common : add common_speculative_is_compat()
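
(The signature of common_speculative_is_compat() is not shown in this diff. As a rough idea only, such a check typically verifies that the target and draft contexts have compatible vocabularies before enabling speculation; the sketch below uses the classic llama.cpp C API and may differ from the real helper.)

    // Hypothetical compatibility check between target and draft contexts.
    static bool spec_is_compat_sketch(llama_context * ctx_tgt, llama_context * ctx_dft) {
        const llama_model * model_tgt = llama_get_model(ctx_tgt);
        const llama_model * model_dft = llama_get_model(ctx_dft);

        // special tokens must agree, otherwise drafted sequences desync
        if (llama_token_bos(model_tgt) != llama_token_bos(model_dft)) return false;
        if (llama_token_eos(model_tgt) != llama_token_eos(model_dft)) return false;

        // tolerate a small vocab-size difference (padded vocabs), reject otherwise
        const int n_tgt = llama_n_vocab(model_tgt);
        const int n_dft = llama_n_vocab(model_dft);
        const int diff  = n_tgt > n_dft ? n_tgt - n_dft : n_dft - n_tgt;
        return diff <= 128;
    }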

spec : simplify time measurement using common_time_meas
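
(common_time_meas is used here as a scoped timer. A minimal sketch of the RAII pattern it follows, with illustrative member names:)

    // Illustrative scoped timer in the spirit of common_time_meas: it adds the
    // wall-clock time of its enclosing scope to a caller-owned accumulator.
    #include <chrono>
    #include <cstdint>

    struct time_meas_sketch {
        explicit time_meas_sketch(int64_t & t_acc_us)
            : t_acc_us(t_acc_us), t_start(std::chrono::steady_clock::now()) {}

        ~time_meas_sketch() {
            const auto t_end = std::chrono::steady_clock::now();
            t_acc_us += std::chrono::duration_cast<std::chrono::microseconds>(t_end - t_start).count();
        }

        int64_t & t_acc_us;                            // accumulator owned by the caller
        std::chrono::steady_clock::time_point t_start; // scope entry time
    };

    // usage: the drafting time lands in t_draft_us when `tm` leaves scope
    // int64_t t_draft_us = 0;
    // { time_meas_sketch tm(t_draft_us); /* ... generate draft ... */ }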

refactor common_sampler_init

refactor common_token_to_piece

refactor and fix cur_p bug

clean up

* spec : remove check rate

* spec : show warnings instead of aborting

---------

Co-authored-by: firecoperana <firecoperana>
Co-authored-by: Sascha Rogmann <59577610+srogmann@users.noreply.github.com>
Author: firecoperana
Date: 2026-02-13 12:04:55 -06:00 (committed by GitHub)
Parent: 1fdbc0dafe
Commit: 1cb7e1bf39
54 changed files with 2652 additions and 779 deletions


@@ -372,8 +372,8 @@ common_prefix find_common_text_token_prefix(const llama_context* ctx, const llam
     llama_tokens a_sub(a.begin() + start, a.end());
     llama_tokens b_sub(b.begin() + start, b.end());
-    std::string a_str = common_token_to_piece(ctx, a_sub, true);
-    std::string b_str = common_token_to_piece(ctx, b_sub, true);
+    std::string a_str = common_detokenize(ctx, a_sub, true);
+    std::string b_str = common_detokenize(ctx, b_sub, true);
     common_prefix string_prefix;
     std::vector<size_t> a_list;
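
(The hunk above, like the ones below, swaps common_token_to_piece for common_detokenize. In upstream llama.cpp, common_token_to_piece converts a single token to its text piece, while common_detokenize converts a whole token sequence; these hunks standardize on the sequence helper. Assuming the upstream signatures, which may differ in this fork, the distinction looks like this:)

    // std::string common_token_to_piece(const llama_context * ctx, llama_token token, bool special);
    // std::string common_detokenize(const llama_context * ctx, const std::vector<llama_token> & tokens, bool special);
    static std::string detokenize_piecewise(const llama_context * ctx, const llama_tokens & tokens) {
        std::string out;
        for (llama_token t : tokens) {
            out += common_token_to_piece(ctx, t, /*special=*/true); // one token at a time
        }
        // common_detokenize(ctx, tokens, true) replaces this loop with a single
        // call that also applies tokenizer-specific cleanup (e.g. spacing)
        return out;
    }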
@@ -1722,7 +1722,7 @@ server_tokens::server_tokens(const llama_tokens& tokens, bool has_mtmd) : has_mt
             text_tokens.push_back(t);
         }
     }
-    return common_token_to_piece(ctx, text_tokens, special);
+    return common_detokenize(ctx, text_tokens, special);
 }

 std::string server_tokens::detokenize(const llama_context* ctx, bool special, size_t start, size_t length) const {
@@ -1744,7 +1744,7 @@ server_tokens::server_tokens(const llama_tokens& tokens, bool has_mtmd) : has_mt
         }
         ++i;
     }
-    return common_token_to_piece(ctx, text_tokens, special);
+    return common_detokenize(ctx, text_tokens, special);
 }

 size_t server_tokens::find_n_from_tokens(const llama_context* ctx, const server_tokens& b, bool special,
@@ -1812,7 +1812,7 @@ server_tokens::server_tokens(const llama_tokens& tokens, bool has_mtmd) : has_mt
     std::string endStr = think_token.end;
     llama_tokens tokens = get_text_tokens();
-    std::string str = common_token_to_piece(ctx, tokens, true);
+    std::string str = common_detokenize(ctx, tokens, true);
     std::vector<std::pair<size_t, size_t>> results;
     // Find all positions of start and end