Merge remote-tracking branch 'origin/main' into s6/mikupad

Saood Karim
2025-08-16 00:04:07 -05:00
64 changed files with 9411 additions and 5162 deletions

View File

@@ -76,6 +76,7 @@ add_library(${TARGET} STATIC
minja.hpp
ngram-cache.h
ngram-cache.cpp
speculative.cpp
)
if (BUILD_SHARED_LIBS)

View File

@@ -24,9 +24,9 @@ class common_chat_msg_parser {
std::string prelude;
std::vector<common_string_range> groups;
};
common_chat_msg_parser(const std::string & input, bool is_partial, const common_chat_syntax & syntax);
// Accessors
const std::string & input() const { return input_; }
size_t pos() const { return pos_; }
@@ -42,7 +42,7 @@ class common_chat_msg_parser {
}
pos_ = pos;
}
void move_back(size_t n) {
if (pos_ < n) {
throw std::runtime_error("Can't move back that far!");
@@ -56,46 +56,46 @@ class common_chat_msg_parser {
// Content manipulation
void add_content(const std::string & content);
void add_reasoning_content(const std::string & reasoning_content);
// Tool call manipulation
void add_tool_call(const common_chat_tool_call & tool_call);
bool add_tool_call(const std::string & name, const std::string & id, const std::string & arguments);
bool add_tool_call(const json & tool_call);
bool add_tool_calls(const json & arr);
void clear_tools();
// Parsing utilities
std::string consume_rest();
bool try_consume_literal(const std::string & literal);
void consume_literal(const std::string & literal);
bool try_parse_reasoning(const std::string & start_think, const std::string & end_think);
// Regex-based parsing methods (new)
std::optional<find_regex_result> try_find_regex(const common_regex & regex, size_t from = std::string::npos, bool add_prelude_to_content = true);
find_regex_result consume_regex(const common_regex & regex);
std::optional<find_regex_result> try_consume_regex(const common_regex & regex);
// Progressive parsing primitives (for Phase 4)
std::optional<find_regex_result> try_find_literal(const std::string & literal);
bool consume_spaces();
void set_healing_marker(const std::string & marker);
// Main parsing entry point
void parse();
// Finishing
void finish();
// Result extraction
common_chat_msg result_and_reset();
// Advanced JSON parsing (following original llama.cpp patterns)
struct consume_json_result {
json value;
bool is_partial;
};
std::optional<common_json> try_consume_json();
common_json consume_json();
consume_json_result consume_json_with_dumped_args(
@@ -112,8 +112,8 @@ private:
void parse_kimi_k2_format();
void parse_deepseek_r1_format();
void parse_generic_format();
// JSON parsing utilities (enhanced streaming support)
struct json_parse_result {
json value;
@@ -121,11 +121,11 @@ private:
bool is_partial;
std::string healing_marker;
};
// Partial detection utilities
bool detect_partial_function_call(const std::string& content);
void handle_partial_detection();
// Legacy find_literal for compatibility
std::optional<find_regex_result> try_find_literal_legacy(const std::string & literal);
};
@@ -133,4 +133,4 @@ private:
// Main parsing function (public API)
common_chat_msg common_chat_parse(const std::string & input, bool is_partial, const common_chat_syntax & syntax);
// Content-only parsing for fallback scenarios (static internal function)
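
For orientation, a minimal usage sketch of the parser entry point declared above. This is not part of the commit: the "chat.h" include path is assumed from the usual llama.cpp layout, the sample string is illustrative, and the fields read from the result (content, reasoning_content, tool_calls) follow the chat.h hunks later in this diff.

#include "chat.h"   // assumed header name for the declarations shown in this diff

#include <cstdio>
#include <string>

int main() {
    common_chat_syntax syntax;
    syntax.format           = COMMON_CHAT_FORMAT_DEEPSEEK_R1;
    syntax.reasoning_format = COMMON_REASONING_FORMAT_AUTO;

    // illustrative model output: a reasoning block followed by plain content
    const std::string raw = "<think>check the units first</think>The answer is 42.";

    // is_partial = false: the generation is complete, so no healing markers are needed
    const common_chat_msg msg = common_chat_parse(raw, /*is_partial=*/false, syntax);

    printf("reasoning: %s\n", msg.reasoning_content.c_str());
    printf("content  : %s\n", msg.content.c_str());
    for (const auto & tc : msg.tool_calls) {
        printf("tool call: %s(%s)\n", tc.name.c_str(), tc.arguments.c_str());
    }
    return 0;
}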

View File

@@ -220,7 +220,7 @@ void common_chat_parse_deepseek_r1(common_chat_msg_parser & builder) {
// Check for the new tools array format first (no DeepSeek markers)
auto original_pos = builder.pos();
// First, try the tools array format for content like "function\n```json\n{"tools": [...]}"
if (builder.try_find_regex(function_regex_simple)) {
builder.move_to(original_pos);
@@ -231,7 +231,7 @@ void common_chat_parse_deepseek_r1(common_chat_msg_parser & builder) {
// Fall through to try standard DeepSeek patterns
}
}
// If tools array format didn't work, try XML-wrapped format
builder.move_to(original_pos);
try {
@@ -240,7 +240,7 @@ void common_chat_parse_deepseek_r1(common_chat_msg_parser & builder) {
} catch (const common_chat_msg_partial_exception&) {
// Fall through to try standard DeepSeek patterns
}
// If XML wrapper format didn't work, try standard DeepSeek patterns
builder.move_to(original_pos);
try {
@@ -278,7 +278,7 @@ void common_chat_parse_deepseek_r1(common_chat_msg_parser & builder) {
throw; // Re-throw for partial mode
}
}
// Add any remaining content (critical for responses without tool calls)
builder.add_content(builder.consume_rest());
}
@@ -286,19 +286,19 @@ void common_chat_parse_deepseek_r1(common_chat_msg_parser & builder) {
// Parse DeepSeek R1 tools array format following original llama.cpp parse_prefixed_json_tool_call_array pattern
static void parse_deepseek_r1_tools_array(common_chat_msg_parser & builder) {
static const common_regex prefix("function\n```json\n");
if (auto res = builder.try_find_regex(prefix)) {
// Parse JSON and manually process tools array to convert arguments to strings
auto json_result = builder.try_consume_json();
if (!json_result) {
throw common_chat_msg_partial_exception("invalid JSON");
}
// DeepSeek R1 format has "tools" array, manually process each tool
if (json_result->json.contains("tools") && json_result->json.at("tools").is_array()) {
// Manually create tool calls array with string arguments (following original pattern)
json tools_with_dumped_args = json::array();
for (const auto& tool : json_result->json.at("tools")) {
@@ -310,15 +310,15 @@ static void parse_deepseek_r1_tools_array(common_chat_msg_parser & builder) {
tools_with_dumped_args.push_back(formatted_tool);
}
}
if (!builder.add_tool_calls(tools_with_dumped_args) || !json_result->healing_marker.marker.empty()) {
throw common_chat_msg_partial_exception("incomplete tool call array");
}
} else {
throw common_chat_msg_partial_exception("tools key not found or not array");
}
// Consume closing ```
builder.try_consume_regex(common_regex("```"));
} else {
@@ -326,41 +326,41 @@ static void parse_deepseek_r1_tools_array(common_chat_msg_parser & builder) {
}
}
// Parse DeepSeek R1 XML-wrapped format following original Hermes-2-Pro pattern
static void parse_deepseek_r1_xml_wrapped(common_chat_msg_parser & builder) {
// Pattern for: <tool_call>\nfunction</think>FunctionName\n```json\n{...}\n```\n</tool_call>
static const common_regex xml_pattern(
"<tool_call>\\s*" // Opening XML tag
"function</think>([^\\n]+)" // Function name after "function</think>"
"function</think>([^\\n]+)" // Function name after "function</think>"
"\\s*```json\\s*" // JSON block start
);
if (auto res = builder.try_find_regex(xml_pattern)) {
// Extract function name from capture group
std::string function_name = builder.str(res->groups[1]);
// Parse JSON arguments
auto json_result = builder.try_consume_json();
if (!json_result) {
throw common_chat_msg_partial_exception("invalid JSON in XML wrapper");
}
// Create single tool call following original pattern
json tool_call;
tool_call["name"] = function_name;
tool_call["arguments"] = json_result->json.dump(); // Convert to string
json tool_calls_array = json::array();
tool_calls_array.push_back(tool_call);
if (!builder.add_tool_calls(tool_calls_array) || !json_result->healing_marker.marker.empty()) {
throw common_chat_msg_partial_exception("incomplete XML wrapped tool call");
}
// Consume closing ```\n</tool_call>
builder.try_consume_regex(common_regex("```\\s*</tool_call>"));
} else {
@@ -384,6 +384,15 @@ static void common_chat_parse_kimi_k2(common_chat_msg_parser & builder) {
builder.add_content(kimi_k2::clean_content(builder.input()));
}
static void common_chat_parse_gpt_oss(common_chat_msg_parser & builder) {
// TODO @ngxson : this won't work with --special enabled, we should fix that
builder.try_parse_reasoning("<|channel|>analysis<|message|>", "<|start|>assistant<|channel|>final<|message|>");
if (!builder.syntax().enable_tool_calls) {
builder.add_content(builder.consume_rest());
return;
}
}
// Main parsing dispatch function
static void common_chat_parse(common_chat_msg_parser & builder) {
switch (builder.syntax().format) {
@@ -399,6 +408,9 @@ static void common_chat_parse(common_chat_msg_parser & builder) {
case COMMON_CHAT_FORMAT_KIMI_K2:
common_chat_parse_kimi_k2(builder);
break;
case COMMON_CHAT_FORMAT_GPT_OSS:
common_chat_parse_gpt_oss(builder);
break;
default:
throw std::runtime_error(std::string("Unsupported format: ") + common_chat_format_name(builder.syntax().format));
}
@@ -432,6 +444,19 @@ const char* common_chat_format_name(common_chat_format format) {
case COMMON_CHAT_FORMAT_GENERIC: return "generic";
case COMMON_CHAT_FORMAT_DEEPSEEK_R1: return "deepseek_r1";
case COMMON_CHAT_FORMAT_KIMI_K2: return "kimi_k2";
case COMMON_CHAT_FORMAT_GPT_OSS: return "GPT-OSS";
default: return "unknown";
}
}
}
const char * common_reasoning_format_name(common_reasoning_format format) {
switch (format) {
case COMMON_REASONING_FORMAT_NONE: return "none";
case COMMON_REASONING_FORMAT_AUTO: return "auto";
case COMMON_REASONING_FORMAT_DEEPSEEK: return "deepseek";
case COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY: return "deepseek-legacy";
default:
throw std::runtime_error("Unknown reasoning format");
}
}
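
To make the tools-array path above concrete, here is a hedged sketch of the kind of raw model output parse_deepseek_r1_tools_array() is matching, reconstructed from the "function\n```json\n" prefix regex and the "tools" key check in this file. The per-tool key names ("name", "arguments") and the "chat.h" include are assumptions, not taken verbatim from these hunks.

#include "chat.h"   // assumed header name for common_chat_parse and the syntax struct

#include <cstdio>
#include <string>

int main() {
    // Reconstructed tools-array layout: the "function\n```json\n" prefix, a JSON object
    // carrying a "tools" array, and a closing ``` fence (per-tool keys are assumed).
    const std::string raw =
        "function\n"
        "```json\n"
        "{\"tools\": [{\"name\": \"get_weather\", \"arguments\": {\"city\": \"Paris\"}}]}\n"
        "```";

    common_chat_syntax syntax;
    syntax.format = COMMON_CHAT_FORMAT_DEEPSEEK_R1;

    const common_chat_msg msg = common_chat_parse(raw, /*is_partial=*/false, syntax);
    for (const auto & tc : msg.tool_calls) {
        // arguments are stored as a dumped JSON string (the parser dumps each tool's
        // arguments manually, following the original llama.cpp pattern)
        printf("tool: %s args: %s\n", tc.name.c_str(), tc.arguments.c_str());
    }
    return 0;
}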

View File

@@ -13,20 +13,20 @@ struct common_chat_templates;
struct common_string_range {
size_t begin;
size_t end;
common_string_range(size_t begin, size_t end) : begin(begin), end(end) {
if (begin > end) {
throw std::runtime_error("Invalid range");
}
}
// prevent default ctor
common_string_range() = delete;
bool empty() const {
return begin == end;
}
bool operator==(const common_string_range & other) const {
return begin == other.begin && end == other.end;
}
@@ -40,7 +40,7 @@ struct common_chat_tool_call {
bool operator==(const common_chat_tool_call & other) const {
return name == other.name && arguments == other.arguments && id == other.id;
}
bool operator!=(const common_chat_tool_call & other) const {
return !(*this == other);
}
@@ -65,10 +65,10 @@ struct common_chat_msg {
std::string tool_call_id;
bool empty() const {
return content.empty() && content_parts.empty() && tool_calls.empty() &&
reasoning_content.empty() && tool_name.empty() && tool_call_id.empty();
}
void ensure_tool_call_ids_set(std::vector<std::string> & ids_cache, const std::function<std::string()> & gen_tool_call_id) {
for (auto i = 0u; i < tool_calls.size(); i++) {
if (ids_cache.size() <= i) {
@@ -91,7 +91,7 @@ struct common_chat_msg {
&& tool_name == other.tool_name
&& tool_call_id == other.tool_call_id;
}
bool operator!=(const common_chat_msg & other) const {
return !(*this == other);
}
@@ -110,7 +110,7 @@ struct common_chat_msg_diff {
&& tool_call_index == other.tool_call_index
&& tool_call_delta == other.tool_call_delta;
}
bool operator!=(const common_chat_msg_diff & other) const {
return !(*this == other);
}
@@ -132,18 +132,20 @@ enum common_chat_format {
COMMON_CHAT_FORMAT_CONTENT_ONLY,
COMMON_CHAT_FORMAT_GENERIC,
COMMON_CHAT_FORMAT_DEEPSEEK_R1,
COMMON_CHAT_FORMAT_GPT_OSS,
COMMON_CHAT_FORMAT_KIMI_K2, // Our custom format (keep last for backward compatibility)
};
enum common_reasoning_format {
COMMON_REASONING_FORMAT_NONE,
COMMON_REASONING_FORMAT_AUTO,
COMMON_REASONING_FORMAT_DEEPSEEK,
COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY,
};
struct common_chat_syntax {
common_chat_format format = COMMON_CHAT_FORMAT_KIMI_K2;
common_reasoning_format reasoning_format = COMMON_REASONING_FORMAT_NONE;
common_reasoning_format reasoning_format = COMMON_REASONING_FORMAT_AUTO; //COMMON_REASONING_FORMAT_NONE;
// Whether reasoning_content should be inlined in the content (e.g. for reasoning_format=deepseek in stream mode)
bool reasoning_in_content = false;
bool thinking_forced_open = false;
@@ -165,11 +167,12 @@ class common_chat_msg_partial_exception : public std::runtime_error {
// Format detection from chat template
common_chat_format common_chat_format_detect(const std::string & chat_template);
const char* common_chat_format_name(common_chat_format format);
const char* common_reasoning_format_name(common_reasoning_format format);
// Main parsing function (entry point for original llama.cpp compatibility)
common_chat_msg common_chat_parse(const std::string & input, bool is_partial, const common_chat_syntax & syntax);
// Forward declare parser class
class common_chat_msg_parser;
// Format-specific parsing functions (accessible from chat-parser)

View File

@@ -505,6 +505,11 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.n_ctx = std::stoi(argv[i]);
return true;
}
if (arg == "-cd" || arg == "--ctx-size-draft") {
CHECK_ARG
params.n_ctx_draft = std::stoi(argv[i]);
return true;
}
if (arg == "--grp-attn-n" || arg == "-gan") {
CHECK_ARG
params.grp_attn_n = std::stoi(argv[i]);
@@ -725,7 +730,7 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
}
}
return true;
}
}
if (arg == "--cfg-negative-prompt") {
CHECK_ARG
sparams.cfg_negative_prompt = argv[i];
@@ -765,11 +770,21 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.n_keep = std::stoi(argv[i]);
return true;
}
if (arg == "--draft") {
if (arg == "--draft" || arg == "--draft-max" || arg == "--draft-n") {
CHECK_ARG
params.n_draft = std::stoi(argv[i]);
return true;
}
if (arg == "--draft-min" || arg == "--draft-n-min") {
CHECK_ARG
params.n_draft_min = std::stoi(argv[i]);
return true;
}
if (arg == "--draft-p-min") {
CHECK_ARG
params.p_draft_min = std::stof(argv[i]);
return true;
}
if (arg == "--chunks") {
CHECK_ARG
params.n_chunks = std::stoi(argv[i]);
@@ -934,6 +949,14 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.cache_type_v = argv[++i];
return true;
}
if (arg == "-ctkd" || arg == "--cache-type-k-draft") {
params.cache_type_k_draft = argv[++i];
return true;
}
if (arg == "-ctvd" || arg == "--cache-type-v-draft") {
params.cache_type_v_draft = argv[++i];
return true;
}
if (arg == "-mli" || arg == "--multiline-input") {
params.multiline_input = true;
return true;
@@ -1071,7 +1094,7 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
size_t pos = 0;
while ((pos = servers.find(",")) != std::string::npos) {
std::string server = servers.substr(0, pos);
ggml_backend_rpc_buffer_type(server.c_str());
servers.erase(0, pos + 1);
}
ggml_backend_rpc_buffer_type(servers.c_str());
@@ -1703,7 +1726,6 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
options.push_back({ "speculative", "-td, --threads-draft N", "number of threads to use during generation (default: same as --threads)" });
options.push_back({ "speculative", "-tbd, --threads-batch-draft N",
"number of threads to use during batch and prompt processing (default: same as --threads-draft)" });
options.push_back({ "speculative", " --draft N", "number of tokens to draft for speculative decoding (default: %d)", params.n_draft });
options.push_back({ "speculative", "-ps, --p-split N", "speculative decoding split probability (default: %.1f)", (double)params.p_split });
options.push_back({ "*", "-lcs, --lookup-cache-static FNAME",
"path to static lookup cache to use for lookup decoding (not updated by generation)" });
@@ -1711,6 +1733,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
"path to dynamic lookup cache to use for lookup decoding (updated by generation)" });
options.push_back({ "*", "-c, --ctx-size N", "size of the prompt context (default: %d, 0 = loaded from model)", params.n_ctx });
options.push_back({ "*", "-cd, --ctx-size-draft N", "size of the prompt context for the draft model (default: %d, 0 = loaded from model)", params.n_ctx_draft });
options.push_back({ "*", "-n, --predict N", "number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)", params.n_predict });
options.push_back({ "*", "-b, --batch-size N", "logical maximum batch size (default: %d)", params.n_batch });
options.push_back({ "*", "-ub, --ubatch-size N", "physical maximum batch size (default: %d)", params.n_ubatch });
@@ -1821,6 +1844,8 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
options.push_back({ "*", "-nkvo, --no-kv-offload", "disable KV offload" });
options.push_back({ "*", "-ctk, --cache-type-k TYPE", "KV cache data type for K (default: %s)", params.cache_type_k.c_str() });
options.push_back({ "*", "-ctv, --cache-type-v TYPE", "KV cache data type for V (default: %s)", params.cache_type_v.c_str() });
options.push_back({ "*", "-ctkd, --cache-type-k-draft TYPE", "KV cache data type for K for the draft model" });
options.push_back({ "*", "-ctvd, --cache-type-v-draft TYPE", "KV cache data type for V for the draft model" });
options.push_back({ "perplexity" });
options.push_back({ "perplexity", " --all-logits", "return logits for all tokens in the batch (default: %s)", params.logits_all ? "true" : "false" });
@@ -1903,6 +1928,10 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
options.push_back({ "*", "-hfr, --hf-repo REPO", "Hugging Face model repository (default: unused)" });
options.push_back({ "*", "-hff, --hf-file FILE", "Hugging Face model file (default: unused)" });
options.push_back({ "*", "-hft, --hf-token TOKEN", "Hugging Face access token (default: value from HF_TOKEN environment variable)" });
options.push_back({ "*", "--draft-max, --draft, --draft-n N",
"number of tokens to draft for speculative decoding (default: %d)", params.n_draft });
options.push_back({ "*", "--draft-min, --draft-n-min N", "minimum number of draft tokens to use for speculative decoding" });
options.push_back({ "*", "--draft-p-min P", "minimum speculative decoding probability (greedy) (default: %.1f)", (double)params.p_draft_min });
options.push_back({ "retrieval" });
options.push_back({ "retrieval", " --context-file FNAME", "file to load context from (repeat to specify multiple files)" });
@@ -2062,7 +2091,7 @@ std::string string_join(const std::vector<std::string> & strs, const std::string
if (strs.empty()) {
return "";
}
std::ostringstream oss;
for (size_t i = 0; i < strs.size(); ++i) {
if (i > 0) {

View File

@@ -83,10 +83,13 @@ struct gpt_params {
int32_t n_threads_batch_draft = -1;
int32_t n_predict = -1; // new tokens to predict
int32_t n_ctx = 0; // context size
int32_t n_ctx_draft = 0; // context size for draft model
int32_t n_batch = 2048; // logical batch size for prompt processing (must be >=32 to use BLAS)
int32_t n_ubatch = 512; // physical batch size for prompt processing (must be >=32 to use BLAS)
int32_t n_keep = 0; // number of tokens to keep from initial prompt
int32_t n_draft = 5; // number of tokens to draft during speculative decoding
int32_t n_draft = 16; // number of tokens to draft during speculative decoding
int32_t n_draft_min = 1; // minimum number of tokens to draft during speculative decoding
float p_draft_min = 0.8f; // minimum speculative decoding probability (greedy)
int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited)
int32_t n_parallel = 1; // number of parallel sequences to decode
int32_t n_sequences = 1; // number of sequences to decode
@@ -207,6 +210,8 @@ struct gpt_params {
std::string cache_type_k = "f16"; // KV cache data type for the K
std::string cache_type_v = "f16"; // KV cache data type for the V
std::string cache_type_k_draft = ""; // KV cache data type for K for the draft model
std::string cache_type_v_draft = ""; // KV cache data type for V for the draft model
// multimodal models (see examples/llava)
std::string mmproj = ""; // path to multimodal projector

View File

@@ -442,7 +442,9 @@ static llama_token_data_array llama_sampling_prepare_impl(
cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f};
}
llama_token_data_array cur_p = { cur.data(), cur.size(), false };
ctx_sampling->cur_p = { cur.data(), cur.size(), false };
llama_token_data_array & cur_p = ctx_sampling->cur_p;
// apply penalties
const auto& penalty_tokens = params.use_penalty_prompt_tokens ? params.penalty_prompt_tokens : prev;
@@ -506,3 +508,47 @@ void llama_sampling_accept(
llama_sampler_dry_accept(ctx_sampling->smpl, id);
}
}
llama_token_data_array * llama_sampling_get_candidates(struct llama_sampling_context * ctx_sampling) {
return &ctx_sampling->cur_p;
}
std::vector<llama_token> llama_sampling_sample_and_accept_n(struct llama_sampling_context * gsmpl, struct llama_context * ctx, const std::vector<llama_token> & draft) {
std::vector<int> idxs(draft.size() + 1);
for (size_t i = 0; i < idxs.size(); ++i) {
idxs[i] = i;
}
return llama_sampling_sample_and_accept_n(gsmpl, ctx, idxs, draft);
}
std::vector<llama_token> llama_sampling_sample_and_accept_n(struct llama_sampling_context * gsmpl, struct llama_context * ctx, const std::vector<int> & idxs, const std::vector<llama_token> & draft) {
GGML_ASSERT(idxs.size() == draft.size() + 1 && "idxs.size() must be draft.size() + 1");
std::vector<llama_token> result;
result.reserve(idxs.size());
size_t i = 0;
for (; i < draft.size(); i++) {
const llama_token id = llama_sampling_sample(gsmpl, ctx, nullptr, idxs[i]);
llama_sampling_accept(gsmpl, ctx, id, true);
result.push_back(id);
if (draft[i] != id) {
break;
}
}
if (i == draft.size()) {
const llama_token id = llama_sampling_sample(gsmpl, ctx, nullptr, idxs[i]);
llama_sampling_accept(gsmpl, ctx, id, true);
result.push_back(id);
}
return result;
}

View File

@@ -101,6 +101,8 @@ struct llama_sampling_context {
size_t n_valid; // Number of correct top tokens with correct probabilities.
llama_token_data_array cur_p; // current candidates
std::mt19937 rng;
};
@@ -176,3 +178,11 @@ void llama_sampling_accept(
struct llama_context * ctx_main,
llama_token id,
bool apply_grammar);
// access the internal list of current candidate tokens
llama_token_data_array * llama_sampling_get_candidates(struct llama_sampling_context * ctx_sampling);
// returns at least 1 token, up to draft.size() + 1 (the extra token is the correction at the
// first mismatch, or the bonus token sampled after a fully accepted draft)
std::vector<llama_token> llama_sampling_sample_and_accept_n(struct llama_sampling_context * gsmpl, struct llama_context * ctx, const std::vector<llama_token> & draft);
std::vector<llama_token> llama_sampling_sample_and_accept_n(struct llama_sampling_context * gsmpl, struct llama_context * ctx, const std::vector<int> & idxs, const std::vector<llama_token> & draft);
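
A sketch of how the new helper is intended to be used on the target side of a speculative step: decode the last accepted token plus the draft with logits at every position, then let the helper verify the draft. The function and variable names here (verify_draft, ctx_tgt, smpl_tgt) are illustrative, not part of this commit; the llama_batch_* helpers are the same ones the speculative.cpp hunk below relies on.

#include "common.h"
#include "sampling.h"

#include <vector>

// Target-side verification step. Assumes ctx_tgt's KV cache already holds the first
// n_past tokens of the sequence and id_last has not been decoded yet.
static std::vector<llama_token> verify_draft(
        struct llama_context           * ctx_tgt,
        struct llama_sampling_context  * smpl_tgt,
        llama_token                      id_last,
        const std::vector<llama_token> & draft,
        llama_pos                        n_past) {
    llama_batch batch = llama_batch_init(1 + (int32_t) draft.size(), 0, 1);
    llama_batch_clear(batch);

    // request logits for id_last and for every draft position, so that logit index i
    // lines up with the position llama_sampling_sample_and_accept_n() samples at
    llama_batch_add(batch, id_last, n_past, { 0 }, true);
    for (size_t i = 0; i < draft.size(); ++i) {
        llama_batch_add(batch, draft[i], n_past + 1 + (llama_pos) i, { 0 }, true);
    }
    llama_decode(ctx_tgt, batch);
    llama_batch_free(batch);

    // returns the accepted draft tokens plus one more: the correction at the first
    // mismatch, or a bonus token when the whole draft was accepted
    return llama_sampling_sample_and_accept_n(smpl_tgt, ctx_tgt, draft);
}

After this returns, the caller would normally roll the target KV cache back to n_past + result.size() (for example with llama_kv_cache_seq_rm) before the next step, since the rejected draft positions are still present in the cache.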

common/speculative.cpp (new file, 275 lines)
View File

@@ -0,0 +1,275 @@
#include "speculative.h"
#include "common.h"
#include "sampling.h"
#include "llama-impl.h"
#include <cstring>
#include <algorithm>
#define SPEC_VOCAB_MAX_SIZE_DIFFERENCE 128
#define SPEC_VOCAB_CHECK_START_TOKEN_ID 5
struct llama_speculative {
struct llama_context * ctx;
struct llama_sampling_context * smpl;
llama_batch batch;
std::vector<llama_token> prompt;
};
struct llama_speculative * llama_speculative_init(
struct llama_context * ctx_dft) {
auto * result = new llama_speculative {
/* .ctx = */ ctx_dft,
/* .smpl = */ nullptr,
/* .batch = */ llama_batch_init(llama_n_batch(ctx_dft), 0, 1),
/* .prompt = */ {},
};
// TODO: optimize or pass from outside?
#if 0
{
llama_sampling_params params;
params.no_perf = false;
params.top_k = 40;
params.top_p = 0.9;
params.samplers = {
COMMON_SAMPLER_TYPE_TOP_K,
COMMON_SAMPLER_TYPE_TOP_P,
COMMON_SAMPLER_TYPE_INFILL,
};
result->smpl = llama_sampler_init(llama_get_model(ctx_dft), params);
}
#else
{
llama_sampling_params params;
params.top_k = 10;
params.samplers_sequence = {
llama_sampler_type::TOP_K,
};
const auto *model_dft = llama_get_model(ctx_dft);
result->smpl = llama_sampling_init(llama_get_model_vocab(model_dft), params);
}
#endif
return result;
}
void llama_speculative_free(struct llama_speculative * spec) {
if (spec == nullptr) {
return;
}
llama_sampling_free(spec->smpl);
llama_batch_free(spec->batch);
delete spec;
}
bool llama_speculative_are_compatible(
const struct llama_context * ctx_tgt,
const struct llama_context * ctx_dft) {
const struct llama_model * model_tgt = llama_get_model(ctx_tgt);
const struct llama_model * model_dft = llama_get_model(ctx_dft);
const struct llama_vocab * vocab_tgt = llama_get_model_vocab(model_tgt);
const struct llama_vocab * vocab_dft = llama_get_model_vocab(model_dft);
const auto vocab_type_tgt = llama_vocab_type(model_tgt);
LLAMA_LOG_INFO("%s: vocab_type tgt: %d\n", __func__, vocab_type_tgt);
const auto vocab_type_dft = llama_vocab_type(model_dft);
LLAMA_LOG_INFO("%s: vocab_type dft: %d\n", __func__, vocab_type_dft);
if (vocab_type_tgt != vocab_type_dft) {
LLAMA_LOG_ERROR("%s: draft model vocab type must match target model to use speculation but "
"vocab_type_dft = %d while vocab_type_tgt = %d\n", __func__, vocab_type_dft, vocab_type_tgt);
return false;
}
if (llama_add_bos_token(model_tgt) != llama_add_bos_token(model_dft) ||
llama_add_eos_token(model_tgt) != llama_add_eos_token(model_dft) ||
llama_token_bos(model_tgt) != llama_token_bos(model_dft) ||
llama_token_eos(model_tgt) != llama_token_eos(model_dft)) {
LLAMA_LOG_ERROR("%s: draft vocab special tokens must match target vocab to use speculation\n", __func__);
LLAMA_LOG_ERROR("%s: tgt: bos = %d (%d), eos = %d (%d)\n", __func__, llama_token_bos(model_tgt), llama_add_bos_token(model_tgt), llama_token_eos(model_tgt), llama_add_eos_token(model_tgt));
LLAMA_LOG_ERROR("%s: dft: bos = %d (%d), eos = %d (%d)\n", __func__, llama_token_bos(model_dft), llama_add_bos_token(model_dft), llama_token_eos(model_dft), llama_add_eos_token(model_dft));
return false;
}
{
const int n_vocab_tgt = llama_n_vocab(model_tgt);
const int n_vocab_dft = llama_n_vocab(model_dft);
const int model_diff = std::abs(n_vocab_tgt - n_vocab_dft);
if (model_diff > SPEC_VOCAB_MAX_SIZE_DIFFERENCE) {
LLAMA_LOG_ERROR("%s: draft model vocab must closely match target model to use speculation but "
"target vocab size %d does not match draft vocab size %d - difference %d, max allowed %d\n",
__func__, n_vocab_tgt, n_vocab_dft, model_diff, SPEC_VOCAB_MAX_SIZE_DIFFERENCE);
return false;
}
for (int i = SPEC_VOCAB_CHECK_START_TOKEN_ID; i < std::min(n_vocab_tgt, n_vocab_dft); ++i) {
const char * token_text_tgt = llama_token_get_text(model_tgt, i);
const char * token_text_dft = llama_token_get_text(model_dft, i);
if (std::strcmp(token_text_tgt, token_text_dft) != 0) {
LLAMA_LOG_ERROR("%s: draft vocab vocab must match target vocab to use speculation but "
"token %d content differs - target '%s', draft '%s'\n", __func__, i,
llama_token_to_piece(ctx_tgt, i).c_str(),
llama_token_to_piece(ctx_dft, i).c_str());
return false;
}
}
}
return true;
}
std::vector<llama_token> llama_speculative_gen_draft(
struct llama_speculative * spec,
struct llama_speculative_params params,
const std::vector<llama_token> & prompt_tgt,
llama_token id_last) {
auto & batch = spec->batch;
auto & ctx = spec->ctx;
auto & smpl = spec->smpl;
auto & prompt = spec->prompt;
int reuse_i = 0;
int reuse_n = 0;
const int n_ctx = llama_n_ctx(ctx) - params.n_draft;
const int i_start = std::max<int>(0, (int) prompt_tgt.size() - n_ctx);
// reuse as much as possible from the old draft context
// ideally, the draft context should be as big as the target context and we will always reuse the entire prompt
for (int i = 0; i < (int) prompt.size(); ++i) {
int cur = 0;
while (i_start + cur < (int) prompt_tgt.size() &&
i + cur < (int) prompt.size() &&
prompt_tgt[i_start + cur] == prompt[i + cur]) {
cur++;
}
if ((cur >= params.n_reuse || n_ctx >= (int) prompt_tgt.size()) && cur > reuse_n) {
reuse_i = i;
reuse_n = cur;
}
}
// LLAMA_LOG_INFO("%s: reuse_i = %d, reuse_n = %d, prompt = %d\n", __func__, reuse_i, reuse_n, (int) prompt.size());
std::vector<llama_token> result;
result.reserve(params.n_draft);
if (reuse_n == 0) {
llama_kv_cache_clear(ctx);
prompt.clear();
} else {
// this happens when a previous draft has been discarded (for example, due to being too small), but the
// target model agreed with it. in this case, we simply pass back the previous results to save compute
if (reuse_i + reuse_n < (int) prompt.size() && prompt[reuse_i + reuse_n] == id_last) {
for (int i = reuse_i + reuse_n + 1; i < (int) prompt.size(); ++i) {
result.push_back(prompt[i]);
if (params.n_draft <= (int) result.size()) {
break;
}
}
return result;
}
if (reuse_i > 0) {
llama_kv_cache_seq_rm (ctx, 0, 0, reuse_i);
llama_kv_cache_seq_add(ctx, 0, reuse_i, -1, -reuse_i);
prompt.erase(prompt.begin(), prompt.begin() + reuse_i);
}
if (reuse_n < (int) prompt.size()) {
llama_kv_cache_seq_rm (ctx, 0, reuse_n, -1);
prompt.erase(prompt.begin() + reuse_n, prompt.end());
}
}
// prepare a batch to evaluate any new tokens in the prompt
llama_batch_clear(batch);
for (size_t i = i_start + reuse_n; i < prompt_tgt.size(); ++i) {
//LLAMA_LOG_INFO("i = %d, i_start = %d, reuse_n = %d, i - i_start = %d, id = %6d\n", i, i_start, reuse_n, i - i_start, prompt_tgt[i]);
llama_batch_add(batch, prompt_tgt[i], i - i_start, { 0 }, false);
prompt.push_back(prompt_tgt[i]);
}
// we should rarely end-up here during normal decoding
if (batch.n_tokens > 0) {
//LLAMA_LOG_INFO("%s: draft prompt batch: %s\n", __func__, string_from(ctx, batch).c_str());
llama_decode(ctx, batch);
}
const llama_pos n_past = prompt.size();
// LLAMA_LOG_INFO("%s: n_past = %d\n", __func__, n_past);
llama_batch_clear(batch);
llama_batch_add (batch, id_last, n_past, { 0 }, true);
prompt.push_back(id_last);
//LLAMA_LOG_INFO("%s: draft prompt: %s\n", __func__, string_from(ctx, prompt).c_str());
llama_decode(ctx, batch);
llama_sampling_reset(smpl);
// sample n_draft tokens from the draft model
for (int i = 0; i < params.n_draft; ++i) {
llama_batch_clear(batch);
llama_sampling_sample(smpl, ctx, nullptr, 0);
const auto * cur_p = llama_sampling_get_candidates(smpl);
// for (int k = 0; k < std::min(3, (int) cur_p->size); ++k) {
// LLAMA_LOG_INFO(" - draft candidate %3d, pos %3d: %6d (%8.3f) '%s'\n",
// k, i, cur_p->data[k].id, cur_p->data[k].p, llama_token_to_piece(ctx, cur_p->data[k].id).c_str());
// }
// add drafted token for each sequence
const llama_token id = cur_p->data[0].id;
llama_sampling_accept(smpl, ctx, id, true);
result.push_back(id);
if (params.n_draft <= (int) result.size()) {
break;
}
// only collect very high-confidence draft tokens
if (cur_p->data[0].p < params.p_min) {
break;
}
llama_batch_add(batch, id, n_past + i + 1, { 0 }, true);
// evaluate the drafted tokens on the draft model
llama_decode(ctx, batch);
prompt.push_back(id);
}
return result;
}

common/speculative.h (new file, 29 lines)
View File

@@ -0,0 +1,29 @@
#pragma once
#include "llama.h"
#include <vector>
struct llama_speculative;
struct llama_speculative_params {
int n_draft = 16; // max drafted tokens
int n_reuse = 256;
float p_min = 0.75f; // min probability required to accept a token in the draft
};
struct llama_speculative * llama_speculative_init(struct llama_context * ctx_dft);
void llama_speculative_free(struct llama_speculative * spec);
bool llama_speculative_are_compatible(
const struct llama_context * ctx_tgt,
const struct llama_context * ctx_dft);
// sample up to n_draft tokens and add them to the batch using the draft model
std::vector<llama_token> llama_speculative_gen_draft(
struct llama_speculative * spec,
struct llama_speculative_params params,
const std::vector<llama_token> & prompt,
llama_token id_last);
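
Finally, a sketch of how these entry points could be wired into a target-model generation loop. Everything here is illustrative: spec is assumed to come from llama_speculative_init(ctx_dft), the contexts and the draft_next name are the caller's, and the compatibility check would normally run once at startup rather than per step. The returned draft then feeds the target-side verification step sketched after the sampling.h hunk above.

#include "common.h"
#include "sampling.h"
#include "speculative.h"

#include <vector>

// Draft-side wiring. prompt_tgt holds the target-side tokens so far, excluding
// id_last, the most recently accepted token (matching llama_speculative_gen_draft's
// own handling, which appends id_last after the prompt).
static std::vector<llama_token> draft_next(
        struct llama_context           * ctx_tgt,
        struct llama_context           * ctx_dft,
        struct llama_speculative       * spec,
        const std::vector<llama_token> & prompt_tgt,
        llama_token                      id_last) {
    // in practice this check is done once when the draft model is loaded
    if (!llama_speculative_are_compatible(ctx_tgt, ctx_dft)) {
        return {};  // fall back to plain decoding when the vocabularies diverge
    }

    llama_speculative_params params;
    params.n_draft = 16;    // cap on drafted tokens (maps to --draft-max in common.cpp)
    params.p_min   = 0.8f;  // stop drafting once the draft model's confidence drops (--draft-p-min)

    // the helper manages the draft model's KV cache internally, reusing as much of the
    // previous draft prompt as possible before decoding any new tokens
    return llama_speculative_gen_draft(spec, params, prompt_tgt, id_last);
}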