diff --git a/common/common.cpp b/common/common.cpp
index 3192fd37..802fe0df 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -940,6 +940,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         sparams.adaptive_decay = std::stof(argv[i]);
         return true;
     }
+    if (arg == "--adaptive-updt-w-cur") {
+        sparams.adaptive_updt_w_cur = true;
+        return true;
+    }
     if (arg == "--spec-replace") {
         CHECK_ARG
         std::string target = argv[i];
@@ -2231,6 +2235,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", "       --top-n-sigma t", "top-n-sigma parameter (default: %.1f, 0.0 = disabled)", (double)sparams.top_n_sigma});
     options.push_back({ "*", "       --adaptive-target", "adaptive-p sampling: (default: %.2f, <0.0 = disabled)", (double)sparams.adaptive_target});
     options.push_back({ "*", "       --adaptive-decay", "adaptive-p sampling: (default: %.2f)", (double)sparams.adaptive_decay});
+    options.push_back({ "*", "       --adaptive-updt-w-cur", "adaptive-p sampling: (default: %s)", sparams.adaptive_updt_w_cur ? "true" : "false"});
     options.push_back({ "*", "-l TOKEN_ID(+/-)BIAS", "modifies the likelihood of token appearing in the completion,\n"
                              "i.e. `--logit-bias 15043+1` to increase likelihood of token ' Hello',\n"
                              "or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'" });
@@ -4227,6 +4232,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "typical_p: %f # default: 1.0\n", sparams.typical_p);
     fprintf(stream, "adaptive_target: %f # default: -1.0\n", sparams.adaptive_target);
     fprintf(stream, "adaptive_decay: %f # default: 0.9\n", sparams.adaptive_decay);
+    fprintf(stream, "adaptive_updt_w_cur: %s # default: false\n", sparams.adaptive_updt_w_cur ? "true" : "false");
     fprintf(stream, "verbose_prompt: %s # default: false\n", params.verbose_prompt ? "true" : "false");
     fprintf(stream, "display_prompt: %s # default: true\n", params.display_prompt ? "true" : "false");
 }
diff --git a/common/sampling.cpp b/common/sampling.cpp
index ba8d3f67..cfec27fd 100644
--- a/common/sampling.cpp
+++ b/common/sampling.cpp
@@ -120,7 +120,7 @@ struct llama_sampling_context * common_sampler_init(const struct llama_vocab* vo
         {
             GGML_ASSERT(vocab);
             auto n_vocab = llama_vocab_n_tokens(vocab);
-            result->adapt_p_ctx = llama_init_adaptive_p(n_vocab, params.adaptive_target, params.adaptive_decay, result->rng());
+            result->adapt_p_ctx = llama_init_adaptive_p(n_vocab, params.adaptive_target, params.adaptive_decay, params.adaptive_updt_w_cur, result->rng());
             break;
         }
         default:
diff --git a/common/sampling.h b/common/sampling.h
index a5420fa7..f2d1b1bf 100644
--- a/common/sampling.h
+++ b/common/sampling.h
@@ -69,6 +69,7 @@ typedef struct llama_sampling_params {
     float top_n_sigma = 0.0f;      // top-n-sigma
     float adaptive_target = -1.0f; // select tokens near this probability (valid range 0.0 to 1.0; <0 = disabled)
     float adaptive_decay = 0.90f;  // decay rate for target adaptation over time. lower values -> faster but less stable adaptation. (valid range 0.0 to 1.0; ≤0 = no adaptation)
+    bool adaptive_updt_w_cur = false; // update state with current probability
     bool penalize_nl = false;      // consider newlines as a repeatable token

     uint32_t seed = LLAMA_DEFAULT_SEED; // the seed used to initialize llama_sampling_context
diff --git a/include/llama.h b/include/llama.h
index 72cb9edd..a0a8e3ac 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -1387,6 +1387,7 @@ LLAMA_API struct llama_grammar* llama_sampler_init_grammar_lazy_patterns(
 LLAMA_API struct llama_sampler_adaptive_p * llama_init_adaptive_p(int n_vocab,
     const float target,
     const float decay,
+    const bool updt_w_cur,
     const uint32_t seed);

 void llama_prep_adaptive_p(struct llama_context * ctx,
diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp
index 5e26eb20..bb94af7a 100644
--- a/src/llama-sampling.cpp
+++ b/src/llama-sampling.cpp
@@ -1075,9 +1075,13 @@ llama_token llama_sample_token_adaptive_p_impl(
     GGML_ASSERT(iter != ctx->cum_probs.end());
     const size_t idx = std::distance(ctx->cum_probs.begin(), iter);
     llama_token id = candidates->data[idx].id;
-    GGML_ASSERT(id < int(ctx->orig_prob.size()));
-    if (auto update_prob = ctx->orig_prob[id]; update_prob > 0) {
+
+    // update history
+    const float update_prob = ctx->updt_w_cur
+        ? candidates->data[idx].p / ctx->cum_cur_p
+        : ctx->orig_prob[id] / ctx->cum_orig_prob;
+    if (update_prob > 0) {
         ctx->weighted_sum = ctx->decay * ctx->weighted_sum + update_prob;
         ctx->total_weight = ctx->decay * ctx->total_weight + 1.0f;
     }

@@ -1111,6 +1115,7 @@ void llama_sample_adaptive_p_impl(struct llama_sampling * ctx, llama_token_data_
         candidates->data[i].p = prob;
         cum_sum += prob;
     }
+    adapt_p_ctx->cum_cur_p = cum_sum;

     // compute adapted target probability
     const float target = std::clamp(adapt_p_ctx->target, 0.0f, 1.0f);
@@ -1146,6 +1151,10 @@ void llama_prep_adaptive_p_impl(
     struct llama_sampling * smpl,
     llama_token_data_array * candidates,
     struct llama_sampler_adaptive_p * adapt_p_ctx) {
+    if (adapt_p_ctx->updt_w_cur) {
+        // update with current probability, original not needed
+        return;
+    }
     constexpr float kDelta = 30.0f; //16.6f;
     auto t_start = ggml_time_us();
     auto & orig_prob = adapt_p_ctx->orig_prob;
@@ -1169,17 +1178,20 @@ void llama_prep_adaptive_p_impl(
 struct llama_sampler_adaptive_p * llama_init_adaptive_p_impl(int n_vocab,
     const float target,
     const float decay,
+    const bool updt_w_cur,
     const uint32_t seed) {
     GGML_ASSERT(n_vocab > 0);
     const float clamped_decay = std::clamp(decay, 0.0f, 0.99f);
     auto result = new llama_sampler_adaptive_p {
         /* .target          = */ target,
         /* .decay           = */ clamped_decay,
+        /* .updt_w_cur      = */ updt_w_cur,
         /* .rng             = */ std::mt19937(seed),
         /* .weighted_sum    = */ target / (1.0f - clamped_decay),
         /* .total_weight    = */ 1.0f / (1.0f - clamped_decay),
         /* .orig_prob       = */ {},
-        /* .cum_orig_prob   = */ 0.0f,
+        /* .cum_orig_prob   = */ 1.0f,
+        /* .cum_cur_p       = */ 1.0f,
         /* .max_xform_logit = */ -INFINITY,
         /* .cum_probs       = */ {},
     };
diff --git a/src/llama-sampling.h b/src/llama-sampling.h
index 8ebbfb49..0d7e72d2 100644
--- a/src/llama-sampling.h
+++ b/src/llama-sampling.h
@@ -68,15 +68,17 @@ void llama_sampler_dry_apply(struct llama_sampler_dry* smpl, llama_token_data_ar
 struct llama_sampler_adaptive_p {
     const float target;    // target probability (0.0 - 1.0; negative = disabled)
     const float decay;     // EMA decay; history ≈ 1/(1-decay) tokens (0.0 - 0.99)
+    const bool updt_w_cur; // false=original, true=current
     std::mt19937 rng;      // RNG
     float weighted_sum;    // sum(p_n * decay^N)
     float total_weight;    // sum(decay^i), converges to 1/(1-decay)

     // first referenced in prep
-    std::vector<float> orig_prob; // for storing the original proibabilities
+    std::vector<float> orig_prob; // for storing the original probabilities
     float cum_orig_prob;   // for normalizing orig_prob in sample_token

     // first referenced in sample
+    float cum_cur_p;       // cumulative sum of current probabilities
     float max_xform_logit; // maximum logit found during transform

     // first referenced in sample_token
@@ -86,6 +88,7 @@ struct llama_sampler_adaptive_p {
 struct llama_sampler_adaptive_p * llama_init_adaptive_p_impl(int n_vocab,
     const float target,
     const float decay,
+    const bool updt_w_cur,
     const uint32_t seed);

 void llama_prep_adaptive_p_impl(
diff --git a/src/llama.cpp b/src/llama.cpp
index f6b4eba9..4f551745 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -7814,8 +7814,8 @@ void llama_sampler_dry_accept(struct llama_sampler_dry* smpl, llama_token token)
 }

-struct llama_sampler_adaptive_p * llama_init_adaptive_p(int n_vocab, const float target, const float decay, const uint32_t seed) {
-    return llama_init_adaptive_p_impl(n_vocab, target, decay, seed);
+struct llama_sampler_adaptive_p * llama_init_adaptive_p(int n_vocab, const float target, const float decay, const bool updt_w_cur, const uint32_t seed) {
+    return llama_init_adaptive_p_impl(n_vocab, target, decay, updt_w_cur, seed);
 }
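
A standalone C++ sketch (not part of the patch) of the history update that --adaptive-updt-w-cur toggles. Only the initialization and the EMA update rule mirror llama_init_adaptive_p_impl and llama_sample_token_adaptive_p_impl above; the per-step probability values are invented for illustration. With updt_w_cur=false the chosen token's original probability (normalized by cum_orig_prob) feeds the running average; with updt_w_cur=true its current, post-transform probability (normalized by cum_cur_p) is used instead, which is why llama_prep_adaptive_p_impl can return early without snapshotting the original distribution.

// standalone illustration of the adaptive-p history update (not from the patch)
#include <cstdio>

int main() {
    const float target = 0.30f;    // --adaptive-target
    const float decay  = 0.90f;    // --adaptive-decay; horizon ~ 1/(1-decay) = 10 tokens
    const bool  updt_w_cur = true; // --adaptive-updt-w-cur

    // seeded as in llama_init_adaptive_p_impl, so that
    // weighted_sum / total_weight == target before any token is sampled
    float weighted_sum = target / (1.0f - decay);
    float total_weight = 1.0f / (1.0f - decay);

    // made-up per-step probabilities of the sampled token:
    // p_orig = original prob / cum_orig_prob, p_cur = transformed prob / cum_cur_p
    const float p_orig[] = { 0.25f, 0.40f, 0.10f, 0.35f, 0.30f };
    const float p_cur [] = { 0.45f, 0.55f, 0.30f, 0.50f, 0.48f };

    for (int i = 0; i < 5; i++) {
        const float update_prob = updt_w_cur ? p_cur[i] : p_orig[i];
        if (update_prob > 0) { // same guard as llama_sample_token_adaptive_p_impl
            weighted_sum = decay * weighted_sum + update_prob;
            total_weight = decay * total_weight + 1.0f;
        }
        // running average that the sampler adapts its target against
        printf("step %d: avg p = %.4f\n", i + 1, weighted_sum / total_weight);
    }
    return 0;
}

Note that the patch also changes the initial cum_orig_prob from 0.0f to 1.0f (and starts cum_cur_p at 1.0f): since update_prob is now always a quotient, a zero initial denominator would yield inf/NaN if a token were sampled before the first prep/transform pass filled the accumulators.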