From bd2434945d72c5e65e048f4c90f212b95f65ea22 Mon Sep 17 00:00:00 2001
From: Kawrakow
Date: Mon, 19 Jan 2026 10:00:19 +0000
Subject: [PATCH] Correctly accumulate adaptive_p sampling time

---
 common/sampling.cpp    |  2 +-
 include/llama.h        |  9 ++++-----
 src/llama-sampling.cpp | 16 ++++++++++++----
 src/llama-sampling.h   |  2 ++
 src/llama.cpp          | 13 ++++++-------
 5 files changed, 25 insertions(+), 17 deletions(-)

diff --git a/common/sampling.cpp b/common/sampling.cpp
index c1130a2f..ba8d3f67 100644
--- a/common/sampling.cpp
+++ b/common/sampling.cpp
@@ -473,7 +473,7 @@ static llama_token llama_sampling_sample_impl(
             id = llama_sample_token_mirostat_v2(ctx_main, &cur_p, mirostat_tau, mirostat_eta, &ctx_sampling->mirostat_mu);
         } else if (adaptive_target >= 0.0f && ctx_sampling->adapt_p_ctx!=nullptr) {
             // adaptive p sampling
-            llama_prep_adaptive_p(&cur_p, ctx_sampling->adapt_p_ctx);
+            llama_prep_adaptive_p(ctx_main, &cur_p, ctx_sampling->adapt_p_ctx);
             sampler_queue(ctx_main, params, ctx_sampling, cur_p, std::max(1, params.min_keep));
             id = llama_sample_token_adaptive_p(ctx_main, &cur_p, ctx_sampling->adapt_p_ctx);
         } else {
diff --git a/include/llama.h b/include/llama.h
index 82a96a90..72cb9edd 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -1389,15 +1389,14 @@ LLAMA_API struct llama_grammar* llama_sampler_init_grammar_lazy_patterns(
             const float    decay,
             const uint32_t seed);
 
-    void llama_prep_adaptive_p(
+    void llama_prep_adaptive_p(struct llama_context * ctx,
             llama_token_data_array * candidates,
             struct llama_sampler_adaptive_p * adapt_p_ctx);
 
     /// @details Adaptive p sampler described in https://github.com/MrJackSpade/adaptive-p-docs/blob/main/README.md
-    void llama_sample_adaptive_p(
-            struct llama_context * ctx,
-            llama_token_data_array * candidates,
-            struct llama_sampler_adaptive_p * adapt_p_ctx);
+    void llama_sample_adaptive_p(struct llama_context * ctx,
+            llama_token_data_array * candidates,
+            struct llama_sampler_adaptive_p * adapt_p_ctx);
 
     /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
 
diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp
index 8e3af2d7..b598a2f1 100644
--- a/src/llama-sampling.cpp
+++ b/src/llama-sampling.cpp
@@ -1061,25 +1061,28 @@ llama_token llama_sample_token_adaptive_p_impl(
     const size_t idx = std::distance(ctx->cum_probs.begin(), iter);
     llama_token id = candidates->data[idx].id;
 
-    smpl->t_sample_us += ggml_time_us() - t_start_sample_us;
-    smpl->n_sample++;
-
     GGML_ASSERT(id < int(ctx->orig_prob.size()));
     if (auto update_prob = ctx->orig_prob[id]; update_prob > 0) {
         ctx->weighted_sum = ctx->decay * ctx->weighted_sum + update_prob;
         ctx->total_weight = ctx->decay * ctx->total_weight + 1.0f;
     }
 
+    smpl->t_sample_us += ggml_time_us() - t_start_sample_us;
+    smpl->n_sample++;
+
     return id;
 }
 
-void llama_sample_adaptive_p_impl(llama_token_data_array * candidates, struct llama_sampler_adaptive_p * adapt_p_ctx) {
+void llama_sample_adaptive_p_impl(struct llama_sampling * ctx, llama_token_data_array * candidates,
+        struct llama_sampler_adaptive_p * adapt_p_ctx) {
     if (adapt_p_ctx->target < 0.0f) {
         // sampler is disabled
         llama_sample_softmax_impl(nullptr, candidates);
         return;
     }
 
+    auto t_start = ggml_time_us();
+
     // incomplete softmax because final division can be fused
     float max_l = candidates->data[0].logit;
     if (!candidates->sorted) {
@@ -1120,12 +1123,16 @@ void llama_sample_adaptive_p_impl(llama_token_data_array * candidates, struct ll
     }
     candidates->sorted = false;
     adapt_p_ctx->max_xform_logit = max_logit;
+
+    ctx->t_sample_us += ggml_time_us() - t_start;
 }
 
 void llama_prep_adaptive_p_impl(
+        struct llama_sampling * smpl,
         llama_token_data_array * candidates,
         struct llama_sampler_adaptive_p * adapt_p_ctx) {
     constexpr float kDelta = 16.6f;
+    auto t_start = ggml_time_us();
     auto & orig_prob = adapt_p_ctx->orig_prob;
     if (candidates->size != orig_prob.size() || candidates->sorted) {
         LLAMA_LOG_ERROR("%s: this function must be called before any other sampler has been applied\n", __func__);
@@ -1146,6 +1153,7 @@ void llama_prep_adaptive_p_impl(
         orig_prob[j] = prob;
     }
     adapt_p_ctx->cum_orig_prob = cum_prob;
+    if (smpl) smpl->t_sample_us += ggml_time_us() - t_start;
 }
 
 struct llama_sampler_adaptive_p * llama_init_adaptive_p_impl(int n_vocab,
diff --git a/src/llama-sampling.h b/src/llama-sampling.h
index 308d8b10..8ebbfb49 100644
--- a/src/llama-sampling.h
+++ b/src/llama-sampling.h
@@ -89,10 +89,12 @@ struct llama_sampler_adaptive_p * llama_init_adaptive_p_impl(int n_vocab,
         const uint32_t seed);
 
 void llama_prep_adaptive_p_impl(
+        struct llama_sampling * smpl,
         llama_token_data_array * candidates,
         struct llama_sampler_adaptive_p * adapt_p_ctx);
 
 void llama_sample_adaptive_p_impl(
+        struct llama_sampling * smpl,
         llama_token_data_array * candidates,
         struct llama_sampler_adaptive_p * adapt_p_ctx);
 
diff --git a/src/llama.cpp b/src/llama.cpp
index e30632b4..488dcadc 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -7687,15 +7687,14 @@ void llama_sample_dry([[maybe_unused]] struct llama_context* ctx, struct llama_s
     llama_sampler_dry_apply(smpl, candidates_p);
 }
 
-void llama_sample_adaptive_p(
-        [[maybe_unused]] struct llama_context * ctx,
-        llama_token_data_array * candidates,
-        struct llama_sampler_adaptive_p * adapt_p_ctx) {
-    llama_sample_adaptive_p_impl(candidates, adapt_p_ctx);
+void llama_sample_adaptive_p(llama_context * ctx,
+        llama_token_data_array * candidates,
+        llama_sampler_adaptive_p * adapt_p_ctx) {
+    llama_sample_adaptive_p_impl(&ctx->sampling, candidates, adapt_p_ctx);
 }
 
-void llama_prep_adaptive_p(llama_token_data_array * candidates, struct llama_sampler_adaptive_p * adapt_p_ctx) {
-    llama_prep_adaptive_p_impl(candidates, adapt_p_ctx);
+void llama_prep_adaptive_p(struct llama_context * ctx, llama_token_data_array * candidates, struct llama_sampler_adaptive_p * adapt_p_ctx) {
+    llama_prep_adaptive_p_impl(&ctx->sampling, candidates, adapt_p_ctx);
 }
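
For reference, the patch settles on one timing pattern for every adaptive_p entry point: take a timestamp first, do all the transform and state-update work (including the weighted_sum/total_weight bookkeeping, which previously ran after the timer had already been stopped), and fold the elapsed microseconds into the sampling counters last, tolerating a null timing context. Below is a minimal self-contained sketch of that pattern; sampler_timings and now_us() are hypothetical stand-ins for the t_sample_us/n_sample fields of llama_sampling and for ggml_time_us(), not the actual types.

    #include <chrono>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for the timing fields of llama_sampling.
    struct sampler_timings {
        int64_t t_sample_us = 0; // accumulated sampling time, microseconds
        int32_t n_sample    = 0; // number of sampled tokens
    };

    // Stand-in for ggml_time_us(): monotonic time in microseconds.
    static int64_t now_us() {
        using namespace std::chrono;
        return duration_cast<microseconds>(steady_clock::now().time_since_epoch()).count();
    }

    // The pattern applied to llama_prep_adaptive_p_impl and
    // llama_sample_adaptive_p_impl: measure the entire body, including any
    // sampler-state updates, and accept a null pointer so callers without a
    // timing context still work.
    static void sampling_step(sampler_timings * smpl) {
        const int64_t t_start = now_us();

        // ... transform logits, pick a token, update adaptive-p state ...

        if (smpl) {
            smpl->t_sample_us += now_us() - t_start;
            smpl->n_sample++;
        }
    }

    int main() {
        sampler_timings timings;
        sampling_step(&timings);
        std::printf("sampled %d token(s) in %lld us\n",
                    timings.n_sample, (long long) timings.t_sample_us);
        return 0;
    }

Moving the counter updates to the very end of llama_sample_token_adaptive_p_impl is what makes the accounting correct: the decayed weighted_sum/total_weight update now falls inside the measured interval instead of after it, and the prep/transform steps, which previously went unmeasured, now contribute to t_sample_us as well.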