From b2c9689762df6d7f2e23d4a50c8c3d113e5d9219 Mon Sep 17 00:00:00 2001 From: Kawrakow Date: Sun, 18 Jan 2026 15:01:46 +0000 Subject: [PATCH] While at it, let's fix the formatting too --- src/llama-sampling.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp index 6ab13132..17f95f0a 100644 --- a/src/llama-sampling.cpp +++ b/src/llama-sampling.cpp @@ -1083,8 +1083,7 @@ llama_token llama_sample_token_adaptive_p_impl( return id; } -void llama_sample_adaptive_p_impl(llama_token_data_array * candidates, struct llama_sampler_adaptive_p * adapt_p_ctx) -{ +void llama_sample_adaptive_p_impl(llama_token_data_array * candidates, struct llama_sampler_adaptive_p * adapt_p_ctx) { if (adapt_p_ctx->target < 0.0f) { // sampler is disabled llama_sample_softmax_impl(nullptr, candidates); @@ -1205,8 +1204,7 @@ void llama_prep_adaptive_p_impl( struct llama_sampler_adaptive_p * llama_init_adaptive_p_impl( const float target, const float decay, - const uint32_t seed) -{ + const uint32_t seed) { const float clamped_decay = std::clamp(decay, 0.0f, 0.99f); return new llama_sampler_adaptive_p { /* .target = */ target,