From 154a195f7529651f5ec216d061e72bc1bc8ca460 Mon Sep 17 00:00:00 2001
From: Iwan Kawrakow
Date: Sat, 10 May 2025 19:07:02 +0300
Subject: [PATCH] Minor

---
 common/common.cpp | 2 --
 src/llama.cpp     | 3 ++-
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 0ecab991..ab936ee7 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -2206,7 +2206,6 @@ std::string fs_get_cache_file(const std::string & filename) {
 // Model utils
 //
 struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
-    printf("================================================== %s\n", __func__);
     llama_init_result iparams;
     auto mparams = llama_model_params_from_gpt_params(params);
 
@@ -2234,7 +2233,6 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
         return iparams;
     }
 
-    printf("%d entries in params.offload_policy\n", (int)params.offload_policy.size());
     for (auto [op, on_off] : params.offload_policy) {
         llama_set_offload_policy(lctx, op, on_off);
     }
diff --git a/src/llama.cpp b/src/llama.cpp
index 44bad5e2..38a2b299 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -23239,6 +23239,7 @@ void llama_log_callback_default(ggml_log_level level, const char * text, void *
 
 void llama_set_offload_policy(struct llama_context * lctx, int op, bool on_or_off) {
     if (!lctx || !lctx->sched) return;
-    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXX offload(%s) = %d\n", ggml_op_name(ggml_op(op)), on_or_off);
+    const char * op_name = op < 0 || op >= int(GGML_OP_COUNT) ? "all ops" : ggml_op_name(ggml_op(op));
+    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXX offload(%s) = %d\n", op_name, on_or_off);
    ggml_backend_sched_set_op_offload(lctx->sched, ggml_op(op), on_or_off);
 }
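
A minimal usage sketch, not part of the patch: how a caller might drive the
patched llama_set_offload_policy(). The function signature is taken verbatim
from the diff; the convention that an out-of-range op (e.g. -1) means
"all ops" is inferred from the patched log message, and set_mul_mat_only_offload
is a hypothetical helper name.

    #include "llama.h"
    #include "ggml.h"

    // Disable op offload globally, then re-enable it for matrix
    // multiplications only. GGML_OP_MUL_MAT is a standard ggml op;
    // the "-1 == all ops" reading is an assumption from the patch.
    static void set_mul_mat_only_offload(struct llama_context * lctx) {
        llama_set_offload_policy(lctx, -1, false);              // all ops: no offload
        llama_set_offload_policy(lctx, GGML_OP_MUL_MAT, true);  // except mat-mul
    }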