This commit is contained in:
Iwan Kawrakow
2025-05-10 19:07:02 +03:00
parent 3a671301f8
commit 154a195f75
2 changed files with 2 additions and 3 deletions

View File

@@ -2206,7 +2206,6 @@ std::string fs_get_cache_file(const std::string & filename) {
// Model utils
//
struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
printf("================================================== %s\n", __func__);
llama_init_result iparams;
auto mparams = llama_model_params_from_gpt_params(params);
@@ -2234,7 +2233,6 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
return iparams;
}
printf("%d entries in params.offload_policy\n", (int)params.offload_policy.size());
for (auto [op, on_off] : params.offload_policy) {
llama_set_offload_policy(lctx, op, on_off);
}

View File

@@ -23239,6 +23239,7 @@ void llama_log_callback_default(ggml_log_level level, const char * text, void *
// Set the scheduler's op-offload policy for a single ggml op, or for all ops.
// lctx:      context whose backend scheduler is updated; no-op if null or if
//            the scheduler has not been created yet.
// op:        a ggml_op value; any value outside [0, GGML_OP_COUNT) (e.g. -1)
//            means "apply to all ops".
// on_or_off: enable (true) or disable (false) offloading for the op(s).
void llama_set_offload_policy(struct llama_context * lctx, int op, bool on_or_off) {
    if (!lctx || !lctx->sched) return;
    // Guard the debug print: passing an out-of-range value to ggml_op_name()
    // would index the op-name table out of bounds, so label it "all ops".
    const char * op_name = op < 0 || op >= int(GGML_OP_COUNT) ? "all ops" : ggml_op_name(ggml_op(op));
    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXX offload(%s) = %d\n", op_name, on_or_off);
    ggml_backend_sched_set_op_offload(lctx->sched, ggml_op(op), on_or_off);
}