From 59133173fa02eefbd13823b1c4406566db33c960 Mon Sep 17 00:00:00 2001 From: Iwan Kawrakow Date: Thu, 25 Sep 2025 11:22:19 +0300 Subject: [PATCH] Add mtmd: clip initialization compiles --- examples/mtmd/clip.cpp | 25 ++++++++++++++++++++----- ggml/include/ggml.h | 9 ++++++--- 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/examples/mtmd/clip.cpp b/examples/mtmd/clip.cpp index 1f9114c4..8d50c7cc 100644 --- a/examples/mtmd/clip.cpp +++ b/examples/mtmd/clip.cpp @@ -11,6 +11,17 @@ #include "ggml-backend.h" //#include "gguf.h" +#ifdef GGML_USE_CUDA +# include "ggml-cuda.h" +#elif defined(GGML_USE_VULKAN) +# include "ggml-vulkan.h" +#endif + +#ifdef GGML_USE_METAL +# include "ggml-metal.h" +#endif + + #include #include #include @@ -392,21 +403,24 @@ struct clip_ctx { clip_ctx(clip_context_params & ctx_params) { debug_graph = std::getenv("MTMD_DEBUG_GRAPH") != nullptr; - backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr); + //backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr); + backend_cpu = ggml_backend_cpu_init(); if (!backend_cpu) { throw std::runtime_error("failed to initialize CPU backend"); } if (ctx_params.use_gpu) { auto backend_name = std::getenv("MTMD_BACKEND_DEVICE"); if (backend_name != nullptr) { - backend = ggml_backend_init_by_name(backend_name, nullptr); + //backend = ggml_backend_init_by_name(backend_name, nullptr); + backend = ggml_backend_reg_init_backend_from_str(backend_name); if (!backend) { LOG_WRN("%s: Warning: Failed to initialize \"%s\" backend, falling back to default GPU backend\n", __func__, backend_name); } } if (!backend) { - backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr); - backend = backend ? backend : ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU, nullptr); + backend = ggml_backend_reg_init_backend(1, nullptr); + //backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr); + //backend = backend ? 
backend : ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU, nullptr);
             }
         }
@@ -423,7 +437,8 @@ struct clip_ctx {
         backend_buft.push_back(ggml_backend_get_default_buffer_type(backend_cpu));
 
         sched.reset(
-            ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), 8192, false, true)
+            //ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), 8192, false, true)
+            ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), 8192, false)
         );
     }
 
diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index 9e78903c..4ffb18d0 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -700,16 +700,19 @@ extern "C" {
     };
 
     enum ggml_log_level {
-        GGML_LOG_LEVEL_ERROR = 2,
+        GGML_LOG_LEVEL_NONE  = 0,
+        GGML_LOG_LEVEL_DEBUG = 1,
+        GGML_LOG_LEVEL_INFO  = 2,
         GGML_LOG_LEVEL_WARN  = 3,
-        GGML_LOG_LEVEL_INFO  = 4,
-        GGML_LOG_LEVEL_DEBUG = 5
+        GGML_LOG_LEVEL_ERROR = 4,
+        GGML_LOG_LEVEL_CONT  = 5, // continue previous log
     };
 
     enum ggml_tensor_flag {
         GGML_TENSOR_FLAG_INPUT  = 1,
         GGML_TENSOR_FLAG_OUTPUT = 2,
         GGML_TENSOR_FLAG_PARAM  = 4,
+        GGML_TENSOR_FLAG_LOSS   = 8, // ...defines loss for numerical optimization (multiple loss tensors add up)
     };
 
     // ggml object