Mirror of https://github.com/ikawrakow/ik_llama.cpp.git
* Add mtmd: the beginning
* Add mtmd: mtmd.cpp compiles
* Add mtmd: clip initialization compiles
* Add mtmd: clip.cpp compiles
* Add mtmd: builds successfully
* Add CPU implementation for GGML_OP_GLU
* Add CUDA implementation for GGML_OP_GLU
* Add CPU implementation for GGML_OP_CONV_2D and GGML_OP_CONV_2D_DW
* Add CUDA implementation for GGML_OP_CONV_2D and GGML_OP_CONV_2D_DW
* Add mtmd: refresh CPU rope
* Add mtmd: refresh CUDA rope
* Add mtmd: add Qwen2-VL
* Add mtmd: Qwen2.5-VL text seems to work with this change
* Add mtmd: fix swiglu
* Add mtmd: use LOG_TEE so generated tokens show up in terminal
* Add mtmd: do not attempt to load a GPU backend if none are available
* GLU, not GPU
* Fix typo
* Fix new/free mismatch
* LOG stuff
* Add mtmd: this fixes gibberish on second image

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
57 lines
1.4 KiB
CMake
# dependencies

find_package(Threads REQUIRED)

# third-party

# ...

# examples

include_directories(${CMAKE_CURRENT_SOURCE_DIR})

if (EMSCRIPTEN)
else()
    add_subdirectory(cvector-generator)
    add_subdirectory(baby-llama)
    add_subdirectory(batched-bench)
    add_subdirectory(batched)
    add_subdirectory(benchmark)
    add_subdirectory(convert-llama2c-to-ggml)
    add_subdirectory(embedding)
    add_subdirectory(eval-callback)
    add_subdirectory(export-lora)
    add_subdirectory(gbnf-validator)
    add_subdirectory(gguf-hash)
    add_subdirectory(gguf-split)
    add_subdirectory(gguf)
    add_subdirectory(gritlm)
    add_subdirectory(imatrix)
    add_subdirectory(infill)
    add_subdirectory(llama-bench)
    add_subdirectory(lookahead)
    add_subdirectory(lookup)
    add_subdirectory(main)
    add_subdirectory(parallel)
    add_subdirectory(passkey)
    add_subdirectory(perplexity)
    add_subdirectory(quantize-stats)
    add_subdirectory(quantize)
    add_subdirectory(retrieval)
    add_subdirectory(mtmd)
    if (GGML_RPC)
        add_subdirectory(rpc)
    endif()
    if (LLAMA_BUILD_SERVER)
        add_subdirectory(server)
    endif()
    if (GGML_SYCL)
        add_subdirectory(sycl)
    endif()
    add_subdirectory(save-load-state)
    add_subdirectory(simple)
    add_subdirectory(speculative)
    add_subdirectory(sweep-bench)
    add_subdirectory(tokenize)
endif()
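Each example added above lives in its own subdirectory with its own CMakeLists.txt that defines the executable target. As a rough illustration, a per-example build file in this tree typically follows the pattern below; this is a minimal sketch, and the target name, source file, and linked library names (common, llama) are assumptions for illustration rather than a copy of any specific example.

# Minimal sketch of a per-example CMakeLists.txt (illustrative; names are assumptions)
set(TARGET llama-simple)                # hypothetical target name
add_executable(${TARGET} simple.cpp)    # the example's source file
install(TARGETS ${TARGET} RUNTIME)
# link against the helper and core libraries built elsewhere in the tree
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)

Note that the rpc, server, and sycl examples are only added when the corresponding options (GGML_RPC, LLAMA_BUILD_SERVER, GGML_SYCL) are enabled at configure time, as the if() guards in the top-level file above show.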