Mirror of https://github.com/ikawrakow/ik_llama.cpp.git, synced 2026-01-26 17:20:01 +00:00.
* Merging mainline - WIP * Merging mainline - WIP AVX2 and CUDA appear to work. CUDA performance seems slightly (~1-2%) lower as it is so often the case with llama.cpp/ggml after some "improvements" have been made. * Merging mainline - fix Metal * Remove check --------- Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
23 lines · 717 B · CMake
# Read commit metadata from the working tree into GIT_SHA1, GIT_DATE and
# GIT_COMMIT_SUBJECT (typically consumed by a build-info configure step).
find_package(Git)

# Default every output to empty so consumers can expand the variables
# unconditionally; they stay empty when git is unavailable or the source
# tree is not a git checkout — the same result the failed git invocations
# would have produced.
set(GIT_SHA1 "")
set(GIT_DATE "")
set(GIT_COMMIT_SUBJECT "")

if(Git_FOUND)
    # the commit's SHA1, abbreviated to 8 hex digits; --match=NeVeRmAtCh can
    # never match a tag, so `describe` always falls back to the bare hash
    execute_process(COMMAND
        "${GIT_EXECUTABLE}" describe --match=NeVeRmAtCh --always --abbrev=8
        WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
        OUTPUT_VARIABLE GIT_SHA1
        ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)

    # the date of the commit, in the local time zone of the configuring machine
    execute_process(COMMAND
        "${GIT_EXECUTABLE}" log -1 --format=%ad --date=local
        WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
        OUTPUT_VARIABLE GIT_DATE
        ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)

    # the subject (first line of the message) of the commit
    execute_process(COMMAND
        "${GIT_EXECUTABLE}" log -1 --format=%s
        WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
        OUTPUT_VARIABLE GIT_COMMIT_SUBJECT
        ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
endif()