Mirror of https://github.com/ikawrakow/ik_llama.cpp.git, synced 2026-03-07 12:30:08 +00:00.
Experiments with doing the SVD before or after quantization, quantizing the SVD result, etc. So far, none of the variants is competitive with simply using more bpw in the quantization.
13 lines
582 B
CMake
# Builds the llama-quantize-stats tool from quantize-stats.cpp and links it
# against the llama library, build_info, and the platform thread library.
set(TARGET llama-quantize-stats)

add_executable(${TARGET} quantize-stats.cpp)
install(TARGETS ${TARGET} RUNTIME)

# GGML_NATIVE: tune the binary for the build host. Scope -march=native to this
# target with target_compile_options instead of appending to the global
# CMAKE_CXX_FLAGS string, so the flag cannot leak into sibling targets.
# CMAKE_CXX_COMPILER_ID replaces the legacy CMAKE_COMPILER_IS_GNUCC/GNUCXX
# variables; MATCHES "Clang" deliberately covers both Clang and AppleClang.
if (GGML_NATIVE)
    if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
        message(STATUS "Adding march=native to ${TARGET}")
        target_compile_options(${TARGET} PRIVATE -march=native)
    endif()
endif()

target_link_libraries(${TARGET} PRIVATE llama build_info ${CMAKE_THREAD_LIBS_INIT})
# Anchor the include paths explicitly rather than relying on bare relative
# paths being resolved against the current source directory.
target_include_directories(${TARGET} PRIVATE
    ${CMAKE_CURRENT_SOURCE_DIR}/../../common
    ${CMAKE_CURRENT_SOURCE_DIR}/../../ggml/src
)
target_compile_features(${TARGET} PRIVATE cxx_std_11)