Remove llamafile remnants

Kawrakow
2026-01-22 11:12:04 +00:00
parent 66caa42b53
commit 3a3e1638d4
5 changed files with 0 additions and 28 deletions

View File

@@ -112,7 +112,6 @@ option(GGML_ACCELERATE "ggml: enable Accelerate framework"
 option(GGML_BLAS "ggml: use BLAS" ${GGML_BLAS_DEFAULT})
 set(GGML_BLAS_VENDOR ${GGML_BLAS_VENDOR_DEFAULT} CACHE STRING
     "ggml: BLAS library vendor")
-option(GGML_LLAMAFILE "ggml: use LLAMAFILE" OFF)
 option(GGML_IQK_MUL_MAT "ggml: use optimized iqk matrix multiplications" ON)
 option(GGML_CUDA "ggml: use CUDA" OFF)

View File

@@ -2999,7 +2999,6 @@ extern "C" {
     GGML_API int ggml_cpu_has_vsx        (void);
     GGML_API int ggml_cpu_has_matmul_int8(void);
     GGML_API int ggml_cpu_has_cann       (void);
-    GGML_API int ggml_cpu_has_llamafile  (void);
     //
     // Internal types and functions exposed for tests and benchmarks

View File

@@ -299,15 +299,6 @@ if (GGML_IQK_MUL_MAT)
     endif()
 endif()
-if (GGML_LLAMAFILE)
-    message(STATUS "Using llamafile")
-    add_compile_definitions(GGML_USE_LLAMAFILE)
-    set(GGML_HEADERS_LLAMAFILE llamafile/sgemm.h)
-    set(GGML_SOURCES_LLAMAFILE llamafile/sgemm.cpp)
-endif()
 if (GGML_CUDA)
     cmake_minimum_required(VERSION 3.18) # for CMAKE_CUDA_ARCHITECTURES
@@ -1534,7 +1525,6 @@ add_library(ggml
             ${GGML_SOURCES_VULKAN} ${GGML_HEADERS_VULKAN}
             ${GGML_SOURCES_ROCM} ${GGML_HEADERS_ROCM}
             ${GGML_SOURCES_BLAS} ${GGML_HEADERS_BLAS}
-            ${GGML_SOURCES_LLAMAFILE} ${GGML_HEADERS_LLAMAFILE}
             ${GGML_SOURCES_IQK_MM} ${GGML_HEADERS_IQK_MM}
             ${GGML_SOURCES_IQK} ${GGML_HEADERS_IQK}
             ${GGML_SOURCES_CANN} ${GGML_HEADERS_CANN}

View File

@@ -54,13 +54,6 @@
 #if defined(__ARM_FEATURE_SVE)
 int ggml_sve_cnt_b = 0;
 #endif
-#if defined(__ARM_FEATURE_SVE) || defined(__ARM_FEATURE_MATMUL_INT8)
-#undef GGML_USE_LLAMAFILE
-#endif
-#ifdef GGML_USE_LLAMAFILE
-#include <llamafile/sgemm.h>
-#endif
 #if defined(_MSC_VER)
 // disable "possible loss of data" to avoid hundreds of casts
@@ -28670,14 +28663,6 @@ int ggml_cpu_has_cann(void) {
 #endif
 }
-int ggml_cpu_has_llamafile(void) {
-#if defined(GGML_USE_LLAMAFILE)
-    return 1;
-#else
-    return 0;
-#endif
-}
 int ggml_cpu_has_gpublas(void) {
     return ggml_cpu_has_cuda() || ggml_cpu_has_vulkan() || ggml_cpu_has_kompute() || ggml_cpu_has_sycl();
 }

View File

@@ -7883,7 +7883,6 @@ const char * llama_print_system_info(void) {
     s += "SSSE3 = "       + std::to_string(ggml_cpu_has_ssse3())       + " | ";
     s += "VSX = "         + std::to_string(ggml_cpu_has_vsx())         + " | ";
     s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | ";
-    s += "LLAMAFILE = "   + std::to_string(ggml_cpu_has_llamafile())   + " | ";
     return s.c_str();
 }