NUMA-aware KV cache buffer type (experimental)

Co-authored-by: Stanisław Szymczyk <sszymczy@gmail.com>
This commit is contained in:
Saood Karim
2025-02-02 13:18:33 -06:00
parent a22250df93
commit e0101cfe5a
3 changed files with 96 additions and 0 deletions

View File

@@ -109,6 +109,8 @@ extern "C" {
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void);
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_numa_buffer_type(void);
#ifdef GGML_USE_CPU_HBM
GGML_API ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void);
#endif

View File

@@ -936,6 +936,90 @@ GGML_CALL ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, siz
return ggml_backend_buffer_init(ggml_backend_cpu_buffer_type(), cpu_backend_buffer_i_from_ptr, ptr, size);
}
// NUMA buffer interface - similar to CPU, but with pages allocated according to a NUMA first-touch policy
#include <sys/mman.h>
// Release a NUMA buffer by unmapping the anonymous mapping created at
// allocation time. A munmap failure is logged but not treated as fatal,
// since there is no way to report an error from the free path.
GGML_CALL static void ggml_backend_numa_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    void * base = buffer->context;
    const size_t len = buffer->size;
    const int rc = munmap(base, len);
    if (rc != 0) {
        GGML_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
    }
}
// Clear the buffer contents to `value`.
//
// Bug fix: the original ignored `value` entirely. POSIX_MADV_DONTNEED can at
// best yield zero-filled pages, so it is only a valid implementation of
// clear() when value == 0; any nonzero fill must actually write the memory.
// The madvise path is kept for the zero case because dropping the pages lets
// them be re-faulted on next touch, preserving the NUMA first-touch placement
// (each page lands on the node of the thread that next writes it).
// NOTE(review): posix_madvise(POSIX_MADV_DONTNEED) is advisory and is a no-op
// on some libc builds, in which case previously touched pages keep their old
// contents — confirm on the target platform if clear(0) must guarantee zeros.
GGML_CALL static void ggml_backend_numa_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    if (value != 0) {
        // madvise cannot produce a nonzero fill; fall back to a plain memset
        memset(buffer->context, value, buffer->size);
        return;
    }
    if (posix_madvise(buffer->context, buffer->size, POSIX_MADV_DONTNEED)) {
        GGML_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_DONTNEED) failed: %s\n",
                strerror(errno));
    }
}
// Buffer vtable for NUMA buffers. Only free_buffer and clear differ from the
// plain CPU buffer (the storage is mmap-backed instead of malloc-backed);
// all data-movement callbacks are shared with the CPU buffer implementation.
// NOTE: the initializers are positional — the /* .field = */ comments are
// labels only, so the entries must stay in struct declaration order.
static const struct ggml_backend_buffer_i ggml_backend_numa_buffer_i = {
/* .free_buffer = */ ggml_backend_numa_buffer_free_buffer,
/* .get_base = */ ggml_backend_cpu_buffer_get_base,
/* .init_tensor = */ NULL, // no initialization required
/* .memset_tensor = */ ggml_backend_cpu_buffer_memset_tensor,
/* .set_tensor = */ ggml_backend_cpu_buffer_set_tensor,
/* .get_tensor = */ ggml_backend_cpu_buffer_get_tensor,
/* .cpy_tensor = */ ggml_backend_cpu_buffer_cpy_tensor,
/* .clear = */ ggml_backend_numa_buffer_clear,
/* .reset = */ NULL,
};
// NUMA buffer type - similar to CPU, but with pages allocated according to a NUMA first-touch policy
// Human-readable name of this buffer type; the buft argument is unused
// because the name is the same for every instance.
GGML_CALL static const char * ggml_backend_numa_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
    GGML_UNUSED(buft);

    return "NUMA";
}
// Allocate a buffer backed by anonymous mmap-ed pages so that physical
// placement follows the kernel's NUMA first-touch policy: each page is
// committed on the node of the thread that first writes it.
//
// Returns NULL on allocation failure.
GGML_CALL static ggml_backend_buffer_t ggml_backend_numa_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    int flags = MAP_SHARED | MAP_ANONYMOUS;
    void * data = mmap(NULL, size, PROT_READ|PROT_WRITE, flags, -1, 0);
    if (data == MAP_FAILED) {
        // fix: include errno so ENOMEM vs EINVAL (e.g. size == 0) is
        // distinguishable, matching the other logs in this file
        GGML_LOG_ERROR("%s: failed to allocate buffer of size %zu: %s\n", __func__, size, strerror(errno));
        return NULL;
    }
    // KV-cache access is effectively random; advisory only, failure is non-fatal
    if (posix_madvise(data, size, POSIX_MADV_RANDOM)) {
        GGML_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n",
                strerror(errno));
    }
    return ggml_backend_buffer_init(buft, ggml_backend_numa_buffer_i, data, size);
}
// Required tensor alignment inside a NUMA buffer — identical to the CPU
// backend, since the same data-movement callbacks operate on it.
GGML_CALL static size_t ggml_backend_numa_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    GGML_UNUSED(buft);

    return TENSOR_ALIGNMENT;
}
// NUMA buffers live in ordinary host memory (CPU-addressable mmap pages),
// so this buffer type always reports itself as host-resident.
GGML_CALL static bool ggml_backend_numa_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
    GGML_UNUSED(buft);

    return true;
}
// Returns the singleton NUMA buffer type. The static has program lifetime,
// so the returned pointer is always valid and the same on every call.
// NOTE: the iface initializers are positional — the /* .field = */ comments
// are labels only, so keep the entries in struct declaration order.
GGML_CALL ggml_backend_buffer_type_t ggml_backend_numa_buffer_type(void) {
static struct ggml_backend_buffer_type ggml_backend_numa_buffer_type = {
/* .iface = */ {
/* .get_name = */ ggml_backend_numa_buffer_type_get_name,
/* .alloc_buffer = */ ggml_backend_numa_buffer_type_alloc_buffer,
/* .get_alignment = */ ggml_backend_numa_buffer_type_get_alignment,
/* .get_max_size = */ NULL, // defaults to SIZE_MAX
/* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
/* .is_host = */ ggml_backend_numa_buffer_type_is_host,
},
/* .device = */ NULL, // FIXME ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
/* .context = */ NULL,
};
return &ggml_backend_numa_buffer_type;
}
GGML_CALL static ggml_backend_t ggml_backend_reg_cpu_init(const char * params, void * user_data) {
return ggml_backend_cpu_init();

View File

@@ -3249,6 +3249,15 @@ static bool llama_kv_cache_init(
bool warn = true;
int n_mla = 0;
auto * reg = ggml_backend_dev_backend_reg(ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU));
auto * is_numa_fn = (decltype(ggml_is_numa) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa");
bool is_numa = is_numa_fn();
if (!offload && is_numa) {
LLAMA_LOG_INFO("%s: NUMA usage detected, using NUMA-aware buffer for KV cache\n", __func__);
}
for (int i = 0; i < (int) n_layer; i++) {
const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s();
const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s();
@@ -3257,6 +3266,7 @@ static bool llama_kv_cache_init(
const uint32_t n_embd_head_k= hparams.n_embd_head_k;
<<<<<<< HEAD
struct ggml_context * ctx = offload ? ctx_map.at(model.buft_layer[i].buft) : cache.ctxs.front();
ggml_tensor * k;
ggml_tensor * v;