Mirror of https://github.com/ikawrakow/ik_llama.cpp.git (synced 2026-01-26 17:20:01 +00:00)
KV cache read/write for split mode "graph" (#1048)
* Handle split cache (write)
* Handle split cache (read)
* Fix writing the data twice

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
src/llama.cpp (148 changed lines)
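Overview (editor's note, not part of the commit): with split mode "graph" each layer's K/V cache tensor is sharded across devices along its row dimension; the per-device pieces hang off tensor->extra as a ggml_split_tensor_t, and the split geometry matches that of the layer's wk/wv weight. Session files and state buffers keep the unsplit layout, storing each row whole, so the read/write paths below translate between the two layouts. Schematically:

    serialized buffer:  row i = [ dev0 slice | dev1 slice | ... ]      (full rows)
    split tensors:      dev n = [ row0 slice n ][ row1 slice n ] ...   (contiguous per device)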
@@ -5320,7 +5320,7 @@ bool llama_save_session_file(struct llama_context * ctx, const char * path_sessi
 // TODO: replace all non-fatal assertions with returned errors or exceptions
 struct llama_data_write {
     virtual void write(const void * src, size_t size) = 0;
-    virtual void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) = 0;
+    virtual void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size, int il) = 0;
     virtual size_t get_size_written() = 0;
     virtual ~llama_data_write() = default;
@@ -5449,7 +5449,7 @@ struct llama_data_write {
         for (const auto & range : cell_ranges) {
             const size_t range_size = range.second - range.first;
             const size_t buf_size = range_size * k_size_row;
-            write_tensor_data(kv_self.k_l[il], range.first * k_size_row, buf_size);
+            write_tensor_data(kv_self.k_l[il], range.first * k_size_row, buf_size, il);
         }
     }
@@ -5469,7 +5469,7 @@ struct llama_data_write {
         for (const auto & range : cell_ranges) {
             const size_t range_size = range.second - range.first;
             const size_t buf_size = range_size * v_size_row;
-            write_tensor_data(kv_self.v_l[il], range.first * v_size_row, buf_size);
+            write_tensor_data(kv_self.v_l[il], range.first * v_size_row, buf_size, il);
         }
     }
 }
@@ -5497,7 +5497,7 @@ struct llama_data_write {
                 const size_t range_size = range.second - range.first;
                 const size_t src_offset = (range.first + j * kv_size) * v_size_el;
                 const size_t buf_size = range_size * v_size_el;
-                write_tensor_data(kv_self.v_l[il], src_offset, buf_size);
+                write_tensor_data(kv_self.v_l[il], src_offset, buf_size, il);
             }
         }
     }
@@ -5716,6 +5716,41 @@ struct llama_data_read {
         return true;
     }
 
+    void read_kv_cache_data_split(llama_context * ctx, ggml_tensor * tensor, const uint8_t * data, size_t head, size_t row_size, int nrows, int il) {
+        GGML_ASSERT(il >= 0 && il < int(ctx->model.layers.size()));
+        GGML_ASSERT(ggml_internal_get_type_traits(tensor->type).row_meta_size == 0);
+        auto kv = tensor->ne[1] > 1 ? ctx->model.layers[il].wk : ctx->model.layers[il].wv;
+        auto extra = (ggml_split_tensor_t *)tensor->extra;
+        auto kv_extra = (ggml_split_tensor_t *)kv->extra;
+        GGML_ASSERT(extra && kv_extra);
+        auto ne = kv->ne[1];
+        size_t sum_ne = 0;
+        size_t sum_split_row_size = 0;
+        GGML_ASSERT(row_size == ggml_row_size(tensor->type, ne));
+        std::vector<uint8_t> aux;
+        for (int id = 0; id < extra->n_device; ++id) {
+            auto split = extra->splits[id];
+            auto kv_split = kv_extra->splits[id];
+            GGML_ASSERT((split && kv_split) || (!split && !kv_split));
+            if (!split) continue;
+            GGML_ASSERT(split->type == tensor->type);
+            auto split_row_size = ggml_row_size(tensor->type, kv_split->ne[1]);
+            aux.resize(split_row_size*nrows);
+            auto src = data + sum_split_row_size;
+            auto dst = aux.data();
+            for (int row = 0; row < nrows; ++row) {
+                std::memcpy(dst, src, split_row_size);
+                dst += split_row_size;
+                src += row_size;
+            }
+            ggml_backend_tensor_set(split, aux.data(), head*split_row_size, nrows*split_row_size);
+            sum_ne += kv_split->ne[1];
+            sum_split_row_size += split_row_size;
+        }
+        GGML_ASSERT(sum_ne == ne);
+        GGML_ASSERT(sum_split_row_size == row_size);
+    }
+
     bool read_kv_cache_data(struct llama_context * ctx, uint32_t cell_count) {
         const struct llama_hparams & hparams = ctx->model.hparams;
         struct llama_kv_cache & kv_self = ctx->kv_self;
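For every device, the new helper gathers that device's slice out of each serialized full row into a contiguous staging buffer, then uploads it with ggml_backend_tensor_set; the per-device column widths come from the matching wk/wv split (kv_split->ne[1]), which is sharded the same way as the cache tensor. A minimal standalone sketch of just the strided-copy pattern (illustrative sizes and names, no ggml types):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    int main() {
        const int    nrows         = 4;        // KV cells being restored
        const size_t split_sizes[] = {3, 5};   // bytes per row on device 0 / device 1
        const size_t row_size      = 3 + 5;    // full (concatenated) row size

        // serialized data: nrows full rows, each row = [dev0 slice | dev1 slice]
        std::vector<uint8_t> data(nrows * row_size);
        for (size_t i = 0; i < data.size(); ++i) data[i] = (uint8_t)i;

        size_t sum_split_row_size = 0;
        for (size_t split_row_size : split_sizes) {
            std::vector<uint8_t> aux(split_row_size * nrows);   // contiguous staging buffer
            const uint8_t * src = data.data() + sum_split_row_size;
            uint8_t       * dst = aux.data();
            for (int row = 0; row < nrows; ++row) {
                std::memcpy(dst, src, split_row_size);  // this device's slice of the row
                dst += split_row_size;
                src += row_size;                        // same slice in the next row
            }
            // the commit uploads `aux` here via ggml_backend_tensor_set(split, ...)
            sum_split_row_size += split_row_size;
        }
        printf("per-row slices consumed: %zu of %zu bytes\n", sum_split_row_size, row_size);
        return 0;
    }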
@@ -5770,7 +5805,11 @@ struct llama_data_read {
 
         if (cell_count) {
             // Read and set the keys for the whole cell range
-            ggml_backend_tensor_set(kv_self.k_l[il], read(cell_count * k_size_row), kv_self.head * k_size_row, cell_count * k_size_row);
+            if (kv_self.k_l[il]->extra) {
+                read_kv_cache_data_split(ctx, kv_self.k_l[il], read(cell_count * k_size_row), kv_self.head, k_size_row, cell_count, il);
+            } else {
+                ggml_backend_tensor_set(kv_self.k_l[il], read(cell_count * k_size_row), kv_self.head * k_size_row, cell_count * k_size_row);
+            }
         }
     }
@@ -5798,7 +5837,11 @@ struct llama_data_read {
 
         if (cell_count) {
             // Read and set the values for the whole cell range
-            ggml_backend_tensor_set(kv_self.v_l[il], read(cell_count * v_size_row), kv_self.head * v_size_row, cell_count * v_size_row);
+            if (kv_self.v_l[il]->extra) {
+                read_kv_cache_data_split(ctx, kv_self.v_l[il], read(cell_count * v_size_row), kv_self.head, v_size_row, cell_count, il);
+            } else {
+                ggml_backend_tensor_set(kv_self.v_l[il], read(cell_count * v_size_row), kv_self.head * v_size_row, cell_count * v_size_row);
+            }
         }
     }
 }
@@ -5834,6 +5877,9 @@ struct llama_data_read {
         }
 
         if (cell_count) {
+            if (kv_self.v_l[il]->extra) {
+                throw std::runtime_error("Transposed V cache is not supported with split mode 'graph'");
+            }
             // For each row in the transposed matrix, read the values for the whole cell range
             for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
                 const size_t dst_offset = (kv_self.head + j * kv_self.size) * v_size_el;
@@ -5871,7 +5917,7 @@ struct llama_data_write_dummy : llama_data_write {
         size_written += size;
     }
 
-    void write_tensor_data(const struct ggml_tensor * /* tensor */, size_t /* offset */, size_t size) override {
+    void write_tensor_data(const struct ggml_tensor * /* tensor */, size_t /* offset */, size_t size, int /* il */) override {
         size_written += size;
     }
@@ -5885,7 +5931,11 @@ struct llama_data_write_buffer : llama_data_write {
     size_t buf_size = 0;
     size_t size_written = 0;
 
-    llama_data_write_buffer(uint8_t * p, size_t len) : ptr(p), buf_size(len) {}
+    const llama_model & model;
+
+    std::vector<uint8_t> aux_buffer;
+
+    llama_data_write_buffer(uint8_t * p, size_t len, const llama_model & _model) : ptr(p), buf_size(len), model(_model) {}
 
     void write(const void * src, size_t size) override {
         if (size > buf_size) {
@@ -5897,16 +5947,66 @@ struct llama_data_write_buffer : llama_data_write {
         buf_size -= size;
     }
 
-    void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) override {
+    void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size, int il) override {
         if (size > buf_size) {
             throw std::runtime_error("unexpectedly reached end of buffer");
         }
-        ggml_backend_tensor_get(tensor, ptr, offset, size);
+        if (tensor->extra) {
+            get_tensor_data_split(tensor, offset, size, il);
+        } else {
+            ggml_backend_tensor_get(tensor, ptr, offset, size);
+        }
         ptr += size;
         size_written += size;
         buf_size -= size;
     }
 
+    void get_tensor_data_split(const ggml_tensor * tensor, size_t offset, size_t size, int il) {
+        auto tt = ggml_internal_get_type_traits(tensor->type);
+        if (tt.row_meta_size > 0) {
+            throw std::runtime_error(std::string{"Split cache for type "} + ggml_type_name(tensor->type) + " is not supported");
+        }
+        GGML_ASSERT(il >= 0 && il < int(model.layers.size()));
+        auto kv = tensor->ne[1] > 1 ? model.layers[il].wk : model.layers[il].wv;
+        get_tensor_data_split(ptr, tensor, kv, aux_buffer, offset, size);
+    }
+
+    static void get_tensor_data_split(uint8_t * ptr, const ggml_tensor * tensor, const ggml_tensor * kv,
+            std::vector<uint8_t> & aux_buffer, size_t offset, size_t size) {
+        auto ne = kv->ne[1];
+        auto full_row_size = ggml_row_size(tensor->type, ne);
+        GGML_ASSERT(offset % full_row_size == 0);
+        GGML_ASSERT(size % full_row_size == 0);
+        auto first_row = offset / full_row_size;
+        auto num_rows = size / full_row_size;
+        auto extra = (const ggml_split_tensor_t *)tensor->extra;
+        auto kv_extra = (const ggml_split_tensor_t *)kv->extra;
+        GGML_ASSERT(extra && kv_extra);
+        size_t split_offset = 0;
+        size_t total_size = 0;
+        for (int id = 0; id < extra->n_device; ++id) {
+            auto split = extra->splits[id];
+            auto kv_split = kv_extra->splits[id];
+            GGML_ASSERT((split && kv_split) || (!split && !kv_split));
+            if (!split) continue;
+            GGML_ASSERT(split->type == tensor->type);
+            auto split_row_size = ggml_row_size(tensor->type, kv_split->ne[1]);
+            auto split_size = split_row_size * num_rows;
+            if (split_size > aux_buffer.size()) aux_buffer.resize(split_size);
+            ggml_backend_tensor_get(split, aux_buffer.data(), first_row*split_row_size, split_size);
+            auto dst = ptr + split_offset;
+            auto src = aux_buffer.data();
+            for (int row = 0; row < (int)num_rows; ++row) {
+                std::memcpy(dst, src, split_row_size);
+                dst += full_row_size;
+                src += split_row_size;
+            }
+            split_offset += split_row_size;
+            total_size += split_row_size * num_rows;
+        }
+        GGML_ASSERT(total_size == size);
+    }
+
     size_t get_size_written() override {
         return size_written;
     }
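The static get_tensor_data_split is the write-side inverse of the read helper: each device's split is fetched contiguously with ggml_backend_tensor_get into aux_buffer, then scattered into the output at that device's column offset, advancing by the full row size per row. Stripped of the ggml plumbing, the scatter loop amounts to this (a sketch; scatter_split is a made-up name):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Illustrative helper: place one device's rows (contiguous in `split_data`)
    // into the full-row output buffer at column byte-offset `split_offset`.
    static void scatter_split(uint8_t * out, const uint8_t * split_data,
                              size_t full_row_size, size_t split_row_size,
                              size_t split_offset, int nrows) {
        uint8_t       * dst = out + split_offset;
        const uint8_t * src = split_data;
        for (int row = 0; row < nrows; ++row) {
            std::memcpy(dst, src, split_row_size);
            dst += full_row_size;   // same column in the next full row
            src += split_row_size;  // next contiguous row of this split
        }
    }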
@@ -5943,20 +6043,34 @@ struct llama_data_write_file : llama_data_write {
     llama_file * file;
     size_t size_written = 0;
     std::vector<uint8_t> temp_buffer;
+    std::vector<uint8_t> aux_buffer;
 
-    llama_data_write_file(llama_file * f) : file(f) {}
+    const llama_model & model;
+
+    llama_data_write_file(llama_file * f, const llama_model & _model) : file(f), model(_model) {}
 
     void write(const void * src, size_t size) override {
         file->write_raw(src, size);
         size_written += size;
     }
 
-    void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) override {
+    void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size, int il) override {
         temp_buffer.resize(size);
-        ggml_backend_tensor_get(tensor, temp_buffer.data(), offset, size);
+        if (tensor->extra) {
+            get_tensor_data_split(tensor, offset, size, il);
+        } else {
+            ggml_backend_tensor_get(tensor, temp_buffer.data(), offset, size);
+        }
         write(temp_buffer.data(), temp_buffer.size());
     }
 
+    void get_tensor_data_split(const struct ggml_tensor * tensor, size_t offset, size_t size, int il) {
+        GGML_ASSERT(il >= 0 && il < int(model.layers.size()));
+        auto kv = tensor->ne[1] > 1 ? model.layers[il].wk : model.layers[il].wv;
+        temp_buffer.resize(size);
+        llama_data_write_buffer::get_tensor_data_split(temp_buffer.data(), tensor, kv, aux_buffer, offset, size);
+    }
+
     size_t get_size_written() override {
         return size_written;
     }
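Design note: rather than duplicating the interleaving logic, llama_data_write_file materializes the re-assembled rows into temp_buffer via llama_data_write_buffer's static helper and then streams that buffer through its ordinary write(), so both writers share a single implementation.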
@@ -6016,7 +6130,7 @@ static size_t llama_state_get_data_internal(struct llama_context * ctx, llama_da
 }
 
 size_t llama_state_get_data(struct llama_context * ctx, uint8_t * dst, size_t size) {
-    llama_data_write_buffer data_ctx(dst, size);
+    llama_data_write_buffer data_ctx(dst, size, ctx->model);
    try {
        return llama_state_get_data_internal(ctx, data_ctx);
    } catch (const std::exception & err) {
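At the public API level nothing changes: the extra model reference is supplied internally from ctx. A minimal usage sketch (assuming an initialized llama_context * ctx; llama_state_get_size is the companion sizing call in this codebase):

    std::vector<uint8_t> state(llama_state_get_size(ctx));
    const size_t written = llama_state_get_data(ctx, state.data(), state.size());
    // with split mode "graph" the buffer now contains full K/V rows re-assembled
    // from the per-device splits, i.e. the same layout as a non-split cache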
@@ -6128,7 +6242,7 @@ static bool llama_state_save_file_internal(struct llama_context * ctx, const cha
     file.write_raw(tokens, sizeof(llama_token) * n_token_count);
 
     // save the context state using stream saving
-    llama_data_write_file data_ctx(&file);
+    llama_data_write_file data_ctx(&file, ctx->model);
     llama_state_get_data_internal(ctx, data_ctx);
 
     return true;
@@ -6157,7 +6271,7 @@ size_t llama_state_seq_get_size(struct llama_context * ctx, llama_seq_id seq_id)
 }
 
 size_t llama_state_seq_get_data(struct llama_context * ctx, uint8_t * dst, size_t size, llama_seq_id seq_id) {
-    llama_data_write_buffer data_ctx(dst, size);
+    llama_data_write_buffer data_ctx(dst, size, ctx->model);
     try {
         return llama_state_seq_get_data_internal(ctx, data_ctx, seq_id);
     } catch (const std::exception & err) {
@@ -6195,7 +6309,7 @@ static size_t llama_state_seq_save_file_internal(struct llama_context * ctx, con
     file.write_raw(tokens, sizeof(llama_token) * n_token_count);
 
     // save the context state using stream saving
-    llama_data_write_file data_ctx(&file);
+    llama_data_write_file data_ctx(&file, ctx->model);
     llama_state_seq_get_data_internal(ctx, data_ctx, seq_id);
 
     const size_t res = file.tell();