Mirror of https://github.com/ikawrakow/ik_llama.cpp.git
Test transparent huge pages on Linux (#278)
* Adding ability to use THP on Linux

* Use the actual page size used for mmap also in munmap

* Add -thp to llama-bench

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
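In short: when `use_thp` is enabled, the loader does not map the GGUF file directly. It creates an anonymous `MAP_HUGETLB` mapping rounded up to a whole number of default-sized huge pages and copies the file into it with `pread`, falling back to the usual file-backed `mmap` if the huge-page allocation fails. Below is a condensed, standalone sketch of that idea; the function name, error handling, and structure are illustrative, not the patch's exact code.

```cpp
#include <sys/mman.h>
#include <unistd.h>
#include <stdexcept>

// Sketch only: copy the contents of `fd` (of size `file_size`) into an
// anonymous huge-page mapping. `huge` is the default huge page size
// (read from /proc/meminfo "Hugepagesize:", typically 2 MiB).
static void * load_into_huge_pages(int fd, size_t file_size, size_t huge) {
    // round the mapping up to a whole number of huge pages
    size_t map_size = huge * ((file_size + huge - 1) / huge);
    void * addr = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
    if (addr == MAP_FAILED) {
        return nullptr; // caller falls back to a normal file-backed mmap
    }
    size_t tot = 0;
    while (tot < file_size) {
        ssize_t n = pread(fd, static_cast<char *>(addr) + tot, file_size - tot, tot);
        if (n <= 0) {
            munmap(addr, map_size);
            throw std::runtime_error("reading into huge pages failed");
        }
        tot += static_cast<size_t>(n);
    }
    return addr;
}
```

The trade-off is that the huge-page copy is private anonymous memory, so the whole model is read up front rather than demand-paged from the file.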
@@ -993,6 +993,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.use_mmap = false;
return true;
}
if (arg == "-thp" || arg == "--transparent-huge-pages") {
params.use_thp = true;
return true;
}
if (arg == "--numa") {
CHECK_ARG
std::string value(argv[i]);
@@ -2316,6 +2320,7 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params &
mparams.use_mlock = params.use_mlock;
mparams.check_tensors = params.check_tensors;
mparams.repack_tensors = params.repack_tensors;
mparams.use_thp = params.use_thp;
if (params.kv_overrides.empty()) {
mparams.kv_overrides = NULL;
} else {
@@ -3371,6 +3376,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
fprintf(stream, "n_probs: %d # only used by server binary, default: 0\n", sparams.n_probs);
fprintf(stream, "no_mmap: %s # default: false\n", !params.use_mmap ? "true" : "false");
fprintf(stream, "repack: %s # default: false\n", params.repack_tensors ? "true" : "false");
fprintf(stream, "use_thp: %s # default: false\n", params.use_thp ? "true" : "false");
fprintf(stream, "penalize_nl: %s # default: false\n", sparams.penalize_nl ? "true" : "false");
fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type);
fprintf(stream, "ppl_stride: %d # default: 0\n", params.ppl_stride);

@@ -194,6 +194,7 @@ struct gpt_params {
bool warmup = true; // warmup run
bool check_tensors = false; // validate tensor data
bool repack_tensors = false; // repack tensors if interleaved variant is available
bool use_thp = false; // use transparent huge pages (linux only)

std::string cache_type_k = "f16"; // KV cache data type for the K
std::string cache_type_v = "f16"; // KV cache data type for the V

@@ -248,6 +248,7 @@ struct cmd_params {
bool warmup;
bool repack = false;
bool fmoe = false;
bool use_thp = false;
output_formats output_format;
output_formats output_format_stderr;
};
@@ -281,6 +282,7 @@ static const cmd_params cmd_params_defaults = {
/* verbose */ false,
/* warmup */ true,
/* repack */ false,
/* use_thp */ false,
/* fmoe */ false,
/* output_format */ MARKDOWN,
/* output_format_stderr */ NONE,
@@ -320,6 +322,7 @@ static void print_usage(int /* argc */, char ** argv) {
printf(" -v, --verbose (default: %s)\n", cmd_params_defaults.verbose ? "1" : "0");
printf(" -w, --warmup <0|1> (default: %s)\n", cmd_params_defaults.warmup ? "1" : "0");
printf(" -rtr, --run-time-repack <0|1> (default: %s)\n", cmd_params_defaults.repack ? "1" : "0");
printf(" -thp, --transparent-huge-pages <0|1> (default: %s)\n", cmd_params_defaults.use_thp? "1" : "0");
printf(" -ot, --override-tensor pattern (default: none)\n");
printf(" -fmoe, --fused-moe <0|1> (default: %s)\n", cmd_params_defaults.fmoe? "1" : "0");
printf("\n");
@@ -691,6 +694,12 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
break;
}
params.repack = std::stoi(argv[i]);
} else if (arg == "-thp" || arg == "--transparent-huge-pages") {
if (++i >= argc) {
invalid_param = true;
break;
}
params.use_thp = std::stoi(argv[i]);
} else if (arg == "-fmoe" || arg == "--fused-moe") {
if (++i >= argc) {
invalid_param = true;
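Note that the two front ends treat the flag differently, as the hunks above show: the common argument parser handles -thp / --transparent-huge-pages as a plain switch (it simply sets params.use_thp = true), while llama-bench expects an explicit <0|1> value parsed with std::stoi, e.g. a hypothetical `llama-bench -m model.gguf -thp 1` (the model path here is a placeholder).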
@@ -781,6 +790,7 @@ struct cmd_params_instance {
bool embeddings;
bool repack = false;
bool fmoe = false;
bool use_thp = false;
const llama_model_tensor_buft_override* buft_overrides;

llama_model_params to_llama_mparams() const {
@@ -795,6 +805,7 @@ struct cmd_params_instance {
mparams.tensor_split = tensor_split.data();
mparams.use_mmap = use_mmap;
mparams.repack_tensors = repack;
mparams.use_thp = use_thp;
mparams.tensor_buft_overrides = buft_overrides;

return mparams;
@@ -808,6 +819,7 @@ struct cmd_params_instance {
main_gpu == other.main_gpu &&
use_mmap == other.use_mmap &&
repack == other.repack &&
use_thp == other.use_thp &&
tensor_split == other.tensor_split;
}

@@ -882,6 +894,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .embeddings = */ embd,
/* .repack = */ params.repack,
/* .fmoe = */ params.fmoe,
/* .use_thp = */ params.use_thp,
/* .buft_overrides=*/ params.buft_overrides.data(),
};
instances.push_back(instance);
@@ -915,6 +928,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .embeddings = */ embd,
/* .repack = */ params.repack,
/* .fmoe = */ params.fmoe,
/* .use_thp = */ params.use_thp,
/* .buft_overrides=*/ params.buft_overrides.data(),
};
instances.push_back(instance);
@@ -948,6 +962,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .embeddings = */ embd,
/* .repack = */ params.repack,
/* .fmoe = */ params.fmoe,
/* .use_thp = */ params.use_thp,
/* .buft_overrides=*/ params.buft_overrides.data(),
};
instances.push_back(instance);
@@ -981,6 +996,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .embeddings = */ embd,
/* .repack = */ params.repack,
/* .fmoe = */ params.fmoe,
/* .use_thp = */ params.use_thp,
/* .buft_overrides=*/ params.buft_overrides.data(),
};
instances.push_back(instance);
@@ -1025,6 +1041,7 @@ struct test {
bool embeddings;
bool repack = false;
bool fmoe = false;
bool use_thp = false;
int n_prompt;
int n_gen;
std::string test_time;
@@ -1058,6 +1075,7 @@ struct test {
embeddings = inst.embeddings;
repack = inst.repack;
fmoe = inst.fmoe;
use_thp = inst.use_thp;
n_prompt = inst.n_prompt;
n_gen = inst.n_gen;
test_kind = inst.test_kind;
@@ -1148,7 +1166,7 @@ struct test {
"n_threads", "type_k", "type_v",
"n_gpu_layers", "split_mode",
"main_gpu", "no_kv_offload", "flash_attn", "mla_attn", "attn_max_batch", "ser",
"tensor_split", "use_mmap", "embeddings", "repack", "fused_moe",
"tensor_split", "use_mmap", "embeddings", "repack", "fused_moe", "use_thp",
"n_prompt", "n_gen", "test_time",
"avg_ns", "stddev_ns",
"avg_ts", "stddev_ts", "test",
@@ -1169,7 +1187,7 @@ struct test {
}
if (field == "cuda" || field == "vulkan" || field == "kompute" || field == "metal" ||
field == "gpu_blas" || field == "blas" || field == "sycl" ||field == "f16_kv" || field == "no_kv_offload" ||
field == "flash_attn" || field == "use_mmap" || field == "embeddings" || field == "repack" ||
field == "flash_attn" || field == "use_mmap" || field == "embeddings" || field == "repack" || field == "use_thp" ||
field == "fused_moe") {
return BOOL;
}
@@ -1211,7 +1229,8 @@ struct test {
std::to_string(n_gpu_layers), split_mode_str(split_mode),
std::to_string(main_gpu), std::to_string(no_kv_offload), std::to_string(flash_attn),
std::to_string(mla_attn), std::to_string(attn_max_batch), ser_to_string(ser),
tensor_split_str, std::to_string(use_mmap), std::to_string(embeddings), std::to_string(repack), std::to_string(fmoe),
tensor_split_str, std::to_string(use_mmap), std::to_string(embeddings),
std::to_string(repack), std::to_string(fmoe), std::to_string(use_thp),
std::to_string(n_prompt), std::to_string(n_gen), test_time,
std::to_string(avg_ns()), std::to_string(stdev_ns()),
std::to_string(avg_ts()), std::to_string(stdev_ts()),
@@ -1389,6 +1408,9 @@ struct markdown_printer : public printer {
if (field == "repack") {
return 3;
}
if (field == "use_thp") {
return 3;
}
if (field == "fused_moe") {
return 4;
}
@@ -1435,6 +1457,9 @@ struct markdown_printer : public printer {
if (field == "repack") {
return "rtr";
}
if (field == "use_thp") {
return "thp";
}
if (field == "fused_moe") {
return "fmoe";
}
@@ -1505,6 +1530,9 @@ struct markdown_printer : public printer {
if (params.repack != cmd_params_defaults.repack) {
fields.emplace_back("repack");
}
if (params.use_thp != cmd_params_defaults.use_thp) {
fields.emplace_back("use_thp");
}
if (params.fmoe != cmd_params_defaults.fmoe) {
fields.emplace_back("fused_moe");
}
@@ -345,6 +345,7 @@ extern "C" {
bool use_mlock; // force system to keep model in RAM
bool check_tensors; // validate model tensor data
bool repack_tensors;// repack if available
bool use_thp; // use transparent huge pages (linux only)
};

// NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations
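For programs that drive the C API directly rather than going through the command-line flags, the new field is set on llama_model_params before loading. A minimal sketch, assuming this fork keeps the usual llama_load_model_from_file / llama_free_model entry points; the model path is a placeholder:

```cpp
#include "llama.h"

int main() {
    llama_model_params mparams = llama_model_default_params();
    mparams.use_mmap = true;   // the huge-page path is only taken when mmap is used
    mparams.use_thp  = true;   // try to back the mapping with huge pages (Linux only)

    // "model.gguf" is a placeholder path for this sketch
    llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model == nullptr) {
        return 1;
    }
    llama_free_model(model);
    return 0;
}
```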
@@ -1827,6 +1827,7 @@ using llama_files = std::vector<std::unique_ptr<llama_file>>;
struct llama_mmap {
void * addr;
size_t size;
size_t mapped_page_size = 0;

llama_mmap(const llama_mmap &) = delete;

@@ -1836,7 +1837,7 @@ struct llama_mmap {
// list of mapped fragments (first_offset, last_offset)
std::vector<std::pair<size_t, size_t>> mapped_fragments;

llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false, [[maybe_unused]] bool use_thp = false) {
size = file->size;
int fd = fileno(file->fp);
int flags = MAP_SHARED;
@@ -1849,6 +1850,29 @@ struct llama_mmap {
strerror(errno));
}
if (prefetch) { flags |= MAP_POPULATE; }
if (use_thp) {
size_t huge = get_default_huge_page_size();
auto size = huge*((file->size + huge - 1)/huge);
addr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
if (addr != MAP_FAILED) {
printf("%s: using THP with page size %zu MiB ", __func__, huge/(1024*1024));
fflush(stdout);
size_t tot = 0;
while (tot < file->size) {
auto n_read = pread(fd, static_cast<char*>(addr) + tot, file->size - tot, tot);
if (n_read < 0) throw std::runtime_error(format("Reading into mapped huge pages failed at %zu (%s)", tot, strerror(errno)));
printf("."); fflush(stdout);
tot += n_read;
}
printf(" done\n");
mapped_fragments.emplace_back(0, file->size);
mapped_page_size = huge;
return;
}
else {
fprintf(stderr, "%s: mmap with huge page size %zu MiB failed (%s)\n", __func__, huge/(1024*1024), strerror(errno));
}
}
#endif
addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
if (addr == MAP_FAILED) { // NOLINT
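One practical caveat: MAP_HUGETLB allocates from the kernel's explicitly reserved huge-page pool, so the anonymous mapping above will typically fail (and the loader falls back to the regular file-backed mmap, as the else branch shows) unless enough pages have been reserved beforehand, e.g. through /proc/sys/vm/nr_hugepages. A small helper for checking the pool size; this is not part of the patch, only an illustration of the precondition:

```cpp
#include <fstream>

// Returns the number of huge pages currently reserved in the default pool,
// or -1 if the sysctl file cannot be read. Shown only to illustrate why the
// MAP_HUGETLB call above can fail and trigger the fallback path.
static long reserved_huge_pages() {
    std::ifstream in("/proc/sys/vm/nr_hugepages");
    long n = -1;
    if (in) in >> n;
    return n;
}

// Example: a 10 GiB model needs ceil(10 GiB / 2 MiB) = 5120 reserved 2 MiB pages,
// which an administrator could set up with:
//   echo 5120 > /proc/sys/vm/nr_hugepages
```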
@@ -1893,7 +1917,7 @@ struct llama_mmap {
void unmap_fragment(size_t first, size_t last) {
// note: this function must not be called multiple times with overlapping ranges
// otherwise, there is a risk of invalidating addresses that have been repurposed for other mappings
int page_size = sysconf(_SC_PAGESIZE);
int page_size = mapped_page_size > 0 ? mapped_page_size : sysconf(_SC_PAGESIZE);
align_range(&first, &last, page_size);
size_t len = last - first;
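The unmap_fragment change follows directly from the new mapping: fragment boundaries are aligned to the mapping's page size before munmap, and for a huge-page-backed mapping that must be the huge page size (typically 2 MiB) rather than the 4 KiB value from sysconf, since munmap on a hugetlb mapping requires a huge-page-aligned address and length. An illustrative alignment helper with a worked example; the patch itself reuses the existing align_range:

```cpp
#include <cstddef>

// Illustrative only: round `first` up and `last` down to multiples of `page_size`.
static void align_to_pages(size_t & first, size_t & last, size_t page_size) {
    first = (first + page_size - 1) / page_size * page_size;
    last  = last / page_size * page_size;
    if (last < first) last = first;
}

// With 2 MiB huge pages, a fragment [1 MiB, 7 MiB) shrinks to [2 MiB, 6 MiB);
// with the default 4 KiB page size it would stay [1 MiB, 7 MiB), and munmap
// on a hugetlb mapping would then see a misaligned address and length.
```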
@@ -1935,6 +1959,28 @@ struct llama_mmap {
mapped_fragments = std::move(new_mapped_fragments);
}

#ifdef __linux__
static int get_default_huge_page_size() {
int pg_size = 2048;
std::ifstream in("/proc/meminfo");
if (in) {
std::string line;
while (true) {
std::getline(in, line);
if (in.fail()) break;
if (auto pos = line.find("Hugepagesize:"); pos != std::string::npos) {
std::istringstream str(line.data() + pos + 13);
int aux;
str >> aux;
if (!str.fail()) pg_size = aux;
break;
}
}
}
return pg_size * 1024;
}
#endif

~llama_mmap() {
for (const auto & frag : mapped_fragments) {
if (munmap((char *) addr + frag.first, frag.second - frag.first)) {
@@ -1945,7 +1991,7 @@ struct llama_mmap {
#elif defined(_WIN32)
static constexpr bool SUPPORTED = true;

llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false) {
llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false, [[maybe_unused]] bool use_thp = false) {
GGML_UNUSED(numa);

size = file->size;
@@ -2007,10 +2053,11 @@ struct llama_mmap {
#else
static constexpr bool SUPPORTED = false;

llama_mmap(struct llama_file * file, size_t prefetch = -1, bool numa = false) {
llama_mmap(struct llama_file * file, size_t prefetch = -1, bool numa = false, bool use_thp = false) {
GGML_UNUSED(file);
GGML_UNUSED(prefetch);
GGML_UNUSED(numa);
GGML_UNUSED(use_thp);

throw std::runtime_error("mmap not supported");
}
@@ -3842,6 +3889,7 @@ struct llama_model_loader {
bool use_mmap = false;
bool check_tensors;
bool repack_tensors = false;
bool use_thp = false;

llama_files files;
llama_ftype ftype;
@@ -3876,7 +3924,7 @@ struct llama_model_loader {
std::string arch_name;
LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);

llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, bool repack_tensors,
llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, bool repack_tensors, bool use_thp,
const llama_model_kv_override * param_overrides_p,
const llama_model_tensor_buft_override * param_tensor_buft_overrides_p) {
int trace = 0;
@@ -4140,6 +4188,7 @@ struct llama_model_loader {
this->use_mmap = use_mmap;
this->check_tensors = check_tensors;
this->repack_tensors = repack_tensors;
this->use_thp = use_thp;
}

~llama_model_loader() {
@@ -4453,12 +4502,12 @@ struct llama_model_loader {
}
}

void init_mappings(bool prefetch = true, llama_mlocks * mlock_mmaps = nullptr) {
void init_mappings(bool prefetch = true, llama_mlocks * mlock_mmaps = nullptr, bool use_thp = false) {
if (use_mmap) {
mappings.reserve(files.size());
mmaps_used.reserve(files.size());
for (const auto & file : files) {
std::unique_ptr<llama_mmap> mapping(new llama_mmap(file.get(), prefetch ? -1 : 0, ggml_is_numa()));
std::unique_ptr<llama_mmap> mapping(new llama_mmap(file.get(), prefetch ? -1 : 0, ggml_is_numa(), use_thp));
mmaps_used.emplace_back(mapping->size, 0);
if (mlock_mmaps) {
std::unique_ptr<llama_mlock> mlock_mmap(new llama_mlock());
@@ -8077,7 +8126,7 @@ static bool llm_load_tensors(

ml.done_getting_tensors();

ml.init_mappings(true, use_mlock ? &model.mlock_mmaps : nullptr);
ml.init_mappings(true, use_mlock ? &model.mlock_mmaps : nullptr, ml.use_thp);
model.mappings.reserve(ml.mappings.size());

// create the backend buffers
@@ -8410,7 +8459,7 @@ static bool llm_load_tensors(
static int llama_model_load(const std::string & fname, llama_model & model, llama_model_params & params) {
try {
llama_model_loader ml(fname, params.use_mmap, params.check_tensors,
params.repack_tensors, params.kv_overrides, params.tensor_buft_overrides);
params.repack_tensors, params.use_thp, params.kv_overrides, params.tensor_buft_overrides);

model.hparams.vocab_only = params.vocab_only;

@@ -17494,7 +17543,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
auto v = (std::vector<llama_model_kv_override>*)params->kv_overrides;
kv_overrides = v->data();
}
llama_model_loader ml(fname_inp, use_mmap, /*check_tensors*/ true, /* repack_tensors */ false, kv_overrides, nullptr);
llama_model_loader ml(fname_inp, use_mmap, /*check_tensors*/ true, /* repack_tensors */ false, /* use_thp */ false, kv_overrides, nullptr);
ml.init_mappings(false); // no prefetching

llama_model model;
@@ -18318,6 +18367,7 @@ struct llama_model_params llama_model_default_params() {
/*.use_mlock =*/ false,
/*.check_tensors =*/ false,
/*.repack_tensors =*/ false,
/*.use_thp =*/ false,
};

#ifdef GGML_USE_METAL