Merge branch 'main' into andrewkchan/try_trellis

This commit is contained in:
Andrew Keen Chan
2025-05-20 06:48:14 +00:00
109 changed files with 20602 additions and 6765 deletions


@@ -1,3 +1,10 @@
+//
+// Copyright (C) 2023-2025 The llama.cpp authors
+// Copyright (C) 2024-2025 Iwan Kawrakow
+// MIT license
+// SPDX-License-Identifier: MIT
+//
 #include "common.h"
 #include "llama.h"
@@ -58,6 +65,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
{ "Q5_0_R4", LLAMA_FTYPE_MOSTLY_Q5_0_R4, " 5.50 bpw quantization", },
{ "Q6_0_R4", LLAMA_FTYPE_MOSTLY_Q6_0_R4, " 6.50 bpw quantization", },
{ "Q8_0_R8", LLAMA_FTYPE_MOSTLY_Q8_0_R8, " 8.50 bpw quantization", },
{ "Q8_KV", LLAMA_FTYPE_MOSTLY_Q8_KV, " 8.00 bpw quantization", },
{ "IQ4_XS", LLAMA_FTYPE_MOSTLY_IQ4_XS, " 4.25 bpw non-linear quantization", },
{ "IQ4_KS", LLAMA_FTYPE_MOSTLY_IQ4_KS, " 4.25 bpw non-linear quantization", },
{ "IQ4_KS_R4",LLAMA_FTYPE_MOSTLY_IQ4_KS_R4,"IQ4_KS repacked", },
@@ -85,6 +93,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
{ "Q6_K", LLAMA_FTYPE_MOSTLY_Q6_K, " 5.15G, +0.0008 ppl @ LLaMA-v1-7B", },
{ "Q6_K_R4", LLAMA_FTYPE_MOSTLY_Q6_K_R4, "Q6_K repacked", },
{ "Q8_K_R8", LLAMA_FTYPE_MOSTLY_Q8_K_R8, "Q8_K repacked", },
{ "Q8_KV_R8", LLAMA_FTYPE_MOSTLY_Q8_KV_R8, "Q8_KV repacked", },
{ "Q8_0", LLAMA_FTYPE_MOSTLY_Q8_0, " 6.70G, +0.0004 ppl @ LLaMA-v1-7B", },
{ "Q4_0_4_4", LLAMA_FTYPE_MOSTLY_Q4_0_4_4, " 4.34G, +0.4685 ppl @ Llama-3-8B", },
{ "Q4_0_4_8", LLAMA_FTYPE_MOSTLY_Q4_0_4_8, " 4.34G, +0.4685 ppl @ Llama-3-8B", },
@@ -136,15 +145,19 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
 //
 [[noreturn]]
 static void usage(const char * executable) {
-    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--attn-q-type] [--attn-k-type] [--attn-v-type] [--attn-qkv-type] [--attn-output-type] [--ffn-gate-type] [--ffn-down-type] [--ffn-up-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
+    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--hide-imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--attn-q-type] [--attn-k-type] [--attn-v-type] [--attn-qkv-type] [--attn-output-type] [--ffn-gate-type] [--ffn-down-type] [--ffn-up-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
     printf("  --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
     printf("  --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
     printf("  --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
     printf("  --imatrix file_name: use data in file_name as importance matrix for quant optimizations\n");
+    printf("  --hide-imatrix: do not store imatrix details in the quantized model\n");
     printf("  --include-weights tensor_name: use importance matrix for this/these tensor(s)\n");
     printf("  --exclude-weights tensor_name: don't use importance matrix for this/these tensor(s)\n");
     printf("  --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor.\n");
     printf("  --token-embedding-type ggml_type: use this ggml_type for the token_embd.weight tensor.\n\n");
+    printf("  --custom-q regex1=type1,regex2=type2...: use this to specify custom quantization type rules.\n\n");
+    printf("  --repack: Repack all tensors to the corresponding _r4/_r8 variant if available.\n\n");
+    printf("  --repack-pattern: Comma-separated list of regexes for matching tensor names to be repacked.\n\n");
     printf("Additional specific tensor quantization types used in the custom quant scheme 'CQS' (default is Q2_K):\n");
     printf("  --attn-q-type ggml_type: use this ggml_type for the attn_q.weight tensor.\n");
     printf("  --attn-k-type ggml_type: use this ggml_type for the attn_k.weight tensor.\n");
@@ -291,6 +304,28 @@ static ggml_type parse_ggml_type(const char * arg) {
     return result;
 }
+using CustomQ = std::pair<std::string, ggml_type>;
+static bool parse_custom_quants(const std::string& arg, std::vector<CustomQ>& custom_quants) {
+    for (const auto & item : string_split<std::string>(arg, ',')) {
+        auto pos = item.find('=');
+        if (pos == std::string::npos) {
+            fprintf(stderr, "Invalid custom quantization input %s\n", arg.c_str());
+            return false;
+        }
+        auto pattern = item.substr(0, pos);
+        auto type_as_string = item.substr(pos + 1);
+        auto type = parse_ggml_type(type_as_string.c_str());
+        if (type == GGML_TYPE_COUNT) {
+            fprintf(stderr, "Invalid quantization type '%s' in custom quantization input %s\n", type_as_string.c_str(), item.c_str());
+            return false;
+        }
+        printf("Adding custom rule %s -> %s\n", pattern.c_str(), ggml_type_name(type));
+        custom_quants.emplace_back(std::move(pattern), type);
+    }
+    return true;
+}
 int main(int argc, char ** argv) {
     if (argc < 3) {
         usage(argv[0]);
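The new parse_custom_quants above only validates and collects the (regex, type) rules; the code that applies them to tensor names lives elsewhere in the tree and is not shown in this diff. A self-contained sketch of the assumed matching behavior (std::string stands in for ggml_type so it compiles on its own, and first-match-wins is an assumption):

#include <cstdio>
#include <regex>
#include <string>
#include <utility>
#include <vector>

int main() {
    // as if parsed from: --custom-q "ffn_down=q6_K,attn_.*=q8_0"
    std::vector<std::pair<std::string, std::string>> rules = {
        {"ffn_down", "q6_K"},
        {"attn_.*",  "q8_0"},
    };
    const char * tensors[] = { "blk.0.ffn_down.weight", "blk.0.attn_q.weight", "token_embd.weight" };
    for (const char * name : tensors) {
        for (const auto & rule : rules) {
            if (std::regex_search(name, std::regex(rule.first))) { // substring match against the tensor name
                printf("%s -> %s\n", name, rule.second.c_str());   // first matching rule wins (assumption)
                break;
            }
        }
    }
    return 0;
}

Running this prints blk.0.ffn_down.weight -> q6_K and blk.0.attn_q.weight -> q8_0; token_embd.weight matches no rule and keeps the type chosen by the base ftype.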
@@ -302,12 +337,26 @@ int main(int argc, char ** argv) {
     std::string imatrix_file;
     std::vector<std::string> included_weights, excluded_weights;
     std::vector<llama_model_kv_override> kv_overrides;
+    std::vector<CustomQ> custom_quants;
+    std::vector<std::string> repack_patterns;
+    bool hide_imatrix = false;
     for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
         if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) {
             params.quantize_output_tensor = false;
+        } else if (strcmp(argv[arg_idx], "--ignore-imatrix-rules") == 0) {
+            params.ignore_imatrix_rules = true;
+        } else if (strcmp(argv[arg_idx], "--repack") == 0) {
+            params.only_repack = true;
+        } else if (strcmp(argv[arg_idx], "--repack-pattern") == 0) {
+            if (arg_idx < argc-1) {
+                auto p = string_split(argv[++arg_idx], ',');
+                repack_patterns.insert(repack_patterns.end(), p.begin(), p.end());
+            } else {
+                usage(argv[0]);
+            }
         } else if (strcmp(argv[arg_idx], "--output-tensor-type") == 0) {
             if (arg_idx < argc-1) {
                 params.output_tensor_type = parse_ggml_type(argv[++arg_idx]);
@@ -372,6 +421,10 @@ int main(int argc, char ** argv) {
             if (arg_idx == argc-1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) {
                 usage(argv[0]);
             }
+        } else if (strcmp(argv[arg_idx], "--custom-q") == 0) {
+            if (arg_idx == argc-1 || !parse_custom_quants(argv[++arg_idx], custom_quants)) {
+                usage(argv[0]);
+            }
         } else if (strcmp(argv[arg_idx], "--allow-requantize") == 0) {
             params.allow_requantize = true;
         } else if (strcmp(argv[arg_idx], "--pure") == 0) {
@@ -382,6 +435,8 @@ int main(int argc, char ** argv) {
             } else {
                 usage(argv[0]);
             }
+        } else if (strcmp(argv[arg_idx], "--hide-imatrix") == 0) {
+            hide_imatrix = true;
         } else if (strcmp(argv[arg_idx], "--include-weights") == 0) {
             if (arg_idx < argc-1) {
                 included_weights.emplace_back(argv[++arg_idx]);
@@ -401,6 +456,10 @@ int main(int argc, char ** argv) {
         }
     }
+    if (!repack_patterns.empty()) {
+        params.repack_pattern = &repack_patterns;
+    }
     if (argc - arg_idx < 2) {
         printf("%s: bad arguments\n", argv[0]);
         usage(argv[0]);
@@ -418,7 +477,11 @@ int main(int argc, char ** argv) {
             llama_model_kv_override kvo;
             std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_FILE);
             kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
-            strncpy(kvo.val_str, imatrix_file.c_str(), 127);
+            if (hide_imatrix) {
+                strncpy(kvo.val_str, "top_secret", 127);
+            } else {
+                strncpy(kvo.val_str, imatrix_file.c_str(), 127);
+            }
             kvo.val_str[127] = '\0';
             kv_overrides.emplace_back(std::move(kvo));
         }
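The copy/truncate/terminate sequence above is repeated for each imatrix override below. A hypothetical helper (not in the patch; it uses only the llama_model_kv_override fields already visible here) shows the idiom in one place:

static void set_kv_str(llama_model_kv_override & kvo, const char * key, const std::string & value) {
    std::strcpy(kvo.key, key);                 // key is a known short constant, as with std::strcpy above
    kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
    strncpy(kvo.val_str, value.c_str(), 127);  // truncate to the fixed buffer ...
    kvo.val_str[127] = '\0';                   // ... and always NUL-terminate
}

The block above would then read: set_kv_str(kvo, LLM_KV_QUANTIZE_IMATRIX_FILE, hide_imatrix ? std::string("top_secret") : imatrix_file);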
@@ -426,7 +489,11 @@ int main(int argc, char ** argv) {
             llama_model_kv_override kvo;
             std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_DATASET);
             kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
-            strncpy(kvo.val_str, imatrix_dataset.c_str(), 127);
+            if (hide_imatrix) {
+                strncpy(kvo.val_str, "top_secret", 127);
+            } else {
+                strncpy(kvo.val_str, imatrix_dataset.c_str(), 127);
+            }
             kvo.val_str[127] = '\0';
             kv_overrides.emplace_back(std::move(kvo));
         }
@@ -435,7 +502,11 @@ int main(int argc, char ** argv) {
             llama_model_kv_override kvo;
             std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_N_ENTRIES);
             kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
-            kvo.val_i64 = imatrix_data.size();
+            if (hide_imatrix) {
+                kvo.val_i64 = 0;
+            } else {
+                kvo.val_i64 = imatrix_data.size();
+            }
             kv_overrides.emplace_back(std::move(kvo));
         }
@@ -443,7 +514,11 @@ int main(int argc, char ** argv) {
             llama_model_kv_override kvo;
             std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_N_CHUNKS);
             kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
-            kvo.val_i64 = m_last_call;
+            if (hide_imatrix) {
+                kvo.val_i64 = 0;
+            } else {
+                kvo.val_i64 = m_last_call;
+            }
             kv_overrides.emplace_back(std::move(kvo));
         }
     }
@@ -452,6 +527,9 @@ int main(int argc, char ** argv) {
         kv_overrides.back().key[0] = 0;
         params.kv_overrides = &kv_overrides;
     }
+    if (!custom_quants.empty()) {
+        params.custom_quants = &custom_quants;
+    }
     llama_backend_init();
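Two wiring details worth noting: the element whose key[0] is zeroed above presumably acts as an end-of-array sentinel, since kv_overrides is handed over as a bare pointer with no separate count; and params.repack_pattern, params.kv_overrides, and params.custom_quants all point at locals of main, which is safe here because those vectors outlive the quantization call that consumes params.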