mirror of
https://github.com/ikawrakow/ik_llama.cpp.git
synced 2026-02-07 15:00:11 +00:00
bitnet: add 2 bpw quantization
The scalar dot product already achieves 37 t/s for TG!
This commit is contained in:
@@ -341,6 +341,10 @@ int main(int argc, char ** argv) {
|
||||
if (!layer_included(params, kv_tensor.first)) {
|
||||
continue;
|
||||
}
|
||||
if (kv_tensor.second->ne[0] == 1 || kv_tensor.second->ne[1] == 1) {
|
||||
// we never quantize those
|
||||
continue;
|
||||
}
|
||||
if (params.verbose) {
|
||||
printf("%s: type %s, size %" PRId64 "\n", kv_tensor.first.c_str(), ggml_type_name(kv_tensor.second->type), ggml_nelements(kv_tensor.second));
|
||||
}
|
||||
@@ -386,6 +390,10 @@ int main(int argc, char ** argv) {
|
||||
if (!layer_included(params, kv_tensor.first)) {
|
||||
continue;
|
||||
}
|
||||
if (kv_tensor.second->ne[0] == 1 || kv_tensor.second->ne[1] == 1) {
|
||||
// we never quantize those
|
||||
continue;
|
||||
}
|
||||
if (params.verbose) {
|
||||
printf(" %s ...\n", kv_tensor.first.c_str());
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user