Adding q5_0_r4
We get PP-512(LLaMA-3.1-8B) = 256.7 t/s on a Ryzen-7950X. We even get a TG-128 improvement, from 11.1 t/s to 11.7 t/s.
@@ -408,8 +408,10 @@ extern "C" {
         GGML_TYPE_IQ4_KSS   = 146,
         GGML_TYPE_Q4_0_R4   = 202,
+        GGML_TYPE_Q5_0_R4   = 206,
         GGML_TYPE_Q8_0_R4   = 208,
         GGML_TYPE_IQ4_NL_X4 = 220,
         GGML_TYPE_Q6_0_R4   = 233,
         GGML_TYPE_COUNT,
     };
@@ -471,7 +473,9 @@ extern "C" {
         //
         GGML_FTYPE_MOSTLY_Q4_0_R4   = 202, // except 1d tensors
         GGML_FTYPE_MOSTLY_Q8_0_R4   = 207, // except 1d tensors
+        GGML_FTYPE_MOSTLY_Q5_0_R4   = 208, // except 1d tensors
         GGML_FTYPE_MOSTLY_IQ4_NL_X4 = 219, // except 1d tensors
         GGML_FTYPE_MOSTLY_Q6_0_R4   = 227, // except 1d tensors
     };

     // available tensor operations:
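For context, a minimal C sketch of how the newly added enum value can be exercised through ggml's public type-introspection helpers (ggml_type_name, ggml_blck_size, ggml_type_size, all declared in ggml.h). This is not part of the commit; the printed block size and bytes-per-block depend on this repository's q5_0_r4 implementation, which is not shown in these hunks.

    // Sketch only (not from this commit): query metadata for the new type
    // via ggml's public helpers. Output values depend on the q5_0_r4 layout
    // defined elsewhere in this repository.
    #include <stdio.h>
    #include "ggml.h"

    int main(void) {
        enum ggml_type t = GGML_TYPE_Q5_0_R4;
        printf("%s: block size = %lld, bytes per block = %zu\n",
               ggml_type_name(t),
               (long long) ggml_blck_size(t),
               ggml_type_size(t));
        return 0;
    }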