Mirror of https://github.com/ikawrakow/ik_llama.cpp.git (synced 2026-04-28 18:32:04 +00:00)
IQ1_M_R4: better 1.75 bpw quants (#187)
* iq1_m_r4: basics (quantize/dequantize)

* iq1_m_r4: Zen4 gemm

* iq1_m_r4: neon gemm

* iq1_m_r4: switch to q8_0_x4 also on AVX2/Zen4

  With the deltas being per group of 8, we cannot make use of the q8 sums stored in q8_1, so we get a tiny gain by using q8_0_x4.

* iq1_m_r4: rename mul_mat_iq1_m_r4_q8_1 to mul_mat_iq1_m_r4_q8_0

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
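The q8_1-vs-q8_0 remark in the commit message is worth unpacking. A q8_1 activation block caches the sum of its 32 quants, which lets a kernel fold a per-block weight delta into a single multiply-add; with iq1_m's deltas living on groups of 8, that cached block-level sum is unusable, so the activations might as well be quantized to the lighter q8_0 layout (scale only, no cached sum). Below is a minimal scalar sketch of that trade-off, with hypothetical names, float fields instead of ggml's fp16, and none of the actual SIMD kernels:

#include <stdint.h>

/* Simplified q8_1-style activation block: d is the scale,
 * s caches d * sum(qs) at quantization time. */
typedef struct { float d; float s; int8_t qs[32]; } blk_q8_1;

/* One weight block with a single per-block delta m, i.e. w_i = wd*(wq_i + m).
 * dot(a, w) = wd * ( d*sum(aq_i*wq_i) + m * d*sum(aq_i) ),
 * and d*sum(aq_i) is exactly the cached s, so the delta term
 * costs one multiply-add for the whole block. */
static float dot_per_block_delta(const blk_q8_1 *a, const int8_t wq[32],
                                 float wd, float m) {
    int32_t acc = 0;
    for (int i = 0; i < 32; ++i) acc += a->qs[i] * wq[i];
    return wd * (a->d * acc + m * a->s);
}

/* iq1_m carries a delta per group of 8. Each group then needs its own
 * sum of activation quants, so the block-level s buys nothing and the
 * cheaper q8_0 layout can be used for the activations instead. */
static float dot_per_group_delta(const blk_q8_1 *a, const int8_t wq[32],
                                 float wd, const float m[4]) {
    float acc = 0.0f;
    for (int g = 0; g < 4; ++g) {
        int32_t sdot = 0, ssum = 0;
        for (int i = 8*g; i < 8*g + 8; ++i) {
            sdot += a->qs[i] * wq[i];  /* quant-quant dot for this group */
            ssum += a->qs[i];          /* group-local sum, recomputed    */
        }
        acc += wd * a->d * (sdot + m[g] * ssum);
    }
    return acc;
}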
@@ -432,6 +432,7 @@ extern "C" {
         GGML_TYPE_IQ3_S_R4  = 221,
         GGML_TYPE_IQ2_S_R4  = 222,
         GGML_TYPE_IQ4_XS_R4 = 223,
+        GGML_TYPE_IQ1_M_R4  = 229,
         GGML_TYPE_BF16_R16  = 230,
         GGML_TYPE_Q6_0_R4   = 233,
         GGML_TYPE_IQ2_BN_R4 = 335,
@@ -516,6 +517,7 @@ extern "C" {
         GGML_FTYPE_MOSTLY_IQ3_S_R4  = 220, // except 1d tensors
         GGML_FTYPE_MOSTLY_IQ2_S_R4  = 221, // except 1d tensors
         GGML_FTYPE_MOSTLY_IQ4_XS_R4 = 222, // except 1d tensors
+        GGML_FTYPE_MOSTLY_IQ1_M_R4  = 223, // except 1d tensors
         GGML_FTYPE_MOSTLY_BF16_R16  = 224, // except 1d tensors
         GGML_FTYPE_MOSTLY_Q6_0_R4   = 227, // except 1d tensors
         GGML_FTYPE_MOSTLY_IQ2_BN_R4 = 329, // except 1d tensors
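Elsewhere in the commit (hunks not shown on this page) the new ftype also has to be mapped to its tensor type; in ggml that happens in the switch inside ggml_ftype_to_ggml_type(). Assuming the new case follows the pattern of the existing _R4 entries, it would presumably look like:

    case GGML_FTYPE_MOSTLY_IQ1_M_R4: wtype = GGML_TYPE_IQ1_M_R4; break;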