bf16_r4 is not working yet

This commit is contained in:
Iwan Kawrakow
2024-12-05 06:53:41 +02:00
parent 20758edcae
commit 69689d60e5
9 changed files with 266 additions and 1 deletions

View File

@@ -77,6 +77,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
{ "Q4_0_8_8", LLAMA_FTYPE_MOSTLY_Q4_0_8_8, " 4.34G, +0.4685 ppl @ Llama-3-8B", },
{ "F16", LLAMA_FTYPE_MOSTLY_F16, "14.00G, -0.0020 ppl @ Mistral-7B", },
{ "BF16", LLAMA_FTYPE_MOSTLY_BF16, "14.00G, -0.0050 ppl @ Mistral-7B", },
{ "BF16_R4", LLAMA_FTYPE_MOSTLY_BF16_R4, "14.00G, -0.0050 ppl @ Mistral-7B", },
{ "F32", LLAMA_FTYPE_ALL_F32, "26.00G @ 7B", },
// Note: Ensure COPY comes after F32 to avoid ftype 0 from matching.
{ "COPY", LLAMA_FTYPE_ALL_F32, "only copy tensors, no quantizing", },