Mirror of https://github.com/ikawrakow/ik_llama.cpp.git, synced 2026-02-09 16:00:12 +00:00.
Use Q6_0 instead of Q5_1 for tensors incompatible with IQ5_K/Q5_K (#116)
This commit is contained in:
Committed by: Iwan Kawrakow
Parent: 4d2fbde0cb
Commit: 8ad84b9fab
@@ -16048,7 +16048,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             case GGML_TYPE_IQ4_K:
             case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break;
             case GGML_TYPE_IQ5_K:
-            case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q5_1; break;
+            case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q6_0; break;
             case GGML_TYPE_IQ6_K:
             case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break;
             default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
|
||||
Reference in New Issue
Block a user