Allow quantization of ffn_gate_inp (#896)

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Kawrakow
2025-11-05 10:44:32 +02:00
committed by GitHub
parent 7978f04996
commit e68f50be9a
4 changed files with 19 additions and 2 deletions


@@ -455,6 +455,7 @@ extern "C" {
enum ggml_type ffn_gate_type; // feedforward network gate type
enum ggml_type ffn_down_type; // feedforward network down type
enum ggml_type ffn_up_type; // feedforward network up type
enum ggml_type ffn_gate_inp_type; // routed experts probabilities type (relevant for MoE models only)
bool allow_requantize; // allow quantizing non-f32/f16 tensors
bool quantize_output_tensor; // quantize output.weight
bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
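
A minimal usage sketch, not taken from this commit: it assumes the public llama_model_quantize() / llama_model_quantize_default_params() API from upstream llama.cpp. Only the ffn_gate_inp_type field comes from the diff above; the chosen quantization types and the file names are hypothetical.

```cpp
#include "llama.h"

int main() {
    // Start from the library defaults, then override selected fields.
    llama_model_quantize_params params = llama_model_quantize_default_params();
    params.ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M;   // overall quantization mix

    // New field from this commit: explicitly pick a type for the routed-experts
    // probabilities tensor (ffn_gate_inp, MoE models only). Q8_0 is a conservative
    // illustrative choice; leaving the field at its default keeps prior behavior.
    params.ffn_gate_inp_type = GGML_TYPE_Q8_0;

    // Hypothetical input/output paths for illustration.
    uint32_t n_failed = llama_model_quantize("model-f16.gguf", "model-q4_k_m.gguf", &params);
    return n_failed == 0 ? 0 : 1;
}
```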