Ling-1T convert fixup (#837)

* Conditionally write moe_shared_expert_intermediate_size

Ling-1T config.json does *not* have `moe_shared_expert_intermediate_size`.
Ling-flash-2.0a *does* have it.

This small patch just makes the gguf_writer detect whether the key is present
and write it conditionally.

* Fix Ling-1T missing moe_shared_expert_intermediate_size

Thanks CISC for the proper patch to include the needed values!
ubergarm
2025-10-17 00:52:31 -04:00
committed by GitHub
parent cde642e591
commit 32540ac619

@@ -4494,7 +4494,7 @@ class BailingMoeV2Model(Model):
         self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
         self.gguf_writer.add_vocab_size(hparams["vocab_size"])
         self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
-        self.gguf_writer.add_expert_shared_feed_forward_length(hparams["moe_shared_expert_intermediate_size"])
+        self.gguf_writer.add_expert_shared_feed_forward_length(hparams.get("moe_shared_expert_intermediate_size", hparams["moe_intermediate_size"] * hparams["num_shared_experts"]))
         self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"])
         self.gguf_writer.add_expert_count(hparams["num_experts"])
         self.gguf_writer.add_expert_shared_count(hparams["num_shared_experts"])