Mirror of https://github.com/ikawrakow/ik_llama.cpp.git (synced 2026-01-26 17:20:01 +00:00)
Fix imatrix calculation for MLA models (#411)
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
@@ -60,7 +60,7 @@ private:
     int m_last_call = 0;
     int m_last_layer = 9999;
     int m_last_ffn = -1;
-    std::vector<float> m_src1_data;
+    std::vector<char> m_src1_data;
     std::vector<char> m_ids; // the expert ids from ggml_mul_mat_id
     std::vector<float> m_last_input;
     std::vector<float> m_ffn_input;
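Note on this first change: the staging buffer becomes std::vector<char> because it is now sized and filled in bytes (ggml_nbytes(src1)), and for a strided view that byte footprint can exceed ggml_nelements(src1)*sizeof(float). A minimal sketch of the size mismatch, assuming only ggml's ne[]-in-elements / nb[]-in-bytes convention (MockTensor and its helpers are illustrative stand-ins, not the ggml API):

    // Minimal sketch: element count vs. byte footprint for a strided 3D view.
    // MockTensor is a mock, not ggml; only the ne[] = elements per dimension,
    // nb[] = byte stride per dimension convention is borrowed from ggml.
    #include <cstddef>
    #include <cstdio>

    struct MockTensor {
        long   ne[3]; // elements per dimension
        size_t nb[3]; // byte stride per dimension
    };

    long nelements(const MockTensor & t) { return t.ne[0]*t.ne[1]*t.ne[2]; }

    // bytes actually spanned by the view: offset of the last row plus its width
    size_t nbytes(const MockTensor & t) {
        return (t.ne[2]-1)*t.nb[2] + (t.ne[1]-1)*t.nb[1] + t.ne[0]*t.nb[0];
    }

    int main() {
        // f32 rows 512 elements wide, but spaced 576 floats apart in memory
        MockTensor v = {{512, 8, 16}, {4, 576*4, 576*4*8}};
        printf("nelements*sizeof(float): %zu bytes\n", (size_t)nelements(v)*sizeof(float));
        printf("byte footprint:          %zu bytes\n", nbytes(v));
        // The footprint is larger, so a buffer sized in elements would be too
        // small for a byte-wise copy; hence the switch to std::vector<char>.
        return 0;
    }

Sizing the buffer in elements while copying ggml_nbytes(src1) bytes, as the old code did, could therefore overrun the buffer for non-contiguous inputs.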
@@ -189,11 +189,12 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
     const bool is_host = ggml_backend_buffer_is_host(src1->buffer);

     if (!is_host) {
-        m_src1_data.resize(ggml_nelements(src1));
-        ggml_backend_tensor_get(src1, m_src1_data.data(), 0, ggml_nbytes(src1));
+        auto nbytes = ggml_nbytes(src1);
+        m_src1_data.resize(nbytes);
+        ggml_backend_tensor_get(src1, m_src1_data.data(), 0, nbytes);
    }

-    const float * data = is_host ? (const float *) src1->data : m_src1_data.data();
+    const float * data = is_host ? (const float *) src1->data : (const float *)m_src1_data.data();

     if (m_collect_lsim) {
         if (wname.find(".ffn_") != std::string::npos) {
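This hunk pairs with the member-type change above: when src1 lives in a non-host (e.g. GPU) buffer, its raw bytes are fetched into the staging vector, and since the vector now holds char the pointer is cast back to const float *. A short sketch of the idiom, assuming the backend copy behaves like a plain memcpy (mock_backend_get and staged_f32 are hypothetical names, not ggml functions):

    // Sketch of the byte-wise staging idiom; the two function names are mocks.
    #include <cstddef>
    #include <cstring>
    #include <vector>

    static void mock_backend_get(const void * device, void * host, size_t nbytes) {
        std::memcpy(host, device, nbytes); // a real backend may do a device->host transfer
    }

    static const float * staged_f32(const void * device_data, size_t nbytes,
                                    std::vector<char> & staging) {
        staging.resize(nbytes);                        // sized in bytes, not elements
        mock_backend_get(device_data, staging.data(), nbytes);
        return (const float *)staging.data();          // reinterpret the raw bytes as f32
    }

    int main() {
        float src[4] = {1.f, 2.f, 3.f, 4.f};
        std::vector<char> staging;
        const float * data = staged_f32(src, sizeof(src), staging);
        return data[3] == 4.f ? 0 : 1;
    }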
@@ -331,10 +332,17 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
         }
         auto & e = m_stats[wname];
         if (e.values.empty()) {
-            e.values.resize(src1->ne[0], 0);
-            e.counts.resize(src1->ne[0], 0);
+            if (src0->ne[3] > 1) {
+                fprintf(stderr, "Unsupported 4D tensor %s\n", wname.c_str());
+                exit(1);
+            }
+            // If we have a 3D tensor, as is the case for attn_k_b and attn_v_b in DeepSeek MLA models,
+            // then we need to compute an imatrix for each head, and not just one imatrix for all heads.
+            // Hence, the storage we need is src0->ne[0]*src0->ne[2].
+            e.values.resize(src0->ne[0]*src0->ne[2], 0);
+            e.counts.resize(src0->ne[0]*src0->ne[2], 0);
         }
-        else if (e.values.size() != (size_t)src1->ne[0]) {
-            fprintf(stderr, "Oops: inconsistent size for %s (%d vs %d)\n", wname.c_str(), (int)e.values.size(), (int)src1->ne[0]);
+        else if (e.values.size() != (size_t)(src0->ne[0]*src0->ne[2])) {
+            fprintf(stderr, "Oops: inconsistent size for %s (%d vs %d)\n", wname.c_str(), (int)e.values.size(), (int)(src0->ne[0]*src0->ne[2]));
             exit(1); //GGML_ABORT("fatal error");
         }
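For a 3D weight such as attn_k_b or attn_v_b (per-head slices of src0->ne[0] columns, with src0->ne[2] heads), the accumulators now hold one block of src0->ne[0] entries per head, concatenated. A small self-contained sketch of that layout, with made-up sizes:

    // Worked sketch of the per-head accumulator layout; the sizes are
    // illustrative, not taken from a real model.
    #include <cstdio>
    #include <vector>

    int main() {
        const int ne0    = 128; // src0->ne[0]: columns of one head's slice
        const int n_head = 16;  // src0->ne[2] for a 3D tensor such as attn_k_b

        // one block of ne0 accumulators per head, concatenated
        std::vector<float> values(ne0 * n_head, 0.0f);
        std::vector<int>   counts(ne0 * n_head, 0);

        // the accumulator for column j of head h lives at h*ne0 + j
        auto slot = [&](int h, int j) { return h*ne0 + j; };

        printf("total slots: %zu, head 3 block starts at %d\n", values.size(), slot(3, 0));
        return 0;
    }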
@@ -342,14 +350,20 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
         if (m_params.verbosity > 1) {
             printf("%s[%d]: %32s, %s, %5d x %5d, %d\n", __func__, m_last_call, wname.c_str(), ggml_op_name(t->op), (int)src1->ne[0], (int)src1->ne[1], (int)src1->type);
         }
-        for (int row = 0; row < (int)(src1->ne[1]*src1->ne[2]); ++row) {
-            const float * x = data + row * src1->ne[0];
-            for (int j = 0; j < (int)src1->ne[0]; ++j) {
-                e.values[j] += x[j]*x[j];
-                e.counts[j]++;
-                if (!std::isfinite(e.values[j])) {
-                    fprintf(stderr, "%f detected in %s\n", e.values[j], wname.c_str());
-                    exit(1);
+        int rk2 = src1->ne[2]/src0->ne[2];
+        for (int i12 = 0; i12 < (int)src1->ne[2]; ++i12) { // i.e., loop over attention heads for MLA models
+            int i02 = i12/rk2;
+            auto values = e.values.data() + i02*src0->ne[0];
+            auto counts = e.counts.data() + i02*src0->ne[0];
+            for (int i11 = 0; i11 < (int)src1->ne[1]; ++i11) {
+                const float * x = (const float *)((const char *)data + i11*src1->nb[1] + i12*src1->nb[2]);
+                for (int j = 0; j < (int)src1->ne[0]; ++j) {
+                    values[j] += x[j]*x[j];
+                    counts[j]++;
+                    if (!std::isfinite(values[j])) {
+                        fprintf(stderr, "%f detected in %s\n", values[j], wname.c_str());
+                        exit(1);
+                    }
                 }
             }
         }
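The rewritten loop addresses src1 through its byte strides nb[1] and nb[2] rather than assuming contiguous rows, and rk2 = src1->ne[2]/src0->ne[2] maps each activation slice back to the weight head it multiplies (handling broadcast along dimension 2). A self-contained mock of the same indexing, with plain vectors standing in for ggml tensors and deliberately simple contiguous strides:

    // Self-contained sketch of the per-head accumulation over a strided 3D
    // activation tensor (mock data; ne[] in elements and nb[] in bytes are the
    // only conventions assumed from ggml).
    #include <cstdio>
    #include <vector>

    int main() {
        const int ne0 = 4, n_tok = 3;          // row width, rows per slice
        const int src1_ne2 = 4, src0_ne2 = 2;  // 4 activation slices feed 2 weight heads

        // contiguous mock activations; a real src1 may have padded strides
        std::vector<float> act(ne0 * n_tok * src1_ne2, 1.0f);
        const char * data = (const char *)act.data();
        const size_t nb1 = ne0 * sizeof(float);   // byte stride between rows
        const size_t nb2 = nb1 * n_tok;           // byte stride between slices

        std::vector<float> values(ne0 * src0_ne2, 0.0f);
        std::vector<int>   counts(ne0 * src0_ne2, 0);

        const int rk2 = src1_ne2 / src0_ne2;      // broadcast factor along dim 2
        for (int i12 = 0; i12 < src1_ne2; ++i12) {
            const int i02 = i12 / rk2;            // which weight head this slice feeds
            float * v = values.data() + i02 * ne0;
            int   * c = counts.data() + i02 * ne0;
            for (int i11 = 0; i11 < n_tok; ++i11) {
                const float * x = (const float *)(data + i11*nb1 + i12*nb2);
                for (int j = 0; j < ne0; ++j) {
                    v[j] += x[j]*x[j];            // sum of squared activations
                    c[j] += 1;
                }
            }
        }
        printf("head 0, column 0: sum=%g over %d samples\n", values[0], counts[0]);
        return 0;
    }

With rk2 = 2, slices 0 and 1 accumulate into head 0 and slices 2 and 3 into head 1, so each column sees rk2*n_tok = 6 samples; this is exactly the mapping the patch applies to MLA attention heads.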