From f74dd771431f939ef97dc628c717af6afb6f916a Mon Sep 17 00:00:00 2001
From: Kawrakow
Date: Fri, 5 Sep 2025 19:05:02 +0200
Subject: [PATCH] Fix ggml_is_contiguously_allocated (#764)

Co-authored-by: Iwan Kawrakow
---
 ggml/src/ggml.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 94c1cc78..f41bfb2b 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -4743,7 +4743,9 @@ GGML_CALL bool ggml_is_permuted(const struct ggml_tensor * tensor) {
 }
 
 GGML_CALL bool ggml_is_contiguously_allocated(const struct ggml_tensor * tensor) {
-    return ggml_nbytes(tensor) == ggml_nelements(tensor) * ggml_type_size(tensor->type)/ggml_blck_size(tensor->type);
+    return ggml_nbytes(tensor) == ggml_nrows(tensor)*ggml_row_size(tensor->type, tensor->ne[0]);
+    // The ongoing mainline obsession with blocks of quants does not work with per tensor row scales
+    //return ggml_nbytes(tensor) == ggml_nelements(tensor) * ggml_type_size(tensor->type)/ggml_blck_size(tensor->type);
 }
 
 GGML_CALL bool ggml_is_contiguous_channels(const struct ggml_tensor * tensor) {