Fix ggml_is_contiguously_allocated (#764)

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Author: Kawrakow
Date:   2025-09-05 19:05:02 +02:00
Committed by: GitHub
Parent: 426032c27a
Commit: f74dd77143


@@ -4743,7 +4743,9 @@ GGML_CALL bool ggml_is_permuted(const struct ggml_tensor * tensor) {
 }
 
 GGML_CALL bool ggml_is_contiguously_allocated(const struct ggml_tensor * tensor) {
-    return ggml_nbytes(tensor) == ggml_nelements(tensor) * ggml_type_size(tensor->type)/ggml_blck_size(tensor->type);
+    return ggml_nbytes(tensor) == ggml_nrows(tensor)*ggml_row_size(tensor->type, tensor->ne[0]);
+    // The ongoing mainline obsession with blocks of quants does not work with per tensor row scales
+    //return ggml_nbytes(tensor) == ggml_nelements(tensor) * ggml_type_size(tensor->type)/ggml_blck_size(tensor->type);
 }
 
 GGML_CALL bool ggml_is_contiguous_channels(const struct ggml_tensor * tensor) {
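
Why the per-row formula matters: for quant types that carry an extra per-row scale, the byte count of a row is no longer `ne[0] * type_size / blck_size`, so the old per-block check rejected tensors that are in fact densely packed. Below is a minimal, self-contained sketch of the arithmetic using a made-up layout (32-element blocks at 16 bytes each, plus a 2-byte scale appended to every row); the constants and the local `row_size()` helper are illustrative stand-ins, not a real ggml type:

```c
#include <stdio.h>
#include <stdint.h>

/* Hypothetical row-scaled quant layout (NOT a real ggml type):
 * weights are packed in blocks of 32 elements at 16 bytes per block,
 * and each row additionally carries one 2-byte scale at its end. */
enum { BLCK_SIZE = 32, BLOCK_BYTES = 16, ROW_SCALE_BYTES = 2 };

/* Bytes needed for one densely packed row of ne0 elements, including
 * the row scale (plays the role of ggml_row_size(type, ne0)). */
static size_t row_size(int64_t ne0) {
    return (size_t)(ne0 / BLCK_SIZE) * BLOCK_BYTES + ROW_SCALE_BYTES;
}

int main(void) {
    const int64_t ne0 = 256, nrows = 8;
    const int64_t nelements = ne0 * nrows;

    /* Actual size of a contiguously allocated tensor of this type. */
    const size_t nbytes = (size_t)nrows * row_size(ne0);

    /* Old check: per-block arithmetic only; the per-row scale bytes
     * are invisible to type_size/blck_size, so this undercounts. */
    const size_t old_estimate = (size_t)nelements * BLOCK_BYTES / BLCK_SIZE;

    /* New check: per-row arithmetic, which accounts for the scales. */
    const size_t new_estimate = (size_t)nrows * row_size(ne0);

    printf("nbytes       = %zu\n", nbytes);        /* 1040 */
    printf("old estimate = %zu -> %s\n", old_estimate,
           old_estimate == nbytes ? "contiguous" : "NOT contiguous (wrong!)");
    printf("new estimate = %zu -> %s\n", new_estimate,
           new_estimate == nbytes ? "contiguous" : "NOT contiguous");
    return 0;
}
```

With this layout the old formula counts 1024 bytes for 8 rows of 256 elements while the tensor really occupies 1040, so the check would wrongly report a densely packed tensor as not contiguously allocated. Comparing against `nrows * row_size(ne0)` instead, as the patched code does via `ggml_nrows()` and `ggml_row_size()`, matches exactly, and it remains correct for ordinary block-quantized types, where the two formulas agree.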