iqk_mul_mat: be independent of llamafile_sgemm (WIP)

* Remove iqk_mul_mat from llamafile_sgemm
* Pass tensor types and strides to iqk_mul_mat

It is marked WIP because so far it has only been tested on __aarch64__.
Iwan Kawrakow
2024-06-11 09:12:22 +02:00
parent 3593891f39
commit ad53eabf87
5 changed files with 77 additions and 101 deletions
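
To make the change concrete: since llamafile_sgemm no longer forwards into iqk_mul_mat, the iqk entry point has to learn everything about the operands from its own arguments. Below is a minimal sketch of such a type- and stride-aware interface; the parameter names and ordering are an assumption for illustration, not the exact signature introduced by this commit.

// Hypothetical prototype: both operand types and both row strides are
// passed explicitly, so the function no longer depends on llamafile_sgemm
// having pre-screened the tensors.
#include <stdbool.h>
#include <stdint.h>

bool iqk_mul_mat(int64_t m, int64_t n, int64_t k,
                 int typeA, const void * A, int64_t strideA,  // type + row stride of A
                 int typeB, const void * B, int64_t strideB,  // type + row stride of B
                 float * C, int64_t strideC,                  // result stays f32
                 int ith, int nth);                           // thread index / thread count

Returning bool preserves the contract visible in the removed block below: true means the iqk kernels handled the multiplication, false means the caller should fall back to its generic path.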


@@ -51,9 +51,6 @@
 #include "sgemm.h"
 #include "ggml-impl.h"
 #include "ggml-quants.h"
-#if GGML_USE_IQK_MULMAT
-#include "iqk_mul_mat.h"
-#endif
 #ifdef _MSC_VER
 #define NOINLINE __declspec(noinline)
@@ -868,26 +865,6 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
     if (Ctype != GGML_TYPE_F32)
         return false;
-#if GGML_USE_IQK_MULMAT
-#if defined __AVX2__ && defined __FMA__
-    bool is_accepted_float_type = k >= 32 &&
-        ((Atype == GGML_TYPE_F16 && Btype == GGML_TYPE_F32) || (Atype == GGML_TYPE_F32 && Btype == GGML_TYPE_F16));
-#elif defined __ARM_FEATURE_FP16_VECTOR_ARITHMETIC && defined __ARM_FEATURE_FMA
-    bool is_accepted_float_type = k >= 32 && Atype == GGML_TYPE_F16 && Btype == GGML_TYPE_F16;
-#else
-    bool is_accepted_float_type = false;
-#endif
-    if (task == GGML_TASK_TYPE_INIT && is_accepted_float_type) {
-        return true;
-    }
-    if (task == GGML_TASK_TYPE_COMPUTE && is_accepted_float_type) {
-        if (iqk_mul_mat(m, n, k, Atype, A, B, (float *)C, ldc, ith, nth)) {
-            return true;
-        }
-    }
-#endif
     switch (Atype) {
     case GGML_TYPE_F32: {
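
With that block gone, llamafile_sgemm handles only its own kernels, and the architecture-specific float-type gating (k >= 32, the per-platform f16/f32 combinations) becomes iqk_mul_mat's own business. A hedged sketch of what a replacement call site could look like, reusing the assumed prototype above; all names here are illustrative, not taken from the commit:

// Hypothetical dispatch in the caller (e.g. ggml's mat-mul path), built on
// the sketched interface above; the real commit may wire this up differently.
static bool try_iqk_mul_mat(int64_t m, int64_t n, int64_t k,
                            int Atype, const void * A, int64_t strideA,
                            int Btype, const void * B, int64_t strideB,
                            float * C, int64_t strideC, int ith, int nth) {
#if GGML_USE_IQK_MULMAT
    // iqk_mul_mat now sees both tensor types and both row strides and
    // decides for itself whether it supports this combination.
    if (iqk_mul_mat(m, n, k, Atype, A, strideA, Btype, B, strideB,
                    C, strideC, ith, nth))
        return true;
#endif
    return false;  // caller falls back to the generic implementation
}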