Integrated 32x6 DGEMM kernel for zen4 and its related changes are added.

Details:
- Now AOCL BLIS uses an AVX512 32x6 DGEMM kernel for the native code path.
  Thanks to Moore, Branden <Branden.Moore@amd.com> for suggesting and
  implementing these optimizations.
- In the initial version of 32x6 DGEMM kernel, to broadcast elements of B packed
  we perform a load into xmm (2 elements), broadcast into zmm from xmm, and then to get the
  next element, we do vpermilpd(xmm). This logic is replaced with direct broadcast from
  memory, since the elements of Bpack are stored contiguously, the first broadcast fetches
  the cacheline and then subsequent broadcasts happen faster. We use two registers for broadcast
  and interleave broadcast operation with FMAs to hide any memory latencies.
- Native dTRSM uses 16x14 dgemm - therefore we need to override the default blkszs (MR,NR,..)
  when executing trsm. We call bli_zen4_override_trsm_blkszs(cntx_local) on a local cntx_t object
  for double data-type as well in the function bli_trsm_front(), bli_trsm_xx_ker_var2, xx = {ll,lu,rl,ru}.
  Renamed "BLIS_GEMM_AVX2_UKR" to "BLIS_GEMM_FOR_TRSM_UKR" and in the bli_cntx_init_zen4() we replaced
  dgemm kernel for TRSM with 16x14 dgemm kernel.
- New packm kernels - 16xk, 24xk and 32xk are added.
- New 32xk packm reference kernel is added in bli_packm_cxk_ref.c and it is
  enabled for zen4 config (bli_dpackm_32xk_zen4_ref() )
- Copyright year updated for modified files.
- cleaned up code for "zen" config - removed unused packm kernels declaration in kernels/zen/bli_kernels.h
- [SWLCSG-1374], [CPUPL-2918]

Change-Id: I576282382504b72072a6db068eabd164c8943627
This commit is contained in:
Kiran Varaganti
2023-01-10 12:04:55 +05:30
parent 0a699c45f0
commit 201db7883c
22 changed files with 1558 additions and 47 deletions

View File

@@ -1,4 +1,4 @@
##Copyright (C) 2022, Advanced Micro Devices, Inc. All rights reserved.##
##Copyright (C) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.##
cmake_minimum_required(VERSION 3.0.0)
@@ -336,10 +336,11 @@ endif()
if(${TARGET_ARCH} STREQUAL zen4 OR
${TARGET_ARCH} STREQUAL amdzen)
set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/kernels/zen4/1/bli_amaxv_zen_int_avx512.c PROPERTIES COMPILE_FLAGS /arch:AVX512)
set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/kernels/zen4/3/bli_gemmtrsm_l_zen_16x14.c PROPERTIES COMPILE_FLAGS /arch:AVX512)
set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/kernels/zen4/3/bli_gemmtrsm_u_zen_16x14.c PROPERTIES COMPILE_FLAGS /arch:AVX512)
set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/kernels/skx/3/bli_dgemm_skx_asm_16x14.c PROPERTIES COMPILE_FLAGS /arch:AVX512)
set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/kernels/zen4/1/bli_amaxv_zen_int_avx512.c PROPERTIES COMPILE_FLAGS /arch:AVX512)
set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/kernels/zen4/3/bli_dgemm_zen4_asm_32x6.c PROPERTIES COMPILE_FLAGS /arch:AVX512)
set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/kernels/zen4/3/bli_gemmtrsm_l_zen_16x14.c PROPERTIES COMPILE_FLAGS /arch:AVX512)
set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/kernels/zen4/3/bli_gemmtrsm_u_zen_16x14.c PROPERTIES COMPILE_FLAGS /arch:AVX512)
set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/kernels/skx/3/bli_dgemm_skx_asm_16x14.c PROPERTIES COMPILE_FLAGS /arch:AVX512)
set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/kernels/skx/3/bli_sgemm_skx_asm_32x12_l2.c PROPERTIES COMPILE_FLAGS /arch:AVX512)
endif()

View File

@@ -109,6 +109,10 @@ int main( int argc, char** argv )
printf("Error opening output file %s\n", argv[2]);
exit(1);
}
if (argc > 3)
{
n_repeats = atoi(argv[3]);
}
fprintf(fout, "Dt transa transb m n k alphaR alphaI lda ldb betaR betaI ldc gflops\n");

View File

@@ -4,7 +4,7 @@
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2022, Advanced Micro Devices, Inc. All rights reserved.
Copyright (C) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -41,12 +41,12 @@
#define BLI_CNTX_DEFAULT_BLKSZ_LIST(blkszs) \
/* s d c z */ \
bli_blksz_init_easy( &blkszs[ BLIS_MR ], 32, 16, 3, 3 ); \
bli_blksz_init_easy( &blkszs[ BLIS_NR ], 12, 14, 8, 4 ); \
bli_blksz_init_easy( &blkszs[ BLIS_MC ], 512, 240, 144, 18 ); \
bli_blksz_init_easy( &blkszs[ BLIS_MR ], 32, 32, 3, 3 ); \
bli_blksz_init_easy( &blkszs[ BLIS_NR ], 12, 6, 8, 4 ); \
bli_blksz_init_easy( &blkszs[ BLIS_MC ], 512, 128, 144, 18 ); \
bli_blksz_init ( &blkszs[ BLIS_KC ], 480, 512, 256, 566, \
480, 320, 256, 566 ); \
bli_blksz_init_easy( &blkszs[ BLIS_NC ], 6144, 4004, 4080, 256 ); \
bli_blksz_init_easy( &blkszs[ BLIS_NC ], 6144, 4002, 4080, 256 ); \
\
bli_blksz_init_easy( &blkszs[ BLIS_AF ], 8, 8, -1, -1 ); \
bli_blksz_init_easy( &blkszs[ BLIS_DF ], 8, 8, -1, -1 ); \
@@ -68,19 +68,20 @@ void bli_cntx_init_zen4( cntx_t* cntx )
10,
// gemm
BLIS_GEMM_UKR, BLIS_FLOAT , bli_sgemm_skx_asm_32x12_l2, FALSE,
BLIS_GEMM_UKR, BLIS_DOUBLE, bli_dgemm_skx_asm_16x14, FALSE,
BLIS_GEMM_UKR, BLIS_SCOMPLEX, bli_cgemm_haswell_asm_3x8, TRUE,
BLIS_GEMM_UKR, BLIS_DCOMPLEX, bli_zgemm_haswell_asm_3x4, TRUE,
BLIS_GEMM_UKR, BLIS_DOUBLE, bli_dgemm_zen4_asm_32x6, FALSE,
BLIS_GEMM_UKR, BLIS_SCOMPLEX, bli_cgemm_haswell_asm_3x8, TRUE,
BLIS_GEMM_UKR, BLIS_DCOMPLEX, bli_zgemm_haswell_asm_3x4, TRUE,
BLIS_GEMM_AVX2_UKR, BLIS_FLOAT, bli_sgemm_haswell_asm_6x16, TRUE,
BLIS_GEMM_AVX2_UKR, BLIS_DOUBLE, bli_dgemm_haswell_asm_6x8, TRUE,
// Different GEMM kernels are used for TRSM for zen4 architecture
BLIS_GEMM_FOR_TRSM_UKR, BLIS_FLOAT, bli_sgemm_haswell_asm_6x16, TRUE,
BLIS_GEMM_FOR_TRSM_UKR, BLIS_DOUBLE, bli_dgemm_skx_asm_16x14, FALSE,
// gemmtrsm_l
BLIS_GEMMTRSM_L_UKR, BLIS_FLOAT, bli_sgemmtrsm_l_haswell_asm_6x16, TRUE,
BLIS_GEMMTRSM_L_UKR, BLIS_DOUBLE, bli_dgemmtrsm_l_zen_asm_16x14, TRUE,
BLIS_GEMMTRSM_L_UKR, BLIS_DOUBLE, bli_dgemmtrsm_l_zen_asm_16x14, TRUE,
// gemmtrsm_u
BLIS_GEMMTRSM_U_UKR, BLIS_FLOAT, bli_sgemmtrsm_u_haswell_asm_6x16, TRUE,
BLIS_GEMMTRSM_U_UKR, BLIS_DOUBLE, bli_dgemmtrsm_u_zen_asm_16x14, TRUE,
BLIS_GEMMTRSM_U_UKR, BLIS_DOUBLE, bli_dgemmtrsm_u_zen_asm_16x14, TRUE,
cntx
);
@@ -99,11 +100,13 @@ void bli_cntx_init_zen4( cntx_t* cntx )
// packm kernels
bli_cntx_set_packm_kers
(
8,
10,
BLIS_PACKM_6XK_KER, BLIS_FLOAT, bli_spackm_haswell_asm_6xk,
BLIS_PACKM_16XK_KER, BLIS_FLOAT, bli_spackm_haswell_asm_16xk,
BLIS_PACKM_6XK_KER, BLIS_DOUBLE, bli_dpackm_haswell_asm_6xk,
BLIS_PACKM_8XK_KER, BLIS_DOUBLE, bli_dpackm_haswell_asm_8xk,
BLIS_PACKM_24XK_KER, BLIS_DOUBLE, bli_dpackm_zen4_asm_24xk,
BLIS_PACKM_32XK_KER, BLIS_DOUBLE, bli_dpackm_32xk_zen4_ref,
BLIS_PACKM_3XK_KER, BLIS_SCOMPLEX, bli_cpackm_haswell_asm_3xk,
BLIS_PACKM_8XK_KER, BLIS_SCOMPLEX, bli_cpackm_haswell_asm_8xk,
BLIS_PACKM_3XK_KER, BLIS_DCOMPLEX, bli_zpackm_haswell_asm_3xk,

View File

@@ -4,7 +4,7 @@
# An object-based framework for developing high-performance BLAS-like
# libraries.
#
# Copyright (C) 2021-2023, Advanced Micro Devices, Inc. All rights reserved.
# Copyright (C) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are

View File

@@ -5,7 +5,7 @@
libraries.
Copyright (C) 2014, The University of Texas at Austin
Copyright (C) 2018 - 2022, Advanced Micro Devices, Inc. All rights reserved.
Copyright (C) 2018 - 2023, Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -174,7 +174,7 @@ void bli_trsm_front
* We need to revisit this when TRSM AVX-512 kernels are implemented.
*/
if ( (bli_arch_query_id() == BLIS_ARCH_ZEN4) &&
(bli_obj_dt(a) == BLIS_FLOAT) )
((bli_obj_dt(a) == BLIS_FLOAT) || (bli_obj_dt(a) == BLIS_DOUBLE)) )
{
bli_zen4_override_trsm_blkszs(&cntx_trsm);
}

View File

@@ -188,10 +188,9 @@ void PASTEMAC(ch,varname) \
*
* We need to revisit this when TRSM AVX-512 kernels are implemented.
*/ \
if ((bli_arch_query_id() == BLIS_ARCH_ZEN4) && \
(dt == BLIS_FLOAT)) \
if (bli_arch_query_id() == BLIS_ARCH_ZEN4 && ((dt == BLIS_FLOAT) || (dt == BLIS_DOUBLE)) ) \
{ \
gemm_ukr = bli_cntx_get_l3_vir_ukr_dt( dt, BLIS_GEMM_AVX2_UKR, cntx ); \
gemm_ukr = bli_cntx_get_l3_vir_ukr_dt( dt, BLIS_GEMM_FOR_TRSM_UKR, cntx ); \
} \
\
/* Temporary C buffer for edge cases. Note that the strides of this

View File

@@ -188,10 +188,9 @@ void PASTEMAC(ch,varname) \
*
* We need to revisit this when TRSM AVX-512 kernels are implemented.
*/ \
if ((bli_arch_query_id() == BLIS_ARCH_ZEN4) && \
(dt == BLIS_FLOAT)) \
if (bli_arch_query_id() == BLIS_ARCH_ZEN4 && ((dt == BLIS_FLOAT) || (dt == BLIS_DOUBLE)) ) \
{ \
gemm_ukr = bli_cntx_get_l3_vir_ukr_dt( dt, BLIS_GEMM_AVX2_UKR, cntx ); \
gemm_ukr = bli_cntx_get_l3_vir_ukr_dt( dt, BLIS_GEMM_FOR_TRSM_UKR, cntx ); \
} \
\
/* Temporary C buffer for edge cases. Note that the strides of this

View File

@@ -194,10 +194,9 @@ void PASTEMAC(ch,varname) \
*
* We need to revisit this when TRSM AVX-512 kernels are implemented.
*/ \
if ((bli_arch_query_id() == BLIS_ARCH_ZEN4) && \
(dt == BLIS_FLOAT)) \
if (bli_arch_query_id() == BLIS_ARCH_ZEN4 && ((dt == BLIS_FLOAT) || (dt == BLIS_DOUBLE)) ) \
{ \
gemm_ukr = bli_cntx_get_l3_vir_ukr_dt( dt, BLIS_GEMM_AVX2_UKR, cntx ); \
gemm_ukr = bli_cntx_get_l3_vir_ukr_dt( dt, BLIS_GEMM_FOR_TRSM_UKR, cntx ); \
} \
\
/* Temporary C buffer for edge cases. Note that the strides of this

View File

@@ -193,10 +193,9 @@ void PASTEMAC(ch,varname) \
*
* We need to revisit this when TRSM AVX-512 kernels are implemented.
*/ \
if ((bli_arch_query_id() == BLIS_ARCH_ZEN4) && \
(dt == BLIS_FLOAT)) \
if (bli_arch_query_id() == BLIS_ARCH_ZEN4 && ((dt == BLIS_FLOAT) || (dt == BLIS_DOUBLE)) ) \
{ \
gemm_ukr = bli_cntx_get_l3_vir_ukr_dt( dt, BLIS_GEMM_AVX2_UKR, cntx ); \
gemm_ukr = bli_cntx_get_l3_vir_ukr_dt( dt, BLIS_GEMM_FOR_TRSM_UKR, cntx ); \
} \
\
/* Temporary C buffer for edge cases. Note that the strides of this

View File

@@ -6,7 +6,7 @@
Copyright (C) 2014, The University of Texas at Austin
Copyright (C) 2016, Hewlett Packard Enterprise Development LP
Copyright (C) 2021 - 22, Advanced Micro Devices, Inc. All rights reserved.
Copyright (C) 2021 - 23, Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -756,6 +756,7 @@ typedef enum
BLIS_PACKM_29XK_KER = 29,
BLIS_PACKM_30XK_KER = 30,
BLIS_PACKM_31XK_KER = 31,
BLIS_PACKM_32XK_KER = 32,
BLIS_UNPACKM_0XK_KER = 0,
BLIS_UNPACKM_1XK_KER = 1,
@@ -792,7 +793,7 @@ typedef enum
} l1mkr_t;
#define BLIS_NUM_PACKM_KERS 32
#define BLIS_NUM_PACKM_KERS 33
#define BLIS_NUM_UNPACKM_KERS 32
@@ -803,7 +804,7 @@ typedef enum
BLIS_GEMMTRSM_U_UKR,
BLIS_TRSM_L_UKR,
BLIS_TRSM_U_UKR,
BLIS_GEMM_AVX2_UKR
BLIS_GEMM_FOR_TRSM_UKR
} l3ukr_t;
#define BLIS_NUM_LEVEL3_UKRS 6

View File

@@ -1,4 +1,4 @@
##Copyright (C) 2020, Advanced Micro Devices, Inc. All rights reserved.##
##Copyright (C) 2020-2023, Advanced Micro Devices, Inc. All rights reserved.##
target_sources("${PROJECT_NAME}"
PRIVATE

View File

@@ -5,7 +5,7 @@
libraries.
Copyright (C) 2014, The University of Texas at Austin
Copyright (C) 2020 - 2022, Advanced Micro Devices, Inc. All rights reserved.
Copyright (C) 2020 - 2023, Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -33,10 +33,7 @@
*/
// -- level-1m --
PACKM_KER_PROT(double, d, packm_8xk_gen_zen)
PACKM_KER_PROT(double, d, packm_6xk_gen_zen)
PACKM_KER_PROT(double, d, packm_8xk_nn_zen)
PACKM_KER_PROT(double, d, packm_6xk_nn_zen)
// Removed - reference packm kernels are used
// -- level-1v --
@@ -430,4 +427,4 @@ void bli_zdscalv_zen_int10
double* restrict alpha,
dcomplex* restrict x, inc_t incx,
cntx_t* restrict cntx
);
);

View File

@@ -0,0 +1,8 @@
##Copyright (C) 2023, Advanced Micro Devices, Inc. All rights reserved.##
target_sources("${PROJECT_NAME}"
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}/bli_packm_zen4_asm_d16xk.c
${CMAKE_CURRENT_SOURCE_DIR}/bli_packm_zen4_asm_d24xk.c
${CMAKE_CURRENT_SOURCE_DIR}/bli_packm_zen4_asm_d32xk.c
)

View File

@@ -0,0 +1,252 @@
/*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2022-23, Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name(s) of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <x86intrin.h>
#include "blis.h"
#define BLIS_ASM_SYNTAX_ATT
#include "bli_x86_asm_macros.h"
void bli_dpackm_zen4_asm_16xk
(
conj_t conja,
pack_t schema,
dim_t cdim0,
dim_t k0,
dim_t k0_max,
double* restrict kappa,
double* restrict a, inc_t inca0, inc_t lda0,
double* restrict p, inc_t ldp0,
cntx_t* restrict cntx
)
{
#if 0
bli_dpackm_16xk_zen4_ref
(
conja, schema, cdim0, k0, k0_max,
kappa, a, inca0, lda0, p, ldp0, cntx
);
return;
#endif
// This is the panel dimension assumed by the packm kernel.
const dim_t mnr = 16;
// This is the "packing" dimension assumed by the packm kernel.
// This should be equal to ldp.
//const dim_t packmnr = 16;
// NOTE: For the purposes of the comments in this packm kernel, we
// interpret inca and lda as rs_a and cs_a, respectively, and similarly
// interpret ldp as cs_p (with rs_p implicitly unit). Thus, when reading
// this packm kernel, you should think of the operation as packing an
// m x n micropanel, where m and n are tiny and large, respectively, and
// where elements of each column of the packed matrix P are contiguous.
// (This packm kernel can still be used to pack micropanels of matrix B
// in a gemm operation.)
const uint64_t inca = inca0;
const uint64_t lda = lda0;
const uint64_t ldp = ldp0;
// NOTE: If/when this kernel ever supports scaling by kappa within the
// assembly region, this constraint should be lifted.
const bool unitk = bli_deq1( *kappa );
// -------------------------------------------------------------------------
if ( cdim0 == mnr )
{
if ( unitk )
{
if ( bli_is_conj( conja ) )
{
if ( inca == 1 )
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 16 ; i++ ) {
bli_dcopyjs( *(a + i), *(p + i) );
}
a += lda;
p += ldp;
}
}
else
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 16 ; i++ ) {
bli_dcopyjs( *(a + i*inca), *(p + i) );
}
a += lda;
p += ldp;
}
}
}
else
{
if ( inca == 1 )
{
for ( dim_t k = k0; k != 0; --k )
{
_mm_prefetch( a + (8*lda), _MM_HINT_T0 );
for ( dim_t i = 0 ; i < 16 ; i++ ) {
bli_dcopys( *(a + i), *(p + i) );
}
a += lda;
p += ldp;
}
}
else
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 16 ; i++ ) {
bli_dcopys( *(a + i*inca), *(p + i) );
}
a += lda;
p += ldp;
}
}
}
}
else
{
if ( bli_is_conj( conja ) )
{
if ( inca == 1 )
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 16 ; i++ ) {
bli_dscal2js( *kappa, *(a + i), *(p + i) );
}
a += lda;
p += ldp;
}
}
else
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 16 ; i++ ) {
bli_dscal2js( *kappa, *(a + i*inca), *(p + i) );
}
a += lda;
p += ldp;
}
}
}
else
{
if ( inca == 1 )
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 16 ; i++ ) {
bli_dscal2s( *kappa, *(a + i), *(p + i) );
}
a += lda;
p += ldp;
}
}
else
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 16 ; i++ ) {
bli_dscal2s( *kappa, *(a + i*inca), *(p + i) );
}
a += lda;
p += ldp;
}
}
}
}
}
else // if ( cdim0 < mnr )
{
PASTEMAC(dscal2m,BLIS_TAPI_EX_SUF)
(
0,
BLIS_NONUNIT_DIAG,
BLIS_DENSE,
( trans_t )conja,
cdim0,
k0,
kappa,
a, inca0, lda0,
p, 1, ldp0,
cntx,
NULL
);
if ( cdim0 < mnr )
{
// Handle zero-filling along the "long" edge of the micropanel.
const dim_t i = cdim0;
const dim_t m_edge = mnr - cdim0;
const dim_t n_edge = k0_max;
double* restrict p_edge = p + (i )*1;
bli_dset0s_mxn
(
m_edge,
n_edge,
p_edge, 1, ldp
);
}
}
if ( k0 < k0_max )
{
// Handle zero-filling along the "short" (far) edge of the micropanel.
const dim_t j = k0;
const dim_t m_edge = mnr;
const dim_t n_edge = k0_max - k0;
double* restrict p_edge = p + (j )*ldp;
bli_dset0s_mxn
(
m_edge,
n_edge,
p_edge, 1, ldp
);
}
}

View File

@@ -0,0 +1,254 @@
/*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2022-23, Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name(s) of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <x86intrin.h>
#include "blis.h"
#define BLIS_ASM_SYNTAX_ATT
#include "bli_x86_asm_macros.h"
// Prototype reference packm kernels.
//PACKM_KER_PROT( double, d, packm_8xk_zen4_ref )
void bli_dpackm_zen4_asm_24xk
(
conj_t conja,
pack_t schema,
dim_t cdim0,
dim_t k0,
dim_t k0_max,
double* restrict kappa,
double* restrict a, inc_t inca0, inc_t lda0,
double* restrict p, inc_t ldp0,
cntx_t* restrict cntx
)
{
#if 0
bli_dpackm_24xk_zen4_ref
(
conja, schema, cdim0, k0, k0_max,
kappa, a, inca0, lda0, p, ldp0, cntx
);
return;
#endif
// This is the panel dimension assumed by the packm kernel.
const dim_t mnr = 24;
// This is the "packing" dimension assumed by the packm kernel.
// This should be equal to ldp.
//const dim_t packmnr = 8;
// NOTE: For the purposes of the comments in this packm kernel, we
// interpret inca and lda as rs_a and cs_a, respectively, and similarly
// interpret ldp as cs_p (with rs_p implicitly unit). Thus, when reading
// this packm kernel, you should think of the operation as packing an
// m x n micropanel, where m and n are tiny and large, respectively, and
// where elements of each column of the packed matrix P are contiguous.
// (This packm kernel can still be used to pack micropanels of matrix B
// in a gemm operation.)
const uint64_t inca = inca0;
const uint64_t lda = lda0;
const uint64_t ldp = ldp0;
// NOTE: If/when this kernel ever supports scaling by kappa within the
// assembly region, this constraint should be lifted.
const bool unitk = bli_deq1( *kappa );
// -------------------------------------------------------------------------
if ( cdim0 == mnr )
{
if ( unitk )
{
if ( bli_is_conj( conja ) )
{
if ( inca == 1 )
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 24 ; i++ ) {
bli_dcopyjs( *(a + i), *(p + i) );
}
a += lda;
p += ldp;
}
}
else
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 24 ; i++ ) {
bli_dcopyjs( *(a + i*inca), *(p + i) );
}
a += lda;
p += ldp;
}
}
}
else
{
if ( inca == 1 )
{
for ( dim_t k = k0; k != 0; --k )
{
_mm_prefetch( a + (6*lda), _MM_HINT_T0 );
for ( dim_t i = 0 ; i < 24 ; i++ ) {
bli_dcopys( *(a + i), *(p + i) );
}
a += lda;
p += ldp;
}
}
else
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 24 ; i++ ) {
bli_dcopys( *(a + i*inca), *(p + i) );
}
a += lda;
p += ldp;
}
}
}
}
else
{
if ( bli_is_conj( conja ) )
{
if ( inca == 1 )
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 24 ; i++ ) {
bli_dscal2js( *kappa, *(a + i), *(p + i) );
}
a += lda;
p += ldp;
}
}
else
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 24 ; i++ ) {
bli_dscal2js( *kappa, *(a + i*inca), *(p + i) );
}
a += lda;
p += ldp;
}
}
}
else
{
if ( inca == 1 )
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 24 ; i++ ) {
bli_dscal2s( *kappa, *(a + i), *(p + i) );
}
a += lda;
p += ldp;
}
}
else
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 24 ; i++ ) {
bli_dscal2s( *kappa, *(a + i*inca), *(p + i) );
}
a += lda;
p += ldp;
}
}
}
}
}
else // if ( cdim0 < mnr )
{
PASTEMAC(dscal2m,BLIS_TAPI_EX_SUF)
(
0,
BLIS_NONUNIT_DIAG,
BLIS_DENSE,
( trans_t )conja,
cdim0,
k0,
kappa,
a, inca0, lda0,
p, 1, ldp0,
cntx,
NULL
);
if ( cdim0 < mnr )
{
// Handle zero-filling along the "long" edge of the micropanel.
const dim_t i = cdim0;
const dim_t m_edge = mnr - cdim0;
const dim_t n_edge = k0_max;
double* restrict p_edge = p + (i )*1;
bli_dset0s_mxn
(
m_edge,
n_edge,
p_edge, 1, ldp
);
}
}
if ( k0 < k0_max )
{
// Handle zero-filling along the "short" (far) edge of the micropanel.
const dim_t j = k0;
const dim_t m_edge = mnr;
const dim_t n_edge = k0_max - k0;
double* restrict p_edge = p + (j )*ldp;
bli_dset0s_mxn
(
m_edge,
n_edge,
p_edge, 1, ldp
);
}
}

View File

@@ -0,0 +1,254 @@
/*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2022-23, Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name(s) of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <x86intrin.h>
#include "blis.h"
#define BLIS_ASM_SYNTAX_ATT
#include "bli_x86_asm_macros.h"
// Prototype reference packm kernels.
//PACKM_KER_PROT( double, d, packm_8xk_zen4_ref )
void bli_dpackm_zen4_asm_32xk
(
conj_t conja,
pack_t schema,
dim_t cdim0,
dim_t k0,
dim_t k0_max,
double* restrict kappa,
double* restrict a, inc_t inca0, inc_t lda0,
double* restrict p, inc_t ldp0,
cntx_t* restrict cntx
)
{
#if 0
bli_dpackm_32xk_zen4_ref
(
conja, schema, cdim0, k0, k0_max,
kappa, a, inca0, lda0, p, ldp0, cntx
);
return;
#endif
// This is the panel dimension assumed by the packm kernel.
const dim_t mnr = 32;
// This is the "packing" dimension assumed by the packm kernel.
// This should be equal to ldp.
//const dim_t packmnr = 8;
// NOTE: For the purposes of the comments in this packm kernel, we
// interpret inca and lda as rs_a and cs_a, respectively, and similarly
// interpret ldp as cs_p (with rs_p implicitly unit). Thus, when reading
// this packm kernel, you should think of the operation as packing an
// m x n micropanel, where m and n are tiny and large, respectively, and
// where elements of each column of the packed matrix P are contiguous.
// (This packm kernel can still be used to pack micropanels of matrix B
// in a gemm operation.)
const uint64_t inca = inca0;
const uint64_t lda = lda0;
const uint64_t ldp = ldp0;
// NOTE: If/when this kernel ever supports scaling by kappa within the
// assembly region, this constraint should be lifted.
const bool unitk = bli_deq1( *kappa );
// -------------------------------------------------------------------------
if ( cdim0 == mnr )
{
if ( unitk )
{
if ( bli_is_conj( conja ) )
{
if ( inca == 1 )
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 32 ; i++ ) {
bli_dcopyjs( *(a + i), *(p + i) );
}
a += lda;
p += ldp;
}
}
else
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 32 ; i++ ) {
bli_dcopyjs( *(a + i*inca), *(p + i) );
}
a += lda;
p += ldp;
}
}
}
else
{
if ( inca == 1 )
{
for ( dim_t k = k0; k != 0; --k )
{
_mm_prefetch( a + (8*lda), _MM_HINT_T0 );
for ( dim_t i = 0 ; i < 32 ; i++ ) {
bli_dcopys( *(a + i), *(p + i) );
}
a += lda;
p += ldp;
}
}
else
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 32 ; i++ ) {
bli_dcopys( *(a + i*inca), *(p + i) );
}
a += lda;
p += ldp;
}
}
}
}
else
{
if ( bli_is_conj( conja ) )
{
if ( inca == 1 )
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 32 ; i++ ) {
bli_dscal2js( *kappa, *(a + i), *(p + i) );
}
a += lda;
p += ldp;
}
}
else
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 32 ; i++ ) {
bli_dscal2js( *kappa, *(a + i*inca), *(p + i) );
}
a += lda;
p += ldp;
}
}
}
else
{
if ( inca == 1 )
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 32 ; i++ ) {
bli_dscal2s( *kappa, *(a + i), *(p + i) );
}
a += lda;
p += ldp;
}
}
else
{
for ( dim_t k = k0; k != 0; --k )
{
for ( dim_t i = 0 ; i < 32 ; i++ ) {
bli_dscal2s( *kappa, *(a + i*inca), *(p + i) );
}
a += lda;
p += ldp;
}
}
}
}
}
else // if ( cdim0 < mnr )
{
PASTEMAC(dscal2m,BLIS_TAPI_EX_SUF)
(
0,
BLIS_NONUNIT_DIAG,
BLIS_DENSE,
( trans_t )conja,
cdim0,
k0,
kappa,
a, inca0, lda0,
p, 1, ldp0,
cntx,
NULL
);
if ( cdim0 < mnr )
{
// Handle zero-filling along the "long" edge of the micropanel.
const dim_t i = cdim0;
const dim_t m_edge = mnr - cdim0;
const dim_t n_edge = k0_max;
double* restrict p_edge = p + (i )*1;
bli_dset0s_mxn
(
m_edge,
n_edge,
p_edge, 1, ldp
);
}
}
if ( k0 < k0_max )
{
// Handle zero-filling along the "short" (far) edge of the micropanel.
const dim_t j = k0;
const dim_t m_edge = mnr;
const dim_t n_edge = k0_max - k0;
double* restrict p_edge = p + (j )*ldp;
bli_dset0s_mxn
(
m_edge,
n_edge,
p_edge, 1, ldp
);
}
}

View File

@@ -1,7 +1,8 @@
##Copyright (C) 2022, Advanced Micro Devices, Inc. All rights reserved.##
##Copyright (C) 2022-23, Advanced Micro Devices, Inc. All rights reserved.##
target_sources("${PROJECT_NAME}"
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}/bli_gemmtrsm_l_zen_16x14.c
${CMAKE_CURRENT_SOURCE_DIR}/bli_gemmtrsm_u_zen_16x14.c
${CMAKE_CURRENT_SOURCE_DIR}/bli_dgemm_zen4_asm_32x6.c
)

View File

@@ -0,0 +1,483 @@
/*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name(s) of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY
OF TEXAS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "blis.h"
#include "bli_x86_asm_macros.h"
#define TAIL_NITER 5 // in units of 4x unrolled k iterations
// e.g. 5 -> 4*5 k iterations ~= 280 cycles
/*
* A Registers: ZMM0, ZMM1, ZMM2, ZMM3
* B Registers: ZMM4, xMM5, xMM6, xMM7
* C Registers: ZMM[8-31]
*/
#define LOOP_ALIGN ALIGN32
#define UPDATE_C(R1,R2,R3,R4) \
\
VMULPD(ZMM(R1), ZMM(R1), ZMM(0)) \
VMULPD(ZMM(R2), ZMM(R2), ZMM(0)) \
VMULPD(ZMM(R3), ZMM(R3), ZMM(0)) \
VMULPD(ZMM(R4), ZMM(R4), ZMM(0)) \
VFMADD231PD(ZMM(R1), ZMM(1), MEM(RCX)) \
VFMADD231PD(ZMM(R2), ZMM(1), MEM(RCX,64)) \
VFMADD231PD(ZMM(R3), ZMM(1), MEM(RCX,128)) \
VFMADD231PD(ZMM(R4), ZMM(1), MEM(RCX,192)) \
VMOVUPD(MEM(RCX), ZMM(R1)) \
VMOVUPD(MEM(RCX,64), ZMM(R2)) \
VMOVUPD(MEM(RCX,128), ZMM(R3)) \
VMOVUPD(MEM(RCX,192), ZMM(R4)) \
LEA(RCX, MEM(RCX,RBX,1))
#define UPDATE_C_BZ(R1,R2,R3,R4) \
\
VMULPD(ZMM(R1), ZMM(R1), ZMM(0)) \
VMULPD(ZMM(R2), ZMM(R2), ZMM(0)) \
VMULPD(ZMM(R3), ZMM(R3), ZMM(0)) \
VMULPD(ZMM(R4), ZMM(R4), ZMM(0)) \
VMOVUPD(MEM(RCX), ZMM(R1)) \
VMOVUPD(MEM(RCX,64), ZMM(R2)) \
VMOVUPD(MEM(RCX,128), ZMM(R3)) \
VMOVUPD(MEM(RCX,192), ZMM(R4)) \
LEA(RCX, MEM(RCX,RBX,1))
#define UPDATE_C_COL_SCATTERED(R1,R2,R3,R4) \
\
KXNORW(K(1), K(0), K(0)) \
KXNORW(K(2), K(0), K(0)) \
KXNORW(K(3), K(0), K(0)) \
KXNORW(K(4), K(0), K(0)) \
VGATHERQPD(ZMM(0) MASK_K(1), MEM(RCX,ZMM(4),1)) \
VFMADD231PD(ZMM(R1), ZMM(0), ZMM(1)) \
VGATHERQPD(ZMM(0) MASK_K(2), MEM(RCX,ZMM(5),1)) \
VFMADD231PD(ZMM(R2), ZMM(0), ZMM(1)) \
VSCATTERQPD(MEM(RCX,ZMM(4),1) MASK_K(3), ZMM(R1)) \
VSCATTERQPD(MEM(RCX,ZMM(5),1) MASK_K(4), ZMM(R2)) \
KXNORW(K(1), K(0), K(0)) \
KXNORW(K(2), K(0), K(0)) \
KXNORW(K(3), K(0), K(0)) \
KXNORW(K(4), K(0), K(0)) \
VGATHERQPD(ZMM(0) MASK_K(1), MEM(RCX,ZMM(6),1)) \
VFMADD231PD(ZMM(R3), ZMM(0), ZMM(1)) \
VGATHERQPD(ZMM(0) MASK_K(2), MEM(RCX,ZMM(7),1)) \
VFMADD231PD(ZMM(R4), ZMM(0), ZMM(1)) \
VSCATTERQPD(MEM(RCX,ZMM(6),1) MASK_K(3), ZMM(R3)) \
VSCATTERQPD(MEM(RCX,ZMM(7),1) MASK_K(4), ZMM(R4)) \
LEA(RCX, MEM(RCX,RBX,1))
// Beta == 0 variant of UPDATE_C_COL_SCATTERED: scatter the already
// alpha-scaled accumulators into C without gathering the old values.
// ZMM4-7 hold the per-row byte offsets; scatters consume their opmasks,
// so fresh all-ones masks are built first. The trailing LEA advances RCX
// by cs_c (held in RBX) to the next column.
#define UPDATE_C_BZ_COL_SCATTERED(R1,R2,R3,R4) \
\
KXNORW(K(1), K(0), K(0)) /* all-ones opmask per scatter */ \
KXNORW(K(2), K(0), K(0)) \
KXNORW(K(3), K(0), K(0)) \
KXNORW(K(4), K(0), K(0)) \
VSCATTERQPD(MEM(RCX,ZMM(4),1) MASK_K(1), ZMM(R1)) /* rows 0-7 */ \
VSCATTERQPD(MEM(RCX,ZMM(5),1) MASK_K(2), ZMM(R2)) /* rows 8-15 */ \
VSCATTERQPD(MEM(RCX,ZMM(6),1) MASK_K(3), ZMM(R3)) /* rows 16-23 */ \
VSCATTERQPD(MEM(RCX,ZMM(7),1) MASK_K(4), ZMM(R4)) /* rows 24-31 */ \
LEA(RCX, MEM(RCX,RBX,1)) /* advance RCX to the next column of C */
#if 0
#define SUBITER(n) \
\
VMOVUPD(XMM( 5), MEM(RBX,(6*n+ 0)*8)) \
\
VBROADCASTSD(ZMM( 4), XMM( 5)) \
VPERMILPD(XMM( 5), XMM( 5), IMM(3)) \
VFMADD231PD(ZMM( 8), ZMM(0), ZMM(4)) \
VFMADD231PD(ZMM( 9), ZMM(1), ZMM(4)) \
VFMADD231PD(ZMM(10), ZMM(2), ZMM(4)) \
VFMADD231PD(ZMM(11), ZMM(3), ZMM(4)) \
\
VBROADCASTSD(ZMM( 4), XMM( 5)) \
VMOVUPD(XMM( 6), MEM(RBX,(6*n+ 2)*8)) \
VFMADD231PD(ZMM(12), ZMM(0), ZMM(4)) \
VFMADD231PD(ZMM(13), ZMM(1), ZMM(4)) \
VFMADD231PD(ZMM(14), ZMM(2), ZMM(4)) \
VFMADD231PD(ZMM(15), ZMM(3), ZMM(4)) \
\
VBROADCASTSD(ZMM( 4), XMM( 6)) \
VPERMILPD(XMM( 6), XMM( 6), IMM(3)) \
VFMADD231PD(ZMM(16), ZMM(0), ZMM(4)) \
VFMADD231PD(ZMM(17), ZMM(1), ZMM(4)) \
VFMADD231PD(ZMM(18), ZMM(2), ZMM(4)) \
VFMADD231PD(ZMM(19), ZMM(3), ZMM(4)) \
\
VBROADCASTSD(ZMM( 4), XMM( 6)) \
VMOVUPD(XMM( 7), MEM(RBX,(6*n+ 4)*8)) \
VFMADD231PD(ZMM(20), ZMM(0), ZMM(4)) \
VFMADD231PD(ZMM(21), ZMM(1), ZMM(4)) \
VFMADD231PD(ZMM(22), ZMM(2), ZMM(4)) \
VFMADD231PD(ZMM(23), ZMM(3), ZMM(4)) \
\
VBROADCASTSD(ZMM( 4), XMM( 7)) \
VPERMILPD(XMM( 7), XMM( 7), IMM(3)) \
VFMADD231PD(ZMM(24), ZMM(0), ZMM(4)) \
VFMADD231PD(ZMM(25), ZMM(1), ZMM(4)) \
VFMADD231PD(ZMM(26), ZMM(2), ZMM(4)) \
VFMADD231PD(ZMM(27), ZMM(3), ZMM(4)) \
\
VBROADCASTSD(ZMM( 4), XMM( 7)) \
VFMADD231PD(ZMM(28), ZMM(0), ZMM(4)) \
VFMADD231PD(ZMM(29), ZMM(1), ZMM(4)) \
VFMADD231PD(ZMM(30), ZMM(2), ZMM(4)) \
VFMADD231PD(ZMM(31), ZMM(3), ZMM(4)) \
\
VMOVAPD(ZMM(0), MEM(RAX,(32*n+ 0)*8)) \
VMOVAPD(ZMM(1), MEM(RAX,(32*n+ 8)*8)) \
VMOVAPD(ZMM(2), MEM(RAX,(32*n+16)*8)) \
VMOVAPD(ZMM(3), MEM(RAX,(32*n+24)*8))
#else
#define SUBITER(n) \
\
VBROADCASTSD(ZMM( 4), MEM(RBX,(6*n+ 0)*8)) \
VBROADCASTSD(ZMM( 5), MEM(RBX,(6*n+ 1)*8)) \
VFMADD231PD(ZMM( 8), ZMM(0), ZMM(4)) \
VFMADD231PD(ZMM( 9), ZMM(1), ZMM(4)) \
VFMADD231PD(ZMM(10), ZMM(2), ZMM(4)) \
VFMADD231PD(ZMM(11), ZMM(3), ZMM(4)) \
\
VBROADCASTSD(ZMM( 4), MEM(RBX,(6*n+ 2)*8)) \
VFMADD231PD(ZMM(12), ZMM(0), ZMM(5)) \
VFMADD231PD(ZMM(13), ZMM(1), ZMM(5)) \
VFMADD231PD(ZMM(14), ZMM(2), ZMM(5)) \
VFMADD231PD(ZMM(15), ZMM(3), ZMM(5)) \
\
VBROADCASTSD(ZMM( 5), MEM(RBX,(6*n+ 3)*8)) \
VFMADD231PD(ZMM(16), ZMM(0), ZMM(4)) \
VFMADD231PD(ZMM(17), ZMM(1), ZMM(4)) \
VFMADD231PD(ZMM(18), ZMM(2), ZMM(4)) \
VFMADD231PD(ZMM(19), ZMM(3), ZMM(4)) \
\
VBROADCASTSD(ZMM( 4), MEM(RBX,(6*n+ 4)*8)) \
VFMADD231PD(ZMM(20), ZMM(0), ZMM(5)) \
VFMADD231PD(ZMM(21), ZMM(1), ZMM(5)) \
VFMADD231PD(ZMM(22), ZMM(2), ZMM(5)) \
VFMADD231PD(ZMM(23), ZMM(3), ZMM(5)) \
\
VBROADCASTSD(ZMM( 5), MEM(RBX,(6*n+ 5)*8)) \
VFMADD231PD(ZMM(24), ZMM(0), ZMM(4)) \
VFMADD231PD(ZMM(25), ZMM(1), ZMM(4)) \
VFMADD231PD(ZMM(26), ZMM(2), ZMM(4)) \
VFMADD231PD(ZMM(27), ZMM(3), ZMM(4)) \
\
VFMADD231PD(ZMM(28), ZMM(0), ZMM(5)) \
VFMADD231PD(ZMM(29), ZMM(1), ZMM(5)) \
VFMADD231PD(ZMM(30), ZMM(2), ZMM(5)) \
VFMADD231PD(ZMM(31), ZMM(3), ZMM(5)) \
\
VMOVAPD(ZMM(0), MEM(RAX,(32*n+ 0)*8)) \
VMOVAPD(ZMM(1), MEM(RAX,(32*n+ 8)*8)) \
VMOVAPD(ZMM(2), MEM(RAX,(32*n+16)*8)) \
VMOVAPD(ZMM(3), MEM(RAX,(32*n+24)*8))
#endif
//This is an array used for the scatter/gather instructions.
// Each entry is a row index 0..31; the epilogue multiplies the entries by
// rs_c (in bytes, via VPMULLQ) to form the per-lane byte offsets held in
// ZMM4-7. The 64-byte alignment keeps each 8-entry group of int64 offsets
// within a single cache line.
static int64_t offsets[32] __attribute__((aligned(64))) =
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31};
/*
 * AVX-512 32x6 double-precision GEMM microkernel for zen4.
 *
 * Computes C := beta*C + alpha*A*B for a 32x6 block, where A is a packed
 * 32xk micro-panel (32 contiguous doubles per k-iteration) and B is a
 * packed kx6 micro-panel (6 contiguous doubles per k-iteration).
 *
 *   k_            number of rank-1 updates (panel length)
 *   alpha, beta   pointers to the scalars applied to A*B and to C
 *   a, b          packed micro-panels of A and B
 *   c             32x6 output block with element strides rs_c_/cs_c_
 *   data, cntx    unused here; present for interface compatibility
 *
 * Register plan inside the asm block:
 *   ZMM0-3   current 32x1 column of A, pre-loaded one iteration ahead
 *   ZMM4-5   broadcast B elements (ZMM4-7 become byte offsets in the
 *            scattered epilogue)
 *   ZMM8-31  the 24 accumulators (6 columns x four 8-double registers)
 *
 * Fix vs. previous revision: the opmask registers k1-k4, written by the
 * gather/scatter epilogue (KXNORW/VGATHERQPD/VSCATTERQPD), are now listed
 * in the clobber list so the compiler cannot keep opmask state live
 * across this asm block.
 */
void bli_dgemm_zen4_asm_32x6(
dim_t k_,
double* restrict alpha,
double* restrict a,
double* restrict b,
double* restrict beta,
double* restrict c, inc_t rs_c_, inc_t cs_c_,
auxinfo_t* data,
cntx_t* restrict cntx
)
{
(void)data;
(void)cntx;
const int64_t* offsetPtr = &offsets[0]; // row indices for gather/scatter
const int64_t k = k_;
const int64_t rs_c = rs_c_*8; // stride in bytes
const int64_t cs_c = cs_c_*8; // stride in bytes
BEGIN_ASM()
// Zero the accumulators (and scratch ZMM4-7). Writing a YMM alias zeroes
// the full ZMM register; XOR-zeroing is interleaved with copies of the
// already-zeroed YMM4 to spread the work across execution ports.
VXORPD(YMM( 4), YMM( 4), YMM( 4)) //clear out registers
VXORPD(YMM( 5), YMM( 5), YMM( 5)) //clear out registers
VMOVAPD(YMM(6) , YMM(4))
VMOVAPD(YMM(7) , YMM(4))
VMOVAPD(YMM(8) , YMM(4))
VMOVAPD(YMM(9) , YMM(4))
VXORPD(YMM(10), YMM(10), YMM(10)) //clear out registers
VXORPD(YMM(11), YMM(11), YMM(11)) //clear out registers
VMOVAPD(YMM(12), YMM(4))
VMOVAPD(YMM(13), YMM(4))
VMOVAPD(YMM(14), YMM(4))
VMOVAPD(YMM(15), YMM(4))
VXORPD(YMM(16), YMM(16), YMM(16)) //clear out registers
VXORPD(YMM(17), YMM(17), YMM(17)) //clear out registers
VMOVAPD(YMM(18), YMM(4))
VMOVAPD(YMM(19), YMM(4))
VMOVAPD(YMM(20), YMM(4))
VMOVAPD(YMM(21), YMM(4))
VXORPD(YMM(22), YMM(22), YMM(22)) //clear out registers
VXORPD(YMM(23), YMM(23), YMM(23)) //clear out registers
VMOVAPD(YMM(24), YMM(4))
VMOVAPD(YMM(25), YMM(4))
VMOVAPD(YMM(26), YMM(4))
VMOVAPD(YMM(27), YMM(4))
VXORPD(YMM(28), YMM(28), YMM(28)) //clear out registers
VXORPD(YMM(29), YMM(29), YMM(29)) //clear out registers
VMOVAPD(YMM(30), YMM(4))
VMOVAPD(YMM(31), YMM(4))
MOV(RSI, VAR(k)) //loop index
MOV(RAX, VAR(a)) //load address of a
MOV(RBX, VAR(b)) //load address of b
MOV(RCX, VAR(c)) //load address of c
//LEA(R9, MEM(RCX,63)) // c for prefetching
MOV(R9, RCX) // R9 walks C for the prefetches in LOOP2
VMOVAPD(ZMM(0), MEM(RAX, 0*8)) //pre-load a
VMOVAPD(ZMM(1), MEM(RAX, 8*8)) //pre-load a
VMOVAPD(ZMM(2), MEM(RAX,16*8)) //pre-load a
VMOVAPD(ZMM(3), MEM(RAX,24*8)) //pre-load a
LEA(RAX, MEM(RAX,32*8)) //adjust a for pre-load
MOV(R12, VAR(rs_c))
MOV(R10, VAR(cs_c))
// Split k into 4x-unrolled groups (RDI) and a remainder (RSI = k % 4).
MOV(RDI, RSI)
AND(RSI, IMM(3))
SAR(RDI, IMM(2))
// Phase 1 (LOOP1): plain groups, until 6+TAIL_NITER groups remain.
SUB(RDI, IMM(6+TAIL_NITER))
JLE(K_LE_80)
LOOP_ALIGN
LABEL(LOOP1)
SUBITER(0)
SUBITER(1)
SUB(RDI, IMM(1))
SUBITER(2)
SUBITER(3)
LEA(RAX, MEM(RAX,4*32*8))
LEA(RBX, MEM(RBX,4*6*8))
JNZ(LOOP1)
LABEL(K_LE_80)
// Phase 2 (LOOP2): 6 groups, prefetching one 32x1 column of C per group.
ADD(RDI, IMM(6))
JLE(K_LE_24)
LOOP_ALIGN
LABEL(LOOP2)
PREFETCH(0, MEM(R9))
SUBITER(0)
PREFETCH(0, MEM(R9,64))
SUBITER(1)
PREFETCH(0, MEM(R9,128))
SUB(RDI, IMM(1))
SUBITER(2)
PREFETCH(0, MEM(R9,192))
SUBITER(3)
LEA(RAX, MEM(RAX,4*32*8))
LEA(RBX, MEM(RBX,4*6*8))
LEA(R9, MEM(R9,R10,1)) // next column of C to prefetch
JNZ(LOOP2)
LABEL(K_LE_24)
// Phase 3 (LOOP3): the final TAIL_NITER groups (prefetching is done).
ADD(RDI, IMM(0+TAIL_NITER))
JLE(TAIL)
LOOP_ALIGN
LABEL(LOOP3)
SUBITER(0)
SUBITER(1)
SUB(RDI, IMM(1))
SUBITER(2)
SUBITER(3)
LEA(RAX, MEM(RAX,4*32*8))
LEA(RBX, MEM(RBX,4*6*8))
JNZ(LOOP3)
LABEL(TAIL)
// Remaining k % 4 iterations, one SUBITER at a time.
TEST(RSI, RSI)
JZ(POSTACCUM)
LOOP_ALIGN
LABEL(TAIL_LOOP)
SUB(RSI, IMM(1))
SUBITER(0)
LEA(RAX, MEM(RAX,32*8))
LEA(RBX, MEM(RBX,6*8))
JNZ(TAIL_LOOP)
LABEL(POSTACCUM)
// Epilogue: apply alpha and beta, then write the 32x6 block back to C.
MOV(RAX, VAR(alpha))
MOV(RBX, VAR(beta))
VBROADCASTSD(ZMM(0), MEM(RAX)) // zmm0 = alpha
VBROADCASTSD(ZMM(1), MEM(RBX)) // zmm1 = beta
VXORPD(YMM(2), YMM(2), YMM(2)) // zmm2 = 0, for the beta == 0 tests
MOV(RAX, R12) // rax = rs_c in bytes
MOV(RBX, R10) // rbx = cs_c in bytes
// Check if C is column stride. (rs_c == 8 bytes, i.e. unit row stride)
CMP(RAX, IMM(8))
JNE(SCATTEREDUPDATE)
VCOMISD(XMM(1), XMM(2)) // beta == 0 ?
JE(COLSTORBZ)
UPDATE_C( 8, 9,10,11)
UPDATE_C(12,13,14,15)
UPDATE_C(16,17,18,19)
UPDATE_C(20,21,22,23)
UPDATE_C(24,25,26,27)
UPDATE_C(28,29,30,31)
JMP(END)
LABEL(COLSTORBZ)
UPDATE_C_BZ( 8, 9,10,11)
UPDATE_C_BZ(12,13,14,15)
UPDATE_C_BZ(16,17,18,19)
UPDATE_C_BZ(20,21,22,23)
UPDATE_C_BZ(24,25,26,27)
UPDATE_C_BZ(28,29,30,31)
JMP(END)
LABEL(SCATTEREDUPDATE)
// General-stride C: scale every accumulator by alpha up front, then build
// the per-row byte offsets (rs_c * [0..31]) in ZMM4-7 for gather/scatter.
VMULPD(ZMM( 8), ZMM( 8), ZMM(0))
VMULPD(ZMM( 9), ZMM( 9), ZMM(0))
VMULPD(ZMM(10), ZMM(10), ZMM(0))
VMULPD(ZMM(11), ZMM(11), ZMM(0))
VMULPD(ZMM(12), ZMM(12), ZMM(0))
VMULPD(ZMM(13), ZMM(13), ZMM(0))
VMULPD(ZMM(14), ZMM(14), ZMM(0))
VMULPD(ZMM(15), ZMM(15), ZMM(0))
VMULPD(ZMM(16), ZMM(16), ZMM(0))
VMULPD(ZMM(17), ZMM(17), ZMM(0))
VMULPD(ZMM(18), ZMM(18), ZMM(0))
VMULPD(ZMM(19), ZMM(19), ZMM(0))
VMULPD(ZMM(20), ZMM(20), ZMM(0))
VMULPD(ZMM(21), ZMM(21), ZMM(0))
VMULPD(ZMM(22), ZMM(22), ZMM(0))
VMULPD(ZMM(23), ZMM(23), ZMM(0))
VMULPD(ZMM(24), ZMM(24), ZMM(0))
VMULPD(ZMM(25), ZMM(25), ZMM(0))
VMULPD(ZMM(26), ZMM(26), ZMM(0))
VMULPD(ZMM(27), ZMM(27), ZMM(0))
VMULPD(ZMM(28), ZMM(28), ZMM(0))
VMULPD(ZMM(29), ZMM(29), ZMM(0))
VMULPD(ZMM(30), ZMM(30), ZMM(0))
VMULPD(ZMM(31), ZMM(31), ZMM(0))
VCOMISD(XMM(1), XMM(2)) // beta == 0 ? (EFLAGS survive the vector ops below)
MOV(RDI, VAR(offsetPtr))
VPBROADCASTQ(ZMM(0), RAX) // broadcast rs_c (bytes)
VPMULLQ(ZMM(4), ZMM(0), MEM(RDI)) // byte offsets for rows 0-7
VPMULLQ(ZMM(5), ZMM(0), MEM(RDI,64)) // rows 8-15
VPMULLQ(ZMM(6), ZMM(0), MEM(RDI,128)) // rows 16-23
VPMULLQ(ZMM(7), ZMM(0), MEM(RDI,192)) // rows 24-31
JE(SCATTERBZ)
UPDATE_C_COL_SCATTERED( 8, 9,10,11)
UPDATE_C_COL_SCATTERED(12,13,14,15)
UPDATE_C_COL_SCATTERED(16,17,18,19)
UPDATE_C_COL_SCATTERED(20,21,22,23)
UPDATE_C_COL_SCATTERED(24,25,26,27)
UPDATE_C_COL_SCATTERED(28,29,30,31)
JMP(END)
LABEL(SCATTERBZ)
UPDATE_C_BZ_COL_SCATTERED( 8, 9,10,11)
UPDATE_C_BZ_COL_SCATTERED(12,13,14,15)
UPDATE_C_BZ_COL_SCATTERED(16,17,18,19)
UPDATE_C_BZ_COL_SCATTERED(20,21,22,23)
UPDATE_C_BZ_COL_SCATTERED(24,25,26,27)
UPDATE_C_BZ_COL_SCATTERED(28,29,30,31)
LABEL(END)
VZEROUPPER()
END_ASM
(
: // output operands
: // input operands
[k] "m" (k),
[a] "m" (a),
[b] "m" (b),
[alpha] "m" (alpha),
[beta] "m" (beta),
[c] "m" (c),
[rs_c] "m" (rs_c),
[cs_c] "m" (cs_c),
[offsetPtr] "m" (offsetPtr)
: // register clobber list
// k1-k4 are written by the scattered C update (KXNORW + gather/scatter)
// and must be declared clobbered.
"rax", "rbx", "rcx", "rdx", "rdi", "rsi", "r8", "r9", "r10", "r11", "r12",
"r13", "r14", "r15", "k1", "k2", "k3", "k4", "zmm0", "zmm1", "zmm2",
"zmm3", "zmm4", "zmm5", "zmm6", "zmm7", "zmm8", "zmm9", "zmm10", "zmm11",
"zmm12", "zmm13", "zmm14", "zmm15", "zmm16", "zmm17", "zmm18", "zmm19",
"zmm20", "zmm21", "zmm22", "zmm23", "zmm24", "zmm25", "zmm26", "zmm27",
"zmm28", "zmm29", "zmm30", "zmm31", "memory"
)
}

View File

@@ -1,6 +1,7 @@
##Copyright (C) 2022, Advanced Micro Devices, Inc. All rights reserved.##
##Copyright (C) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.##
add_subdirectory(1)
add_subdirectory(1m)
add_subdirectory(3)

View File

@@ -1 +0,0 @@
Currently there are no zen4 specific kernels, however, this folder is required for the build system.

View File

@@ -4,7 +4,7 @@
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2022, Advanced Micro Devices, Inc. All rights reserved.
Copyright (C) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -41,6 +41,15 @@ AMAXV_KER_PROT( double, d, amaxv_zen_int_avx512 )
GEMMTRSM_UKR_PROT( double, d, gemmtrsm_l_zen_asm_16x14)
GEMMTRSM_UKR_PROT( double, d, gemmtrsm_u_zen_asm_16x14)
//packing kernels
PACKM_KER_PROT( double, d, packm_zen4_asm_16xk )
PACKM_KER_PROT( double, d, packm_zen4_asm_24xk )
PACKM_KER_PROT( double, d, packm_zen4_asm_32xk )
PACKM_KER_PROT( double, d, packm_32xk_zen4_ref )
// native dgemm kernel
GEMM_UKR_PROT( double, d, gemm_zen4_asm_32x6 )
//sgemm rv sup
GEMMSUP_KER_PROT( float, s, gemmsup_rv_zen_asm_12x32m )
GEMMSUP_KER_PROT( float, s, gemmsup_rv_zen_asm_8x16m )

View File

@@ -5,6 +5,7 @@
libraries.
Copyright (C) 2014, The University of Texas at Austin
Copyright (C) 2023, Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -1720,3 +1721,250 @@ void PASTEMAC3(ch,opname,arch,suf) \
INSERT_GENTFUNC_BASIC3( packm_24xk, 24, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX )
#undef GENTFUNC
// Reference 32xk packing kernel generator (companion to the 16xk/24xk
// generators above). The generated function packs a cdim x n tile of A
// into a 32-row panel P with unit row stride, applying kappa and optional
// conjugation, and zero-fills the edge regions so the full mnr x n_max
// panel is safe to read.
//
// Parameters of the generated function:
//   conja          whether to conjugate elements while packing
//   schema         pack schema (not inspected by this reference path)
//   cdim           number of valid rows (<= mnr, which is 32 here)
//   n              number of columns to pack
//   n_max          panel width to zero-fill up to
//   kappa          scalar applied to each packed element
//   a, inca, lda   source tile and its row/column strides
//   p, ldp         destination panel (row stride 1) and its column stride
//   cntx           context, forwarded to the scal2m fallback
#define GENTFUNC( ctype, ch, opname, mnr, arch, suf ) \
\
void PASTEMAC3(ch,opname,arch,suf) \
( \
conj_t conja, \
pack_t schema, \
dim_t cdim, \
dim_t n, \
dim_t n_max, \
ctype* restrict kappa, \
ctype* restrict a, inc_t inca, inc_t lda, \
ctype* restrict p, inc_t ldp, \
cntx_t* restrict cntx \
) \
{ \
ctype* restrict kappa_cast = kappa; \
ctype* restrict alpha1 = a; \
ctype* restrict pi1 = p; \
\
/* Full 32-row panel: specialize on kappa == 1 and on conjugation so each */ \
/* column is packed with a straight run of 32 copies or scaled copies. */ \
if ( cdim == mnr ) \
{ \
if ( PASTEMAC(ch,eq1)( *kappa_cast ) ) \
{ \
if ( bli_is_conj( conja ) ) \
{ \
for ( dim_t k = n; k != 0; --k ) \
{ \
PASTEMAC(ch,copyjs)( *(alpha1 + 0*inca), *(pi1 + 0) ); \
PASTEMAC(ch,copyjs)( *(alpha1 + 1*inca), *(pi1 + 1) ); \
PASTEMAC(ch,copyjs)( *(alpha1 + 2*inca), *(pi1 + 2) ); \
PASTEMAC(ch,copyjs)( *(alpha1 + 3*inca), *(pi1 + 3) ); \
PASTEMAC(ch,copyjs)( *(alpha1 + 4*inca), *(pi1 + 4) ); \
PASTEMAC(ch,copyjs)( *(alpha1 + 5*inca), *(pi1 + 5) ); \
PASTEMAC(ch,copyjs)( *(alpha1 + 6*inca), *(pi1 + 6) ); \
PASTEMAC(ch,copyjs)( *(alpha1 + 7*inca), *(pi1 + 7) ); \
PASTEMAC(ch,copyjs)( *(alpha1 + 8*inca), *(pi1 + 8) ); \
PASTEMAC(ch,copyjs)( *(alpha1 + 9*inca), *(pi1 + 9) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +10*inca), *(pi1 +10) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +11*inca), *(pi1 +11) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +12*inca), *(pi1 +12) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +13*inca), *(pi1 +13) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +14*inca), *(pi1 +14) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +15*inca), *(pi1 +15) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +16*inca), *(pi1 +16) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +17*inca), *(pi1 +17) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +18*inca), *(pi1 +18) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +19*inca), *(pi1 +19) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +20*inca), *(pi1 +20) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +21*inca), *(pi1 +21) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +22*inca), *(pi1 +22) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +23*inca), *(pi1 +23) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +24*inca), *(pi1 +24) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +25*inca), *(pi1 +25) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +26*inca), *(pi1 +26) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +27*inca), *(pi1 +27) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +28*inca), *(pi1 +28) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +29*inca), *(pi1 +29) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +30*inca), *(pi1 +30) ); \
PASTEMAC(ch,copyjs)( *(alpha1 +31*inca), *(pi1 +31) ); \
\
alpha1 += lda; \
pi1 += ldp; \
} \
} \
else \
{ \
for ( dim_t k = n; k != 0; --k ) \
{ \
PASTEMAC(ch,copys)( *(alpha1 + 0*inca), *(pi1 + 0) ); \
PASTEMAC(ch,copys)( *(alpha1 + 1*inca), *(pi1 + 1) ); \
PASTEMAC(ch,copys)( *(alpha1 + 2*inca), *(pi1 + 2) ); \
PASTEMAC(ch,copys)( *(alpha1 + 3*inca), *(pi1 + 3) ); \
PASTEMAC(ch,copys)( *(alpha1 + 4*inca), *(pi1 + 4) ); \
PASTEMAC(ch,copys)( *(alpha1 + 5*inca), *(pi1 + 5) ); \
PASTEMAC(ch,copys)( *(alpha1 + 6*inca), *(pi1 + 6) ); \
PASTEMAC(ch,copys)( *(alpha1 + 7*inca), *(pi1 + 7) ); \
PASTEMAC(ch,copys)( *(alpha1 + 8*inca), *(pi1 + 8) ); \
PASTEMAC(ch,copys)( *(alpha1 + 9*inca), *(pi1 + 9) ); \
PASTEMAC(ch,copys)( *(alpha1 +10*inca), *(pi1 +10) ); \
PASTEMAC(ch,copys)( *(alpha1 +11*inca), *(pi1 +11) ); \
PASTEMAC(ch,copys)( *(alpha1 +12*inca), *(pi1 +12) ); \
PASTEMAC(ch,copys)( *(alpha1 +13*inca), *(pi1 +13) ); \
PASTEMAC(ch,copys)( *(alpha1 +14*inca), *(pi1 +14) ); \
PASTEMAC(ch,copys)( *(alpha1 +15*inca), *(pi1 +15) ); \
PASTEMAC(ch,copys)( *(alpha1 +16*inca), *(pi1 +16) ); \
PASTEMAC(ch,copys)( *(alpha1 +17*inca), *(pi1 +17) ); \
PASTEMAC(ch,copys)( *(alpha1 +18*inca), *(pi1 +18) ); \
PASTEMAC(ch,copys)( *(alpha1 +19*inca), *(pi1 +19) ); \
PASTEMAC(ch,copys)( *(alpha1 +20*inca), *(pi1 +20) ); \
PASTEMAC(ch,copys)( *(alpha1 +21*inca), *(pi1 +21) ); \
PASTEMAC(ch,copys)( *(alpha1 +22*inca), *(pi1 +22) ); \
PASTEMAC(ch,copys)( *(alpha1 +23*inca), *(pi1 +23) ); \
PASTEMAC(ch,copys)( *(alpha1 +24*inca), *(pi1 +24) ); \
PASTEMAC(ch,copys)( *(alpha1 +25*inca), *(pi1 +25) ); \
PASTEMAC(ch,copys)( *(alpha1 +26*inca), *(pi1 +26) ); \
PASTEMAC(ch,copys)( *(alpha1 +27*inca), *(pi1 +27) ); \
PASTEMAC(ch,copys)( *(alpha1 +28*inca), *(pi1 +28) ); \
PASTEMAC(ch,copys)( *(alpha1 +29*inca), *(pi1 +29) ); \
PASTEMAC(ch,copys)( *(alpha1 +30*inca), *(pi1 +30) ); \
PASTEMAC(ch,copys)( *(alpha1 +31*inca), *(pi1 +31) ); \
\
alpha1 += lda; \
pi1 += ldp; \
} \
} \
} \
else \
{ \
/* kappa != 1: scale each element while packing. */ \
if ( bli_is_conj( conja ) ) \
{ \
for ( dim_t k = n; k != 0; --k ) \
{ \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 + 0*inca), *(pi1 + 0) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 + 1*inca), *(pi1 + 1) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 + 2*inca), *(pi1 + 2) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 + 3*inca), *(pi1 + 3) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 + 4*inca), *(pi1 + 4) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 + 5*inca), *(pi1 + 5) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 + 6*inca), *(pi1 + 6) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 + 7*inca), *(pi1 + 7) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 + 8*inca), *(pi1 + 8) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 + 9*inca), *(pi1 + 9) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +10*inca), *(pi1 +10) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +11*inca), *(pi1 +11) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +12*inca), *(pi1 +12) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +13*inca), *(pi1 +13) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +14*inca), *(pi1 +14) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +15*inca), *(pi1 +15) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +16*inca), *(pi1 +16) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +17*inca), *(pi1 +17) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +18*inca), *(pi1 +18) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +19*inca), *(pi1 +19) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +20*inca), *(pi1 +20) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +21*inca), *(pi1 +21) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +22*inca), *(pi1 +22) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +23*inca), *(pi1 +23) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +24*inca), *(pi1 +24) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +25*inca), *(pi1 +25) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +26*inca), *(pi1 +26) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +27*inca), *(pi1 +27) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +28*inca), *(pi1 +28) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +29*inca), *(pi1 +29) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +30*inca), *(pi1 +30) ); \
PASTEMAC(ch,scal2js)( *kappa_cast, *(alpha1 +31*inca), *(pi1 +31) ); \
\
alpha1 += lda; \
pi1 += ldp; \
} \
} \
else \
{ \
for ( dim_t k = n; k != 0; --k ) \
{ \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 + 0*inca), *(pi1 + 0) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 + 1*inca), *(pi1 + 1) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 + 2*inca), *(pi1 + 2) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 + 3*inca), *(pi1 + 3) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 + 4*inca), *(pi1 + 4) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 + 5*inca), *(pi1 + 5) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 + 6*inca), *(pi1 + 6) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 + 7*inca), *(pi1 + 7) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 + 8*inca), *(pi1 + 8) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 + 9*inca), *(pi1 + 9) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +10*inca), *(pi1 +10) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +11*inca), *(pi1 +11) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +12*inca), *(pi1 +12) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +13*inca), *(pi1 +13) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +14*inca), *(pi1 +14) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +15*inca), *(pi1 +15) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +16*inca), *(pi1 +16) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +17*inca), *(pi1 +17) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +18*inca), *(pi1 +18) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +19*inca), *(pi1 +19) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +20*inca), *(pi1 +20) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +21*inca), *(pi1 +21) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +22*inca), *(pi1 +22) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +23*inca), *(pi1 +23) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +24*inca), *(pi1 +24) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +25*inca), *(pi1 +25) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +26*inca), *(pi1 +26) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +27*inca), *(pi1 +27) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +28*inca), *(pi1 +28) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +29*inca), *(pi1 +29) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +30*inca), *(pi1 +30) ); \
PASTEMAC(ch,scal2s)( *kappa_cast, *(alpha1 +31*inca), *(pi1 +31) ); \
\
alpha1 += lda; \
pi1 += ldp; \
} \
} \
} \
} \
else /* if ( cdim < mnr ) */ \
{ \
/* Partial panel: pack the cdim x n tile with the generic scal2m, then */ \
/* zero-fill rows cdim..mnr-1 below. */ \
PASTEMAC2(ch,scal2m,BLIS_TAPI_EX_SUF) \
( \
0, \
BLIS_NONUNIT_DIAG, \
BLIS_DENSE, \
( trans_t )conja, \
cdim, \
n, \
kappa, \
a, inca, lda, \
p, 1, ldp, \
cntx, \
NULL \
); \
\
/* if ( cdim < mnr ) */ \
{ \
const dim_t i = cdim; \
const dim_t m_edge = mnr - cdim; \
const dim_t n_edge = n_max; \
ctype* restrict p_cast = p; \
ctype* restrict p_edge = p_cast + (i )*1; \
\
PASTEMAC(ch,set0s_mxn) \
( \
m_edge, \
n_edge, \
p_edge, 1, ldp \
); \
} \
} \
\
/* Zero-fill trailing columns n..n_max-1 so edge reads are well-defined. */ \
if ( n < n_max ) \
{ \
const dim_t j = n; \
const dim_t m_edge = mnr; \
const dim_t n_edge = n_max - n; \
ctype* restrict p_cast = p; \
ctype* restrict p_edge = p_cast + (j )*ldp; \
\
PASTEMAC(ch,set0s_mxn) \
( \
m_edge, \
n_edge, \
p_edge, 1, ldp \
); \
} \
}
INSERT_GENTFUNC_BASIC3( packm_32xk, 32, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX )