mirror of
https://github.com/amd/blis.git
synced 2026-04-20 07:38:53 +00:00
Optimized AXPBYV Kernel using AVX2 Intrinsics
Details: - Intrinsic implementation of axpbyv for AVX2 - Bench written for axpbyv - Added definitions in zen contexts AMD-Internal: [CPUPL-1963] Change-Id: I9bc21a6170f5c944eb6e9e9f0e994b9992f8b539
This commit is contained in:
committed by
Arnav Sharma
parent
b095f1f3a2
commit
3190e547b0
@@ -191,7 +191,8 @@ blis: \
|
||||
bench_trsv_blis.x \
|
||||
bench_amaxv_blis.x \
|
||||
bench_copyv_blis.x \
|
||||
bench_swapv_blis.x
|
||||
bench_swapv_blis.x \
|
||||
bench_axpbyv_blis.x
|
||||
|
||||
openblas: \
|
||||
bench_gemm_openblas.x \
|
||||
@@ -205,7 +206,8 @@ openblas: \
|
||||
bench_trsv_openblas.x \
|
||||
bench_amaxv_openblas.x \
|
||||
bench_copyv_openblas.x \
|
||||
bench_swapv_openblas.x
|
||||
bench_swapv_openblas.x \
|
||||
bench_axpbyv_openblas.x
|
||||
|
||||
atlas: \
|
||||
bench_gemm_atlas.x \
|
||||
@@ -219,7 +221,8 @@ atlas: \
|
||||
bench_trsv_atlas.x \
|
||||
bench_amaxv_atlas.x \
|
||||
bench_copyv_atlas.x \
|
||||
bench_swapv_atlas.x
|
||||
bench_swapv_atlas.x \
|
||||
bench_axpbyv_atlas.x
|
||||
|
||||
mkl: \
|
||||
bench_gemm_mkl.x \
|
||||
@@ -233,7 +236,8 @@ mkl: \
|
||||
bench_trsv_mkl.x \
|
||||
bench_amaxv_mkl.x \
|
||||
bench_copyv_mkl.x \
|
||||
bench_swapv_mkl.x
|
||||
bench_swapv_mkl.x \
|
||||
bench_axpbyv_mkl.x
|
||||
|
||||
|
||||
# --Object file rules --
|
||||
|
||||
265
bench/bench_axpbyv.c
Normal file
265
bench/bench_axpbyv.c
Normal file
@@ -0,0 +1,265 @@
|
||||
/*
|
||||
|
||||
BLIS
|
||||
An object-based framework for developing high-performance BLAS-like
|
||||
libraries.
|
||||
|
||||
Copyright (C) 2022, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
- Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
- Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
- Neither the name of The University of Texas nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
*/
|
||||
|
||||
#ifdef WIN32
|
||||
#include <io.h>
|
||||
#else
|
||||
#include <unistd.h>
|
||||
#endif
|
||||
#include "blis.h"
|
||||
|
||||
#ifndef DT
|
||||
#define DT BLIS_DOUBLE
|
||||
#endif
|
||||
#define AOCL_MATRIX_INITIALISATION
|
||||
|
||||
int main( int argc, char** argv )
|
||||
{
|
||||
obj_t x, y, alpha, beta; // BLIS objects
|
||||
dim_t p_inc = 0; // To keep track of number of inputs
|
||||
num_t dt; // BLIS datatype
|
||||
char dt_ch; // {S, D, Z, C} from input
|
||||
int r, n_repeats; // repetition counter; number of repeats
|
||||
|
||||
double dtime;
|
||||
double dtime_save;
|
||||
double gflops;
|
||||
|
||||
FILE* fin = NULL; // Input FILE*
|
||||
FILE* fout = NULL; // Output FILE*
|
||||
|
||||
n_repeats = N_REPEAT; // Fetched from Makefile
|
||||
|
||||
dt = DT; // Set datatype as BLIS_DOUBLE
|
||||
|
||||
if ( argc < 3 )
|
||||
{
|
||||
printf( "Usage: ./bench_axpbyv_XX.x input.txt output.txt\n" );
|
||||
exit( 1 );
|
||||
}
|
||||
|
||||
fin = fopen( argv[1], "r" ); // Open input file in read mode
|
||||
if ( fin == NULL )
|
||||
{
|
||||
printf( "Error opening input file %s\n", argv[1] );
|
||||
exit( 1 );
|
||||
}
|
||||
|
||||
fout = fopen( argv[2], "w" ); // Open output file in write mode
|
||||
if ( fout == NULL )
|
||||
{
|
||||
printf( "Error opening output file %s\n", argv[2] );
|
||||
exit( 1 );
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
fprintf( fout, "gflops\n" );
|
||||
#else
|
||||
fprintf(fout, "Dt\t n\t alpha_r\t alpha_i\t beta_r\t beta_i\t gflops\n" );
|
||||
#endif
|
||||
|
||||
dim_t n; // dimension
|
||||
inc_t incx; // stride x
|
||||
inc_t incy; // stride y
|
||||
char tmp[256]; // to store function name, line not present in logs
|
||||
double alpha_r, alpha_i, beta_r, beta_i;
|
||||
|
||||
// {function name} {S, D, C, Z} {n}
|
||||
// {alpha_r} {alpha_i} {incx} {beta_r} {beta_i} {incy}
|
||||
while ( fscanf( fin, "%s %c %ld %lf %lf %ld %lf %lf %ld\n",
|
||||
tmp, &dt_ch, &n,
|
||||
&alpha_r, &alpha_i, &incx, &beta_r, &beta_i, &incy ) == 9 )
|
||||
{
|
||||
if ( dt_ch == 'D' || dt_ch == 'd' ) dt = BLIS_DOUBLE;
|
||||
else if ( dt_ch == 'Z' || dt_ch == 'z' ) dt = BLIS_DCOMPLEX;
|
||||
else if ( dt_ch == 'S' || dt_ch == 's' ) dt = BLIS_FLOAT;
|
||||
else if ( dt_ch == 'C' || dt_ch == 'c' ) dt = BLIS_SCOMPLEX;
|
||||
else
|
||||
{
|
||||
printf( "Invalid data type %c\n", dt_ch );
|
||||
continue;
|
||||
}
|
||||
|
||||
// Creating BLIS objects
|
||||
bli_obj_create( dt, n, 1, incx, 1, &x ); // For input vector x
|
||||
bli_obj_create( dt, n, 1, incy, 1, &y ); // For input vector y
|
||||
bli_obj_create( dt, 1, 1, 0, 0, &alpha); // For input vector alpha
|
||||
bli_obj_create( dt, 1, 1, 0, 0, &beta); // For input vector beta
|
||||
|
||||
#ifdef AOCL_MATRIX_INITIALISATION
|
||||
bli_randm( &x );
|
||||
bli_randm( &y );
|
||||
#endif
|
||||
|
||||
bli_setsc( alpha_r, alpha_i, &alpha );
|
||||
bli_setsc( beta_r, beta_i, &beta );
|
||||
|
||||
dtime_save = DBL_MAX;
|
||||
|
||||
for ( r = 0; r < n_repeats; ++r )
|
||||
{
|
||||
dtime = bli_clock();
|
||||
|
||||
#ifdef BLIS
|
||||
bli_axpbyv( &alpha, &x, &beta, &y );
|
||||
#else
|
||||
f77_int nn = bli_obj_length( &x );
|
||||
f77_int blas_incx = bli_obj_vector_inc( &x );
|
||||
f77_int blas_incy = bli_obj_vector_inc( &y );
|
||||
|
||||
if ( bli_is_float( dt ) )
|
||||
{
|
||||
float* alphap = bli_obj_buffer( &alpha );
|
||||
float* xp = bli_obj_buffer( &x );
|
||||
float* betap = bli_obj_buffer( &beta );
|
||||
float* yp = bli_obj_buffer( &y );
|
||||
|
||||
#ifdef CBLAS
|
||||
cblas_saxpby( nn,
|
||||
*alphap,
|
||||
xp,
|
||||
blas_incx,
|
||||
*betap,
|
||||
yp,
|
||||
blas_incy );
|
||||
#else
|
||||
saxpby_( &nn,
|
||||
alphap,
|
||||
xp,
|
||||
&blas_incx,
|
||||
betap,
|
||||
yp,
|
||||
&blas_incy );
|
||||
#endif
|
||||
}
|
||||
else if ( bli_is_double( dt ) )
|
||||
{
|
||||
double* alphap = bli_obj_buffer( &alpha );
|
||||
double* xp = bli_obj_buffer( &x );
|
||||
double* betap = bli_obj_buffer( &beta );
|
||||
double* yp = bli_obj_buffer( &y );
|
||||
|
||||
#ifdef CBLAS
|
||||
cblas_daxpby( nn,
|
||||
*alphap,
|
||||
xp,
|
||||
blas_incx,
|
||||
*betap,
|
||||
yp,
|
||||
blas_incy );
|
||||
#else
|
||||
daxpby_( &nn,
|
||||
alphap,
|
||||
xp,
|
||||
&blas_incx,
|
||||
betap,
|
||||
yp,
|
||||
&blas_incy );
|
||||
#endif
|
||||
}
|
||||
else if ( bli_is_scomplex( dt ) )
|
||||
{
|
||||
scomplex* alphap = bli_obj_buffer( &alpha );
|
||||
scomplex* xp = bli_obj_buffer( &x );
|
||||
scomplex* betap = bli_obj_buffer( &beta );
|
||||
scomplex* yp = bli_obj_buffer( &y );
|
||||
|
||||
#ifdef CBLAS
|
||||
cblas_caxpby( nn,
|
||||
*alphap,
|
||||
xp,
|
||||
blas_incx,
|
||||
*betap,
|
||||
yp,
|
||||
blas_incy );
|
||||
#else
|
||||
caxpby_( &nn,
|
||||
alphap,
|
||||
xp,
|
||||
&blas_incx,
|
||||
betap,
|
||||
yp,
|
||||
&blas_incy );
|
||||
#endif
|
||||
}
|
||||
else if ( bli_is_dcomplex( dt ) )
|
||||
{
|
||||
dcomplex* alphap = bli_obj_buffer( &alpha );
|
||||
dcomplex* xp = bli_obj_buffer( &x );
|
||||
dcomplex* betap = bli_obj_buffer( &beta );
|
||||
dcomplex* yp = bli_obj_buffer( &y );
|
||||
|
||||
#ifdef CBLAS
|
||||
cblas_zaxpby( nn,
|
||||
*alphap,
|
||||
xp,
|
||||
blas_incx,
|
||||
*betap,
|
||||
yp,
|
||||
blas_incy );
|
||||
#else
|
||||
zaxpby_( &nn,
|
||||
alphap,
|
||||
xp,
|
||||
&blas_incx,
|
||||
betap,
|
||||
yp,
|
||||
&blas_incy );
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
dtime_save = bli_clock_min_diff( dtime_save, dtime );
|
||||
}
|
||||
gflops = ( 3.0 * n ) / ( dtime_save * 1.0e9 );
|
||||
if ( bli_is_complex( dt ) ) gflops *= 4.0;
|
||||
|
||||
printf( "data_axpbyv_%s", BLAS );
|
||||
|
||||
p_inc++;
|
||||
printf( " %4lu [ %4lu %7.2f ];\n",
|
||||
(unsigned long)(p_inc),
|
||||
(unsigned long)n,
|
||||
gflops );
|
||||
|
||||
fprintf( fout, "%c\t %ld\t %lf\t %lf\t %lf\t %lf\t %6.3f\n",
|
||||
dt_ch, n, alpha_r, alpha_i, beta_r, beta_i, gflops );
|
||||
fflush( fout );
|
||||
|
||||
bli_obj_free( &x );
|
||||
bli_obj_free( &y );
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
40
bench/inputaxpbyv.txt
Normal file
40
bench/inputaxpbyv.txt
Normal file
@@ -0,0 +1,40 @@
|
||||
saxpbyv_ S 32 0.900000 0.000000 1 0.900000 0.000000 1
|
||||
saxpbyv_ S 64 1.000000 0.000000 1 1.000000 0.000000 1
|
||||
saxpbyv_ S 100 -1 0.000000 1 -1 0.000000 1
|
||||
saxpbyv_ S 200 -1.100000 0.000000 1 -1.100000 0.000000 1
|
||||
saxpbyv_ S 300 1.100000 0.000000 1 1.100000 0.000000 1
|
||||
saxpbyv_ S 400 0.900000 0.000000 1 0.900000 0.000000 1
|
||||
saxpbyv_ S 500 1.000000 0.000000 1 1.000000 0.000000 1
|
||||
saxpbyv_ S 1000 -1 0.000000 1 -1 0.000000 1
|
||||
saxpbyv_ S 5000 -1.100000 0.000000 1 -1.100000 0.000000 1
|
||||
saxpbyv_ S 10000 1.100000 0.000000 1 1.100000 0.000000 1
|
||||
daxpbyv_ D 32 0.900000 0.000000 1 0.900000 0.000000 1
|
||||
daxpbyv_ D 64 1.000000 0.000000 1 1.000000 0.000000 1
|
||||
daxpbyv_ D 100 -1 0.000000 1 -1 0.000000 1
|
||||
daxpbyv_ D 200 -1.100000 0.000000 1 -1.100000 0.000000 1
|
||||
daxpbyv_ D 300 1.100000 0.000000 1 1.100000 0.000000 1
|
||||
daxpbyv_ D 400 0.900000 0.000000 1 0.900000 0.000000 1
|
||||
daxpbyv_ D 500 1.000000 0.000000 1 1.000000 0.000000 1
|
||||
daxpbyv_ D 1000 -1 0.000000 1 -1 0.000000 1
|
||||
daxpbyv_ D 5000 -1.100000 0.000000 1 -1.100000 0.000000 1
|
||||
daxpbyv_ D 10000 1.100000 0.000000 1 1.100000 0.000000 1
|
||||
caxpbyv_ C 32 0.900000 -1.100000 1 0.900000 -1.100000 1
|
||||
caxpbyv_ C 64 1.000000 1.100000 1 1.000000 1.100000 1
|
||||
caxpbyv_ C 100 -1 1.000000 1 -1 1 1
|
||||
caxpbyv_ C 200 -1.100000 0.900000 1 -1.100000 0.900000 1
|
||||
caxpbyv_ C 300 1.100000 1.000000 1 1.100000 1 1
|
||||
caxpbyv_ C 400 0.900000 -1.100000 1 0.900000 -1.100000 1
|
||||
caxpbyv_ C 500 1.000000 1.000000 1 1.000000 1 1
|
||||
caxpbyv_ C 1000 -1 0.900000 1 -1 0.900000 1
|
||||
caxpbyv_ C 5000 -1.100000 -1 1 -1.100000 -1 1
|
||||
caxpbyv_ C 10000 1.100000 -1 1 1.100000 -1 1
|
||||
zaxpbyv_ Z 32 0.900000 -1.100000 1 0.900000 -1.100000 1
|
||||
zaxpbyv_ Z 64 1.000000 1.100000 1 1.000000 1.100000 1
|
||||
zaxpbyv_ Z 100 -1 1.000000 1 -1 1 1
|
||||
zaxpbyv_ Z 200 -1.100000 0.900000 1 -1.100000 0.900000 1
|
||||
zaxpbyv_ Z 300 1.100000 1.000000 1 1.100000 1 1
|
||||
zaxpbyv_ Z 400 0.900000 -1.100000 1 0.900000 -1.100000 1
|
||||
zaxpbyv_ Z 500 1.000000 1.000000 1 1.000000 1 1
|
||||
zaxpbyv_ Z 1000 -1 0.900000 1 -1 0.900000 1
|
||||
zaxpbyv_ Z 5000 -1.100000 -1 1 -1.100000 -1 1
|
||||
zaxpbyv_ Z 10000 1.100000 -1 1 1.100000 -1 1
|
||||
@@ -5,7 +5,7 @@
|
||||
libraries.
|
||||
|
||||
Copyright (C) 2014, The University of Texas at Austin
|
||||
Copyright (C) 2018 - 2021, Advanced Micro Devices, Inc. All rights reserved.
|
||||
Copyright (C) 2018 - 2022, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
@@ -95,12 +95,18 @@ void bli_cntx_init_zen( cntx_t* cntx )
|
||||
// Update the context with optimized level-1v kernels.
|
||||
bli_cntx_set_l1v_kers
|
||||
(
|
||||
20,
|
||||
24,
|
||||
#if 1
|
||||
// amaxv
|
||||
BLIS_AMAXV_KER, BLIS_FLOAT, bli_samaxv_zen_int,
|
||||
BLIS_AMAXV_KER, BLIS_DOUBLE, bli_damaxv_zen_int,
|
||||
#endif
|
||||
// axpbyv
|
||||
BLIS_AXPBYV_KER, BLIS_FLOAT, bli_saxpbyv_zen_int10,
|
||||
BLIS_AXPBYV_KER, BLIS_DOUBLE, bli_daxpbyv_zen_int10,
|
||||
BLIS_AXPBYV_KER, BLIS_SCOMPLEX, bli_caxpbyv_zen_int,
|
||||
BLIS_AXPBYV_KER, BLIS_DCOMPLEX, bli_zaxpbyv_zen_int,
|
||||
|
||||
// axpyv
|
||||
#if 0
|
||||
BLIS_AXPYV_KER, BLIS_FLOAT, bli_saxpyv_zen_int,
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
An object-based framework for developing high-performance BLAS-like
|
||||
libraries.
|
||||
Copyright (C) 2014, The University of Texas at Austin
|
||||
Copyright (C) 2018 - 2021, Advanced Micro Devices, Inc. All rights reserved.
|
||||
Copyright (C) 2018 - 2022, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
@@ -107,13 +107,17 @@ void bli_cntx_init_zen2( cntx_t* cntx )
|
||||
// Update the context with optimized level-1v kernels.
|
||||
bli_cntx_set_l1v_kers
|
||||
(
|
||||
20,
|
||||
24,
|
||||
#if 1
|
||||
// amaxv
|
||||
BLIS_AMAXV_KER, BLIS_FLOAT, bli_samaxv_zen_int,
|
||||
BLIS_AMAXV_KER, BLIS_DOUBLE, bli_damaxv_zen_int,
|
||||
#endif
|
||||
// axpyv
|
||||
// axpbyv
|
||||
BLIS_AXPBYV_KER, BLIS_FLOAT, bli_saxpbyv_zen_int10,
|
||||
BLIS_AXPBYV_KER, BLIS_DOUBLE, bli_daxpbyv_zen_int10,
|
||||
BLIS_AXPBYV_KER, BLIS_SCOMPLEX, bli_caxpbyv_zen_int,
|
||||
BLIS_AXPBYV_KER, BLIS_DCOMPLEX, bli_zaxpbyv_zen_int,
|
||||
|
||||
// axpyv
|
||||
BLIS_AXPYV_KER, BLIS_FLOAT, bli_saxpyv_zen_int10,
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
libraries.
|
||||
|
||||
Copyright (C) 2014, The University of Texas at Austin
|
||||
Copyright (C) 2018 - 2021, Advanced Micro Devices, Inc. All rights reserved.
|
||||
Copyright (C) 2018 - 2022, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
@@ -107,13 +107,17 @@ void bli_cntx_init_zen3( cntx_t* cntx )
|
||||
// Update the context with optimized level-1v kernels.
|
||||
bli_cntx_set_l1v_kers
|
||||
(
|
||||
20,
|
||||
24,
|
||||
#if 1
|
||||
// amaxv
|
||||
BLIS_AMAXV_KER, BLIS_FLOAT, bli_samaxv_zen_int,
|
||||
BLIS_AMAXV_KER, BLIS_DOUBLE, bli_damaxv_zen_int,
|
||||
#endif
|
||||
// axpyv
|
||||
// axpbyv
|
||||
BLIS_AXPBYV_KER, BLIS_FLOAT, bli_saxpbyv_zen_int10,
|
||||
BLIS_AXPBYV_KER, BLIS_DOUBLE, bli_daxpbyv_zen_int10,
|
||||
BLIS_AXPBYV_KER, BLIS_SCOMPLEX, bli_caxpbyv_zen_int,
|
||||
BLIS_AXPBYV_KER, BLIS_DCOMPLEX, bli_zaxpbyv_zen_int,
|
||||
|
||||
// axpyv
|
||||
BLIS_AXPYV_KER, BLIS_FLOAT, bli_saxpyv_zen_int10,
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
An object-based framework for developing high-performance BLAS-like
|
||||
libraries.
|
||||
|
||||
Copyright (C) 2021, Advanced Micro Devices, Inc. All rights reserved.
|
||||
Copyright (C) 2022, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
@@ -106,12 +106,17 @@ void bli_cntx_init_zen4( cntx_t* cntx )
|
||||
// Update the context with optimized level-1v kernels.
|
||||
bli_cntx_set_l1v_kers
|
||||
(
|
||||
20,
|
||||
24,
|
||||
|
||||
// amaxv
|
||||
BLIS_AMAXV_KER, BLIS_FLOAT, bli_samaxv_zen_int,
|
||||
BLIS_AMAXV_KER, BLIS_DOUBLE, bli_damaxv_zen_int,
|
||||
// axpyv
|
||||
|
||||
// axpbyv
|
||||
BLIS_AXPBYV_KER, BLIS_FLOAT, bli_saxpbyv_zen_int10,
|
||||
BLIS_AXPBYV_KER, BLIS_DOUBLE, bli_daxpbyv_zen_int10,
|
||||
BLIS_AXPBYV_KER, BLIS_SCOMPLEX, bli_caxpbyv_zen_int,
|
||||
BLIS_AXPBYV_KER, BLIS_DCOMPLEX, bli_zaxpbyv_zen_int,
|
||||
|
||||
// axpyv
|
||||
BLIS_AXPYV_KER, BLIS_FLOAT, bli_saxpyv_zen_int10,
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
target_sources("${PROJECT_NAME}"
|
||||
PRIVATE
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/bli_amaxv_zen_int.c
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/bli_axpbyv_zen_int.c
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/bli_axpbyv_zen_int10.c
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/bli_axpyv_zen_int.c
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/bli_axpyv_zen_int10.c
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/bli_copyv_zen_int.c
|
||||
|
||||
1099
kernels/zen/1/bli_axpbyv_zen_int.c
Normal file
1099
kernels/zen/1/bli_axpbyv_zen_int.c
Normal file
File diff suppressed because it is too large
Load Diff
709
kernels/zen/1/bli_axpbyv_zen_int10.c
Normal file
709
kernels/zen/1/bli_axpbyv_zen_int10.c
Normal file
@@ -0,0 +1,709 @@
|
||||
/*
|
||||
|
||||
BLIS
|
||||
An object-based framework for developing high-performance BLAS-like
|
||||
libraries.
|
||||
|
||||
Copyright (C) 2022, Advanced Micro Devices, Inc. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
- Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
- Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
- Neither the name(s) of the copyright holder(s) nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
*/
|
||||
|
||||
#include "immintrin.h"
|
||||
#include "blis.h"
|
||||
|
||||
/* Union types that expose a 256-bit AVX register both as a vector and as an
   array of scalar lanes (for scalar-side access or debugging). */

/* One 256-bit AVX register holds 8 SP (single-precision) elements. */
typedef union
{
    __m256 v;                                  // vector view
    float  f[8] __attribute__((aligned(64)));  // per-lane scalar view
} v8sf_t;

/* One 256-bit AVX register holds 4 DP (double-precision) elements. */
typedef union
{
    __m256d v;                                 // vector view
    double  d[4] __attribute__((aligned(64))); // per-lane scalar view
} v4df_t;
|
||||
|
||||
/**
 * saxpbyv kernel performs the axpbyv operation:
 *     y := beta * y + alpha * conjx(x)
 * where,
 *     x & y are single precision vectors of length n.
 *     alpha & beta are scalars.
 *
 * conjx is accepted for interface uniformity but never read here:
 * conjugation is a no-op for real vectors.
 */
void bli_saxpbyv_zen_int10
     (
       conj_t           conjx,
       dim_t            n,
       float*  restrict alpha,
       float*  restrict x, inc_t incx,
       float*  restrict beta,
       float*  restrict y, inc_t incy,
       cntx_t* restrict cntx
     )
{
    AOCL_DTL_TRACE_ENTRY(AOCL_DTL_LEVEL_TRACE_4)
    const dim_t n_elem_per_reg = 8; // one 256-bit ymm register holds 8 floats

    dim_t i; // element iterator (counts scalar elements, not registers)

    // local working copies of the vector pointers, advanced as data is consumed
    float* restrict x0;
    float* restrict y0;

    v8sf_t alphav;
    v8sf_t betav;
    v8sf_t yv[10]; // accumulators; up to 10 registers live per iteration

    /* If the vector dimension is zero, or if alpha & beta are both zero,
       return early.
       NOTE(review): when alpha == 0 && beta == 0 the mathematical result is
       y := 0, but this early return leaves y untouched -- presumably the
       framework handles that case before dispatching here; confirm against
       the caller. */
    if ( bli_zero_dim1( n ) ||
         ( PASTEMAC( s, eq0 )( *alpha ) && PASTEMAC( s, eq0 )( *beta ) ) )
    {
        AOCL_DTL_TRACE_EXIT(AOCL_DTL_LEVEL_TRACE_4)
        return;
    }

    // initialize local pointers
    x0 = x;
    y0 = y;

    if ( incx == 1 && incy == 1 )
    {
        // broadcast alpha & beta to all elements of respective vector registers
        alphav.v = _mm256_broadcast_ss( alpha );
        betav.v = _mm256_broadcast_ss( beta );

        // Processing 80 elements per loop, 10 FMAs
        for ( i = 0; ( i + 79 ) < n; i += 80 )
        {
            // loading input values
            yv[0].v = _mm256_loadu_ps( y0 + 0*n_elem_per_reg );
            yv[1].v = _mm256_loadu_ps( y0 + 1*n_elem_per_reg );
            yv[2].v = _mm256_loadu_ps( y0 + 2*n_elem_per_reg );
            yv[3].v = _mm256_loadu_ps( y0 + 3*n_elem_per_reg );
            yv[4].v = _mm256_loadu_ps( y0 + 4*n_elem_per_reg );
            yv[5].v = _mm256_loadu_ps( y0 + 5*n_elem_per_reg );
            yv[6].v = _mm256_loadu_ps( y0 + 6*n_elem_per_reg );
            yv[7].v = _mm256_loadu_ps( y0 + 7*n_elem_per_reg );
            yv[8].v = _mm256_loadu_ps( y0 + 8*n_elem_per_reg );
            yv[9].v = _mm256_loadu_ps( y0 + 9*n_elem_per_reg );

            // y' := beta * y
            yv[0].v = _mm256_mul_ps( betav.v, yv[0].v );
            yv[1].v = _mm256_mul_ps( betav.v, yv[1].v );
            yv[2].v = _mm256_mul_ps( betav.v, yv[2].v );
            yv[3].v = _mm256_mul_ps( betav.v, yv[3].v );
            yv[4].v = _mm256_mul_ps( betav.v, yv[4].v );
            yv[5].v = _mm256_mul_ps( betav.v, yv[5].v );
            yv[6].v = _mm256_mul_ps( betav.v, yv[6].v );
            yv[7].v = _mm256_mul_ps( betav.v, yv[7].v );
            yv[8].v = _mm256_mul_ps( betav.v, yv[8].v );
            yv[9].v = _mm256_mul_ps( betav.v, yv[9].v );

            // y := y' + alpha * x  (fused multiply-add)
            yv[0].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 0*n_elem_per_reg ),
                        yv[0].v
                      );
            yv[1].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 1*n_elem_per_reg ),
                        yv[1].v
                      );
            yv[2].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 2*n_elem_per_reg ),
                        yv[2].v
                      );
            yv[3].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 3*n_elem_per_reg ),
                        yv[3].v
                      );
            yv[4].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 4*n_elem_per_reg ),
                        yv[4].v
                      );
            yv[5].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 5*n_elem_per_reg ),
                        yv[5].v
                      );
            yv[6].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 6*n_elem_per_reg ),
                        yv[6].v
                      );
            yv[7].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 7*n_elem_per_reg ),
                        yv[7].v
                      );
            yv[8].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 8*n_elem_per_reg ),
                        yv[8].v
                      );
            yv[9].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 9*n_elem_per_reg ),
                        yv[9].v
                      );

            // storing the output
            _mm256_storeu_ps( ( y0 + 0*n_elem_per_reg ), yv[0].v );
            _mm256_storeu_ps( ( y0 + 1*n_elem_per_reg ), yv[1].v );
            _mm256_storeu_ps( ( y0 + 2*n_elem_per_reg ), yv[2].v );
            _mm256_storeu_ps( ( y0 + 3*n_elem_per_reg ), yv[3].v );
            _mm256_storeu_ps( ( y0 + 4*n_elem_per_reg ), yv[4].v );
            _mm256_storeu_ps( ( y0 + 5*n_elem_per_reg ), yv[5].v );
            _mm256_storeu_ps( ( y0 + 6*n_elem_per_reg ), yv[6].v );
            _mm256_storeu_ps( ( y0 + 7*n_elem_per_reg ), yv[7].v );
            _mm256_storeu_ps( ( y0 + 8*n_elem_per_reg ), yv[8].v );
            _mm256_storeu_ps( ( y0 + 9*n_elem_per_reg ), yv[9].v );

            x0 += 10 * n_elem_per_reg;
            y0 += 10 * n_elem_per_reg;
        }

        // Processing 40 elements per loop, 5 FMAs
        for ( ; ( i + 39 ) < n; i += 40 )
        {
            // loading input values
            yv[0].v = _mm256_loadu_ps( y0 + 0*n_elem_per_reg );
            yv[1].v = _mm256_loadu_ps( y0 + 1*n_elem_per_reg );
            yv[2].v = _mm256_loadu_ps( y0 + 2*n_elem_per_reg );
            yv[3].v = _mm256_loadu_ps( y0 + 3*n_elem_per_reg );
            yv[4].v = _mm256_loadu_ps( y0 + 4*n_elem_per_reg );

            // y' := beta * y
            yv[0].v = _mm256_mul_ps( betav.v, yv[0].v );
            yv[1].v = _mm256_mul_ps( betav.v, yv[1].v );
            yv[2].v = _mm256_mul_ps( betav.v, yv[2].v );
            yv[3].v = _mm256_mul_ps( betav.v, yv[3].v );
            yv[4].v = _mm256_mul_ps( betav.v, yv[4].v );

            // y := y' + alpha * x
            yv[0].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 0*n_elem_per_reg ),
                        yv[0].v
                      );
            yv[1].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 1*n_elem_per_reg ),
                        yv[1].v
                      );
            yv[2].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 2*n_elem_per_reg ),
                        yv[2].v
                      );
            yv[3].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 3*n_elem_per_reg ),
                        yv[3].v
                      );
            yv[4].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 4*n_elem_per_reg ),
                        yv[4].v
                      );

            // storing the output
            _mm256_storeu_ps( ( y0 + 0*n_elem_per_reg ), yv[0].v );
            _mm256_storeu_ps( ( y0 + 1*n_elem_per_reg ), yv[1].v );
            _mm256_storeu_ps( ( y0 + 2*n_elem_per_reg ), yv[2].v );
            _mm256_storeu_ps( ( y0 + 3*n_elem_per_reg ), yv[3].v );
            _mm256_storeu_ps( ( y0 + 4*n_elem_per_reg ), yv[4].v );

            x0 += 5 * n_elem_per_reg;
            y0 += 5 * n_elem_per_reg;
        }

        // Processing 32 elements per loop, 4 FMAs
        for ( ; ( i + 31 ) < n; i += 32 )
        {
            // loading input values
            yv[0].v = _mm256_loadu_ps( y0 + 0*n_elem_per_reg );
            yv[1].v = _mm256_loadu_ps( y0 + 1*n_elem_per_reg );
            yv[2].v = _mm256_loadu_ps( y0 + 2*n_elem_per_reg );
            yv[3].v = _mm256_loadu_ps( y0 + 3*n_elem_per_reg );

            // y' := beta * y
            yv[0].v = _mm256_mul_ps( betav.v, yv[0].v );
            yv[1].v = _mm256_mul_ps( betav.v, yv[1].v );
            yv[2].v = _mm256_mul_ps( betav.v, yv[2].v );
            yv[3].v = _mm256_mul_ps( betav.v, yv[3].v );

            // y := y' + alpha * x
            yv[0].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 0*n_elem_per_reg ),
                        yv[0].v
                      );
            yv[1].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 1*n_elem_per_reg ),
                        yv[1].v
                      );
            yv[2].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 2*n_elem_per_reg ),
                        yv[2].v
                      );
            yv[3].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 3*n_elem_per_reg ),
                        yv[3].v
                      );

            // storing the output
            _mm256_storeu_ps( ( y0 + 0*n_elem_per_reg ), yv[0].v );
            _mm256_storeu_ps( ( y0 + 1*n_elem_per_reg ), yv[1].v );
            _mm256_storeu_ps( ( y0 + 2*n_elem_per_reg ), yv[2].v );
            _mm256_storeu_ps( ( y0 + 3*n_elem_per_reg ), yv[3].v );

            x0 += 4 * n_elem_per_reg;
            y0 += 4 * n_elem_per_reg;
        }

        // Processing 16 elements per loop, 2 FMAs
        for ( ; ( i + 15 ) < n; i += 16 )
        {
            // loading input values
            yv[0].v = _mm256_loadu_ps( y0 + 0*n_elem_per_reg );
            yv[1].v = _mm256_loadu_ps( y0 + 1*n_elem_per_reg );

            // y' := beta * y
            yv[0].v = _mm256_mul_ps( betav.v, yv[0].v );
            yv[1].v = _mm256_mul_ps( betav.v, yv[1].v );

            // y := y' + alpha * x
            yv[0].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 0*n_elem_per_reg ),
                        yv[0].v
                      );
            yv[1].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 1*n_elem_per_reg ),
                        yv[1].v
                      );

            // storing the output
            _mm256_storeu_ps( ( y0 + 0*n_elem_per_reg ), yv[0].v );
            _mm256_storeu_ps( ( y0 + 1*n_elem_per_reg ), yv[1].v );

            x0 += 2 * n_elem_per_reg;
            y0 += 2 * n_elem_per_reg;
        }

        // Processing 8 elements per loop, 1 FMA
        for ( ; ( i + 7 ) < n; i += 8 )
        {
            // loading input values
            yv[0].v = _mm256_loadu_ps( y0 + 0*n_elem_per_reg );

            // y' := beta * y
            yv[0].v = _mm256_mul_ps( betav.v, yv[0].v );

            // y := y' + alpha * x
            yv[0].v = _mm256_fmadd_ps
                      (
                        alphav.v,
                        _mm256_loadu_ps( x0 + 0*n_elem_per_reg ),
                        yv[0].v
                      );

            // storing the output
            _mm256_storeu_ps( ( y0 + 0*n_elem_per_reg ), yv[0].v );

            x0 += 1 * n_elem_per_reg;
            y0 += 1 * n_elem_per_reg;
        }

        // Issue vzeroupper instruction to clear upper lanes of ymm registers.
        // This avoids a performance penalty caused by false dependencies when
        // transitioning from AVX to SSE instructions (which may occur as soon
        // as the n_left cleanup loop below if BLIS is compiled with
        // -mfpmath=sse).
        _mm256_zeroupper();

        // if there are leftover iterations, perform them with scalar code
        // (incx == incy == 1 in this branch, so the strides are unit here)
        for ( ; i < n; i++ )
        {
            *y0 = ( (*alpha) * (*x0) ) + ( (*beta) * (*y0) );

            x0 += incx;
            y0 += incy;
        }
    }
    else
    {
        // for non-unit increments, use scalar code
        for ( i = 0; i < n; ++i )
        {
            *y0 = ( (*alpha) * (*x0) ) + ( (*beta) * (*y0) );

            x0 += incx;
            y0 += incy;
        }
    }
    AOCL_DTL_TRACE_EXIT(AOCL_DTL_LEVEL_TRACE_4)
}
|
||||
|
||||
/**
|
||||
* daxpbyv kernel performs the axpbyv operation.
|
||||
* y := beta * y + alpha * conjx(x)
|
||||
* where,
|
||||
* x & y are double precision vectors of length n.
|
||||
* alpha & beta are scalers.
|
||||
*/
|
||||
void bli_daxpbyv_zen_int10
|
||||
(
|
||||
conj_t conjx,
|
||||
dim_t n,
|
||||
double* restrict alpha,
|
||||
double* restrict x, inc_t incx,
|
||||
double* restrict beta,
|
||||
double* restrict y, inc_t incy,
|
||||
cntx_t* restrict cntx
|
||||
)
|
||||
{
|
||||
AOCL_DTL_TRACE_ENTRY(AOCL_DTL_LEVEL_TRACE_4)
|
||||
const dim_t n_elem_per_reg = 4; // number of elements per register
|
||||
const dim_t n_iter_unroll = 10; // number of registers per iteration
|
||||
|
||||
dim_t i; // iterator
|
||||
|
||||
double* restrict x0;
|
||||
double* restrict y0;
|
||||
|
||||
v4df_t alphav;
|
||||
v4df_t betav;
|
||||
v4df_t y0v, y1v, y2v, y3v, y4v, y5v, y6v, y7v, y8v, y9v;
|
||||
|
||||
/* if the vector dimension is zero, or if alpha & beta are zero,
|
||||
return early. */
|
||||
if ( bli_zero_dim1( n ) ||
|
||||
( PASTEMAC( s, eq0 )( *alpha ) && PASTEMAC( s, eq0 )( *beta ) ) )
|
||||
{
|
||||
AOCL_DTL_TRACE_EXIT(AOCL_DTL_LEVEL_TRACE_4)
|
||||
return;
|
||||
}
|
||||
|
||||
// initialize local pointers
|
||||
x0 = x;
|
||||
y0 = y;
|
||||
|
||||
if ( incx == 1 && incy == 1 )
|
||||
{
|
||||
// broadcast alpha & beta to all elements of respective vector registers
|
||||
alphav.v = _mm256_broadcast_sd( alpha );
|
||||
betav.v = _mm256_broadcast_sd( beta );
|
||||
|
||||
// Using 10 FMAs per loop
|
||||
for ( i = 0; ( i + 39 ) < n; i += 40 )
|
||||
{
|
||||
// loading input y
|
||||
y0v.v = _mm256_loadu_pd( y0 + 0*n_elem_per_reg );
|
||||
y1v.v = _mm256_loadu_pd( y0 + 1*n_elem_per_reg );
|
||||
y2v.v = _mm256_loadu_pd( y0 + 2*n_elem_per_reg );
|
||||
y3v.v = _mm256_loadu_pd( y0 + 3*n_elem_per_reg );
|
||||
y4v.v = _mm256_loadu_pd( y0 + 4*n_elem_per_reg );
|
||||
y5v.v = _mm256_loadu_pd( y0 + 5*n_elem_per_reg );
|
||||
y6v.v = _mm256_loadu_pd( y0 + 6*n_elem_per_reg );
|
||||
y7v.v = _mm256_loadu_pd( y0 + 7*n_elem_per_reg );
|
||||
y8v.v = _mm256_loadu_pd( y0 + 8*n_elem_per_reg );
|
||||
y9v.v = _mm256_loadu_pd( y0 + 9*n_elem_per_reg );
|
||||
|
||||
// y' := y := beta * y
|
||||
y0v.v = _mm256_mul_pd( betav.v, y0v.v );
|
||||
y1v.v = _mm256_mul_pd( betav.v, y1v.v );
|
||||
y2v.v = _mm256_mul_pd( betav.v, y2v.v );
|
||||
y3v.v = _mm256_mul_pd( betav.v, y3v.v );
|
||||
y4v.v = _mm256_mul_pd( betav.v, y4v.v );
|
||||
y5v.v = _mm256_mul_pd( betav.v, y5v.v );
|
||||
y6v.v = _mm256_mul_pd( betav.v, y6v.v );
|
||||
y7v.v = _mm256_mul_pd( betav.v, y7v.v );
|
||||
y8v.v = _mm256_mul_pd( betav.v, y8v.v );
|
||||
y9v.v = _mm256_mul_pd( betav.v, y9v.v );
|
||||
|
||||
// y := y' + alpha * x
|
||||
// := beta * y + alpha * x
|
||||
y0v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 0*n_elem_per_reg ),
|
||||
y0v.v
|
||||
);
|
||||
y1v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 1*n_elem_per_reg ),
|
||||
y1v.v
|
||||
);
|
||||
y2v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 2*n_elem_per_reg ),
|
||||
y2v.v
|
||||
);
|
||||
y3v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 3*n_elem_per_reg ),
|
||||
y3v.v
|
||||
);
|
||||
y4v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 4*n_elem_per_reg ),
|
||||
y4v.v
|
||||
);
|
||||
y5v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 5*n_elem_per_reg ),
|
||||
y5v.v
|
||||
);
|
||||
y6v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 6*n_elem_per_reg ),
|
||||
y6v.v
|
||||
);
|
||||
y7v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 7*n_elem_per_reg ),
|
||||
y7v.v
|
||||
);
|
||||
y8v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 8*n_elem_per_reg ),
|
||||
y8v.v
|
||||
);
|
||||
y9v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 9*n_elem_per_reg ),
|
||||
y9v.v
|
||||
);
|
||||
|
||||
// storing the output
|
||||
_mm256_storeu_pd( ( y0 + 0*n_elem_per_reg ), y0v.v );
|
||||
_mm256_storeu_pd( ( y0 + 1*n_elem_per_reg ), y1v.v );
|
||||
_mm256_storeu_pd( ( y0 + 2*n_elem_per_reg ), y2v.v );
|
||||
_mm256_storeu_pd( ( y0 + 3*n_elem_per_reg ), y3v.v );
|
||||
_mm256_storeu_pd( ( y0 + 4*n_elem_per_reg ), y4v.v );
|
||||
_mm256_storeu_pd( ( y0 + 5*n_elem_per_reg ), y5v.v );
|
||||
_mm256_storeu_pd( ( y0 + 6*n_elem_per_reg ), y6v.v );
|
||||
_mm256_storeu_pd( ( y0 + 7*n_elem_per_reg ), y7v.v );
|
||||
_mm256_storeu_pd( ( y0 + 8*n_elem_per_reg ), y8v.v );
|
||||
_mm256_storeu_pd( ( y0 + 9*n_elem_per_reg ), y9v.v );
|
||||
|
||||
x0 += n_elem_per_reg * n_iter_unroll;
|
||||
y0 += n_elem_per_reg * n_iter_unroll;
|
||||
}
|
||||
|
||||
// Using 5 FMAs per loop
|
||||
for ( ; ( i + 19 ) < n; i += 20 )
|
||||
{
|
||||
// loading input y
|
||||
y0v.v = _mm256_loadu_pd( y0 + 0*n_elem_per_reg );
|
||||
y1v.v = _mm256_loadu_pd( y0 + 1*n_elem_per_reg );
|
||||
y2v.v = _mm256_loadu_pd( y0 + 2*n_elem_per_reg );
|
||||
y3v.v = _mm256_loadu_pd( y0 + 3*n_elem_per_reg );
|
||||
y4v.v = _mm256_loadu_pd( y0 + 4*n_elem_per_reg );
|
||||
|
||||
// y' := y := beta * y
|
||||
y0v.v = _mm256_mul_pd( betav.v, y0v.v );
|
||||
y1v.v = _mm256_mul_pd( betav.v, y1v.v );
|
||||
y2v.v = _mm256_mul_pd( betav.v, y2v.v );
|
||||
y3v.v = _mm256_mul_pd( betav.v, y3v.v );
|
||||
y4v.v = _mm256_mul_pd( betav.v, y4v.v );
|
||||
|
||||
// y := y' + alpha * x
|
||||
// := beta * y + alpha * x
|
||||
y0v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 0*n_elem_per_reg ),
|
||||
y0v.v
|
||||
);
|
||||
y1v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 1*n_elem_per_reg ),
|
||||
y1v.v
|
||||
);
|
||||
y2v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 2*n_elem_per_reg ),
|
||||
y2v.v
|
||||
);
|
||||
y3v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 3*n_elem_per_reg ),
|
||||
y3v.v
|
||||
);
|
||||
y4v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 4*n_elem_per_reg ),
|
||||
y4v.v
|
||||
);
|
||||
|
||||
// storing the output
|
||||
_mm256_storeu_pd( ( y0 + 0*n_elem_per_reg ), y0v.v );
|
||||
_mm256_storeu_pd( ( y0 + 1*n_elem_per_reg ), y1v.v );
|
||||
_mm256_storeu_pd( ( y0 + 2*n_elem_per_reg ), y2v.v );
|
||||
_mm256_storeu_pd( ( y0 + 3*n_elem_per_reg ), y3v.v );
|
||||
_mm256_storeu_pd( ( y0 + 4*n_elem_per_reg ), y4v.v );
|
||||
|
||||
x0 += n_elem_per_reg * 5;
|
||||
y0 += n_elem_per_reg * 5;
|
||||
}
|
||||
|
||||
// Using 2 FMAs per loop
|
||||
for ( ; ( i + 7 ) < n; i += 8 )
|
||||
{
|
||||
// loading input y
|
||||
y0v.v = _mm256_loadu_pd( y0 + 0*n_elem_per_reg );
|
||||
y1v.v = _mm256_loadu_pd( y0 + 1*n_elem_per_reg );
|
||||
|
||||
// y' := y := beta * y
|
||||
y0v.v = _mm256_mul_pd( betav.v, y0v.v );
|
||||
y1v.v = _mm256_mul_pd( betav.v, y1v.v );
|
||||
|
||||
// y := y' + alpha * x
|
||||
// := beta * y + alpha * x
|
||||
y0v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 0*n_elem_per_reg ),
|
||||
y0v.v
|
||||
);
|
||||
y1v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 1*n_elem_per_reg ),
|
||||
y1v.v
|
||||
);
|
||||
|
||||
// storing the output
|
||||
_mm256_storeu_pd( ( y0 + 0*n_elem_per_reg ), y0v.v );
|
||||
_mm256_storeu_pd( ( y0 + 1*n_elem_per_reg ), y1v.v );
|
||||
|
||||
x0 += n_elem_per_reg * 2;
|
||||
y0 += n_elem_per_reg * 2;
|
||||
}
|
||||
|
||||
// Using 1 FMAs per loop
|
||||
for ( ; ( i + 3 ) < n; i += 4 )
|
||||
{
|
||||
// loading input y
|
||||
y0v.v = _mm256_loadu_pd( y0 + 0*n_elem_per_reg );
|
||||
|
||||
// y' := y := beta * y
|
||||
y0v.v = _mm256_mul_pd( betav.v, y0v.v );
|
||||
|
||||
// y := y' + alpha * x
|
||||
// := beta * y + alpha * x
|
||||
y0v.v = _mm256_fmadd_pd
|
||||
(
|
||||
alphav.v,
|
||||
_mm256_loadu_pd( x0 + 0*n_elem_per_reg ),
|
||||
y0v.v
|
||||
);
|
||||
|
||||
// storing the output
|
||||
_mm256_storeu_pd( ( y0 + 0*n_elem_per_reg ), y0v.v );
|
||||
|
||||
x0 += n_elem_per_reg * 1;
|
||||
y0 += n_elem_per_reg * 1;
|
||||
}
|
||||
|
||||
// Issue vzeroupper instruction to clear upper lanes of ymm registers.
|
||||
// This avoids a performance penalty caused by false dependencies when
|
||||
// transitioning from from AVX to SSE instructions (which may occur
|
||||
// as soon as the n_left cleanup loop below if BLIS is compiled with
|
||||
// -mfpmath=sse).
|
||||
_mm256_zeroupper();
|
||||
|
||||
// if there are leftover iterations, perform them with scaler code
|
||||
for ( ; i < n; ++i )
|
||||
{
|
||||
*y0 = ( (*alpha) * (*x0) ) + ( (*beta) * (*y0) );
|
||||
|
||||
x0 += incx;
|
||||
y0 += incy;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// for non-unit increments, use scaler code
|
||||
for ( i = 0; i < n; ++i )
|
||||
{
|
||||
*y0 = ( (*alpha) * (*x0) ) + ( (*beta) * (*y0) );
|
||||
|
||||
x0 += incx;
|
||||
y0 += incy;
|
||||
}
|
||||
}
|
||||
AOCL_DTL_TRACE_EXIT(AOCL_DTL_LEVEL_TRACE_4)
|
||||
}
|
||||
@@ -56,6 +56,16 @@ AMAXV_KER_PROT( float, s, amaxv_zen_int_avx512 )
|
||||
AMAXV_KER_PROT( double, d, amaxv_zen_int )
|
||||
AMAXV_KER_PROT( double, d, amaxv_zen_int_avx512 )
|
||||
|
||||
// axpbyv (intrinsics)
|
||||
AXPBYV_KER_PROT( float, s, axpbyv_zen_int )
|
||||
AXPBYV_KER_PROT( double, d, axpbyv_zen_int )
|
||||
AXPBYV_KER_PROT( scomplex, c, axpbyv_zen_int )
|
||||
AXPBYV_KER_PROT( dcomplex, z, axpbyv_zen_int )
|
||||
|
||||
// axpbyv (intrinsics, unrolled x10)
|
||||
AXPBYV_KER_PROT( float, s, axpbyv_zen_int10 )
|
||||
AXPBYV_KER_PROT( double, d, axpbyv_zen_int10 )
|
||||
|
||||
// axpyv (intrinsics)
|
||||
AXPYV_KER_PROT( float, s, axpyv_zen_int )
|
||||
AXPYV_KER_PROT( double, d, axpyv_zen_int )
|
||||
|
||||
Reference in New Issue
Block a user