/***************************************************************************************************
 * Copyright (c) 2017 - 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

/*! \file
    \brief Statically sized array of elements that accommodates all CUTLASS-supported numeric types
           and is safe to use in a union.
*/

#pragma once

#include "cutlass/cutlass.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_types.h"
#include "cutlass/platform/platform.h"

namespace cutlass {

////////////////////////////////////////////////////////////////////////////////////////////////////

/// Statically sized array for any data type
template <
  typename T,
  int N,
  bool RegisterSized = sizeof_bits<T>::value >= 32
>
struct Array;

namespace detail {

template <typename T>
struct is_Array : platform::false_type {};

template <
  typename T,
  int N,
  bool RegisterSized
>
struct is_Array<Array<T, N, RegisterSized>> : platform::true_type {};

template <typename T>
constexpr bool is_Array_v = is_Array<T>::value;

} // namespace detail

////////////////////////////////////////////////////////////////////////////////////////////////////

/// Defines the size of an Array<> in bits
template <typename T, int N, bool RegisterSized>
struct sizeof_bits<Array<T, N, RegisterSized>> {
  static constexpr int value = sizeof(Array<T, N, RegisterSized>) * 8;
};

////////////////////////////////////////////////////////////////////////////////////////////////////

/// Returns true if the argument is a power of 2
CUTLASS_HOST_DEVICE
constexpr bool ispow2(unsigned x) {
  return x && (!(x & (x - 1)));
}

////////////////////////////////////////////////////////////////////////////////////////////////////

/// Returns the largest power of two not greater than the argument.
CUTLASS_HOST_DEVICE
constexpr unsigned floor_pow_2(unsigned x) {
  return (x == 0 || ispow2(x)) ? x : ((floor_pow_2(x >> 1)) << 1);
}
////////////////////////////////////////////////////////////////////////////////////////////////////

/// Statically sized array for any data type
template <
  typename T,
  int N
>
struct Array<T, N, true> {

  /// Storage type
  using Storage = T;

  /// Element type
  using Element = T;

  /// Number of storage elements
  //static std::size_t const kStorageElements = N;
  static constexpr size_t kStorageElements = N;

  /// Number of logical elements
  static constexpr size_t kElements = N;

  //
  // C++ standard members
  //

  typedef T value_type;
  typedef size_t size_type;
  typedef ptrdiff_t difference_type;
  typedef value_type &reference;
  typedef value_type const & const_reference;
  typedef value_type *pointer;
  typedef value_type const * const_pointer;

  //
  // Iterators
  //

  /// Bidirectional iterator over elements
  class iterator {

    /// Pointer to object
    T *ptr_;

  public:

    CUTLASS_HOST_DEVICE
    iterator(): ptr_(nullptr) { }

    CUTLASS_HOST_DEVICE
    iterator(T *_ptr): ptr_(_ptr) { }

    CUTLASS_HOST_DEVICE
    iterator &operator++() { ++ptr_; return *this; }

    CUTLASS_HOST_DEVICE
    iterator &operator--() { --ptr_; return *this; }

    CUTLASS_HOST_DEVICE
    iterator operator++(int) { iterator ret(*this); ++ptr_; return ret; }

    CUTLASS_HOST_DEVICE
    iterator operator--(int) { iterator ret(*this); --ptr_; return ret; }

    CUTLASS_HOST_DEVICE
    T &operator*() const { return *ptr_; }

    CUTLASS_HOST_DEVICE
    bool operator==(iterator const &other) const { return ptr_ == other.ptr_; }

    CUTLASS_HOST_DEVICE
    bool operator!=(iterator const &other) const { return ptr_ != other.ptr_; }
  };

  /// Bidirectional constant iterator over elements
  class const_iterator {

    /// Pointer to object
    const T *ptr_;

  public:

    CUTLASS_HOST_DEVICE
    const_iterator(): ptr_(nullptr) { }

    CUTLASS_HOST_DEVICE
    const_iterator(T const *_ptr): ptr_(_ptr) { }

    CUTLASS_HOST_DEVICE
    const_iterator &operator++() { ++ptr_; return *this; }

    CUTLASS_HOST_DEVICE
    const_iterator &operator--() { --ptr_; return *this; }

    CUTLASS_HOST_DEVICE
    const_iterator operator++(int) { const_iterator ret(*this); ++ptr_; return ret; }

    CUTLASS_HOST_DEVICE
    const_iterator operator--(int) { const_iterator ret(*this); --ptr_; return ret; }

    CUTLASS_HOST_DEVICE
    T const &operator*() const { return *ptr_; }

    CUTLASS_HOST_DEVICE
    bool operator==(const_iterator const &other) const { return ptr_ == other.ptr_; }

    CUTLASS_HOST_DEVICE
    bool operator!=(const_iterator const &other) const { return ptr_ != other.ptr_; }
  };

  /// Bidirectional iterator over elements in reverse order
  class reverse_iterator {

    /// Pointer to object
    T *ptr_;

  public:

    CUTLASS_HOST_DEVICE
    reverse_iterator(): ptr_(nullptr) { }

    CUTLASS_HOST_DEVICE
    reverse_iterator(T *_ptr): ptr_(_ptr) { }

    CUTLASS_HOST_DEVICE
    reverse_iterator &operator++() { --ptr_; return *this; }

    CUTLASS_HOST_DEVICE
    reverse_iterator &operator--() { ++ptr_; return *this; }

    CUTLASS_HOST_DEVICE
    reverse_iterator operator++(int) { reverse_iterator ret(*this); --ptr_; return ret; }

    CUTLASS_HOST_DEVICE
    reverse_iterator operator--(int) { reverse_iterator ret(*this); ++ptr_; return ret; }

    /// Dereferences the element preceding ptr_, matching std::reverse_iterator semantics
    CUTLASS_HOST_DEVICE
    T &operator*() const { return *(ptr_ - 1); }

    CUTLASS_HOST_DEVICE
    bool operator==(reverse_iterator const &other) const { return ptr_ == other.ptr_; }

    CUTLASS_HOST_DEVICE
    bool operator!=(reverse_iterator const &other) const { return ptr_ != other.ptr_; }
  };
  /// Bidirectional constant iterator over elements in reverse order
  class const_reverse_iterator {

    /// Pointer to object
    T const *ptr_;

  public:

    CUTLASS_HOST_DEVICE
    const_reverse_iterator(): ptr_(nullptr) { }

    CUTLASS_HOST_DEVICE
    const_reverse_iterator(T const *_ptr): ptr_(_ptr) { }

    CUTLASS_HOST_DEVICE
    const_reverse_iterator &operator++() { --ptr_; return *this; }

    CUTLASS_HOST_DEVICE
    const_reverse_iterator &operator--() { ++ptr_; return *this; }

    CUTLASS_HOST_DEVICE
    const_reverse_iterator operator++(int) { const_reverse_iterator ret(*this); --ptr_; return ret; }

    CUTLASS_HOST_DEVICE
    const_reverse_iterator operator--(int) { const_reverse_iterator ret(*this); ++ptr_; return ret; }

    CUTLASS_HOST_DEVICE
    T const &operator*() const { return *(ptr_ - 1); }

    CUTLASS_HOST_DEVICE
    bool operator==(const_reverse_iterator const &other) const { return ptr_ == other.ptr_; }

    CUTLASS_HOST_DEVICE
    bool operator!=(const_reverse_iterator const &other) const { return ptr_ != other.ptr_; }
  };

  /// Internal storage
  Storage storage[kElements];

  /// Efficient clear method
  CUTLASS_HOST_DEVICE
  void clear() {
    fill(T(0));
  }

  CUTLASS_HOST_DEVICE
  reference at(size_type pos) { return reinterpret_cast<reference>(storage[pos]); }

  CUTLASS_HOST_DEVICE
  const_reference at(size_type pos) const { return reinterpret_cast<const_reference>(storage[pos]); }

  CUTLASS_HOST_DEVICE
  reference operator[](size_type pos) { return reinterpret_cast<reference>(storage[pos]); }

  CUTLASS_HOST_DEVICE
  const_reference operator[](size_type pos) const { return reinterpret_cast<const_reference>(storage[pos]); }

  CUTLASS_HOST_DEVICE
  reference front() { return reinterpret_cast<reference>(storage[0]); }

  CUTLASS_HOST_DEVICE
  const_reference front() const { return reinterpret_cast<const_reference>(storage[0]); }

  CUTLASS_HOST_DEVICE
  reference back() { return reinterpret_cast<reference>(storage[kStorageElements - 1]); }

  CUTLASS_HOST_DEVICE
  const_reference back() const { return reinterpret_cast<const_reference>(storage[kStorageElements - 1]); }

  CUTLASS_HOST_DEVICE
  pointer data() { return reinterpret_cast<pointer>(storage); }

  CUTLASS_HOST_DEVICE
  const_pointer data() const { return reinterpret_cast<const_pointer>(storage); }

  CUTLASS_HOST_DEVICE
  pointer raw_data() { return reinterpret_cast<pointer>(storage); }

  CUTLASS_HOST_DEVICE
  const_pointer raw_data() const { return reinterpret_cast<const_pointer>(storage); }

  CUTLASS_HOST_DEVICE
  constexpr bool empty() const { return !kElements; }

  CUTLASS_HOST_DEVICE
  constexpr size_type size() const { return kElements; }

  CUTLASS_HOST_DEVICE
  constexpr size_type max_size() const { return kElements; }

  CUTLASS_HOST_DEVICE
  void fill(T const &value) {
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < int(kElements); ++i) {
      storage[i] = static_cast<T>(value);
    }
  }

  CUTLASS_HOST_DEVICE
  iterator begin() { return iterator(storage); }

  CUTLASS_HOST_DEVICE
  const_iterator begin() const { return cbegin(); }

  CUTLASS_HOST_DEVICE
  const_iterator cbegin() const { return const_iterator(storage); }

  CUTLASS_HOST_DEVICE
  iterator end() { return iterator(reinterpret_cast<pointer>(storage + kStorageElements)); }

  CUTLASS_HOST_DEVICE
  const_iterator end() const { return cend(); }

  CUTLASS_HOST_DEVICE
  const_iterator cend() const { return const_iterator(reinterpret_cast<const_pointer>(storage + kStorageElements)); }

  CUTLASS_HOST_DEVICE
  reverse_iterator rbegin() { return reverse_iterator(reinterpret_cast<pointer>(storage + kStorageElements)); }

  CUTLASS_HOST_DEVICE
  const_reverse_iterator rbegin() const { return crbegin(); }

  CUTLASS_HOST_DEVICE
  const_reverse_iterator crbegin() const { return const_reverse_iterator(reinterpret_cast<const_pointer>(storage + kStorageElements)); }

  CUTLASS_HOST_DEVICE
  reverse_iterator rend() { return reverse_iterator(reinterpret_cast<pointer>(storage)); }

  CUTLASS_HOST_DEVICE
  const_reverse_iterator rend() const { return crend(); }

  CUTLASS_HOST_DEVICE
  const_reverse_iterator crend() const { return const_reverse_iterator(reinterpret_cast<const_pointer>(storage)); }

  //
  // Comparison operators
  //

};
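////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Usage sketch (illustrative only, not part of the library interface): Array<T, N> behaves like a
// fixed-size container with std::array-style accessors. The snippet below assumes a translation
// unit that includes this header.
//
//   cutlass::Array<float, 4> frag;
//   frag.fill(1.5f);                       // assign every element
//   frag[2] = 3.0f;                        // element access
//
//   float sum = 0.0f;
//   for (auto it = frag.begin(); it != frag.end(); ++it) {
//     sum += *it;                          // bidirectional iteration
//   }
//
////////////////////////////////////////////////////////////////////////////////////////////////////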
////////////////////////////////////////////////////////////////////////////////////////////////////
// Factories
////////////////////////////////////////////////////////////////////////////////////////////////////

template <typename Element>
CUTLASS_HOST_DEVICE
Array<Element, 1> make_Array(Element x) {
  return {x};
}

template <typename Element>
CUTLASS_HOST_DEVICE
Array<Element, 2> make_Array(Element x, Element y) {
  return {x,y};
}

template <typename Element>
CUTLASS_HOST_DEVICE
Array<Element, 3> make_Array(Element x, Element y, Element z) {
  return {x,y,z};
}

template <typename Element>
CUTLASS_HOST_DEVICE
Array<Element, 4> make_Array(Element x, Element y, Element z, Element w) {
  return {x,y,z,w};
}

/////////////////////////////////////////////////////////////////////////////////////////////////
// functional.h numeric specializations
/////////////////////////////////////////////////////////////////////////////////////////////////

template <typename T, int N>
struct absolute_value_op< Array<T, N> > {

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs) const {
    Array<T, N> result;
    absolute_value_op<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(lhs[i]);
    }
    return result;
  }
};

template <typename T, int N>
struct plus<Array<T, N>> {

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
    Array<T, N> result;
    plus<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(lhs[i], rhs[i]);
    }
    return result;
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
    Array<T, N> result;
    plus<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(lhs[i], scalar);
    }
    return result;
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(T const &scalar, Array<T, N> const &rhs) const {
    Array<T, N> result;
    plus<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(scalar, rhs[i]);
    }
    return result;
  }
};

template <typename T, int N>
struct minus<Array<T, N>> {

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
    Array<T, N> result;
    minus<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(lhs[i], rhs[i]);
    }
    return result;
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
    Array<T, N> result;
    minus<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(lhs[i], scalar);
    }
    return result;
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(T const &scalar, Array<T, N> const &rhs) const {
    Array<T, N> result;
    minus<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(scalar, rhs[i]);
    }
    return result;
  }
};

template <typename T, int N>
struct multiplies<Array<T, N>> {

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
    Array<T, N> result;
    multiplies<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(lhs[i], rhs[i]);
    }
    return result;
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
    Array<T, N> result;
    multiplies<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(lhs[i], scalar);
    }
    return result;
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(T const &scalar, Array<T, N> const &rhs) const {
    Array<T, N> result;
    multiplies<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(scalar, rhs[i]);
    }
    return result;
  }
};
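// Usage sketch (illustrative only): make_Array deduces small fixed-size arrays, and the
// specializations above apply an operation element-wise, broadcasting scalar arguments across
// the array.
//
//   auto v = cutlass::make_Array(1.0f, 2.0f, 3.0f, 4.0f);   // Array<float, 4>
//   auto w = cutlass::make_Array(4.0f, 3.0f, 2.0f, 1.0f);
//
//   cutlass::plus<cutlass::Array<float, 4>> add;
//   auto s = add(v, w);        // element-wise sum
//   auto t = add(v, 10.0f);    // array + broadcast scalar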
template <typename T, int N, bool PropogateNaN>
struct maximum_absolute_value_reduction<Array<T, N>, PropogateNaN> {

  CUTLASS_HOST_DEVICE
  T operator() (T const& scalar, Array<T, N> const& rhs) const {
    T result = scalar;
    maximum_absolute_value_reduction<T, PropogateNaN> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result = scalar_op(result, rhs[i]);
    }
    return result;
  }
};

template <typename T, int N>
struct scale<Array<T, N>> {
  T const scaling_factor_;

  CUTLASS_HOST_DEVICE
  scale(T scaling_factor) : scaling_factor_(scaling_factor) {
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const & rhs) const {
    Array<T, N> result;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = rhs[i] * scaling_factor_;
    }
    return result;
  }
};

template <typename T, int N>
struct divides<Array<T, N>> {

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
    Array<T, N> result;
    divides<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(lhs[i], rhs[i]);
    }
    return result;
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
    Array<T, N> result;
    divides<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(lhs[i], scalar);
    }
    return result;
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(T const &scalar, Array<T, N> const &rhs) const {
    Array<T, N> result;
    divides<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(scalar, rhs[i]);
    }
    return result;
  }
};

template <typename T, int N>
struct reciprocal_approximate<Array<T, N>> {

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs) const {
    Array<T, N> result;
    reciprocal_approximate<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(lhs[i]);
    }
    return result;
  }
};

template <typename T, int N>
struct reciprocal_approximate_ftz<Array<T, N>> {

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs) const {
    Array<T, N> result;
    reciprocal_approximate_ftz<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(lhs[i]);
    }
    return result;
  }
};

template <typename T, int N, bool PropagateNaN>
struct maximum<Array<T, N>, PropagateNaN> {

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
    Array<T, N> result;
    maximum<T, PropagateNaN> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(lhs[i], rhs[i]);
    }
    return result;
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
    Array<T, N> result;
    maximum<T, PropagateNaN> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(lhs[i], scalar);
    }
    return result;
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(T const &scalar, Array<T, N> const &rhs) const {
    Array<T, N> result;
    maximum<T, PropagateNaN> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(scalar, rhs[i]);
    }
    return result;
  }
};
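// Usage sketch (illustrative only): the PropagateNaN parameter on maximum (above) and minimum
// (below) selects NaN-propagating variants, in which a NaN in either operand yields a NaN result;
// the default (false) uses the plain comparison-based definition.
//
//   cutlass::Array<float, 4> v, w;                               // assume initialized
//   cutlass::maximum<cutlass::Array<float, 4>, true> max_nan;    // NaN-propagating
//   cutlass::minimum<cutlass::Array<float, 4>>       min_op;     // default behavior
//
//   auto hi = max_nan(v, w);      // element-wise max of two arrays
//   auto lo = min_op(v, 0.0f);    // element-wise min against a broadcast scalar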
template <typename T, int N, bool PropagateNaN>
struct minimum<Array<T, N>, PropagateNaN> {

  CUTLASS_HOST_DEVICE
  static T scalar_op(T const &lhs, T const &rhs) {
    return (rhs < lhs ? rhs : lhs);
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
    Array<T, N> result;
    minimum<T, PropagateNaN> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(lhs[i], rhs[i]);
    }
    return result;
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
    Array<T, N> result;
    minimum<T, PropagateNaN> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(lhs[i], scalar);
    }
    return result;
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(T const &scalar, Array<T, N> const &rhs) const {
    Array<T, N> result;
    minimum<T, PropagateNaN> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(scalar, rhs[i]);
    }
    return result;
  }
};

template <typename T, int N>
struct minimum_with_nan_propagation<Array<T, N>> : minimum<Array<T, N>, true>
{};

template <typename T, int N>
struct negate<Array<T, N>> {

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs) const {
    Array<T, N> result;
    negate<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(lhs[i]);
    }
    return result;
  }
};

/// Fused multiply-add
template <typename T, int N>
struct multiply_add<Array<T, N>, Array<T, N>, Array<T, N>> {

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &a, Array<T, N> const &b, Array<T, N> const &c) const {
    Array<T, N> result;
    multiply_add<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(a[i], b[i], c[i]);
    }
    return result;
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &a, T const &scalar, Array<T, N> const &c) const {
    Array<T, N> result;
    multiply_add<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(a[i], scalar, c[i]);
    }
    return result;
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(T const &scalar, Array<T, N> const &b, Array<T, N> const &c) const {
    Array<T, N> result;
    multiply_add<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(scalar, b[i], c[i]);
    }
    return result;
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &a, Array<T, N> const &b, T const &scalar) const {
    Array<T, N> result;
    multiply_add<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(a[i], b[i], scalar);
    }
    return result;
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &a, T const &scalar_b, T const &scalar_c) const {
    Array<T, N> result;
    multiply_add<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(a[i], scalar_b, scalar_c);
    }
    return result;
  }
};

/// Fused square-and-plus
template <typename T, int N>
struct square_and_plus<Array<T, N>> {

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
    multiply_add<Array<T, N>, Array<T, N>, Array<T, N>> ma_op;
    return ma_op(rhs, rhs, lhs);
  }

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &lhs, T const &rhs) const {
    plus<Array<T, N>> plus_op;
    multiplies<T> multiplies_op;
    return plus_op(multiplies_op(rhs, rhs), lhs);
  }
};

/// Inverse-square-root
template <typename T, int N>
struct inverse_square_root<Array<T, N>> {

  CUTLASS_HOST_DEVICE
  Array<T, N> operator()(Array<T, N> const &a) const {
    Array<T, N> result;
    inverse_square_root<T> scalar_op;
    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N; ++i) {
      result[i] = scalar_op(a[i]);
    }
    return result;
  }
};

template <int N>
struct inverse_square_root<Array<half_t, N>> {

  CUTLASS_HOST_DEVICE
  Array<half_t, N> operator()(Array<half_t, N> const & a) const {
    Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
    __half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
    __half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a);

    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < N / 2; ++i) {
      result_ptr[i] = h2rsqrt(a_ptr[i]);
    }

    if constexpr (N % 2) {
      __half const *a_residual_ptr = reinterpret_cast<__half const *>(&a);
      __half d_residual = hrsqrt(a_residual_ptr[N - 1]);
      result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
    }
#else
    inverse_square_root<half_t> scalar_op;
CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = scalar_op(a[i]); } #endif return result; } }; /// Fused multiply-add-relu0 template struct multiply_add_relu0, Array, Array> { CUTLASS_HOST_DEVICE Array operator()(Array const &a, Array const &b, Array const &c) const { Array result; multiply_add scalar_op; maximum mx; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = mx(scalar_op(a[i], b[i], c[i]), T(0)); } return result; } CUTLASS_HOST_DEVICE Array operator()(Array const &a, T const &scalar, Array const &c) const { Array result; multiply_add scalar_op; maximum mx; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = mx(scalar_op(a[i], scalar, c[i]), T(0)); } return result; } CUTLASS_HOST_DEVICE Array operator()(T const &scalar, Array const &b, Array const &c) const { Array result; multiply_add scalar_op; maximum mx; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = mx(scalar_op(scalar, b[i], c[i]), T(0)); } return result; } }; template struct conjugate > { CUTLASS_HOST_DEVICE Array operator()(Array const &a) const { conjugate conj_op; Array ca; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { ca[i] = conj_op(a[i]); } return ca; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // functional.h numeric specializations targeting SIMD instructions in device code. ///////////////////////////////////////////////////////////////////////////////////////////////// template struct plus> { CUTLASS_HOST_DEVICE Array operator()(Array const & lhs, Array const &rhs) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs); __half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hadd2(lhs_ptr[i], rhs_ptr[i]); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs); __half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs); __half d_residual = __hadd(a_residual_ptr[N - 1], b_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = lhs[i] + rhs[i]; } #endif return result; } CUTLASS_HOST_DEVICE Array operator()(half_t const & lhs, Array const &rhs) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs)); __half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hadd2(lhs_pair, rhs_ptr[i]); } if constexpr (N % 2) { __half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs); __half d_residual = __hadd(reinterpret_cast<__half const &>(lhs), b_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = lhs + rhs[i]; } #endif return result; } CUTLASS_HOST_DEVICE Array operator()(Array const & lhs, half_t const &rhs) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs); __half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs)); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; 
++i) { result_ptr[i] = __hadd2(lhs_ptr[i], rhs_pair); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs); __half d_residual = __hadd(a_residual_ptr[N - 1], reinterpret_cast<__half const &>(rhs)); result[N - 1] = reinterpret_cast(d_residual); } #else CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = lhs[i] + rhs; } #endif return result; } }; template struct minus> { CUTLASS_HOST_DEVICE Array operator()(Array const & lhs, Array const &rhs) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs); __half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hsub2(lhs_ptr[i], rhs_ptr[i]); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs); __half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs); __half d_residual = __hsub(a_residual_ptr[N - 1], b_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = lhs[i] - rhs[i]; } #endif return result; } CUTLASS_HOST_DEVICE Array operator()(half_t const & lhs, Array const &rhs) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs)); __half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hsub2(lhs_pair, rhs_ptr[i]); } if constexpr (N % 2) { __half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs); __half d_residual = __hsub(reinterpret_cast<__half const &>(lhs), b_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = lhs - rhs[i]; } #endif return result; } CUTLASS_HOST_DEVICE Array operator()(Array const & lhs, half_t const &rhs) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs); __half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs)); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hsub2(lhs_ptr[i], rhs_pair); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs); __half d_residual = __hsub(a_residual_ptr[N - 1], reinterpret_cast<__half const &>(rhs)); result[N - 1] = reinterpret_cast(d_residual); } #else CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = lhs[i] - rhs; } #endif return result; } }; template struct multiplies> { CUTLASS_HOST_DEVICE Array operator()(Array const & lhs, Array const &rhs) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs); __half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hmul2(lhs_ptr[i], rhs_ptr[i]); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs); __half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs); __half d_residual = __hmul(a_residual_ptr[N 
- 1], b_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = lhs[i] * rhs[i]; } #endif return result; } CUTLASS_HOST_DEVICE Array operator()(half_t const & lhs, Array const &rhs) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs)); __half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hmul2(lhs_pair, rhs_ptr[i]); } if constexpr (N % 2) { __half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs); __half d_residual = __hmul( reinterpret_cast<__half const &>(lhs), b_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = lhs * rhs[i]; } #endif return result; } CUTLASS_HOST_DEVICE Array operator()(Array const & lhs, half_t const &rhs) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs); __half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs)); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hmul2(lhs_ptr[i], rhs_pair); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs); __half d_residual = __hmul( a_residual_ptr[N - 1], reinterpret_cast<__half const &>(rhs)); result[N - 1] = reinterpret_cast(d_residual); } #else CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = lhs[i] * rhs; } #endif return result; } }; template struct divides> { CUTLASS_HOST_DEVICE Array operator()(Array const & lhs, Array const &rhs) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs); __half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __h2div(lhs_ptr[i], rhs_ptr[i]); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs); __half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs); __half d_residual = __hdiv( a_residual_ptr[N - 1], b_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = lhs[i] / rhs[i]; } #endif return result; } CUTLASS_HOST_DEVICE Array operator()(half_t const & lhs, Array const &rhs) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs)); __half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __h2div(lhs_pair, rhs_ptr[i]); } if constexpr (N % 2) { __half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs); __half d_residual = __hdiv( reinterpret_cast<__half const &>(lhs), b_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = lhs / rhs[i]; } #endif return result; } CUTLASS_HOST_DEVICE Array operator()(Array const & lhs, half_t const &rhs) const { Array result; 
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs); __half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs)); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __h2div(lhs_ptr[i], rhs_pair); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs); __half d_residual = __hdiv( a_residual_ptr[N - 1], reinterpret_cast<__half const &>(rhs)); result[N - 1] = reinterpret_cast(d_residual); } #else CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = lhs[i] / rhs; } #endif return result; } }; template struct negate> { CUTLASS_HOST_DEVICE Array operator()(Array const & lhs) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *source_ptr = reinterpret_cast<__half2 const *>(&lhs); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hneg2(source_ptr[i]); } if constexpr (N % 2) { half_t x = -lhs[N - 1]; __half lhs_val = reinterpret_cast<__half const &>(x); result[N - 1] = reinterpret_cast(lhs_val); } #else CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = -lhs[i]; } #endif return result; } }; /// Fused multiply-add template struct multiply_add, Array, Array> { CUTLASS_HOST_DEVICE Array operator()( Array const &a, Array const &b, Array const &c) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a); __half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b); __half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hfma2(a_ptr[i], b_ptr[i], c_ptr[i]); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&a); __half const *b_residual_ptr = reinterpret_cast<__half const *>(&b); __half const *c_residual_ptr = reinterpret_cast<__half const *>(&c); __half d_residual = __hfma( a_residual_ptr[N - 1], b_residual_ptr[N - 1], c_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else multiply_add op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = op(a[i], b[i], c[i]); } #endif return result; } CUTLASS_HOST_DEVICE Array operator()( half_t const &a, Array const &b, Array const &c) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 a_pair = __half2half2(reinterpret_cast<__half const &>(a)); __half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b); __half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hfma2(a_pair, b_ptr[i], c_ptr[i]); } if constexpr (N % 2) { __half const *b_residual_ptr = reinterpret_cast<__half const *>(&b); __half const *c_residual_ptr = reinterpret_cast<__half const *>(&c); __half d_residual = __hfma( reinterpret_cast<__half const &>(a), b_residual_ptr[N - 1], c_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else multiply_add op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = op(a, b[i], c[i]); } #endif return result; } CUTLASS_HOST_DEVICE Array operator()( Array const &a, half_t const &b, Array const &c) const { Array result; #if defined(__CUDA_ARCH__) 
&& (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a); __half2 b_pair = __half2half2(reinterpret_cast<__half const &>(b)); __half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hfma2(a_ptr[i], b_pair, c_ptr[i]); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&a); __half const *c_residual_ptr = reinterpret_cast<__half const *>(&c); __half d_residual = __hfma( a_residual_ptr[N - 1], reinterpret_cast<__half const &>(b), c_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else multiply_add op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = op(a[i], b, c[i]); } #endif return result; } CUTLASS_HOST_DEVICE Array operator()( Array const &a, Array const &b, half_t const &c) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a); __half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b); __half2 c_pair = __half2half2(reinterpret_cast<__half const &>(c)); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hfma2(a_ptr[i], b_ptr[i], c_pair); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&a); __half const *b_residual_ptr = reinterpret_cast<__half const *>(&b); __half d_residual = __hfma( a_residual_ptr[N - 1], b_residual_ptr[N - 1], reinterpret_cast<__half const &>(c)); result[N - 1] = reinterpret_cast(d_residual); } #else multiply_add op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = op(a[i], b[i], c); } #endif return result; } CUTLASS_HOST_DEVICE Array operator()( Array const &a, half_t const &b, half_t const &c) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a); __half2 b_pair = __half2half2(reinterpret_cast<__half const &>(b)); __half2 c_pair = __half2half2(reinterpret_cast<__half const &>(c)); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hfma2(a_ptr[i], b_pair, c_pair); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&a); __half d_residual = __hfma( a_residual_ptr[N - 1], reinterpret_cast<__half const &>(b), reinterpret_cast<__half const &>(c)); result[N - 1] = reinterpret_cast(d_residual); } #else multiply_add op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = op(a[i], b, c); } #endif return result; } }; /// Fused multiply-add-relu0 template struct multiply_add_relu0, Array, Array> { CUTLASS_HOST_DEVICE Array operator()( Array const &a, Array const &b, Array const &c) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a); __half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b); __half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hfma2_relu(a_ptr[i], b_ptr[i], c_ptr[i]); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&a); __half const *b_residual_ptr = reinterpret_cast<__half const *>(&b); __half const *c_residual_ptr = 
reinterpret_cast<__half const *>(&c); __half d_residual = __hfma_relu( a_residual_ptr[N - 1], b_residual_ptr[N - 1], c_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else multiply_add op; maximum mx; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = mx(op(a[i], b[i], c[i]), (half_t)0); } #endif return result; } CUTLASS_HOST_DEVICE Array operator()( half_t const &a, Array const &b, Array const &c) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 a_pair = __half2half2(reinterpret_cast<__half const &>(a)); __half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b); __half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hfma2_relu(a_pair, b_ptr[i], c_ptr[i]); } if constexpr (N % 2) { __half const *b_residual_ptr = reinterpret_cast<__half const *>(&b); __half const *c_residual_ptr = reinterpret_cast<__half const *>(&c); __half d_residual = __hfma_relu( reinterpret_cast<__half const &>(a), b_residual_ptr[N - 1], c_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else multiply_add op; maximum mx; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = mx(op(a, b[i], c[i]), half_t(0)); } #endif return result; } CUTLASS_HOST_DEVICE Array operator()( Array const &a, half_t const &b, Array const &c) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a); __half2 b_pair = __half2half2(reinterpret_cast<__half const &>(b)); __half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hfma2_relu(a_ptr[i], b_pair, c_ptr[i]); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&a); __half const *c_residual_ptr = reinterpret_cast<__half const *>(&c); __half d_residual = __hfma_relu( a_residual_ptr[N - 1], reinterpret_cast<__half const &>(b), c_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else multiply_add op; maximum mx; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = mx(op(a[i], b, c[i]), half_t(0)); } #endif return result; } CUTLASS_HOST_DEVICE Array operator()( Array const &a, Array const &b, half_t const &c) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a); __half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b); __half2 c_pair = __half2half2(reinterpret_cast<__half const &>(c)); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = __hfma2_relu(a_ptr[i], b_ptr[i], c_pair); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&a); __half const *b_residual_ptr = reinterpret_cast<__half const *>(&b); __half d_residual = __hfma_relu( a_residual_ptr[N - 1], b_residual_ptr[N - 1], reinterpret_cast<__half const &>(c)); result[N - 1] = reinterpret_cast(d_residual); } #else multiply_add op; maximum mx; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = mx(op(a[i], b[i], c), half_t(0)); } #endif return result; } }; template struct minimum, PropagateNaN> { CUTLASS_HOST_DEVICE Array operator()(Array const & lhs, Array const &rhs) const { Array result; #if 
defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs); __half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = PropagateNaN ? __hmin2_nan(lhs_ptr[i], rhs_ptr[i]) : __hmin2(lhs_ptr[i], rhs_ptr[i]); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs); __half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs); __half d_residual = PropagateNaN ? __hmin_nan(a_residual_ptr[N - 1], b_residual_ptr[N - 1]) : __hmin(a_residual_ptr[N - 1], b_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else minimum mn; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = mn(lhs[i],rhs[i]); } #endif return result; } CUTLASS_HOST_DEVICE Array operator()(half_t const & lhs, Array const &rhs) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs)); __half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = PropagateNaN ? __hmin2_nan(lhs_pair, rhs_ptr[i]) : __hmin2(lhs_pair, rhs_ptr[i]); } if constexpr (N % 2) { __half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs); __half d_residual = PropagateNaN ? __hmin_nan(reinterpret_cast<__half const &>(lhs), b_residual_ptr[N - 1]) : __hmin(reinterpret_cast<__half const &>(lhs), b_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else minimum mn; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = mn(lhs, rhs[i]); } #endif return result; } CUTLASS_HOST_DEVICE Array operator()(Array const & lhs, half_t const &rhs) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs); __half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs)); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = PropagateNaN ? __hmin2_nan(lhs_ptr[i], rhs_pair) : __hmin2(lhs_ptr[i], rhs_pair); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs); __half d_residual = PropagateNaN ? __hmin_nan(a_residual_ptr[N - 1], reinterpret_cast<__half const &>(rhs)) : __hmin(a_residual_ptr[N - 1], reinterpret_cast<__half const &>(rhs)); result[N - 1] = reinterpret_cast(d_residual); } #else minimum mn; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = mn(lhs[i], rhs); } #endif return result; } }; template struct maximum, PropagateNaN> { CUTLASS_HOST_DEVICE Array operator()(Array const & lhs, Array const &rhs) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs); __half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = PropagateNaN ? __hmax2_nan(lhs_ptr[i], rhs_ptr[i]) : __hmax2(lhs_ptr[i], rhs_ptr[i]); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs); __half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs); __half d_residual = PropagateNaN ? 
__hmax(a_residual_ptr[N - 1], b_residual_ptr[N - 1]) : __hmax_nan(a_residual_ptr[N - 1], b_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else maximum mx; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = mx(lhs[i], rhs[i]); } #endif return result; } CUTLASS_HOST_DEVICE Array operator()(half_t const & lhs, Array const &rhs) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs)); __half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = PropagateNaN ? __hmax2_nan(lhs_pair, rhs_ptr[i]) : __hmax2(lhs_pair, rhs_ptr[i]); } if constexpr (N % 2) { __half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs); __half d_residual = PropagateNaN ? __hmax_nan(reinterpret_cast<__half const &>(lhs), b_residual_ptr[N - 1]) : __hmax(reinterpret_cast<__half const &>(lhs), b_residual_ptr[N - 1]); result[N - 1] = reinterpret_cast(d_residual); } #else maximum mx; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = mx(lhs, rhs[i]); } #endif return result; } CUTLASS_HOST_DEVICE Array operator()(Array const & lhs, half_t const &rhs) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) __half2 *result_ptr = reinterpret_cast<__half2 *>(&result); __half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs); __half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs)); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { result_ptr[i] = PropagateNaN ? __hmax2_nan(lhs_ptr[i], rhs_pair) : __hmax2(lhs_ptr[i], rhs_pair); } if constexpr (N % 2) { __half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs); __half d_residual = PropagateNaN ? 
__hmax_nan(a_residual_ptr[N - 1], reinterpret_cast<__half const &>(rhs)) : __hmax(a_residual_ptr[N - 1], reinterpret_cast<__half const &>(rhs)); result[N - 1] = reinterpret_cast(d_residual); } #else maximum mx; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = mx(lhs[i], rhs); } #endif return result; } }; /// Fused multiply-add template struct multiply_add, Array, Array> { CUTLASS_HOST_DEVICE Array operator()( Array const &a, Array const &b, Array const &c) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) unsigned *result_ptr = reinterpret_cast(&result); unsigned const *a_ptr = reinterpret_cast(&a); unsigned const *b_ptr = reinterpret_cast(&b); unsigned const *c_ptr = reinterpret_cast(&c); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { asm ("fma.rn.bf16x2 %0, %1, %2, %3;\n" : "=r"(result_ptr[i]) : "r"(a_ptr[i]), "r"(b_ptr[i]), "r"(c_ptr[i]) ); } if constexpr (N % 2) { uint16_t *result_ptr = reinterpret_cast(&result); uint16_t const *a_residual_ptr = reinterpret_cast(&a); uint16_t const *b_residual_ptr = reinterpret_cast(&b); uint16_t const *c_residual_ptr = reinterpret_cast(&c); asm ("fma.rn.bf16 %0, %1, %2, %3;\n" : "=h"(result_ptr[N - 1]) : "h"(a_residual_ptr[N - 1]), "h"(b_residual_ptr[N - 1]), "h"(c_residual_ptr[N - 1]) ); } #else multiply_add op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = op(a[i], b[i], c[i]); } #endif return result; } CUTLASS_HOST_DEVICE Array operator()( bfloat16_t const &a, Array const &b, Array const &c) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) unsigned *result_ptr = reinterpret_cast(&result); unsigned const *b_ptr = reinterpret_cast(&b); unsigned const *c_ptr = reinterpret_cast(&c); unsigned a_packed = static_cast(a.raw()); a_packed = (a_packed | (a_packed << 16)); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { asm ("fma.rn.bf16x2 %0, %1, %2, %3;\n" : "=r"(result_ptr[i]) : "r"(a_packed), "r"(b_ptr[i]), "r"(c_ptr[i]) ); } if constexpr (N % 2) { uint16_t *result_ptr = reinterpret_cast(&result); uint16_t const *a_residual_ptr = reinterpret_cast(&a); uint16_t const *b_residual_ptr = reinterpret_cast(&b); uint16_t const *c_residual_ptr = reinterpret_cast(&c); asm ("fma.rn.bf16 %0, %1, %2, %3;\n" : "=h"(result_ptr[N - 1]) : "h"(a_residual_ptr[0]), "h"(b_residual_ptr[N - 1]), "h"(c_residual_ptr[N - 1]) ); } #else multiply_add op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = op(a, b[i], c[i]); } #endif return result; } CUTLASS_HOST_DEVICE Array operator()( Array const &a, bfloat16_t const &b, Array const &c) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) unsigned *result_ptr = reinterpret_cast(&result); unsigned const *a_ptr = reinterpret_cast(&a); unsigned const *c_ptr = reinterpret_cast(&c); unsigned b_packed = static_cast(b.raw()); b_packed = (b_packed | (b_packed << 16)); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { asm ("fma.rn.bf16x2 %0, %1, %2, %3;\n" : "=r"(result_ptr[i]) : "r"(a_ptr[i]), "r"(b_packed), "r"(c_ptr[i]) ); } if constexpr (N % 2) { uint16_t *result_ptr = reinterpret_cast(&result); uint16_t const *a_residual_ptr = reinterpret_cast(&a); uint16_t const *b_residual_ptr = reinterpret_cast(&b); uint16_t const *c_residual_ptr = reinterpret_cast(&c); asm ("fma.rn.bf16 %0, %1, %2, %3;\n" : "=h"(result_ptr[N - 1]) : "h"(a_residual_ptr[N - 1]), "h"(b_residual_ptr[0]), "h"(c_residual_ptr[N - 1]) ); } #else multiply_add op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = op(a[i], 
b, c[i]); } #endif return result; } CUTLASS_HOST_DEVICE Array operator()( Array const &a, Array const &b, bfloat16_t const &c) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) unsigned *result_ptr = reinterpret_cast(&result); unsigned const *a_ptr = reinterpret_cast(&a); unsigned const *b_ptr = reinterpret_cast(&b); unsigned c_packed = static_cast(c.raw()); c_packed = (c_packed | (c_packed << 16)); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { asm ("fma.rn.bf16x2 %0, %1, %2, %3;\n" : "=r"(result_ptr[i]) : "r"(a_ptr[i]), "r"(b_ptr[i]), "r"(c_packed) ); } if constexpr (N % 2) { uint16_t *result_ptr = reinterpret_cast(&result); uint16_t const *a_residual_ptr = reinterpret_cast(&a); uint16_t const *b_residual_ptr = reinterpret_cast(&b); uint16_t const *c_residual_ptr = reinterpret_cast(&c); asm ("fma.rn.bf16 %0, %1, %2, %3;\n" : "=h"(result_ptr[N - 1]) : "h"(a_residual_ptr[N - 1]), "h"(b_residual_ptr[N - 1]), "h"(c_residual_ptr[0]) ); } #else multiply_add op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = op(a[i], b[i], c); } #endif return result; } CUTLASS_HOST_DEVICE Array operator()( Array const &a, bfloat16_t const &b, bfloat16_t const &c) const { Array result; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) unsigned *result_ptr = reinterpret_cast(&result); unsigned const *a_ptr = reinterpret_cast(&a); unsigned b_packed = static_cast(b.raw()); b_packed = (b_packed | (b_packed << 16)); unsigned c_packed = static_cast(c.raw()); c_packed = (c_packed | (c_packed << 16)); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N / 2; ++i) { asm ("fma.rn.bf16x2 %0, %1, %2, %3;\n" : "=r"(result_ptr[i]) : "r"(a_ptr[i]), "r"(b_packed), "r"(c_packed) ); } if constexpr (N % 2) { uint16_t *result_ptr = reinterpret_cast(&result); uint16_t const *a_residual_ptr = reinterpret_cast(&a); uint16_t const *b_residual_ptr = reinterpret_cast(&b); uint16_t const *c_residual_ptr = reinterpret_cast(&c); asm ("fma.rn.bf16 %0, %1, %2, %3;\n" : "=h"(result_ptr[N - 1]) : "h"(a_residual_ptr[N - 1]), "h"(b_residual_ptr[0]), "h"(c_residual_ptr[0]) ); } #else multiply_add op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = op(a[i], b, c); } #endif return result; } }; /// bit_and template struct bit_and> { CUTLASS_HOST_DEVICE Array operator()(Array const &a, Array const &b) const { using ArrayType = Array; using Storage = typename ArrayType::Storage; ArrayType result; Storage *result_data = result.raw_data(); Storage const *a_data = a.raw_data(); Storage const *b_data = b.raw_data(); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < ArrayType::kStorageElements; ++i) { result_data[i] = (a_data[i] & b_data[i]); } return result; } }; /// bit_or template struct bit_or> { CUTLASS_HOST_DEVICE Array operator()(Array const &a, Array const &b) const { using ArrayType = Array; using Storage = typename ArrayType::Storage; ArrayType result; Storage *result_data = result.raw_data(); Storage const *a_data = a.raw_data(); Storage const *b_data = b.raw_data(); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < ArrayType::kStorageElements; ++i) { result_data[i] = (a_data[i] | b_data[i]); } return result; } }; /// bit_not template struct bit_not> { CUTLASS_HOST_DEVICE Array operator()(Array const &a) const { using ArrayType = Array; using Storage = typename ArrayType::Storage; ArrayType result; Storage *result_data = result.raw_data(); Storage const *a_data = a.raw_data(); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < ArrayType::kStorageElements; ++i) { result_data[i] = (~a_data[i]); } return result; } 
}; /// bit_xor template struct bit_xor> { CUTLASS_HOST_DEVICE Array operator()(Array const &a, Array const &b) const { using ArrayType = Array; using Storage = typename ArrayType::Storage; ArrayType result; Storage *result_data = result.raw_data(); Storage const *a_data = a.raw_data(); Storage const *b_data = b.raw_data(); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < ArrayType::kStorageElements; ++i) { result_data[i] = (a_data[i] ^ b_data[i]); } return result; } }; /// Fused and-popc-add template struct and_popc_add, Array, Array> { CUTLASS_HOST_DEVICE Array operator()(Array const &a, Array const &b, Array const &c) const { Array result; and_popc_add scalar_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = scalar_op(a[i], b[i], c[i]); } return result; } CUTLASS_HOST_DEVICE Array operator()(Array const &a, T const &scalar, Array const &c) const { Array result; and_popc_add scalar_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = scalar_op(a[i], scalar, c[i]); } return result; } CUTLASS_HOST_DEVICE Array operator()(T const &scalar, Array const &b, Array const &c) const { Array result; and_popc_add scalar_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = scalar_op(scalar, b[i], c[i]); } return result; } }; /// Fused or-popc-add template struct or_popc_add, Array, Array> { CUTLASS_HOST_DEVICE Array operator()(Array const &a, Array const &b, Array const &c) const { Array result; or_popc_add scalar_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = scalar_op(a[i], b[i], c[i]); } return result; } CUTLASS_HOST_DEVICE Array operator()(Array const &a, T const &scalar, Array const &c) const { Array result; or_popc_add scalar_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = scalar_op(a[i], scalar, c[i]); } return result; } CUTLASS_HOST_DEVICE Array operator()(T const &scalar, Array const &b, Array const &c) const { Array result; or_popc_add scalar_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = scalar_op(scalar, b[i], c[i]); } return result; } }; /// Fused xor-popc-add template struct xor_popc_add, Array, Array> { CUTLASS_HOST_DEVICE Array operator()(Array const &a, Array const &b, Array const &c) const { Array result; xor_popc_add scalar_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = scalar_op(a[i], b[i], c[i]); } return result; } CUTLASS_HOST_DEVICE Array operator()(Array const &a, T const &scalar, Array const &c) const { Array result; xor_popc_add scalar_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = scalar_op(a[i], scalar, c[i]); } return result; } CUTLASS_HOST_DEVICE Array operator()(T const &scalar, Array const &b, Array const &c) const { Array result; xor_popc_add scalar_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { result[i] = scalar_op(scalar, b[i], c[i]); } return result; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Operator overloads ///////////////////////////////////////////////////////////////////////////////////////////////// template CUTLASS_HOST_DEVICE Array operator+(Array const &lhs, Array const &rhs) { plus> op; return op(lhs, rhs); } template CUTLASS_HOST_DEVICE Array operator+(T const &lhs, Array const &rhs) { plus> op; return op(lhs, rhs); } template CUTLASS_HOST_DEVICE Array operator+(Array const &lhs, T const &rhs) { plus> op; return op(lhs, rhs); } template CUTLASS_HOST_DEVICE Array operator-(Array const &lhs, Array const &rhs) { minus> op; return op(lhs, 
rhs); }

template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator-(Array<T, N> const &lhs) {
  negate<Array<T, N>> op;
  return op(lhs);
}

template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator*(Array<T, N> const &lhs, Array<T, N> const &rhs) {
  multiplies<Array<T, N>> op;
  return op(lhs, rhs);
}

template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator*(T lhs, Array<T, N> const &rhs) {
  multiplies<Array<T, N>> op;
  return op(lhs, rhs);
}

template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator*(Array<T, N> const &lhs, T rhs) {
  multiplies<Array<T, N>> op;
  return op(lhs, rhs);
}

template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator/(Array<T, N> const &lhs, Array<T, N> const &rhs) {
  divides<Array<T, N>> op;
  return op(lhs, rhs);
}

template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> fma(Array<T, N> const &a, Array<T, N> const &b, Array<T, N> const &c) {
  multiply_add<Array<T, N>> op;
  return op(a, b, c);
}

template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> fma(T a, Array<T, N> const &b, Array<T, N> const &c) {
  multiply_add<Array<T, N>> op;
  return op(a, b, c);
}

template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> fma(Array<T, N> const &a, T b, Array<T, N> const &c) {
  multiply_add<Array<T, N>> op;
  return op(a, b, c);
}

template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> fma(Array<T, N> const &a, Array<T, N> const &b, T c) {
  multiply_add<Array<T, N>> op;
  return op(a, b, c);
}

////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// AlignedArray
////////////////////////////////////////////////////////////////////////////////////////////////////

/// Aligned array type
template <
  /// Element type
  typename T,
  /// Number of elements in the array
  int N,
  /// Alignment requirement in bytes
  int Alignment = ( sizeof_bits<T>::value * N + 7 ) / 8
>
class alignas(Alignment) AlignedArray: public Array<T, N> {
public:

};

} // namespace cutlass

////////////////////////////////////////////////////////////////////////////////////////////////////

#include "cutlass/array_subbyte.h"

////////////////////////////////////////////////////////////////////////////////////////////////////
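////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Usage sketch (illustrative only): AlignedArray, defined above, is an Array with a caller-chosen
// alignment so that a whole fragment can be moved with a single vectorized memory access. For
// example, a 16-byte-aligned fragment of eight half-precision values:
//
//   cutlass::AlignedArray<cutlass::half_t, 8, 16> frag;
//   static_assert(alignof(decltype(frag)) == 16, "expected 16B alignment");
//
////////////////////////////////////////////////////////////////////////////////////////////////////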