From 209baee299936d3da662df516e722a4b11213684 Mon Sep 17 00:00:00 2001 From: zjing14 Date: Thu, 23 Feb 2023 18:59:37 -0600 Subject: [PATCH 01/31] disable tensor contraction f64 on MI100 (#602) --- .../impl/device_contraction_multiple_d_xdl_cshuffle.hpp | 5 +++++ script/cmake-ck-dev.sh | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/include/ck/tensor_operation/gpu/device/impl/device_contraction_multiple_d_xdl_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_contraction_multiple_d_xdl_cshuffle.hpp index 72c6d0b6f7..7a4c8bf267 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_contraction_multiple_d_xdl_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_contraction_multiple_d_xdl_cshuffle.hpp @@ -586,6 +586,11 @@ struct DeviceContractionMultipleD_Xdl_CShuffle return false; } + if(ck::get_device_name() != "gfx90a" && std::is_same::value) + { + return false; + } + if(!GridwiseGemm::CheckValidity(arg.a_grid_desc_m_k_, arg.b_grid_desc_n_k_, arg.ds_grid_desc_m_n_, diff --git a/script/cmake-ck-dev.sh b/script/cmake-ck-dev.sh index 3e530478b0..2e605ce8de 100755 --- a/script/cmake-ck-dev.sh +++ b/script/cmake-ck-dev.sh @@ -10,8 +10,8 @@ cmake -D CMAKE_CXX_COMPILER=/opt/rocm/bin/hipcc \ -D CMAKE_CXX_FLAGS="-O3 -ftemplate-backtrace-limit=0 -gline-tables-only -save-temps=$PWD" \ -D CMAKE_BUILD_TYPE=Release \ --D BUILD_DEV=OFF \ --D GPU_TARGETS="gfx90a" \ +-D BUILD_DEV=ON \ +-D GPU_TARGETS="gfx908;gfx90a" \ -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \ -D USE_BITINT_EXTENSION_INT4=OFF \ ${MY_PROJECT_SOURCE} From 8f455615a8092822637fb2e7691d38a98456f276 Mon Sep 17 00:00:00 2001 From: Chao Liu Date: Sun, 26 Feb 2023 21:19:11 -0800 Subject: [PATCH 02/31] Fast GeLU using built-in function (#587) * clean up * fast gelu using builtin function * clean * clean * clean * clean: * clean * fix compilation * clean * clean --------- Co-authored-by: zjing14 --- example/04_gemm_add_add_fastgelu/common.hpp | 2 +- 
.../gemm_add_add_fastgelu_xdl_bf16.cpp | 11 +- .../gemm_add_add_fastgelu_xdl_fp16.cpp | 11 +- .../gemm_add_add_fastgelu_xdl_fp32.cpp | 12 +-- .../gemm_add_add_fastgelu_xdl_int4.cpp | 11 +- .../gemm_add_add_fastgelu_xdl_int8.cpp | 11 +- .../run_gemm_add_add_fastgelu_example.inc | 2 +- include/ck/ck.hpp | 3 + .../element/binary_element_wise_operation.hpp | 54 +++++----- .../gpu/element/element_wise_operation.hpp | 102 +++++++++++------- .../element/unary_element_wise_operation.hpp | 91 ++++++++++++---- 11 files changed, 194 insertions(+), 116 deletions(-) diff --git a/example/04_gemm_add_add_fastgelu/common.hpp b/example/04_gemm_add_add_fastgelu/common.hpp index 3f9375e092..839587c148 100644 --- a/example/04_gemm_add_add_fastgelu/common.hpp +++ b/example/04_gemm_add_add_fastgelu/common.hpp @@ -62,7 +62,7 @@ struct ExecutionConfig final }; inline bool -parse_cmd_args(int argc, char* argv[], ProblemSize& problem_size, ExecutionConfig config) +parse_cmd_args(int argc, char* argv[], ProblemSize& problem_size, ExecutionConfig& config) { if(argc == 1) { diff --git a/example/04_gemm_add_add_fastgelu/gemm_add_add_fastgelu_xdl_bf16.cpp b/example/04_gemm_add_add_fastgelu/gemm_add_add_fastgelu_xdl_bf16.cpp index 5e50c14dc2..ba0476b9b9 100644 --- a/example/04_gemm_add_add_fastgelu/gemm_add_add_fastgelu_xdl_bf16.cpp +++ b/example/04_gemm_add_add_fastgelu/gemm_add_add_fastgelu_xdl_bf16.cpp @@ -7,10 +7,11 @@ using ADataType = BF16; using BDataType = BF16; using AccDataType = F32; using CShuffleDataType = F32; -using D0DataType = BF16; -using D1DataType = BF16; -using DsDataType = ck::Tuple; -using EDataType = BF16; +using CDataType = F32; // C matrix doesn't exsit in GPU memory, this is used for host verification +using D0DataType = BF16; +using D1DataType = BF16; +using DsDataType = ck::Tuple; +using EDataType = BF16; using ALayout = Row; using BLayout = Col; @@ -36,7 +37,7 @@ using DeviceOpInstance = ck::tensor_operation::device::DeviceGemmMultipleD_Xdl_C using 
ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm; -using EDataType = F16; +using CDataType = F32; // C matrix doesn't exsit in GPU memory, this is used for host verification +using D0DataType = F16; +using D1DataType = F16; +using DsDataType = ck::Tuple; +using EDataType = F16; using ALayout = Row; using BLayout = Col; @@ -36,7 +37,7 @@ using DeviceOpInstance = ck::tensor_operation::device::DeviceGemmMultipleD_Xdl_C using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm; -using EDataType = F32; +using CDataType = F32; // C matrix doesn't exsit in GPU memory, this is used for host verification +using D0DataType = F32; +using D1DataType = F32; +using DsDataType = ck::Tuple; +using EDataType = F32; using ALayout = Row; using BLayout = Col; @@ -36,7 +36,7 @@ using DeviceOpInstance = ck::tensor_operation::device::DeviceGemmMultipleD_Xdl_C using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm; -using EDataType = I4; +using CDataType = I32; // C matrix doesn't exsit in GPU memory, this is used for host verification +using D0DataType = I4; +using D1DataType = I4; +using DsDataType = ck::Tuple; +using EDataType = I4; using KernelADataType = I8; using KernelBDataType = I8; @@ -47,7 +48,7 @@ using DeviceOpInstance = ck::tensor_operation::device::DeviceGemmMultipleD_Xdl_C using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm; -using EDataType = I8; +using CDataType = I32; // C matrix doesn't exsit in GPU memory, this is used for host verification +using D0DataType = I8; +using D1DataType = I8; +using DsDataType = ck::Tuple; +using EDataType = I8; using ALayout = Row; using BLayout = Col; @@ -36,7 +37,7 @@ using DeviceOpInstance = ck::tensor_operation::device::DeviceGemmMultipleD_Xdl_C using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm c_m_n({M, N}); + Tensor c_m_n({M, N}); auto ref_gemm = ReferenceGemmInstance{}; auto ref_invoker = ref_gemm.MakeInvoker(); diff --git a/include/ck/ck.hpp 
b/include/ck/ck.hpp index ffd7e74f12..1257a77649 100644 --- a/include/ck/ck.hpp +++ b/include/ck/ck.hpp @@ -168,6 +168,9 @@ // tuning parameter #define CK_WORKAROUND_SWDEV_325164 0 +// workaround: compiler not emiting reciprocal instruction frm __frcp_rn() +#define CK_WORKAROUND_SWDEV_383542 1 + // flag to enable (1) or disable (0) the debugging output in some kernels #define DEBUG_LOG 0 diff --git a/include/ck/tensor_operation/gpu/element/binary_element_wise_operation.hpp b/include/ck/tensor_operation/gpu/element/binary_element_wise_operation.hpp index 69fa75c3fd..136017c6d1 100644 --- a/include/ck/tensor_operation/gpu/element/binary_element_wise_operation.hpp +++ b/include/ck/tensor_operation/gpu/element/binary_element_wise_operation.hpp @@ -4,6 +4,7 @@ #pragma once #include "ck/utility/data_type.hpp" +#include "ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp" namespace ck { namespace tensor_operation { @@ -280,43 +281,42 @@ struct AddHardswish }; }; -// C = A * B // E = FastGelu(C + D) struct AddFastGelu { - // Fast GeLU - // https://paperswithcode.com/method/gelu - // y = 0.5*x*(1+tanh(sqrt(2/pi)*(x+0.044715*x^3))) - __host__ __device__ static constexpr float GetFastGeLU(float x) - { - const float u = 2.f * x * (0.035677f * x * x + 0.797885f); - const float emu = exp(-u); - const float cdf = 0.5f + 0.5f * (2.f / (1.f + emu) - 1.f); - return x * cdf; - } - - template - static inline constexpr bool is_valid_param_type_v = - std::is_same_v || std::is_same_v || std::is_same_v || - std::is_same_v || std::is_same_v; - template - __host__ __device__ constexpr void operator()(E& e, const C& c, const D& d) const + __host__ __device__ constexpr void operator()(E& e, const C& c, const D& d) const; + + template <> + __host__ __device__ constexpr void + operator()(float& e, const float& c, const float& d) const { - static_assert(is_valid_param_type_v && is_valid_param_type_v && - is_valid_param_type_v); + const float x = c + d; - const float y = 
GetFastGeLU(type_convert(c) + type_convert(d)); - - e = type_convert(y); + FastGelu{}.template operator()(e, x); } - template - __host__ __device__ constexpr void operator()(float& e, const float& c, const D& d) const + template <> + __host__ __device__ constexpr void + operator()(half_t& e, const half_t& c, const half_t& d) const { - static_assert(is_valid_param_type_v); + const half_t x = c + d; - e = GetFastGeLU(c + type_convert(d)); + ck::tensor_operation::element_wise::FastGelu{}.template operator()(e, x); + } + + template <> + __host__ __device__ constexpr void + operator()(half_t& e, const float& c, const half_t& d) const + { + const float x0_f = c + d; + + float x1_f = 0; + + ck::tensor_operation::element_wise::FastGelu{}.template operator()(x1_f, + x0_f); + + e = type_convert(x1_f); } }; diff --git a/include/ck/tensor_operation/gpu/element/element_wise_operation.hpp b/include/ck/tensor_operation/gpu/element/element_wise_operation.hpp index 7f3d450a39..ceb2b665b9 100644 --- a/include/ck/tensor_operation/gpu/element/element_wise_operation.hpp +++ b/include/ck/tensor_operation/gpu/element/element_wise_operation.hpp @@ -16,7 +16,7 @@ namespace element_wise { // Need to ensure compiler will fail if there is no matching candidate, instead of compiler // siliently do implicit type conversion // -// Method 1: +// Example: // // struct ExampleElementwiseOp // { @@ -30,19 +30,6 @@ namespace element_wise { // { // } // }; -// -// Method 2: -// -// template -// struct ExampleElementwiseOp; -// -// template <> -// struct ExampleElementwiseOp -// { -// __host__ __device__ void operator()(float& y, ck::bhalf_t& x) const -// { -// } -// }; struct AddReluAdd { @@ -208,41 +195,74 @@ struct AddMultiply } }; -// C = A * B // E = FastGelu(C + D0 + D1) struct AddAddFastGelu { - // Fast GeLU - // https://paperswithcode.com/method/gelu - // y = 0.5*x*(1+tanh(sqrt(2/pi)*(x+0.044715*x^3))) - __host__ __device__ static constexpr float GetFastGeLU(float x) - { - const float u = 2.f * 
x * (0.035677f * x * x + 0.797885f); - const float emu = exp(-u); - const float cdf = 0.5f + 0.5f * (2.f / (1.f + emu) - 1.f); - return x * cdf; - } - - template - static inline constexpr bool is_valid_param_type_v = - std::is_same_v || std::is_same_v || std::is_same_v || - std::is_same_v || std::is_same_v -#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4 - || std::is_same_v -#endif - ; - template __host__ __device__ constexpr void - operator()(E& e, const C& c, const D0& d0, const D1& d1) const + operator()(E& e, const C& c, const D0& d0, const D1& d1) const; + + template <> + __host__ __device__ constexpr void operator()(float& e, + const float& c, + const float& d0, + const float& d1) const { - static_assert(is_valid_param_type_v && is_valid_param_type_v && - is_valid_param_type_v && is_valid_param_type_v); + const float x = c + d0 + d1; - const float y = - GetFastGeLU(type_convert(c) + type_convert(d0) + type_convert(d1)); + FastGelu{}.template operator()(e, x); + } - e = type_convert(y); + template <> + __host__ __device__ constexpr void operator()( + half_t& e, const half_t& c, const half_t& d0, const half_t& d1) const + { + const half_t x = c + d0 + d1; + + ck::tensor_operation::element_wise::FastGelu{}.template operator()(e, x); + } + + template <> + __host__ __device__ constexpr void operator()( + half_t& e, const float& c, const half_t& d0, const half_t& d1) const + { + const float x0_f = c + d0 + d1; + + float x1_f = 0; + + ck::tensor_operation::element_wise::FastGelu{}.template operator()(x1_f, + x0_f); + + e = type_convert(x1_f); + } + + template <> + __host__ __device__ constexpr void operator()( + bhalf_t& e, const float& c, const bhalf_t& d0, const bhalf_t& d1) const + { + const float x0_f = c + type_convert(d0) + type_convert(d1); + + float x1_f = 0; + + ck::tensor_operation::element_wise::FastGelu{}.template operator()(x1_f, + x0_f); + + e = type_convert(x1_f); + } + + template <> + __host__ __device__ constexpr void operator()( + int8_t& e, const 
int32_t& c, const int8_t& d0, const int8_t& d1) const + { + const float x0_f = + type_convert(c) + type_convert(d0) + type_convert(d1); + + float x1_f = 0; + + ck::tensor_operation::element_wise::FastGelu{}.template operator()(x1_f, + x0_f); + + e = type_convert(x1_f); } }; diff --git a/include/ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp b/include/ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp index 2167a79e01..6b4df3b60e 100644 --- a/include/ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp +++ b/include/ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp @@ -11,6 +11,10 @@ namespace ck { namespace tensor_operation { namespace element_wise { +#if CK_WORKAROUND_SWDEV_383542 +extern "C" __device__ float __ocml_native_recip_f32(float); +#endif + struct PassThrough { template @@ -200,36 +204,83 @@ struct Relu } }; -// Y = FastGelu(X) +// Fast GeLU +// https://paperswithcode.com/method/gelu +// y = 0.5*x*(1+tanh(sqrt(2/pi)*(x+0.044715*x^3))) +// host code use higher accuracy "exp" and "div" +// gpu code use lower accuracy "__expf" and "rcp" function struct FastGelu { - // Fast GeLU - // https://paperswithcode.com/method/gelu - // y = 0.5*x*(1+tanh(sqrt(2/pi)*(x+0.044715*x^3))) - __host__ __device__ static constexpr float GetFastGeLU(float x) + template + __host__ void operator()(Y& y, const X& x) const; + + template + __device__ void operator()(Y& y, const X& x) const; + + template <> + __host__ void operator()(float& y, const float& x) const { const float u = 2.f * x * (0.035677f * x * x + 0.797885f); const float emu = exp(-u); const float cdf = 0.5f + 0.5f * (2.f / (1.f + emu) - 1.f); - return x * cdf; + + y = x * cdf; } - template - static inline constexpr bool is_valid_param_type_v = - std::is_same_v || std::is_same_v || std::is_same_v || - std::is_same_v || std::is_same_v -#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4 - || std::is_same_v -#endif - ; - - template - __host__ __device__ void 
operator()(Y& y, const X& x) const + // device code, use lower precision "__expf" and "rcp" + template <> + __device__ void operator()(float& y, const float& x) const { - static_assert(is_valid_param_type_v && is_valid_param_type_v); + const float u = 2.f * x * (0.035677f * x * x + 0.797885f); + const float emu = __expf(-u); - const float tmp_y = GetFastGeLU(type_convert(x)); - y = type_convert(tmp_y); +#if !CK_WORKAROUND_SWDEV_383542 + const float cdf = 0.5f + 0.5f * (2.f * __frcp_rn(1.f + emu) - 1.f); +#else + const float cdf = 0.5f + 0.5f * (2.f * __ocml_native_recip_f32(1.f + emu) - 1.f); +#endif + + y = x * cdf; + } + + template <> + __host__ void operator()(half_t& y, const half_t& x) const + { + float y_f; + + this->operator()(y_f, type_convert(x)); + + y = type_convert(y_f); + } + + template <> + __device__ void operator()(half_t& y, const half_t& x) const + { + float y_f; + + this->operator()(y_f, type_convert(x)); + + y = type_convert(y_f); + } + + template <> + __host__ void operator()(half_t& y, const float& x) const + { + float y_f; + + this->operator()(y_f, x); + + y = type_convert(y_f); + } + + template <> + __device__ void operator()(half_t& y, const float& x) const + { + float y_f; + + this->operator()(y_f, x); + + y = type_convert(y_f); } }; From 68dbf40a79210c10bc1a9e2ed4b2838f371694f6 Mon Sep 17 00:00:00 2001 From: Haocong WANG Date: Thu, 2 Mar 2023 02:07:42 +0800 Subject: [PATCH 03/31] [Navi3x Bug Fix] fix typo to accept MNKPadding flag correctly. 
(#597) * fix a bug blocking wmma_gemm_multipleD * Utilize matrix padder in device_wmma_op * cosmetic change for gemmpadding format * clang format * Change gridwise gemm from FIFO to KMN loop fashion --- example/01_gemm/gemm_wmma_fp16.cpp | 12 +- .../run_grouped_conv_bwd_weight_example.inc | 2 +- .../device_gemm_multiple_d_wmma_cshuffle.hpp | 132 ++++++--------- .../gpu/device/impl/device_gemm_wmma.hpp | 153 +++++++----------- .../gpu/grid/gridwise_gemm_wmma.hpp | 2 +- 5 files changed, 115 insertions(+), 186 deletions(-) diff --git a/example/01_gemm/gemm_wmma_fp16.cpp b/example/01_gemm/gemm_wmma_fp16.cpp index 48bcca257a..58f965be88 100644 --- a/example/01_gemm/gemm_wmma_fp16.cpp +++ b/example/01_gemm/gemm_wmma_fp16.cpp @@ -19,15 +19,15 @@ using AElementOp = PassThrough; using BElementOp = PassThrough; using CElementOp = PassThrough; -static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default; +static constexpr auto GemmMNKPadding = ck::tensor_operation::device::GemmSpecialization::MNKPadding; // clang-format off using DeviceGemmInstance = ck::tensor_operation::device::DeviceGemmWmma_CShuffle -// ######| ALayout| BLayout| CLayout| AData| BData| CData| AccData| CShuffle| A| B| C| GEMM| Block| MPer| NPer| K0Per| K1| MPer| NPer|MRepeat|NRepeat| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| -// ######| | | | Type| Type| Type| Type| DataType| Elementwise| Elementwise| Elementwise| Spacialization| Size| Block| Block| Block| | WMMA| WMMA| | | ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN|MWmmaPerWave|NWmmaPerWave| _MBlock_MWaveMPerWmma| ScalarPerVector| -// ######| | | | | 
| | | | Operation| Operation| Operation| | | | | | | | | | | Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerWmma| _NWaveNPerWmma| -// ######| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | - < ALayout, BLayout, CLayout, ADataType, BDataType, CDataType, AccDataType, CShuffleDataType, AElementOp, BElementOp, CElementOp, GemmDefault, 256, 128, 256, 8, 8, 16, 16, 4, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, 1, 1, S<1, 32, 1, 8>, 8, 1>; +// ######| ALayout| BLayout| CLayout| AData| BData| CData| AccData| CShuffle| A| B| C| GEMM| Block| MPer| NPer| K0Per| K1| MPer| NPer|MRepeat|NRepeat| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| +// ######| | | | Type| Type| Type| Type| DataType| Elementwise| Elementwise| Elementwise| Spacialization| Size| Block| Block| Block| | WMMA| WMMA| | | ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN|MWmmaPerWave|NWmmaPerWave| _MBlock_MWaveMPerWmma| ScalarPerVector| +// ######| | | | | | | | | Operation| Operation| Operation| | | | | | | | | | | Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerWmma| _NWaveNPerWmma| +// ######| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + < ALayout, BLayout, CLayout, ADataType, BDataType, CDataType, AccDataType, CShuffleDataType, AElementOp, BElementOp, CElementOp, GemmMNKPadding, 256, 128, 
256, 8, 8, 16, 16, 4, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, 1, 1, S<1, 32, 1, 8>, 8, 1>; // clang-format on using ReferenceGemmInstance = ck::tensor_operation::host:: diff --git a/example/20_grouped_conv_bwd_weight/run_grouped_conv_bwd_weight_example.inc b/example/20_grouped_conv_bwd_weight/run_grouped_conv_bwd_weight_example.inc index dc45db9865..7891812375 100644 --- a/example/20_grouped_conv_bwd_weight/run_grouped_conv_bwd_weight_example.inc +++ b/example/20_grouped_conv_bwd_weight/run_grouped_conv_bwd_weight_example.inc @@ -26,7 +26,7 @@ bool run_grouped_conv_bwd_weight(const ExecutionConfig& config, { split_k = 1; } - + const auto in_g_n_c_wis_desc = ck::utils::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed< InputLayout>(conv_param); diff --git a/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_wmma_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_wmma_cshuffle.hpp index 66c4de7f05..1d705a28b0 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_wmma_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_wmma_cshuffle.hpp @@ -86,120 +86,84 @@ struct DeviceGemmMultipleD_Wmma_CShuffle : public DeviceGemmMultipleD{}; - static auto MakeAGridDescriptor_K0_M_K1(index_t M, index_t K, index_t StrideA) + static constexpr auto matrix_padder = + MatrixPadder{MPerBlock, NPerBlock, K0PerBlock* K1}; + + static auto MakeAGridDescriptor_K0_M_K1(index_t MRaw, index_t KRaw, index_t StrideA) { - assert(K % K1 == 0); - - const index_t K0 = K / K1; - - const auto a_grid_desc_m_k = [&]() { - if constexpr(is_same::value) + const auto a_grid_desc_mraw_kraw = [&]() { + if constexpr(is_same_v) { - return make_naive_tensor_descriptor(make_tuple(M, K), make_tuple(StrideA, I1)); + return make_naive_tensor_descriptor(make_tuple(MRaw, KRaw), + make_tuple(StrideA, I1)); } -#ifdef ENABLE_COLMAJOR - else if 
constexpr(is_same::value) + else if constexpr(is_same_v) { - return make_naive_tensor_descriptor(make_tuple(M, K), make_tuple(I1, StrideA)); + return make_naive_tensor_descriptor(make_tuple(MRaw, KRaw), + make_tuple(I1, StrideA)); } -#endif }(); - if constexpr(GemmSpec == GemmSpecialization::MNPadding) - { - const auto PadM = (MPerBlock - M % MPerBlock) % MPerBlock; + const auto a_grid_desc_m_k = matrix_padder.PadADescriptor_M_K(a_grid_desc_mraw_kraw); + const auto M = a_grid_desc_m_k.GetLength(I0); + const auto K = a_grid_desc_m_k.GetLength(I1); + assert(K % K1 == 0); + const index_t K0 = K / K1; - return transform_tensor_descriptor( - a_grid_desc_m_k, - make_tuple(make_unmerge_transform(make_tuple(K0, K1Number)), - make_right_pad_transform(M, PadM)), - make_tuple(Sequence<1>{}, Sequence<0>{}), - make_tuple(Sequence<0, 2>{}, Sequence<1>{})); - } - else - { - return transform_tensor_descriptor( - a_grid_desc_m_k, - make_tuple(make_unmerge_transform(make_tuple(K0, K1Number)), - make_pass_through_transform(M)), - make_tuple(Sequence<1>{}, Sequence<0>{}), - make_tuple(Sequence<0, 2>{}, Sequence<1>{})); - } + return transform_tensor_descriptor( + a_grid_desc_m_k, + make_tuple(make_unmerge_transform(make_tuple(K0, K1Number)), + make_pass_through_transform(M)), + make_tuple(Sequence<1>{}, Sequence<0>{}), + make_tuple(Sequence<0, 2>{}, Sequence<1>{})); } - static auto MakeBGridDescriptor_K0_N_K1(index_t K, index_t N, index_t StrideB) + static auto MakeBGridDescriptor_K0_N_K1(index_t KRaw, index_t NRaw, index_t StrideB) { - assert(K % K1 == 0); - - const index_t K0 = K / K1; - - const auto b_grid_desc_k_n = [&]() { - if constexpr(is_same::value) + const auto b_grid_desc_nraw_kraw = [&]() { + if constexpr(is_same_v) { - return make_naive_tensor_descriptor(make_tuple(K, N), make_tuple(StrideB, I1)); + return make_naive_tensor_descriptor(make_tuple(NRaw, KRaw), + make_tuple(StrideB, I1)); } - else if constexpr(is_same::value) + else if constexpr(is_same_v) { - return 
make_naive_tensor_descriptor(make_tuple(K, N), make_tuple(I1, StrideB)); + return make_naive_tensor_descriptor(make_tuple(NRaw, KRaw), + make_tuple(I1, StrideB)); } }(); - if constexpr(GemmSpec == GemmSpecialization::MNPadding) - { - const auto PadN = (NPerBlock - N % NPerBlock) % NPerBlock; + const auto b_grid_desc_n_k = matrix_padder.PadBDescriptor_N_K(b_grid_desc_nraw_kraw); + const auto N = b_grid_desc_n_k.GetLength(I0); + const auto K = b_grid_desc_n_k.GetLength(I1); + assert(K % K1 == 0); + const index_t K0 = K / K1; - return transform_tensor_descriptor( - b_grid_desc_k_n, - make_tuple(make_unmerge_transform(make_tuple(K0, K1Number)), - make_right_pad_transform(N, PadN)), - make_tuple(Sequence<0>{}, Sequence<1>{}), - make_tuple(Sequence<0, 2>{}, Sequence<1>{})); - } - else - { - return transform_tensor_descriptor( - b_grid_desc_k_n, - make_tuple(make_unmerge_transform(make_tuple(K0, K1Number)), - make_pass_through_transform(N)), - make_tuple(Sequence<0>{}, Sequence<1>{}), - make_tuple(Sequence<0, 2>{}, Sequence<1>{})); - } + return transform_tensor_descriptor( + b_grid_desc_n_k, + make_tuple(make_unmerge_transform(make_tuple(K0, K1Number)), + make_pass_through_transform(N)), + make_tuple(Sequence<1>{}, Sequence<0>{}), + make_tuple(Sequence<0, 2>{}, Sequence<1>{})); } template - static auto MakeEGridDescriptor_M_N(index_t M, index_t N, index_t StrideE) + static auto MakeEGridDescriptor_M_N(index_t MRaw, index_t NRaw, index_t StrideE) { - const auto e_grid_desc_m_n = [&]() { + const auto e_grid_desc_mraw_nraw = [&]() { if constexpr(is_same::value) { - return make_naive_tensor_descriptor(make_tuple(M, N), make_tuple(StrideE, I1)); + return make_naive_tensor_descriptor(make_tuple(MRaw, NRaw), + make_tuple(StrideE, I1)); } else if constexpr(is_same::value) { - return make_naive_tensor_descriptor(make_tuple(M, N), make_tuple(I1, StrideE)); + return make_naive_tensor_descriptor(make_tuple(MRaw, NRaw), + make_tuple(I1, StrideE)); } }(); - if constexpr(GemmSpec == 
GemmSpecialization::MNPadding) - { - const auto PadM = (MPerBlock - M % MPerBlock) % MPerBlock; - const auto PadN = (NPerBlock - N % NPerBlock) % NPerBlock; - - return transform_tensor_descriptor( - e_grid_desc_m_n, - make_tuple(make_right_pad_transform(M, PadM), make_right_pad_transform(N, PadN)), - make_tuple(Sequence<0>{}, Sequence<1>{}), - make_tuple(Sequence<0>{}, Sequence<1>{})); - } - else - { - - return transform_tensor_descriptor( - e_grid_desc_m_n, - make_tuple(make_pass_through_transform(M), make_pass_through_transform(N)), - make_tuple(Sequence<0>{}, Sequence<1>{}), - make_tuple(Sequence<0>{}, Sequence<1>{})); - } + return matrix_padder.PadCDescriptor_M_N(e_grid_desc_mraw_nraw); } static auto MakeDsGridDescriptor_M_N(const std::array& Ms, diff --git a/include/ck/tensor_operation/gpu/device/impl/device_gemm_wmma.hpp b/include/ck/tensor_operation/gpu/device/impl/device_gemm_wmma.hpp index dbcceac68f..e8e67532be 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_wmma.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_wmma.hpp @@ -12,6 +12,7 @@ #include "ck/tensor_operation/gpu/device/tensor_layout.hpp" #include "ck/tensor_operation/gpu/device/device_gemm.hpp" #include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" +#include "ck/tensor_operation/gpu/device/matrix_padder.hpp" #include "ck/tensor_operation/gpu/grid/gridwise_gemm_wmma.hpp" #include "ck/host_utility/device_prop.hpp" #include "ck/host_utility/kernel_launch.hpp" @@ -78,119 +79,83 @@ struct DeviceGemmWmma_CShuffle : public DeviceGemm{}; - static auto MakeAGridDescriptor_K0_M_K1(index_t M, index_t K, index_t StrideA) + static constexpr auto matrix_padder = + MatrixPadder{MPerBlock, NPerBlock, K0PerBlock* K1}; + + static auto MakeAGridDescriptor_K0_M_K1(index_t MRaw, index_t KRaw, index_t StrideA) { - assert(K % K1 == 0); - - const index_t K0 = K / K1; - - const auto a_grid_desc_m_k = [&]() { - if constexpr(is_same::value) + const auto a_grid_desc_mraw_kraw 
= [&]() { + if constexpr(is_same_v) { - return make_naive_tensor_descriptor(make_tuple(M, K), make_tuple(StrideA, I1)); + return make_naive_tensor_descriptor(make_tuple(MRaw, KRaw), + make_tuple(StrideA, I1)); } -#ifdef ENABLE_COLMAJOR - else if constexpr(is_same::value) + else if constexpr(is_same_v) { - return make_naive_tensor_descriptor(make_tuple(M, K), make_tuple(I1, StrideA)); - } -#endif - }(); - - if constexpr(GemmSpec == GemmSpecialization::MNPadding) - { - const auto PadM = (MPerBlock - M % MPerBlock) % MPerBlock; - - return transform_tensor_descriptor( - a_grid_desc_m_k, - make_tuple(make_unmerge_transform(make_tuple(K0, K1Number)), - make_right_pad_transform(M, PadM)), - make_tuple(Sequence<1>{}, Sequence<0>{}), - make_tuple(Sequence<0, 2>{}, Sequence<1>{})); - } - else - { - return transform_tensor_descriptor( - a_grid_desc_m_k, - make_tuple(make_unmerge_transform(make_tuple(K0, K1Number)), - make_pass_through_transform(M)), - make_tuple(Sequence<1>{}, Sequence<0>{}), - make_tuple(Sequence<0, 2>{}, Sequence<1>{})); - } - } - - static auto MakeBGridDescriptor_K0_N_K1(index_t K, index_t N, index_t StrideB) - { - assert(K % K1 == 0); - - const index_t K0 = K / K1; - - const auto b_grid_desc_k_n = [&]() { - if constexpr(is_same::value) - { - return make_naive_tensor_descriptor(make_tuple(K, N), make_tuple(StrideB, I1)); - } - else if constexpr(is_same::value) - { - return make_naive_tensor_descriptor(make_tuple(K, N), make_tuple(I1, StrideB)); + return make_naive_tensor_descriptor(make_tuple(MRaw, KRaw), + make_tuple(I1, StrideA)); } }(); - if constexpr(GemmSpec == GemmSpecialization::MNPadding) - { - const auto PadN = (NPerBlock - N % NPerBlock) % NPerBlock; + const auto a_grid_desc_m_k = matrix_padder.PadADescriptor_M_K(a_grid_desc_mraw_kraw); + const auto M = a_grid_desc_m_k.GetLength(I0); + const auto K = a_grid_desc_m_k.GetLength(I1); + assert(K % K1 == 0); + const index_t K0 = K / K1; - return transform_tensor_descriptor( - b_grid_desc_k_n, - 
make_tuple(make_unmerge_transform(make_tuple(K0, K1Number)), - make_right_pad_transform(N, PadN)), - make_tuple(Sequence<0>{}, Sequence<1>{}), - make_tuple(Sequence<0, 2>{}, Sequence<1>{})); - } - else - { - return transform_tensor_descriptor( - b_grid_desc_k_n, - make_tuple(make_unmerge_transform(make_tuple(K0, K1Number)), - make_pass_through_transform(N)), - make_tuple(Sequence<0>{}, Sequence<1>{}), - make_tuple(Sequence<0, 2>{}, Sequence<1>{})); - } + return transform_tensor_descriptor( + a_grid_desc_m_k, + make_tuple(make_unmerge_transform(make_tuple(K0, K1Number)), + make_pass_through_transform(M)), + make_tuple(Sequence<1>{}, Sequence<0>{}), + make_tuple(Sequence<0, 2>{}, Sequence<1>{})); } - static auto MakeCGridDescriptor_M_N(index_t M, index_t N, index_t StrideC) + static auto MakeBGridDescriptor_K0_N_K1(index_t KRaw, index_t NRaw, index_t StrideB) { - const auto c_grid_desc_m_n = [&]() { + const auto b_grid_desc_nraw_kraw = [&]() { + if constexpr(is_same_v) + { + return make_naive_tensor_descriptor(make_tuple(NRaw, KRaw), + make_tuple(StrideB, I1)); + } + else if constexpr(is_same_v) + { + return make_naive_tensor_descriptor(make_tuple(NRaw, KRaw), + make_tuple(I1, StrideB)); + } + }(); + + const auto b_grid_desc_n_k = matrix_padder.PadBDescriptor_N_K(b_grid_desc_nraw_kraw); + const auto N = b_grid_desc_n_k.GetLength(I0); + const auto K = b_grid_desc_n_k.GetLength(I1); + assert(K % K1 == 0); + const index_t K0 = K / K1; + + return transform_tensor_descriptor( + b_grid_desc_n_k, + make_tuple(make_unmerge_transform(make_tuple(K0, K1Number)), + make_pass_through_transform(N)), + make_tuple(Sequence<1>{}, Sequence<0>{}), + make_tuple(Sequence<0, 2>{}, Sequence<1>{})); + } + + static auto MakeCGridDescriptor_M_N(index_t MRaw, index_t NRaw, index_t StrideC) + { + const auto c_grid_desc_mraw_nraw = [&]() { if constexpr(is_same::value) { - return make_naive_tensor_descriptor(make_tuple(M, N), make_tuple(StrideC, I1)); + return 
make_naive_tensor_descriptor(make_tuple(MRaw, NRaw), + make_tuple(StrideC, I1)); } else if constexpr(is_same::value) { - return make_naive_tensor_descriptor(make_tuple(M, N), make_tuple(I1, StrideC)); + return make_naive_tensor_descriptor(make_tuple(MRaw, NRaw), + make_tuple(I1, StrideC)); } }(); - if constexpr(GemmSpec == GemmSpecialization::MNPadding) - { - const auto PadM = (MPerBlock - M % MPerBlock) % MPerBlock; - const auto PadN = (NPerBlock - N % NPerBlock) % NPerBlock; - - return transform_tensor_descriptor( - c_grid_desc_m_n, - make_tuple(make_right_pad_transform(M, PadM), make_right_pad_transform(N, PadN)), - make_tuple(Sequence<0>{}, Sequence<1>{}), - make_tuple(Sequence<0>{}, Sequence<1>{})); - } - else - { - - return transform_tensor_descriptor( - c_grid_desc_m_n, - make_tuple(make_pass_through_transform(M), make_pass_through_transform(N)), - make_tuple(Sequence<0>{}, Sequence<1>{}), - make_tuple(Sequence<0>{}, Sequence<1>{})); - } + return matrix_padder.PadCDescriptor_M_N(c_grid_desc_mraw_nraw); } // Gridwise descriptor, mapping to whole given provblem. diff --git a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_wmma.hpp b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_wmma.hpp index d70c5180da..fda0464caa 100644 --- a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_wmma.hpp +++ b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_wmma.hpp @@ -414,7 +414,7 @@ struct GridwiseGemm_k0mk1_k0nk1_mn_wmma constexpr auto KPack = math::integer_least_multiple(K1, WmmaK); auto blockwise_gemm = - BlockwiseGemmWMMA_k0mk1_k0nk1_m0m1m2n0n1n2m3_CShuffle_FIFO Date: Wed, 1 Mar 2023 10:08:13 -0800 Subject: [PATCH 04/31] Suppress reserved-identifier warning and catch all warnings. 
(#608) * suppress the reserved-identifier warnings * keep BUILD_DEV=On and use -Werror by default --- Jenkinsfile | 4 ++-- cmake/EnableCompilerWarnings.cmake | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index a9f8813707..6b255ce13c 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -172,7 +172,7 @@ def cmake_build(Map conf=[:]){ if(conf.get("build_install","") == "true") { config_targets = 'install ' + config_targets - setup_args = ' -DBUILD_DEV=Off -DCMAKE_INSTALL_PREFIX=../install' + setup_args + setup_args = ' -DBUILD_DEV=On -DCMAKE_INSTALL_PREFIX=../install' + setup_args } else{ setup_args = ' -DBUILD_DEV=On' + setup_args } @@ -657,7 +657,7 @@ pipeline { { agent{ label rocmnode("gfx908 || gfx90a") } environment{ - setup_args = "${params.COMPILER_VERSION == "ck-9110" ? """ -DBUILD_DEV=Off -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx908;gfx90a;gfx1030" -DCMAKE_CXX_FLAGS="-O3 -Xclang -mlink-builtin-bitcode -Xclang /opt/rocm/amdgcn/bitcode/oclc_abi_version_400.bc" """ : """ -DBUILD_DEV=Off -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx908;gfx90a;gfx1030" -DCMAKE_CXX_FLAGS="-O3 " """ }" + setup_args = "${params.COMPILER_VERSION == "ck-9110" ? """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx908;gfx90a;gfx1030" -DCMAKE_CXX_FLAGS="-O3 -Xclang -mlink-builtin-bitcode -Xclang /opt/rocm/amdgcn/bitcode/oclc_abi_version_400.bc" """ : """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx908;gfx90a;gfx1030" -DCMAKE_CXX_FLAGS="-O3 " """ }" execute_args = "${params.COMPILER_VERSION == "ck-9110" ? """ cd ../client_example && rm -rf build && mkdir build && cd build && cmake -D CMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" -DGPU_TARGETS="gfx908;gfx90a;gfx1030" -DCMAKE_CXX_FLAGS="-O3 -Xclang -mlink-builtin-bitcode -Xclang /opt/rocm/amdgcn/bitcode/oclc_abi_version_400.bc" -D CMAKE_CXX_COMPILER="${build_compiler()}" .. 
&& make -j """ : """ cd ../client_example && rm -rf build && mkdir build && cd build && cmake -D CMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" -DGPU_TARGETS="gfx908,gfx90a;gfx1030" -DCMAKE_CXX_FLAGS="-O3" -D CMAKE_CXX_COMPILER="${build_compiler()}" .. && make -j """ }" } steps{ diff --git a/cmake/EnableCompilerWarnings.cmake b/cmake/EnableCompilerWarnings.cmake index 78133af031..87bcb08e83 100644 --- a/cmake/EnableCompilerWarnings.cmake +++ b/cmake/EnableCompilerWarnings.cmake @@ -65,7 +65,8 @@ else() -Wuninitialized -Wunreachable-code -Wunused - + -Wno-reserved-identifier + -Werror -Wsign-compare -Wno-extra-semi-stmt ) From e6cda9f8ff8baa58d61905239a70f93db1933eb5 Mon Sep 17 00:00:00 2001 From: Illia Silin <98187287+illsilin@users.noreply.github.com> Date: Thu, 2 Mar 2023 09:24:31 -0800 Subject: [PATCH 05/31] Change the CI workflow. (#611) * add new parallel stage on navi node * dont run performance tests on navi, get rid of 9110 compiler * only run navi build when not doing QA * fix syntax * use navi21 label * dont stash profiler on navi nodes, scp deb package to ginger * disable tests on navi nodes * test posting a binary to ginger * add sshpass and use it to copy deb package * fix the scp example * fix syntax * debug the scp issues * add jenkins user to docker * dont try whoami * change jenkins uid and add user with uid=1002 * try scp from the last stage on micimaster * rename and stash the package, scp from micimaster --- Dockerfile | 3 +++ Jenkinsfile | 54 ++++++++++++++++++++++++++++++++++++++++------------- 2 files changed, 44 insertions(+), 13 deletions(-) diff --git a/Dockerfile b/Dockerfile index dd2a97c7bd..b03cb836ad 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,6 +7,8 @@ ARG compiler_commit="" RUN set -xe ARG DEB_ROCM_REPO=http://repo.radeon.com/rocm/apt/.apt_$ROCMVERSION/ +RUN useradd -rm -d /home/jenkins -s /bin/bash -u 1004 jenkins +RUN useradd -rm -d /home/manitera -s /bin/bash -u 1002 manitera # Add rocm repository RUN apt-get update 
RUN apt-get install -y wget gnupg @@ -37,6 +39,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --allow- python-dev \ python3-dev \ python3-pip \ + sshpass \ software-properties-common \ rocm-dev \ rocm-device-libs \ diff --git a/Jenkinsfile b/Jenkinsfile index 6b255ce13c..6bd6aa81b2 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -14,7 +14,6 @@ def show_node_info() { def runShell(String command){ def responseCode = sh returnStatus: true, script: "${command} > tmp.txt" def output = readFile(file: "tmp.txt") - echo "tmp.txt contents: $output" return (output != "") } @@ -427,6 +426,7 @@ def Build_CK(Map conf=[:]){ def variant = env.STAGE_NAME def retimage + def navi_node = 0 gitStatusWrapper(credentialsId: "${status_wrapper_creds}", gitHubContext: "Jenkins - ${variant}", account: 'ROCmSoftwarePlatform', repo: 'composable_kernel') { try { @@ -440,6 +440,9 @@ def Build_CK(Map conf=[:]){ else{ echo "GPU is OK" } + if ( runShell('grep -n "gfx1030" clinfo.log') ){ + navi_node = 1 + } } } } @@ -458,6 +461,9 @@ def Build_CK(Map conf=[:]){ else{ echo "GPU is OK" } + if ( runShell('grep -n "gfx1030" clinfo.log') ){ + navi_node = 1 + } } } } @@ -466,16 +472,20 @@ def Build_CK(Map conf=[:]){ { cmake_build(conf) dir("build"){ - //run tests and examples - sh 'make -j check' - //we only need the ckProfiler to run the performance tests, so we pack and stash it - sh 'tar -zcvf ckProfiler.tar.gz bin/ckProfiler' - stash "ckProfiler.tar.gz" + if (navi_node == 0 ){ + //run tests and examples on all nodes except Navi + sh 'make -j check' + //we only need the ckProfiler to run the performance tests, so we pack and stash it + sh 'tar -zcvf ckProfiler.tar.gz bin/ckProfiler' + stash "ckProfiler.tar.gz" + } if (params.RUN_FULL_QA){ // build deb packages sh 'make -j package' archiveArtifacts artifacts: 'composablekernel-ckprofiler_*.deb' archiveArtifacts artifacts: 'composablekernel-tests_*.deb' + sh 'mv composablekernel-ckprofiler_*.deb ckprofiler_0.2.0_amd64.deb' + 
stash "ckprofiler_0.2.0_amd64.deb" } } } @@ -543,6 +553,8 @@ def process_results(Map conf=[:]){ unstash "perf_splitK_gemm.log" unstash "perf_onnx_gemm.log" sh "./process_qa_data.sh" + unstash "ckprofiler_0.2.0_amd64.deb" + sh "sshpass -p ${env.ck_deb_pw} scp -o StrictHostKeyChecking=no ckprofiler_0.2.0_amd64.deb ${env.ck_deb_user}@${env.ck_deb_ip}:/var/www/html/composable_kernel/" } else{ // unstash perf files to master @@ -564,7 +576,7 @@ def process_results(Map conf=[:]){ //launch develop branch daily at 23:00 UT in FULL_QA mode and at 19:00 UT with latest staging compiler version CRON_SETTINGS = BRANCH_NAME == "develop" ? '''0 23 * * * % RUN_FULL_QA=true - 0 21 * * * % RUN_FULL_QA=false;COMPILER_VERSION=release;COMPILER_COMMIT= + 0 21 * * * % COMPILER_VERSION=release;COMPILER_COMMIT= 0 19 * * * % BUILD_DOCKER=true;COMPILER_VERSION=amd-stg-open;COMPILER_COMMIT=''' : "" pipeline { @@ -653,12 +665,28 @@ pipeline { { parallel { - stage("Build CK and run Tests") + stage("Build CK and run Tests on MI100/MI200") { agent{ label rocmnode("gfx908 || gfx90a") } environment{ - setup_args = "${params.COMPILER_VERSION == "ck-9110" ? """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx908;gfx90a;gfx1030" -DCMAKE_CXX_FLAGS="-O3 -Xclang -mlink-builtin-bitcode -Xclang /opt/rocm/amdgcn/bitcode/oclc_abi_version_400.bc" """ : """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx908;gfx90a;gfx1030" -DCMAKE_CXX_FLAGS="-O3 " """ }" - execute_args = "${params.COMPILER_VERSION == "ck-9110" ? """ cd ../client_example && rm -rf build && mkdir build && cd build && cmake -D CMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" -DGPU_TARGETS="gfx908;gfx90a;gfx1030" -DCMAKE_CXX_FLAGS="-O3 -Xclang -mlink-builtin-bitcode -Xclang /opt/rocm/amdgcn/bitcode/oclc_abi_version_400.bc" -D CMAKE_CXX_COMPILER="${build_compiler()}" .. 
&& make -j """ : """ cd ../client_example && rm -rf build && mkdir build && cd build && cmake -D CMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" -DGPU_TARGETS="gfx908,gfx90a;gfx1030" -DCMAKE_CXX_FLAGS="-O3" -D CMAKE_CXX_COMPILER="${build_compiler()}" .. && make -j """ }" + setup_args = """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx908;gfx90a" """ + execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && cmake -D CMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" -DGPU_TARGETS="gfx908,gfx90a" -D CMAKE_CXX_COMPILER="${build_compiler()}" .. && make -j """ + } + steps{ + Build_CK_and_Reboot(setup_args: setup_args, config_targets: "install", no_reboot:true, build_type: 'Release', execute_cmd: execute_args, prefixpath: '/usr/local') + } + } + stage("Build CK and run Tests on Navi") + { + when { + beforeAgent true + expression { !params.RUN_FULL_QA.toBoolean() } + } + agent{ label rocmnode("navi21") } + environment{ + setup_args = """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx1030" """ + execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && cmake -D CMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" -DGPU_TARGETS="gfx1030" -D CMAKE_CXX_COMPILER="${build_compiler()}" .. && make -j """ + } steps{ Build_CK_and_Reboot(setup_args: setup_args, config_targets: "install", no_reboot:true, build_type: 'Release', execute_cmd: execute_args, prefixpath: '/usr/local') @@ -671,7 +699,7 @@ pipeline { { parallel { - stage("Run ckProfiler: gfx908 or gfx90a") + stage("Run ckProfiler: gfx90*") { when { beforeAgent true @@ -680,7 +708,7 @@ pipeline { options { retry(2) } agent{ label rocmnode("gfx908 || gfx90a")} environment{ - setup_args = "${params.COMPILER_VERSION == "ck-9110" ? 
""" -DGPU_TARGETS="gfx908;gfx90a;gfx1030" -DCMAKE_CXX_FLAGS=" -O3 -Xclang -mlink-builtin-bitcode -Xclang /opt/rocm/amdgcn/bitcode/oclc_abi_version_400.bc" -DBUILD_DEV=On """ : """ -DGPU_TARGETS="gfx908;gfx90a;gfx1030" -DCMAKE_CXX_FLAGS=" -O3 " -DBUILD_DEV=On """}" + setup_args = """ -DGPU_TARGETS="gfx908;gfx90a" -DBUILD_DEV=On """ } steps{ runPerfTest(setup_args:setup_args, config_targets: "ckProfiler", no_reboot:true, build_type: 'Release') @@ -695,7 +723,7 @@ pipeline { options { retry(2) } agent{ label rocmnode("gfx90a")} environment{ - setup_args = "${params.COMPILER_VERSION == "ck-9110" ? """ -DGPU_TARGETS="gfx90a" -DCMAKE_CXX_FLAGS=" -O3 -Xclang -mlink-builtin-bitcode -Xclang /opt/rocm/amdgcn/bitcode/oclc_abi_version_400.bc" -DBUILD_DEV=On """ : """ -DGPU_TARGETS="gfx90a" -DCMAKE_CXX_FLAGS=" -O3 " -DBUILD_DEV=On """}" + setup_args = """ -DGPU_TARGETS="gfx90a" -DBUILD_DEV=On """ } steps{ runPerfTest(setup_args:setup_args, config_targets: "ckProfiler", no_reboot:true, build_type: 'Release') From e4bf6d422e3ffc52932f03658f5dbff8362c0946 Mon Sep 17 00:00:00 2001 From: pmaybank <113125070+pmaybank@users.noreply.github.com> Date: Mon, 6 Mar 2023 17:39:16 +0000 Subject: [PATCH 06/31] Generate output using Doxygen / Breathe (#598) * Modify Doxygen config to pick up include directories recursively * Add DeviceMem struct to API Reference guide * Add classes that are used in Flash Attention kernel * Add a reference and config for generating bibliography Co-authored-by: Philip Maybank --- docs/Doxyfile | 8 ++-- docs/source/API_Reference_Guide.rst | 41 ++++++++++++++++--- docs/source/conf.py | 5 ++- docs/source/refs.bib | 7 ++++ .../gpu/block/blockwise_gemm_xdlops.hpp | 15 ++++--- .../gpu/block/blockwise_softmax.hpp | 10 +++++ ...hread_group_tensor_slice_transfer_v4r1.hpp | 13 ++++-- ...ched_gemm_softmax_gemm_xdl_cshuffle_v1.hpp | 4 ++ .../threadwise_tensor_slice_transfer.hpp | 7 +++- .../ck/library/utility/device_memory.hpp | 4 ++ 10 files changed, 94 insertions(+), 20 
deletions(-) create mode 100644 docs/source/refs.bib diff --git a/docs/Doxyfile b/docs/Doxyfile index 958b3b6f44..ca354598b2 100644 --- a/docs/Doxyfile +++ b/docs/Doxyfile @@ -775,8 +775,10 @@ WARN_LOGFILE = # spaces. See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. -INPUT = ../library/include \ - ../library/include/internal +INPUT = ../include/ck/tensor_operation/gpu/grid \ + ../include/ck/tensor_operation/gpu/block \ + ../include/ck/tensor_operation/gpu/thread \ + ../library/include/ck/library/utility # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses @@ -845,7 +847,7 @@ FILE_PATTERNS = *.c \ # be searched for input files as well. # The default value is: NO. -RECURSIVE = NO +RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a diff --git a/docs/source/API_Reference_Guide.rst b/docs/source/API_Reference_Guide.rst index 1ad2ecd9a9..3665049dd6 100644 --- a/docs/source/API_Reference_Guide.rst +++ b/docs/source/API_Reference_Guide.rst @@ -1,11 +1,11 @@ -=================== +******************* API Reference Guide -=================== +******************* ------------- +================= Introduction ------------- +================= This document contains details of the APIs for the Composable Kernel (CK) library and introduces some of the key design principles that are used to write new classes that extend CK functionality. @@ -16,8 +16,37 @@ Using CK API This section describes how to use the CK library API. ------------------ +================= CK Datatypes +================= + +----------------- +DeviceMem ----------------- -[TODO] \ No newline at end of file +.. 
doxygenstruct:: DeviceMem + +--------------------------- +Kernels For Flashattention +--------------------------- + +The Flashattention algorithm is defined in :cite:t:`dao2022flashattention`. This sections lists the classes that are +used in the CK GPU implementation of Flashattention. + +**Gridwise classes** + +.. doxygenstruct:: ck::GridwiseBatchedGemmSoftmaxGemm_Xdl_CShuffle + +**Blockwise classes** + +.. doxygenstruct:: ck::ThreadGroupTensorSliceTransfer_v4r1 + +.. doxygenstruct:: ck::BlockwiseGemmXdlops_v2 + +.. doxygenstruct:: ck::BlockwiseSoftmax + +**Threadwise classes** + +.. doxygenstruct:: ck::ThreadwiseTensorSliceTransfer_StaticToStatic + +.. bibliography:: \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index 8968e2fbe6..65ac187034 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,10 +59,13 @@ if read_the_docs_build: # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = ['sphinx.ext.mathjax', 'breathe'] +extensions = ['sphinx.ext.mathjax', 'breathe', 'sphinxcontrib.bibtex'] + breathe_projects = { "CK": "../docBin/xml" } breathe_default_project = "CK" +bibtex_bibfiles = ['refs.bib'] + # Add any paths that contain templates here, relative to this directory. 
templates_path = ['_templates'] diff --git a/docs/source/refs.bib b/docs/source/refs.bib new file mode 100644 index 0000000000..3c6d775a7e --- /dev/null +++ b/docs/source/refs.bib @@ -0,0 +1,7 @@ + +@article{dao2022flashattention, + title={Flashattention: Fast and memory-efficient exact attention with io-awareness}, + author={Dao, Tri and Fu, Daniel Y and Ermon, Stefano and Rudra, Atri and R{\'e}, Christopher}, + journal={arXiv preprint arXiv:2205.14135}, + year={2022} +} diff --git a/include/ck/tensor_operation/gpu/block/blockwise_gemm_xdlops.hpp b/include/ck/tensor_operation/gpu/block/blockwise_gemm_xdlops.hpp index aeef03d51f..5328dfde9b 100644 --- a/include/ck/tensor_operation/gpu/block/blockwise_gemm_xdlops.hpp +++ b/include/ck/tensor_operation/gpu/block/blockwise_gemm_xdlops.hpp @@ -622,11 +622,16 @@ constexpr auto BlockwiseGemmXdlops_k0mk1_k0nk1_m0n0m1n1m2m3m4n2_Selector() } }; -// Blockwise gemm supporting -// 1. regular XDL output M2_M3_M4_M2 and transposed XDL output M2_N2_N3_N4 -// 2. decoupled input tile descriptor and mma tile descriptor in order to support both vgpr and LDS -// source buffer -// 3. configurable k index starting position and step size after each FMA/XDL instruction +/** + * @brief Blockwise gemm + * + * Supports + * 1. regular XDL output M2_M3_M4_M2 and transposed XDL output M2_N2_N3_N4 + * 2. decoupled input tile descriptor and mma tile descriptor in order to support both vgpr and LDS + * source buffer + * 3. 
configurable k index starting position and step size after each FMA/XDL instruction + */ + template Date: Mon, 6 Mar 2023 15:11:58 -0600 Subject: [PATCH 07/31] Add descriptions to avoid build issues (#619) Co-authored-by: Rosty Geyyer --- .../tensor_operation/gpu/block/blockwise_softmax.hpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/include/ck/tensor_operation/gpu/block/blockwise_softmax.hpp b/include/ck/tensor_operation/gpu/block/blockwise_softmax.hpp index d9f3bc7b43..7e62a822a8 100644 --- a/include/ck/tensor_operation/gpu/block/blockwise_softmax.hpp +++ b/include/ck/tensor_operation/gpu/block/blockwise_softmax.hpp @@ -15,12 +15,12 @@ namespace ck { /** * @brief Blockwise softmax * - * @tparam BlockSize - * @tparam AccDataType - * @tparam ThreadMap_M_K - * @tparam ThreadClusterDesc_M_K - * @tparam ThreadSliceDesc_M_K - * @tparam IgnoreNaN + * @tparam BlockSize Block size + * @tparam AccDataType Accumulator data type + * @tparam ThreadMap_M_K Thread id to m_k + * @tparam ThreadClusterDesc_M_K Threadwise cluster descriptor + * @tparam ThreadSliceDesc_M_K Threadwise slices descriptor + * @tparam IgnoreNaN Flag to ignore NaN, false by default */ template Date: Wed, 8 Mar 2023 05:06:56 +0100 Subject: [PATCH 08/31] GroupedGEMM + Gelu client example/instances/profiler (#614) * Grouped gemm + Gelu instances. * Device Instance Factory for GroupedGemm+Gelu * Client example * Rangify fill helper functions. * Fix name clash. * Profiler for grouped_gemm+gelu * No need to use full namespace name. * Add check for MRaw divisible by vector load. * Ugly fix for big errors. * Add grouped_gemm+gelu to profiler CMakelists. * Store in argument additional info. * Information about Mraw, Nraw, Kraw values. * Use FastGelu instead of Gelu. * Change client ex to use FastGelu * Remove relaxed error precision. 
* Remove duplicate output elementwise-op --------- Co-authored-by: Adam Osewski Co-authored-by: zjing14 --- .../17_grouped_gemm_fastgelu/CMakeLists.txt | 2 + .../grouped_gemm_fastgelu.cpp | 232 +++++++++++++++ .../device/impl/device_grouped_gemm_xdl.hpp | 31 +- .../device_operation_instance_factory.hpp | 1 + .../gpu/grouped_gemm.hpp | 29 +- .../gpu/grouped_gemm_fastgelu.hpp | 136 +++++++++ library/include/ck/library/utility/fill.hpp | 18 ++ .../gpu/grouped_gemm_fastgelu/CMakeLists.txt | 6 + ...gelu_xdl_f16_f16_f16_km_kn_mn_instance.cpp | 104 +++++++ ...gelu_xdl_f16_f16_f16_km_nk_mn_instance.cpp | 104 +++++++ ...gelu_xdl_f16_f16_f16_mk_kn_mn_instance.cpp | 103 +++++++ ...gelu_xdl_f16_f16_f16_mk_nk_mn_instance.cpp | 104 +++++++ .../profile_grouped_gemm_fastgelu_impl.hpp | 280 ++++++++++++++++++ .../profiler/profile_grouped_gemm_impl.hpp | 7 +- profiler/src/CMakeLists.txt | 2 + profiler/src/profile_grouped_gemm.cpp | 10 +- .../src/profile_grouped_gemm_fastgelu.cpp | 177 +++++++++++ 17 files changed, 1320 insertions(+), 26 deletions(-) create mode 100644 client_example/17_grouped_gemm_fastgelu/CMakeLists.txt create mode 100644 client_example/17_grouped_gemm_fastgelu/grouped_gemm_fastgelu.cpp create mode 100644 library/include/ck/library/tensor_operation_instance/gpu/grouped_gemm_fastgelu.hpp create mode 100644 library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/CMakeLists.txt create mode 100644 library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_kn_mn_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_nk_mn_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_kn_mn_instance.cpp create mode 100644 
library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_nk_mn_instance.cpp create mode 100644 profiler/include/profiler/profile_grouped_gemm_fastgelu_impl.hpp create mode 100644 profiler/src/profile_grouped_gemm_fastgelu.cpp diff --git a/client_example/17_grouped_gemm_fastgelu/CMakeLists.txt b/client_example/17_grouped_gemm_fastgelu/CMakeLists.txt new file mode 100644 index 0000000000..659e6769d8 --- /dev/null +++ b/client_example/17_grouped_gemm_fastgelu/CMakeLists.txt @@ -0,0 +1,2 @@ +add_executable(client_grouped_gemm_fastgelu grouped_gemm_fastgelu.cpp) +target_link_libraries(client_grouped_gemm_fastgelu PRIVATE composable_kernel::device_operations) \ No newline at end of file diff --git a/client_example/17_grouped_gemm_fastgelu/grouped_gemm_fastgelu.cpp b/client_example/17_grouped_gemm_fastgelu/grouped_gemm_fastgelu.cpp new file mode 100644 index 0000000000..223ed29be9 --- /dev/null +++ b/client_example/17_grouped_gemm_fastgelu/grouped_gemm_fastgelu.cpp @@ -0,0 +1,232 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include +#include +#include +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/device_grouped_gemm.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +#include "ck/library/tensor_operation_instance/gpu/grouped_gemm_fastgelu.hpp" + +using F16 = ck::half_t; +using F32 = float; + +using Row = ck::tensor_layout::gemm::RowMajor; +using Col = ck::tensor_layout::gemm::ColumnMajor; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using FastGelu = ck::tensor_operation::element_wise::FastGelu; + +using ADataType = F16; +using BDataType = F16; +using DsDataType = ck::Tuple<>; +using EDataType = F16; + +using ALayout = Row; +using BLayout = Col; +using DsLayout = ck::Tuple<>; +using ELayout = Row; + +using AElementOp = PassThrough; +using BElementOp = PassThrough; +using CDEElementOp = FastGelu; + +struct SimpleDeviceMem +{ + SimpleDeviceMem() = delete; + + SimpleDeviceMem(std::size_t mem_size) : p_mem_{} + { + (void)hipMalloc(static_cast(&p_mem_), mem_size); + } + + void* GetDeviceBuffer() { return p_mem_; } + + ~SimpleDeviceMem() { (void)hipFree(p_mem_); } + + void* p_mem_; +}; + +int main() +{ + std::mt19937 gen(19391); + std::uniform_int_distribution<> distrib(1, 10); + int group_count = distrib(gen); + + std::vector Ms, Ns, Ks, StrideAs, StrideBs, StrideEs; + + for(int i = 0; i < group_count; ++i) + { + Ms.push_back(256 + 256 * distrib(gen)); + Ns.push_back(256 + 256 * distrib(gen)); + Ks.push_back(128 + 128 * distrib(gen)); + + StrideAs.push_back(std::is_same::value ? Ks[i] : Ms[i]); + StrideBs.push_back(std::is_same::value ? Ns[i] : Ks[i]); + StrideEs.push_back(std::is_same::value ? 
Ns[i] : Ms[i]); + } + + auto f_matrix_space_size = + [](std::size_t nRow, std::size_t nCol, std::size_t stride, auto layout) { + using Layout = decltype(layout); + + if constexpr(std::is_same::value) + { + return (nRow - 1) * stride + nCol; + } + else + { + return (nCol - 1) * stride + nRow; + } + }; + + std::vector a_dev_bufs, b_dev_bufs, e_dev_bufs; + + a_dev_bufs.reserve(group_count); + b_dev_bufs.reserve(group_count); + e_dev_bufs.reserve(group_count); + + std::vector p_a, p_b; + std::vector p_e; + + p_a.reserve(group_count); + p_b.reserve(group_count); + p_e.reserve(group_count); + + std::vector gemm_descs; + + gemm_descs.reserve(group_count); + + for(int i = 0; i < group_count; ++i) + { + a_dev_bufs.emplace_back(sizeof(ADataType) * + f_matrix_space_size(Ms[i], Ks[i], StrideAs[i], ALayout{})); + b_dev_bufs.emplace_back(sizeof(BDataType) * + f_matrix_space_size(Ks[i], Ns[i], StrideBs[i], BLayout{})); + e_dev_bufs.emplace_back(sizeof(EDataType) * + f_matrix_space_size(Ms[i], Ns[i], StrideEs[i], ELayout{})); + + gemm_descs.push_back({Ms[i], Ns[i], Ks[i], StrideAs[i], StrideBs[i], StrideEs[i], {}}); + + p_a.push_back(a_dev_bufs[i].GetDeviceBuffer()); + p_b.push_back(b_dev_bufs[i].GetDeviceBuffer()); + p_e.push_back(e_dev_bufs[i].GetDeviceBuffer()); + } + + using DeviceOp = ck::tensor_operation::device::DeviceGroupedGemm; + + // get device op instances + const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory< + DeviceOp>::GetInstances(); + + std::cout << "found " << op_ptrs.size() << " instances" << std::endl; + + const auto a_element_op = AElementOp{}; + const auto b_element_op = BElementOp{}; + const auto cde_element_op = CDEElementOp{}; + + std::string best_op_name; + bool found = false; + int best_op_id = -1; + float best_ave_time = 0; + float best_tflops = 0; + float best_gb_per_sec = 0; + + auto p_ds = std::vector>{}; + + // profile device operation instances + std::cout << "Run all instances and do timing" << std::endl; + 
+ for(int i = 0; i < op_ptrs.size(); ++i) + { + auto& op_ptr = op_ptrs[i]; + + auto argument_ptr = op_ptr->MakeArgumentPointer( + p_a, p_b, p_ds, p_e, gemm_descs, a_element_op, b_element_op, cde_element_op); + + auto invoker_ptr = op_ptr->MakeInvokerPointer(); + + SimpleDeviceMem gemm_desc_workspace(op_ptr->GetWorkSpaceSize(argument_ptr.get())); + op_ptr->SetWorkSpacePointer(argument_ptr.get(), gemm_desc_workspace.GetDeviceBuffer()); + std::string op_name = op_ptr->GetTypeString(); + + if(op_ptr->IsSupportedArgument(argument_ptr.get())) + { + + float ave_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true}); + + std::size_t flop = 0, num_btype = 0; + for(std::size_t j = 0; j < gemm_descs.size(); ++j) + { + flop += std::size_t(2) * Ms[j] * Ns[j] * Ks[j]; + + num_btype += sizeof(ADataType) * Ms[j] * Ks[j] + sizeof(BDataType) * Ks[j] * Ns[j] + + sizeof(EDataType) * Ms[j] * Ns[j]; + } + + float tflops = static_cast(flop) / 1.E9 / ave_time; + float gb_per_sec = num_btype / 1.E6 / ave_time; + + std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops << " TFlops, " + << gb_per_sec << " GB/s, " << op_name << std::endl; + + if(tflops > best_tflops) + { + found = true; + best_op_id = i; + best_op_name = op_name; + best_tflops = tflops; + best_ave_time = ave_time; + best_gb_per_sec = gb_per_sec; + } + } + else + { + std::cout << op_name << " does not support this problem" << std::endl; + } + } + + std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, " + << best_gb_per_sec << " GB/s, " << best_op_name << std::endl; + + // run the best intance + if(found) + { + auto& op_ptr = op_ptrs[best_op_id]; + + std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString() + << std::endl; + + auto argument_ptr = op_ptr->MakeArgumentPointer( + p_a, p_b, p_ds, p_e, gemm_descs, a_element_op, b_element_op, cde_element_op); + + auto invoker_ptr = op_ptr->MakeInvokerPointer(); + + SimpleDeviceMem 
gemm_desc_workspace(op_ptr->GetWorkSpaceSize(argument_ptr.get())); + op_ptr->SetWorkSpacePointer(argument_ptr.get(), gemm_desc_workspace.GetDeviceBuffer()); + + if(op_ptr->IsSupportedArgument(argument_ptr.get())) + { + invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false}); + } + + std::cout << "Done" << std::endl; + } + + return 0; +} diff --git a/include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_xdl.hpp b/include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_xdl.hpp index 07009a9e38..88750778cf 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_xdl.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_xdl.hpp @@ -381,6 +381,9 @@ struct DeviceGroupedGemm_Xdl : public DeviceGroupedGemm gemm_desc_kernel_arg_; + std::vector> a_mtx_mraw_kraw_; + std::vector> b_mtx_nraw_kraw_; index_t grid_size_; }; @@ -599,7 +604,28 @@ struct DeviceGroupedGemm_Xdl : public DeviceGroupedGemm{}); + const auto b_vector_dim = arg.b_mtx_nraw_kraw_[i].At(Number{}); + + supported = supported & (a_vector_dim % ABlockTransferSrcScalarPerVector == 0); + supported = supported & (b_vector_dim % BBlockTransferSrcScalarPerVector == 0); + } + } + + return supported; } // polymorphic @@ -661,7 +687,8 @@ struct DeviceGroupedGemm_Xdl : public DeviceGroupedGemm"; // clang-format on diff --git a/library/include/ck/library/tensor_operation_instance/device_operation_instance_factory.hpp b/library/include/ck/library/tensor_operation_instance/device_operation_instance_factory.hpp index 6210637ad3..104b21a3ec 100644 --- a/library/include/ck/library/tensor_operation_instance/device_operation_instance_factory.hpp +++ b/library/include/ck/library/tensor_operation_instance/device_operation_instance_factory.hpp @@ -93,6 +93,7 @@ using AddReluAdd = ck::tensor_operation::element_wise::AddReluAdd; using FastGelu = ck::tensor_operation::element_wise::FastGelu; using AddMultiply = ck::tensor_operation::element_wise::AddMultiply; using 
ScaleAdd = ck::tensor_operation::element_wise::ScaleAdd; +using Gelu = ck::tensor_operation::element_wise::Gelu; template using Activation_Mul_Clamp = ck::tensor_operation::element_wise::Activation_Mul_Clamp; diff --git a/library/include/ck/library/tensor_operation_instance/gpu/grouped_gemm.hpp b/library/include/ck/library/tensor_operation_instance/gpu/grouped_gemm.hpp index c64598dadd..fa7462dbd9 100644 --- a/library/include/ck/library/tensor_operation_instance/gpu/grouped_gemm.hpp +++ b/library/include/ck/library/tensor_operation_instance/gpu/grouped_gemm.hpp @@ -74,18 +74,17 @@ template -struct DeviceOperationInstanceFactory> +struct DeviceOperationInstanceFactory> { using DeviceOp = DeviceGroupedGemm; + PassThrough, + PassThrough, + PassThrough>; static auto GetInstances() { diff --git a/library/include/ck/library/tensor_operation_instance/gpu/grouped_gemm_fastgelu.hpp b/library/include/ck/library/tensor_operation_instance/gpu/grouped_gemm_fastgelu.hpp new file mode 100644 index 0000000000..eb181ebc7f --- /dev/null +++ b/library/include/ck/library/tensor_operation_instance/gpu/grouped_gemm_fastgelu.hpp @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved. 
+ +#pragma once + +#include +#include + +#include "ck/ck.hpp" +#include "ck/library/tensor_operation_instance/device_operation_instance_factory.hpp" +#include "ck/tensor_operation/gpu/device/device_grouped_gemm.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +void add_device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_kn_mn_instances( + std::vector>>& instances); + +void add_device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_nk_mn_instances( + std::vector>>& instances); + +void add_device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_kn_mn_instances( + std::vector>>& instances); + +void add_device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_nk_mn_instances( + std::vector>>& instances); + +// GroupedGEMM + GELU +template +struct DeviceOperationInstanceFactory> +{ + using DeviceOp = DeviceGroupedGemm; + + static auto GetInstances() + { + std::vector> op_ptrs; + + if constexpr(is_same_v && is_same_v && + is_same_v) + { + if constexpr(is_same_v && is_same_v && + is_same_v) + { + add_device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_kn_mn_instances(op_ptrs); + } + else if constexpr(is_same_v && is_same_v && + is_same_v) + { + add_device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_nk_mn_instances(op_ptrs); + } + else if constexpr(is_same_v && is_same_v && + is_same_v) + { + add_device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_kn_mn_instances(op_ptrs); + } + else if constexpr(is_same_v && is_same_v && + is_same_v) + { + add_device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_nk_mn_instances(op_ptrs); + } + } + return op_ptrs; + } +}; + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/include/ck/library/utility/fill.hpp b/library/include/ck/library/utility/fill.hpp index 54d58f362c..c0bc372764 100644 --- a/library/include/ck/library/utility/fill.hpp +++ b/library/include/ck/library/utility/fill.hpp @@ -100,6 +100,15 @@ struct FillMonotonicSeq return tmp; }); } + + template + 
auto operator()(ForwardRange&& range) const -> std::void_t()(std::begin(std::forward(range)), + std::end(std::forward(range))))> + { + (*this)(std::begin(std::forward(range)), + std::end(std::forward(range))); + } }; template @@ -112,6 +121,15 @@ struct FillConstant { std::fill(first, last, value_); } + + template + auto operator()(ForwardRange&& range) const -> std::void_t< + decltype(std::declval()(std::begin(std::forward(range)), + std::end(std::forward(range))))> + { + (*this)(std::begin(std::forward(range)), + std::end(std::forward(range))); + } }; } // namespace utils diff --git a/library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/CMakeLists.txt b/library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/CMakeLists.txt new file mode 100644 index 0000000000..648f2146cb --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/CMakeLists.txt @@ -0,0 +1,6 @@ +add_instance_library(device_grouped_gemm_fastgelu_instance + device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_kn_mn_instance.cpp + device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_nk_mn_instance.cpp + device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_kn_mn_instance.cpp + device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_nk_mn_instance.cpp +) diff --git a/library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_kn_mn_instance.cpp b/library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_kn_mn_instance.cpp new file mode 100644 index 0000000000..c2f5f00c7a --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_kn_mn_instance.cpp @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_grouped_gemm_xdl.hpp" + +#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +using F16 = ck::half_t; +using F32 = float; + +using Row = ck::tensor_layout::gemm::RowMajor; +using Col = ck::tensor_layout::gemm::ColumnMajor; + +template +using S = ck::Sequence; + +using Empty_Tuple = ck::Tuple<>; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using FastGelu = ck::tensor_operation::element_wise::FastGelu; + +static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default; + +// a[k, m] * b[k, n] = e[m, n] +using device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_kn_mn_instances = std::tuple< + // clang-format off + //###################| A| B| Ds| E| AData| BData| AccData| CShuffle| DsData| EData| A| B| C| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| + //###################| Layout| Layout| Layout| Layout| Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Spacialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| + //###################| | | | | | | | | | | Operation| Operation| 
Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| + //###################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 256, 128, 32, 2, 2, 32, 32, 4, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 256, 128, 32, 8, 8, 32, 32, 4, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 8, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 256, 32, 2, 2, 32, 32, 2, 4, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 256, 32, 8, 8, 32, 32, 2, 4, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 128, 128, 32, 2, 2, 32, 32, 4, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, 
FastGelu, GemmDefault, 1, 128, 128, 128, 32, 8, 8, 32, 32, 4, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 8, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 8, 1, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 128, 32, 2, 2, 32, 32, 2, 2, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 128, 64, 32, 2, 2, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 128, 64, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 8, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 64, 128, 32, 2, 2, 32, 32, 2, 2, S<8, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 64, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 8, 1, 1, 
1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 64, 32, 2, 2, 32, 32, 2, 1, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 64, 32, 8, 8, 32, 32, 2, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 64, 128, 32, 2, 2, 32, 32, 1, 2, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 64, 128, 32, 8, 8, 32, 32, 1, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 8, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, 1, 1, S<1, 32, 1, 8>, 8> + // clang-format on + >; + +static constexpr auto GemmMNKPadding = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +using device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_kn_mn_irregular_tile_instances = std::tuple< + // clang-format off + //###################| A| B| Ds| E| AData| BData| AccData| CShuffle| DsData| EData| A| B| C| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| + //###################| Layout| Layout| Layout| 
Layout| Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Spacialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| + //###################| | | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| + //###################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 64, 16, 16, 32, 8, 8, 16, 16, 1, 1, S<4, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 8, 1, S<4, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 8, 1, 1, 1, S<1, 16, 1, 4>, 1>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 16, 64, 32, 8, 8, 16, 16, 1, 1, S<4, 16, 4>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 2, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 8, 1, 1, 1, S<1, 16, 1, 4>, 1>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 128, 64, 32, 2, 2, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 128, 64, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 8, 1, S<4, 32, 1>, S<0, 
2, 1>, S<0, 2, 1>, 1, 2, 8, 1, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 64, 128, 32, 2, 2, 32, 32, 2, 2, S<8, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 64, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 8, 1, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 128, 64, 32, 2, 2, 32, 32, 2, 1, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 128, 64, 32, 8, 8, 32, 32, 2, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 64, 128, 32, 2, 2, 32, 32, 1, 2, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 64, 128, 32, 8, 8, 32, 32, 1, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 8, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, 1, 1, S<1, 32, 1, 8>, 8> + // clang-format on + >; + +void 
add_device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_kn_mn_instances( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_kn_mn_instances{}); + add_device_operation_instances( + instances, + device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_kn_mn_irregular_tile_instances{}); +} + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_nk_mn_instance.cpp b/library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_nk_mn_instance.cpp new file mode 100644 index 0000000000..476d4ce1f8 --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_nk_mn_instance.cpp @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_grouped_gemm_xdl.hpp" + +#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +using F16 = ck::half_t; +using F32 = float; + +using Row = ck::tensor_layout::gemm::RowMajor; +using Col = ck::tensor_layout::gemm::ColumnMajor; + +template +using S = ck::Sequence; + +using Empty_Tuple = ck::Tuple<>; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using FastGelu = ck::tensor_operation::element_wise::FastGelu; + +static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default; + +// a[k, m] * b[n, k] = e[m, n] +using device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_nk_mn_instances = std::tuple< + // clang-format off + //###################| A| B| Ds| E| AData| BData| AccData| CShuffle| DsData| EData| A| B| C| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| + //###################| Layout| Layout| Layout| Layout| Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Spacialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| + //###################| | | | | | | | | | | Operation| Operation| 
Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| + //###################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 256, 128, 32, 2, 8, 32, 32, 4, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 256, 128, 32, 8, 8, 32, 32, 4, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 256, 32, 2, 8, 32, 32, 2, 4, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 256, 32, 8, 8, 32, 32, 2, 4, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 128, 128, 32, 2, 8, 32, 32, 4, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, 
FastGelu, GemmDefault, 1, 128, 128, 128, 32, 8, 8, 32, 32, 4, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 128, 32, 2, 8, 32, 32, 2, 2, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 128, 64, 32, 2, 8, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 128, 64, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 64, 128, 32, 2, 8, 32, 32, 2, 2, S<8, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 64, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 
1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 64, 32, 2, 8, 32, 32, 2, 1, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 64, 32, 8, 8, 32, 32, 2, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 64, 128, 32, 2, 8, 32, 32, 1, 2, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 64, 128, 32, 8, 8, 32, 32, 1, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8> + // clang-format on + >; + +static constexpr auto GemmMNKPadding = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +using device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_nk_mn_irregular_tile_instances = std::tuple< + // clang-format off + //###################| A| B| Ds| E| AData| BData| AccData| CShuffle| DsData| EData| A| B| C| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| + //###################| Layout| Layout| Layout| 
Layout| Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Spacialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| + //###################| | | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| + //###################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 64, 16, 16, 32, 8, 8, 16, 16, 1, 1, S<4, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 8, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 8, 1, 1, 1, S<1, 16, 1, 4>, 1>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 16, 64, 32, 8, 8, 16, 16, 1, 1, S<4, 16, 4>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 2, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 8, 1, 1, 1, S<1, 16, 1, 4>, 1>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 128, 64, 32, 2, 8, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 128, 64, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 8, 1, S<4, 32, 1>, S<1, 
0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 64, 128, 32, 2, 8, 32, 32, 2, 2, S<8, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 64, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 128, 64, 32, 2, 8, 32, 32, 2, 1, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 128, 64, 32, 8, 8, 32, 32, 2, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 64, 128, 32, 2, 8, 32, 32, 1, 2, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Col, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 64, 128, 32, 8, 8, 32, 32, 1, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8> + // clang-format on + >; + +void 
add_device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_nk_mn_instances( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_nk_mn_instances{}); + add_device_operation_instances( + instances, + device_grouped_gemm_fastgelu_xdl_f16_f16_f16_km_nk_mn_irregular_tile_instances{}); +} + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_kn_mn_instance.cpp b/library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_kn_mn_instance.cpp new file mode 100644 index 0000000000..1023fa4810 --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_kn_mn_instance.cpp @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_grouped_gemm_xdl.hpp" + +#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +using F16 = ck::half_t; +using F32 = float; + +using Row = ck::tensor_layout::gemm::RowMajor; +using Col = ck::tensor_layout::gemm::ColumnMajor; + +template +using S = ck::Sequence; + +using Empty_Tuple = ck::Tuple<>; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using FastGelu = ck::tensor_operation::element_wise::FastGelu; + +static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default; + +// a[m, k] * b[k, n] = e[m, n] +using device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_kn_mn_instances = std::tuple< + // clang-format off + //###################| A| B| Ds| E| AData| BData| AccData| CShuffle| DsData| EData| A| B| C| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| + //###################| Layout| Layout| Layout| Layout| Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Spacialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| + //###################| | | | | | | | | | | Operation| Operation| 
Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| + //###################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 256, 128, 32, 8, 2, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 256, 128, 32, 8, 8, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 256, 32, 8, 2, 32, 32, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 256, 32, 8, 8, 32, 32, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 128, 128, 32, 8, 2, 32, 32, 4, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, 
FastGelu, GemmDefault, 1, 128, 128, 128, 32, 8, 8, 32, 32, 4, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 8, 1, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 128, 32, 8, 2, 32, 32, 2, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 128, 64, 32, 8, 2, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<8, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 128, 64, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 64, 128, 32, 8, 2, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 64, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 8, 1, 1, 
1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 64, 32, 8, 2, 32, 32, 2, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 64, 32, 8, 8, 32, 32, 2, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 64, 128, 32, 8, 2, 32, 32, 1, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 64, 128, 32, 8, 8, 32, 32, 1, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, 1, 1, S<1, 32, 1, 8>, 8> + // clang-format on + >; + +static constexpr auto GemmMNKPadding = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +using device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_kn_mn_irregular_tile_instances = std::tuple< + // clang-format off + //###################| A| B| Ds| E| AData| BData| AccData| CShuffle| DsData| EData| A| B| C| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| + //###################| Layout| Layout| Layout| 
Layout| Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Spacialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| + //###################| | | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| + //###################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 128, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 128, 64, 32, 8, 2, 32, 32, 2, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 128, 64, 32, 8, 8, 32, 32, 2, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 64, 128, 32, 8, 2, 32, 32, 1, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<8, 32, 1>, 
S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 64, 128, 32, 8, 8, 32, 32, 1, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 128, 64, 32, 8, 2, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<8, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 128, 64, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 8, 1, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 64, 128, 32, 8, 2, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, 0, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Row, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 64, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 8, 1, 1, 1, S<1, 16, 1, 8>, 8> + // clang-format on + >; + +void add_device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_kn_mn_instances( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_kn_mn_instances{}); + add_device_operation_instances( + instances, + device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_kn_mn_irregular_tile_instances{}); +} + +} // 
namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_nk_mn_instance.cpp b/library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_nk_mn_instance.cpp new file mode 100644 index 0000000000..6b065c0f82 --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/grouped_gemm_fastgelu/device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_nk_mn_instance.cpp @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. + +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_grouped_gemm_xdl.hpp" + +#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +using F16 = ck::half_t; +using F32 = float; + +using Row = ck::tensor_layout::gemm::RowMajor; +using Col = ck::tensor_layout::gemm::ColumnMajor; + +template +using S = ck::Sequence; + +using Empty_Tuple = ck::Tuple<>; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using FastGelu = ck::tensor_operation::element_wise::FastGelu; + +static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default; + +// a[m, k] * b[n, k] = e[m, n] +using device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_nk_mn_instances = std::tuple< + // clang-format off + //###################| A| B| Ds| E| AData| BData| AccData| CShuffle| DsData| EData| A| B| C| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| 
BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| + //###################| Layout| Layout| Layout| Layout| Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Spacialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| + //###################| | | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| + //###################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 256, 128, 32, 8, 8, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 256, 32, 8, 8, 32, 32, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 128, 128, 32, 8, 8, 32, 32, 4, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, 
Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 128, 64, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 64, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 64, 64, 64, 32, 8, 8, 32, 32, 2, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 128, 64, 32, 8, 8, 32, 32, 2, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 256, 64, 128, 32, 8, 8, 32, 32, 1, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 128, 32, 32, 8, 8, 32, 32, 2, 1, S<4, 32, 1>, S<1, 0, 2>, 
S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 128, 32, 128, 32, 8, 8, 32, 32, 1, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 64, 64, 32, 32, 8, 8, 32, 32, 2, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmDefault, 1, 64, 32, 64, 32, 8, 8, 32, 32, 1, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 4>, 8> + // clang-format on + >; + +static constexpr auto GemmMNKPadding = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +using device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_nk_mn_irregular_tile_instances = std::tuple< + // clang-format off + //###################| A| B| Ds| E| AData| BData| AccData| CShuffle| DsData| EData| A| B| C| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| + //###################| Layout| Layout| Layout| Layout| Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Spacialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| 
DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| + //###################| | | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| + //###################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 128, 256, 32, 8, 8, 32, 32, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 128, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 128, 64, 32, 8, 8, 32, 32, 2, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 256, 64, 128, 32, 8, 8, 32, 32, 1, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 128, 128, 32, 8, 8, 32, 
32, 4, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 128, 64, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 64, 128, 32, 8, 8, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 128, 32, 32, 8, 8, 32, 32, 2, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 32, 128, 32, 8, 8, 32, 32, 1, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 128, 32, 256, 32, 8, 8, 32, 32, 1, 4, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 64, 64, 64, 32, 8, 8, 32, 32, 2, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 4>, 8>, + 
DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 64, 64, 32, 32, 8, 8, 32, 32, 2, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 4>, 8>, + DeviceGroupedGemm_Xdl< Row, Col, Empty_Tuple, Row, F16, F16, F32, F16, Empty_Tuple, F16, PassThrough, PassThrough, FastGelu, GemmMNKPadding, 1, 64, 32, 64, 32, 8, 8, 32, 32, 1, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 4>, 8> + // clang-format on + >; + +void add_device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_nk_mn_instances( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_nk_mn_instances{}); + add_device_operation_instances( + instances, + device_grouped_gemm_fastgelu_xdl_f16_f16_f16_mk_nk_mn_irregular_tile_instances{}); +} + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/profiler/include/profiler/profile_grouped_gemm_fastgelu_impl.hpp b/profiler/include/profiler/profile_grouped_gemm_fastgelu_impl.hpp new file mode 100644 index 0000000000..87e6ae44c7 --- /dev/null +++ b/profiler/include/profiler/profile_grouped_gemm_fastgelu_impl.hpp @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#pragma once + +#include + +#include "ck/ck.hpp" +#include "ck/library/tensor_operation_instance/gpu/grouped_gemm_fastgelu.hpp" +#include "ck/library/utility/check_err.hpp" +#include "ck/library/utility/device_memory.hpp" +#include "ck/library/utility/host_tensor.hpp" +#include "ck/library/utility/fill.hpp" +#include "ck/library/utility/literals.hpp" +#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/device_grouped_gemm.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +namespace ck { +namespace profiler { + +template +bool profile_grouped_gemm_fastgelu_impl(int do_verification, + int init_method, + bool do_log, + bool time_kernel, + const std::vector& Ms, + const std::vector& Ns, + const std::vector& Ks, + const std::vector& StrideAs, + const std::vector& StrideBs, + const std::vector& StrideCs) +{ + + bool pass = true; + + auto f_host_tensor_descriptor = + [](std::size_t row, std::size_t col, std::size_t stride, auto layout) { + using namespace ck::literals; + + if(is_same::value) + { + return HostTensorDescriptor({row, col}, {stride, 1_uz}); + } + else + { + return HostTensorDescriptor({row, col}, {1_uz, stride}); + } + }; + + std::size_t group_count = Ms.size(); + + if(!(group_count == Ns.size() && group_count == Ks.size() && group_count == StrideAs.size() && + group_count == StrideBs.size() && group_count == StrideCs.size())) + { + throw std::runtime_error("wrong! 
inconsistent M/N/Ks, StrideA/B/Cs size\n"); + } + + std::vector> a_m_k; + std::vector> b_k_n; + std::vector> c_m_n_device_results; + + for(std::size_t i = 0; i < group_count; i++) + { + a_m_k.push_back( + Tensor(f_host_tensor_descriptor(Ms[i], Ks[i], StrideAs[i], ALayout{}))); + b_k_n.push_back( + Tensor(f_host_tensor_descriptor(Ks[i], Ns[i], StrideBs[i], BLayout{}))); + + c_m_n_device_results.push_back( + Tensor(f_host_tensor_descriptor(Ms[i], Ns[i], StrideCs[i], CLayout{}))); + + std::cout << "group: " << i << " a_m_k[" << i << "]:" << a_m_k[i].mDesc << ", b_k_n[" << i + << "]:" << b_k_n[i].mDesc << ", c_m_n_device_results[" << i + << "]:" << c_m_n_device_results[i].mDesc << std::endl; + + switch(init_method) + { + case 0: break; + case 1: + ck::utils::FillUniformDistributionIntegerValue{}(a_m_k[i]); + ck::utils::FillUniformDistributionIntegerValue{}(b_k_n[i]); + break; + default: + ck::utils::FillUniformDistribution{0.0, 1.0}(a_m_k[i]); + ck::utils::FillUniformDistribution{-0.5, 0.5}(b_k_n[i]); + } + + ck::utils::FillConstant{}(c_m_n_device_results[i]); + } + + using AElementOp = ck::tensor_operation::element_wise::PassThrough; + using BElementOp = ck::tensor_operation::element_wise::PassThrough; + using CElementOp = ck::tensor_operation::element_wise::FastGelu; + + const auto a_element_op = AElementOp{}; + const auto b_element_op = BElementOp{}; + const auto c_element_op = CElementOp{}; + + using DeviceMemPtr = std::unique_ptr; + std::vector a_device_buf, b_device_buf, c_device_buf; + + a_device_buf.reserve(group_count); + b_device_buf.reserve(group_count); + c_device_buf.reserve(group_count); + + std::vector p_a, p_b; + std::vector p_c; + + p_a.reserve(group_count); + p_b.reserve(group_count); + p_c.reserve(group_count); + + std::vector gemm_descs; + + gemm_descs.reserve(group_count); + + for(std::size_t i = 0; i < group_count; i++) + { + a_device_buf.emplace_back( + std::make_unique(sizeof(ADataType) * a_m_k[i].mDesc.GetElementSpaceSize())); + 
b_device_buf.emplace_back( + std::make_unique(sizeof(BDataType) * b_k_n[i].mDesc.GetElementSpaceSize())); + c_device_buf.emplace_back(std::make_unique( + sizeof(CDataType) * c_m_n_device_results[i].mDesc.GetElementSpaceSize())); + + a_device_buf[i]->ToDevice(a_m_k[i].mData.data()); + b_device_buf[i]->ToDevice(b_k_n[i].mData.data()); + c_device_buf[i]->SetZero(); + + gemm_descs.push_back({Ms[i], Ns[i], Ks[i], StrideAs[i], StrideBs[i], StrideCs[i], {}}); + + p_a.push_back(a_device_buf[i]->GetDeviceBuffer()); + p_b.push_back(b_device_buf[i]->GetDeviceBuffer()); + p_c.push_back(c_device_buf[i]->GetDeviceBuffer()); + } + + using DeviceOp = ck::tensor_operation::device::DeviceGroupedGemm, + CLayout, + ADataType, + BDataType, + ck::Tuple<>, + CDataType, + AElementOp, + BElementOp, + CElementOp>; + + const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory< + DeviceOp>::GetInstances(); + + if(op_ptrs.size() <= 0) + { + throw std::runtime_error("wrong! no device GEMM instance found"); + } + + std::string best_gemm_name; + float best_ave_time = 0; + float best_tflops = 0; + float best_gb_per_sec = 0; + + auto p_ds = std::vector>{}; + + // profile device GEMM instances + for(auto& gemm_ptr : op_ptrs) + { + auto argument_ptr = gemm_ptr->MakeArgumentPointer( + p_a, p_b, p_ds, p_c, gemm_descs, a_element_op, b_element_op, c_element_op); + + auto invoker_ptr = gemm_ptr->MakeInvokerPointer(); + DeviceMem gemm_desc_workspace(gemm_ptr->GetWorkSpaceSize(argument_ptr.get())); + gemm_ptr->SetWorkSpacePointer(argument_ptr.get(), gemm_desc_workspace.GetDeviceBuffer()); + + if(gemm_ptr->IsSupportedArgument(argument_ptr.get())) + { + std::string gemm_name = gemm_ptr->GetTypeString(); + + float ave_time = + invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel}); + + std::size_t flop = 0, num_btype = 0; + for(std::size_t i = 0; i < gemm_descs.size(); i++) + { + flop += std::size_t(2) * Ms[i] * Ns[i] * Ks[i]; + num_btype += sizeof(ADataType) 
* Ms[i] * Ks[i] + sizeof(BDataType) * Ks[i] * Ns[i] + + sizeof(CDataType) * Ms[i] * Ns[i]; + } + + float tflops = static_cast(flop) / 1.E9 / ave_time; + + float gb_per_sec = num_btype / 1.E6 / ave_time; + std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops << " TFlops, " + << gb_per_sec << " GB/s, " << gemm_name << std::endl; + + if(tflops > best_tflops) + { + best_gemm_name = gemm_name; + best_tflops = tflops; + best_ave_time = ave_time; + best_gb_per_sec = gb_per_sec; + } + + if(do_verification) + { + for(std::size_t i = 0; i < gemm_descs.size(); i++) + { + + c_device_buf[i]->FromDevice(c_m_n_device_results[i].mData.data()); + Tensor c_m_n_host_result( + f_host_tensor_descriptor(Ms[i], Ns[i], StrideCs[i], CLayout{})); + + using ReferenceGemmInstance = + ck::tensor_operation::host::ReferenceGemm; + + auto ref_gemm = ReferenceGemmInstance{}; + auto ref_invoker = ref_gemm.MakeInvoker(); + auto ref_argument = ref_gemm.MakeArgument(a_m_k[i], + b_k_n[i], + c_m_n_host_result, + a_element_op, + b_element_op, + c_element_op); + + ref_invoker.Run(ref_argument); + + bool group_pass = + ck::utils::check_err(c_m_n_device_results[i], c_m_n_host_result); + pass = pass && group_pass; + + std::cout << "group: " << i << " verification result: " << std::boolalpha + << group_pass << std::endl; + + if(do_log) + { + LogRangeAsType(std::cout << "a : ", a_m_k[i].mData, ",") + << std::endl; + LogRangeAsType(std::cout << "b: ", b_k_n[i].mData, ",") << std::endl; + LogRangeAsType( + std::cout << "c_device: ", c_m_n_device_results[i].mData, ",") + << std::endl; + LogRangeAsType( + std::cout << "c_host : ", c_m_n_host_result.mData, ",") + << std::endl; + } + } + } + } + else + { + std::cout << "does not support this GEMM problem" << std::endl; + } + } + + if(do_verification) + { + std::cout << "Verification: " << (pass ? 
"SUCCESS" : "FAILURE") << std::endl; + } + + std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, " + << best_gb_per_sec << " GB/s, " << best_gemm_name << std::endl; + + return pass; +} + +} // namespace profiler +} // namespace ck diff --git a/profiler/include/profiler/profile_grouped_gemm_impl.hpp b/profiler/include/profiler/profile_grouped_gemm_impl.hpp index 04f94a0f24..2c28850e65 100644 --- a/profiler/include/profiler/profile_grouped_gemm_impl.hpp +++ b/profiler/include/profiler/profile_grouped_gemm_impl.hpp @@ -108,11 +108,6 @@ bool profile_grouped_gemm_impl(int do_verification, const auto b_element_op = BElementOp{}; const auto c_element_op = CElementOp{}; - // if(do_verification) - // { - - // } - using DeviceMemPtr = std::unique_ptr; std::vector a_device_buf, b_device_buf, c_device_buf; @@ -285,7 +280,7 @@ bool profile_grouped_gemm_impl(int do_verification, << best_gb_per_sec << " GB/s, " << best_gemm_name << std::endl; return pass; -} // namespace profiler +} } // namespace profiler } // namespace ck diff --git a/profiler/src/CMakeLists.txt b/profiler/src/CMakeLists.txt index d3ab88a167..c21fff7de9 100644 --- a/profiler/src/CMakeLists.txt +++ b/profiler/src/CMakeLists.txt @@ -29,6 +29,7 @@ set(PROFILER_SOURCES profile_batchnorm_fwd.cpp profile_batchnorm_bwd.cpp profile_batchnorm_infer.cpp + profile_grouped_gemm_fastgelu.cpp ) set(PROFILER_EXECUTABLE ckProfiler) @@ -68,4 +69,5 @@ target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_normalization_instan target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_softmax_instance) target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_reduce_instance) target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batchnorm_instance) +target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_gemm_fastgelu_instance) rocm_install(TARGETS ${PROFILER_EXECUTABLE} COMPONENT profiler) diff --git a/profiler/src/profile_grouped_gemm.cpp 
b/profiler/src/profile_grouped_gemm.cpp index 65e24bd9cc..871b2edfd5 100644 --- a/profiler/src/profile_grouped_gemm.cpp +++ b/profiler/src/profile_grouped_gemm.cpp @@ -29,6 +29,11 @@ enum struct GemmDataType INT8_INT8_INT8, // 3 }; +#define OP_NAME "grouped_gemm" +#define OP_DESC "Grouped GEMM" + +namespace { + std::vector argToIntArray(char* input) { std::vector out; @@ -45,9 +50,6 @@ std::vector argToIntArray(char* input) return out; } -#define OP_NAME "grouped_gemm" -#define OP_DESC "Grouped GEMM" - int profile_grouped_gemm(int argc, char* argv[]) { if(!(argc == 14)) @@ -166,4 +168,6 @@ int profile_grouped_gemm(int argc, char* argv[]) return 0; } +} // anonymous namespace + REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_grouped_gemm); diff --git a/profiler/src/profile_grouped_gemm_fastgelu.cpp b/profiler/src/profile_grouped_gemm_fastgelu.cpp new file mode 100644 index 0000000000..9b6142f015 --- /dev/null +++ b/profiler/src/profile_grouped_gemm_fastgelu.cpp @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include +#include +#include +#include + +#include "profiler/profile_grouped_gemm_fastgelu_impl.hpp" +#include "profiler_operation_registry.hpp" + +enum struct GemmMatrixLayout +{ + MK_KN_MN, // 0 + MK_NK_MN, // 1 + KM_KN_MN, // 2 + KM_NK_MN, // 3 + MK_KN_NM, // 4 + MK_NK_NM, // 5 + KM_KN_NM, // 6 + KM_NK_NM, // 7 +}; + +enum struct GemmDataType +{ + F32_F32_F32, // 0 + F16_F16_F16, // 1 + BF16_BF16_BF16, // 2 + INT8_INT8_INT8, // 3 +}; + +#define OP_NAME "grouped_gemm_fastgelu" +#define OP_DESC "Grouped GEMM+FastGelu" + +namespace { + +std::vector argToIntArray(char* input) +{ + std::vector out; + + std::istringstream in(input); + + std::string item; + + while(std::getline(in, item, ',')) + { + out.push_back(std::stoi(item)); + } + + return out; +} + +int profile_grouped_gemm_fastgelu(int argc, char* argv[]) +{ + if(!(argc == 14)) + { + printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n"); + printf("arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8)\n"); + printf("arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n"); + printf(" 1: A[m, k] * B[n, k] = C[m, n];\n"); + printf(" 2: A[k, m] * B[k, n] = C[m, n];\n"); + printf(" 3: A[k, m] * B[n, k] = C[m, n])\n"); + printf("arg4: verification (0: no; 1: yes)\n"); + printf("arg5: initialization (0: no init; 1: integer value; 2: decimal value)\n"); + printf("arg6: print tensor value (0: no; 1: yes)\n"); + printf("arg7: time kernel (0=n0, 1=yes)\n"); + printf("arg8 to 13: Ms, Ns, Ks, StrideAs, StrideBs, StrideCs (e.g., 256,256 128,128 64,64 " + "64,64 64,64 128,128)\n"); + exit(1); + } + + const auto data_type = static_cast(std::stoi(argv[2])); + const auto layout = static_cast(std::stoi(argv[3])); + const bool do_verification = std::stoi(argv[4]); + const int init_method = std::stoi(argv[5]); + const bool do_log = std::stoi(argv[6]); + const bool time_kernel = std::stoi(argv[7]); + + const auto Ms = argToIntArray(argv[8]); + const auto Ns = argToIntArray(argv[9]); + const auto Ks = 
argToIntArray(argv[10]); + + const auto StrideAs = argToIntArray(argv[11]); + const auto StrideBs = argToIntArray(argv[12]); + const auto StrideCs = argToIntArray(argv[13]); + + if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_KN_MN) + { + ck::profiler::profile_grouped_gemm_fastgelu_impl( + do_verification, + init_method, + do_log, + time_kernel, + Ms, + Ns, + Ks, + StrideAs, + StrideBs, + StrideCs); + } + else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_NK_MN) + { + ck::profiler::profile_grouped_gemm_fastgelu_impl( + do_verification, + init_method, + do_log, + time_kernel, + Ms, + Ns, + Ks, + StrideAs, + StrideBs, + StrideCs); + } + else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::KM_KN_MN) + { + ck::profiler::profile_grouped_gemm_fastgelu_impl( + do_verification, + init_method, + do_log, + time_kernel, + Ms, + Ns, + Ks, + StrideAs, + StrideBs, + StrideCs); + } + else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::KM_NK_MN) + { + ck::profiler::profile_grouped_gemm_fastgelu_impl( + do_verification, + init_method, + do_log, + time_kernel, + Ms, + Ns, + Ks, + StrideAs, + StrideBs, + StrideCs); + } + else + { + throw std::runtime_error("wrong! this GEMM data_type & layout is not implemented"); + } + + return 0; +} + +} // anonymous namespace + +REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_grouped_gemm_fastgelu); From 0ccecc7c31e213b7003031fee30f0930fa1c48b0 Mon Sep 17 00:00:00 2001 From: Illia Silin <98187287+illsilin@users.noreply.github.com> Date: Thu, 9 Mar 2023 05:56:40 -0800 Subject: [PATCH 09/31] [gfx110x] support Navi3x architectures. 
(#628) * enable building on Nav31 * fix syntax * replace GPU_TARGETS with offload-arch * add gfx1102 rachitecture * fix typo * update changelog --- CHANGELOG.md | 3 ++- Jenkinsfile | 4 ++-- example/01_gemm/CMakeLists.txt | 2 +- example/02_gemm_bilinear/CMakeLists.txt | 2 +- .../29_batched_gemm_bias_e_permute/CMakeLists.txt | 2 +- .../30_grouped_conv_fwd_multiple_d/CMakeLists.txt | 2 +- include/ck/ck.hpp | 12 ++---------- ..._batched_contraction_multiple_d_wmma_cshuffle.hpp | 3 ++- .../impl/device_gemm_multiple_d_wmma_cshuffle.hpp | 3 ++- .../gpu/device/impl/device_gemm_wmma.hpp | 3 ++- ...ice_grouped_conv_fwd_multiple_d_wmma_cshuffle.hpp | 3 ++- .../grid/gridwise_gemm_multiple_d_wmma_cshuffle.hpp | 11 +++++++---- .../tensor_operation/gpu/grid/gridwise_gemm_wmma.hpp | 3 ++- 13 files changed, 27 insertions(+), 26 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 79c45a0db3..0188350046 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ Full documentation for Composable Kernel is not yet available. -## CK 0.1.1 for ROCm 5.5.0 +## CK 0.2.0 for ROCm 5.5.0 ### Fixed - Fixed a bug in 6-dimensional kernels (#555). @@ -12,6 +12,7 @@ Full documentation for Composable Kernel is not yet available. - Improve proformance of normalization kernel ### Added +- Added support on NAVI3x. - Added user tutorial (#563). - Added more instances for irregular GEMM sizes (#560). - Added inter-wave consumer-producer programming model for GEMM kernels (#310). diff --git a/Jenkinsfile b/Jenkinsfile index 6bd6aa81b2..bf50033593 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -684,8 +684,8 @@ pipeline { } agent{ label rocmnode("navi21") } environment{ - setup_args = """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx1030" """ - execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && cmake -D CMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" -DGPU_TARGETS="gfx1030" -D CMAKE_CXX_COMPILER="${build_compiler()}" .. 
&& make -j """ + setup_args = """ -DCMAKE_INSTALL_PREFIX=../install """ + execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && cmake -D CMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" -DCMAKE_CXX_FLAGS=" --offload-arch=gfx1030 --offload-arch=gfx1100 --offload-arch=gfx1101 --offload-arch=gfx1102" -D CMAKE_CXX_COMPILER="${build_compiler()}" .. && make -j """ } steps{ diff --git a/example/01_gemm/CMakeLists.txt b/example/01_gemm/CMakeLists.txt index 7f8fdf35f4..c5a8295188 100644 --- a/example/01_gemm/CMakeLists.txt +++ b/example/01_gemm/CMakeLists.txt @@ -38,7 +38,7 @@ add_example_executable_no_testing(example_gemm_xdl_fp64 gemm_xdl_fp64.cpp) add_dependencies(example_gemm_xdl example_gemm_xdl_skip_b_lds_fp16) add_dependencies(example_gemm_xdl example_gemm_xdl_fp64) -if(GPU_TARGETS MATCHES "gfx1100") +if(GPU_TARGETS MATCHES "gfx1100" OR GPU_TARGETS MATCHES "gfx1101" OR GPU_TARGETS MATCHES "gfx1102") add_custom_target(example_gemm_wmma) add_example_executable(example_gemm_wmma_fp16 gemm_wmma_fp16.cpp) add_dependencies(example_gemm_wmma example_gemm_wmma_fp16) diff --git a/example/02_gemm_bilinear/CMakeLists.txt b/example/02_gemm_bilinear/CMakeLists.txt index 1343a814ad..16a8211027 100644 --- a/example/02_gemm_bilinear/CMakeLists.txt +++ b/example/02_gemm_bilinear/CMakeLists.txt @@ -1,4 +1,4 @@ add_example_executable(example_gemm_bilinear_xdl_fp16 gemm_bilinear_xdl_fp16.cpp) -if(GPU_TARGETS MATCHES "gfx1100") +if(GPU_TARGETS MATCHES "gfx1100" OR GPU_TARGETS MATCHES "gfx1101" OR GPU_TARGETS MATCHES "gfx1102") add_example_executable(example_gemm_bilinear_wmma_fp16 gemm_bilinear_wmma_fp16.cpp) endif() diff --git a/example/29_batched_gemm_bias_e_permute/CMakeLists.txt b/example/29_batched_gemm_bias_e_permute/CMakeLists.txt index c74294feb0..32a87dd200 100644 --- a/example/29_batched_gemm_bias_e_permute/CMakeLists.txt +++ b/example/29_batched_gemm_bias_e_permute/CMakeLists.txt @@ -1,5 +1,5 @@ 
add_example_executable(example_batched_gemm_bias_e_permute_xdl_fp16 batched_gemm_bias_e_permute_xdl_fp16.cpp) -if(GPU_TARGETS MATCHES "gfx1100") +if(GPU_TARGETS MATCHES "gfx1100" OR GPU_TARGETS MATCHES "gfx1101" OR GPU_TARGETS MATCHES "gfx1102") add_example_executable(example_batched_gemm_bias_e_permute_wmma_fp16 batched_gemm_bias_e_permute_wmma_fp16.cpp) endif() diff --git a/example/30_grouped_conv_fwd_multiple_d/CMakeLists.txt b/example/30_grouped_conv_fwd_multiple_d/CMakeLists.txt index acf9bcdb46..4b0ea4f157 100644 --- a/example/30_grouped_conv_fwd_multiple_d/CMakeLists.txt +++ b/example/30_grouped_conv_fwd_multiple_d/CMakeLists.txt @@ -16,7 +16,7 @@ if(USE_BITINT_EXTENSION_INT4) add_dependencies(example_grouped_conv_fwd_multiple_d example_grouped_conv_fwd_bias_relu_add_xdl_int4) endif() # USE_BITINT_EXTENSION_INT4 -if(GPU_TARGETS MATCHES "gfx1100") +if(GPU_TARGETS MATCHES "gfx1100" OR GPU_TARGETS MATCHES "gfx1101" OR GPU_TARGETS MATCHES "gfx1102") add_example_executable(example_grouped_conv_fwd_bias_relu_add_wmma_fp16 grouped_conv_fwd_bias_relu_add_wmma_fp16.cpp) endif() diff --git a/include/ck/ck.hpp b/include/ck/ck.hpp index 1257a77649..e7950af7ad 100644 --- a/include/ck/ck.hpp +++ b/include/ck/ck.hpp @@ -27,14 +27,6 @@ #define CK_WAVELET_MIN_BLOCK_PER_CU 2 #endif -// check GPU target -#ifdef __HIP_DEVICE_COMPILE__ -#if !(defined(__gfx803__) || defined(__gfx900__) || defined(__gfx906__) || defined(__gfx908__) || \ - defined(__gfx90a__) || defined(__gfx1030__) || defined(__gfx1100__)) -#error Not supported target -#endif -#endif - // buffer resource #ifndef __HIP_DEVICE_COMPILE__ // for host code #define CK_BUFFER_RESOURCE_3RD_DWORD -1 @@ -43,7 +35,7 @@ #define CK_BUFFER_RESOURCE_3RD_DWORD 0x00020000 #elif defined(__gfx1030__) // for GPU code #define CK_BUFFER_RESOURCE_3RD_DWORD 0x31014000 -#elif defined(__gfx1100__) // for GPU code +#elif defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) // for GPU code #define CK_BUFFER_RESOURCE_3RD_DWORD 
0x10020000 #endif @@ -72,7 +64,7 @@ // WMMA instruction #ifndef __HIP_DEVICE_COMPILE__ // for host code #define CK_USE_AMD_WMMA -#elif defined(__gfx1100__) // for GPU code +#elif defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) // for GPU code #define CK_USE_AMD_WMMA #endif diff --git a/include/ck/tensor_operation/gpu/device/impl/device_batched_contraction_multiple_d_wmma_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_batched_contraction_multiple_d_wmma_cshuffle.hpp index b1a78dc99b..493822aeb2 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_batched_contraction_multiple_d_wmma_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_batched_contraction_multiple_d_wmma_cshuffle.hpp @@ -770,7 +770,8 @@ struct DeviceBatchedContractionMultipleD_Wmma_CShuffle static bool IsSupportedArgument(const Argument& arg) { - if(ck::get_device_name() == "gfx1100") + if(ck::get_device_name() == "gfx1100" || ck::get_device_name() == "gfx1101" || + ck::get_device_name() == "gfx1102") { if constexpr(!(is_same_v || is_same_v)) { diff --git a/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_wmma_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_wmma_cshuffle.hpp index 1d705a28b0..44ff068355 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_wmma_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_wmma_cshuffle.hpp @@ -476,7 +476,8 @@ struct DeviceGemmMultipleD_Wmma_CShuffle : public DeviceGemmMultipleD || is_same_v)) { diff --git a/include/ck/tensor_operation/gpu/device/impl/device_gemm_wmma.hpp b/include/ck/tensor_operation/gpu/device/impl/device_gemm_wmma.hpp index e8e67532be..d645c03113 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_wmma.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_wmma.hpp @@ -404,7 +404,8 @@ struct DeviceGemmWmma_CShuffle : public DeviceGemm || 
is_same_v)) { diff --git a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_wmma_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_wmma_cshuffle.hpp index e245902b6c..dd56b207bf 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_wmma_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_wmma_cshuffle.hpp @@ -579,7 +579,8 @@ struct DeviceGroupedConvFwdMultipleD_Wmma_CShuffle namespace ctc = tensor_layout::convolution; // check device - if(get_device_name() == "gfx1100") + if(get_device_name() == "gfx1100" || get_device_name() == "gfx1101" || + ck::get_device_name() == "gfx1102") { if constexpr(!(is_same_v || is_same_v)) { diff --git a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_d_wmma_cshuffle.hpp b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_d_wmma_cshuffle.hpp index 2ce4d8feb3..347f3e5f1f 100644 --- a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_d_wmma_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_d_wmma_cshuffle.hpp @@ -54,7 +54,8 @@ __global__ void const Block2CTileMap block_2_ctile_map, const ComputePtrOffsetOfBatch compute_ptr_offset_of_batch) { -#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx1100__)) +#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx1100__) || defined(__gfx1101__) || \ + defined(__gfx1102__)) // offset base pointer for each work-group const index_t num_blocks_per_batch = __builtin_amdgcn_readfirstlane(get_grid_size() / batch_count); @@ -147,7 +148,8 @@ __global__ void const ComputePtrOffsetOfBatch compute_ptr_offset_of_batch, const Block2CTileMap block_2_etile_map) { -#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx1100__)) +#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx1100__) || defined(__gfx1101__) || \ + defined(__gfx1102__)) // printf("entry kernel launch"); __shared__ char 
p_shared[GridwiseOp::GetSharedMemoryNumberOfByte()]; @@ -242,7 +244,8 @@ __global__ void const CDEElementwiseOperation cde_element_op, const Block2CTileMap block_2_ctile_map) { -#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx1100__)) +#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx1100__) || defined(__gfx1101__) || \ + defined(__gfx1102__)) __shared__ char p_shared[GridwiseOp::GetSharedMemoryNumberOfByte()]; GridwiseOp::template Run(p_a_grid, @@ -271,7 +274,7 @@ __global__ void ignore = b_element_op; ignore = cde_element_op; ignore = block_2_ctile_map; -#endif // end of if (defined(__gfx1100__)) +#endif // end of if (defined(__gfx1100__ )) } template < // DataType Family diff --git a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_wmma.hpp b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_wmma.hpp index fda0464caa..1fee302c3c 100644 --- a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_wmma.hpp +++ b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_wmma.hpp @@ -49,7 +49,8 @@ __global__ void const CElementwiseOperation c_element_op, const Block2CTileMap block_2_ctile_map) { -#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx1100__)) +#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx1100__) || defined(__gfx1101__) || \ + defined(__gfx1102__)) __shared__ char p_shared[GridwiseGemm::GetSharedMemoryNumberOfByte()]; GridwiseGemm::template Run(p_a_grid, From 76fcdc60e9ef848d23527079ad59ca6f4040ca59 Mon Sep 17 00:00:00 2001 From: carlushuang Date: Thu, 9 Mar 2023 22:07:24 +0800 Subject: [PATCH 10/31] fix a bug with non-dword-aligned offset when OOB, in case crash (#616) Co-authored-by: zjing14 --- include/ck/utility/amd_buffer_addressing.hpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/ck/utility/amd_buffer_addressing.hpp b/include/ck/utility/amd_buffer_addressing.hpp index 79295356df..bdfb4f2758 100644 --- a/include/ck/utility/amd_buffer_addressing.hpp +++ b/include/ck/utility/amd_buffer_addressing.hpp @@ 
-1030,7 +1030,7 @@ amd_buffer_load_invalid_element_return_zero(const T* p_src_wave, constexpr index_t vector_size = scalar_type::vector_size; #if CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK - uint32_t src_addr_shift = src_thread_element_valid ? 0 : 0x7fffffff; + uint32_t src_addr_shift = src_thread_element_valid ? 0 : 0x80000000; return amd_buffer_load_impl( src_wave_buffer_resource, src_addr_shift + src_thread_addr_offset, 0); @@ -1091,7 +1091,7 @@ __device__ void amd_buffer_store(const typename vector_type_maker::type::t constexpr index_t vector_size = scalar_type::vector_size; #if CK_EXPERIMENTAL_USE_BUFFER_STORE_OOB_CHECK_OFFSET_TRICK - uint32_t dst_addr_shift = dst_thread_element_valid ? 0 : 0x7fffffff; + uint32_t dst_addr_shift = dst_thread_element_valid ? 0 : 0x80000000; amd_buffer_store_impl( src_thread_data, dst_wave_buffer_resource, dst_addr_shift + dst_thread_addr_offset, 0); @@ -1126,7 +1126,7 @@ amd_buffer_atomic_add(const typename vector_type_maker::type::type src_thr constexpr index_t vector_size = scalar_type::vector_size; #if CK_EXPERIMENTAL_USE_BUFFER_ATOMIC_ADD_OOB_CHECK_OFFSET_TRICK - uint32_t dst_addr_shift = dst_thread_element_valid ? 0 : 0x7fffffff; + uint32_t dst_addr_shift = dst_thread_element_valid ? 0 : 0x80000000; amd_buffer_atomic_add_impl( src_thread_data, dst_wave_buffer_resource, dst_addr_shift + dst_thread_addr_offset, 0); @@ -1161,7 +1161,7 @@ amd_buffer_atomic_max(const typename vector_type_maker::type::type src_thr constexpr index_t vector_size = scalar_type::vector_size; #if CK_EXPERIMENTAL_USE_BUFFER_ATOMIC_MAX_OOB_CHECK_OFFSET_TRICK - uint32_t dst_addr_shift = dst_thread_element_valid ? 0 : 0x7fffffff; + uint32_t dst_addr_shift = dst_thread_element_valid ? 
0 : 0x80000000; amd_buffer_atomic_max_impl( src_thread_data, dst_wave_buffer_resource, dst_addr_shift + dst_thread_addr_offset, 0); From 087e3105892dec2fa10d2ef6c3419a4f9fe0b84f Mon Sep 17 00:00:00 2001 From: Haocong WANG Date: Sat, 11 Mar 2023 07:04:28 +0800 Subject: [PATCH 11/31] [Navi3x] Multiple issue fix (#612) * Change gridwise gemm mD blockwise gemm to naive * RRR Gemm fix * Fix RCR gemm bug * Isolate wmma instructions * Update amd_inline_asm.hpp * Update amd_wmma.hpp * Update amd_wmma.hpp * fix syntax and update Jenkinsfile --------- Co-authored-by: zjing14 Co-authored-by: Illia Silin <98187287+illsilin@users.noreply.github.com> Co-authored-by: illsilin --- Jenkinsfile | 2 +- .../device_gemm_multiple_d_wmma_cshuffle.hpp | 12 ++-- .../gpu/device/impl/device_gemm_wmma.hpp | 12 ++-- ...gridwise_gemm_multiple_d_wmma_cshuffle.hpp | 3 +- include/ck/utility/amd_inline_asm.hpp | 18 +++-- include/ck/utility/amd_wmma.hpp | 69 +++++++++++++++++-- 6 files changed, 90 insertions(+), 26 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index bf50033593..bb0b352d75 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -685,7 +685,7 @@ pipeline { agent{ label rocmnode("navi21") } environment{ setup_args = """ -DCMAKE_INSTALL_PREFIX=../install """ - execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && cmake -D CMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" -DCMAKE_CXX_FLAGS=" --offload-arch=gfx1030 --offload-arch=gfx1100 --offload-arch=gfx1101 --offload-arch=gfx1102" -D CMAKE_CXX_COMPILER="${build_compiler()}" .. && make -j """ + execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && cmake -D CMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" -DGPU_TARGETS="gfx1030;gfx1100;gfx1101;gfx1102" -D CMAKE_CXX_COMPILER="${build_compiler()}" .. 
&& make -j """ } steps{ diff --git a/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_wmma_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_wmma_cshuffle.hpp index 44ff068355..b59bb2b303 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_wmma_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_wmma_cshuffle.hpp @@ -121,16 +121,16 @@ struct DeviceGemmMultipleD_Wmma_CShuffle : public DeviceGemmMultipleD) - { - return make_naive_tensor_descriptor(make_tuple(NRaw, KRaw), - make_tuple(StrideB, I1)); - } - else if constexpr(is_same_v) + if constexpr(is_same_v) { return make_naive_tensor_descriptor(make_tuple(NRaw, KRaw), make_tuple(I1, StrideB)); } + else if constexpr(is_same_v) + { + return make_naive_tensor_descriptor(make_tuple(NRaw, KRaw), + make_tuple(StrideB, I1)); + } }(); const auto b_grid_desc_n_k = matrix_padder.PadBDescriptor_N_K(b_grid_desc_nraw_kraw); diff --git a/include/ck/tensor_operation/gpu/device/impl/device_gemm_wmma.hpp b/include/ck/tensor_operation/gpu/device/impl/device_gemm_wmma.hpp index d645c03113..9b0ff3f46f 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_wmma.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_wmma.hpp @@ -114,16 +114,16 @@ struct DeviceGemmWmma_CShuffle : public DeviceGemm) - { - return make_naive_tensor_descriptor(make_tuple(NRaw, KRaw), - make_tuple(StrideB, I1)); - } - else if constexpr(is_same_v) + if constexpr(is_same_v) { return make_naive_tensor_descriptor(make_tuple(NRaw, KRaw), make_tuple(I1, StrideB)); } + else if constexpr(is_same_v) + { + return make_naive_tensor_descriptor(make_tuple(NRaw, KRaw), + make_tuple(StrideB, I1)); + } }(); const auto b_grid_desc_n_k = matrix_padder.PadBDescriptor_N_K(b_grid_desc_nraw_kraw); diff --git a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_d_wmma_cshuffle.hpp 
b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_d_wmma_cshuffle.hpp index 347f3e5f1f..1e8f8ff9fe 100644 --- a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_d_wmma_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_d_wmma_cshuffle.hpp @@ -676,7 +676,7 @@ struct GridwiseGemmMultipleD_k0mk1_k0nk1_mn_wmma_cshuffle constexpr auto KPack = math::integer_least_multiple(K1, WmmaK); auto blockwise_gemm = - BlockwiseGemmWMMA_k0mk1_k0nk1_m0m1m2n0n1n2m3_CShuffle_FIFO(a), bit_cast(b0), c0, false); - c1 = __builtin_amdgcn_sdot4(bit_cast(a), bit_cast(b1), c1, false); + c0 = __builtin_amdgcn_sdot4(bit_cast(a), bit_cast(b0), c0, false); + c1 = __builtin_amdgcn_sdot4(bit_cast(a), bit_cast(b1), c1, false); #endif } @@ -257,10 +257,10 @@ __device__ void amd_assembly_outer_product_1x4(int8x4_t a, "2"(c2), "3"(c3)); #else - c0 = __builtin_amdgcn_sdot4(bit_cast(a), bit_cast(b0), c0, false); - c1 = __builtin_amdgcn_sdot4(bit_cast(a), bit_cast(b1), c1, false); - c2 = __builtin_amdgcn_sdot4(bit_cast(a), bit_cast(b2), c2, false); - c3 = __builtin_amdgcn_sdot4(bit_cast(a), bit_cast(b3), c3, false); + c0 = __builtin_amdgcn_sdot4(bit_cast(a), bit_cast(b0), c0, false); + c1 = __builtin_amdgcn_sdot4(bit_cast(a), bit_cast(b1), c1, false); + c2 = __builtin_amdgcn_sdot4(bit_cast(a), bit_cast(b2), c2, false); + c3 = __builtin_amdgcn_sdot4(bit_cast(a), bit_cast(b3), c3, false); #endif } @@ -358,7 +358,13 @@ __device__ void amd_assembly_outer_product_1x4(int8x16_t a, // Ranged input operand __device__ void amd_assembly_wmma_f32_16x16x16_f16_w32(half16_t a, half16_t b, float8_t& c) { +#if defined(__gfx11__) asm volatile("v_wmma_f32_16x16x16_f16 %0, %1, %2, %0" : "=v"(c) : "v"(a), "v"(b), "0"(c)); +#else + ignore = a; + ignore = b; + ignore = c; +#endif } } // namespace ck diff --git a/include/ck/utility/amd_wmma.hpp b/include/ck/utility/amd_wmma.hpp index a0e79220e0..7598fe667b 100644 --- a/include/ck/utility/amd_wmma.hpp +++ 
b/include/ck/utility/amd_wmma.hpp @@ -23,11 +23,16 @@ struct intrin_wmma_f32_16x16x16_f16_w32<16, 16> { // * Inline assembly need to elimate the duplicated data load, compiler won't help you // delete them. - amd_assembly_wmma_f32_16x16x16_f16_w32( - reg_a, reg_b, reg_c.template AsType()(Number<0>{})); - // reg_c.template AsType()(Number<0>{}) = - // __builtin_amdgcn_wmma_f32_16x16x16_f16_w32( reg_a, reg_b, reg_c.template - // AsType()[Number<0>{}]); + // amd_assembly_wmma_f32_16x16x16_f16_w32( + // reg_a, reg_b, reg_c.template AsType()(Number<0>{})); +#if defined(__gfx11__) + reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_f32_16x16x16_f16_w32( + reg_a, reg_b, reg_c.template AsType()[Number<0>{}]); +#else + ignore = reg_a; + ignore = reg_b; + ignore = reg_c; +#endif } }; @@ -41,9 +46,15 @@ struct intrin_wmma_f32_16x16x16_bf16_w32<16, 16> template __device__ static void Run(const bhalf16_t& reg_a, const bhalf16_t& reg_b, FloatC& reg_c) { +#if defined(__gfx11__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_f32_16x16x16_bf16_w32( reg_a, reg_b, reg_c.template AsType()[Number<0>{}]); +#else + ignore = reg_a; + ignore = reg_b; + ignore = reg_c; +#endif } }; @@ -60,8 +71,14 @@ struct intrin_wmma_f16_16x16x16_f16_w32<16, 16, Opsel> // opsel usage // false: D0.[0:15] = result // true : D0.[16:31]= result +#if defined(__gfx11__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_f16_16x16x16_f16_w32( reg_a, reg_b, reg_c.template AsType()[Number<0>{}], Opsel); +#else + ignore = reg_a; + ignore = reg_b; + ignore = reg_c; +#endif } }; @@ -78,9 +95,15 @@ struct intrin_wmma_bf16_16x16x16_bf16_w32<16, 16, Opsel> // opsel usage // false: D0.[0:15] = result // true : D0.[16:31]= result +#if defined(__gfx11__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32( reg_a, reg_b, reg_c.template AsType()[Number<0>{}], Opsel); +#else + ignore = reg_a; + ignore = reg_b; + ignore = reg_c; +#endif } }; @@ -94,6 +117,7 @@ 
struct intrin_wmma_i32_16x16x16_iu8_w32<16, 16, neg_a, neg_b, clamp> template __device__ static void Run(const int8x16_t& reg_a, const int8x16_t& reg_b, FloatC& reg_c) { +#if defined(__gfx11__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_i32_16x16x16_iu8_w32( neg_a, @@ -102,6 +126,11 @@ struct intrin_wmma_i32_16x16x16_iu8_w32<16, 16, neg_a, neg_b, clamp> bit_cast(reg_b), reg_c.template AsType()[Number<0>{}], clamp); +#else + ignore = reg_a; + ignore = reg_b; + ignore = reg_c; +#endif } }; @@ -116,8 +145,14 @@ struct intrin_wmma_f32_16x16x16_f16_w64<16, 16> template __device__ static void Run(const half16_t& reg_a, const half16_t& reg_b, FloatC& reg_c) { +#if defined(__gfx11__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_f32_16x16x16_f16_w64( reg_a, reg_b, reg_c.template AsType()[Number<0>{}]); +#else + ignore = reg_a; + ignore = reg_b; + ignore = reg_c; +#endif } }; @@ -131,9 +166,15 @@ struct intrin_wmma_f32_16x16x16_bf16_w64<16, 16> template __device__ static void Run(const bhalf16_t& reg_a, const bhalf16_t& reg_b, FloatC& reg_c) { +#if defined(__gfx11__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_f32_16x16x16_bf16_w64( reg_a, reg_b, reg_c.template AsType()[Number<0>{}]); +#else + ignore = reg_a; + ignore = reg_b; + ignore = reg_c; +#endif } }; @@ -150,8 +191,14 @@ struct intrin_wmma_f16_16x16x16_f16_w64<16, 16, Opsel> // opsel usage // false: D0.[0:15] = result // true : D0.[16:31]= result +#if defined(__gfx11__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_f16_16x16x16_f16_w64( reg_a, reg_b, reg_c.template AsType()[Number<0>{}], Opsel); +#else + ignore = reg_a; + ignore = reg_b; + ignore = reg_c; +#endif } }; @@ -168,9 +215,15 @@ struct intrin_wmma_bf16_16x16x16_bf16_w64<16, 16, Opsel> // opsel usage // false: D0.[0:15] = result // true : D0.[16:31]= result +#if defined(__gfx11__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64( reg_a, reg_b, reg_c.template 
AsType()[Number<0>{}], Opsel); +#else + ignore = reg_a; + ignore = reg_b; + ignore = reg_c; +#endif } }; @@ -184,6 +237,7 @@ struct intrin_wmma_i32_16x16x16_iu8_w64<16, 16, neg_a, neg_b, clamp> template __device__ static void Run(const int8x16_t& reg_a, const int8x16_t& reg_b, FloatC& reg_c) { +#if defined(__gfx11__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_i32_16x16x16_iu8_w64( neg_a, @@ -192,6 +246,11 @@ struct intrin_wmma_i32_16x16x16_iu8_w64<16, 16, neg_a, neg_b, clamp> bit_cast(reg_b), reg_c.template AsType()[Number<0>{}], clamp); +#else + ignore = reg_a; + ignore = reg_b; + ignore = reg_c; +#endif } }; From 5b57ab96a8208eec1969a3dcadb555a6246ddb95 Mon Sep 17 00:00:00 2001 From: Rostyslav Geyyer <46627076+geyyer@users.noreply.github.com> Date: Fri, 10 Mar 2023 17:34:44 -0600 Subject: [PATCH 12/31] Remove debug asserts (#629) Co-authored-by: Rosty Geyyer --- .../ck/tensor_operation/gpu/device/impl/device_gemm_xdl.hpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl.hpp b/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl.hpp index ec6792cfaf..6bc83cae42 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl.hpp @@ -77,8 +77,6 @@ struct DeviceGemmXdl : public DeviceGemm Date: Wed, 15 Mar 2023 22:44:13 +0800 Subject: [PATCH 13/31] Fix arch limitation bug (#639) --- include/ck/utility/amd_wmma.hpp | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/include/ck/utility/amd_wmma.hpp b/include/ck/utility/amd_wmma.hpp index 7598fe667b..bf09142548 100644 --- a/include/ck/utility/amd_wmma.hpp +++ b/include/ck/utility/amd_wmma.hpp @@ -25,7 +25,7 @@ struct intrin_wmma_f32_16x16x16_f16_w32<16, 16> // delete them. 
// amd_assembly_wmma_f32_16x16x16_f16_w32( // reg_a, reg_b, reg_c.template AsType()(Number<0>{})); -#if defined(__gfx11__) +#if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_f32_16x16x16_f16_w32( reg_a, reg_b, reg_c.template AsType()[Number<0>{}]); #else @@ -46,7 +46,7 @@ struct intrin_wmma_f32_16x16x16_bf16_w32<16, 16> template __device__ static void Run(const bhalf16_t& reg_a, const bhalf16_t& reg_b, FloatC& reg_c) { -#if defined(__gfx11__) +#if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_f32_16x16x16_bf16_w32( reg_a, reg_b, reg_c.template AsType()[Number<0>{}]); @@ -71,7 +71,7 @@ struct intrin_wmma_f16_16x16x16_f16_w32<16, 16, Opsel> // opsel usage // false: D0.[0:15] = result // true : D0.[16:31]= result -#if defined(__gfx11__) +#if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_f16_16x16x16_f16_w32( reg_a, reg_b, reg_c.template AsType()[Number<0>{}], Opsel); #else @@ -95,7 +95,7 @@ struct intrin_wmma_bf16_16x16x16_bf16_w32<16, 16, Opsel> // opsel usage // false: D0.[0:15] = result // true : D0.[16:31]= result -#if defined(__gfx11__) +#if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32( reg_a, reg_b, reg_c.template AsType()[Number<0>{}], Opsel); @@ -117,7 +117,7 @@ struct intrin_wmma_i32_16x16x16_iu8_w32<16, 16, neg_a, neg_b, clamp> template __device__ static void Run(const int8x16_t& reg_a, const int8x16_t& reg_b, FloatC& reg_c) { -#if defined(__gfx11__) +#if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_i32_16x16x16_iu8_w32( neg_a, @@ -145,7 +145,7 @@ struct intrin_wmma_f32_16x16x16_f16_w64<16, 16> template __device__ static 
void Run(const half16_t& reg_a, const half16_t& reg_b, FloatC& reg_c) { -#if defined(__gfx11__) +#if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_f32_16x16x16_f16_w64( reg_a, reg_b, reg_c.template AsType()[Number<0>{}]); #else @@ -166,7 +166,7 @@ struct intrin_wmma_f32_16x16x16_bf16_w64<16, 16> template __device__ static void Run(const bhalf16_t& reg_a, const bhalf16_t& reg_b, FloatC& reg_c) { -#if defined(__gfx11__) +#if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_f32_16x16x16_bf16_w64( reg_a, reg_b, reg_c.template AsType()[Number<0>{}]); @@ -191,7 +191,7 @@ struct intrin_wmma_f16_16x16x16_f16_w64<16, 16, Opsel> // opsel usage // false: D0.[0:15] = result // true : D0.[16:31]= result -#if defined(__gfx11__) +#if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_f16_16x16x16_f16_w64( reg_a, reg_b, reg_c.template AsType()[Number<0>{}], Opsel); #else @@ -215,7 +215,7 @@ struct intrin_wmma_bf16_16x16x16_bf16_w64<16, 16, Opsel> // opsel usage // false: D0.[0:15] = result // true : D0.[16:31]= result -#if defined(__gfx11__) +#if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64( reg_a, reg_b, reg_c.template AsType()[Number<0>{}], Opsel); @@ -237,7 +237,7 @@ struct intrin_wmma_i32_16x16x16_iu8_w64<16, 16, neg_a, neg_b, clamp> template __device__ static void Run(const int8x16_t& reg_a, const int8x16_t& reg_b, FloatC& reg_c) { -#if defined(__gfx11__) +#if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) reg_c.template AsType()(Number<0>{}) = __builtin_amdgcn_wmma_i32_16x16x16_iu8_w64( neg_a, From 14b3504d95082ffd466ae43a05951053f36718a8 Mon Sep 17 00:00:00 2001 From: Illia Silin 
<98187287+illsilin@users.noreply.github.com> Date: Wed, 15 Mar 2023 08:44:42 -0700 Subject: [PATCH 14/31] Update GetTypeString function to generate unique kernel IDs. (#638) * make conv_fwd_bias_activation kernel id unique * add more parameters to conv and gemm kernel names * update GetTypeString for conv and gemm kernels * fix two more kernel strings --- ...ouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp | 3 ++- .../device_grouped_conv_fwd_dl_nhwc_kyxc_nhwk.hpp | 14 +++++++++++++- ...uped_gemm_softmax_gemm_permute_xdl_cshuffle.hpp | 11 ++++++++++- ...ackward_weight_xdl_c_shuffle_nhwc_kyxc_nhwk.hpp | 14 +++++++++++++- .../device_conv2d_bwd_data_xdl_nhwc_kyxc_nhwk.hpp | 9 ++++++++- ..._shuffle_bias_activation_add_nhwc_kyxc_nhwk.hpp | 14 +++++++++++++- ...dl_c_shuffle_bias_activation_nhwc_kyxc_nhwk.hpp | 14 +++++++++++++- ...ice_conv2d_fwd_xdl_c_shuffle_nhwc_kyxc_nhwk.hpp | 12 +++++++++++- .../impl/device_conv2d_fwd_xdl_nhwc_kyxc_nhwk.hpp | 11 ++++++++++- .../device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk.hpp | 14 +++++++++++++- .../impl/device_convnd_bwd_data_nwc_kxc_nwk_dl.hpp | 3 ++- .../device_convnd_bwd_data_nwc_kxc_nwk_xdl.hpp | 9 ++++++++- .../device_gemm_bias_add_reduce_xdl_cshuffle.hpp | 10 +++++++++- .../device/impl/device_gemm_bias_e_permute_xdl.hpp | 14 +++++++++++++- ...ice_gemm_multiple_d_multiple_r_xdl_cshuffle.hpp | 10 +++++++++- .../impl/device_gemm_multiple_d_xdl_cshuffle.hpp | 8 ++++++++ .../impl/device_gemm_reduce_xdl_cshuffle.hpp | 10 +++++++++- .../gpu/device/impl/device_gemm_xdl.hpp | 6 +++++- .../gpu/device/impl/device_gemm_xdl_cshuffle.hpp | 10 +++++++++- .../impl/device_gemm_xdl_layernorm_cshuffle.hpp | 10 +++++++++- .../impl/device_gemm_xdl_splitk_c_shuffle.hpp | 11 ++++++++++- ...ed_conv_bwd_data_multiple_d_xdl_cshuffle_v1.hpp | 10 +++++++++- ...e_grouped_conv_bwd_weight_gnwc_gkxc_gnwk_dl.hpp | 3 ++- ...conv_bwd_weight_gnwc_gkxc_gnwk_xdl_cshuffle.hpp | 12 +++++++++++- ...conv_fwd_multiple_d_multiple_r_xdl_cshuffle.hpp | 10 +++++++++- 
...e_grouped_conv_fwd_multiple_d_wmma_cshuffle.hpp | 14 +++++++++++++- ...ce_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp | 10 +++++++++- .../gpu/device/impl/device_grouped_gemm_xdl.hpp | 4 ++++ 28 files changed, 254 insertions(+), 26 deletions(-) diff --git a/include/ck/tensor_operation/gpu/device/device_grouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp b/include/ck/tensor_operation/gpu/device/device_grouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp index 079135e5b2..745d248c7c 100644 --- a/include/ck/tensor_operation/gpu/device/device_grouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp +++ b/include/ck/tensor_operation/gpu/device/device_grouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp @@ -946,7 +946,8 @@ struct DeviceGroupedConvFwdDlMultipleD_NHWC_KYXC_NHWK << MPerBlock << ", " << NPerBlock << ", " << K0PerBlock << ", " - << getConvForwardSpecializationString(ConvForwardSpecialization) + << getConvForwardSpecializationString(ConvForwardSpecialization) << ", " + << K1 << ">"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/device_grouped_conv_fwd_dl_nhwc_kyxc_nhwk.hpp b/include/ck/tensor_operation/gpu/device/device_grouped_conv_fwd_dl_nhwc_kyxc_nhwk.hpp index 47c821171b..c77772faa4 100644 --- a/include/ck/tensor_operation/gpu/device/device_grouped_conv_fwd_dl_nhwc_kyxc_nhwk.hpp +++ b/include/ck/tensor_operation/gpu/device/device_grouped_conv_fwd_dl_nhwc_kyxc_nhwk.hpp @@ -824,7 +824,19 @@ struct DeviceGroupedConvFwdDl_NHWC_KYXC_NHWK : public DeviceGroupedConvFwd"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/device_grouped_gemm_softmax_gemm_permute_xdl_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/device_grouped_gemm_softmax_gemm_permute_xdl_cshuffle.hpp index 946a757cee..70f3d0277d 100644 --- a/include/ck/tensor_operation/gpu/device/device_grouped_gemm_softmax_gemm_permute_xdl_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/device_grouped_gemm_softmax_gemm_permute_xdl_cshuffle.hpp @@ -864,7 +864,16 
@@ struct DeviceGroupedGemmSoftmaxGemmPermute_Xdl_CShuffle << "B0Spec" << getTensorSpecializationString(BSpec) << ", " << "B1Spec" << getTensorSpecializationString(B1Spec) << ", " << "CSpec" << getTensorSpecializationString(CSpec) << ", " - << getMaskingSpecializationString(MaskingSpec) << ">"; + << getMaskingSpecializationString(MaskingSpec) << ", " + << MPerXDL << ", " + << NPerXDL << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << ABlockTransferSrcScalarPerVector << ", " + << BBlockTransferSrcScalarPerVector << ", " + << CShuffleMXdlPerWavePerShuffle << ", " + << CShuffleNXdlPerWavePerShuffle + << ">"; // clang-format on return str.str(); diff --git a/include/ck/tensor_operation/gpu/device/impl/device_conv2d_backward_weight_xdl_c_shuffle_nhwc_kyxc_nhwk.hpp b/include/ck/tensor_operation/gpu/device/impl/device_conv2d_backward_weight_xdl_c_shuffle_nhwc_kyxc_nhwk.hpp index 9f9967c96c..b65afce8df 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_conv2d_backward_weight_xdl_c_shuffle_nhwc_kyxc_nhwk.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_conv2d_backward_weight_xdl_c_shuffle_nhwc_kyxc_nhwk.hpp @@ -777,7 +777,19 @@ struct DeviceConv2dBwdWeightXdl_C_Shuffle_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_ << BlockSize << ", " << MPerBlock << ", " << NPerBlock << ", " - << K0PerBlock + << K0PerBlock << ", " + << K1 << ", " + << MPerXDL << ", " + << NPerXDL << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << ABlockTransferSrcScalarPerVector << ", " + << ABlockTransferDstScalarPerVector_K1 << ", " + << BBlockTransferSrcScalarPerVector << ", " + << BBlockTransferDstScalarPerVector_K1 << ", " + << CShuffleMXdlPerWavePerShuffle << ", " + << CShuffleNXdlPerWavePerShuffle << ", " + << CBlockTransferScalarPerVector_NWaveNPerXdl << ">"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/impl/device_conv2d_bwd_data_xdl_nhwc_kyxc_nhwk.hpp 
b/include/ck/tensor_operation/gpu/device/impl/device_conv2d_bwd_data_xdl_nhwc_kyxc_nhwk.hpp index 806b0c5925..ea3020663a 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_conv2d_bwd_data_xdl_nhwc_kyxc_nhwk.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_conv2d_bwd_data_xdl_nhwc_kyxc_nhwk.hpp @@ -822,7 +822,14 @@ struct DeviceConv2dBwdDataXdl_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_Wo_K << BlockSize << ", " << MPerBlock << ", " << NPerBlock << ", " - << K0PerBlock + << K0PerBlock << ", " + << K1 << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << ABlockTransferSrcScalarPerVector << ", " + << ABlockTransferDstScalarPerVector_K1 << ", " + << BBlockTransferSrcScalarPerVector << ", " + << BBlockTransferDstScalarPerVector_K1 << ">"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/impl/device_conv2d_fwd_xdl_c_shuffle_bias_activation_add_nhwc_kyxc_nhwk.hpp b/include/ck/tensor_operation/gpu/device/impl/device_conv2d_fwd_xdl_c_shuffle_bias_activation_add_nhwc_kyxc_nhwk.hpp index ff49d3b82e..e7e2bf3354 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_conv2d_fwd_xdl_c_shuffle_bias_activation_add_nhwc_kyxc_nhwk.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_conv2d_fwd_xdl_c_shuffle_bias_activation_add_nhwc_kyxc_nhwk.hpp @@ -956,7 +956,19 @@ struct << BlockSize << ", " << MPerBlock << ", " << NPerBlock << ", " - << K0PerBlock + << K0PerBlock << ", " + << K1 << ", " + << MPerXDL << ", " + << NPerXDL << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << ABlockTransferSrcScalarPerVector << ", " + << ABlockTransferDstScalarPerVector_K1 << ", " + << BBlockTransferSrcScalarPerVector << ", " + << BBlockTransferDstScalarPerVector_K1 << ", " + << CShuffleMXdlPerWavePerShuffle << ", " + << CShuffleNXdlPerWavePerShuffle << ", " + << CBlockTransferScalarPerVector_NWaveNPerXdl << ">"; // clang-format on diff --git 
a/include/ck/tensor_operation/gpu/device/impl/device_conv2d_fwd_xdl_c_shuffle_bias_activation_nhwc_kyxc_nhwk.hpp b/include/ck/tensor_operation/gpu/device/impl/device_conv2d_fwd_xdl_c_shuffle_bias_activation_nhwc_kyxc_nhwk.hpp index 4934599ee4..6c4957b9b1 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_conv2d_fwd_xdl_c_shuffle_bias_activation_nhwc_kyxc_nhwk.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_conv2d_fwd_xdl_c_shuffle_bias_activation_nhwc_kyxc_nhwk.hpp @@ -913,7 +913,19 @@ struct DeviceConv2dFwdXdl_C_Shuffle_Bias_Activation_Input_N_Hi_Wi_C_Weight_K_Y_X << BlockSize << ", " << MPerBlock << ", " << NPerBlock << ", " - << K0PerBlock + << K0PerBlock << ", " + << K1 << ", " + << MPerXDL << ", " + << NPerXDL << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << ABlockTransferSrcScalarPerVector << ", " + << ABlockTransferDstScalarPerVector_K1 << ", " + << BBlockTransferSrcScalarPerVector << ", " + << BBlockTransferDstScalarPerVector_K1 << ", " + << CShuffleMXdlPerWavePerShuffle << ", " + << CShuffleNXdlPerWavePerShuffle << ", " + << CBlockTransferScalarPerVector_NWaveNPerXdl << ">"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/impl/device_conv2d_fwd_xdl_c_shuffle_nhwc_kyxc_nhwk.hpp b/include/ck/tensor_operation/gpu/device/impl/device_conv2d_fwd_xdl_c_shuffle_nhwc_kyxc_nhwk.hpp index 731dfc5ea1..027f1a1954 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_conv2d_fwd_xdl_c_shuffle_nhwc_kyxc_nhwk.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_conv2d_fwd_xdl_c_shuffle_nhwc_kyxc_nhwk.hpp @@ -880,7 +880,17 @@ struct DeviceConv2dFwdXdl_C_Shuffle_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_W << MPerBlock << ", " << NPerBlock << ", " << K0PerBlock << ", " - << getConvForwardSpecializationString(ConvForwardSpecialization) + << getConvForwardSpecializationString(ConvForwardSpecialization) << ", " + << K1 << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << 
ABlockTransferSrcScalarPerVector << ", " + << ABlockTransferDstScalarPerVector_K1 << ", " + << BBlockTransferSrcScalarPerVector << ", " + << BBlockTransferDstScalarPerVector_K1 << ", " + << CShuffleMXdlPerWavePerShuffle << ", " + << CShuffleNXdlPerWavePerShuffle << ", " + << CBlockTransferScalarPerVector_NWaveNPerXdl << ">"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/impl/device_conv2d_fwd_xdl_nhwc_kyxc_nhwk.hpp b/include/ck/tensor_operation/gpu/device/impl/device_conv2d_fwd_xdl_nhwc_kyxc_nhwk.hpp index 5848006931..6278220c2f 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_conv2d_fwd_xdl_nhwc_kyxc_nhwk.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_conv2d_fwd_xdl_nhwc_kyxc_nhwk.hpp @@ -720,7 +720,16 @@ struct DeviceConv2dFwdXdl_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_Wo_K << MPerBlock << ", " << NPerBlock << ", " << K0PerBlock << ", " - << getConvForwardSpecializationString(ConvForwardSpecialization) + << getConvForwardSpecializationString(ConvForwardSpecialization) << ", " + << K1 << ", " + << MPerXDL << ", " + << NPerXDL << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << ABlockTransferSrcScalarPerVector << ", " + << ABlockTransferDstScalarPerVector_K1 << ", " + << BBlockTransferSrcScalarPerVector << ", " + << BBlockTransferDstScalarPerVector_K1 << ">"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/impl/device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk.hpp b/include/ck/tensor_operation/gpu/device/impl/device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk.hpp index fe2e6c3089..31c761e09f 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_conv3d_fwd_xdl_ndhwc_kzyxc_ndhwk.hpp @@ -630,7 +630,19 @@ struct DeviceConv3dFwdXdl_Input_N_Di_Hi_Wi_C_Weight_K_Z_Y_X_C_Output_N_Do_Ho_Wo_ << BlockSize << ", " << MPerBlock << ", " << NPerBlock << ", " - << K0PerBlock + << K0PerBlock << ", " + << K1 << 
", " + << MPerXDL << ", " + << NPerXDL << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << ABlockTransferSrcScalarPerVector << ", " + << ABlockTransferDstScalarPerVector_K1 << ", " + << BBlockTransferSrcScalarPerVector << ", " + << BBlockTransferDstScalarPerVector_K1 << ", " + << CShuffleMXdlPerWavePerShuffle << ", " + << CShuffleNXdlPerWavePerShuffle << ", " + << CBlockTransferScalarPerVector_NWaveNPerXdl << ">"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/impl/device_convnd_bwd_data_nwc_kxc_nwk_dl.hpp b/include/ck/tensor_operation/gpu/device/impl/device_convnd_bwd_data_nwc_kxc_nwk_dl.hpp index 5bef0e2a3e..2a2edc29ba 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_convnd_bwd_data_nwc_kxc_nwk_dl.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_convnd_bwd_data_nwc_kxc_nwk_dl.hpp @@ -1567,7 +1567,8 @@ struct DeviceConvNdBwdDataNwcKxcNwk_Dl << BlockSize << ", " << MPerBlock << ", " << NPerBlock << ", " - << K0PerBlock + << K0PerBlock << ", " + << K1 << ">"; if constexpr(ConvBackwardDataSpecialization == ConvolutionBackwardDataSpecialization::Filter1x1Stride1Pad0){ diff --git a/include/ck/tensor_operation/gpu/device/impl/device_convnd_bwd_data_nwc_kxc_nwk_xdl.hpp b/include/ck/tensor_operation/gpu/device/impl/device_convnd_bwd_data_nwc_kxc_nwk_xdl.hpp index 7951713938..1fd4b76cec 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_convnd_bwd_data_nwc_kxc_nwk_xdl.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_convnd_bwd_data_nwc_kxc_nwk_xdl.hpp @@ -1552,7 +1552,14 @@ struct DeviceConvNdBwdDataNwcKxcNwk_Xdl << BlockSize << ", " << MPerBlock << ", " << NPerBlock << ", " - << K0PerBlock + << K0PerBlock << ", " + << K1 << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << ABlockTransferSrcScalarPerVector << ", " + << ABlockTransferDstScalarPerVector_K1 << ", " + << BBlockTransferSrcScalarPerVector << ", " + << BBlockTransferDstScalarPerVector_K1 << ">"; if 
constexpr(ConvBackwardDataSpecialization == ConvolutionBackwardDataSpecialization::Filter1x1Stride1Pad0){ diff --git a/include/ck/tensor_operation/gpu/device/impl/device_gemm_bias_add_reduce_xdl_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_gemm_bias_add_reduce_xdl_cshuffle.hpp index b9a64e8c4b..a9a58c8ac4 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_bias_add_reduce_xdl_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_bias_add_reduce_xdl_cshuffle.hpp @@ -862,7 +862,15 @@ struct DeviceGemmBiasAddReduce_Xdl_CShuffle : public DeviceGemmReduce<1, ReduceO << NPerBlock << ", " << KPerBlock << ", " << AK1 << ", " - << BK1 + << BK1 << ", " + << MPerXDL << ", " + << NPerXDL << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << ABlockTransferSrcScalarPerVector << ", " + << BBlockTransferSrcScalarPerVector << ", " + << CShuffleMXdlPerWavePerShuffle << ", " + << CShuffleNXdlPerWavePerShuffle << ">"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/impl/device_gemm_bias_e_permute_xdl.hpp b/include/ck/tensor_operation/gpu/device/impl/device_gemm_bias_e_permute_xdl.hpp index 1914068828..e95bf2f81d 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_bias_e_permute_xdl.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_bias_e_permute_xdl.hpp @@ -559,7 +559,19 @@ struct DeviceGemmBiasEPermute_Xdl : public DeviceGemmBiasCPermute"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_multiple_r_xdl_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_multiple_r_xdl_cshuffle.hpp index f1fb4ab4b1..f1185357a4 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_multiple_r_xdl_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_multiple_r_xdl_cshuffle.hpp @@ -669,7 +669,15 @@ struct DeviceGemmMultipleDMultipleR_Xdl_CShuffle 
<< KPerBlock << ", " << AK1 << ", " << BK1 << ", " - << getGemmSpecializationString(GemmSpec) + << getGemmSpecializationString(GemmSpec) << ", " + << MPerXDL << ", " + << NPerXDL << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << ABlockTransferSrcScalarPerVector << ", " + << BBlockTransferSrcScalarPerVector << ", " + << CShuffleMXdlPerWavePerShuffle << ", " + << CShuffleNXdlPerWavePerShuffle << ">"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_xdl_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_xdl_cshuffle.hpp index 9bf5da09da..36e8105122 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_xdl_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_xdl_cshuffle.hpp @@ -680,6 +680,14 @@ struct DeviceGemmMultipleD_Xdl_CShuffle : public DeviceGemmMultipleD" << " LoopScheduler: " diff --git a/include/ck/tensor_operation/gpu/device/impl/device_gemm_reduce_xdl_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_gemm_reduce_xdl_cshuffle.hpp index 3f62601f96..f358bd176f 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_reduce_xdl_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_reduce_xdl_cshuffle.hpp @@ -822,7 +822,15 @@ struct DeviceGemmReduce_Xdl_CShuffle : public DeviceGemmReduce<0, ReduceOperatio << NPerBlock << ", " << KPerBlock << ", " << AK1 << ", " - << BK1 + << BK1 << ", " + << MPerXDL << ", " + << NPerXDL << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << ABlockTransferSrcScalarPerVector << ", " + << BBlockTransferSrcScalarPerVector << ", " + << CShuffleMXdlPerWavePerShuffle << ", " + << CShuffleNXdlPerWavePerShuffle << ">"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl.hpp b/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl.hpp index 6bc83cae42..48fb646375 100644 --- 
a/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl.hpp @@ -547,7 +547,11 @@ struct DeviceGemmXdl : public DeviceGemm" << " NumPrefetch: " << NumPrefetch << ", " diff --git a/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl_cshuffle.hpp index 85e730f40c..6621f25bde 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl_cshuffle.hpp @@ -682,7 +682,15 @@ struct DeviceGemm_Xdl_CShuffle : public DeviceGemm" << " LoopScheduler: " << LoopSchedToString[LoopSched] << ", " diff --git a/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl_layernorm_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl_layernorm_cshuffle.hpp index ed3e6900bd..2a3e74a4cb 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl_layernorm_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl_layernorm_cshuffle.hpp @@ -760,7 +760,15 @@ struct DeviceGemmLayerNorm_Xdl_CShuffle : public BaseOperator << NPerBlock << ", " << KPerBlock << ", " << AK1 << ", " - << BK1 + << BK1 << ", " + << MPerXDL << ", " + << NPerXDL << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << ABlockTransferSrcScalarPerVector << ", " + << BBlockTransferSrcScalarPerVector << ", " + << CShuffleMXdlPerWavePerShuffle << ", " + << CShuffleNXdlPerWavePerShuffle << ">"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl_splitk_c_shuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl_splitk_c_shuffle.hpp index c85b805f59..0d2aeaeb7e 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl_splitk_c_shuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl_splitk_c_shuffle.hpp @@ -640,7 +640,16 @@ struct 
DeviceGemmXdlSplitKCShuffle : public DeviceGemmSplitK"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_data_multiple_d_xdl_cshuffle_v1.hpp b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_data_multiple_d_xdl_cshuffle_v1.hpp index 682aba0860..4cef404447 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_data_multiple_d_xdl_cshuffle_v1.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_data_multiple_d_xdl_cshuffle_v1.hpp @@ -1003,7 +1003,15 @@ struct DeviceGroupedConvBwdDataMultipleD_Xdl_CShuffle_v1 << KPerBlock << ", " << AK1 << ", " << BK1 << ", " - << getConvBackwardDataSpecializationString(ConvBackwardDataSpecialization) + << getConvBackwardDataSpecializationString(ConvBackwardDataSpecialization) << ", " + << MPerXDL << ", " + << NPerXDL << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << ABlockTransferSrcScalarPerVector << ", " + << BBlockTransferSrcScalarPerVector << ", " + << CShuffleMXdlPerWavePerShuffle << ", " + << CShuffleNXdlPerWavePerShuffle << ">"; return str.str(); diff --git a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_weight_gnwc_gkxc_gnwk_dl.hpp b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_weight_gnwc_gkxc_gnwk_dl.hpp index b0681b724e..9529744f31 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_weight_gnwc_gkxc_gnwk_dl.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_weight_gnwc_gkxc_gnwk_dl.hpp @@ -1203,7 +1203,8 @@ struct DeviceGroupedConvBwdWeightGnwcGkxcGnwk_Dl << MPerBlock << ", " << NPerBlock << ", " << K0PerBlock << ", " - << getConvBackwardWeightSpecializationString(ConvBackwardWeightSpecialization) + << getConvBackwardWeightSpecializationString(ConvBackwardWeightSpecialization) << ", " + << K1 << ">"; // clang-format on diff --git 
a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_weight_gnwc_gkxc_gnwk_xdl_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_weight_gnwc_gkxc_gnwk_xdl_cshuffle.hpp index d9e7b54cc8..34db9f2a52 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_weight_gnwc_gkxc_gnwk_xdl_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_weight_gnwc_gkxc_gnwk_xdl_cshuffle.hpp @@ -1231,7 +1231,17 @@ struct DeviceGroupedConvBwdWeightGnwcGkxcGnwk_Xdl_CShuffle << MPerBlock << ", " << NPerBlock << ", " << K0PerBlock << ", " - << getConvBackwardWeightSpecializationString(ConvBackwardWeightSpecialization) + << getConvBackwardWeightSpecializationString(ConvBackwardWeightSpecialization) << ", " + << K1 << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << ABlockTransferSrcScalarPerVector << ", " + << ABlockTransferDstScalarPerVector_K1 << ", " + << BBlockTransferSrcScalarPerVector << ", " + << BBlockTransferDstScalarPerVector_K1 << ", " + << CShuffleMXdlPerWavePerShuffle << ", " + << CShuffleNXdlPerWavePerShuffle << ", " + << CBlockTransferScalarPerVector_NWaveNPerXdl << ">"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_multiple_r_xdl_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_multiple_r_xdl_cshuffle.hpp index 8b54ee493c..b60f94d141 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_multiple_r_xdl_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_multiple_r_xdl_cshuffle.hpp @@ -1092,7 +1092,15 @@ struct DeviceGroupedConvFwdMultipleDMultipleR_Xdl_CShuffle << MPerBlock << ", " << NPerBlock << ", " << KPerBlock << ", " - << getConvForwardSpecializationString(ConvForwardSpecialization) + << getConvForwardSpecializationString(ConvForwardSpecialization) << ", " + << 
MPerXDL << ", " + << NPerXDL << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << ABlockTransferSrcScalarPerVector << ", " + << BBlockTransferSrcScalarPerVector << ", " + << CShuffleMXdlPerWavePerShuffle << ", " + << CShuffleNXdlPerWavePerShuffle << ">"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_wmma_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_wmma_cshuffle.hpp index dd56b207bf..a1ee1902b5 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_wmma_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_wmma_cshuffle.hpp @@ -838,7 +838,19 @@ struct DeviceGroupedConvFwdMultipleD_Wmma_CShuffle << MPerBlock << ", " << NPerBlock << ", " << KPerBlock << ", " - << getConvForwardSpecializationString(ConvForwardSpecialization) + << getConvForwardSpecializationString(ConvForwardSpecialization) << ", " + << K1 << ", " + << MPerXDL << ", " + << NPerXDL << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << ABlockTransferSrcScalarPerVector << ", " + << ABlockTransferDstScalarPerVector_K1 << ", " + << BBlockTransferSrcScalarPerVector << ", " + << BBlockTransferDstScalarPerVector_K1 << ", " + << CShuffleMXdlPerWavePerShuffle << ", " + << CShuffleNXdlPerWavePerShuffle << ", " + << CBlockTransferScalarPerVector_NWaveNPerXdl << ">"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp index bb7a2f8c0a..8de81285d9 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp @@ -939,7 +939,15 @@ struct DeviceGroupedConvFwdMultipleD_Xdl_CShuffle << 
MPerBlock << ", " << NPerBlock << ", " << KPerBlock << ", " - << getConvForwardSpecializationString(ConvForwardSpecialization) + << getConvForwardSpecializationString(ConvForwardSpecialization) << ", " + << MPerXDL << ", " + << NPerXDL << ", " + << MXdlPerWave << ", " + << NXdlPerWave << ", " + << ABlockTransferSrcScalarPerVector << ", " + << BBlockTransferSrcScalarPerVector << ", " + << CShuffleMXdlPerWavePerShuffle << ", " + << CShuffleNXdlPerWavePerShuffle << ">"; // clang-format on diff --git a/include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_xdl.hpp b/include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_xdl.hpp index 88750778cf..a009bcb3bf 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_xdl.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_xdl.hpp @@ -688,6 +688,10 @@ struct DeviceGroupedGemm_Xdl : public DeviceGroupedGemm"; // clang-format on From c10a6e8293ade863a9b177de956d31eb86f4b128 Mon Sep 17 00:00:00 2001 From: Rostyslav Geyyer <46627076+geyyer@users.noreply.github.com> Date: Wed, 15 Mar 2023 11:12:12 -0500 Subject: [PATCH 15/31] Add layout check to IsSupportedArgument (#627) * Add layout check to IsSupportedArgument * Format --------- Co-authored-by: Rosty Geyyer Co-authored-by: zjing14 --- ...ultiple_d_gemm_multiple_d_xdl_cshuffle.hpp | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/include/ck/tensor_operation/gpu/device/impl/device_batched_gemm_multiple_d_gemm_multiple_d_xdl_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_batched_gemm_multiple_d_gemm_multiple_d_xdl_cshuffle.hpp index 8142c9253b..4fc8e69d2c 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_batched_gemm_multiple_d_gemm_multiple_d_xdl_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_batched_gemm_multiple_d_gemm_multiple_d_xdl_cshuffle.hpp @@ -788,6 +788,20 @@ struct DeviceBatchedGemmMultipleDGemmMultipleD_Xdl_CShuffle return true; } + 
// check if DsLayout is supported + template + static bool CheckDLayout() + { + static bool valid = true; + // iterate over DLayout tuple + static_for<0, NumDTensor, 1>{}([&](auto i) { + using DLayout = remove_cvref_t>; + // if RefLayout and DLayout are same, keep valid true, otherwise false + valid = valid && is_same_v; + }); + return valid; + } + static bool IsSupportedArgument(const Argument& arg) { if(!(ck::get_device_name() == "gfx908" || ck::get_device_name() == "gfx90a")) @@ -795,6 +809,26 @@ struct DeviceBatchedGemmMultipleDGemmMultipleD_Xdl_CShuffle return false; } + // Check supported layouts + // A0 - Row + // B0 - Col + // D0s - Rows + // B1 - Row or Col + // D1s - Rows + // E1 - Row + if(!(is_same_v && + is_same_v && + CheckDLayout() && + (is_same_v || + is_same_v)&&CheckDLayout() && + is_same_v)) + { + return false; + } + return GridwiseGemm::CheckValidity(arg.a0_grid_desc_m_k_, arg.b0_grid_desc_n_k_, arg.b1_grid_desc_n_k_, From a2d5ca8e951f9d1a1837cfd805b555cc41c4d8b1 Mon Sep 17 00:00:00 2001 From: Adam Osewski <19374865+aosewski@users.noreply.github.com> Date: Wed, 15 Mar 2023 17:22:59 +0100 Subject: [PATCH 16/31] Device Op GroupedGemmMultipleD + example fp16 (#633) * Pass shared mem pointer as pointer to void. * Device Op GroupedGEMM Multiple D * Example for grouped gemm multiple d. * Add MI200 to supported archs. 
--------- Co-authored-by: Adam Osewski Co-authored-by: zjing14 --- example/15_grouped_gemm/CMakeLists.txt | 5 +- .../grouped_gemm_multiple_d_dl_fp16.cpp | 67 ++ .../device_grouped_gemm_multiple_d_dl.hpp | 763 ++++++++++++++++++ .../gpu/grid/gridwise_gemm_dl_multiple_d.hpp | 11 +- 4 files changed, 840 insertions(+), 6 deletions(-) create mode 100644 example/15_grouped_gemm/grouped_gemm_multiple_d_dl_fp16.cpp create mode 100644 include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_multiple_d_dl.hpp diff --git a/example/15_grouped_gemm/CMakeLists.txt b/example/15_grouped_gemm/CMakeLists.txt index 67f6160873..e3080c4a9a 100644 --- a/example/15_grouped_gemm/CMakeLists.txt +++ b/example/15_grouped_gemm/CMakeLists.txt @@ -4,12 +4,15 @@ add_example_executable(example_grouped_gemm_xdl_fp32 grouped_gemm_xdl_fp32.cpp) add_example_executable(example_grouped_gemm_xdl_fp16 grouped_gemm_xdl_fp16.cpp) add_example_executable(example_grouped_gemm_xdl_bfp16 grouped_gemm_xdl_bfp16.cpp) add_example_executable(example_grouped_gemm_xdl_int8 grouped_gemm_xdl_int8.cpp) +add_example_executable(example_grouped_gemm_multiple_d_dl_fp16 grouped_gemm_multiple_d_dl_fp16.cpp) + add_dependencies(example_grouped_gemm_xdl example_grouped_gemm_xdl_fp32 example_grouped_gemm_xdl_fp16 example_grouped_gemm_xdl_bfp16 - example_grouped_gemm_xdl_int8) + example_grouped_gemm_xdl_int8 + example_grouped_gemm_multiple_d_dl_fp16) if(USE_BITINT_EXTENSION_INT4) add_example_executable(example_grouped_gemm_xdl_int4 grouped_gemm_xdl_int4.cpp) diff --git a/example/15_grouped_gemm/grouped_gemm_multiple_d_dl_fp16.cpp b/example/15_grouped_gemm/grouped_gemm_multiple_d_dl_fp16.cpp new file mode 100644 index 0000000000..a5c51ceb0c --- /dev/null +++ b/example/15_grouped_gemm/grouped_gemm_multiple_d_dl_fp16.cpp @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include +#include +#include +#include +#include +#include +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_grouped_gemm_multiple_d_dl.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +#include "ck/library/utility/check_err.hpp" +#include "ck/library/utility/device_memory.hpp" +#include "ck/library/utility/host_tensor.hpp" +#include "ck/library/utility/host_tensor_generator.hpp" +#include "ck/library/utility/literals.hpp" +#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp" +#include "ck/utility/data_type.hpp" +#include "ck/utility/tuple.hpp" +#include "ck/utility/sequence.hpp" + +template +using S = ck::Sequence; + +using F16 = ck::half_t; +using F32 = float; + +using Row = ck::tensor_layout::gemm::RowMajor; +using Col = ck::tensor_layout::gemm::ColumnMajor; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; + +using ADataType = F16; +using BDataType = F16; +using AccDataType = F32; +using DsDataType = ck::Tuple<>; +using EDataType = F16; + +using ALayout = Row; +using BLayout = Row; +using DsLayout = ck::Tuple<>; +using ELayout = Row; + +using AElementOp = PassThrough; +using BElementOp = PassThrough; +using CDEElementOp = PassThrough; + +static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNPadding; + +// clang-format off +using DeviceGemmInstance = ck::tensor_operation::device:: + // ##################| ALayout| BLayout| DsLayout| ELayout| AData| BData| AccData| DsData| EData| A| B| CDE| GEMM| Block| MPer| NPer| K0Per| K1| M1Per| N1Per| KPer| M11N11Thread| M11N11Thread| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| 
CThreadTransfer| CThreadTransfer| CThreadTransfer| + // ##################| | | | | Type| Type| Type| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Size| Block| Block| Block| | ThreadM111| ThreadN111| Thread| ClusterM110Xs| ClusterN110Xs| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| SrcDstAccess| SrcDstVectorDim| DstScalarPerVector| + // ##################| | | | | | | | | | Operation| Operation| Operation| | | | | | | | | | | | K0_M0_M1_K1| K0_M0_M1_K1| ArrangeOrder| Order| Lengths_K0_M0_M1_K1| ContiguousDimOrder| Lengths_K0_M0_M1_K1| K0_N0_N1_K1| K0_N0_N1_K1| ArrangeOrder| Order| Lengths_K0_N0_N1_K1| ContiguousDimOrder| Lengths_K0_N0_N1_K1| Order| | | + // ##################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGroupedGemmMultipleD_Dl< ALayout, BLayout, DsLayout, ELayout, ADataType, BDataType, AccDataType, DsDataType, EDataType, AElementOp, BElementOp, CDEElementOp, GemmSpec, 256, 128, 128, 16, 2, 4, 4, 1, S<8, 2>, S<8, 2>, S<8, 1, 1, 2>, S<2, 1, 128, 1>, S<1, 2, 0, 3>, S<1, 2, 0, 3>, S<4, 1, 1, 2>, S<1, 2, 0, 3>, S<1, 1, 1, 2>, S<2, 1, 4, 2>, S<8, 1, 32, 1>, S<0, 3, 1, 2>, S<0, 3, 1, 2>, S<1, 1, 4, 1>, S<0, 3, 1, 2>, S<1, 1, 4, 2>, S<0, 1, 2, 3, 4, 5>, 5, 4>; +// clang-format on + +#include "run_grouped_gemm_example.inc" + +int main(int argc, char* argv[]) { return !run_grouped_gemm_example(argc, argv); } diff --git a/include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_multiple_d_dl.hpp b/include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_multiple_d_dl.hpp new file mode 100644 index 0000000000..d424f29920 --- /dev/null +++ b/include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_multiple_d_dl.hpp @@ -0,0 +1,763 @@ +#pragma once +// SPDX-License-Identifier: MIT +// 
Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. + +#pragma once + +#include +#include + +#include "ck/utility/common_header.hpp" +#include "ck/tensor_description/tensor_descriptor.hpp" +#include "ck/tensor_description/tensor_descriptor_helper.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/device_grouped_gemm.hpp" +#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" +#include "ck/tensor_operation/gpu/device/matrix_padder.hpp" +#include "ck/tensor_operation/gpu/grid/gridwise_gemm_dl_multiple_d.hpp" +#include "ck/host_utility/device_prop.hpp" +#include "ck/host_utility/kernel_launch.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { + +template +__global__ void +#if CK_USE_LAUNCH_BOUNDS + __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU) +#endif + kernel_grouped_gemm_multiple_d_dl(const void CK_CONSTANT_ADDRESS_SPACE* gemm_descs_const, + const index_t group_count, + const AElementwiseOperation a_element_op, + const BElementwiseOperation b_element_op, + const CDEElementwiseOperation cde_element_op) +{ +#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx906__) || defined(__gfx908__) || \ + defined(__gfx90a__) || defined(__gfx1030__)) + __shared__ char p_shared[GridwiseGemm::GetSharedMemoryNumberOfByte()]; + + const index_t block_id = get_block_1d_id(); + + const auto gemm_desc_ptr = + reinterpret_cast(cast_pointer_to_generic_address_space(gemm_descs_const)); + + index_t left = 0; + index_t right = group_count; + index_t group_id = index_t((left + right) / 2); + while((!(block_id >= gemm_desc_ptr[group_id].BlockStart_ && + block_id < gemm_desc_ptr[group_id].BlockEnd_)) && + left <= right) + { + if(block_id < gemm_desc_ptr[group_id].BlockStart_) + { + right = group_id; + } + else + { + left = group_id; + } + group_id = index_t((left + right) / 2); + } + + GridwiseGemm::Run(gemm_desc_ptr[group_id].a_ptr_, + 
gemm_desc_ptr[group_id].b_ptr_, + gemm_desc_ptr[group_id].ds_ptr_, + gemm_desc_ptr[group_id].e_ptr_, + p_shared, + a_element_op, + b_element_op, + cde_element_op, + gemm_desc_ptr[group_id].a_grid_desc_k0_m0_m1_k1_, + gemm_desc_ptr[group_id].b_grid_desc_k0_n0_n1_k1_, + gemm_desc_ptr[group_id].ds_grid_desc_m0_m10_m11_n0_n10_n11_, + gemm_desc_ptr[group_id].e_grid_desc_m0_m10_m11_n0_n10_n11_, + gemm_desc_ptr[group_id].block_2_etile_map_, + integral_constant{}, + integral_constant{}); +#else + ignore = gemm_descs_const; + ignore = group_count; + ignore = a_element_op; + ignore = b_element_op; + ignore = cde_element_op; +#endif +} + +template && + is_same_v, + bool> = false> +struct DeviceGroupedGemmMultipleD_Dl : public DeviceGroupedGemm +{ + using DeviceOp = DeviceGroupedGemmMultipleD_Dl; + static constexpr index_t NumDTensor = DsDataType::Size(); + + static constexpr auto I0 = Number<0>{}; + static constexpr auto I1 = Number<1>{}; + static constexpr auto I2 = Number<2>{}; + static constexpr auto I3 = Number<3>{}; + static constexpr auto I4 = Number<4>{}; + static constexpr auto I5 = Number<5>{}; + + static constexpr auto K1Number = Number{}; + + static auto MakeAGridDescriptor_K0_M_K1(index_t M, index_t K, index_t StrideA) + { + assert(K % K1 == 0); + + const index_t K0 = K / K1; + + const auto a_grid_desc_m_k = [&]() { + if constexpr(is_same::value) + { + return make_naive_tensor_descriptor(make_tuple(M, K), make_tuple(StrideA, I1)); + } + else if constexpr(is_same::value) + { + return make_naive_tensor_descriptor(make_tuple(M, K), make_tuple(I1, StrideA)); + } + }(); + + if constexpr(GemmSpec == GemmSpecialization::MNPadding) + { + const auto PadM = (MPerBlock - M % MPerBlock) % MPerBlock; + + return transform_tensor_descriptor( + a_grid_desc_m_k, + make_tuple(make_unmerge_transform(make_tuple(K0, K1Number)), + make_right_pad_transform(M, PadM)), + make_tuple(Sequence<1>{}, Sequence<0>{}), + make_tuple(Sequence<0, 2>{}, Sequence<1>{})); + } + else + { + return 
transform_tensor_descriptor( + a_grid_desc_m_k, + make_tuple(make_unmerge_transform(make_tuple(K0, K1Number)), + make_pass_through_transform(M)), + make_tuple(Sequence<1>{}, Sequence<0>{}), + make_tuple(Sequence<0, 2>{}, Sequence<1>{})); + } + } + + static auto MakeBGridDescriptor_K0_N_K1(index_t K, index_t N, index_t StrideB) + { + assert(K % K1 == 0); + + const index_t K0 = K / K1; + + const auto b_grid_desc_k_n = [&]() { + if constexpr(is_same::value) + { + return make_naive_tensor_descriptor(make_tuple(K, N), make_tuple(StrideB, I1)); + } + else if constexpr(is_same::value) + { + return make_naive_tensor_descriptor(make_tuple(K, N), make_tuple(I1, StrideB)); + } + }(); + + if constexpr(GemmSpec == GemmSpecialization::MNPadding) + { + const auto PadN = (NPerBlock - N % NPerBlock) % NPerBlock; + + return transform_tensor_descriptor( + b_grid_desc_k_n, + make_tuple(make_unmerge_transform(make_tuple(K0, K1Number)), + make_right_pad_transform(N, PadN)), + make_tuple(Sequence<0>{}, Sequence<1>{}), + make_tuple(Sequence<0, 2>{}, Sequence<1>{})); + } + else + { + return transform_tensor_descriptor( + b_grid_desc_k_n, + make_tuple(make_unmerge_transform(make_tuple(K0, K1Number)), + make_pass_through_transform(N)), + make_tuple(Sequence<0>{}, Sequence<1>{}), + make_tuple(Sequence<0, 2>{}, Sequence<1>{})); + } + } + + template + static auto MakeEGridDescriptor_M_N(index_t M, index_t N, index_t StrideE) + { + const auto c_grid_desc_m_n = [&]() { + if constexpr(is_same::value) + { + return make_naive_tensor_descriptor(make_tuple(M, N), make_tuple(StrideE, I1)); + } + else if constexpr(is_same::value) + { + return make_naive_tensor_descriptor(make_tuple(M, N), make_tuple(I1, StrideE)); + } + }(); + + if constexpr(GemmSpec == GemmSpecialization::MNPadding) + { + const auto PadM = (MPerBlock - M % MPerBlock) % MPerBlock; + const auto PadN = (NPerBlock - N % NPerBlock) % NPerBlock; + + return transform_tensor_descriptor( + c_grid_desc_m_n, + 
make_tuple(make_right_pad_transform(M, PadM), make_right_pad_transform(N, PadN)), + make_tuple(Sequence<0>{}, Sequence<1>{}), + make_tuple(Sequence<0>{}, Sequence<1>{})); + } + else + { + + return transform_tensor_descriptor( + c_grid_desc_m_n, + make_tuple(make_pass_through_transform(M), make_pass_through_transform(N)), + make_tuple(Sequence<0>{}, Sequence<1>{}), + make_tuple(Sequence<0>{}, Sequence<1>{})); + } + } + + static auto MakeDsGridDescriptor_M_N(const std::array& MRaws, + const std::array& NRaws, + const std::array& DsStride) + { + return generate_tuple( + [&](auto i) { + using DLayout = remove_cvref_t>; + + return DeviceOp::MakeEGridDescriptor_M_N(MRaws[i], NRaws[i], DsStride[i]); + }, + Number{}); + } + + using AGridDesc_K0_M_K1 = decltype(MakeAGridDescriptor_K0_M_K1(1, 1, 1)); + using BGridDesc_K0_N_K1 = decltype(MakeBGridDescriptor_K0_N_K1(1, 1, 1)); + using DsGridDesc_M_N = decltype(MakeDsGridDescriptor_M_N({}, {}, {})); + using EGridDesc_M_N = decltype(MakeEGridDescriptor_M_N(1, 1, 1)); + + // GridwiseGemm + using GridwiseGemm = + GridwiseGemmDlMultipleD_km_kn_mn; + + using AGridDesc_K0_M0_M1_K1 = + decltype(GridwiseGemm::MakeAGridDescriptor_K0_M0_M1_K1(AGridDesc_K0_M_K1{})); + using BGridDesc_K0_N0_N1_K1 = + decltype(GridwiseGemm::MakeBGridDescriptor_K0_N0_N1_K1(BGridDesc_K0_N_K1{})); + using DsGridDesc_M0_M10_M11_N0_N10_N11 = + decltype(GridwiseGemm::MakeDsGridDescriptor_M0_M10_M11_N0_N10_N11(DsGridDesc_M_N{})); + using EGridDesc_M0_M10_M11_N0_N10_N11 = + decltype(GridwiseGemm::MakeCGridDescriptor_M0_M10_M11_N0_N10_N11(EGridDesc_M_N{})); + + struct GroupedGemmBlock2ETileMap + { + using Block2ETileMap = + remove_cvref_t; + + GroupedGemmBlock2ETileMap() + { + block_2_etile_map_ = GridwiseGemm::MakeDefaultBlock2CTileMap(EGridDesc_M_N{}); + BlockStart_ = -1; + } + + GroupedGemmBlock2ETileMap(const EGridDesc_M_N& e_grid_desc_m_n, ck::index_t BlockStart) + { + block_2_etile_map_ = GridwiseGemm::MakeDefaultBlock2CTileMap(e_grid_desc_m_n); + BlockStart_ 
= BlockStart; + } + + template + __host__ __device__ constexpr auto CalculateBottomIndex(const TopIdx& idx_top) const + { + return block_2_etile_map_.CalculateBottomIndex( + make_multi_index(idx_top[I0] - BlockStart_)); + } + + // it's actually E-Tile + template + __host__ __device__ bool ValidCTileIndex(const CTileIdx& c_tile_idx, + const CTileDim& c_tile_dim) const + { + return block_2_etile_map_.ValidCTileIndex(c_tile_idx, c_tile_dim); + } + + __host__ bool CheckValidity(const EGridDesc_M_N& e_grid_desc_m_n) const + { + return block_2_etile_map_.CheckValidity(e_grid_desc_m_n); + } + + Block2ETileMap block_2_etile_map_; + ck::index_t BlockStart_; + }; + + struct GemmKernelArg + { + // pointers + const ADataType* a_ptr_; + const BDataType* b_ptr_; + typename GridwiseGemm::DsGridPointer ds_ptr_; + EDataType* e_ptr_; + + // tensor descriptors for problem definiton + AGridDesc_K0_M_K1 a_grid_desc_k0_m_k1_; + BGridDesc_K0_N_K1 b_grid_desc_k0_n_k1_; + DsGridDesc_M_N ds_grid_desc_m_n_; + EGridDesc_M_N e_grid_desc_m_n_; + + // tensor descriptors for block/thread-wise copy + AGridDesc_K0_M0_M1_K1 a_grid_desc_k0_m0_m1_k1_; + BGridDesc_K0_N0_N1_K1 b_grid_desc_k0_n0_n1_k1_; + DsGridDesc_M0_M10_M11_N0_N10_N11 ds_grid_desc_m0_m10_m11_n0_n10_n11_; + EGridDesc_M0_M10_M11_N0_N10_N11 e_grid_desc_m0_m10_m11_n0_n10_n11_; + + // block-to-e-tile map + GroupedGemmBlock2ETileMap block_2_etile_map_; + ck::index_t BlockStart_, BlockEnd_; + }; + + // Argument + struct Argument : public BaseArgument + { + Argument(std::vector& p_As, + std::vector& p_Bs, + std::vector>& p_Ds, + std::vector& p_Es, + std::vector& gemm_descs, + AElementwiseOperation a_element_op, + BElementwiseOperation b_element_op, + CDEElementwiseOperation cde_element_op) + : a_element_op_{a_element_op}, + b_element_op_{b_element_op}, + cde_element_op_{cde_element_op} + { + grid_size_ = 0; + + group_count_ = ck::type_convert(gemm_descs.size()); + + if(!(group_count_ == ck::type_convert(p_As.size()) && + group_count_ == 
ck::type_convert(p_Bs.size()) && + group_count_ == ck::type_convert(p_Es.size()))) + { + throw std::runtime_error("wrong! group_count_ != p_As/b/c.size"); + } + + gemm_desc_kernel_arg_.reserve(group_count_); + + skipped_group_count_ = 0; + + for(std::size_t i = 0; i < gemm_descs.size(); i++) + { + const index_t M = gemm_descs[i].M_; + const index_t N = gemm_descs[i].N_; + const index_t K = gemm_descs[i].K_; + + a_mtx_mraw_kraw_.emplace_back(M, K); + b_mtx_nraw_kraw_.emplace_back(N, K); + + if(M == 0) + { + skipped_group_count_++; + continue; + } + + const index_t StrideA = gemm_descs[i].stride_A_; + const index_t StrideB = gemm_descs[i].stride_B_; + const index_t StrideE = gemm_descs[i].stride_C_; + + typename GridwiseGemm::DsGridPointer p_ds_grid{}; + DsGridDesc_M_N ds_grid_desc_m_n; + + static_for<0, NumDTensor, 1>{}([&](auto j) { + using DLayout = remove_cvref_t>; + using DDataType = remove_cvref_t>; + + p_ds_grid(j) = static_cast(p_Ds[i][j]); + ds_grid_desc_m_n(j) = DeviceOp::MakeEGridDescriptor_M_N( + M, N, gemm_descs[i].stride_Ds_[j]); + }); + + // tensor descriptors for problem definiton + const auto a_grid_desc_k0_m_k1 = + DeviceOp::MakeAGridDescriptor_K0_M_K1(M, K, StrideA); + const auto b_grid_desc_k0_n_k1 = + DeviceOp::MakeBGridDescriptor_K0_N_K1(K, N, StrideB); + const auto e_grid_desc_m_n = + DeviceOp::MakeEGridDescriptor_M_N(M, N, StrideE); + + if(GridwiseGemm::CheckValidity( + a_grid_desc_k0_m_k1, b_grid_desc_k0_n_k1, e_grid_desc_m_n)) + { + + const index_t grid_size_grp = + GroupedGemmBlock2ETileMap(e_grid_desc_m_n, 0) + .block_2_etile_map_.CalculateGridSize(e_grid_desc_m_n); + + const index_t BlockStart = grid_size_; + const index_t BlockEnd = grid_size_ + grid_size_grp; + + grid_size_ += grid_size_grp; + + // block-to-e-tile map + const auto block_2_etile_map = + GroupedGemmBlock2ETileMap(e_grid_desc_m_n, BlockStart); + + // tensor descriptors for block/thread-wise copy + const auto a_grid_desc_k0_m0_m1_k1 = + 
GridwiseGemm::MakeAGridDescriptor_K0_M0_M1_K1(a_grid_desc_k0_m_k1); + const auto b_grid_desc_k0_n0_n1_k1 = + GridwiseGemm::MakeBGridDescriptor_K0_N0_N1_K1(b_grid_desc_k0_n_k1); + const auto ds_grid_desc_m0_m10_m11_n0_n10_n11 = + GridwiseGemm::MakeDsGridDescriptor_M0_M10_M11_N0_N10_N11(ds_grid_desc_m_n); + const auto e_grid_desc_m0_m10_m11_n0_n10_n11 = + GridwiseGemm::MakeCGridDescriptor_M0_M10_M11_N0_N10_N11(e_grid_desc_m_n); + + gemm_desc_kernel_arg_.push_back( + GemmKernelArg{static_cast(p_As[i]), + static_cast(p_Bs[i]), + p_ds_grid, + static_cast(p_Es[i]), + a_grid_desc_k0_m_k1, + b_grid_desc_k0_n_k1, + ds_grid_desc_m_n, + e_grid_desc_m_n, + a_grid_desc_k0_m0_m1_k1, + b_grid_desc_k0_n0_n1_k1, + ds_grid_desc_m0_m10_m11_n0_n10_n11, + e_grid_desc_m0_m10_m11_n0_n10_n11, + block_2_etile_map, + BlockStart, + BlockEnd}); + } + } + } + + // private: + index_t group_count_; + index_t skipped_group_count_; + + // TODO: A,B element op is unused since gridwise_gemm_dl_v1r3 does NOT support prologue + // for the time being. 
+ AElementwiseOperation a_element_op_; + BElementwiseOperation b_element_op_; + CDEElementwiseOperation cde_element_op_; + + std::vector gemm_desc_kernel_arg_; + std::vector> a_mtx_mraw_kraw_; + std::vector> b_mtx_nraw_kraw_; + + index_t grid_size_; + }; + + // Invoker + struct Invoker : public BaseInvoker + { + using Argument = DeviceOp::Argument; + + float Run(const Argument& arg, const StreamConfig& stream_config = StreamConfig{}) + { + auto K0 = arg.gemm_desc_kernel_arg_[0].a_grid_desc_k0_m_k1_.GetLength(I0); + bool all_has_main_k_block_loop = GridwiseGemm::CalculateHasMainKBlockLoop(K0); + bool all_has_double_tail_k_block_loop = + GridwiseGemm::CalculateHasDoubleTailKBlockLoop(K0); + + for(std::size_t i = 0; i < arg.gemm_desc_kernel_arg_.size(); i++) + { +#if DEBUG_LOG + std::cout << "group: " << i << " arg.a_grid_desc_k0_m_k1_{" + << arg.gemm_desc_kernel_arg_[i].a_grid_desc_k0_m_k1_.GetLength(I0) << ", " + << arg.gemm_desc_kernel_arg_[i].a_grid_desc_k0_m_k1_.GetLength(I1) << ", " + << arg.gemm_desc_kernel_arg_[i].a_grid_desc_k0_m_k1_.GetLength(I2) << "}" + << std::endl; + + std::cout << ", arg.b_grid_desc_k0_n_k1_{" + << arg.gemm_desc_kernel_arg_[i].b_grid_desc_k0_n_k1_.GetLength(I0) << ", " + << arg.gemm_desc_kernel_arg_[i].b_grid_desc_k0_n_k1_.GetLength(I1) << ", " + << arg.gemm_desc_kernel_arg_[i].b_grid_desc_k0_n_k1_.GetLength(I2) << "}" + << std::endl; + + std::cout << ", arg.e_grid_desc_m_n_{ " + << arg.gemm_desc_kernel_arg_[i].e_grid_desc_m_n_.GetLength(I0) << ", " + << arg.gemm_desc_kernel_arg_[i].e_grid_desc_m_n_.GetLength(I1) << "}" + << std::endl; +#endif + + if(!GridwiseGemm::CheckValidity(arg.gemm_desc_kernel_arg_[i].a_grid_desc_k0_m_k1_, + arg.gemm_desc_kernel_arg_[i].b_grid_desc_k0_n_k1_, + arg.gemm_desc_kernel_arg_[i].e_grid_desc_m_n_)) + { + throw std::runtime_error( + "wrong! 
GridwiseGemmDlMultipleD_km_kn_mn has invalid setting"); + } + + K0 = arg.gemm_desc_kernel_arg_[i].a_grid_desc_k0_m0_m1_k1_.GetLength(I0); + bool not_all_has_main_k_block_loop_same = + all_has_main_k_block_loop xor GridwiseGemm::CalculateHasMainKBlockLoop(K0); + bool not_all_has_double_tail_k_block_loop_same = + all_has_double_tail_k_block_loop xor + GridwiseGemm::CalculateHasDoubleTailKBlockLoop(K0); + + if(not_all_has_main_k_block_loop_same or not_all_has_double_tail_k_block_loop_same) + { + std::ostringstream err; + err << "Not all gemms have same value for [main|double_tail]_k_block_loop! in " + << __FILE__ << ":" << __LINE__ << ", in function: " << __func__; + throw std::runtime_error(err.str()); + } + } + + hipGetErrorString(hipMemcpy(arg.p_workspace_, + arg.gemm_desc_kernel_arg_.data(), + arg.gemm_desc_kernel_arg_.size() * sizeof(GemmKernelArg), + hipMemcpyHostToDevice)); + + auto launch_kernel = [&](auto has_main_k_block_loop, + auto has_double_tail_k_block_loop) { + constexpr bool has_main_loop = has_main_k_block_loop.value; + constexpr bool has_double_loop = has_double_tail_k_block_loop.value; + + const auto kernel = kernel_grouped_gemm_multiple_d_dl; + + return launch_and_time_kernel( + stream_config, + kernel, + dim3(arg.grid_size_), + dim3(BlockSize), + 0, + cast_pointer_to_constant_address_space(arg.p_workspace_), + arg.gemm_desc_kernel_arg_.size(), + arg.a_element_op_, + arg.b_element_op_, + arg.cde_element_op_); + }; + + if(all_has_main_k_block_loop && all_has_double_tail_k_block_loop) + { + return launch_kernel(integral_constant{}, + integral_constant{}); + } + else if(all_has_main_k_block_loop && !all_has_double_tail_k_block_loop) + { + return launch_kernel(integral_constant{}, + integral_constant{}); + } + else if(!all_has_main_k_block_loop && all_has_double_tail_k_block_loop) + { + return launch_kernel(integral_constant{}, + integral_constant{}); + } + else + { + return launch_kernel(integral_constant{}, + integral_constant{}); + } + } + + // 
polymorphic + float Run(const BaseArgument* p_arg, + const StreamConfig& stream_config = StreamConfig{}) override + { + return Run(*dynamic_cast(p_arg), stream_config); + } + }; + + static bool IsSupportedArgument(const Argument& arg) + { + if((ck::type_convert(arg.gemm_desc_kernel_arg_.size()) + + arg.skipped_group_count_) != arg.group_count_) + { + return false; + } + + const std::string device_name = ck::get_device_name(); + + // TODO add newer Navi arch + if(device_name != "gfx906" and device_name != "gfx908" and device_name != "gfx90a" and + device_name != "gfx1030") + { + return false; + } + + for(std::size_t i = 0; i < arg.gemm_desc_kernel_arg_.size(); i++) + { + if(!GridwiseGemm::CheckValidity(arg.gemm_desc_kernel_arg_[i].a_grid_desc_k0_m_k1_, + arg.gemm_desc_kernel_arg_[i].b_grid_desc_k0_n_k1_, + arg.gemm_desc_kernel_arg_[i].e_grid_desc_m_n_)) + { + return false; + } + } + + return true; + } + + // polymorphic + bool IsSupportedArgument(const BaseArgument* p_arg) override + { + return IsSupportedArgument(*dynamic_cast(p_arg)); + } + + static auto MakeArgument(std::vector& p_As, + std::vector& p_Bs, + std::vector>& p_Ds, + std::vector& p_Es, + std::vector gemm_descs, + AElementwiseOperation a_element_op, + BElementwiseOperation b_element_op, + CDEElementwiseOperation cde_element_op) + { + return Argument{ + p_As, p_Bs, p_Ds, p_Es, gemm_descs, a_element_op, b_element_op, cde_element_op}; + } + + static auto MakeInvoker() { return Invoker{}; } + + // polymorphic + std::unique_ptr + MakeArgumentPointer(std::vector& p_As, + std::vector& p_Bs, + std::vector>& p_Ds, + std::vector& p_Es, + std::vector& gemm_descs, + AElementwiseOperation a_element_op, + BElementwiseOperation b_element_op, + CDEElementwiseOperation cde_element_op) override + { + return std::make_unique( + p_As, p_Bs, p_Ds, p_Es, gemm_descs, a_element_op, b_element_op, cde_element_op); + } + + // polymorphic + std::unique_ptr MakeInvokerPointer() override + { + return std::make_unique(Invoker{}); + 
} + + // polymorphic + std::string GetTypeString() const override + { + auto str = std::stringstream(); + + // clang-format off + str << "DeviceGroupedGemmMultipleD_Dl" + << "<" + << BlockSize << ", " + << MPerBlock << ", " + << NPerBlock << ", " + << K0PerBlock << ", " + << K1 << ", " + << M1PerThread << ", " + << N1PerThread << ", " + << KPerThread + << getGemmSpecializationString(GemmSpec) + << ">"; + // clang-format on + + return str.str(); + } + + size_t GetWorkSpaceSize(const BaseArgument* p_arg) const override + { + return dynamic_cast(p_arg)->group_count_ * sizeof(GemmKernelArg); + } +}; + +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_dl_multiple_d.hpp b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_dl_multiple_d.hpp index a9522a6691..6ade6eb226 100644 --- a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_dl_multiple_d.hpp +++ b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_dl_multiple_d.hpp @@ -238,19 +238,19 @@ struct GridwiseGemmDlMultipleD_km_kn_mn using BGridDesc_K0_N0_N1_K1 = decltype(MakeBGridDescriptor_K0_N0_N1_K1(BGridDesc_K0_N_K1{})); using CGridDesc_M0_M10_M11_N0_N10_N11 = decltype(MakeCGridDescriptor_M0_M10_M11_N0_N10_N11(CGridDesc_M_N{})); - using Block2CTileMap = decltype(MakeDefaultBlock2CTileMap(CGridDesc_M_N{})); using DsGridPointer = decltype(MakeDsGridPointer()); template + bool HasDoubleTailKBlockLoop, + typename Block2CTileMap> __device__ static void Run(const FloatAB* __restrict__ p_a_grid, const FloatAB* __restrict__ p_b_grid, DsGridPointer p_ds_grid, FloatC* __restrict__ p_c_grid, - FloatAB* __restrict__ p_shared_block, + void* __restrict__ p_shared_block, const AElementwiseOperation&, const BElementwiseOperation&, const CDEElementwiseOperation& cde_element_op, @@ -399,8 +399,9 @@ struct GridwiseGemmDlMultipleD_km_kn_mn constexpr auto b_block_aligned_space_size = math::integer_least_multiple( 
b_block_desc_k0_n0_n1_k1.GetElementSpaceSize(), max_lds_align); - FloatAB* p_a_block_double = p_shared_block; - FloatAB* p_b_block_double = p_shared_block + 2 * a_block_aligned_space_size; + FloatAB* p_a_block_double = static_cast(p_shared_block); + FloatAB* p_b_block_double = + static_cast(p_shared_block) + 2 * a_block_aligned_space_size; // register allocation for output auto c_thread_buf = make_static_buffer( From 16dc18e0f9f752be66d3e33e6e18dff894ab607a Mon Sep 17 00:00:00 2001 From: rocking5566 Date: Thu, 16 Mar 2023 04:29:40 +0800 Subject: [PATCH 17/31] gemm/Conv xdlops + dlops quantization (#625) * Add conv perlayer quantization * Add gemm_dlops quantization * Support int8 for innerproduct * Refine gemm dlops int8 kernel parameter * Support gfx908(MI100) and gfx90a(MI200) * clang-format * Rename example number * Support different layout for d tensor * Add conv dlops perchannel quantization example * Move to example 40 * Extract the common code for different platform (dlops and xdlops) * Move ot subfolder. Prepare to add other op of quantization * Refine the quantization instance library * Add conv dl instances and client example * Remove unnecessary type * Add gemm quantization instance * Add external api and client example * Refine num_bytes * Separete different layout to different cpp * Add more xdl instances * Revert "Remove unnecessary type" This reverts commit 820869182f6a8f62b2c9004101ba6bf76b96be14. 
* Remove CShuffleDataType in dlops Let acc and CShuffleDataType be the same in xdlops --------- Co-authored-by: zjing14 --- client_example/09_quantization/CMakeLists.txt | 3 + ..._fwd_bias_relu_perchannel_quantization.cpp | 39 +-- ...2d_fwd_bias_relu_perlayer_quantization.cpp | 37 +-- .../conv2d_fwd_perchannel_quantization.cpp | 37 +-- .../conv2d_fwd_perlayer_quantization.cpp | 30 +-- .../09_quantization/gemm_quantization.cpp | 193 ++++++++++++++ example/14_gemm_quantization/CMakeLists.txt | 4 + .../gemm_dl_quantization_int8.cpp | 204 +++++++++++++++ .../40_conv2d_fwd_quantization/CMakeLists.txt | 16 ++ example/40_conv2d_fwd_quantization/common.hpp | 18 ++ ...bias_relu_perchannel_quantization_int8.cpp | 81 ++++++ ...l_bias_relu_perlayer_quantization_int8.cpp | 79 ++++++ ...2d_fwd_dl_perchannel_quantization_int8.cpp | 79 ++++++ ...nv2d_fwd_dl_perlayer_quantization_int8.cpp | 74 ++++++ ...bias_relu_perchannel_quantization_int8.cpp | 85 +++++++ ...l_bias_relu_perlayer_quantization_int8.cpp | 83 +++++++ ...d_fwd_xdl_perchannel_quantization_int8.cpp | 83 +++++++ ...v2d_fwd_xdl_perlayer_quantization_int8.cpp | 78 ++++++ ..._relu_perchannel_quantization_example.inc} | 102 +------- ...as_relu_perlayer_quantization_example.inc} | 99 +------- ...2d_fwd_perchannel_quantization_example.inc | 235 ++++++++++++++++++ ...v2d_fwd_perlayer_quantization_example.inc} | 90 +------ .../44_conv2d_fwd_quantization/CMakeLists.txt | 3 - ..._conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp | 12 +- .../device/impl/device_gemm_multiple_d_dl.hpp | 4 +- .../gpu/element/quantization_operation.hpp | 61 ++++- .../gpu/grid/gridwise_gemm_dl_multiple_d.hpp | 4 +- include/ck/utility/inner_product.hpp | 22 ++ .../gpu/quantization/gemm_quantization.hpp | 224 +++++++++++++++++ ...n_bias_forward_perchannel_quantization.hpp | 45 +++- ...ion_bias_forward_perlayer_quantization.hpp | 45 +++- ...lution_forward_perchannel_quantization.hpp | 44 +++- ...volution_forward_perlayer_quantization.hpp | 44 +++- 
.../gpu/quantization/CMakeLists.txt | 42 +++- .../conv2d_fwd/conv2d_quantization_common.hpp | 59 +++++ ..._perchannel_quantization_int8_instance.cpp | 82 ++++++ ...as_perlayer_quantization_int8_instance.cpp | 82 ++++++ .../device_conv2d_dl_int8_instance.hpp | 33 +++ ..._perchannel_quantization_int8_instance.cpp | 80 ++++++ ...dl_perlayer_quantization_int8_instance.cpp | 80 ++++++ ..._perchannel_quantization_int8_instance.cpp | 58 +++-- ...as_perlayer_quantization_int8_instance.cpp | 82 ++++++ .../device_conv2d_xdl_int8_instance.hpp | 45 ++++ ..._perchannel_quantization_int8_instance.cpp | 80 ++++++ ...dl_perlayer_quantization_int8_instance.cpp | 80 ++++++ ...as_perlayer_quantization_int8_instance.cpp | 68 ----- .../device_conv2d_xdl_int8_instance.hpp | 111 --------- ..._perchannel_quantization_int8_instance.cpp | 62 ----- ...dl_perlayer_quantization_int8_instance.cpp | 62 ----- ...ization_dl_c_shuffle_i8_i8_i8_instance.hpp | 59 +++++ ...l_c_shuffle_i8_i8_i8_km_kn_mn_instance.cpp | 32 +++ ...l_c_shuffle_i8_i8_i8_km_nk_mn_instance.cpp | 32 +++ ...l_c_shuffle_i8_i8_i8_mk_kn_mn_instance.cpp | 32 +++ ...l_c_shuffle_i8_i8_i8_mk_nk_mn_instance.cpp | 32 +++ ...zation_xdl_c_shuffle_i8_i8_i8_instance.hpp | 116 +++++++++ ...l_c_shuffle_i8_i8_i8_km_kn_mn_instance.cpp | 49 ++++ ...l_c_shuffle_i8_i8_i8_km_nk_mn_instance.cpp | 49 ++++ ...l_c_shuffle_i8_i8_i8_mk_kn_mn_instance.cpp | 49 ++++ ...l_c_shuffle_i8_i8_i8_mk_nk_mn_instance.cpp | 49 ++++ .../gemm/gemm_quantization_common.hpp | 41 +++ 60 files changed, 3186 insertions(+), 717 deletions(-) create mode 100644 client_example/09_quantization/gemm_quantization.cpp create mode 100644 example/14_gemm_quantization/gemm_dl_quantization_int8.cpp create mode 100644 example/40_conv2d_fwd_quantization/CMakeLists.txt create mode 100644 example/40_conv2d_fwd_quantization/common.hpp create mode 100644 example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_relu_perchannel_quantization_int8.cpp create mode 100644 
example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_relu_perlayer_quantization_int8.cpp create mode 100644 example/40_conv2d_fwd_quantization/conv2d_fwd_dl_perchannel_quantization_int8.cpp create mode 100644 example/40_conv2d_fwd_quantization/conv2d_fwd_dl_perlayer_quantization_int8.cpp create mode 100644 example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perchannel_quantization_int8.cpp create mode 100644 example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perlayer_quantization_int8.cpp create mode 100644 example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_perchannel_quantization_int8.cpp create mode 100644 example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_perlayer_quantization_int8.cpp rename example/{44_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perchannel_quantization_int8.cpp => 40_conv2d_fwd_quantization/run_conv2d_fwd_bias_relu_perchannel_quantization_example.inc} (75%) rename example/{44_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perlayer_quantization_int8.cpp => 40_conv2d_fwd_quantization/run_conv2d_fwd_bias_relu_perlayer_quantization_example.inc} (74%) create mode 100644 example/40_conv2d_fwd_quantization/run_conv2d_fwd_perchannel_quantization_example.inc rename example/{44_conv2d_fwd_quantization/conv2d_fwd_xdl_perlayer_quantization_int8.cpp => 40_conv2d_fwd_quantization/run_conv2d_fwd_perlayer_quantization_example.inc} (71%) delete mode 100644 example/44_conv2d_fwd_quantization/CMakeLists.txt create mode 100644 library/include/ck/library/tensor_operation_instance/gpu/quantization/gemm_quantization.hpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/conv2d_quantization_common.hpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perchannel_quantization_int8_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perlayer_quantization_int8_instance.cpp create mode 100644 
library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_int8_instance.hpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_perchannel_quantization_int8_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_perlayer_quantization_int8_instance.cpp rename library/src/tensor_operation_instance/gpu/quantization/{ => conv2d_fwd}/device_conv2d_xdl_bias_perchannel_quantization_int8_instance.cpp (67%) create mode 100644 library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_int8_instance.hpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_perchannel_quantization_int8_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_perlayer_quantization_int8_instance.cpp delete mode 100644 library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp delete mode 100644 library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_int8_instance.hpp delete mode 100644 library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_perchannel_quantization_int8_instance.cpp delete mode 100644 library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_perlayer_quantization_int8_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_instance.hpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_kn_mn_instance.cpp create mode 100644 
library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_nk_mn_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_kn_mn_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_nk_mn_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_instance.hpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_kn_mn_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_nk_mn_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_kn_mn_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_nk_mn_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/quantization/gemm/gemm_quantization_common.hpp diff --git a/client_example/09_quantization/CMakeLists.txt b/client_example/09_quantization/CMakeLists.txt index 7dc9b860c0..a4dd80cd3f 100644 --- a/client_example/09_quantization/CMakeLists.txt +++ b/client_example/09_quantization/CMakeLists.txt @@ -9,3 +9,6 @@ target_link_libraries(client_conv2d_fwd_perchannel_quantization PRIVATE composab add_executable(client_conv2d_fwd_perlayer_quantization conv2d_fwd_perlayer_quantization.cpp) target_link_libraries(client_conv2d_fwd_perlayer_quantization PRIVATE composable_kernel::device_operations) + +add_executable(client_gemm_quantization gemm_quantization.cpp) +target_link_libraries(client_gemm_quantization PRIVATE composable_kernel::device_operations) diff --git 
a/client_example/09_quantization/conv2d_fwd_bias_relu_perchannel_quantization.cpp b/client_example/09_quantization/conv2d_fwd_bias_relu_perchannel_quantization.cpp index bcb0cefa71..cf6807f0dd 100644 --- a/client_example/09_quantization/conv2d_fwd_bias_relu_perchannel_quantization.cpp +++ b/client_example/09_quantization/conv2d_fwd_bias_relu_perchannel_quantization.cpp @@ -28,16 +28,15 @@ using OutElementOp = ck::tensor_operation::element_wise::Add_Activation_Mul2_Cla static constexpr ck::index_t NumDimSpatial = 2; static constexpr ck::index_t G = 1; -static constexpr ck::index_t N = 4; -static constexpr ck::index_t K = 64; -static constexpr ck::index_t C = 32; -static constexpr ck::index_t Y = 3; -static constexpr ck::index_t X = 3; -static constexpr ck::index_t Hi = 71; -static constexpr ck::index_t Wi = 71; -static constexpr ck::index_t Ho = 36; -static constexpr ck::index_t Wo = 36; - +static constexpr ck::index_t N = 4; // batch size +static constexpr ck::index_t K = 64; // output channel +static constexpr ck::index_t C = 192; // input channel +static constexpr ck::index_t Y = 3; // filter H +static constexpr ck::index_t X = 3; // filter W +static constexpr ck::index_t Hi = 71; // input H +static constexpr ck::index_t Wi = 71; // input W +static constexpr ck::index_t Ho = 36; // output H +static constexpr ck::index_t Wo = 36; // output W struct SimpleDeviceMem { SimpleDeviceMem() = delete; @@ -64,8 +63,8 @@ int main(int argc, char* argv[]) std::array bias_strides{K, 0, 1, 0, 0}; std::array requant_scale_lengths{G, N, K, Ho, Wo}; std::array requant_scale_strides{K, 0, 1, 0, 0}; - std::array out_lengths{G, N, C, Ho, Wo}; - std::array out_strides{N * Ho * Wo * C, Ho * Wo * C, 1, Wo * C, C}; + std::array out_lengths{G, N, K, Ho, Wo}; + std::array out_strides{N * Ho * Wo * K, Ho * Wo * K, 1, Wo * K, K}; std::array in_left_pad{1, 1}; std::array in_right_pad{1, 1}; std::array conv_strides{2, 2}; @@ -136,10 +135,11 @@ int main(int argc, char* argv[]) { float avg_time 
= invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true}); - std::size_t flop = G * 2 * N * K * C * Ho * Wo * Y * X; - std::size_t num_bytes = G * sizeof(InDataType) * N * Hi * Wi * C + - G * sizeof(WeiDataType) * K * Y * X * C + - G * sizeof(OutDataType) * N * Ho * Wo * K; + std::size_t flop = G * 2 * N * K * C * Ho * Wo * Y * X; + std::size_t num_bytes = + G * sizeof(InDataType) * N * Hi * Wi * C + G * sizeof(WeiDataType) * K * Y * X * C + + G * sizeof(BiasDataType) * K + G * sizeof(RequantScaleDataType) * K + + G * sizeof(OutDataType) * N * Ho * Wo * K; float tflops = static_cast(flop) / 1.E9 / avg_time; float gb_per_sec = num_bytes / 1.E6 / avg_time; @@ -162,11 +162,12 @@ int main(int argc, char* argv[]) } } - std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops - << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl; - // run the best intance + if(best_op_id != -1) { + std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops + << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl; + auto& op_ptr = op_ptrs[best_op_id]; std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString() << std::endl; diff --git a/client_example/09_quantization/conv2d_fwd_bias_relu_perlayer_quantization.cpp b/client_example/09_quantization/conv2d_fwd_bias_relu_perlayer_quantization.cpp index 26c7aa15e2..7cbbd28322 100644 --- a/client_example/09_quantization/conv2d_fwd_bias_relu_perlayer_quantization.cpp +++ b/client_example/09_quantization/conv2d_fwd_bias_relu_perlayer_quantization.cpp @@ -26,15 +26,15 @@ using OutElementOp = ck::tensor_operation::element_wise::Add_Activation_Mul_Clam static constexpr ck::index_t NumDimSpatial = 2; static constexpr ck::index_t G = 1; -static constexpr ck::index_t N = 4; -static constexpr ck::index_t K = 64; -static constexpr ck::index_t C = 32; -static constexpr ck::index_t Y = 3; -static constexpr ck::index_t X = 3; 
-static constexpr ck::index_t Hi = 71; -static constexpr ck::index_t Wi = 71; -static constexpr ck::index_t Ho = 36; -static constexpr ck::index_t Wo = 36; +static constexpr ck::index_t N = 4; // batch size +static constexpr ck::index_t K = 64; // output channel +static constexpr ck::index_t C = 192; // input channel +static constexpr ck::index_t Y = 3; // filter H +static constexpr ck::index_t X = 3; // filter W +static constexpr ck::index_t Hi = 71; // input H +static constexpr ck::index_t Wi = 71; // input W +static constexpr ck::index_t Ho = 36; // output H +static constexpr ck::index_t Wo = 36; // output W struct SimpleDeviceMem { @@ -60,8 +60,8 @@ int main(int argc, char* argv[]) std::array weight_strides{K * Y * X * C, Y * X * C, 1, X * C, C}; std::array bias_lengths{G, N, K, Ho, Wo}; std::array bias_strides{K, 0, 1, 0, 0}; - std::array out_lengths{G, N, C, Ho, Wo}; - std::array out_strides{N * Ho * Wo * C, Ho * Wo * C, 1, Wo * C, C}; + std::array out_lengths{G, N, K, Ho, Wo}; + std::array out_strides{N * Ho * Wo * K, Ho * Wo * K, 1, Wo * K, K}; std::array in_left_pad{1, 1}; std::array in_right_pad{1, 1}; std::array conv_strides{2, 2}; @@ -130,10 +130,10 @@ int main(int argc, char* argv[]) { float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true}); - std::size_t flop = G * 2 * N * K * C * Ho * Wo * Y * X; - std::size_t num_bytes = G * sizeof(InDataType) * N * Hi * Wi * C + - G * sizeof(WeiDataType) * K * Y * X * C + - G * sizeof(OutDataType) * N * Ho * Wo * K; + std::size_t flop = G * 2 * N * K * C * Ho * Wo * Y * X; + std::size_t num_bytes = + G * sizeof(InDataType) * N * Hi * Wi * C + G * sizeof(WeiDataType) * K * Y * X * C + + G * sizeof(BiasDataType) * K + G * sizeof(OutDataType) * N * Ho * Wo * K; float tflops = static_cast(flop) / 1.E9 / avg_time; float gb_per_sec = num_bytes / 1.E6 / avg_time; @@ -156,11 +156,12 @@ int main(int argc, char* argv[]) } } - std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " 
<< best_tflops - << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl; - // run the best intance + if(best_op_id != -1) { + std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops + << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl; + auto& op_ptr = op_ptrs[best_op_id]; std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString() << std::endl; diff --git a/client_example/09_quantization/conv2d_fwd_perchannel_quantization.cpp b/client_example/09_quantization/conv2d_fwd_perchannel_quantization.cpp index 475b2f03b4..c1c5a651eb 100644 --- a/client_example/09_quantization/conv2d_fwd_perchannel_quantization.cpp +++ b/client_example/09_quantization/conv2d_fwd_perchannel_quantization.cpp @@ -26,15 +26,15 @@ using OutElementOp = ck::tensor_operation::element_wise::Activation_Mul2_C static constexpr ck::index_t NumDimSpatial = 2; static constexpr ck::index_t G = 1; -static constexpr ck::index_t N = 4; -static constexpr ck::index_t K = 64; -static constexpr ck::index_t C = 32; -static constexpr ck::index_t Y = 3; -static constexpr ck::index_t X = 3; -static constexpr ck::index_t Hi = 71; -static constexpr ck::index_t Wi = 71; -static constexpr ck::index_t Ho = 36; -static constexpr ck::index_t Wo = 36; +static constexpr ck::index_t N = 4; // batch size +static constexpr ck::index_t K = 64; // output channel +static constexpr ck::index_t C = 192; // input channel +static constexpr ck::index_t Y = 3; // filter H +static constexpr ck::index_t X = 3; // filter W +static constexpr ck::index_t Hi = 71; // input H +static constexpr ck::index_t Wi = 71; // input W +static constexpr ck::index_t Ho = 36; // output H +static constexpr ck::index_t Wo = 36; // output W struct SimpleDeviceMem { @@ -60,8 +60,8 @@ int main(int argc, char* argv[]) std::array weight_strides{K * Y * X * C, Y * X * C, 1, X * C, C}; std::array requant_scale_lengths{G, N, K, Ho, Wo}; std::array 
requant_scale_strides{K, 0, 1, 0, 0}; - std::array out_lengths{G, N, C, Ho, Wo}; - std::array out_strides{N * Ho * Wo * C, Ho * Wo * C, 1, Wo * C, C}; + std::array out_lengths{G, N, K, Ho, Wo}; + std::array out_strides{N * Ho * Wo * K, Ho * Wo * K, 1, Wo * K, K}; std::array in_left_pad{1, 1}; std::array in_right_pad{1, 1}; std::array conv_strides{2, 2}; @@ -130,10 +130,10 @@ int main(int argc, char* argv[]) { float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true}); - std::size_t flop = G * 2 * N * K * C * Ho * Wo * Y * X; - std::size_t num_bytes = G * sizeof(InDataType) * N * Hi * Wi * C + - G * sizeof(WeiDataType) * K * Y * X * C + - G * sizeof(OutDataType) * N * Ho * Wo * K; + std::size_t flop = G * 2 * N * K * C * Ho * Wo * Y * X; + std::size_t num_bytes = + G * sizeof(InDataType) * N * Hi * Wi * C + G * sizeof(WeiDataType) * K * Y * X * C + + G * sizeof(RequantScaleDataType) * K + G * sizeof(OutDataType) * N * Ho * Wo * K; float tflops = static_cast(flop) / 1.E9 / avg_time; float gb_per_sec = num_bytes / 1.E6 / avg_time; @@ -156,11 +156,12 @@ int main(int argc, char* argv[]) } } - std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops - << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl; - // run the best intance + if(best_op_id != -1) { + std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops + << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl; + auto& op_ptr = op_ptrs[best_op_id]; std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString() << std::endl; diff --git a/client_example/09_quantization/conv2d_fwd_perlayer_quantization.cpp b/client_example/09_quantization/conv2d_fwd_perlayer_quantization.cpp index da7b7e6abf..daeff4ff4f 100644 --- a/client_example/09_quantization/conv2d_fwd_perlayer_quantization.cpp +++ b/client_example/09_quantization/conv2d_fwd_perlayer_quantization.cpp @@ -24,15 +24,15 
@@ using OutElementOp = ck::tensor_operation::element_wise::Activation_Mul_Clamp in_strides{N * Hi * Wi * C, Hi * Wi * C, 1, Wi * C, C}; std::array weight_lengths{G, K, C, Y, X}; std::array weight_strides{K * Y * X * C, Y * X * C, 1, X * C, C}; - std::array out_lengths{G, N, C, Ho, Wo}; - std::array out_strides{N * Ho * Wo * C, Ho * Wo * C, 1, Wo * C, C}; + std::array out_lengths{G, N, K, Ho, Wo}; + std::array out_strides{N * Ho * Wo * K, Ho * Wo * K, 1, Wo * K, K}; std::array in_left_pad{1, 1}; std::array in_right_pad{1, 1}; std::array conv_strides{2, 2}; @@ -150,11 +150,11 @@ int main(int argc, char* argv[]) } } - std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops - << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl; - - // run the best intance + if(best_op_id != -1) { + std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops + << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl; + auto& op_ptr = op_ptrs[best_op_id]; std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString() << std::endl; diff --git a/client_example/09_quantization/gemm_quantization.cpp b/client_example/09_quantization/gemm_quantization.cpp new file mode 100644 index 0000000000..242504b44f --- /dev/null +++ b/client_example/09_quantization/gemm_quantization.cpp @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include +#include +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/device_gemm_multiple_d.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +#include "ck/library/tensor_operation_instance/gpu/quantization/gemm_quantization.hpp" + +using Row = ck::tensor_layout::gemm::RowMajor; +using Col = ck::tensor_layout::gemm::ColumnMajor; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using AElementOp = PassThrough; +using BElementOp = PassThrough; +using ActivationOp = PassThrough; +using CDEElementOp = ck::tensor_operation::element_wise::Activation_Mul_Clamp; + +using ADataType = int8_t; +using BDataType = int8_t; +using EDataType = int8_t; + +using ALayout = Row; +using BLayout = Col; +using ELayout = Row; + +struct SimpleDeviceMem +{ + SimpleDeviceMem() = delete; + + SimpleDeviceMem(std::size_t mem_size) : p_mem_{} + { + (void)hipMalloc(static_cast(&p_mem_), mem_size); + } + + void* GetDeviceBuffer() { return p_mem_; } + + ~SimpleDeviceMem() { (void)hipFree(p_mem_); } + + void* p_mem_; +}; + +int main(int argc, char* argv[]) +{ + ck::index_t M = 1024; + ck::index_t N = 1024; + ck::index_t K = 1024; + + ck::index_t StrideA = 1024; + ck::index_t StrideB = 1024; + ck::index_t StrideE = 1024; + + float requant_scale = 0.03; + + auto f_matrix_space_size = + [](std::size_t nRow, std::size_t nCol, std::size_t stride, auto layout) { + using Layout = decltype(layout); + + if constexpr(std::is_same::value) + { + return (nRow - 1) * stride + nCol; + } + else + { + return (nCol - 1) * stride + nRow; + } + }; + + SimpleDeviceMem a_device_buf(sizeof(ADataType) * f_matrix_space_size(M, K, StrideA, ALayout{})); + SimpleDeviceMem b_device_buf(sizeof(BDataType) * f_matrix_space_size(K, N, StrideB, BLayout{})); + SimpleDeviceMem e_device_buf(sizeof(EDataType) * f_matrix_space_size(M, N, StrideE, ELayout{})); + + using DeviceOp = 
ck::tensor_operation::device::DeviceGemmMultipleD, + ELayout, + ADataType, + BDataType, + ck::Tuple<>, + EDataType, + AElementOp, + BElementOp, + CDEElementOp>; + + // get device op instances + const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory< + DeviceOp>::GetInstances(); + + std::cout << "found " << op_ptrs.size() << " instances" << std::endl; + + const auto a_element_op = AElementOp{}; + const auto b_element_op = BElementOp{}; + const auto cde_element_op = CDEElementOp{requant_scale, ActivationOp{}}; + + std::string best_op_name; + int best_op_id = -1; + float best_avg_time = std::numeric_limits::max(); + float best_gb_per_sec = 0; + float best_tflops = 0; + + // profile device operation instances + std::cout << "Run all instances and do timing" << std::endl; + + for(int i = 0; i < op_ptrs.size(); ++i) + { + auto& op_ptr = op_ptrs[i]; + auto argument_ptr = op_ptr->MakeArgumentPointer(a_device_buf.GetDeviceBuffer(), + b_device_buf.GetDeviceBuffer(), + {}, + e_device_buf.GetDeviceBuffer(), + M, + N, + K, + StrideA, + StrideB, + {}, + StrideE, + a_element_op, + b_element_op, + cde_element_op); + + auto invoker_ptr = op_ptr->MakeInvokerPointer(); + std::string op_name = op_ptr->GetTypeString(); + + if(op_ptr->IsSupportedArgument(argument_ptr.get())) + { + float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true}); + + std::size_t flop = std::size_t(2) * M * N * K; + std::size_t num_bytes = + sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(EDataType) * M * N; + + float tflops = static_cast(flop) / 1.E9 / avg_time; + float gb_per_sec = num_bytes / 1.E6 / avg_time; + + std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << tflops << " TFlops, " + << gb_per_sec << " GB/s, " << op_name << std::endl; + + if(tflops > best_tflops) + { + best_op_id = i; + best_op_name = op_name; + best_avg_time = avg_time; + best_gb_per_sec = gb_per_sec; + best_tflops = tflops; + } + } + else + { + 
std::cout << op_name << " does not support this problem" << std::endl; + } + } + + if(best_op_id != -1) + { + std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops + << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl; + + auto& op_ptr = op_ptrs[best_op_id]; + std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString() + << std::endl; + auto argument_ptr = op_ptr->MakeArgumentPointer(a_device_buf.GetDeviceBuffer(), + b_device_buf.GetDeviceBuffer(), + {}, + e_device_buf.GetDeviceBuffer(), + M, + N, + K, + StrideA, + StrideB, + {}, + StrideE, + a_element_op, + b_element_op, + cde_element_op); + + auto invoker_ptr = op_ptr->MakeInvokerPointer(); + + if(op_ptr->IsSupportedArgument(argument_ptr.get())) + { + invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false}); + } + + std::cout << "Done" << std::endl; + } + + return 0; +} \ No newline at end of file diff --git a/example/14_gemm_quantization/CMakeLists.txt b/example/14_gemm_quantization/CMakeLists.txt index ca09c48c10..8ea11df9ce 100644 --- a/example/14_gemm_quantization/CMakeLists.txt +++ b/example/14_gemm_quantization/CMakeLists.txt @@ -1,2 +1,6 @@ +# dlops +add_example_executable(example_gemm_dl_quantization_int8 gemm_dl_quantization_int8.cpp) + +# xdlops add_example_executable(example_gemm_xdl_bias_relu_quantization_int8 gemm_xdl_bias_relu_quantization_int8.cpp) add_example_executable(example_gemm_xdl_quantization_int8 gemm_xdl_quantization_int8.cpp) \ No newline at end of file diff --git a/example/14_gemm_quantization/gemm_dl_quantization_int8.cpp b/example/14_gemm_quantization/gemm_dl_quantization_int8.cpp new file mode 100644 index 0000000000..044f3c166a --- /dev/null +++ b/example/14_gemm_quantization/gemm_dl_quantization_int8.cpp @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include +#include +#include +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_dl.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +#include "ck/library/utility/device_memory.hpp" +#include "ck/library/utility/host_tensor.hpp" +#include "ck/library/utility/host_tensor_generator.hpp" +#include "ck/library/utility/literals.hpp" +#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp" +#include "ck/library/utility/check_err.hpp" + +template +using S = ck::Sequence; + +using I8 = int8_t; +using I32 = int32_t; +using Row = ck::tensor_layout::gemm::RowMajor; +using Col = ck::tensor_layout::gemm::ColumnMajor; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using AElementOp = PassThrough; +using BElementOp = PassThrough; +using ActivationOp = PassThrough; +using CDEElementOp = ck::tensor_operation::element_wise::Activation_Mul_Clamp; + +using ADataType = I8; +using BDataType = I8; +using AccDataType = I32; +using CShuffleDataType = I32; +using DsDataType = ck::Tuple<>; +using EDataType = I8; + +using ALayout = Row; +using BLayout = Col; +using DsLayout = ck::Tuple<>; +using ELayout = Row; + +static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default; + +using DeviceGemmInstance = ck::tensor_operation::device::DeviceGemmMultipleD_Dl< + ALayout, + BLayout, + DsLayout, + ELayout, + ADataType, + BDataType, + AccDataType, + DsDataType, + EDataType, + AElementOp, + BElementOp, + CDEElementOp, + GemmDefault, + 256, // BlockSize + 128, // MPerBlock + 128, // NPerBlock + 16, // K0PerBlock + 4, // K1 + 4, // M1PerThread + 4, // N1PerThread + 1, // KPerThread + S<8, 2>, // M1N1ThreadClusterM1Xs + S<8, 2>, // M1N1ThreadClusterN1Xs + S<8, 1, 1, 4>, // ABlockTransferThreadSliceLengths_K0_M0_M1_K1 + 
S<2, 1, 128, 1>, // ABlockTransferThreadClusterLengths_K0_M0_M1_K1 + S<1, 2, 0, 3>, // ABlockTransferThreadClusterArrangeOrder + S<1, 2, 0, 3>, // ABlockTransferSrcAccessOrder + S<4, 1, 1, 4>, // ABlockTransferSrcVectorTensorLengths_K0_M0_M1_K1 + S<1, 2, 0, 3>, // ABlockTransferSrcVectorTensorContiguousDimOrder + S<1, 1, 1, 4>, // ABlockTransferDstVectorTensorLengths_K0_M0_M1_K1 + S<8, 1, 1, 4>, // BBlockTransferThreadSliceLengths_K0_N0_N1_K1 + S<2, 1, 128, 1>, // BBlockTransferThreadClusterLengths_K0_N0_N1_K1 + S<1, 2, 0, 3>, // BBlockTransferThreadClusterArrangeOrder + S<1, 2, 0, 3>, // BBlockTransferSrcAccessOrder + S<4, 1, 1, 4>, // BBlockTransferSrcVectorTensorLengths_K0_N0_N1_K1 + S<1, 2, 0, 3>, // BBlockTransferSrcVectorTensorContiguousDimOrder + S<1, 1, 1, 4>, // BBlockTransferDstVectorTensorLengths_K0_N0_N1_K1 + S<0, 1, 2, 3, 4, 5>, // CThreadTransferSrcDstAccessOrder + 5, // CThreadTransferSrcDstVectorDim + 4>; // CThreadTransferDstScalarPerVector + +using ReferenceGemmInstance = ck::tensor_operation::host:: + ReferenceGemm; + +int main() +{ + bool do_verification = true; + bool time_kernel = false; + + // GEMM shape + ck::index_t M = 1024; + ck::index_t N = 1024; + ck::index_t K = 1024; + + ck::index_t StrideA = 1024; + ck::index_t StrideB = 1024; + ck::index_t StrideE = 1024; + + float requant_scale = 0.03; + + auto f_host_tensor_descriptor = + [](std::size_t row, std::size_t col, std::size_t stride, auto layout) { + using namespace ck::literals; + + if(std::is_same::value) + { + return HostTensorDescriptor(std::vector({row, col}), + std::vector({stride, 1_uz})); + } + else + { + return HostTensorDescriptor(std::vector({row, col}), + std::vector({1_uz, stride})); + } + }; + + Tensor a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{})); + Tensor b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{})); + Tensor e_m_n_host_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{})); + Tensor e_m_n_device_result(f_host_tensor_descriptor(M, N, StrideE, 
ELayout{})); + + std::cout << "a_m_k: " << a_m_k.mDesc << std::endl; + std::cout << "b_k_n: " << b_k_n.mDesc << std::endl; + std::cout << "e_m_n: " << e_m_n_host_result.mDesc << std::endl; + + a_m_k.GenerateTensorValue(GeneratorTensor_2{-5, 5}); + b_k_n.GenerateTensorValue(GeneratorTensor_2{-5, 5}); + + DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize()); + DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize()); + DeviceMem e_device_buf(sizeof(EDataType) * e_m_n_device_result.mDesc.GetElementSpaceSize()); + + a_device_buf.ToDevice(a_m_k.mData.data()); + b_device_buf.ToDevice(b_k_n.mData.data()); + + auto a_element_op = AElementOp{}; + auto b_element_op = BElementOp{}; + auto cde_element_op = CDEElementOp{requant_scale, ActivationOp{}}; + + // do GEMM + auto gemm = DeviceGemmInstance{}; + auto invoker = gemm.MakeInvoker(); + auto argument = gemm.MakeArgument(a_device_buf.GetDeviceBuffer(), + b_device_buf.GetDeviceBuffer(), + {}, + e_device_buf.GetDeviceBuffer(), + M, + N, + K, + StrideA, + StrideB, + {}, + StrideE, + a_element_op, + b_element_op, + cde_element_op); + + if(!gemm.IsSupportedArgument(argument)) + { + throw std::runtime_error( + "wrong! 
device_gemm with the specified compilation parameters does " + "not support this GEMM problem"); + } + + float ave_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel}); + + std::size_t flop = std::size_t(2) * M * N * K; + std::size_t num_btype = + sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(EDataType) * M * N; + + float tflops = static_cast(flop) / 1.E9 / ave_time; + + float gb_per_sec = num_btype / 1.E6 / ave_time; + + std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s, " + << gemm.GetTypeString() << std::endl; + + e_device_buf.FromDevice(e_m_n_device_result.mData.data()); + + if(do_verification) + { + auto ref_gemm = ReferenceGemmInstance{}; + auto ref_invoker = ref_gemm.MakeInvoker(); + + auto ref_argument = ref_gemm.MakeArgument( + a_m_k, b_k_n, e_m_n_host_result, a_element_op, b_element_op, cde_element_op); + + ref_invoker.Run(ref_argument); + + return ck::utils::check_err(e_m_n_device_result, e_m_n_host_result) ? 
0 : 1; + } + + return 0; +} diff --git a/example/40_conv2d_fwd_quantization/CMakeLists.txt b/example/40_conv2d_fwd_quantization/CMakeLists.txt new file mode 100644 index 0000000000..c3540d6ee6 --- /dev/null +++ b/example/40_conv2d_fwd_quantization/CMakeLists.txt @@ -0,0 +1,16 @@ +# Conv perlayer quantization +add_example_executable(example_conv2d_fwd_dl_perlayer_quantization_int8 conv2d_fwd_dl_perlayer_quantization_int8.cpp) +add_example_executable(example_conv2d_fwd_xdl_perlayer_quantization_int8 conv2d_fwd_xdl_perlayer_quantization_int8.cpp) + +# Conv perchannel quantization +add_example_executable(example_conv2d_fwd_dl_perchannel_quantization_int8 conv2d_fwd_dl_perchannel_quantization_int8.cpp) +add_example_executable(example_conv2d_fwd_xdl_perchannel_quantization_int8 conv2d_fwd_xdl_perchannel_quantization_int8.cpp) + +# Conv + bias + relu perlayer quantization +add_example_executable(example_conv2d_fwd_dl_bias_relu_perlayer_quantization_int8 conv2d_fwd_dl_bias_relu_perlayer_quantization_int8.cpp) +add_example_executable(example_conv2d_fwd_xdl_bias_relu_perlayer_quantization_int8 conv2d_fwd_xdl_bias_relu_perlayer_quantization_int8.cpp) + +# Conv + bias + relu perchannel quantization +add_example_executable(example_conv2d_fwd_dl_bias_relu_perchannel_quantization_int8 conv2d_fwd_dl_bias_relu_perchannel_quantization_int8.cpp) +add_example_executable(example_conv2d_fwd_xdl_bias_relu_perchannel_quantization_int8 conv2d_fwd_xdl_bias_relu_perchannel_quantization_int8.cpp) + diff --git a/example/40_conv2d_fwd_quantization/common.hpp b/example/40_conv2d_fwd_quantization/common.hpp new file mode 100644 index 0000000000..6ee14d750e --- /dev/null +++ b/example/40_conv2d_fwd_quantization/common.hpp @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#pragma once + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +#include "ck/library/utility/algorithm.hpp" +#include "ck/library/utility/check_err.hpp" +#include "ck/library/utility/device_memory.hpp" +#include "ck/library/utility/host_tensor.hpp" +#include "ck/library/utility/host_tensor_generator.hpp" +#include "ck/library/utility/literals.hpp" +#include "ck/library/utility/convolution_parameter.hpp" +#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp" +#include "ck/library/reference_tensor_operation/cpu/reference_conv_fwd.hpp" diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_relu_perchannel_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_relu_perchannel_quantization_int8.cpp new file mode 100644 index 0000000000..df10e80396 --- /dev/null +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_relu_perchannel_quantization_int8.cpp @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "common.hpp" +#include "ck/tensor_operation/gpu/device/device_grouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp" + +using InDataType = int8_t; +using WeiDataType = int8_t; +using BiasDataType = int32_t; +using RequantScaleDataType = float; +using AccDataType = int32_t; +using OutDataType = int8_t; + +template +using S = ck::Sequence; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using InElementOp = PassThrough; +using WeiElementOp = PassThrough; +using ActivationOp = ck::tensor_operation::element_wise::Relu; +using OutElementOp = ck::tensor_operation::element_wise::Add_Activation_Mul2_Clamp; + +static constexpr auto ConvSpec = + ck::tensor_operation::device::ConvolutionForwardSpecialization::Default; + +static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +template +using DeviceGroupedConvNDFwdInstance = + ck::tensor_operation::device::DeviceGroupedConvFwdDlMultipleD_NHWC_KYXC_NHWK< + NDimSpatial, + InDataType, + WeiDataType, + ck::Tuple, + OutDataType, + AccDataType, + InLayout, + WeiLayout, + ck::Tuple, + OutLayout, + InElementOp, + WeiElementOp, + OutElementOp, + ConvSpec, // ConvForwardSpecialization + GemmSpec, // GemmSpecialization + 256, // BlockSize + 128, // MPerBlock + 128, // NPerBlock + 16, // K0PerBlock + 4, // K1 + 4, // M1PerThread + 4, // N1PerThread + 1, // KPerThread + S<8, 2>, // M1N1ThreadClusterM1Xs + S<8, 2>, // M1N1ThreadClusterN1Xs + S<8, 1, 1, 4>, // ABlockTransferThreadSliceLengths_K0_M0_M1_K1 + S<2, 1, 128, 1>, // ABlockTransferThreadClusterLengths_K0_M0_M1_K1 + S<1, 2, 0, 3>, // ABlockTransferThreadClusterArrangeOrder + S<1, 2, 0, 3>, // ABlockTransferSrcAccessOrder + S<4, 1, 1, 4>, // ABlockTransferSrcVectorTensorLengths_K0_M0_M1_K1 + S<1, 2, 0, 3>, // ABlockTransferSrcVectorTensorContiguousDimOrder + S<1, 1, 1, 4>, // ABlockTransferDstVectorTensorLengths_K0_M0_M1_K1 + S<8, 1, 1, 4>, // BBlockTransferThreadSliceLengths_K0_N0_N1_K1 + S<2, 1, 128, 1>, // 
BBlockTransferThreadClusterLengths_K0_N0_N1_K1 + S<1, 2, 0, 3>, // BBlockTransferThreadClusterArrangeOrder + S<1, 2, 0, 3>, // BBlockTransferSrcAccessOrder + S<4, 1, 1, 4>, // BBlockTransferSrcVectorTensorLengths_K0_N0_N1_K1 + S<1, 2, 0, 3>, // BBlockTransferSrcVectorTensorContiguousDimOrder + S<1, 1, 1, 4>, // BBlockTransferDstVectorTensorLengths_K0_N0_N1_K1 + S<0, 1, 2, 3, 4, 5>, // CThreadTransferSrcDstAccessOrder + 5, // CThreadTransferSrcDstVectorDim + 4>; // CThreadTransferDstScalarPerVector + +#include "run_conv2d_fwd_bias_relu_perchannel_quantization_example.inc" + +int main() { run_conv2d_fwd_bias_relu_perchannel_quantization_example(); }; diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_relu_perlayer_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_relu_perlayer_quantization_int8.cpp new file mode 100644 index 0000000000..18f9197b9c --- /dev/null +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_relu_perlayer_quantization_int8.cpp @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "common.hpp" +#include "ck/tensor_operation/gpu/device/device_grouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp" + +using InDataType = int8_t; +using WeiDataType = int8_t; +using BiasDataType = int32_t; +using AccDataType = int32_t; +using OutDataType = int8_t; + +template +using S = ck::Sequence; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using InElementOp = PassThrough; +using WeiElementOp = PassThrough; +using ActivationOp = ck::tensor_operation::element_wise::Relu; +using OutElementOp = ck::tensor_operation::element_wise::Add_Activation_Mul_Clamp; + +static constexpr auto ConvSpec = + ck::tensor_operation::device::ConvolutionForwardSpecialization::Default; + +static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +template +using DeviceGroupedConvNDFwdInstance = + ck::tensor_operation::device::DeviceGroupedConvFwdDlMultipleD_NHWC_KYXC_NHWK< + NDimSpatial, + InDataType, + WeiDataType, + ck::Tuple, + OutDataType, + AccDataType, + InLayout, + WeiLayout, + ck::Tuple, + OutLayout, + InElementOp, + WeiElementOp, + OutElementOp, + ConvSpec, // ConvForwardSpecialization + GemmSpec, // GemmSpecialization + 256, // BlockSize + 128, // MPerBlock + 128, // NPerBlock + 16, // K0PerBlock + 4, // K1 + 4, // M1PerThread + 4, // N1PerThread + 1, // KPerThread + S<8, 2>, // M1N1ThreadClusterM1Xs + S<8, 2>, // M1N1ThreadClusterN1Xs + S<8, 1, 1, 4>, // ABlockTransferThreadSliceLengths_K0_M0_M1_K1 + S<2, 1, 128, 1>, // ABlockTransferThreadClusterLengths_K0_M0_M1_K1 + S<1, 2, 0, 3>, // ABlockTransferThreadClusterArrangeOrder + S<1, 2, 0, 3>, // ABlockTransferSrcAccessOrder + S<4, 1, 1, 4>, // ABlockTransferSrcVectorTensorLengths_K0_M0_M1_K1 + S<1, 2, 0, 3>, // ABlockTransferSrcVectorTensorContiguousDimOrder + S<1, 1, 1, 4>, // ABlockTransferDstVectorTensorLengths_K0_M0_M1_K1 + S<8, 1, 1, 4>, // BBlockTransferThreadSliceLengths_K0_N0_N1_K1 + S<2, 1, 128, 1>, // 
BBlockTransferThreadClusterLengths_K0_N0_N1_K1 + S<1, 2, 0, 3>, // BBlockTransferThreadClusterArrangeOrder + S<1, 2, 0, 3>, // BBlockTransferSrcAccessOrder + S<4, 1, 1, 4>, // BBlockTransferSrcVectorTensorLengths_K0_N0_N1_K1 + S<1, 2, 0, 3>, // BBlockTransferSrcVectorTensorContiguousDimOrder + S<1, 1, 1, 4>, // BBlockTransferDstVectorTensorLengths_K0_N0_N1_K1 + S<0, 1, 2, 3, 4, 5>, // CThreadTransferSrcDstAccessOrder + 5, // CThreadTransferSrcDstVectorDim + 4>; // CThreadTransferDstScalarPerVector + +#include "run_conv2d_fwd_bias_relu_perlayer_quantization_example.inc" + +int main() { run_conv2d_fwd_bias_relu_perlayer_quantization_example(); } diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_perchannel_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_perchannel_quantization_int8.cpp new file mode 100644 index 0000000000..afff7f8b69 --- /dev/null +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_perchannel_quantization_int8.cpp @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "common.hpp" +#include "ck/tensor_operation/gpu/device/device_grouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp" + +using InDataType = int8_t; +using WeiDataType = int8_t; +using RequantScaleDataType = float; +using AccDataType = int32_t; +using OutDataType = int8_t; + +template +using S = ck::Sequence; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using InElementOp = PassThrough; +using WeiElementOp = PassThrough; +using ActivationOp = ck::tensor_operation::element_wise::Relu; +using OutElementOp = ck::tensor_operation::element_wise::Activation_Mul2_Clamp; + +static constexpr auto ConvSpec = + ck::tensor_operation::device::ConvolutionForwardSpecialization::Default; + +static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +template +using DeviceGroupedConvNDFwdInstance = + ck::tensor_operation::device::DeviceGroupedConvFwdDlMultipleD_NHWC_KYXC_NHWK< + NDimSpatial, + InDataType, + WeiDataType, + ck::Tuple, + OutDataType, + AccDataType, + InLayout, + WeiLayout, + ck::Tuple, + OutLayout, + InElementOp, + WeiElementOp, + OutElementOp, + ConvSpec, // ConvForwardSpecialization + GemmSpec, // GemmSpecialization + 256, // BlockSize + 128, // MPerBlock + 128, // NPerBlock + 16, // K0PerBlock + 4, // K1 + 4, // M1PerThread + 4, // N1PerThread + 1, // KPerThread + S<8, 2>, // M1N1ThreadClusterM1Xs + S<8, 2>, // M1N1ThreadClusterN1Xs + S<8, 1, 1, 4>, // ABlockTransferThreadSliceLengths_K0_M0_M1_K1 + S<2, 1, 128, 1>, // ABlockTransferThreadClusterLengths_K0_M0_M1_K1 + S<1, 2, 0, 3>, // ABlockTransferThreadClusterArrangeOrder + S<1, 2, 0, 3>, // ABlockTransferSrcAccessOrder + S<4, 1, 1, 4>, // ABlockTransferSrcVectorTensorLengths_K0_M0_M1_K1 + S<1, 2, 0, 3>, // ABlockTransferSrcVectorTensorContiguousDimOrder + S<1, 1, 1, 4>, // ABlockTransferDstVectorTensorLengths_K0_M0_M1_K1 + S<8, 1, 1, 4>, // BBlockTransferThreadSliceLengths_K0_N0_N1_K1 + S<2, 1, 128, 1>, // 
BBlockTransferThreadClusterLengths_K0_N0_N1_K1 + S<1, 2, 0, 3>, // BBlockTransferThreadClusterArrangeOrder + S<1, 2, 0, 3>, // BBlockTransferSrcAccessOrder + S<4, 1, 1, 4>, // BBlockTransferSrcVectorTensorLengths_K0_N0_N1_K1 + S<1, 2, 0, 3>, // BBlockTransferSrcVectorTensorContiguousDimOrder + S<1, 1, 1, 4>, // BBlockTransferDstVectorTensorLengths_K0_N0_N1_K1 + S<0, 1, 2, 3, 4, 5>, // CThreadTransferSrcDstAccessOrder + 5, // CThreadTransferSrcDstVectorDim + 4>; // CThreadTransferDstScalarPerVector + +#include "run_conv2d_fwd_perchannel_quantization_example.inc" + +int main() { run_conv2d_fwd_perchannel_quantization_example(); } diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_perlayer_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_perlayer_quantization_int8.cpp new file mode 100644 index 0000000000..a38fe2a6c3 --- /dev/null +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_perlayer_quantization_int8.cpp @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "common.hpp" +#include "ck/tensor_operation/gpu/device/device_grouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp" + +using InDataType = int8_t; +using WeiDataType = int8_t; +using AccDataType = int32_t; +using OutDataType = int8_t; + +template +using S = ck::Sequence; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using InElementOp = PassThrough; +using WeiElementOp = PassThrough; +using ActivationOp = PassThrough; +using OutElementOp = ck::tensor_operation::element_wise::Activation_Mul_Clamp; + +static constexpr auto ConvSpec = + ck::tensor_operation::device::ConvolutionForwardSpecialization::Default; + +static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +template +using DeviceGroupedConvNDFwdInstance = + ck::tensor_operation::device::DeviceGroupedConvFwdDlMultipleD_NHWC_KYXC_NHWK< + NDimSpatial, + InDataType, + WeiDataType, + ck::Tuple<>, + OutDataType, + AccDataType, + InLayout, + WeiLayout, + ck::Tuple<>, + OutLayout, + InElementOp, + WeiElementOp, + OutElementOp, + ConvSpec, // ConvForwardSpecialization + GemmSpec, // GemmSpecialization + 256, // BlockSize + 128, // MPerBlock + 128, // NPerBlock + 16, // K0PerBlock + 4, // K1 + 4, // M1PerThread + 4, // N1PerThread + 1, // KPerThread + S<8, 2>, // M1N1ThreadClusterM1Xs + S<8, 2>, // M1N1ThreadClusterN1Xs + S<8, 1, 1, 4>, // ABlockTransferThreadSliceLengths_K0_M0_M1_K1 + S<2, 1, 128, 1>, // ABlockTransferThreadClusterLengths_K0_M0_M1_K1 + S<1, 2, 0, 3>, // ABlockTransferThreadClusterArrangeOrder + S<1, 2, 0, 3>, // ABlockTransferSrcAccessOrder + S<4, 1, 1, 4>, // ABlockTransferSrcVectorTensorLengths_K0_M0_M1_K1 + S<1, 2, 0, 3>, // ABlockTransferSrcVectorTensorContiguousDimOrder + S<1, 1, 1, 4>, // ABlockTransferDstVectorTensorLengths_K0_M0_M1_K1 + S<8, 1, 1, 4>, // BBlockTransferThreadSliceLengths_K0_N0_N1_K1 + S<2, 1, 128, 1>, // BBlockTransferThreadClusterLengths_K0_N0_N1_K1 + S<1, 2, 0, 3>, // 
BBlockTransferThreadClusterArrangeOrder + S<1, 2, 0, 3>, // BBlockTransferSrcAccessOrder + S<4, 1, 1, 4>, // BBlockTransferSrcVectorTensorLengths_K0_N0_N1_K1 + S<1, 2, 0, 3>, // BBlockTransferSrcVectorTensorContiguousDimOrder + S<1, 1, 1, 4>, // BBlockTransferDstVectorTensorLengths_K0_N0_N1_K1 + S<0, 1, 2, 3, 4, 5>, // CThreadTransferSrcDstAccessOrder + 5, // CThreadTransferSrcDstVectorDim + 4>; // CThreadTransferDstScalarPerVector + +#include "run_conv2d_fwd_perlayer_quantization_example.inc" + +int main() { run_conv2d_fwd_perlayer_quantization_example(); } diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perchannel_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perchannel_quantization_int8.cpp new file mode 100644 index 0000000000..ba6990d938 --- /dev/null +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perchannel_quantization_int8.cpp @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "common.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp" + +using InDataType = int8_t; +using WeiDataType = int8_t; +using BiasDataType = int32_t; +using RequantScaleDataType = float; +using AccDataType = int32_t; +using CShuffleDataType = AccDataType; +using OutDataType = int8_t; + +template +using S = ck::Sequence; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using InElementOp = PassThrough; +using WeiElementOp = PassThrough; +using ActivationOp = ck::tensor_operation::element_wise::Relu; +using OutElementOp = ck::tensor_operation::element_wise::Add_Activation_Mul2_Clamp; + +static constexpr auto ConvSpec = + ck::tensor_operation::device::ConvolutionForwardSpecialization::Default; + +static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +template +using DeviceGroupedConvNDFwdInstance = + ck::tensor_operation::device::DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< + NDimSpatial, + InLayout, + WeiLayout, + ck::Tuple, + OutLayout, + InDataType, + WeiDataType, + AccDataType, + CShuffleDataType, + ck::Tuple, + OutDataType, + InElementOp, + WeiElementOp, + OutElementOp, + ConvSpec, // ConvForwardSpecialization + GemmSpec, // GemmSpecialization + 1, // + 256, // BlockSize + 128, // MPerBlock + 256, // NPerBlock + 64, // KPerBlock + 16, // AK1 + 16, // BK1 + 32, // MPerXdl + 32, // NPerXdl + 2, // MXdlPerWave + 4, // NXdlPerWave + S<4, 64, 1>, // ABlockTransferThreadClusterLengths_AK0_M_AK1 + S<1, 0, 2>, // ABlockTransferThreadClusterArrangeOrder + S<1, 0, 2>, // ABlockTransferSrcAccessOrder + 2, // ABlockTransferSrcVectorDim + 16, // ABlockTransferSrcScalarPerVector + 16, // ABlockTransferDstScalarPerVector_AK1 + 1, // ABlockLdsExtraM + S<4, 64, 1>, // BBlockTransferThreadClusterLengths_BK0_N_BK1 + S<1, 0, 2>, // BBlockTransferThreadClusterArrangeOrder + S<1, 0, 2>, // BBlockTransferSrcAccessOrder + 2, // BBlockTransferSrcVectorDim + 
16, // BBlockTransferSrcScalarPerVector + 16, // BBlockTransferDstScalarPerVector_BK1 + 1, // BBlockLdsExtraN + 1, + 1, + S<1, 64, 1, 4>, + 8>; + +#include "run_conv2d_fwd_bias_relu_perchannel_quantization_example.inc" + +int main() { run_conv2d_fwd_bias_relu_perchannel_quantization_example(); }; diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perlayer_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perlayer_quantization_int8.cpp new file mode 100644 index 0000000000..690d70e112 --- /dev/null +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perlayer_quantization_int8.cpp @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. + +#include "common.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp" + +using InDataType = int8_t; +using WeiDataType = int8_t; +using BiasDataType = int32_t; +using AccDataType = int32_t; +using CShuffleDataType = AccDataType; +using OutDataType = int8_t; + +template +using S = ck::Sequence; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using InElementOp = PassThrough; +using WeiElementOp = PassThrough; +using ActivationOp = ck::tensor_operation::element_wise::Relu; +using OutElementOp = ck::tensor_operation::element_wise::Add_Activation_Mul_Clamp; + +static constexpr auto ConvSpec = + ck::tensor_operation::device::ConvolutionForwardSpecialization::Default; + +static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +template +using DeviceGroupedConvNDFwdInstance = + ck::tensor_operation::device::DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< + NDimSpatial, + InLayout, + WeiLayout, + ck::Tuple, + OutLayout, + InDataType, + WeiDataType, + AccDataType, + CShuffleDataType, + ck::Tuple, + OutDataType, + InElementOp, + WeiElementOp, + OutElementOp, + ConvSpec, // 
ConvForwardSpecialization + GemmSpec, // GemmSpecialization + 1, // + 256, // BlockSize + 128, // MPerBlock + 256, // NPerBlock + 64, // KPerBlock + 16, // AK1 + 16, // BK1 + 32, // MPerXdl + 32, // NPerXdl + 2, // MXdlPerWave + 4, // NXdlPerWave + S<4, 64, 1>, // ABlockTransferThreadClusterLengths_AK0_M_AK1 + S<1, 0, 2>, // ABlockTransferThreadClusterArrangeOrder + S<1, 0, 2>, // ABlockTransferSrcAccessOrder + 2, // ABlockTransferSrcVectorDim + 16, // ABlockTransferSrcScalarPerVector + 16, // ABlockTransferDstScalarPerVector_AK1 + 1, // ABlockLdsExtraM + S<4, 64, 1>, // BBlockTransferThreadClusterLengths_BK0_N_BK1 + S<1, 0, 2>, // BBlockTransferThreadClusterArrangeOrder + S<1, 0, 2>, // BBlockTransferSrcAccessOrder + 2, // BBlockTransferSrcVectorDim + 16, // BBlockTransferSrcScalarPerVector + 16, // BBlockTransferDstScalarPerVector_BK1 + 1, // BBlockLdsExtraN + 1, + 1, + S<1, 64, 1, 4>, + 8>; + +#include "run_conv2d_fwd_bias_relu_perlayer_quantization_example.inc" + +int main() { run_conv2d_fwd_bias_relu_perlayer_quantization_example(); } diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_perchannel_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_perchannel_quantization_int8.cpp new file mode 100644 index 0000000000..dd755ff065 --- /dev/null +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_perchannel_quantization_int8.cpp @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "common.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp" + +using InDataType = int8_t; +using WeiDataType = int8_t; +using RequantScaleDataType = float; +using AccDataType = int32_t; +using CShuffleDataType = AccDataType; +using OutDataType = int8_t; + +template +using S = ck::Sequence; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using InElementOp = PassThrough; +using WeiElementOp = PassThrough; +using ActivationOp = ck::tensor_operation::element_wise::Relu; +using OutElementOp = ck::tensor_operation::element_wise::Activation_Mul2_Clamp; + +static constexpr auto ConvSpec = + ck::tensor_operation::device::ConvolutionForwardSpecialization::Default; + +static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +template +using DeviceGroupedConvNDFwdInstance = + ck::tensor_operation::device::DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< + NDimSpatial, + InLayout, + WeiLayout, + ck::Tuple, + OutLayout, + InDataType, + WeiDataType, + AccDataType, + CShuffleDataType, + ck::Tuple, + OutDataType, + InElementOp, + WeiElementOp, + OutElementOp, + ConvSpec, // ConvForwardSpecialization + GemmSpec, // GemmSpecialization + 1, // + 256, // BlockSize + 128, // MPerBlock + 256, // NPerBlock + 64, // KPerBlock + 16, // AK1 + 16, // BK1 + 32, // MPerXdl + 32, // NPerXdl + 2, // MXdlPerWave + 4, // NXdlPerWave + S<4, 64, 1>, // ABlockTransferThreadClusterLengths_AK0_M_AK1 + S<1, 0, 2>, // ABlockTransferThreadClusterArrangeOrder + S<1, 0, 2>, // ABlockTransferSrcAccessOrder + 2, // ABlockTransferSrcVectorDim + 16, // ABlockTransferSrcScalarPerVector + 16, // ABlockTransferDstScalarPerVector_AK1 + 1, // ABlockLdsExtraM + S<4, 64, 1>, // BBlockTransferThreadClusterLengths_BK0_N_BK1 + S<1, 0, 2>, // BBlockTransferThreadClusterArrangeOrder + S<1, 0, 2>, // BBlockTransferSrcAccessOrder + 2, // BBlockTransferSrcVectorDim + 16, // 
BBlockTransferSrcScalarPerVector + 16, // BBlockTransferDstScalarPerVector_BK1 + 1, // BBlockLdsExtraN + 1, + 1, + S<1, 64, 1, 4>, + 8>; + +#include "run_conv2d_fwd_perchannel_quantization_example.inc" + +int main() { run_conv2d_fwd_perchannel_quantization_example(); } diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_perlayer_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_perlayer_quantization_int8.cpp new file mode 100644 index 0000000000..48617e4775 --- /dev/null +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_perlayer_quantization_int8.cpp @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. + +#include "common.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp" + +using InDataType = int8_t; +using WeiDataType = int8_t; +using AccDataType = int32_t; +using CShuffleDataType = AccDataType; +using OutDataType = int8_t; + +template +using S = ck::Sequence; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using InElementOp = PassThrough; +using WeiElementOp = PassThrough; +using ActivationOp = PassThrough; +using OutElementOp = ck::tensor_operation::element_wise::Activation_Mul_Clamp; + +static constexpr auto ConvSpec = + ck::tensor_operation::device::ConvolutionForwardSpecialization::Default; + +static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +template +using DeviceGroupedConvNDFwdInstance = + ck::tensor_operation::device::DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< + NDimSpatial, + InLayout, + WeiLayout, + ck::Tuple<>, + OutLayout, + InDataType, + WeiDataType, + AccDataType, + CShuffleDataType, + ck::Tuple<>, + OutDataType, + InElementOp, + WeiElementOp, + OutElementOp, + ConvSpec, // ConvForwardSpecialization + GemmSpec, // GemmSpecialization + 1, // + 256, // BlockSize + 128, // MPerBlock + 256, // NPerBlock + 64, // 
KPerBlock + 16, // AK1 + 16, // BK1 + 32, // MPerXdl + 32, // NPerXdl + 2, // MXdlPerWave + 4, // NXdlPerWave + S<4, 64, 1>, // ABlockTransferThreadClusterLengths_AK0_M_AK1 + S<1, 0, 2>, // ABlockTransferThreadClusterArrangeOrder + S<1, 0, 2>, // ABlockTransferSrcAccessOrder + 2, // ABlockTransferSrcVectorDim + 16, // ABlockTransferSrcScalarPerVector + 16, // ABlockTransferDstScalarPerVector_AK1 + 1, // ABlockLdsExtraM + S<4, 64, 1>, // BBlockTransferThreadClusterLengths_BK0_N_BK1 + S<1, 0, 2>, // BBlockTransferThreadClusterArrangeOrder + S<1, 0, 2>, // BBlockTransferSrcAccessOrder + 2, // BBlockTransferSrcVectorDim + 16, // BBlockTransferSrcScalarPerVector + 16, // BBlockTransferDstScalarPerVector_BK1 + 1, // BBlockLdsExtraN + 1, + 1, + S<1, 64, 1, 4>, + 16>; + +#include "run_conv2d_fwd_perlayer_quantization_example.inc" + +int main() { run_conv2d_fwd_perlayer_quantization_example(); } diff --git a/example/44_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perchannel_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/run_conv2d_fwd_bias_relu_perchannel_quantization_example.inc similarity index 75% rename from example/44_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perchannel_quantization_int8.cpp rename to example/40_conv2d_fwd_quantization/run_conv2d_fwd_bias_relu_perchannel_quantization_example.inc index 832665edc0..822a1ed8b5 100644 --- a/example/44_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perchannel_quantization_int8.cpp +++ b/example/40_conv2d_fwd_quantization/run_conv2d_fwd_bias_relu_perchannel_quantization_example.inc @@ -1,97 +1,7 @@ // SPDX-License-Identifier: MIT // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
-#include "ck/ck.hpp" -#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" -#include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp" -#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" - -#include "ck/library/utility/algorithm.hpp" -#include "ck/library/utility/check_err.hpp" -#include "ck/library/utility/device_memory.hpp" -#include "ck/library/utility/host_tensor.hpp" -#include "ck/library/utility/host_tensor_generator.hpp" -#include "ck/library/utility/literals.hpp" -#include "ck/library/utility/convolution_parameter.hpp" -#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp" -#include "ck/library/reference_tensor_operation/cpu/reference_conv_fwd.hpp" - -using InDataType = int8_t; -using WeiDataType = int8_t; -using BiasDataType = int32_t; -using RequantScaleDataType = float; -using AccDataType = int32_t; -using CShuffleDataType = int32_t; -using OutDataType = int8_t; - -template -using S = ck::Sequence; - -using PassThrough = ck::tensor_operation::element_wise::PassThrough; -using InElementOp = PassThrough; -using WeiElementOp = PassThrough; -using ActivationOp = ck::tensor_operation::element_wise::Relu; -using OutElementOp = ck::tensor_operation::element_wise::Add_Activation_Mul2_Clamp; - -static constexpr auto ConvSpec = - ck::tensor_operation::device::ConvolutionForwardSpecialization::Default; - -static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding; - -template -using DeviceGroupedConvNDFwdInstance = - ck::tensor_operation::device::DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< - NDimSpatial, - InLayout, - WeiLayout, - ck::Tuple, - OutLayout, - InDataType, - WeiDataType, - AccDataType, - CShuffleDataType, - ck::Tuple, - OutDataType, - InElementOp, - WeiElementOp, - OutElementOp, - ConvSpec, // ConvForwardSpecialization - GemmSpec, // GemmSpecialization - 1, // - 256, // BlockSize - 128, // MPerBlock - 256, // NPerBlock - 64, // 
KPerBlock - 16, // AK1 - 16, // BK1 - 32, // MPerXdl - 32, // NPerXdl - 2, // MXdlPerWave - 4, // NXdlPerWave - S<4, 64, 1>, // ABlockTransferThreadClusterLengths_AK0_M_AK1 - S<1, 0, 2>, // ABlockTransferThreadClusterArrangeOrder - S<1, 0, 2>, // ABlockTransferSrcAccessOrder - 2, // ABlockTransferSrcVectorDim - 16, // ABlockTransferSrcScalarPerVector - 16, // ABlockTransferDstScalarPerVector_AK1 - 1, // ABlockLdsExtraM - S<4, 64, 1>, // BBlockTransferThreadClusterLengths_BK0_N_BK1 - S<1, 0, 2>, // BBlockTransferThreadClusterArrangeOrder - S<1, 0, 2>, // BBlockTransferSrcAccessOrder - 2, // BBlockTransferSrcVectorDim - 16, // BBlockTransferSrcScalarPerVector - 16, // BBlockTransferDstScalarPerVector_BK1 - 1, // BBlockLdsExtraN - 1, - 1, - S<1, 64, 1, 4>, - 8>; - +#pragma once template c_host(out_g_n_k_wos_desc); + Tensor c_host(out_g_n_k_wos_desc); auto ref_conv = ck::tensor_operation::host::ReferenceConvFwd(); @@ -257,7 +167,7 @@ bool run_grouped_conv_fwd(bool do_verification, return (pass ? 
0 : 1); } -int main() +int run_conv2d_fwd_bias_relu_perchannel_quantization_example() { bool do_verification = true; bool time_kernel = true; @@ -268,7 +178,7 @@ int main() 1, // group 4, // batch 64, // output channels - 32, // input chanels + 192, // input chanels {3, 3}, // weight HW {71, 71}, // x HW {2, 2}, // strides @@ -312,8 +222,6 @@ int main() const auto out_g_n_k_wos_desc = ck::utils::conv::make_output_host_tensor_descriptor_g_n_k_wos_packed(conv_param); - std::cout << out_g_n_k_wos_desc << std::endl; - using deviceOp = DeviceGroupedConvNDFwdInstance -using S = ck::Sequence; - -using PassThrough = ck::tensor_operation::element_wise::PassThrough; -using InElementOp = PassThrough; -using WeiElementOp = PassThrough; -using ActivationOp = ck::tensor_operation::element_wise::Relu; -using OutElementOp = ck::tensor_operation::element_wise::Add_Activation_Mul_Clamp; - -static constexpr auto ConvSpec = - ck::tensor_operation::device::ConvolutionForwardSpecialization::Default; - -static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding; - -template -using DeviceGroupedConvNDFwdInstance = - ck::tensor_operation::device::DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< - NDimSpatial, - InLayout, - WeiLayout, - ck::Tuple, - OutLayout, - InDataType, - WeiDataType, - AccDataType, - CShuffleDataType, - ck::Tuple, - OutDataType, - InElementOp, - WeiElementOp, - OutElementOp, - ConvSpec, // ConvForwardSpecialization - GemmSpec, // GemmSpecialization - 1, // - 256, // BlockSize - 128, // MPerBlock - 256, // NPerBlock - 64, // KPerBlock - 16, // AK1 - 16, // BK1 - 32, // MPerXdl - 32, // NPerXdl - 2, // MXdlPerWave - 4, // NXdlPerWave - S<4, 64, 1>, // ABlockTransferThreadClusterLengths_AK0_M_AK1 - S<1, 0, 2>, // ABlockTransferThreadClusterArrangeOrder - S<1, 0, 2>, // ABlockTransferSrcAccessOrder - 2, // ABlockTransferSrcVectorDim - 16, // ABlockTransferSrcScalarPerVector - 16, // ABlockTransferDstScalarPerVector_AK1 - 1, // ABlockLdsExtraM 
- S<4, 64, 1>, // BBlockTransferThreadClusterLengths_BK0_N_BK1 - S<1, 0, 2>, // BBlockTransferThreadClusterArrangeOrder - S<1, 0, 2>, // BBlockTransferSrcAccessOrder - 2, // BBlockTransferSrcVectorDim - 16, // BBlockTransferSrcScalarPerVector - 16, // BBlockTransferDstScalarPerVector_BK1 - 1, // BBlockLdsExtraN - 1, - 1, - S<1, 64, 1, 4>, - 8>; +#pragma once template c_host(out_g_n_k_wos_desc); + Tensor c_host(out_g_n_k_wos_desc); auto ref_conv = ck::tensor_operation::host::ReferenceConvFwd(); @@ -242,7 +155,7 @@ bool run_grouped_conv_fwd(bool do_verification, return (pass ? 0 : 1); } -int main() +int run_conv2d_fwd_bias_relu_perlayer_quantization_example() { bool do_verification = true; bool time_kernel = true; @@ -253,7 +166,7 @@ int main() 1, // group 4, // batch 64, // output channels - 32, // input chanels + 192, // input chanels {3, 3}, // weight HW {71, 71}, // x HW {2, 2}, // strides @@ -294,8 +207,6 @@ int main() const auto out_g_n_k_wos_desc = ck::utils::conv::make_output_host_tensor_descriptor_g_n_k_wos_packed(conv_param); - std::cout << out_g_n_k_wos_desc << std::endl; - return run_grouped_conv_fwd< ndim_spatial, InDataType, diff --git a/example/40_conv2d_fwd_quantization/run_conv2d_fwd_perchannel_quantization_example.inc b/example/40_conv2d_fwd_quantization/run_conv2d_fwd_perchannel_quantization_example.inc new file mode 100644 index 0000000000..2e0623028d --- /dev/null +++ b/example/40_conv2d_fwd_quantization/run_conv2d_fwd_perchannel_quantization_example.inc @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#pragma once + +template +bool run_grouped_conv_fwd(bool do_verification, + bool time_kernel, + const ck::utils::conv::ConvParam& conv_param, + const HostTensorDescriptor& in_g_n_c_wis_desc, + const HostTensorDescriptor& wei_g_k_c_xs_desc, + const HostTensorDescriptor& requant_scale_g_k_desc, + const HostTensorDescriptor& out_g_n_k_wos_desc, + const InElementOp& in_element_op, + const WeiElementOp& wei_element_op, + const OutElementOp& out_element_op) +{ + Tensor in(in_g_n_c_wis_desc); + Tensor wei(wei_g_k_c_xs_desc); + Tensor requant_scale(requant_scale_g_k_desc); + Tensor out_host(out_g_n_k_wos_desc); + Tensor out_device(out_g_n_k_wos_desc); + + std::cout << "in: " << in.mDesc << std::endl; + std::cout << "wei: " << wei.mDesc << std::endl; + std::cout << "requant_scale: " << requant_scale.mDesc << std::endl; + std::cout << "out: " << out_host.mDesc << std::endl; + + in.GenerateTensorValue(GeneratorTensor_2{-128, 127}); + wei.GenerateTensorValue(GeneratorTensor_2{-128, 127}); + requant_scale.GenerateTensorValue(GeneratorTensor_2{0, 1}); + + DeviceMem in_device_buf(sizeof(InDataType) * in.mDesc.GetElementSpaceSize()); + DeviceMem wei_device_buf(sizeof(WeiDataType) * wei.mDesc.GetElementSpaceSize()); + DeviceMem requant_scale_device_buf(sizeof(RequantScaleDataType) * + requant_scale.mDesc.GetElementSpaceSize()); + DeviceMem out_device_buf(sizeof(OutDataType) * out_device.mDesc.GetElementSpaceSize()); + + in_device_buf.ToDevice(in.mData.data()); + wei_device_buf.ToDevice(wei.mData.data()); + requant_scale_device_buf.ToDevice(requant_scale.mData.data()); + + std::array a_g_n_c_wis_lengths{}; + std::array a_g_n_c_wis_strides{}; + std::array b_g_k_c_xs_lengths{}; + std::array b_g_k_c_xs_strides{}; + std::array d0_g_n_k_wos_lengths{}; + std::array d0_g_n_k_wos_strides{}; + std::array e_g_n_k_wos_lengths{}; + std::array e_g_n_k_wos_strides{}; + std::array conv_filter_strides{}; + std::array conv_filter_dilations{}; + std::array input_left_pads{}; + std::array 
input_right_pads{}; + + auto copy = [](const auto& x, auto& y) { ck::ranges::copy(x, y.begin()); }; + + copy(in_g_n_c_wis_desc.GetLengths(), a_g_n_c_wis_lengths); + copy(in_g_n_c_wis_desc.GetStrides(), a_g_n_c_wis_strides); + copy(wei_g_k_c_xs_desc.GetLengths(), b_g_k_c_xs_lengths); + copy(wei_g_k_c_xs_desc.GetStrides(), b_g_k_c_xs_strides); + copy(requant_scale_g_k_desc.GetLengths(), d0_g_n_k_wos_lengths); + copy(requant_scale_g_k_desc.GetStrides(), d0_g_n_k_wos_strides); + copy(out_g_n_k_wos_desc.GetLengths(), e_g_n_k_wos_lengths); + copy(out_g_n_k_wos_desc.GetStrides(), e_g_n_k_wos_strides); + copy(conv_param.conv_filter_strides_, conv_filter_strides); + copy(conv_param.conv_filter_dilations_, conv_filter_dilations); + copy(conv_param.input_left_pads_, input_left_pads); + copy(conv_param.input_right_pads_, input_right_pads); + + // do Conv + auto conv = DeviceConvNDFwdInstance{}; + auto invoker = conv.MakeInvoker(); + auto argument = conv.MakeArgument(in_device_buf.GetDeviceBuffer(), + wei_device_buf.GetDeviceBuffer(), + {requant_scale_device_buf.GetDeviceBuffer()}, + out_device_buf.GetDeviceBuffer(), + a_g_n_c_wis_lengths, + a_g_n_c_wis_strides, + b_g_k_c_xs_lengths, + b_g_k_c_xs_strides, + {d0_g_n_k_wos_lengths}, + {d0_g_n_k_wos_strides}, + e_g_n_k_wos_lengths, + e_g_n_k_wos_strides, + conv_filter_strides, + conv_filter_dilations, + input_left_pads, + input_right_pads, + in_element_op, + wei_element_op, + out_element_op); + + if(!conv.IsSupportedArgument(argument)) + { + throw std::runtime_error( + "wrong! 
device_conv with the specified compilation parameters does " + "not support this Conv problem"); + } + + float avg_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel}); + + std::size_t flop = conv_param.GetFlops(); + std::size_t num_btype = conv_param.GetByte(); + + float tflops = static_cast(flop) / 1.E9 / avg_time; + float gb_per_sec = num_btype / 1.E6 / avg_time; + std::cout << "Perf: " << avg_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s, " + << conv.GetTypeString() << std::endl; + + bool pass = true; + + if(do_verification) + { + Tensor c_host(out_g_n_k_wos_desc); + + auto ref_conv = ck::tensor_operation::host::ReferenceConvFwd(); + + auto ref_invoker = ref_conv.MakeInvoker(); + auto ref_argument = ref_conv.MakeArgument(in, + wei, + c_host, + conv_param.conv_filter_strides_, + conv_param.conv_filter_dilations_, + conv_param.input_left_pads_, + conv_param.input_right_pads_, + in_element_op, + wei_element_op, + PassThrough{}); + + ref_invoker.Run(ref_argument); + + // TODO: implement elementwise operation for host + out_host.ForEach([&](auto&, auto idx) { + out_element_op(out_host(idx), c_host(idx), requant_scale(idx)); + }); + + out_device_buf.FromDevice(out_device.mData.data()); + + pass &= + ck::utils::check_err(out_device, out_host, "Error: incorrect results!", 1e-5f, 1e-4f); + } + + return (pass ? 
0 : 1); +} + +int run_conv2d_fwd_perchannel_quantization_example() +{ + bool do_verification = true; + bool time_kernel = true; + const ck::index_t ndim_spatial = 2; + + ck::utils::conv::ConvParam conv_param{ + ndim_spatial, // n_dim + 1, // group + 4, // batch + 64, // output channels + 192, // input chanels + {3, 3}, // weight HW + {71, 71}, // x HW + {2, 2}, // strides + {1, 1}, // dilations + {1, 1}, // left_pads + {1, 1} // right_pads + }; + + const auto in_element_op = InElementOp{}; + const auto wei_element_op = WeiElementOp{}; + const auto out_element_op = OutElementOp{ActivationOp{}}; + + using InLayout = ck::tensor_layout::convolution::GNHWC; + using WeiLayout = ck::tensor_layout::convolution::GKYXC; + using RequantScaleLayout = ck::tensor_layout::convolution::G_K; + using OutLayout = ck::tensor_layout::convolution::GNHWK; + + const auto in_g_n_c_wis_desc = + ck::utils::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed(conv_param); + + const auto wei_g_k_c_xs_desc = + ck::utils::conv::make_weight_host_tensor_descriptor_g_k_c_xs_packed(conv_param); + + const auto requant_scale_g_k_desc = + HostTensorDescriptor({conv_param.G_, + conv_param.N_, + conv_param.K_, + conv_param.output_spatial_lengths_[0], + conv_param.output_spatial_lengths_[1]}, + { + conv_param.K_, // g + 0, // n + 1, // k + 0, // ho + 0 // wo + }); + + const auto out_g_n_k_wos_desc = + ck::utils::conv::make_output_host_tensor_descriptor_g_n_k_wos_packed(conv_param); + + using deviceOp = DeviceGroupedConvNDFwdInstance; + + return run_grouped_conv_fwd(do_verification, + time_kernel, + conv_param, + in_g_n_c_wis_desc, + wei_g_k_c_xs_desc, + requant_scale_g_k_desc, + out_g_n_k_wos_desc, + in_element_op, + wei_element_op, + out_element_op); +} diff --git a/example/44_conv2d_fwd_quantization/conv2d_fwd_xdl_perlayer_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/run_conv2d_fwd_perlayer_quantization_example.inc similarity index 71% rename from 
example/44_conv2d_fwd_quantization/conv2d_fwd_xdl_perlayer_quantization_int8.cpp rename to example/40_conv2d_fwd_quantization/run_conv2d_fwd_perlayer_quantization_example.inc index 2d46d86655..aeccb30cf2 100644 --- a/example/44_conv2d_fwd_quantization/conv2d_fwd_xdl_perlayer_quantization_int8.cpp +++ b/example/40_conv2d_fwd_quantization/run_conv2d_fwd_perlayer_quantization_example.inc @@ -1,89 +1,7 @@ // SPDX-License-Identifier: MIT // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. -#include "ck/ck.hpp" -#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" -#include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp" -#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" - -#include "ck/library/utility/algorithm.hpp" -#include "ck/library/utility/check_err.hpp" -#include "ck/library/utility/device_memory.hpp" -#include "ck/library/utility/host_tensor.hpp" -#include "ck/library/utility/host_tensor_generator.hpp" -#include "ck/library/utility/literals.hpp" -#include "ck/library/utility/convolution_parameter.hpp" -#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp" -#include "ck/library/reference_tensor_operation/cpu/reference_conv_fwd.hpp" - -using InDataType = int8_t; -using WeiDataType = int8_t; -using AccDataType = int32_t; -using CShuffleDataType = int32_t; -using OutDataType = int8_t; - -template -using S = ck::Sequence; - -using PassThrough = ck::tensor_operation::element_wise::PassThrough; -using InElementOp = PassThrough; -using WeiElementOp = PassThrough; -using ActivationOp = PassThrough; -using OutElementOp = ck::tensor_operation::element_wise::Activation_Mul_Clamp; - -static constexpr auto ConvSpec = - ck::tensor_operation::device::ConvolutionForwardSpecialization::Default; - -static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding; - -template -using DeviceGroupedConvNDFwdInstance = - 
ck::tensor_operation::device::DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< - NDimSpatial, - InLayout, - WeiLayout, - ck::Tuple<>, - OutLayout, - InDataType, - WeiDataType, - AccDataType, - CShuffleDataType, - ck::Tuple<>, - OutDataType, - InElementOp, - WeiElementOp, - OutElementOp, - ConvSpec, // ConvForwardSpecialization - GemmSpec, // GemmSpecialization - 1, // - 256, // BlockSize - 128, // MPerBlock - 256, // NPerBlock - 64, // KPerBlock - 16, // AK1 - 16, // BK1 - 32, // MPerXdl - 32, // NPerXdl - 2, // MXdlPerWave - 4, // NXdlPerWave - S<4, 64, 1>, // ABlockTransferThreadClusterLengths_AK0_M_AK1 - S<1, 0, 2>, // ABlockTransferThreadClusterArrangeOrder - S<1, 0, 2>, // ABlockTransferSrcAccessOrder - 2, // ABlockTransferSrcVectorDim - 16, // ABlockTransferSrcScalarPerVector - 16, // ABlockTransferDstScalarPerVector_AK1 - 1, // ABlockLdsExtraM - S<4, 64, 1>, // BBlockTransferThreadClusterLengths_BK0_N_BK1 - S<1, 0, 2>, // BBlockTransferThreadClusterArrangeOrder - S<1, 0, 2>, // BBlockTransferSrcAccessOrder - 2, // BBlockTransferSrcVectorDim - 16, // BBlockTransferSrcScalarPerVector - 16, // BBlockTransferDstScalarPerVector_BK1 - 1, // BBlockLdsExtraN - 1, - 1, - S<1, 64, 1, 4>, - 16>; +#pragma once template (x); - activationOp_(x_fp32, x_fp32); - float y_fp32 = math::clamp(requantScale_ * x_fp32, -128.f, 127.f); - y = ck::type_convert(y_fp32); + float y_fp32 = ck::type_convert(x); + activationOp_(y_fp32, y_fp32); + y_fp32 = math::clamp(requantScale_ * y_fp32, -128.f, 127.f); + y = ck::type_convert(y_fp32); } - __host__ __device__ constexpr void operator()(float& y, const int32_t& x) const + __device__ constexpr void operator()(int32_t& y, const int32_t& x) const { - // We might type_convert to int8 after lambda in someplace - float x_fp32 = ck::type_convert(x); - activationOp_(x_fp32, x_fp32); - y = math::clamp(requantScale_ * x_fp32, -128.f, 127.f); + // CAUSION - We might type_convert to int8 in threadwise copy + // eg. 
GridwiseGemmDlMultipleD_km_kn_mn + float y_fp32 = ck::type_convert(x); + activationOp_(y_fp32, y_fp32); + y_fp32 = math::clamp(requantScale_ * y_fp32, -128.f, 127.f); + y = ck::type_convert(y_fp32); + } + + __host__ constexpr void operator()(float& y, const float& x) const + { + // CAUSION - We might float in & float out in reference code + activationOp_(y, x); + y = math::clamp(requantScale_ * y, -128.f, 127.f); } float requantScale_; @@ -51,6 +61,17 @@ struct Activation_Mul2_Clamp y = ck::type_convert(y_fp32); } + __device__ constexpr void + operator()(int32_t& y, const int32_t& x, const float& requantScale) const + { + // CAUSION - We might type_convert to int8 in threadwise copy + // eg. GridwiseGemmDlMultipleD_km_kn_mn + float y_fp32 = ck::type_convert(x); + activationOp_(y_fp32, y_fp32); + y_fp32 = math::clamp(requantScale * y_fp32, -128.f, 127.f); + y = ck::type_convert(y_fp32); + } + Activation activationOp_; }; @@ -72,6 +93,17 @@ struct Add_Activation_Mul_Clamp y = ck::type_convert(y_fp32); } + __host__ __device__ constexpr void + operator()(int32_t& y, const int32_t& x, const int32_t& bias) const + { + // CAUSION - We might type_convert to int8 in threadwise copy + // eg. GridwiseGemmDlMultipleD_km_kn_mn + float y_fp32 = ck::type_convert(x + bias); + activationOp_(y_fp32, y_fp32); + y_fp32 = math::clamp(requantScale_ * y_fp32, -128.f, 127.f); + y = ck::type_convert(y_fp32); + } + float requantScale_; Activation activationOp_; }; @@ -92,6 +124,17 @@ struct Add_Activation_Mul2_Clamp y = ck::type_convert(y_fp32); } + __host__ __device__ constexpr void + operator()(int32_t& y, const int32_t& x, const int32_t& bias, const float& requantScale) const + { + // CAUSION - We might type_convert to int8 in threadwise copy + // eg. 
GridwiseGemmDlMultipleD_km_kn_mn + float y_fp32 = ck::type_convert(x + bias); + activationOp_(y_fp32, y_fp32); + y_fp32 = math::clamp(requantScale * y_fp32, -128.f, 127.f); + y = ck::type_convert(y_fp32); + } + Activation activationOp_; }; diff --git a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_dl_multiple_d.hpp b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_dl_multiple_d.hpp index 6ade6eb226..9c68b4f5c3 100644 --- a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_dl_multiple_d.hpp +++ b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_dl_multiple_d.hpp @@ -185,8 +185,10 @@ struct GridwiseGemmDlMultipleD_km_kn_mn return b_grid_desc_k0_n0_n1_k1; } + // E desc for destination in blockwise copy + template __host__ __device__ static constexpr auto - MakeCGridDescriptor_M0_M10_M11_N0_N10_N11(const CGridDesc_M_N& c_grid_desc_m_n) + MakeCGridDescriptor_M0_M10_M11_N0_N10_N11(const CGridDesc_M_N_& c_grid_desc_m_n) { const auto M = c_grid_desc_m_n.GetLength(I0); const auto N = c_grid_desc_m_n.GetLength(I1); diff --git a/include/ck/utility/inner_product.hpp b/include/ck/utility/inner_product.hpp index 0f45ec177a..b65640bfff 100644 --- a/include/ck/utility/inner_product.hpp +++ b/include/ck/utility/inner_product.hpp @@ -135,6 +135,28 @@ __device__ void inner_product(const half8_t& a, const h c); } +template <> +__device__ void inner_product(const int8_t& a, const int8_t& b, int32_t& c) +{ + c += type_convert(a) * type_convert(b); +} + +template <> +__device__ void +inner_product(const int8x2_t& a, const int8x2_t& b, int32_t& c) +{ + constexpr auto I0 = Number<0>{}; + constexpr auto I1 = Number<1>{}; + + inner_product(vector_type{a}.AsType()[I0], + vector_type{b}.AsType()[I0], + c); + + inner_product(vector_type{a}.AsType()[I1], + vector_type{b}.AsType()[I1], + c); +} + template <> __device__ void inner_product(const int8x4_t& a, const int8x4_t& b, int32_t& c) diff --git 
a/library/include/ck/library/tensor_operation_instance/gpu/quantization/gemm_quantization.hpp b/library/include/ck/library/tensor_operation_instance/gpu/quantization/gemm_quantization.hpp new file mode 100644 index 0000000000..2fd7ce22f7 --- /dev/null +++ b/library/include/ck/library/tensor_operation_instance/gpu/quantization/gemm_quantization.hpp @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. + +#pragma once + +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/device_gemm_multiple_d.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +#include "ck/library/tensor_operation_instance/device_operation_instance_factory.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +// Layout(A, B, C) = [Col, Row, Row] +void add_device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_kn_mn_instances( + std::vector>>>& + instances); + +// Layout(A, B, C) = [Col, Col, Row] +void add_device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_nk_mn_instances( + std::vector>>>& + instances); + +// Layout(A, B, C) = [Row, Row, Row] +void add_device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_kn_mn_instances( + std::vector>>>& + instances); + +// Layout(A, B, C) = [Row, Col, Row] +void add_device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_nk_mn_instances( + std::vector>>>& + instances); + +// Layout(A, B, C) = [Col, Row, Row] +void add_device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_kn_mn_instances( + std::vector>>>& + instances); + +// Layout(A, B, C) = [Col, Col, Row] +void add_device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_nk_mn_instances( + std::vector>>>& + instances); + +// Layout(A, B, C) = [Row, Row, Row] +void add_device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_kn_mn_instances( + std::vector>>>& + instances); + +// Layout(A, B, C) = [Row, Col, Row] 
+void add_device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_nk_mn_instances( + std::vector>>>& + instances); + +template +struct DeviceOperationInstanceFactory>> +{ + using DeviceOp = DeviceGemmMultipleD>; + + static auto GetInstances() + { + std::vector> op_ptrs; + + if constexpr(is_same_v && is_same_v && + is_same_v) + { + if constexpr(is_same_v && is_same_v && + is_same_v) + { + if constexpr(is_same_v) + { + add_device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_kn_mn_instances(op_ptrs); + add_device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_kn_mn_instances(op_ptrs); + } + } + else if constexpr(is_same_v && is_same_v && + is_same_v) + { + if constexpr(is_same_v) + { + add_device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_nk_mn_instances(op_ptrs); + add_device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_nk_mn_instances(op_ptrs); + } + } + else if constexpr(is_same_v && is_same_v && + is_same_v) + { + if constexpr(is_same_v) + { + add_device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_kn_mn_instances(op_ptrs); + add_device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_kn_mn_instances(op_ptrs); + } + } + else if constexpr(is_same_v && is_same_v && + is_same_v) + { + if constexpr(is_same_v) + { + add_device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_nk_mn_instances(op_ptrs); + add_device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_nk_mn_instances(op_ptrs); + } + } + + return op_ptrs; + } + } +}; + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perchannel_quantization.hpp b/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perchannel_quantization.hpp index eda81a233c..57c971e52e 100644 --- a/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perchannel_quantization.hpp +++ 
b/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perchannel_quantization.hpp @@ -18,7 +18,7 @@ namespace device { namespace instance { // grouped conv2d forward, GNHWC/GKYXC/GNHWK -void add_device_conv2d_bias_perchannel_quantization_int8_instances( +void add_device_conv2d_dl_bias_perchannel_quantization_int8_instances( std::vector< std::unique_ptr>>>& instances); -void add_device_conv2d_bias_relu_perchannel_quantization_int8_instances( +void add_device_conv2d_dl_bias_relu_perchannel_quantization_int8_instances( + std::vector>>>& + instances); + +void add_device_conv2d_xdl_bias_perchannel_quantization_int8_instances( + std::vector< + std::unique_ptr>>>& + instances); + +void add_device_conv2d_xdl_bias_relu_perchannel_quantization_int8_instances( std::vector && is_same_v) { if constexpr(is_same_v) - add_device_conv2d_bias_perchannel_quantization_int8_instances(op_ptrs); + { + add_device_conv2d_dl_bias_perchannel_quantization_int8_instances(op_ptrs); + add_device_conv2d_xdl_bias_perchannel_quantization_int8_instances(op_ptrs); + } else if constexpr(is_same_v) - add_device_conv2d_bias_relu_perchannel_quantization_int8_instances(op_ptrs); + { + add_device_conv2d_dl_bias_relu_perchannel_quantization_int8_instances(op_ptrs); + add_device_conv2d_xdl_bias_relu_perchannel_quantization_int8_instances(op_ptrs); + } } } diff --git a/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perlayer_quantization.hpp b/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perlayer_quantization.hpp index 1138402638..9f8ac9b7b1 100644 --- a/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perlayer_quantization.hpp +++ b/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perlayer_quantization.hpp @@ -18,7 +18,7 @@ namespace device 
{ namespace instance { // grouped conv2d forward, GNHWC/GKYXC/GNHWK -void add_device_conv2d_bias_perlayer_quantization_int8_instances( +void add_device_conv2d_dl_bias_perlayer_quantization_int8_instances( std::vector< std::unique_ptr>>>& instances); -void add_device_conv2d_bias_relu_perlayer_quantization_int8_instances( +void add_device_conv2d_dl_bias_relu_perlayer_quantization_int8_instances( + std::vector>>>& + instances); + +void add_device_conv2d_xdl_bias_perlayer_quantization_int8_instances( + std::vector< + std::unique_ptr>>>& + instances); + +void add_device_conv2d_xdl_bias_relu_perlayer_quantization_int8_instances( std::vector && is_same_v) { if constexpr(is_same_v) - add_device_conv2d_bias_perlayer_quantization_int8_instances(op_ptrs); + { + add_device_conv2d_dl_bias_perlayer_quantization_int8_instances(op_ptrs); + add_device_conv2d_xdl_bias_perlayer_quantization_int8_instances(op_ptrs); + } else if constexpr(is_same_v) - add_device_conv2d_bias_relu_perlayer_quantization_int8_instances(op_ptrs); + { + add_device_conv2d_dl_bias_relu_perlayer_quantization_int8_instances(op_ptrs); + add_device_conv2d_xdl_bias_relu_perlayer_quantization_int8_instances(op_ptrs); + } } } diff --git a/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_forward_perchannel_quantization.hpp b/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_forward_perchannel_quantization.hpp index 1a67ce5688..089343fe6f 100644 --- a/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_forward_perchannel_quantization.hpp +++ b/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_forward_perchannel_quantization.hpp @@ -18,7 +18,7 @@ namespace device { namespace instance { // grouped conv2d forward, GNHWC/GKYXC/GNHWK -void add_device_conv2d_perchannel_quantization_int8_instances( +void add_device_conv2d_dl_perchannel_quantization_int8_instances( 
std::vector>>>& instances); -void add_device_conv2d_relu_perchannel_quantization_int8_instances( +void add_device_conv2d_dl_relu_perchannel_quantization_int8_instances( + std::vector>>>& + instances); + +void add_device_conv2d_xdl_perchannel_quantization_int8_instances( + std::vector>>>& + instances); + +void add_device_conv2d_xdl_relu_perchannel_quantization_int8_instances( std::vector) { if constexpr(is_same_v) - add_device_conv2d_perchannel_quantization_int8_instances(op_ptrs); + { + add_device_conv2d_dl_perchannel_quantization_int8_instances(op_ptrs); + add_device_conv2d_xdl_perchannel_quantization_int8_instances(op_ptrs); + } else if constexpr(is_same_v) - add_device_conv2d_relu_perchannel_quantization_int8_instances(op_ptrs); + { + add_device_conv2d_dl_relu_perchannel_quantization_int8_instances(op_ptrs); + add_device_conv2d_xdl_relu_perchannel_quantization_int8_instances(op_ptrs); + } } } diff --git a/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_forward_perlayer_quantization.hpp b/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_forward_perlayer_quantization.hpp index 410be4a57b..e570027eb5 100644 --- a/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_forward_perlayer_quantization.hpp +++ b/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_forward_perlayer_quantization.hpp @@ -18,7 +18,7 @@ namespace device { namespace instance { // grouped conv2d forward, GNHWC/GKYXC/GNHWK -void add_device_conv2d_perlayer_quantization_int8_instances( +void add_device_conv2d_dl_perlayer_quantization_int8_instances( std::vector>>>& instances); -void add_device_conv2d_relu_perlayer_quantization_int8_instances( +void add_device_conv2d_dl_relu_perlayer_quantization_int8_instances( + std::vector>>>& + instances); + +void add_device_conv2d_xdl_perlayer_quantization_int8_instances( + std::vector>>>& + instances); + 
+void add_device_conv2d_xdl_relu_perlayer_quantization_int8_instances( std::vector) { if constexpr(is_same_v) - add_device_conv2d_perlayer_quantization_int8_instances(op_ptrs); + { + add_device_conv2d_dl_perlayer_quantization_int8_instances(op_ptrs); + add_device_conv2d_xdl_perlayer_quantization_int8_instances(op_ptrs); + } else if constexpr(is_same_v) - add_device_conv2d_relu_perlayer_quantization_int8_instances(op_ptrs); + { + add_device_conv2d_dl_relu_perlayer_quantization_int8_instances(op_ptrs); + add_device_conv2d_xdl_relu_perlayer_quantization_int8_instances(op_ptrs); + } } } diff --git a/library/src/tensor_operation_instance/gpu/quantization/CMakeLists.txt b/library/src/tensor_operation_instance/gpu/quantization/CMakeLists.txt index 9f826afd68..db650dbfbd 100644 --- a/library/src/tensor_operation_instance/gpu/quantization/CMakeLists.txt +++ b/library/src/tensor_operation_instance/gpu/quantization/CMakeLists.txt @@ -1,6 +1,38 @@ -add_instance_library(device_quantization_instance - device_conv2d_xdl_bias_perchannel_quantization_int8_instance.cpp - device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp - device_conv2d_xdl_perchannel_quantization_int8_instance.cpp - device_conv2d_xdl_perlayer_quantization_int8_instance.cpp +set(CONV2D_PERLAYER_QUANT_SRC + conv2d_fwd/device_conv2d_dl_perlayer_quantization_int8_instance.cpp + conv2d_fwd/device_conv2d_xdl_perlayer_quantization_int8_instance.cpp +) + +set(CONV2D_PERCHANNEL_QUANT_SRC + conv2d_fwd/device_conv2d_dl_perchannel_quantization_int8_instance.cpp + conv2d_fwd/device_conv2d_xdl_perchannel_quantization_int8_instance.cpp +) + +set(CONV2D_BIAS_PERLAYER_QUANT_SRC + conv2d_fwd/device_conv2d_dl_bias_perlayer_quantization_int8_instance.cpp + conv2d_fwd/device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp +) + +set(CONV2D_BIAS_PERCHANNEL_QUANT_SRC + conv2d_fwd/device_conv2d_dl_bias_perchannel_quantization_int8_instance.cpp + conv2d_fwd/device_conv2d_xdl_bias_perchannel_quantization_int8_instance.cpp 
+) + +set(GEMM_QUANT_SRC + gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_kn_mn_instance.cpp + gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_nk_mn_instance.cpp + gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_kn_mn_instance.cpp + gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_nk_mn_instance.cpp + gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_kn_mn_instance.cpp + gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_nk_mn_instance.cpp + gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_kn_mn_instance.cpp + gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_nk_mn_instance.cpp +) + +add_instance_library(device_quantization_instance + ${CONV2D_PERLAYER_QUANT_SRC} + ${CONV2D_PERCHANNEL_QUANT_SRC} + ${CONV2D_BIAS_PERLAYER_QUANT_SRC} + ${CONV2D_BIAS_PERCHANNEL_QUANT_SRC} + ${GEMM_QUANT_SRC} ) diff --git a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/conv2d_quantization_common.hpp b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/conv2d_quantization_common.hpp new file mode 100644 index 0000000000..7729e42638 --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/conv2d_quantization_common.hpp @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#pragma once + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" +#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp" +#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" +#include "ck/tensor_operation/gpu/device/convolution_forward_specialization.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +using Empty_Tuple = ck::Tuple<>; +template +using S = ck::Sequence; + +using GNHWC = ck::tensor_layout::convolution::GNHWC; +using GKYXC = ck::tensor_layout::convolution::GKYXC; +using GNHWK = ck::tensor_layout::convolution::GNHWK; +using GK = ck::tensor_layout::convolution::G_K; +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using Relu = ck::tensor_operation::element_wise::Relu; + +using GK_Tuple = ck::Tuple; +using GK_GK_Tuple = ck::Tuple; +using I32_Tuple = ck::Tuple; +using F32_Tuple = ck::Tuple; +using I32_F32_Tuple = ck::Tuple; + +using Mul_Clamp = ck::tensor_operation::element_wise::Activation_Mul_Clamp; +using Relu_Mul_Clamp = ck::tensor_operation::element_wise::Activation_Mul_Clamp; + +using Add_Mul_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul_Clamp; +using Add_Relu_Mul_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul_Clamp; + +using Mul2_Clamp = ck::tensor_operation::element_wise::Activation_Mul2_Clamp; +using Relu_Mul2_Clamp = ck::tensor_operation::element_wise::Activation_Mul2_Clamp; + +using Add_Mul2_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul2_Clamp; +using Add_Relu_Mul2_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul2_Clamp; + +static constexpr ck::index_t NDimSpatial = 2; +static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding; +static constexpr auto ConvFwdDefault = + 
ck::tensor_operation::device::ConvolutionForwardSpecialization::Default; +static constexpr auto ConvFwd1x1P0 = + ck::tensor_operation::device::ConvolutionForwardSpecialization::Filter1x1Pad0; +static constexpr auto ConvFwd1x1S1P0 = + ck::tensor_operation::device::ConvolutionForwardSpecialization::Filter1x1Stride1Pad0; + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perchannel_quantization_int8_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perchannel_quantization_int8_instance.cpp new file mode 100644 index 0000000000..ba2451101e --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perchannel_quantization_int8_instance.cpp @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "device_conv2d_dl_int8_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { +void add_device_conv2d_dl_bias_perchannel_quantization_int8_instances( + std::vector>>& instances) +{ + // dl + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); +} + +void add_device_conv2d_dl_bias_relu_perchannel_quantization_int8_instances( + std::vector>>& instances) +{ + // dl + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); +} +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perlayer_quantization_int8_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perlayer_quantization_int8_instance.cpp new file mode 100644 index 0000000000..ea1c953bb2 --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perlayer_quantization_int8_instance.cpp @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "device_conv2d_dl_int8_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { +void add_device_conv2d_dl_bias_perlayer_quantization_int8_instances( + std::vector>>& instances) +{ + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); +} + +void add_device_conv2d_dl_bias_relu_perlayer_quantization_int8_instances( + std::vector>>& instances) +{ + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); +} +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_int8_instance.hpp b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_int8_instance.hpp new file mode 100644 index 0000000000..3c4987f159 --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_int8_instance.hpp @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#pragma once + +#include "conv2d_quantization_common.hpp" +#include "ck/tensor_operation/gpu/device/device_grouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +// clang-format off +template +using device_grouped_conv2d_dl_int8_instances = + std::tuple< + // ###########################################| NDim| InData| WeiData| MultpleD| OutData| AccData| InLayout| WeiLayout| MultipleD| OutLayout| In| Wei| Out| Convolution| GEMM| Block| MPer| NPer| K0Per| K1| M1Per| N1Per| KPer| M11N11Thread| M11N11Thread| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| CThreadTransfer| CThreadTransfer| CThreadTransfer| + // ###########################################| Spatial| Type| Type| Type| Type| Type| | | Layout| | Elementwise| Elementwise| Elementwise| Forward| Spacialization| Size| Block| Block| Block| | ThreadM111| ThreadN111| Thread| ClusterM110Xs| ClusterN110Xs| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| SrcDstAccess| SrcDstVectorDim| DstScalarPerVector| + // ###########################################| | | | | | | | | | | Operation| Operation| Operation| Specialization| | | | | | | | | | | | K0_M0_M1_K1| K0_M0_M1_K1| ArrangeOrder| Order| Lengths_K0_M0_M1_K1| ContiguousDimOrder| Lengths_K0_M0_M1_K1| K0_N0_N1_K1| K0_N0_N1_K1| ArrangeOrder| Order| Lengths_K0_N0_N1_K1| ContiguousDimOrder| Lengths_K0_N0_N1_K1| Order| | | + // ###########################################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGroupedConvFwdDlMultipleD_NHWC_KYXC_NHWK< NDimSpatial, int8_t, 
int8_t, DsDatatype, int8_t, int32_t, GNHWC, GKYXC, DsLayout, GNHWK, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 256, 128, 128, 16, 4, 4, 4, 1, S<8, 2>, S<8, 2>, S<8, 1, 1, 4>, S<2, 1, 128, 1>, S<1, 2, 0, 3>, S<1, 2, 0, 3>, S<4, 1, 1, 4>, S<1, 2, 0, 3>, S<1, 1, 1, 4>, S<8, 1, 1, 4>, S<2, 1, 128, 1>, S<1, 2, 0, 3>, S<1, 2, 0, 3>, S<4, 1, 1, 4>, S<1, 2, 0, 3>, S<1, 1, 1, 4>, S<0, 1, 2, 3, 4, 5>, 5, DstScalarPerVector> + >; +// clang-format on + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_perchannel_quantization_int8_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_perchannel_quantization_int8_instance.cpp new file mode 100644 index 0000000000..d45c1c1ee7 --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_perchannel_quantization_int8_instance.cpp @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "device_conv2d_dl_int8_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { +void add_device_conv2d_dl_perchannel_quantization_int8_instances( + std::vector>>& instances) +{ + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); +} + +void add_device_conv2d_dl_relu_perchannel_quantization_int8_instances( + std::vector>>& instances) +{ + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); +} +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_perlayer_quantization_int8_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_perlayer_quantization_int8_instance.cpp new file mode 100644 index 0000000000..f4b9479502 --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_perlayer_quantization_int8_instance.cpp @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "device_conv2d_dl_int8_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { +void add_device_conv2d_dl_perlayer_quantization_int8_instances( + std::vector>>& instances) +{ + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); +} + +void add_device_conv2d_dl_relu_perlayer_quantization_int8_instances( + std::vector>>& instances) +{ + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); +} +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_bias_perchannel_quantization_int8_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perchannel_quantization_int8_instance.cpp similarity index 67% rename from library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_bias_perchannel_quantization_int8_instance.cpp rename to library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perchannel_quantization_int8_instance.cpp index e87e987593..25e2cda9cb 100644 --- a/library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_bias_perchannel_quantization_int8_instance.cpp +++ b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perchannel_quantization_int8_instance.cpp @@ -7,7 +7,7 @@ namespace ck { namespace tensor_operation { namespace device { namespace instance { -void 
add_device_conv2d_bias_perchannel_quantization_int8_instances( +void add_device_conv2d_xdl_bias_perchannel_quantization_int8_instances( std::vector>>& instances) { add_device_operation_instances(instances, - device_conv2d_int8_32Ds_instances{}); + device_grouped_conv2d_xdl_int8_instances{}); add_device_operation_instances(instances, - device_conv2d_int8_32Ds_instances{}); + device_grouped_conv2d_xdl_int8_instances{}); add_device_operation_instances(instances, - device_conv2d_int8_32Ds_instances{}); + device_grouped_conv2d_xdl_int8_instances{}); } -void add_device_conv2d_bias_relu_perchannel_quantization_int8_instances( +void add_device_conv2d_xdl_bias_relu_perchannel_quantization_int8_instances( std::vector>>& instances) { add_device_operation_instances(instances, - device_conv2d_int8_32Ds_instances{}); + device_grouped_conv2d_xdl_int8_instances{}); add_device_operation_instances(instances, - device_conv2d_int8_32Ds_instances{}); + device_grouped_conv2d_xdl_int8_instances{}); add_device_operation_instances(instances, - device_conv2d_int8_32Ds_instances{}); + device_grouped_conv2d_xdl_int8_instances{}); } } // namespace instance } // namespace device diff --git a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp new file mode 100644 index 0000000000..d598d3d38e --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "device_conv2d_xdl_int8_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { +void add_device_conv2d_xdl_bias_perlayer_quantization_int8_instances( + std::vector>>& instances) +{ + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); +} + +void add_device_conv2d_xdl_bias_relu_perlayer_quantization_int8_instances( + std::vector>>& instances) +{ + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); + + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); + + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); +} +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_int8_instance.hpp b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_int8_instance.hpp new file mode 100644 index 0000000000..262ec06b7f --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_int8_instance.hpp @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#pragma once + +#include "conv2d_quantization_common.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +// clang-format off +template +using device_grouped_conv2d_xdl_int8_instances = + std::tuple < + //########################################| NumDim| A| B| Ds| E| AData| BData| AccData| CShuffle| Ds| EData| A| B| CDE| ConvForward| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| + //########################################| Spatial| Layout| Layout| Layout| Layout| Type| Type| Type| DataType| DataType| Type| Elementwise| Elementwise| Elementwise| Specialization| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| + //########################################| | | | | | | | | | | | Operation| Operation| Operation| | | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| + //########################################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGroupedConvFwdMultipleD_Xdl_CShuffle, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, DstScalarPerVector>, + 
DeviceGroupedConvFwdMultipleD_Xdl_CShuffle, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, DstScalarPerVector>, + DeviceGroupedConvFwdMultipleD_Xdl_CShuffle, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, DstScalarPerVector>, + DeviceGroupedConvFwdMultipleD_Xdl_CShuffle, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, DstScalarPerVector>, + DeviceGroupedConvFwdMultipleD_Xdl_CShuffle, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 2>, DstScalarPerVector>, + DeviceGroupedConvFwdMultipleD_Xdl_CShuffle, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, DstScalarPerVector>, + DeviceGroupedConvFwdMultipleD_Xdl_CShuffle, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 2>, DstScalarPerVector>, + DeviceGroupedConvFwdMultipleD_Xdl_CShuffle, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, DstScalarPerVector>, + DeviceGroupedConvFwdMultipleD_Xdl_CShuffle, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, DstScalarPerVector>, + DeviceGroupedConvFwdMultipleD_Xdl_CShuffle, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 2>, DstScalarPerVector>, + DeviceGroupedConvFwdMultipleD_Xdl_CShuffle, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, DstScalarPerVector>, + DeviceGroupedConvFwdMultipleD_Xdl_CShuffle, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 2>, DstScalarPerVector>, + 
DeviceGroupedConvFwdMultipleD_Xdl_CShuffle, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 2>, DstScalarPerVector> + >; +// clang-format on + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_perchannel_quantization_int8_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_perchannel_quantization_int8_instance.cpp new file mode 100644 index 0000000000..b0f86b5c2e --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_perchannel_quantization_int8_instance.cpp @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. + +#include "device_conv2d_xdl_int8_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { +void add_device_conv2d_xdl_perchannel_quantization_int8_instances( + std::vector>>& instances) +{ + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); +} + +void add_device_conv2d_xdl_relu_perchannel_quantization_int8_instances( + std::vector>>& instances) +{ + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); +} +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git 
a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_perlayer_quantization_int8_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_perlayer_quantization_int8_instance.cpp new file mode 100644 index 0000000000..4812ceecfd --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_perlayer_quantization_int8_instance.cpp @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. + +#include "device_conv2d_xdl_int8_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { +void add_device_conv2d_xdl_perlayer_quantization_int8_instances( + std::vector>>& instances) +{ + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); +} + +void add_device_conv2d_xdl_relu_perlayer_quantization_int8_instances( + std::vector>>& instances) +{ + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); +} +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp deleted file mode 100644 index 06eed76014..0000000000 --- a/library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp 
+++ /dev/null @@ -1,68 +0,0 @@ -// SPDX-License-Identifier: MIT -// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. - -#include "device_conv2d_xdl_int8_instance.hpp" - -namespace ck { -namespace tensor_operation { -namespace device { -namespace instance { -void add_device_conv2d_bias_perlayer_quantization_int8_instances( - std::vector>>& instances) -{ - add_device_operation_instances( - instances, - device_conv2d_int8_32Ds_instances{}); - add_device_operation_instances( - instances, - device_conv2d_int8_32Ds_instances{}); - add_device_operation_instances( - instances, - device_conv2d_int8_32Ds_instances{}); -} - -void add_device_conv2d_bias_relu_perlayer_quantization_int8_instances( - std::vector>>& instances) -{ - add_device_operation_instances(instances, - device_conv2d_int8_32Ds_instances{}); - - add_device_operation_instances( - instances, - device_conv2d_int8_32Ds_instances{}); - - add_device_operation_instances(instances, - device_conv2d_int8_32Ds_instances{}); -} -} // namespace instance -} // namespace device -} // namespace tensor_operation -} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_int8_instance.hpp b/library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_int8_instance.hpp deleted file mode 100644 index 6904e269f9..0000000000 --- a/library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_int8_instance.hpp +++ /dev/null @@ -1,111 +0,0 @@ -// SPDX-License-Identifier: MIT -// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
- -#include "ck/ck.hpp" -#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" -#include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp" -#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" -#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp" - -namespace ck { -namespace tensor_operation { -namespace device { -namespace instance { - -using Empty_Tuple = ck::Tuple<>; -template -using S = ck::Sequence; - -using GNHWC = ck::tensor_layout::convolution::GNHWC; -using GKYXC = ck::tensor_layout::convolution::GKYXC; -using GNHWK = ck::tensor_layout::convolution::GNHWK; -using GK = ck::tensor_layout::convolution::G_K; -using PassThrough = ck::tensor_operation::element_wise::PassThrough; -using Relu = ck::tensor_operation::element_wise::Relu; - -using GK_Tuple = ck::Tuple; -using GK_GK_Tuple = ck::Tuple; -using I32_Tuple = ck::Tuple; -using F32_Tuple = ck::Tuple; -using I32_F32_Tuple = ck::Tuple; - -using Mul_Clamp = ck::tensor_operation::element_wise::Activation_Mul_Clamp; -using Relu_Mul_Clamp = ck::tensor_operation::element_wise::Activation_Mul_Clamp; - -using Add_Mul_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul_Clamp; -using Add_Relu_Mul_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul_Clamp; - -using Mul2_Clamp = ck::tensor_operation::element_wise::Activation_Mul2_Clamp; -using Relu_Mul2_Clamp = ck::tensor_operation::element_wise::Activation_Mul2_Clamp; - -using Add_Mul2_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul2_Clamp; -using Add_Relu_Mul2_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul2_Clamp; - -static constexpr ck::index_t NDimSpatial = 2; -static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding; -static constexpr auto ConvFwdDefault = - ck::tensor_operation::device::ConvolutionForwardSpecialization::Default; -static constexpr auto ConvFwd1x1P0 = - 
ck::tensor_operation::device::ConvolutionForwardSpecialization::Filter1x1Pad0; -static constexpr auto ConvFwd1x1S1P0 = - ck::tensor_operation::device::ConvolutionForwardSpecialization::Filter1x1Stride1Pad0; - -template -// clang-format off -using device_conv2d_int8_instances = - std::tuple < - //########################################| NumDim| A| B| Ds| E| AData| BData| AccData| CShuffle| Ds| EData| A| B| CDE| ConvForward| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| - //########################################| Spatial| Layout| Layout| Layout| Layout| Type| Type| Type| DataType| DataType| Type| Elementwise| Elementwise| Elementwise| Specialization| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| - //########################################| | | | | | | | | | | | Operation| Operation| Operation| | | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| - //########################################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 256, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 
64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 16>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 128, 256, 64, 16, 16, 32, 32, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 16>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 128, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, 16>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 128, 128, 64, 16, 16, 32, 32, 2, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 16>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 128, 64, 64, 16, 16, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 2>, 16>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 64, 128, 64, 16, 16, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, 16>, - 
DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 64, 64, 64, 64, 16, 16, 32, 32, 2, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 2>, 16>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 128, 64, 64, 16, 16, 32, 32, 2, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 16>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 64, 128, 64, 16, 16, 32, 32, 1, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 16>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 128, 32, 64, 16, 16, 32, 32, 2, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 2>, 16>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 32, 128, 64, 16, 16, 32, 32, 1, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, 16>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, 
ConvSpec, GemmSpec, 1, 64, 64, 32, 64, 16, 16, 32, 32, 2, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 2>, 16>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 64, 32, 64, 64, 16, 16, 32, 32, 1, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 2>, 16> - >; -// clang-format on - -// for conv + multiple of 32 bit Ds. bit of Ds will affect the ScalarPerVector of C -template -// clang-format off -using device_conv2d_int8_32Ds_instances = - std::tuple < - //########################################| NumDim| A| B| Ds| E| AData| BData| AccData| CShuffle| Ds| EData| A| B| CDE| ConvForward| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| - //########################################| Spatial| Layout| Layout| Layout| Layout| Type| Type| Type| DataType| DataType| Type| Elementwise| Elementwise| Elementwise| Specialization| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| - //########################################| | | | | | | | | | | | Operation| Operation| Operation| | | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | 
PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| - //########################################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 256, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 8>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 128, 256, 64, 16, 16, 32, 32, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 8>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 128, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, 8>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 128, 128, 64, 16, 16, 32, 32, 2, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 8>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 128, 64, 64, 16, 16, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 
2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 2>, 8>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 64, 128, 64, 16, 16, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, 8>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 64, 64, 64, 64, 16, 16, 32, 32, 2, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 2>, 8>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 128, 64, 64, 16, 16, 32, 32, 2, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 8>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 64, 128, 64, 16, 16, 32, 32, 1, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 8>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 128, 32, 64, 16, 16, 32, 32, 2, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 2>, 8>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, 
DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 32, 128, 64, 16, 16, 32, 32, 1, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, 8>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 64, 64, 32, 64, 16, 16, 32, 32, 2, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 2>, 8>, - DeviceGroupedConvFwdMultipleD_Xdl_CShuffle< 2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 64, 32, 64, 64, 16, 16, 32, 32, 1, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 2>, 8> - >; -// clang-format on - -} // namespace instance -} // namespace device -} // namespace tensor_operation -} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_perchannel_quantization_int8_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_perchannel_quantization_int8_instance.cpp deleted file mode 100644 index 5f1aa0c5c7..0000000000 --- a/library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_perchannel_quantization_int8_instance.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// SPDX-License-Identifier: MIT -// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
- -#include "device_conv2d_xdl_int8_instance.hpp" - -namespace ck { -namespace tensor_operation { -namespace device { -namespace instance { -void add_device_conv2d_perchannel_quantization_int8_instances( - std::vector>>& instances) -{ - add_device_operation_instances( - instances, - device_conv2d_int8_32Ds_instances{}); - add_device_operation_instances( - instances, - device_conv2d_int8_32Ds_instances{}); - add_device_operation_instances( - instances, - device_conv2d_int8_32Ds_instances{}); -} - -void add_device_conv2d_relu_perchannel_quantization_int8_instances( - std::vector>>& instances) -{ - add_device_operation_instances( - instances, - device_conv2d_int8_32Ds_instances{}); - add_device_operation_instances( - instances, - device_conv2d_int8_32Ds_instances{}); - add_device_operation_instances( - instances, - device_conv2d_int8_32Ds_instances{}); -} -} // namespace instance -} // namespace device -} // namespace tensor_operation -} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_perlayer_quantization_int8_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_perlayer_quantization_int8_instance.cpp deleted file mode 100644 index 83435d8119..0000000000 --- a/library/src/tensor_operation_instance/gpu/quantization/device_conv2d_xdl_perlayer_quantization_int8_instance.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// SPDX-License-Identifier: MIT -// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
- -#include "device_conv2d_xdl_int8_instance.hpp" - -namespace ck { -namespace tensor_operation { -namespace device { -namespace instance { -void add_device_conv2d_perlayer_quantization_int8_instances( - std::vector>>& instances) -{ - add_device_operation_instances( - instances, - device_conv2d_int8_instances{}); - add_device_operation_instances( - instances, - device_conv2d_int8_instances{}); - add_device_operation_instances( - instances, - device_conv2d_int8_instances{}); -} - -void add_device_conv2d_relu_perlayer_quantization_int8_instances( - std::vector>>& instances) -{ - add_device_operation_instances( - instances, - device_conv2d_int8_instances{}); - add_device_operation_instances( - instances, - device_conv2d_int8_instances{}); - add_device_operation_instances( - instances, - device_conv2d_int8_instances{}); -} -} // namespace instance -} // namespace device -} // namespace tensor_operation -} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_instance.hpp b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_instance.hpp new file mode 100644 index 0000000000..9cad8d4c8e --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_instance.hpp @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "gemm_quantization_common.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_dl.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +template +using device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_kn_mn_instances = std::tuple< + // clang-format off + //####################| A| B| Ds| E| AData| BData| AccData| DsData| EData| A| B| CDE| GEMM| Block| MPer| NPer| KPer| K1| M1Per| N1Per| KPer| M11N11Thread| M11N11Thread| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| CThreadTransfer| CThreadTransfer| CThreadTransfer| + //####################| Layout| Layout| Layout| Layout| Type| Type| Type| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Size| Block| Block| Block| | ThreadM| Thread| Thread| ClusterM110Xs| ClusterN110Xs| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| SrcDstAccess| SrcDstVectorDim| DstScalarPerVector| + //####################| | | | | | | | | | Operation| Operation| Operation| | | | | | | | | | | | K0_M0_M1_K1| K0_M0_M1_K1| ArrangeOrder| Order| Lengths_K0_M0_M1_K1| ContiguousDimOrder| Lengths_K0_M0_M1_K1| K0_N0_N1_K1| K0_N0_N1_K1| ArrangeOrder| Order| Lengths_K0_N0_N1_K1| ContiguousDimOrder| Lengths_K0_N0_N1_K1| Order| | | + //####################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGemmMultipleD_Dl< Col, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 256, 128, 128, 16, 4, 4, 4, 1, S<8, 2>, S<8, 2>, S<2, 1, 4, 4>, S<8, 1, 32, 1>, S<0, 3, 1, 2>, S<0, 3, 1, 2>, S<1, 1, 
4, 1>, S<0, 3, 1, 2>, S<1, 1, 4, 4>, S<2, 1, 4, 4>, S<8, 1, 32, 1>, S<0, 3, 1, 2>, S<0, 3, 1, 2>, S<1, 1, 4, 1>, S<0, 3, 1, 2>, S<1, 1, 4, 4>, S<0, 1, 2, 3, 4, 5>, 5, 4> + // clang-format on + >; + +template +using device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_nk_mn_instances = std::tuple< + // clang-format off + //####################| A| B| Ds| E| AData| BData| AccData| DsData| EData| A| B| CDE| GEMM| Block| MPer| NPer| KPer| K1| M1Per| N1Per| KPer| M11N11Thread| M11N11Thread| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| CThreadTransfer| CThreadTransfer| CThreadTransfer| + //####################| Layout| Layout| Layout| Layout| Type| Type| Type| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Size| Block| Block| Block| | ThreadM| Thread| Thread| ClusterM110Xs| ClusterN110Xs| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| SrcDstAccess| SrcDstVectorDim| DstScalarPerVector| + //####################| | | | | | | | | | Operation| Operation| Operation| | | | | | | | | | | | K0_M0_M1_K1| K0_M0_M1_K1| ArrangeOrder| Order| Lengths_K0_M0_M1_K1| ContiguousDimOrder| Lengths_K0_M0_M1_K1| K0_N0_N1_K1| K0_N0_N1_K1| ArrangeOrder| Order| Lengths_K0_N0_N1_K1| ContiguousDimOrder| Lengths_K0_N0_N1_K1| Order| | | + //####################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGemmMultipleD_Dl< Col, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 256, 128, 128, 16, 4, 4, 4, 1, S<8, 2>, S<8, 2>, S<2, 1, 4, 4>, S<8, 1, 32, 1>, S<0, 3, 1, 2>, S<0, 3, 1, 2>, S<1, 1, 4, 1>, S<0, 3, 
1, 2>, S<1, 1, 4, 4>, S<8, 1, 1, 4>, S<2, 1, 128, 1>, S<1, 2, 0, 3>, S<1, 2, 0, 3>, S<4, 1, 1, 4>, S<1, 2, 0, 3>, S<1, 1, 1, 4>, S<0, 1, 2, 3, 4, 5>, 5, 4> + // clang-format on + >; + +template +using device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_kn_mn_instances = std::tuple< + // clang-format off + //####################| A| B| Ds| E| AData| BData| AccData| DsData| EData| A| B| CDE| GEMM| Block| MPer| NPer| KPer| K1| M1Per| N1Per| KPer| M11N11Thread| M11N11Thread| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| CThreadTransfer| CThreadTransfer| CThreadTransfer| + //####################| Layout| Layout| Layout| Layout| Type| Type| Type| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Size| Block| Block| Block| | ThreadM| Thread| Thread| ClusterM110Xs| ClusterN110Xs| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| SrcDstAccess| SrcDstVectorDim| DstScalarPerVector| + //####################| | | | | | | | | | Operation| Operation| Operation| | | | | | | | | | | | K0_M0_M1_K1| K0_M0_M1_K1| ArrangeOrder| Order| Lengths_K0_M0_M1_K1| ContiguousDimOrder| Lengths_K0_M0_M1_K1| K0_N0_N1_K1| K0_N0_N1_K1| ArrangeOrder| Order| Lengths_K0_N0_N1_K1| ContiguousDimOrder| Lengths_K0_N0_N1_K1| Order| | | + //####################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGemmMultipleD_Dl< Row, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 256, 128, 128, 16, 4, 4, 4, 1, S<8, 2>, S<8, 2>, S<8, 1, 1, 4>, S<2, 1, 128, 1>, S<1, 2, 0, 3>, S<1, 2, 0, 3>, S<4, 1, 1, 4>, S<1, 2, 0, 3>, S<1, 1, 
1, 4>, S<2, 1, 4, 4>, S<8, 1, 32, 1>, S<0, 3, 1, 2>, S<0, 3, 1, 2>, S<1, 1, 4, 1>, S<0, 3, 1, 2>, S<1, 1, 4, 4>, S<0, 1, 2, 3, 4, 5>, 5, 4> + // clang-format on + >; + +template +using device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_nk_mn_instances = std::tuple< + // clang-format off + //####################| A| B| Ds| E| AData| BData| AccData| DsData| EData| A| B| CDE| GEMM| Block| MPer| NPer| KPer| K1| M1Per| N1Per| KPer| M11N11Thread| M11N11Thread| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| CThreadTransfer| CThreadTransfer| CThreadTransfer| + //####################| Layout| Layout| Layout| Layout| Type| Type| Type| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Size| Block| Block| Block| | ThreadM| Thread| Thread| ClusterM110Xs| ClusterN110Xs| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| SrcDstAccess| SrcDstVectorDim| DstScalarPerVector| + //####################| | | | | | | | | | Operation| Operation| Operation| | | | | | | | | | | | K0_M0_M1_K1| K0_M0_M1_K1| ArrangeOrder| Order| Lengths_K0_M0_M1_K1| ContiguousDimOrder| Lengths_K0_M0_M1_K1| K0_N0_N1_K1| K0_N0_N1_K1| ArrangeOrder| Order| Lengths_K0_N0_N1_K1| ContiguousDimOrder| Lengths_K0_N0_N1_K1| Order| | | + //####################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGemmMultipleD_Dl< Row, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 256, 128, 128, 16, 4, 4, 4, 1, S<8, 2>, S<8, 2>, S<8, 1, 1, 4>, S<2, 1, 128, 1>, S<1, 2, 0, 3>, S<1, 2, 0, 3>, S<4, 1, 1, 4>, S<1, 2, 0, 3>, S<1, 1, 1, 4>, S<8, 1, 
1, 4>, S<2, 1, 128, 1>, S<1, 2, 0, 3>, S<1, 2, 0, 3>, S<4, 1, 1, 4>, S<1, 2, 0, 3>, S<1, 1, 1, 4>, S<0, 1, 2, 3, 4, 5>, 5, 4> + // clang-format on + >; + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_kn_mn_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_kn_mn_instance.cpp new file mode 100644 index 0000000000..ffe1efb80b --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_kn_mn_instance.cpp @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. + +#include "device_gemm_quantization_dl_c_shuffle_i8_i8_i8_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +// Layout(A, B, C) = [Col, Row, Row] +void add_device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_kn_mn_instances( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_kn_mn_instances{}); +} + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_nk_mn_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_nk_mn_instance.cpp new file mode 100644 index 0000000000..7f24e5677c --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_nk_mn_instance.cpp @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "device_gemm_quantization_dl_c_shuffle_i8_i8_i8_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +// Layout(A, B, C) = [Col, Col, Row] +void add_device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_nk_mn_instances( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, device_gemm_quantization_dl_c_shuffle_i8_i8_i8_km_nk_mn_instances{}); +} + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_kn_mn_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_kn_mn_instance.cpp new file mode 100644 index 0000000000..06e66cfe03 --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_kn_mn_instance.cpp @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "device_gemm_quantization_dl_c_shuffle_i8_i8_i8_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +// Layout(A, B, C) = [Row, Row, Row] +void add_device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_kn_mn_instances( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_kn_mn_instances{}); +} + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_nk_mn_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_nk_mn_instance.cpp new file mode 100644 index 0000000000..16635d1e93 --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_nk_mn_instance.cpp @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "device_gemm_quantization_dl_c_shuffle_i8_i8_i8_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +// Layout(A, B, C) = [Row, Col, Row] +void add_device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_nk_mn_instances( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, device_gemm_quantization_dl_c_shuffle_i8_i8_i8_mk_nk_mn_instances{}); +} + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_instance.hpp b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_instance.hpp new file mode 100644 index 0000000000..dfb8dc29b4 --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_instance.hpp @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "gemm_quantization_common.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_xdl_cshuffle.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +template +using device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_kn_mn_instances = std::tuple< + // clang-format off + //##############################| A| B| Ds| E| AData| BData| AccData| CShuffle| DsData| EData| A| B| CDE| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| LoopScheduler| Pipeline| + //##############################| Layout| Layout| Layout| Layout| Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| | | + //##############################| | | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| | | + //##############################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGemmMultipleD_Xdl_CShuffle< Col, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 256, 128, 64, 4, 4, 32, 32, 4, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 
1, 4, 4, 0, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 256, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 16, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 256, 64, 4, 4, 32, 32, 2, 4, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 256, 64, 16, 16, 32, 32, 2, 4, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 16, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 128, 128, 64, 4, 4, 32, 32, 4, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, 1, 1, S<1, 32, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 128, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 16, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 16, 1, 1, 1, S<1, 32, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + 
DeviceGemmMultipleD_Xdl_CShuffle< Col, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 128, 64, 4, 4, 32, 32, 2, 2, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 128, 64, 16, 16, 32, 32, 2, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 16, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 128, 64, 64, 4, 4, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, S<4, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, 1, 1, S<1, 64, 1, 2>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 128, 64, 64, 16, 16, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 16, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 16, 1, 1, 1, S<1, 64, 1, 2>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 64, 128, 64, 4, 4, 32, 32, 2, 2, S<8, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, 1, 1, S<1, 32, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, 
PassThrough, OutElementOp, MNKPadding, 1, 128, 64, 128, 64, 16, 16, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 16, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 16, 1, 1, 1, S<1, 32, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 64, 64, 4, 4, 32, 32, 2, 1, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 64, 64, 16, 16, 32, 32, 2, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 16, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 64, 128, 64, 4, 4, 32, 32, 1, 2, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 64, 128, 64, 16, 16, 32, 32, 1, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 16, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline> + // clang-format on + >; + +template +using device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_nk_mn_instances = std::tuple< + // clang-format off + //##############################| A| B| Ds| E| AData| BData| AccData| CShuffle| DsData| EData| A| B| CDE| GEMM| 
NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| LoopScheduler| Pipeline| + //##############################| Layout| Layout| Layout| Layout| Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| | | + //##############################| | | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| | | + //##############################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGemmMultipleD_Xdl_CShuffle< Col, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 256, 128, 64, 4, 16, 32, 32, 4, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 256, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 
1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 256, 64, 4, 16, 32, 32, 2, 4, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 256, 64, 16, 16, 32, 32, 2, 4, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 128, 128, 64, 4, 16, 32, 32, 4, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 128, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 128, 64, 4, 16, 32, 32, 2, 2, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Col, Empty_Tuple, Row, int8_t, int8_t, 
int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 128, 64, 16, 16, 32, 32, 2, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 128, 64, 64, 4, 16, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 2>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 128, 64, 64, 16, 16, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 2>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 64, 128, 64, 4, 16, 32, 32, 2, 2, S<8, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 64, 128, 64, 16, 16, 32, 32, 2, 2, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 64, 64, 4, 16, 32, 32, 
2, 1, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 64, 64, 16, 16, 32, 32, 2, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 64, 128, 64, 4, 16, 32, 32, 1, 2, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Col, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 64, 128, 64, 16, 16, 32, 32, 1, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline> + // clang-format on + >; + +template +using device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_kn_mn_instances = std::tuple< + // clang-format off + //##############################| A| B| Ds| E| AData| BData| AccData| CShuffle| DsData| EData| A| B| CDE| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| LoopScheduler| Pipeline| + //##############################| Layout| Layout| Layout| Layout| Type| 
Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| | | + //##############################| | | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| | | + //##############################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGemmMultipleD_Xdl_CShuffle< Row, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 256, 128, 64, 16, 4, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 256, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 256, 64, 16, 4, 32, 32, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + 
DeviceGemmMultipleD_Xdl_CShuffle< Row, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 256, 64, 16, 16, 32, 32, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 128, 128, 64, 16, 4, 32, 32, 4, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, 1, 1, S<1, 32, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 128, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 16, 1, 1, 1, S<1, 32, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 128, 64, 16, 4, 32, 32, 2, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 128, 64, 16, 16, 32, 32, 2, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, 
PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 128, 64, 64, 16, 4, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<8, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, 1, 1, S<1, 64, 1, 2>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 128, 64, 64, 16, 16, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 16, 1, 1, 1, S<1, 64, 1, 2>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 64, 128, 64, 16, 4, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, 1, 1, S<1, 32, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 64, 128, 64, 16, 16, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 16, 1, 1, 1, S<1, 32, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 64, 64, 16, 4, 32, 32, 2, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 64, 64, 16, 16, 32, 32, 2, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 
2>, 2, 16, 16, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 64, 128, 64, 16, 4, 32, 32, 1, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 4, 0, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Row, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 64, 128, 64, 16, 16, 32, 32, 1, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 2, 16, 1, 1, 1, S<1, 64, 1, 4>, 16, GemmLoopScheduler, GemmPipeline> + // clang-format on + >; + +template +using device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_nk_mn_instances = std::tuple< + // clang-format off + //##############################| A| B| Ds| E| AData| BData| AccData| CShuffle| DsData| EData| A| B| CDE| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| LoopScheduler| Pipeline| + //##############################| Layout| Layout| Layout| Layout| Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| | | + 
//##############################| | | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| | | + //##############################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceGemmMultipleD_Xdl_CShuffle< Row, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 256, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 256, 64, 16, 16, 32, 32, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 128, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 128, 64, 16, 16, 32, 32, 2, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Col, Empty_Tuple, 
Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 128, 64, 64, 16, 16, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 2>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 64, 128, 64, 16, 16, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 64, 64, 64, 64, 16, 16, 32, 32, 2, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 2>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 128, 64, 64, 16, 16, 32, 32, 2, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 256, 64, 128, 64, 16, 16, 32, 32, 1, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 128, 32, 
64, 16, 16, 32, 32, 2, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 2>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 128, 32, 128, 64, 16, 16, 32, 32, 1, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 4>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 64, 64, 32, 64, 16, 16, 32, 32, 2, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 2>, 16, GemmLoopScheduler, GemmPipeline>, + DeviceGemmMultipleD_Xdl_CShuffle< Row, Col, Empty_Tuple, Row, int8_t, int8_t, int32_t, int32_t, Empty_Tuple, int8_t, PassThrough, PassThrough, OutElementOp, MNKPadding, 1, 64, 32, 64, 64, 16, 16, 32, 32, 1, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 16, 1, 2>, 16, GemmLoopScheduler, GemmPipeline> + // clang-format on + >; + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_kn_mn_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_kn_mn_instance.cpp new file mode 100644 index 0000000000..c153cdf9ea --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_kn_mn_instance.cpp @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro 
Devices, Inc. All rights reserved. + +#include "device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +// Layout(A, B, C) = [Col, Row, Row] +void add_device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_kn_mn_instances( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, + device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_kn_mn_instances{}); +#if CK_EXPERIMENTAL_INTER_WAVE_INSTANCES + add_device_operation_instances( + instances, + device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_kn_mn_instances{}); +#endif +#if CK_EXPERIMENTAL_PIPELINE_V2_INSTANCES + add_device_operation_instances( + instances, + device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_kn_mn_instances{}); +#endif +} + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_nk_mn_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_nk_mn_instance.cpp new file mode 100644 index 0000000000..f6cd32026f --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_nk_mn_instance.cpp @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +// Layout(A, B, C) = [Col, Col, Row] +void add_device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_nk_mn_instances( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, + device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_nk_mn_instances{}); +#if CK_EXPERIMENTAL_INTER_WAVE_INSTANCES + add_device_operation_instances( + instances, + device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_nk_mn_instances{}); +#endif +#if CK_EXPERIMENTAL_PIPELINE_V2_INSTANCES + add_device_operation_instances( + instances, + device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_km_nk_mn_instances{}); +#endif +} + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_kn_mn_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_kn_mn_instance.cpp new file mode 100644 index 0000000000..45fbacc334 --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_kn_mn_instance.cpp @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +// Layout(A, B, C) = [Row, Row, Row] +void add_device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_kn_mn_instances( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, + device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_kn_mn_instances{}); +#if CK_EXPERIMENTAL_INTER_WAVE_INSTANCES + add_device_operation_instances( + instances, + device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_kn_mn_instances{}); +#endif +#if CK_EXPERIMENTAL_PIPELINE_V2_INSTANCES + add_device_operation_instances( + instances, + device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_kn_mn_instances{}); +#endif +} + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_nk_mn_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_nk_mn_instance.cpp new file mode 100644 index 0000000000..257633fe19 --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/gemm/device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_nk_mn_instance.cpp @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +// Layout(A, B, C) = [Row, Col, Row] +void add_device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_nk_mn_instances( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, + device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_nk_mn_instances{}); +#if CK_EXPERIMENTAL_INTER_WAVE_INSTANCES + add_device_operation_instances( + instances, + device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_nk_mn_instances{}); +#endif +#if CK_EXPERIMENTAL_PIPELINE_V2_INSTANCES + add_device_operation_instances( + instances, + device_gemm_quantization_xdl_c_shuffle_i8_i8_i8_mk_nk_mn_instances{}); +#endif +} + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/quantization/gemm/gemm_quantization_common.hpp b/library/src/tensor_operation_instance/gpu/quantization/gemm/gemm_quantization_common.hpp new file mode 100644 index 0000000000..213f42b91b --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/quantization/gemm/gemm_quantization_common.hpp @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#pragma once + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" +#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp" +#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +template +using S = ck::Sequence; + +using Row = ck::tensor_layout::gemm::RowMajor; +using Col = ck::tensor_layout::gemm::ColumnMajor; + +using Empty_Tuple = ck::Tuple<>; +using Row_Row_Tuple = ck::Tuple; +using Col_Col_Tuple = ck::Tuple; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using Relu = ck::tensor_operation::element_wise::Relu; + +using Mul_Clamp = ck::tensor_operation::element_wise::Activation_Mul_Clamp; +using Relu_Mul_Clamp = ck::tensor_operation::element_wise::Activation_Mul_Clamp; + +using Add_Mul_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul_Clamp; +using Add_Relu_Mul_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul_Clamp; + +static constexpr auto MNKPadding = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck From fa998675fc6def2bc4aa5b9ccdfeefcef476a2b1 Mon Sep 17 00:00:00 2001 From: Rostyslav Geyyer <46627076+geyyer@users.noreply.github.com> Date: Wed, 15 Mar 2023 18:38:11 -0500 Subject: [PATCH 18/31] Update cmake-ck-dev.sh script (#641) Co-authored-by: Rosty Geyyer --- script/cmake-ck-dev.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/script/cmake-ck-dev.sh b/script/cmake-ck-dev.sh index 2e605ce8de..375ff49310 100755 --- a/script/cmake-ck-dev.sh +++ b/script/cmake-ck-dev.sh @@ -8,7 +8,8 @@ MY_PROJECT_SOURCE=$1 cmake \ -D CMAKE_PREFIX_PATH=/opt/rocm \ -D CMAKE_CXX_COMPILER=/opt/rocm/bin/hipcc \ --D CMAKE_CXX_FLAGS="-O3 -ftemplate-backtrace-limit=0 
-gline-tables-only -save-temps=$PWD" \ +-D CMAKE_CXX_FLAGS="-std=c++17 -O3 -ftemplate-backtrace-limit=0 -fPIE -mcumode \ +-mno-wavefrontsize64 -Wno-gnu-line-marker -save-temps=$PWD" \ -D CMAKE_BUILD_TYPE=Release \ -D BUILD_DEV=ON \ -D GPU_TARGETS="gfx908;gfx90a" \ From 6ae12434d23a7b49eb380d27c262e3f22160f4be Mon Sep 17 00:00:00 2001 From: ltqin Date: Tue, 21 Mar 2023 00:49:31 +0800 Subject: [PATCH 19/31] workaround 637 (#640) * add workaround 637 * format * change id --------- Co-authored-by: zjing14 --- include/ck/ck.hpp | 2 ++ ...te_xdl_cshuffle_f16_f16_f16_f16_gmk_gnk_gno_gmo_instance.cpp | 2 ++ ...te_xdl_cshuffle_f16_f16_f16_f16_gmk_gnk_gno_gmo_instance.cpp | 2 ++ 3 files changed, 6 insertions(+) diff --git a/include/ck/ck.hpp b/include/ck/ck.hpp index e7950af7ad..5009dec5e9 100644 --- a/include/ck/ck.hpp +++ b/include/ck/ck.hpp @@ -163,6 +163,8 @@ // workaround: compiler not emiting reciprocal instruction frm __frcp_rn() #define CK_WORKAROUND_SWDEV_383542 1 +// workaround: compiler issue on gfx908 +#define CK_WORKAROUND_SWDEV_388832 1 // flag to enable (1) or disable (0) the debugging output in some kernels #define DEBUG_LOG 0 diff --git a/library/src/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/device_batched_gemm_bias_softmax_gemm_permute_xdl_cshuffle_f16_f16_f16_f16_gmk_gnk_gno_gmo_instance.cpp b/library/src/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/device_batched_gemm_bias_softmax_gemm_permute_xdl_cshuffle_f16_f16_f16_f16_gmk_gnk_gno_gmo_instance.cpp index 00b37d52b4..fd09471958 100644 --- a/library/src/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/device_batched_gemm_bias_softmax_gemm_permute_xdl_cshuffle_f16_f16_f16_f16_gmk_gnk_gno_gmo_instance.cpp +++ b/library/src/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/device_batched_gemm_bias_softmax_gemm_permute_xdl_cshuffle_f16_f16_f16_f16_gmk_gnk_gno_gmo_instance.cpp @@ -47,7 +47,9 @@ using 
device_batched_gemm_bias_softmax_gemm_permute_xdl_cshuffle_f16_f16_f16_f16 // #############################################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | Wave| Wave| Wave| | | | | | | | | | | | | | | | | | | | | | | | | | | DeviceBatchedGemmSoftmaxGemmPermute_Xdl_CShuffle< NumDimG, NumDimM, NumDimN, NumDimK, NumDimO, F16, F16, F16, F16, ck::Tuple, ck::Tuple<>, F32, F16, PassThrough, PassThrough, ScaleAdd, PassThrough, PassThrough, GemmDefault, TensorDefault, TensorDefault, TensorDefault, TensorDefault, 1, 256, 256, 128, 32, 64, 32, 8, 8, 2, 32, 32, 2, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<16, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, false, 1, 2, S<1, 32, 1, 8>, 8, MaskingSpec>, DeviceBatchedGemmSoftmaxGemmPermute_Xdl_CShuffle< NumDimG, NumDimM, NumDimN, NumDimK, NumDimO, F16, F16, F16, F16, ck::Tuple, ck::Tuple<>, F32, F16, PassThrough, PassThrough, ScaleAdd, PassThrough, PassThrough, GemmDefault, TensorDefault, TensorDefault, TensorDefault, TensorDefault, 1, 256, 256, 128, 32, 128, 32, 8, 8, 2, 32, 32, 2, 4, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S< 8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, false, 1, 2, S<1, 32, 1, 8>, 8, MaskingSpec>, + #if CK_WORKAROUND_SWDEV_388832 DeviceBatchedGemmSoftmaxGemmPermute_Xdl_CShuffle< NumDimG, NumDimM, NumDimN, NumDimK, NumDimO, F16, F16, F16, F16, ck::Tuple, ck::Tuple<>, F32, F16, PassThrough, PassThrough, ScaleAdd, PassThrough, PassThrough, GemmDefault, TensorDefault, TensorDefault, TensorDefault, TensorDefault, 1, 256, 128, 256, 32, 64, 32, 8, 8, 2, 32, 32, 1, 8, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<16, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, false, 1, 2, S<1, 32, 1, 8>, 8, MaskingSpec>, + #endif DeviceBatchedGemmSoftmaxGemmPermute_Xdl_CShuffle< NumDimG, NumDimM, NumDimN, 
NumDimK, NumDimO, F16, F16, F16, F16, ck::Tuple, ck::Tuple<>, F32, F16, PassThrough, PassThrough, ScaleAdd, PassThrough, PassThrough, GemmDefault, TensorDefault, TensorDefault, TensorDefault, TensorDefault, 1, 256, 128, 256, 32, 128, 32, 8, 8, 2, 32, 32, 1, 8, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S< 8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, false, 1, 2, S<1, 32, 1, 8>, 8, MaskingSpec>, DeviceBatchedGemmSoftmaxGemmPermute_Xdl_CShuffle< NumDimG, NumDimM, NumDimN, NumDimK, NumDimO, F16, F16, F16, F16, ck::Tuple, ck::Tuple<>, F32, F16, PassThrough, PassThrough, ScaleAdd, PassThrough, PassThrough, GemmDefault, TensorDefault, TensorDefault, TensorDefault, TensorDefault, 1, 256, 128, 128, 64, 64, 32, 8, 8, 2, 32, 32, 1, 4, 2, S<8, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, false, S<8, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, false, S<16, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, false, 1, 2, S<1, 32, 1, 8>, 8, MaskingSpec>, DeviceBatchedGemmSoftmaxGemmPermute_Xdl_CShuffle< NumDimG, NumDimM, NumDimN, NumDimK, NumDimO, F16, F16, F16, F16, ck::Tuple, ck::Tuple<>, F32, F16, PassThrough, PassThrough, ScaleAdd, PassThrough, PassThrough, GemmDefault, TensorDefault, TensorDefault, TensorDefault, TensorDefault, 1, 256, 128, 128, 32, 64, 32, 8, 8, 2, 32, 32, 1, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<16, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, false, 1, 2, S<1, 32, 1, 8>, 8, MaskingSpec>, diff --git a/library/src/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/device_batched_gemm_softmax_gemm_permute_xdl_cshuffle_f16_f16_f16_f16_gmk_gnk_gno_gmo_instance.cpp b/library/src/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/device_batched_gemm_softmax_gemm_permute_xdl_cshuffle_f16_f16_f16_f16_gmk_gnk_gno_gmo_instance.cpp index 21da6895e6..291a127a66 100644 --- 
a/library/src/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/device_batched_gemm_softmax_gemm_permute_xdl_cshuffle_f16_f16_f16_f16_gmk_gnk_gno_gmo_instance.cpp +++ b/library/src/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/device_batched_gemm_softmax_gemm_permute_xdl_cshuffle_f16_f16_f16_f16_gmk_gnk_gno_gmo_instance.cpp @@ -47,7 +47,9 @@ using device_batched_gemm_softmax_gemm_permute_xdl_cshuffle_f16_f16_f16_f16_gmk_ // #############################################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | Wave| Wave| Wave| | | | | | | | | | | | | | | | | | | | | | | | | | | DeviceBatchedGemmSoftmaxGemmPermute_Xdl_CShuffle< NumDimG, NumDimM, NumDimN, NumDimK, NumDimO, F16, F16, F16, F16, ck::Tuple<>, ck::Tuple<>, F32, F16, PassThrough, PassThrough, Scale, PassThrough, PassThrough, GemmDefault, TensorDefault, TensorDefault, TensorDefault, TensorDefault, 1, 256, 256, 128, 32, 64, 32, 8, 8, 2, 32, 32, 2, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<16, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, false, 1, 2, S<1, 32, 1, 8>, 8, MaskingSpec>, DeviceBatchedGemmSoftmaxGemmPermute_Xdl_CShuffle< NumDimG, NumDimM, NumDimN, NumDimK, NumDimO, F16, F16, F16, F16, ck::Tuple<>, ck::Tuple<>, F32, F16, PassThrough, PassThrough, Scale, PassThrough, PassThrough, GemmDefault, TensorDefault, TensorDefault, TensorDefault, TensorDefault, 1, 256, 256, 128, 32, 128, 32, 8, 8, 2, 32, 32, 2, 4, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S< 8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, false, 1, 2, S<1, 32, 1, 8>, 8, MaskingSpec>, + #if CK_WORKAROUND_SWDEV_388832 DeviceBatchedGemmSoftmaxGemmPermute_Xdl_CShuffle< NumDimG, NumDimM, NumDimN, NumDimK, NumDimO, F16, F16, F16, F16, ck::Tuple<>, ck::Tuple<>, F32, F16, PassThrough, PassThrough, Scale, PassThrough, PassThrough, GemmDefault, TensorDefault, 
TensorDefault, TensorDefault, TensorDefault, 1, 256, 128, 256, 32, 64, 32, 8, 8, 2, 32, 32, 1, 8, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<16, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, false, 1, 2, S<1, 32, 1, 8>, 8, MaskingSpec>, + #endif DeviceBatchedGemmSoftmaxGemmPermute_Xdl_CShuffle< NumDimG, NumDimM, NumDimN, NumDimK, NumDimO, F16, F16, F16, F16, ck::Tuple<>, ck::Tuple<>, F32, F16, PassThrough, PassThrough, Scale, PassThrough, PassThrough, GemmDefault, TensorDefault, TensorDefault, TensorDefault, TensorDefault, 1, 256, 128, 256, 32, 128, 32, 8, 8, 2, 32, 32, 1, 8, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S< 8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, false, 1, 2, S<1, 32, 1, 8>, 8, MaskingSpec>, DeviceBatchedGemmSoftmaxGemmPermute_Xdl_CShuffle< NumDimG, NumDimM, NumDimN, NumDimK, NumDimO, F16, F16, F16, F16, ck::Tuple<>, ck::Tuple<>, F32, F16, PassThrough, PassThrough, Scale, PassThrough, PassThrough, GemmDefault, TensorDefault, TensorDefault, TensorDefault, TensorDefault, 1, 256, 128, 128, 64, 64, 32, 8, 8, 2, 32, 32, 1, 4, 2, S<8, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, false, S<8, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, false, S<16, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, false, 1, 2, S<1, 32, 1, 8>, 8, MaskingSpec>, DeviceBatchedGemmSoftmaxGemmPermute_Xdl_CShuffle< NumDimG, NumDimM, NumDimN, NumDimK, NumDimO, F16, F16, F16, F16, ck::Tuple<>, ck::Tuple<>, F32, F16, PassThrough, PassThrough, Scale, PassThrough, PassThrough, GemmDefault, TensorDefault, TensorDefault, TensorDefault, TensorDefault, 1, 256, 128, 128, 32, 64, 32, 8, 8, 2, 32, 32, 1, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<16, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 4, 2, false, 1, 2, S<1, 32, 1, 8>, 8, MaskingSpec>, From 8a659a2e4c52936986884632f13cb58091a54cbf Mon Sep 17 00:00:00 2001 From: Dan 
Yao Date: Tue, 21 Mar 2023 03:30:24 +0800 Subject: [PATCH 20/31] rtn in ternary way (#632) * rtn in ternary way * Check both flags to preserve NaN * Format * Rearrange flag1 * Apply suggestions from code review Co-authored-by: Ronan Keryell --------- Co-authored-by: Rosty Geyyer Co-authored-by: Rostyslav Geyyer <46627076+geyyer@users.noreply.github.com> Co-authored-by: Ronan Keryell --- include/ck/utility/data_type.hpp | 62 ++++++++++++++++---------------- 1 file changed, 30 insertions(+), 32 deletions(-) diff --git a/include/ck/utility/data_type.hpp b/include/ck/utility/data_type.hpp index 40ee8b617e..5190036144 100644 --- a/include/ck/utility/data_type.hpp +++ b/include/ck/utility/data_type.hpp @@ -974,38 +974,36 @@ inline __host__ __device__ constexpr bhalf_t type_convert(float uint32_t int32; } u = {x}; - if(~u.int32 & 0x7f800000) - { - // When the exponent bits are not all 1s, then the value is zero, normal, - // or subnormal. We round the bfloat16 mantissa up by adding 0x7FFF, plus - // 1 if the least significant bit of the bfloat16 mantissa is 1 (odd). - // This causes the bfloat16's mantissa to be incremented by 1 if the 16 - // least significant bits of the float mantissa are greater than 0x8000, - // or if they are equal to 0x8000 and the least significant bit of the - // bfloat16 mantissa is 1 (odd). This causes it to be rounded to even when - // the lower 16 bits are exactly 0x8000. If the bfloat16 mantissa already - // has the value 0x7f, then incrementing it causes it to become 0x00 and - // the exponent is incremented by one, which is the next higher FP value - // to the unrounded bfloat16 value. When the bfloat16 value is subnormal - // with an exponent of 0x00 and a mantissa of 0x7F, it may be rounded up - // to a normal value with an exponent of 0x01 and a mantissa of 0x00. 
- // When the bfloat16 value has an exponent of 0xFE and a mantissa of 0x7F, - // incrementing it causes it to become an exponent of 0xFF and a mantissa - // of 0x00, which is Inf, the next higher value to the unrounded value. - u.int32 += 0x7fff + ((u.int32 >> 16) & 1); // Round to nearest, round to even - } - else if(u.int32 & 0xffff) - { - // When all of the exponent bits are 1, the value is Inf or NaN. - // Inf is indicated by a zero mantissa. NaN is indicated by any nonzero - // mantissa bit. Quiet NaN is indicated by the most significant mantissa - // bit being 1. Signaling NaN is indicated by the most significant - // mantissa bit being 0 but some other bit(s) being 1. If any of the - // lower 16 bits of the mantissa are 1, we set the least significant bit - // of the bfloat16 mantissa, in order to preserve signaling NaN in case - // the bloat16's mantissa bits are all 0. - u.int32 |= 0x10000; // Preserve signaling NaN - } + // When the exponent bits are not all 1s, then the value is zero, normal, + // or subnormal. We round the bfloat16 mantissa up by adding 0x7FFF, plus + // 1 if the least significant bit of the bfloat16 mantissa is 1 (odd). + // This causes the bfloat16's mantissa to be incremented by 1 if the 16 + // least significant bits of the float mantissa are greater than 0x8000, + // or if they are equal to 0x8000 and the least significant bit of the + // bfloat16 mantissa is 1 (odd). This causes it to be rounded to even when + // the lower 16 bits are exactly 0x8000. If the bfloat16 mantissa already + // has the value 0x7f, then incrementing it causes it to become 0x00 and + // the exponent is incremented by one, which is the next higher FP value + // to the unrounded bfloat16 value. When the bfloat16 value is subnormal + // with an exponent of 0x00 and a mantissa of 0x7f, it may be rounded up + // to a normal value with an exponent of 0x01 and a mantissa of 0x00. 
+ // When the bfloat16 value has an exponent of 0xFE and a mantissa of 0x7F, + // incrementing it causes it to become an exponent of 0xFF and a mantissa + // of 0x00, which is Inf, the next higher value to the unrounded value. + bool flag0 = ~u.int32 & 0x7f800000; + + // When all of the exponent bits are 1, the value is Inf or NaN. + // Inf is indicated by a zero mantissa. NaN is indicated by any nonzero + // mantissa bit. Quiet NaN is indicated by the most significant mantissa + // bit being 1. Signaling NaN is indicated by the most significant + // mantissa bit being 0 but some other bit(s) being 1. If any of the + // lower 16 bits of the mantissa are 1, we set the least significant bit + // of the bfloat16 mantissa, in order to preserve signaling NaN in case + // the bfloat16's mantissa bits are all 0. + bool flag1 = !flag0 && (u.int32 & 0xffff); + + u.int32 += flag0 ? 0x7fff + ((u.int32 >> 16) & 1) : 0; // Round to nearest, round to even + u.int32 |= flag1 ? 0x10000 : 0x0; // Preserve signaling NaN return uint16_t(u.int32 >> 16); } From 36750a576371758d6777e8788b0a71fb4d47549e Mon Sep 17 00:00:00 2001 From: Illia Silin <98187287+illsilin@users.noreply.github.com> Date: Wed, 22 Mar 2023 08:05:48 -0700 Subject: [PATCH 21/31] Get rid of XDL parameters in WMMA kernel string. 
(#646) * remove XDL parameters from WMMA kernel string * get rid of two more parameters --- ...vice_grouped_conv_fwd_multiple_d_wmma_cshuffle.hpp | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_wmma_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_wmma_cshuffle.hpp index a1ee1902b5..9d4b68c0b6 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_wmma_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_wmma_cshuffle.hpp @@ -840,17 +840,8 @@ struct DeviceGroupedConvFwdMultipleD_Wmma_CShuffle << KPerBlock << ", " << getConvForwardSpecializationString(ConvForwardSpecialization) << ", " << K1 << ", " - << MPerXDL << ", " - << NPerXDL << ", " - << MXdlPerWave << ", " - << NXdlPerWave << ", " << ABlockTransferSrcScalarPerVector << ", " - << ABlockTransferDstScalarPerVector_K1 << ", " - << BBlockTransferSrcScalarPerVector << ", " - << BBlockTransferDstScalarPerVector_K1 << ", " - << CShuffleMXdlPerWavePerShuffle << ", " - << CShuffleNXdlPerWavePerShuffle << ", " - << CBlockTransferScalarPerVector_NWaveNPerXdl + << BBlockTransferSrcScalarPerVector + << ">"; // clang-format on From fe96e8fbf2503e14574e429dde61546051f3e4dc Mon Sep 17 00:00:00 2001 From: Po Yen Chen Date: Thu, 23 Mar 2023 01:49:11 +0800 Subject: [PATCH 22/31] Reduce group & batch of the tested convolutions (#648) --- .../grouped_convnd_bwd_weight.cpp | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/test/grouped_convnd_bwd_weight/grouped_convnd_bwd_weight.cpp b/test/grouped_convnd_bwd_weight/grouped_convnd_bwd_weight.cpp index 6545b6e566..75f934cc06 100644 --- a/test/grouped_convnd_bwd_weight/grouped_convnd_bwd_weight.cpp +++ b/test/grouped_convnd_bwd_weight/grouped_convnd_bwd_weight.cpp @@ -43,7 +43,7 @@ class TestGroupedConvndBwdWeight : 
public ::testing::Test DataType, DataType, DataType>(true, // do_verification - 1, // init_method integer value + 1, // init_method: integer value false, // do_log false, // time_kernel param, @@ -60,9 +60,9 @@ TYPED_TEST_SUITE(TestGroupedConvndBwdWeight, KernelTypes); TYPED_TEST(TestGroupedConvndBwdWeight, Test1D) { this->conv_params.clear(); - this->conv_params.push_back({1, 4, 128, 128, 256, {1}, {14}, {2}, {1}, {0}, {0}}); - this->conv_params.push_back({1, 4, 64, 128, 256, {3}, {28}, {1}, {1}, {1}, {1}}); - this->conv_params.push_back({1, 4, 128, 128, 256, {1}, {3}, {1}, {1}, {0}, {0}}); + this->conv_params.push_back({1, 2, 128, 128, 256, {1}, {14}, {2}, {1}, {0}, {0}}); + this->conv_params.push_back({1, 2, 32, 128, 256, {3}, {28}, {1}, {1}, {1}, {1}}); + this->conv_params.push_back({1, 2, 128, 128, 256, {1}, {3}, {1}, {1}, {0}, {0}}); this->template Run<1>(); } @@ -70,11 +70,11 @@ TYPED_TEST(TestGroupedConvndBwdWeight, Test2D) { this->conv_params.clear(); this->conv_params.push_back( - {2, 4, 128, 128, 256, {1, 1}, {7, 7}, {2, 2}, {1, 1}, {0, 0}, {0, 0}}); + {2, 2, 64, 128, 256, {1, 1}, {7, 7}, {2, 2}, {1, 1}, {0, 0}, {0, 0}}); this->conv_params.push_back( - {2, 4, 8, 128, 256, {3, 3}, {14, 14}, {1, 1}, {1, 1}, {1, 1}, {1, 1}}); + {2, 2, 4, 128, 256, {3, 3}, {14, 14}, {1, 1}, {1, 1}, {1, 1}, {1, 1}}); this->conv_params.push_back( - {2, 4, 128, 128, 256, {1, 1}, {3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}}); + {2, 2, 128, 128, 256, {1, 1}, {3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}}); this->template Run<2>(); } @@ -82,10 +82,10 @@ TYPED_TEST(TestGroupedConvndBwdWeight, Test3D) { this->conv_params.clear(); this->conv_params.push_back( - {3, 4, 128, 128, 256, {1, 1, 1}, {7, 7, 7}, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}}); + {3, 2, 16, 128, 256, {1, 1, 1}, {7, 7, 7}, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}}); this->conv_params.push_back( - {3, 4, 8, 128, 256, {3, 3, 3}, {14, 14, 3}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}}); + {3, 2, 2, 128, 256, {3, 3, 3}, {14, 
14, 3}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}}); this->conv_params.push_back( - {3, 4, 128, 128, 256, {1, 1, 1}, {3, 3, 3}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}}); + {3, 2, 32, 128, 256, {1, 1, 1}, {3, 3, 3}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}}); this->template Run<3>(); } From e5376be4acc6fb8554c5ff5430b8f2750bc939c9 Mon Sep 17 00:00:00 2001 From: Haocong WANG Date: Fri, 24 Mar 2023 00:22:10 +0800 Subject: [PATCH 23/31] [Navi3x] Fix Gridwise_multiple_d operation (#649) * Add CMake Option "USE_OPT_NAVI3X" * fix bug --- ...n_grouped_conv_fwd_bias_relu_add_wmma_example.inc | 4 ++-- .../grid/gridwise_gemm_multiple_d_wmma_cshuffle.hpp | 12 ++++++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/example/30_grouped_conv_fwd_multiple_d/run_grouped_conv_fwd_bias_relu_add_wmma_example.inc b/example/30_grouped_conv_fwd_multiple_d/run_grouped_conv_fwd_bias_relu_add_wmma_example.inc index 8161b1088a..a6888649c0 100644 --- a/example/30_grouped_conv_fwd_multiple_d/run_grouped_conv_fwd_bias_relu_add_wmma_example.inc +++ b/example/30_grouped_conv_fwd_multiple_d/run_grouped_conv_fwd_bias_relu_add_wmma_example.inc @@ -74,8 +74,8 @@ using DeviceConvFwdInstance = 8, // BBlockTransferSrcScalarPerVector 8, // BBlockTransferDstScalarPerVector_BK1 true, // BBlockLdsExtraN - 1, - 1, + 4, + 2, S<1, 32, 1, 8>, 8>; diff --git a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_d_wmma_cshuffle.hpp b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_d_wmma_cshuffle.hpp index 1e8f8ff9fe..38edace197 100644 --- a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_d_wmma_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_d_wmma_cshuffle.hpp @@ -431,6 +431,9 @@ struct GridwiseGemmMultipleD_k0mk1_k0nk1_mn_wmma_cshuffle constexpr auto b_block_desc_k0perblock_nperblock_k1 = GetBBlockDescriptor_K0PerBlock_NPerBlock_K1(); + constexpr auto cshuffle_block_desc_mshrepeat_mpershrepeat_nshrepeat_npershrepeat = + 
GetCShuffleBlockDescriptor_MShRepeat_MPerShRepeat_NShRepeat_NPerShRepeat(); + constexpr auto max_lds_align = K1; constexpr auto a_block_space_size_aligned = math::integer_least_multiple( @@ -439,8 +442,13 @@ struct GridwiseGemmMultipleD_k0mk1_k0nk1_mn_wmma_cshuffle constexpr auto b_block_space_size_aligned = math::integer_least_multiple( b_block_desc_k0perblock_nperblock_k1.GetElementSpaceSize(), max_lds_align); - return (a_block_space_size_aligned * sizeof(ADataType) + - b_block_space_size_aligned * sizeof(BDataType)); + constexpr auto c_block_space_size_aligned = math::integer_least_multiple( + cshuffle_block_desc_mshrepeat_mpershrepeat_nshrepeat_npershrepeat.GetElementSpaceSize(), + max_lds_align); + + return math::max((a_block_space_size_aligned * sizeof(ADataType) + + b_block_space_size_aligned * sizeof(BDataType)), + c_block_space_size_aligned * sizeof(CShuffleDataType)); } // block_id to matrix tile idx (m0, n0) mapping are controlled by {M01, N01} From f80776d937b9009654a7cb6e1d3f9584cd7b6e41 Mon Sep 17 00:00:00 2001 From: Sam Wu Date: Thu, 23 Mar 2023 21:58:59 -0600 Subject: [PATCH 24/31] standardize docs (#655) --- .gitignore | 11 +- .readthedocs.yaml | 18 ++ README.md | 14 +- doc/markdown/dockerhub.md | 93 ------ doc/markdown/tutorial_hello_world.md | 191 ------------- docs/{ => .doxygen}/Doxyfile | 10 +- docs/.sphinx/_toc.yml.in | 1 + docs/.sphinx/requirements.in | 1 + docs/.sphinx/requirements.txt | 269 ++++++++++++++++++ docs/{source => }/API_Reference_Guide.rst | 2 +- docs/{source => }/Contributors_Guide.rst | 0 .../Supported_Primitives_Guide.rst | 2 +- docs/conf.py | 24 ++ {doc/image => docs/data}/ck_component.png | Bin {doc/image => docs/data}/ck_layer.png | Bin docs/{source => }/dockerhub.rst | 0 docs/index.rst | 52 ++++ docs/{source => }/refs.bib | 0 docs/run_doc.sh | 15 - docs/run_doxygen.sh | 10 - docs/source/Disclaimer.rst | 13 - docs/source/Linux_Install_Guide.rst | 15 - docs/source/Makefile | 20 -- docs/source/conf.py | 219 -------------- 
docs/source/index.rst | 16 -- docs/source/rocm_logo.png | Bin 355437 -> 0 bytes docs/{source => }/tutorial_hello_world.rst | 0 27 files changed, 392 insertions(+), 604 deletions(-) create mode 100644 .readthedocs.yaml delete mode 100644 doc/markdown/dockerhub.md delete mode 100644 doc/markdown/tutorial_hello_world.md rename docs/{ => .doxygen}/Doxyfile (99%) create mode 100644 docs/.sphinx/_toc.yml.in create mode 100644 docs/.sphinx/requirements.in create mode 100644 docs/.sphinx/requirements.txt rename docs/{source => }/API_Reference_Guide.rst (98%) rename docs/{source => }/Contributors_Guide.rst (100%) rename docs/{source => }/Supported_Primitives_Guide.rst (99%) create mode 100644 docs/conf.py rename {doc/image => docs/data}/ck_component.png (100%) rename {doc/image => docs/data}/ck_layer.png (100%) rename docs/{source => }/dockerhub.rst (100%) create mode 100644 docs/index.rst rename docs/{source => }/refs.bib (100%) delete mode 100755 docs/run_doc.sh delete mode 100755 docs/run_doxygen.sh delete mode 100644 docs/source/Disclaimer.rst delete mode 100644 docs/source/Linux_Install_Guide.rst delete mode 100644 docs/source/Makefile delete mode 100644 docs/source/conf.py delete mode 100644 docs/source/index.rst delete mode 100644 docs/source/rocm_logo.png rename docs/{source => }/tutorial_hello_world.rst (100%) diff --git a/.gitignore b/.gitignore index 5667695bb5..362fb9e2ef 100644 --- a/.gitignore +++ b/.gitignore @@ -48,6 +48,11 @@ build* .gdb_history install.dir* -# directories containing generated documentation -docs/source/_build/ -docs/docBin/ +# documentation artifacts +build/ +_build/ +_images/ +_static/ +_templates/ +_toc.yml +docBin/ diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000000..b739536837 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,18 @@ +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +version: 2 + +build: + os: ubuntu-22.04 + tools: + 
python: "3.8" + +sphinx: + configuration: docs/conf.py + +formats: [htmlzip] + +python: + install: + - requirements: docs/.sphinx/requirements.txt diff --git a/README.md b/README.md index 151da974a5..04199f11bf 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ CK utilizes two concepts to achieve performance portability and code maintainabi * A tile-based programming model * Algorithm complexity reduction for complex ML operators, using innovative technique we call "Tensor Coordinate Transformation". -![ALT](/doc/image/ck_component.png "CK Components") +![ALT](/docs/data/ck_component.png "CK Components") ## Code Structure Current CK library are structured into 4 layers: @@ -16,7 +16,17 @@ Current CK library are structured into 4 layers: * "Instantiated Kernel and Invoker" layer * "Client API" layer -![ALT](/doc/image/ck_layer.png "CK Layers") +![ALT](/docs/data/ck_layer.png "CK Layers") + +## Documentation + +Run the steps below to build documentation locally. + +``` +cd docs +pip3 install -r .sphinx/requirements.txt +python3 -m sphinx -T -E -b html -d _build/doctrees -D language=en . _build/html +``` ## Contributors The list of developers and contributors is here: [Contributors](/CONTRIBUTORS.md) diff --git a/doc/markdown/dockerhub.md b/doc/markdown/dockerhub.md deleted file mode 100644 index 91b6cb2295..0000000000 --- a/doc/markdown/dockerhub.md +++ /dev/null @@ -1,93 +0,0 @@ -## CK docker hub - -[Docker hub](https://hub.docker.com/r/rocm/composable_kernel) - -## Why do I need this? - -To make our lives easier and bring Composable Kernel dependencies together, we recommend using docker images. - -## So what is Composable Kernel? - -Composable Kernel (CK) library aims to provide a programming model for writing performance critical kernels for machine learning workloads across multiple architectures including GPUs, CPUs, etc, through general purpose kernel languages, like HIP C++. 
- -To get the CK library - -``` -git clone https://github.com/ROCmSoftwarePlatform/composable_kernel.git -``` - -run a docker container - -``` -docker run \ --it \ ---privileged \ ---group-add sudo \ --w /root/workspace \ --v ${PATH_TO_LOCAL_WORKSPACE}:/root/workspace \ -rocm/composable_kernel:ck_ub20.04_rocm5.3_release \ -/bin/bash -``` - -and build the CK - -``` -mkdir build && cd build - -# Need to specify target ID, example below is for gfx908 and gfx90a -cmake \ --D CMAKE_PREFIX_PATH=/opt/rocm \ --D CMAKE_CXX_COMPILER=/opt/rocm/bin/hipcc \ --D CMAKE_CXX_FLAGS="-O3" \ --D CMAKE_BUILD_TYPE=Release \ --D GPU_TARGETS="gfx908;gfx90a" \ -.. -``` - -and - -``` -make -j examples tests -``` - -To run all the test cases including tests and examples run - -``` -make test -``` - -We can also run specific examples or tests like - -``` -./bin/example_gemm_xdl_fp16 -./bin/test_gemm_fp16 -``` - -For more details visit [CK github repo](https://github.com/ROCmSoftwarePlatform/composable_kernel), [CK examples](https://github.com/ROCmSoftwarePlatform/composable_kernel/tree/develop/example), [even more CK examples](https://github.com/ROCmSoftwarePlatform/composable_kernel/tree/develop/client_example). - -## And what is inside? - -The docker images have everything you need for running CK including: - -* [ROCm](https://www.amd.com/en/graphics/servers-solutions-rocm) -* [CMake](https://cmake.org/) -* [Compiler](https://github.com/RadeonOpenCompute/llvm-project) - -## Which image is right for me? - -Let's take a look at the image naming, for example "ck_ub20.04_rocm5.4_release". The image specs are: - -* "ck" - made for running Composable Kernel -* "ub20.04" - based on Ubuntu 20.04 -* "rocm5.4" - ROCm platform version 5.4 -* "release" - compiler version is release - -So just pick the right image for your project dependencies and you're all set. 
- -## DIY starts here - -If you need to customize a docker image or just can't stop tinkering, feel free to adjust the [Dockerfile](https://github.com/ROCmSoftwarePlatform/composable_kernel/blob/develop/Dockerfile) for your needs. - -## License - -CK is released under the MIT [license](https://github.com/ROCmSoftwarePlatform/composable_kernel/blob/develop/LICENSE). diff --git a/doc/markdown/tutorial_hello_world.md b/doc/markdown/tutorial_hello_world.md deleted file mode 100644 index 297df10b5c..0000000000 --- a/doc/markdown/tutorial_hello_world.md +++ /dev/null @@ -1,191 +0,0 @@ -## CK Hello world - -## Motivation - -This tutorial is aimed at engineers dealing with artificial intelligence and machine learning who would like to optimize their pipelines and squeeze every performance drop by adding Composable Kernel (CK) library to their projects. We would like to make the CK library approachable so the tutorial is not based on the latest release and doesn't have all the bleeding edge features, but it will be reproducible now and forever. - -During this tutorial we will have an introduction to the CK library, we will build it and run some examples and tests, so to say we will run a "Hello world" example. In future tutorials we will go in depth and breadth and get familiar with other tools and ways to integrate CK into your project. - -## Description - -Modern AI technology solves more and more problems in all imaginable fields, but crafting fast and efficient workflows is still challenging. CK is one of the tools to make AI heavy lifting as fast and efficient as possible. CK is a collection of optimized AI operator kernels and tools to create new ones. The library has components required for majority of modern neural networks architectures including matrix multiplication, convolution, contraction, reduction, attention modules, variety of activation functions, fused operators and many more. - -So how do we (almost) reach the speed of light? 
CK acceleration abilities are based on: - -* Layered structure. -* Tile-based computation model. -* Tensor coordinate transformation. -* Hardware acceleration use. -* Support of low precision data types including fp16, bf16, int8 and int4. - -If you are excited and need more technical details and benchmarking results - read this awesome blog [post](https://community.amd.com/t5/instinct-accelerators/amd-composable-kernel-library-efficient-fused-kernels-for-ai/ba-p/553224). - -For more details visit our [github repo](https://github.com/ROCmSoftwarePlatform/composable_kernel). - -## Hardware targets - -CK library fully supports "gfx908" and "gfx90a" GPU architectures and only some operators are supported for "gfx1030". Let's check the hardware you have at hand and decide on the target GPU architecture - -GPU Target AMD GPU -gfx908 Radeon Instinct MI100 -gfx90a Radeon Instinct MI210, MI250, MI250X -gfx1030 Radeon PRO V620, W6800, W6800X, W6800X Duo, W6900X, RX 6800, RX 6800 XT, RX 6900 XT, RX 6900 XTX, RX 6950 XT - -There are also [cloud options](https://aws.amazon.com/ec2/instance-types/g4/) you can find if you don't have an AMD GPU at hand. - -## Build the library - -First let's clone the library and rebase to the tested version: - -``` -git clone https://github.com/ROCmSoftwarePlatform/composable_kernel.git -cd composable_kernel/ -git checkout tutorial_hello_world -``` - -To make our lives easier we prepared [docker images](https://hub.docker.com/r/rocm/composable_kernel) with all the necessary dependencies. Pick the right image and create a container. In this tutorial we use "rocm/composable_kernel:ck_ub20.04_rocm5.3_release" image, it is based on Ubuntu 20.04, ROCm v5.3, compiler release version. 
- -If your current folder is ${HOME}, start the docker container with - -``` -docker run \ --it \ ---privileged \ ---group-add sudo \ --w /root/workspace \ --v ${HOME}:/root/workspace \ -rocm/composable_kernel:ck_ub20.04_rocm5.3_release \ -/bin/bash -``` - -If your current folder is different from ${HOME}, adjust the line `-v ${HOME}:/root/workspace` to fit your folder structure. - -Inside the docker container current folder is "~/workspace", library path is "~/workspace/composable_kernel", navigate to the library - -``` -cd composable_kernel/ -``` - -Create and go to the "build" directory - -``` -mkdir build && cd build -``` - -In the previous section we talked about target GPU architecture. Once you decide which one is right for you, run cmake using the right GPU_TARGETS flag - -``` -cmake \ --D CMAKE_PREFIX_PATH=/opt/rocm \ --D CMAKE_CXX_COMPILER=/opt/rocm/bin/hipcc \ --D CMAKE_CXX_FLAGS="-O3" \ --D CMAKE_BUILD_TYPE=Release \ --D BUILD_DEV=OFF \ --D GPU_TARGETS="gfx908;gfx90a;gfx1030" .. -``` - -If everything went well the cmake run will end up with: - -``` --- Configuring done --- Generating done --- Build files have been written to: "/root/workspace/composable_kernel/build" -``` - -Finally, we can build examples and tests - -``` -make -j examples tests -``` - -If everything is smooth, you'll see - -``` -Scanning dependencies of target tests -[100%] Built target tests -``` - -## Run examples and tests - -Examples are listed as test cases as well, so we can run all examples and tests with - -``` -ctest -``` - -You can check the list of all tests by running - -``` -ctest -N -``` - -We can also run them separately, here is a separate example execution. - -``` -./bin/example_gemm_xdl_fp16 1 1 1 -``` - -The arguments "1 1 1" mean that we want to run this example in the mode: verify results with CPU, initialize matrices with integers and benchmark the kernel execution. You can play around with these parameters and see how output and execution results change. 
- -If everything goes well and you have a device based on gfx908 or gfx90a architecture you should see something like - -``` -a_m_k: dim 2, lengths {3840, 4096}, strides {4096, 1} -b_k_n: dim 2, lengths {4096, 4096}, strides {1, 4096} -c_m_n: dim 2, lengths {3840, 4096}, strides {4096, 1} -launch_and_time_kernel: grid_dim {480, 1, 1}, block_dim {256, 1, 1} -Warm up 1 time -Start running 10 times... -Perf: 1.10017 ms, 117.117 TFlops, 87.6854 GB/s, DeviceGemmXdl<256, 256, 128, 4, 8, 32, 32, 4, 2> NumPrefetch: 1, LoopScheduler: Default, PipelineVersion: v1 -``` - -Meanwhile, running it on a gfx1030 device should result in - -``` -a_m_k: dim 2, lengths {3840, 4096}, strides {4096, 1} -b_k_n: dim 2, lengths {4096, 4096}, strides {1, 4096} -c_m_n: dim 2, lengths {3840, 4096}, strides {4096, 1} -DeviceGemmXdl<256, 256, 128, 4, 8, 32, 32, 4, 2> NumPrefetch: 1, LoopScheduler: Default, PipelineVersion: v1 does not support this problem -``` - -But don't panic, some of the operators are supported on gfx1030 architecture, so you can run a separate example like - -``` -./bin/example_gemm_dl_fp16 1 1 1 -``` - -and it should result in something nice similar to - -``` -a_m_k: dim 2, lengths {3840, 4096}, strides {1, 4096} -b_k_n: dim 2, lengths {4096, 4096}, strides {4096, 1} -c_m_n: dim 2, lengths {3840, 4096}, strides {4096, 1} -arg.a_grid_desc_k0_m0_m1_k1_{2048, 3840, 2} -arg.b_grid_desc_k0_n0_n1_k1_{2048, 4096, 2} -arg.c_grid_desc_m_n_{ 3840, 4096} -launch_and_time_kernel: grid_dim {960, 1, 1}, block_dim {256, 1, 1} -Warm up 1 time -Start running 10 times... -Perf: 3.65695 ms, 35.234 TFlops, 26.3797 GB/s, DeviceGemmDl<256, 128, 128, 16, 2, 4, 4, 1> -``` - -Or we can run a separate test - -``` -ctest -R test_gemm_fp16 -``` - -If everything goes well you should see something like - -``` -Start 121: test_gemm_fp16 -1/1 Test #121: test_gemm_fp16 ................... 
Passed 51.81 sec - -100% tests passed, 0 tests failed out of 1 -``` - -## Summary - -In this tutorial we took the first look at the Composable Kernel library, built it on your system and ran some examples and tests. Stay tuned, in the next tutorial we will run kernels with different configs to find out the best one for your hardware and task. - -P.S.: Don't forget to switch out the cloud instance if you have launched one, you can find better ways to spend your money for sure! diff --git a/docs/Doxyfile b/docs/.doxygen/Doxyfile similarity index 99% rename from docs/Doxyfile rename to docs/.doxygen/Doxyfile index ca354598b2..1084f94c81 100644 --- a/docs/Doxyfile +++ b/docs/.doxygen/Doxyfile @@ -51,7 +51,7 @@ PROJECT_BRIEF = "prototype interfaces compatible with ROCm platform and # pixels and the maximum width should not exceed 200 pixels. Doxygen will copy # the logo to the output directory. -PROJECT_LOGO = ./rocm.jpg +PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path # into which the generated documentation will be written. If a relative path is @@ -775,10 +775,10 @@ WARN_LOGFILE = # spaces. See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. -INPUT = ../include/ck/tensor_operation/gpu/grid \ - ../include/ck/tensor_operation/gpu/block \ - ../include/ck/tensor_operation/gpu/thread \ - ../library/include/ck/library/utility +INPUT = ../../include/ck/tensor_operation/gpu/grid \ + ../../include/ck/tensor_operation/gpu/block \ + ../../include/ck/tensor_operation/gpu/thread \ + ../../library/include/ck/library/utility # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. 
Doxygen uses diff --git a/docs/.sphinx/_toc.yml.in b/docs/.sphinx/_toc.yml.in new file mode 100644 index 0000000000..ff21248873 --- /dev/null +++ b/docs/.sphinx/_toc.yml.in @@ -0,0 +1 @@ +root: index diff --git a/docs/.sphinx/requirements.in b/docs/.sphinx/requirements.in new file mode 100644 index 0000000000..2dfc7b076b --- /dev/null +++ b/docs/.sphinx/requirements.in @@ -0,0 +1 @@ +git+https://github.com/RadeonOpenCompute/rocm-docs-core.git diff --git a/docs/.sphinx/requirements.txt b/docs/.sphinx/requirements.txt new file mode 100644 index 0000000000..e2b7935903 --- /dev/null +++ b/docs/.sphinx/requirements.txt @@ -0,0 +1,269 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile requirements.in +# +accessible-pygments==0.0.4 + # via pydata-sphinx-theme +alabaster==0.7.13 + # via sphinx +asttokens==2.2.1 + # via stack-data +attrs==22.2.0 + # via + # jsonschema + # jupyter-cache +babel==2.12.1 + # via + # pydata-sphinx-theme + # sphinx +backcall==0.2.0 + # via ipython +beautifulsoup4==4.12.0 + # via pydata-sphinx-theme +breathe==4.34.0 + # via rocm-docs-core +certifi==2022.12.7 + # via requests +cffi==1.15.1 + # via pynacl +charset-normalizer==3.1.0 + # via requests +click==8.1.3 + # via + # jupyter-cache + # sphinx-external-toc +comm==0.1.3 + # via ipykernel +debugpy==1.6.6 + # via ipykernel +decorator==5.1.1 + # via ipython +deprecated==1.2.13 + # via pygithub +docutils==0.16 + # via + # breathe + # myst-parser + # pydata-sphinx-theme + # rocm-docs-core + # sphinx +executing==1.2.0 + # via stack-data +fastjsonschema==2.16.3 + # via nbformat +gitdb==4.0.10 + # via gitpython +gitpython==3.1.31 + # via rocm-docs-core +greenlet==2.0.2 + # via sqlalchemy +idna==3.4 + # via requests +imagesize==1.4.1 + # via sphinx +importlib-metadata==6.1.0 + # via + # jupyter-cache + # myst-nb +importlib-resources==5.10.4 + # via rocm-docs-core +ipykernel==6.22.0 + # via myst-nb +ipython==8.11.0 + # via + # ipykernel + # 
myst-nb +jedi==0.18.2 + # via ipython +jinja2==3.1.2 + # via + # myst-parser + # sphinx +jsonschema==4.17.3 + # via nbformat +jupyter-cache==0.5.0 + # via myst-nb +jupyter-client==8.1.0 + # via + # ipykernel + # nbclient +jupyter-core==5.3.0 + # via + # ipykernel + # jupyter-client + # nbformat +linkify-it-py==1.0.3 + # via myst-parser +markdown-it-py==2.2.0 + # via + # mdit-py-plugins + # myst-parser +markupsafe==2.1.2 + # via jinja2 +matplotlib-inline==0.1.6 + # via + # ipykernel + # ipython +mdit-py-plugins==0.3.5 + # via myst-parser +mdurl==0.1.2 + # via markdown-it-py +myst-nb==0.17.1 + # via rocm-docs-core +myst-parser[linkify]==0.18.1 + # via + # myst-nb + # rocm-docs-core +nbclient==0.5.13 + # via + # jupyter-cache + # myst-nb +nbformat==5.8.0 + # via + # jupyter-cache + # myst-nb + # nbclient +nest-asyncio==1.5.6 + # via + # ipykernel + # nbclient +packaging==23.0 + # via + # ipykernel + # pydata-sphinx-theme + # sphinx +parso==0.8.3 + # via jedi +pexpect==4.8.0 + # via ipython +pickleshare==0.7.5 + # via ipython +platformdirs==3.1.1 + # via jupyter-core +prompt-toolkit==3.0.38 + # via ipython +psutil==5.9.4 + # via ipykernel +ptyprocess==0.7.0 + # via pexpect +pure-eval==0.2.2 + # via stack-data +pycparser==2.21 + # via cffi +pydata-sphinx-theme==0.13.1 + # via sphinx-book-theme +pygithub==1.57 + # via rocm-docs-core +pygments==2.14.0 + # via + # accessible-pygments + # ipython + # pydata-sphinx-theme + # sphinx +pyjwt==2.6.0 + # via pygithub +pynacl==1.5.0 + # via pygithub +pyrsistent==0.19.3 + # via jsonschema +python-dateutil==2.8.2 + # via jupyter-client +pyyaml==6.0 + # via + # jupyter-cache + # myst-nb + # myst-parser + # sphinx-external-toc +pyzmq==25.0.2 + # via + # ipykernel + # jupyter-client +requests==2.28.2 + # via + # pygithub + # sphinx +rocm-docs-core @ git+https://github.com/RadeonOpenCompute/rocm-docs-core.git + # via -r requirements.in +six==1.16.0 + # via + # asttokens + # python-dateutil +smmap==5.0.0 + # via gitdb 
+snowballstemmer==2.2.0 + # via sphinx +soupsieve==2.4 + # via beautifulsoup4 +sphinx==4.3.1 + # via + # breathe + # myst-nb + # myst-parser + # pydata-sphinx-theme + # rocm-docs-core + # sphinx-book-theme + # sphinx-copybutton + # sphinx-design + # sphinx-external-toc + # sphinx-notfound-page +sphinx-book-theme==1.0.0rc2 + # via rocm-docs-core +sphinx-copybutton==0.5.1 + # via rocm-docs-core +sphinx-design==0.3.0 + # via rocm-docs-core +sphinx-external-toc==0.3.1 + # via rocm-docs-core +sphinx-notfound-page==0.8.3 + # via rocm-docs-core +sphinxcontrib-applehelp==1.0.4 + # via sphinx +sphinxcontrib-devhelp==1.0.2 + # via sphinx +sphinxcontrib-htmlhelp==2.0.1 + # via sphinx +sphinxcontrib-jsmath==1.0.1 + # via sphinx +sphinxcontrib-qthelp==1.0.3 + # via sphinx +sphinxcontrib-serializinghtml==1.1.5 + # via sphinx +sqlalchemy==1.4.47 + # via jupyter-cache +stack-data==0.6.2 + # via ipython +tabulate==0.9.0 + # via jupyter-cache +tornado==6.2 + # via + # ipykernel + # jupyter-client +traitlets==5.9.0 + # via + # comm + # ipykernel + # ipython + # jupyter-client + # jupyter-core + # matplotlib-inline + # nbclient + # nbformat +typing-extensions==4.5.0 + # via + # myst-nb + # myst-parser +uc-micro-py==1.0.1 + # via linkify-it-py +urllib3==1.26.15 + # via requests +wcwidth==0.2.6 + # via prompt-toolkit +wrapt==1.15.0 + # via deprecated +zipp==3.15.0 + # via importlib-metadata + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/docs/source/API_Reference_Guide.rst b/docs/API_Reference_Guide.rst similarity index 98% rename from docs/source/API_Reference_Guide.rst rename to docs/API_Reference_Guide.rst index 3665049dd6..b59c6e3026 100644 --- a/docs/source/API_Reference_Guide.rst +++ b/docs/API_Reference_Guide.rst @@ -49,4 +49,4 @@ used in the CK GPU implementation of Flashattention. .. doxygenstruct:: ck::ThreadwiseTensorSliceTransfer_StaticToStatic -.. bibliography:: \ No newline at end of file +.. 
bibliography:: diff --git a/docs/source/Contributors_Guide.rst b/docs/Contributors_Guide.rst similarity index 100% rename from docs/source/Contributors_Guide.rst rename to docs/Contributors_Guide.rst diff --git a/docs/source/Supported_Primitives_Guide.rst b/docs/Supported_Primitives_Guide.rst similarity index 99% rename from docs/source/Supported_Primitives_Guide.rst rename to docs/Supported_Primitives_Guide.rst index 066e024bc0..4c3adf67d7 100644 --- a/docs/source/Supported_Primitives_Guide.rst +++ b/docs/Supported_Primitives_Guide.rst @@ -72,4 +72,4 @@ Else if :math:`j>1`, \tilde{Y}_{ij} &= \diag(z^{new}_{i})^{-1} \exp(\tilde{m}_{ij} - m^{new}_i ) \tilde{P}_{ij} \\ z_i &= z^{new}_i \\ m_i &= m^{new}_i \\ - \end{align} \ No newline at end of file + \end{align} diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000000..9b43b7155f --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,24 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. 
For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +from rocm_docs import ROCmDocs + +docs_core = ROCmDocs("Composable Kernel Documentation") +docs_core.run_doxygen() +docs_core.setup() + +mathjax3_config = { +'tex': { + 'macros': { + 'diag': '\\operatorname{diag}', + } + } +} + +bibtex_bibfiles = ['refs.bib'] + +for sphinx_var in ROCmDocs.SPHINX_VARS: + globals()[sphinx_var] = getattr(docs_core, sphinx_var) diff --git a/doc/image/ck_component.png b/docs/data/ck_component.png similarity index 100% rename from doc/image/ck_component.png rename to docs/data/ck_component.png diff --git a/doc/image/ck_layer.png b/docs/data/ck_layer.png similarity index 100% rename from doc/image/ck_layer.png rename to docs/data/ck_layer.png diff --git a/docs/source/dockerhub.rst b/docs/dockerhub.rst similarity index 100% rename from docs/source/dockerhub.rst rename to docs/dockerhub.rst diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000000..f4e66c1b51 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,52 @@ +============================ +Composable Kernel User Guide +============================ + +------------ +Introduction +------------ + +This document contains instructions for installing, using, and contributing to Composable Kernel (CK). + +----------- +Methodology +----------- + +Composable Kernel (CK) library aims to provide a programming model for writing performance critical kernels for machine learning workloads across multiple architectures including GPUs, CPUs, etc, through general purpose kernel languages, like HIP C++. + +CK utilizes two concepts to achieve performance portability and code maintainability: + +* A tile-based programming model +* Algorithm complexity reduction for complex ML operators, using innovative technique we call "Tensor Coordinate Transformation". + +.. 
image:: data/ck_component.png + :alt: CK Components + +-------------- +Code Structure +-------------- + +Current CK library are structured into 4 layers: + +* "Templated Tile Operators" layer +* "Templated Kernel and Invoker" layer +* "Instantiated Kernel and Invoker" layer +* "Client API" layer + +.. image:: data/ck_layer.png + :alt: CK Layers + +Documentation Roadmap +^^^^^^^^^^^^^^^^^^^^^ +The following is a list of CK documents in the suggested reading order: + +.. toctree:: + :maxdepth: 5 + :caption: Contents: + :numbered: + + tutorial_hello_world + dockerhub + Supported_Primitives_Guide + API_Reference_Guide + Contributors_Guide diff --git a/docs/source/refs.bib b/docs/refs.bib similarity index 100% rename from docs/source/refs.bib rename to docs/refs.bib diff --git a/docs/run_doc.sh b/docs/run_doc.sh deleted file mode 100755 index 58b0936c67..0000000000 --- a/docs/run_doc.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -set -eu - -# Make this directory the PWD -cd "$(dirname "${BASH_SOURCE[0]}")" - -# Build doxygen info -bash run_doxygen.sh - -# Build sphinx docs -cd source -make clean -make -e SPHINXOPTS="-t html" html -make latexpdf diff --git a/docs/run_doxygen.sh b/docs/run_doxygen.sh deleted file mode 100755 index f66c038c1b..0000000000 --- a/docs/run_doxygen.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -set -eu - -# Make this directory the PWD -cd "$(dirname "${BASH_SOURCE[0]}")" - -# Build the doxygen info -rm -rf docBin -doxygen Doxyfile diff --git a/docs/source/Disclaimer.rst b/docs/source/Disclaimer.rst deleted file mode 100644 index 5dcff748c8..0000000000 --- a/docs/source/Disclaimer.rst +++ /dev/null @@ -1,13 +0,0 @@ -************ -Disclaimer -************ -------------------------------- -AMD's standard legal Disclaimer -------------------------------- - -The information presented in this document is for informational purposes only and may contain technical inaccuracies, omissions, and typographical errors. 
The information contained herein is subject to change and may be rendered inaccurate for many reasons, including but not limited to product and roadmap changes, component and motherboard version changes, new model and/or product releases, product differences between differing manufacturers, software changes, BIOS flashes, firmware upgrades, or the like. Any computer system has risks of security vulnerabilities that cannot be completely prevented or mitigated. AMD assumes no obligation to update or otherwise correct or revise this information. However, AMD reserves the right to revise this information and to make changes from time to time to the content hereof without obligation of AMD to notify any person of such revisions or changes. THIS INFORMATION IS PROVIDED 'AS IS." AMD MAKES NO REPRESENTATIONS OR WARRANTIES WITH RESPECT TO THE CONTENTS HEREOF AND ASSUMES NO RESPONSIBILITY FOR ANY INACCURACIES, ERRORS, OR OMISSIONS THAT MAY APPEAR IN THIS INFORMATION. AMD SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR ANY PARTICULAR PURPOSE. IN NO EVENT WILL AMD BE LIABLE TO ANY PERSON FOR ANY RELIANCE, DIRECT, INDIRECT, SPECIAL, OR OTHER CONSEQUENTIAL DAMAGES ARISING FROM THE USE OF ANY INFORMATION CONTAINED HEREIN, EVEN IF AMD IS EXPRESSLY ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. AMD, the AMD Arrow logo, Radeon, Ryzen, Epyc, and combinations thereof are trademarks of Advanced Micro Devices, Inc. Other product names used in this publication are for identification purposes only and may be trademarks of their respective companies. Google(R) is a registered trademark of Google LLC. PCIe(R) is a registered trademark of PCI-SIG Corporation. Linux(R) is the registered trademark of Linus Torvalds in the U.S. and other countries. Ubuntu(R) and the Ubuntu logo are registered trademarks of Canonical Ltd. 
Other product names used in this publication are for identification purposes only and may be trademarks of their respective companies. (C)2023 Advanced Micro Devices, Inc. All rights reserved. - ----------------------- -Third Party Disclaimer ----------------------- -Third-party content is licensed to you directly by the third party that owns the content and is not licensed to you by AMD. ALL LINKED THIRD-PARTY CONTENT IS PROVIDED "AS IS" WITHOUT A WARRANTY OF ANY KIND. USE OF SUCH THIRD-PARTY CONTENT IS DONE AT YOUR SOLE DISCRETION AND UNDER NO CIRCUMSTANCES WILL AMD BE LIABLE TO YOU FOR ANY THIRD-PARTY CONTENT. YOU ASSUME ALL RISK AND ARE SOLELY RESPONSIBLE FOR ANY DAMAGES THAT MAY ARISE FROM YOUR USE OF THIRD-PARTY CONTENT. diff --git a/docs/source/Linux_Install_Guide.rst b/docs/source/Linux_Install_Guide.rst deleted file mode 100644 index 0e16bb6a98..0000000000 --- a/docs/source/Linux_Install_Guide.rst +++ /dev/null @@ -1,15 +0,0 @@ -===================== -Getting Started Guide -===================== - ------------- -Introduction ------------- - -This document contains instructions for installing, using, and contributing to Composable Kernel (CK). - -Documentation Roadmap -^^^^^^^^^^^^^^^^^^^^^ -The following is a list of CK documents in the suggested reading order: - -[TODO] \ No newline at end of file diff --git a/docs/source/Makefile b/docs/source/Makefile deleted file mode 100644 index bde66ebc25..0000000000 --- a/docs/source/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -SPHINXPROJ = CK -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. 
$(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/source/conf.py b/docs/source/conf.py deleted file mode 100644 index 65ac187034..0000000000 --- a/docs/source/conf.py +++ /dev/null @@ -1,219 +0,0 @@ -"""Copyright (C) 2018-2023 Advanced Micro Devices, Inc. All rights reserved. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop- - ies of the Software, and to permit persons to whom the Software is furnished - to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM- - PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS - FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR - COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER - IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE- - CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -""" - -# -*- coding: utf-8 -*- -# -# Composable Kernel (CK) docuumentation build configuration file, based on -# rocBLAS documentation build configuration file, created by -# sphinx-quickstart on Mon Jan 8 16:34:42 2018. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. 
- -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -# import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) - -import os -import sys -import subprocess - -read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True' - -if read_the_docs_build: - subprocess.call('../run_doxygen.sh') - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = ['sphinx.ext.mathjax', 'breathe', 'sphinxcontrib.bibtex'] - -breathe_projects = { "CK": "../docBin/xml" } -breathe_default_project = "CK" - -bibtex_bibfiles = ['refs.bib'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Composable Kernel (CK)' -copyright = u'2018-2023, Advanced Micro Devices' -author = u'Advanced Micro Devices' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -#version = u'0.8' -# The full version, including alpha/beta/rc tags. -#release = u'0.8' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. 
-# Usually you set "language" from the command line for these cases. -language = 'en' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -# html_theme = 'alabaster' - -#if read_the_docs_build: -# html_theme = 'default' -#else: -import sphinx_rtd_theme -html_theme = "sphinx_rtd_theme" -html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] -html_logo = "rocm_logo.png" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - 'logo_only': True, - 'display_version': True -} - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] - -# Custom sidebar templates, must be a dictionary that maps document names -# to template names. 
-# -# This is required for the alabaster theme -# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars -# html_sidebars = { -# '**': [ -# 'relations.html', # needs 'show_related': True theme option to display -# 'searchbox.html', -# ] -# } - -mathjax3_config = { -'tex': { - 'macros': { - 'diag': '\\operatorname{diag}', - } - } -} - -# -- Options for HTMLHelp output ------------------------------------------ - -# Output file base name for HTML help builder. -htmlhelp_basename = 'CKdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # - 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # - 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # - 'preamble': r''' -\setcounter{tocdepth}{5} -\newcommand{\diag}{\operatorname{diag}} -''', - - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'CK.tex', u'Composabl Kernel (CK) Documentation', - u'Advanced Micro Devices', 'manual'), -] - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'ck', u'Composable Kernel (CK) Documentation', - [author], 1) -] - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. 
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'CK', u'Composable Kernel (CK) Documentation', - author, 'CK', 'Composable Kernel for AMD ROCm', - 'Miscellaneous'), -] diff --git a/docs/source/index.rst b/docs/source/index.rst deleted file mode 100644 index 68adf58afd..0000000000 --- a/docs/source/index.rst +++ /dev/null @@ -1,16 +0,0 @@ -============================ -Composable Kernel User Guide -============================ - -.. toctree:: - :maxdepth: 5 - :caption: Contents: - :numbered: - - Linux_Install_Guide - tutorial_hello_world - dockerhub - Supported_Primitives_Guide - API_Reference_Guide - Contributors_Guide - Disclaimer \ No newline at end of file diff --git a/docs/source/rocm_logo.png b/docs/source/rocm_logo.png deleted file mode 100644 index ee09dd09c71e3de1081f0f4c8b67967f79d768fe..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 355437 zcmV)XK&`)tP)Px#1ZP1_K>z@;j|==^1poj532;bRa{vGi!vFvd!vV){sAK>D|D{PpK~#8NZ2fh3 z9a+An3+D7WmCR{IF*7r>#mvlLvBk{H%#0RWvP_bhnbJ8QeLa~Y z24dbj6c;>#alWND}L3mVMgIDu&*cuta_&^_eS=Ej82o4H_ zt$`uz<&_Yop^YF3DTGT)B1}mde$qz~qpXa0c}2t>KZI5@16&LZ#aSPJJj%+!qL(k$ zgG2DJwgzV#>(G{;i>9(lbhmY4cxV9QV?&r39mW*bhx>Za+17%lrUumT`DOfG1%-vk z%*;eeaxx;rBM|5x03RP8xVX5$*wh4y{BBaxGB|Sd7>*n{f)giBproi6Cmfw%Wn~2m zOG{YV*ud1<23jVjkkQh?VP$pfS5m`4H7y*{(8WJeJsfbhm9 zL^ZV_y15l`9i2$%>PCEL7h>8w5Y^U(NLEyPyI3)u9f<4hL=wM$3jgP6L;c7c9YWUl z2(l)|kUcqp+?iPvEi9q@^g3!+w$Z${gQDRj_~+EYJ~j`oDP{0VuR?rPKl;wx#@5q6 zVCn9+7`b>KgBNaN{rL|#|HB`!`1B+i%`{pNeDy{(OJe-P`;cW-LryC1R6TH}4EZ^(wYp#F& z3hOVv#?rGFn19T_{pcCSA3VXxy~h~5_Xs_A@1yh19kkuNiI(fv(Q@q?TCQG0^OdV& z3G4rOyXo>3H0@qS)6Om$FYTb={CTuox`c);{!cc}qwCytw65-=ZuT5vDtloanFE`s zT-e6s!!fBCZs`^9$gYB0W;tBaiuk|J!-=>|*hVD7EFcnk?*7nr_Jq3q2`HLdLDtY1 z@+=K=OXykIK+nPo_HLdCk4=On+nl+DC2VZ0VPRn|uA5ofLrzs6UmQ}x7su4GS56Pd 
zbu6H2>jG^TZ?;o+95ZmjK}}a2)b)XcT?AAEGN2z@3e}K2?6Z!be5B;Z3c#ldL3pRLtq#4t&8V&`#i`tMG!T3}%{7>4uR%|BKIX@} zxuJGqX?+Ek_|H2%zks{fZ(?KZ3`R!AaGw9Xix)3om5xc2rh3psKPS z?ruIvIhlb9!f9w|7)|9hsLd}y?#Xl{M#UhSuuM-+M?i28OwCQO7CPL4forZ&>BXkY)Ag3q~X&G4p?Jsm!z?-u=5YzI zN=kvv$u!vV^EhVb!i61=2mfb2>|_G?n}iT{LWMOnBC?slYi&aee?I|uiQPR&V8wQJ z{vCEv9RwgNx}zPjT>|(zkl5RWWWw)ce;-Z`4&Wp^&g{u)WQ_UR-Om`keGm1U zTc|m+f!eK2G@Lt&Cc>}d(glp&y^V!u&j_XOFh$S}-MoXo>$fmSs0`kFfRTrfG4}W= zCLTW%p*Q#R#armT`oBR>ECGDqd?(flfwuaM0JZ;T3HPsm_Y9qulF_WULo)Zy^A|&WCzf8@giEc&ZCJP%Fy{csGZzGXl^S^{L-Q677yd#Y}hB2 z!!5g>RSU#6Y$W|7chQM8m`<47x5p(6D!bvZXC#jZ7h-tp^1{Pv6!a zTBhbOWCt7^9ShHZAea$?LK_u;XJKgpGXhaV+Xx4bE90X*GI)Ql9KMiH$1x2PXxY2L zjB;Y=5eQ`)4@et%;FysQ=^XMNwrjnrV7XZH+V!njZ~tWTGE4OEb7~ z>pFJWNX9$caQ{3_kDyywoI__<3t=^it&P(-yRn4UCc?6-4Xw=$2n_Itr-ui^Xe%L+ z;V?8ap~V!Supl2{p&@X0ut#%#9%kEHFhAUj;ek#Rlonx>4L<)=5t=F+aWXaur!q3p z$oH$Kft{k+mFE0_Vh^fDa7(Id;-In`_7i*p^bTpU z7d0?}thqH*9Zo>Q*$tZRUeNOmfFb{$NkkMZViRDQm<*egG}!U8*t2tTIz_w6FM>Ov z=UY*Qz}i}bG&CTTz!NR6rBwvpua=k8(~Cq_Y*!Z{N6-;|V(~jibqcok27K`}z67xd z%3i*vuOI276Ewh4n%^7>XI4=(xrWq^N%-Y75_p7NRtdtH6cVbNki-9L?c`~UT)2k$ zTTjrxeH9&NE@JZ5L(D#WhSqbJP_eRxsxzCY+}uR<*)0)#g7x)XzlOz^FR}TjGdGU%bNUufM~>i*GUY=p}~VLa&QpZl{^H-nd2Z z-6Z%3zbn@XIKlF0w!aqdYvz4TyH^Rl%cAKuUOY$epGEuG3+OttgQ2sxkybNIz@$OP zB@spe0`Mx~Thxhw@^1JPwctcr8BD@XLc=YX0CI=Aojdeg{9r^N>7MX}s?`Lr8Iu? 
zg%m!f@f}w;fdSvwF*FJeVX-h}YSXaygS@pr6x|b{9a#*^>~>gWcS1G10{d-}@R3Fk zK2-F@=gI;2f@b%bY9KySBka^dxD3M=Dv@|+gblQdKp7nDLoFj#YfleqnwwD5Sc}%q z7PR%WV`c3$Ze88Q`I$*9R2JdxSRZyKhOjcygR+U|ySRq4Gs9S)97H9JF_bpt zW@`gqXIDhi9IUw!>gwvEqM#UQv9a)Ru}8RvJ=QwvF;bL?^`TbuHJ2kQI)u?K12co; ze6M+oa${+%EJa0CF|zYAP*z%kYDUEbnn)~7@8ro;czAljmOY6(qqGa{#@EUk&Z?@2 zr{&}s8luPE9-RjI=+V;@tfabg<$ACe;M5uE~AaWD?7b{VpjRaCQ8;eP<3`2wF2}mY@_Sy6^z|~fEhw< z;QDQh+=|LLp=|C5ZF_fU z+B!qYl(CkbsQ^5AJtL@7K9uzgplxaf4;r3Na0pD9M$E|q3oCOPo+)FkC3FqVa70oS zAATu`cR!cHdtb`p)5FS;)G&pyvo~DAX^7l5U89m<8x#japE#)dra?2J1V*XNFgw)) zz2r8?_!QwQlX!fh9)izRL-3_qD8A4L!{-{|_*^rR%P8I#gLf82MlsOQivHei401yp z8lOT(?;vXG8fYCQ;(n%!{O3I0-NE(o5iDn=U?DjQpkHkQbym3i4)!_ia|c~Rad^0vqGxk2oXwIeIi z2l25X7#!-ueg1$~&TL_GVFst?M){c<(a66sJ~D!wjBF$)B*V`y0D(av(A3m|9eb1$ z)MhiTI}voQ2D)&enI-7!U_2}slWr~;GB-hkiabgbr4fGk0D|}KMb5!v*rX9Y3X8&q z{RvzO3CHEaA~a{G60D&}i;qKfej)m~Vc%dpdUgE@{&?d$=29}z#Gb9m$`qI4BXOVK z@lj+9?#0IAIZgJ5?q)n6>&8TFG3pC*G1StH>HaB<4NYKtY?9w`7(IQxBJ>(sno&bz zDyynS9{+assa&MeuwwXr!2!YW_wj?9t1C>*OrfZz3<+5&96fd%M~@zZhK2?zDk{YI zD?rcO+#I&{cI-`UMd&Fqh96N1fBprA=>_GdA~w0skiTc z4?QvB@;+9K06+1z072dsUf(ys8bsRg1Tu!FaB6r43C(?ouI@nf)H23)cCh{82^MeP z!1&HZtlYbg3xvbvAAiO<0%Bl?X0x%4%B3~rP0t~J{xk}g*N}6185P^xs3!CpE}lm# z-=}@&GNbMlG@reQ!JGF9y}Pu%d+6i(;2qlEgC`gx_yp()wkO2g+2>zlmIgLQ@XZr^ zzt(~PKLKt6%vi$uKbH9TE3UIn)A)p1WE`G<^%^rTzsA&y7nmUUMhU$M+TJXUa)HlX z`R=zg!{5^UzQrgbZa3lFPAhGrkqXdjW+eYR_z1XvgkGCaR|vjc{!jQl8wkA`0>AG3 zHkui4`?jy6d|(MSff+DzONOCmDvSb8!8MZ|TFn5$YkLt~-U6STGT4MCK+DMsDi)5A z)-{KWo;g&^?4fFI3k6DmtiCa1bq%4YZv;(K3&^Oc!<6kZA|VkDlne{5o3it^w6=hi zjTH)88KB8PWJ* zaRkp!k6@yy6brM%SUt0ZbK6^3TU|msO|ZVD9HaBY7@inJF- zi|(E{A0CEng85-tDQ+@uc2}1mJ~0Ljgxws!|Ki*NZtPycZ`nBi$G6|&>EaUVJUmfi zsDn`_J6vNde3hDkXL-4JLNndYOT~0vGHNnnQC*UYp7t&b6L=#NQ|KEWLI2<&x@pyI z9qni&^s4#4EumrNlBg*^@y;@H_)T$IXXH*-@pKhDoT))k;1X#5;%0|FwD%% zP|w#22RVdjYbErej0{$6T)OOGWi>RhUrqsE$thyrThlwpcq?sU2?fS}0eaeQUeNXS z6QO4o5e-YmeA|>amgkuHCf1&y<+(80dK8z5`}2Z@eR%|RSQCNQ)Qkw0V0y7^LxSlg 
z6L!Kry?6plu(w~KC)At$dT)RiB^=~vZ5I!8M6|Yw2RkC$8IgN?kiht#!p(|GG=g+-lET-iNB>-)n&z<4@ zlZ4*b^A{LptR3Os86jZDh5dPa&N895OpBc-^u}qjUAG?)c=yqI^B(Js?KKf@&8(Jd zw+K5z@G5T;fP&=-!28wo1n5;U-c~T?*KM3b-Qroq)8;Jw(_rA12z`%a==-O`Cb5L3 z*M^YFHUt({!8;=l_Tka6_78!Hn=kZec(QtCIIf`&MMDcH7??p)Qx9?i^z;m&##k%I z_U9202>-Bfn6o1`XZka@wuGsL39RjGpiMJ8D4~Q8_sHO%K9j~jeJ+Fd_OiW7XhKHQ z0=m|2Fm?8aonIu}qff#=B?sQAB`^suhLT?)j(g=oF02xoNgYs+X~i*@0(_;lIW+r;kQdca3ucxC8jkQhaBlJePhR|8pir%J9%=Hdq zhcRQFjeRpY0V_fN7^D@AIeTK-)f?kZt{CD*xfL3QHEuxb?!MRzioh(RQd6)$Vy#W! zt|^ba5Ko*6@u2;w!plMf`Qh%^ZY;q!1lqTo^H?8j!Iqkr`90e=4aC7x|D9t8!X+{6fzeE&;+LHOoW0bW*C;$mJVrW3=_mlA=x zjCeFv7Nft5od`Ev;b6?@=m-Y-`q4umv@+T@(3YxdYf)TQhHN3)66Wy)Pk5+s<12YZ z?4{`)AoLFW1N7ARKhSpfhMtc<41+>p8WAHx@7H~KLbMeuPk^2fZ9PlM3B4)=)z*s# zFob9;nBK3@`^SBHzaF`b{`DXRO8{D=06MVyj zKP8LHNE;nT8atZ|cHEisi^yGBM$yJ9$~HGpeU`uD`HLuD-aum4AX57$P_c9tJy-9e zgWwyu^B8>u%;3F;;z5nkhmSD+nAZ2~C8l4z#_ZPw8KLyQt9e@7uO-|Tp8IPpy!u`& z@qX46p*PMbJ3{CU3(zB2hZwnsSrg9$xbl6z{(->#24jR?H(}g%`yrYc!-aYSzB>dS zqpna_ZxMKJ_VEe(^L~Y1b|LF@k{PR}|`1mk8XL&XSAandC5WOYAQo#b1hmLn?>8}?d(HkX%%|A2hiW%i_?9h*zF&| z9X9eI4=+snhGID^9s@33s5LQT)U(8;w38zEPJ4Od49UJ07cIu&bQ5i)8E9ZVCl&YV z%Fz+zMay$Wb!IG9TB>nld;k|l`>;JTip9wh%rHh@+1$d_%}p$f4PpDt3Wml8F*rJk z+R6r0u-8e5jz?;0DheygkX4umQ*Q8~fq}HDVoVPYpr#}rWhWDGiB>Ta!_R5qfB_v- z46!$A)6~U`iwB0>JTM#)il@^fn5!?v<@s^krd?iLTSEt<+5#=%+T~k#^57YM{O$(= z?>-(qxsRW}{TkQj7tjzGf@+7i5I*01@vj}hKKyWqT z+rUot@;$U&x`p1G57A4wbP*`Sgx=5t;fU@dM$Z?Rdj5)$^cxXkGyhNm#Qt&p*W1&4 zu28=|CKl~a0Nw;+@+hGKBiAF3`GwbismK}KQg-a+ucMhdn+bCMvK;Fa>QrgB4UeW=# zlrm_zMM2Lm5%!6Bh^^~FM&}?-Hnk#-NhBmIlQHuo0+UnW92x}!cAAQYW{}X(#W7Va zNT_Ksmg+)|C8I7_UJ!zr25p=iVZ_vDZe<|?&(hWsy2i#hCaZ>z_R8Wngxx>yQNafX zHSmR`9u6y-;IM)*4#*f{zpN3Cs9HhJ$OTH4o{+Qf#bLt`95PSDA;(Ne_?ALBrVW~j zy^!~>#{tVz1YIf)5^{%((s9@{69-K)vEPVgkcoHNa$=Dc;*7rfVl-A1p^J^Or<)ei z(~H8Q5|mZdps#lb^Fvd(I5LCn%37=>r(-294gDSgXmWHxfxbS*1N^ZV9E1(p&qiPn z&d0>yQbsD4(h|_*=Yl$KC+t)e;QPLIY~*L6voHg7g<0sUEWuny3kK_}G272*T~~vp zfdO2bnZvcoY2064!S&^3nr9~=T!#3l1hh7^p_Ut7w{TQ2F&aYGMr2qRsu)ja 
zCq^+pHHx{OPE65IdiXh~oqREC=ZQHFUo;t;VU&^f2LkGM=Qi-)zkZ07=|SwAU&l93 zX$@=ZSYZ^p%ZC2s)&sn}`w+i-{R%&Q^8(*}_Y8NhU&8j-80teJP-AP4-Lwom%qzxk zJ9==Xx(YR3Zs-dO#CS>^22W+6EjtsVH4Qj7HjVAUVVow2=Lx47=07l^m zuZg)COxTO6>gnOAk`ngFzJcDC!oEHBzK3)TMCd76JG_COmoE(bgJDdwG>_tY#U+XR z^L~Y%Z~#Myw(dn`@Gh%@AKSp+p-0<`5DsFnvy5(PK`a3lCxDL?!}TbBKfwaU2u#3- zzqL8R?4tP`q2566&GQ84g|-l+?JbDvZbNKeC*u3M2(C_Kjt(+@u3_xX4J$TjGw>;CfKyf}%9pm$N$_=Cy@#%w_t8#!>bv&<{rB$^I*%~+KqW|E*2U?M*pnMRf8>*t@!l2QQ3FO<==FYiwrBBw`6A zO>KN}NKUl8clW8`gF`y_=$J0PkT$@V1m73ObnwX`ZG3!47oSU-V6UPz_NrRrb0u4r zC%(`O!(Pi2M%+@U#B@M2p$|$yEs$_5#Zikq95&8kW#WkWDIB)Q!9mkgIB1-McP4U@ z(VZTP`L;TAGSaoxHDIuR@XgW1fqqmrG@z!T39XE}JtZ}$qtQ$!rDG~433U$6NYc>2 zL{tRMP+@nY<1lOEgqzU`xR-GX7t+#krr;E+LcCC(6okAOZ;X~@Vz4wDH3VM;jcBy5 z7bCsB=18b_O!O5f~^zpS} zk>SWXl|=)rL@*Uv82o#2=R970`y9#PjGsP)Pe>fbPXu7x#UD3o>+qD%8H$O}4v z=69ZD$1)&5uc89w#YN&!p3JN)B&8%HEIb?mG*@4KCl_aD7#kTuMOhiLGBS{oloStQ zDAVG)2!3JUvIsmzT4A_~DI9m9a+{zC!59m|H`^#sO*~ z^xo{xGYN}=1r6FNK3RmGeR}pkK(CmlS4!wri_iLv} zGK|FWQKU>y()bo|YGDz@%j>9IIg7G|O}OP&!Y;LtAn!x##oLU8HwnBu=wN*8ynTnj zxsQQIjJgEU=;Id{e<~Q*D@;Bo#NO7_3t|7M;-oa;i3u|{9e z;NAjn;K@t$^Syf>JVn>N$LJ*Vgk{(LCvR@wBk*oMAn@*^@wx!L2XAikb2busjWoUg z9rVO#d+riy&R#${JB1QP+rou)TSIe3S++-E z^nq~j!ramv1}3yTIVF6sM-uPtRli%Y4)mPbPRU2F+#SWnt&@VDHCILdqpK`<6<$JorTTxbZoPi*yTodA*T?NDe0&T48~

-ZHbVL$vR5#)2 z!YuyBlY9728r`>-cktrzUEF?j3ojX2FJHZkTi34Q_J#9UA(;O5;4bSH{(A2k{&?de zes}94u5Zp_W~c+DX=(5^w}jd8!|+s7fG__K0h;RY)z(A;t*qvRGsYq!G0vzukyn73 z${H+lqrO06IX}>i)9r0E+%63BdpDJqqBJiTd8e|Go}MO-I}GCY@uNL^d3lKkF%0zd zp+vKjm6C?!aS0qgd<6RX1{h`J7akXest9E!4EVIMh5>C)o^kkq94${qR)pT?Z=rXX zjZ?ea4o;S{YpzJr}2p%+1@M)3V3gaauA zX#B_!lBXt+G&z9`#@*7@O%%%Eo&RON~$ghP{b_L3o&M^*N6ZemG+-7{dag(v~ zE_(0Z!yx1H@WVIY8{^*<;P)#4$A$U_sEJ_X?a^mn|Gh>CH*qNzfydVl@qPXgc-@3t z2Z7gq=ONlyt#=-X)pGj*>aX5G?d4mj*}aJdt~U{Wjn`;;1YZ-a^bPcG5PB@Z@^;=@ z9)VZ2c@7nf^To8h{MlvJ3jdFb$R1ln-q_c42&SHL%Y*6fV>`Ir=!nks|N#XJ47X=(DK}1!W1Gv&)U`+rnEd^ zsmivw@0bkU`$~#%Q^!Zg2{vgnd?;yvcaLb|pY|%_H+$9b{$722c!2jGvc`wU?eT$( zJwBFq#vVmKe4!MCJ%)sydkK^x+MpEP0eQb>D0ntN#k~Ovj+KzKDZ(+U0vr>TLRlB% zo%6FJnCxssdtDW#$Hvjs-i=1~&TXyK+RP*rR~4aSpc5V4ouZXpoS48$c{v9Be9%j) zskbmj=Ls7uh6Z3aIT6#?8P;Lc%#1`wKT&4 zVV~cXi)ia;Wh7m|)W9HGs%vob)>S-ucn8yc-MGK8ju(uqf4y@XFA1*~cdp?1wQIP3 z{sLZp|1IvndVz0#_yNzJKEcaJ_wfAcMXWYg<8PNY@t55-{7le29O=L>m)3E0VFXPj zrx51l4P$i;XvrRfnTj-QmF3{7sscZCEu1p9#auun&c~%;J~kN(S$S9}t-uV8Zlj|C z%Wbuotf|H{;V?{FYAh^55hHL;W;#xuOhz=#Q8>fI%hw04Zf>w;WYyKyfufu|WF(~^ z$!IHaOoFg;5=~Etw!%;q0ea>F{0Kc=_M|duYS=3+&A1>VLhmyLB{AL}VQ>78(9`x5 zhO30aG&~xXF$u6vNcsCdJqLDNj^feVLfW1%T%|&cxPgRtP-QiOt7;HN@I}zfBkLN) z64t}{y#yc%j|Z^?=>69K<~AgDbs(9hozmTfWPXQ4M*cYAU7FTh7^G(p`{~2PfA-Cl5GD1^jrgi;TswbV=HU% zh^?uGnK(?v%-k3zW+spllE(oFe6UvzA05`g`$vuN-f?5RdrS}i?SLBoX`d>gr$Oi$ z()4I|hwSm*F(Nqv$l#=_S{p$Be;I!GVVRRiLv2sTs^yi zU!J|dFE3u;#lu^8_3S=gKYoITckbiqt5F6i$xa9r9 zQW(TsG{20db35V?jJ(Ntqz#M_to?)@jYY6h8fGXhGFXVtU7d(zTaKqKCesu%rxuVi zwL}PP;biYLqM8O7ZwF|eBWS&J2hEoVkPDa5e*FeIZd^yt?OPbQdmlpt(g@)+Mo^8i z#t5zvp$NEPp%_=+?4u*h2)ceo;(kI>*jLy4_!)YH0~NggO}#+xqvz=53WRyQ_ifPL6Hw~V{j&*Rax%XqkZ2Y>$N7d*dv z4=*3y!w=7&<4-^S7Qg@QCp>4PTi@8iC7RziY|u@KahRoT{=77fU#_p`^t<)S)0R z|7K7!El-9JOJ{UXVuUa?HG2a*ma&;BEbZ-}rmrXN&-;RA_=U6_K9k|PyebZ<>qA1< z42ousP_l4_y1gf~T?3%!6-L{Ofq6tSY~nIulb8j&lR0om%Y|b)BW`9M+!=9w3QOTv 
zQVxGwc|dvP-)%3nit+Vrg;h5oqNWjHl>}c!JtC`{5Zlm!#Ad>-tq*CP1IQq#GW&k)cFMbP#lX?0N@gdZ!it(*6E^M0YaxZWv3Qi!}Y>l-vkL5$oXg7GmnhX|*?jgBC8a2Tn4e)_}$@)>Wl$5si;8DtKv zB71n1U^s_?E3~b%moa?(4kqv2!`$OXnBa2i!6S?_c23jG#_v7F(CtT%3hyN=woOQ^hX3-!BwQ>iGPC zEIv7)C_?Z3y^8qF9wod>yL|9r?0|9sdC?;W)!@a*tDRy{W5(#4*%`Q*nTF>jCAgZFh8Awv?Jlm^;`=Y1%*Lc| z7`|l#|Bi8Xj)vC7=rTAqfS!R$#@ki=&wqJL(4E71_HK`FJ;9%U`Yj#`5WKpJ*Z1$? zuiyQE%V*Ew#p_oz!+Y4-SivT(_P1wN@SpcC4LWQ@!sn8b2-(PVzv0GT{y(_>=l{U@AOC<`|MG8m{(k}Y{^NgP>xW+m!WS67`y7)Gzs8UNKblarWWuS*+TIx8$PF_+|mmRDcjwxtBP0tj*!4Yuw@Dj)2nK9A|gOJUI18nBj(9$u+ z!J`WJaE~mbts*|yqk#9nlE=GW%D*|FaYz#%A2Sf+?r)Bm;-8LM;N9aiJxP0fB<+aL z}LS(&wN5suuHHcm^pDXRM={Vhgqi7SanSMP)F@^aXL0W0 zdECE#6ZbD)!`t z^{g~J%3zh3;P*FoaB+JDvkMcLm~O|_-Ddpt_fz;^|Gti2eq5ztHlnw;3s*00VvBIR zxwVRgzD@$|BJNzdh?VoJxc>YWZa%t(7f)~FcTex)=O?#u@9qWMy1k9l6O8pk?da*M zLq>KI3d(7n8>`q|SwmTTGW^&Of*3QyOpOr729c+$gElh@EVz1L(ajUf!6BF-%*KO* zF-)*E1_q*r;42Yki3tiuOh6#Qf`Z`BrI&ETRsi0K6L7G#g+2kMB(K0oD=R|pFl|p$ zTNm{-7khh0S{@q}Y`zfN~ld*e5NGPx#!=BxUfKlyFwY8|X=Dn?c^lp0;-a zYBn@ICz_sn81%iPU>uYv4pp#<$%I|f8|a-#&xdPf0X$9>!8@)RsW-d1o^D}oqVh1eP*p8wl`WTumnxdno619{^!NbVj)SY4|K zJzoOZucjV;b#?HsuV>5^4wf)ZvvW^iJ5L-OMau9bvS>MJBd1Yw<_2b;{|Rf~|2yWN z|Ahv57gftwP`_~v@aShR+QMjT9#pFemgZtc4kWgm!5SLlaBLD{0}=uO#r^7t(l- zpnLx-1uhlv8`gU?y^oLRvAvq%6DdoYofZE1s1>1ShYzHj@QIu&zEJSMmrB0)Qk~G# zPllvfx>ypXX^^(cfs#ul)P38a?Ar+i?+&Q>_rfG<1a`4QaE$DPb7&*pnJ$RJg|<_e zC?`;AvI)F8^mevlu(Ja_+^7bc+R;(dg!aZx^s|9>bkh9V+HkXX0J{ZcxR87btI;V~ zOv}VhUoTb&w5j^|UzjYhWZV-Tskh^WIjIlYGkBh;Xj8u$7hN9cs3+rLAxJ&EUijBe!BHDq6P`aD#DD&64!{4Q54UbKqpB_)Wdv1MV+Bf6qLJn2fszn^OjH!( z-kH-F>}|l@(g@D&tl{pxD|mGO23|gYgnQ5K;r7!zSYDgM$XE~KemzR-iqJLGgL@C| z;V~O#D>wcyXBPz7SR>rp9C5}5$kWq9GaJV;fw$!ofK@+#%=`IafH3P~bZrg|MJYj; z=It%M)|)Zb&Dj~wv{+#}E@6U8CBju?-)#JcXjoRjJAvqY*2#f zy*UMqjf()3R#nAbDJgt>^cX&skp3^|sWRSbnL^gUmf&-QiiI0A9sHo<8UlT>5N;XCGn(izCcJ8=6^+ zqK!f|BbX&D#dSUwBox;J2)ZDqf?zHKYikfe<2%LVP{$3tWNwKOcN~#To$#l53efYZ zs)cV&4SZ{B#n%ZSjqIddGR?%&_|nFv2)-%g5hP#!~R1tEO1m3Sze)R^buG~Pw(} 
zBp7yLTQ_1F+Ywk=jrgW6gjY4fCO#FqexcCzqUE{K@+cZ|R(9f4WKxW$$Mg(vK%Hi% zqm4rEr%322;XT&7U#Z}|{Tl3~jqrtxHNKE{z{fHUcu&F}zd7!J_obZ0eR*Gs(bgY( zwIgxRFcHU1Qz0QhPq01f0z$7AY69&1dZFx1)AQ+sdE_8Gl1C7nHh}2#ZoCs|uY>=R#y1LNJp0T~V3%x^q7#Z$ES4{(nUh=;_a)r{@@OtKYxzD z{QL*}^~c}gx6fbV^~3vkd6n^#QRONd@tK@lETyNR%F`9?K>@f~SdNFq)i|4;j@|q` z+$b!?V_L$6sVPj3j_~i(OnV5-^)B4LIf%2{wPLFHJ1G&bA zsIaxdQceaQOwv*(2Jqm_GS1FT;r5N|c+AiC{M+yF?DaReaPcDM=cWnNehiKFV&lSD z+q@IeJtCpPRdfIC$LB z(wde>;IWYjrYFo=p=V+O2_+?bB_Sc2o&dd1CFMov9a7Q5aZM9Q>smub-wsNqjJP&F z&~gfbu6qQG{S#mjnhLwPQ#f&wc9&HO@0@a08GLff2)lCl<(I*q9iD%FDFO>C5L!}= z2*NI^vJo+r&0OEdf`@;%&j&{Ay}>yAA<0)d(cegh>v{PA`i)>a%Dx zQA{ZT)s66}sD)QWHM}dU;LA4a&({SAN0^1Nf$U&~BfCjMLx^EIX}fkGQ?GtO&H0Cn zgV#{F@&M`6H*j*|I?|?YA#3&lBk^sdP27i1^=Y`5OrU7~7UsYH?^yluf1sZcwnv!n z?%rLr-@SvjyZ6v~_aRzuKSt|K0`JCCG~M{Gadr*Y1=zi*+G}@EeT}fYO4zXk$O%<; zg=WV@RKlvbd>yTPUB~qYXxY7o>a#acx^@*6XBcZQ&?+xKL;sbhgx(_zT)Kmv?JH>8 z*hU@WZ1MOMvigURM!QRB>p_SxXJt+iy!czjH+Qm=%Y#91B(!~lpyBQVRkmM6TSvx6 zD@dDI(DF=hSW5@{RMl{lY2%=ZaGIkcjw`9b!rloX5mB(Ru@mRZGq)h{EX>3SE_;dN-Y@s^`daVAQneWk|1eL;92ED&aM=S&J9rU?1ZNOAdRmNS_1S!yWpGL zkCR2?D6XEsJFbRmu+@`?qqPoFQlgMkl#g;YqQ=%Xaq6SK{vLD?g6%zwcEi2sXY~wp zqPDRTMfq7+85zRrzyMYT2T)U(hsLH_jLl47eq{+Q9i5n(oW=Imd2DWNjOEl6JYmCsd1ez&*--C3e2hnrUgFu4Z*b$v zEv#{4TAZ20`s!&ub|2U7+(uh_JN*27VaNAyb+Ut>gEfMT^^s|0gnC-ow38dw*mzdF ze6bo3jA<`#w7a<@%jpEd?d;)UW)3?;BbXT&!hoftuMcfnq88(^x`sNmG}WObOd2XB z0Z9pAW|gCmkdze9*hoxDfgRsN*r#V>V=K-;YeH~p8W`Y+oE$zU@IF2&Ku<=5-j}i} zgq|*rX&6IN%M#K$g6*B4?Ri4OJ^;FI;V|(_fMr-3920WkmPVt?sUqB}S;Dds0R>g8 zN(2{Gv1oX2ffrNVjJO&CjTKkjDnc*3qydoxo=_3RjR-H|eXIzYUkEF-f)Tcg(5Y=j zcwHN#XbXWyzzJac6>e`UfYCIN6;RE1S|h+tFt-{JVt&=tEW)g^LOc&Rb7TyKvx`U{ znm|-5p~s}+U0wsvvPyWBSHQQ5--Xcgt*RzWX{P)Q!&;kZd|gOqG8udJJ!&u9MRe~H z!aG-yvv?o*E6OUqJcn8I(?) 
zM&bB04Q~)98R;V%S`fkL@19%2_?Jld@&8O)Hw=x2Za^rsy!@f+;sGW56SO>AND1@g z36q2B8RC$JHcd|x`&BjAo;0vmMjo=f9Tpvj6RvLJAbD|uP?j)+%9I_mmVr4ANvY!f zFBxsWl)-O4m&3cCE7I~*XnAV*%^pp>cR&vx95Kbm64uzG(wwsu-oEHK?cl7t-EJ>&ns6+lrRXW^{G7 zp|hhE1ET|IZEwcH_!!nECUI?Z6SKp^7#khJ;?e@vHr6pRGKz`eaV*cC#_emj@Pa1y z;K^g$es~Yx|L!Ne{NXj8fBzbP`j@}pcR&7!AD+L!w+|lT^|c##b!Hn688@yrb>M1C z2j|dYARxLKKihP4&`&cu>mP_YHxDdR&wtCK2iV|xdRiLn9qnP`Xv?JyoNdhEVW@{_f~(HS8I!(& znDq3;psNSE7-L&pTu{L18fIn&H)9iaIQlSW8R;89M@JXxTG~)0j8zyPg!60_S#q*c zkdi!(tRy4fWnpP2dv4X)>5@dqul?oA8C<3mC(8?n`b1LDVTg6Dsjf{^4 zmNA}I5`u(ga7{D8)<{SRM_UOvmcLL0oG|RfpBe03aB^&ebskyM zH<2}SkA`;-r%pdZ;rcg(-`B`qe1*KFZ;-d}68UpaaccY)@+PmN=jwOZ`osT;j%$xZ z8*9ArW|&DWmo+!;F|OW6H4Uxm>Ky`(CP&bf?Fx{it+7f7yP}bmeg;e)8~BxGh3pn_JFU66~M;v7`em5Gp>Vr1><>LCEzP+ebxiIEY^ z(YQv~VCKgraAA8J8|!PBCxk`@hs2|%7YK{Bxzo6OVF#~Yzs94dkMYfSukjPX_w%2A zkKg|CGw$5JiRTaQ<97t(_iQA8U@!P$X#+R=M{%`p2p4FR56-ON;e{<+*<8i?@)Aae zN6}g<46JTIPg5hI*NK7VUX-MlAkZTip?;xAjEzCCn;Sw+Y>;lixXB-N%i9kNPA+Jt zruPyyD}>Q5;WVgggnlz?+++-TwRHxU=7jTqX?3U1;?~wBTse0Mx31s7>u1mK;^u8k zclYw|c*2}f)Y`#@8qcMLle+`N2vdHaYlLYQGi zJea{aZAJ4_*4DwExP3kd0_ZY?N zFIcaTzw{hAvkwWAiwLPX4Zo6cr1YG@?6be3{qj>ZUlGonxQm+WcTh#>m2+8sgC@ph z8NpRb=#>z9MTDJDg#=zcVJB39uq5R22)+D^TwdT(748BC&6Xng$t z(AW(^qM;uY0Sym7(ef1R97W5MW4k&|;2mQ7JglyPgR1Hx@D8YIVvno>qhK3p4_Hp&vd9t-xMFyaT4e?Qn{0ML=Q)(h5fLPMV21e3VpRLq#=LRD+eS8Jz9h z5f&PYyu4yGHP)l4p&G4?b?6~fx;p#OPZ0Idwu*`hk)M-;@xft?_6%T*a2gvN!Sdn~ zR+m?>wzP~(XST4lAdI8CfZLa^;=6CY#g9M!fZzZ2C;a&BYkc?WC4T{Ndv|Fb>+|EdaPbVLRu)mxz!=`#h_Rsow6OuV z)YhWCu><+p1&9dngSU$f;$!_0ALfT>M^6;l`(wz~8<+edF=clGRSL@JH8#bHyBE&5 zd0~O~joR3w#@HC~I_gN{57+Dyg!M8;>Yd9pyi0iS_z}J*xWB)46YCv42yt|RJ*~~k zN;n0~6jlVD1Allg_IN?;>B6}TH8n+`3GX8a9gPiPZKw}R#%nWLrV#<9r>PB1buFkc zF32k>LsmfnGV*U`Q<0I866^T!qd0J2KbI0>3@#`vg5wD%*x1>M`|_*^J!@Ng7?_wr zo;~qr1m4GokK)6_$ME413EG|vzLHVKAtfywS0nUjdU6DwBIB*HnJd(+JfU+U2vVg%Z|%3Tog-)AJ(?{rG-<1fL&YFM!Ff>do*H;h=+f;DNvsOSnzgc@ta$ z@I1Nn6qo#4v^T-_gz~2Oc~?}41C~=6Z!<<_kk~!U$lMHH+MYkb>CadjP)h(-mLs@< 
zz-wuzK{g_5^fXH5F2eRCt+sLk(M>Bz>$`-U(Q7z0b_H2uJII^8isGePC=_b(7UDYB z;9EKh_fvgvPVYcO%?xT*?xXeMGc;U$h??EIsJL<)<<|%)wwWTPm_nhh-b4Z0cOD^j zig3#&+=OKgVJB49xm~fcS*Nylk;BS8yNf*GER5}I$X>XB#NiEi*H6NyX#qii2g1dYpfM8n-yPA=6c1bommWB%qQ&H(gah(QusFda6%!2;wtahh z8U&zz&6lr{{y@cLDTHX;uym!rv+TLuN;ZQOShqOTmqz%I%XBrCyi)1KSr9;&&2O0#PhHDwrJ*uGYT@Q8t zR%irvLO*f{I-z~g7S;n=VG+~<=g1BOCJ*49L^U-8awBt7R)(3p0?k7g4t7om4~s%h zMizQ%tI%0efYRi68dMruo7>Svzzq!!h*$9{CHPv|vrdkTV~UZYyS*FZG{2dd8JgZ8 zHcp?$S;Ft?)>+)Ua+RR_7B8MY!BYb6^@9ia`N>l}zi|_f1?WAvi*Fu3!K>T%@Z+6_ z_?NH0!;2fYvAep4@%A=M^>m=8xti}ijK!@rls7kEbZQLUw9XdBl-9a>loVGYKfef} zfnMJuW+;%6L#d1c2DrXSou9O{#k`w4Dz!D? zzvnaL8R(+l!yAj~S)ye=zI%hv{u%g@$HT-BzStj*^(-6W(;Ej&CJ*c7Dh&} zGBAK0ZO)NpPn)!1Sr{6@fVcGw^kJy014C_X=xJ#|n?0yHdrc)aCRuq!NHe}l%gN%n zBn?lPJMZv89AmUSbm)*crIZt6vtW7-PL6M&Crlbj=vgu9>Kd6qh7I#m5qbpPp|{XG zCWAeM-eE;TPt^cYn&yzzwI%eNpknR{4O=hhIS0bjHyT#KiExNYhig(UyfaG}M=KGS zUxg5wTNr^SnjQfcT=u4fBY(aH<#5j_hI3|tIH1|}R53jB86^ovA;N}MwIYnsH-sCW zV0Yd$Kd}f?Z@#BbnJ}z`C4i5n=fjloEBJ&c>s=`vh^P@^=TS<#W4SY~x|de4%4vJ$ zZ{b%ZK0ms%52?db$emt7=Ey9fTYC{uMY|;U0tit7c!3T4{0$9^d!2lI0I9w6Na)+HX+%~}Bf5TykIf@*d;_Hm=lH%GeBTv>)egZWqY;kD{2tlea3lC) z8yC>H^#nC%A25F2M%m6STHQ?)U%i3+%Y@s`4sv%1QUP$ZyF5O2>hg7DT)d35b2~^q zdkLB6cag#M%|Oy+T20L+IeK0K8Lu06~b%=wz-`!%VK=1okln4Qw0j*01hiG^Ql~r+ok#?_~D)tG3k;w~>;9xkpyTX*6zNN4&5PrfzISX?e z=;~SExQrG)+0Qsj;QeNgGX6=hyuC`aJVI}uI^H|1N8lOby4Hm&2aER!|JJAZV2vJo+fT1ooPAxb%SRpVh5HYclC^(sbx$08P zfPW~gA?Ew3!YAOSQpHi+iNS`-%*ppL-l?;k{8-vA~ir!YJ=ilLDq#^Eu{ zjgR8W)+TP8KZjdaF5~*{B|N@)6HjPQ|Ks6v{KOyq{_YN5K7E95X?{0$ckz@F_oo-H zaPj;xt!M$z8Zih=GPbT9_BGVW9}B9(H3Ny|t@;>j3f z7N#(wr=fxyM`w8xw%EAm((};k<%42I$r4F<%vw2O&cYtO+6Fii6pU&u4I~}fj}B`~ zECh#OyD%TGcQ*0EliT>7aK5v=j4eWVq%aRr?oM#DG=`apK1>Kb3)-A;P{NqeRVX7J zUBXS55mpy^ItI|xHGqbW9<)W+sY8>cuBHwZ#sx(MHX>Ph2$y%1l$OFViQ}T}?cKkZ zvGxezcbM>#Ktx0o($X_v&!}u|Ya>F>+J^9BX%no+3C>Rr9$~zt>Ai*Cr^jXg4!vV4 z`jF5tg|w~}6pbCAYT*J+J5NH-A12=6w7nQYFBPtdr{JAYMB6JP^n`;K)ncT5V{|mC zf>O9<=fgfV8`eqbuu4paO|r0$C>ze%MR3nAgHK5fH!xx10pVap2V+$`g1GKU0~E@u 
zq@KkkZQ8qn@T2MZ(Dc0d8w%w`fO!&j9_6eu0eIDLFCp+4$HXG|-1ytN3E-pUc?$da z__~nhPDa!zn%OGyrdN>CH_pG!7+1lVUPt)V@w>3o6rycdV;kbzhmqd9fTFQ;h$TP* z^7;{+KgeP%EbK*CNhe}!y2XLJX-qr$6Vq=7Kr=RmmhgQ~G1`|7!#TMTAvC|*)%&PA za}O2UcTjrq7K$%jXMDYi{N2mQ-J!7&a)RAu2ykPio!>#q*^4;2eF^Coc5!lx>zfyl z%1Yn7h^#HbZ}Sq;RMC zw)7*Qv zR>u2ZGTMI0Si4t61mFAnl<}TW2Q_JV`grd+El{SCJzd9HNHWJ*eFplbmNnAh75{6(D(+|C{Zs-Pg z(PFz{5YPpqfF8V)psI{uHC05|S|Y^b1ft`@a56g?rNueuD$U2;fp$FWX~bM|I11gY z(N>m^mYQ-b_jhByy#?Jg!3Or4Ia%3gY-qxWV0prc|BLe&pPj_i1d2SmoZ{EVoOPBGV_nzWk?>@$p%e%C|ukrNxV_dm*89V1L@b-5&w{sDl!rz*k z!1n1GT$&ugLUSF4Yf8m4Lz*heG1=3LK1Q3;Dh>lix;njA)IS#VPz>o&&<*a>U#P(BqNKD4;;pa2mb+jG`+8+m2g-= zlhD&+FG?fSvxJg~Jyb1EK;6cb@zxtgp237(B_=oKI!w+sRK8 zqjhQ)Y?ISr9-jW~@HuXTo;Shd#~3W^vkR)GR*nja0%qmq%A zukos8M6Pc`T+b-7CQc)FY6Tg?v^@eVh@GNvK5l4pGfl5f9Q+*K(8eg+hrHnx%-;P8 zxjk#hY+XW9%_O3V`w(8(PN20QzPcHi?VTtb8bs;j7@F7DFmT}t+E*_ky=e?ysdcc9 zC`NEGQ_zJMXub3t^_L!^Y@3EhOUvE5ggn|?9wC==?lOzkcb;|ODzf-K>5Q^zv^v4` zQV6@`jdMs^-$u&%Iizl!N80*%q^@owm8Kcjw+^?W0Ya|^I#Go%KUvP`--gioF2vJn zh12|sMyF6j+dbLL?@-!GkT=2SR5Ls?>*1GMi}bc3@hH1dKom6G{h@rq1ImtW1RgC* z0G^=%UyTi@)BugFnA`hM&KCiObh_@c89ZTxM_hl$5R*;4AoNS~e2-om! 
z!_d$ORten87q+pzx`?5kCaey1pgk%WEhijMt*eVF_FzrQ8W?80EhnflH8jxT?t#mV zjd-y zjE@e#f!-bwdKx&YtOIEsQ^*+-dS-S|wxI1V+!$xw2sT$f?kc>NaasVR2SF*8 z@O;4nX@qZzkJA>t38=8vJ|xq4GDjB)y*VUw3?rhxQ{0LFW}tH;VmrDK+uDW1)_!F7 zP2=1*e?|4wHVS(gi<>5pUO$Zd&IyzbOrm0B9L)=J7(Bm;>04K@c>fNTA3VbHy=NHQ zx`FtL9=Ilz!z!u()(IsD=WEMXub}nX6Es}DPxxI!(dI6SwyvOH^D+w0UPT_4nQIr2 z#@L#+aS^F!E+TpD98xw2Jwh&t>q!J%;>uYhE^i^>^d^$#&mv}U4dET9VV_UiJ6Qp% z^a?m-S0bvu8%2!sV>>sobo(*Z?mWZ7m4{fo`V8ae9wNJW8Ww@+2rXzp7NKV!od#`> zAZWPyLeb6zO7dS2*Xo^Be%kV3*l(3D%+meXb}hxpA;mWtU_UFFAB=~5uerw&#+S1`d7frhtTt_fmu)^ z%){DX8Ql$=_yO1_kHR5!g1^BCEMkXX9y1KH=wX;d471%1;hk3RKr}=pprfc1oed4B zudP9SO%)BQ3`13=xX@aOR}<~{X}$;Nn@b31PBGqMB>G4Cu`oS`)zKlWcXs08+#&&Y8$Vsz#kZ`Vu3pFAZr{V7Zr#Rz{`eif zeR?0)ZtUXaw=ePH#S1(Vt~ok9f+COHI!_U4E()P|d` z8f>*x;GnGqM-3ggY8t{`jj@(Srb&xaqDd>Msz6;y73%UTP?hCv!cAUL9=R8eBq&0hl}4Cy%2GJRhtLyD&&t*o z#)RG*(~}dS_dcQbK4b2OhmPZuqtf_FQivq#gq}8`X98J63qsEp%9gY}Ye#51xwGda zOhY2!M39|`Pr-@AR5);HEnJzNMrRrk%NP*_{g7}%FAAo-&w_?0K+iJqBr6pbESr;A zu%q!gWfzLpcS6|TmscW|Bi9{rO5sR@bfgVB6;{Bxq!KPPHdg_1v^!Up3t{HUav}6w z2s~Fp(~ZD$C+vO&rW@~fE9L9nzV^-Q2}Az&{&m77CC8A`KZC@saU^z(h;c8phR~~T zK~x*9sIv!gt)0jePKWCp!u*}*7~j2*N=C%W;nQfI-Nf+rRZQ+&!`Q`3n7+D$)kk-* z`Qj0-{`>=O{O)(y{qDEedh#8bX1CBVcb+D=jO5lagj99GC9e)njF$d&JxJmE~? z!RbpVT;4(P8UeUY>s!8ntd;Y~TBk7*fXR%o@k^UX5Fkg`#d8_Q+cAq9h*?-i%-jZI zCpQqvIPTXv4xff$##=`A${v(WpGE1&3c~a2VeKCeJ$rwcyGO#zEeg8!A+Yj2iSSd6 zh#>Tm8E*}J!lCKn2Mt=jyp;04>i9hGs@0^bF062|#OT(()u^wejg*1-$>cyf_e9IO{?@^J4GY2|^Fj@($DT zj$7aZNjrQj){iti0KyG0-LgH)S5LCfLQVkRT z8kh$+!78#7HZgs$PZ-9DlnFQ{Prxo=6gF`q?4*Zb$)x~3qew!pKQab=d4=eyZ$M9H z7n&GRTbf$Li9~zbn=#f|jitdlT$t&=@<=DV+FxF^p zmxtT%sJ{+BE)C%7f^d#(69)O*%Ax`^Gs0|dZsE$sYk2kK6`tI_jpeo$4EXq=SV@PpxK!lp~>6~8;t2Mrx)1m00$Ox`i^z{UQ92UrKinN`e8tq>L*jX?iU@iaBzG#@eE3j6eU zTY#Px86${G<+FWP^41>cM4+y%D21W8N>637=;Y8T-cgViMl~F#aT? 
z1tCZ)v`Nht%Qo$l2tivKqjfqfBOf-|MX(hBSU~Ue zESD0&3~S+9Boyx}tb%h9Ut3(x$16ohx>YbDGe&z?36qivWA%pNU(tr}x^5)24kD7a z7tur@vSSk_8cn3_We)it-3M5?cMs>kevX@e z_z8FZ{0H3r(=WL3^Y3xvr(baUcYnk7vmbEjn?K>~*MGp|&1V?7@dRz>Zy{%V6%ma? z@Ga|vcX1bdO1co+Hc8+zdJZvu&u=4dbSP=_r zh?rkR_}mI2=T;HM=pH=00H3xA1h!10c;O}rM|WToQN~Uz651?H^B^ecdq6?g1uDiK zP%-s{ys-;xLXzMTmj!*#5UAMmemhslumhAdw!|@g6C4(Sr!8JKbPs{|6+7xZGKz$r zq8M#;jM&ZtLtyLZ1Y<^8BW^!N=Eg8I7rhHOb+UXaVKfGcb3xU#;CXIIbT^70JMFHB)| zxEE)~`fzuC1g{r|@cVP~xV14sV=Y2qX0mt@^KJrnk)P@Er5m_TZsG+Y`Siwp+`MuP>(f){%{YmkNPmpSd!xz47suknzyhYq>QGl!f{Zc&rz`_$MV1`FCPT=vj!PfMG09_u z*)j2Y7srkebYdM7>&W55*uQ^24iI>IxfF)7xO@7-*FS`IXD@sGna#ZqEQFXm#hGe}ik^{fyf`{gKO`aOveM?7Vt~+duz=M}PY>p8os4 z;?|%3MDYC$_x}2CxbpL#vHJ8|oPP2xw!Z%pwtxFqoc-OuVExCxVD`n&7`ponI(MI< zX8jflr_a-Vt6`B+0CSd8K^;7+JK)>YkEo$}WG-Js&Y3GnB?#l@HW4|qfr#mKgwL!a zjLXoeRRoPMAezP(N+XRNTt(UX10;8ELft(J3YKwDHIIghQ3#X`{Gn##2NgrYPWJ?) zH0>avWs5`VmQXNvgU*RS$QV07-pqkZ8ywd&#Sv{`OrEYd4sWl53W2A9uVm!0M_L~H z2)zS@o`Z`g0@x0W&CFn6Vgw6YOX!;z!ql3UM`#|G)5I4CXm_8=iqLysgq|`jPnp2e zVEWL(2Zs&tp)f>6$`&6=zk%LfC3hTE^Q7td5~g8ra*jtpKo;WSE0B@ZhV)aNh)M5& zZD=)&18dkmYsCF|){)(?6QCDA3YX+5I44aId=qd^o`g&46i%F+Wb7S-ZPF;-8LOJb;n05%l%$6XkD+gL z9CJ&{*gkg-XI7VKd&7idKQ;)d+vm4%wyzuOrNwyE)s3GQm+;fdI{tY13VykD1K(fY z70-8les&E%U)#kGtZ!~y#n$2+#`=4)!-#qF{0<&ozJa?tSH&Z{D|2%=Gdqv%**Prr z_uePTs*&v`|PQONk-=;hcG8x_07o@_56Vw ztnDxq8j6jA0!*Z*W3H?e_f9Y3@%AOy(&3j~+z4 zx+>yndY+7;c6$%Ni?$c6XABP=0~p9FKwU69Mp!9%8k`KxPD+BY^%x|M3!o$LjuLK+ zuENysLJ4Lk9I!Y*I{s!j-F}2|YCnTj)8u!N|o6 zMsD8FclClEV~duH2ZW1&YrFeE+tW`xElG#))1?XO`Ui`rD(TVqgnfHP;W02`>@|*z zgK<=XSSHa)Fo{WqY3xau#WC{6r@<;Q1J+3d-pL$TrscpYGY__mzV>;ga3HiCiYnny zTn$G7aE!7}jIoZiItK#IzNiXzg_UC23(qYqg;P-(PSEmPiWqB)8F@<@;8D>eMq97i zX1=}x!NRO9jIR#DOfl)jNUG~YYReE33B6=mURM7IvU&vIt)j4RjFEN*mtOpchXmG* z*FR(T$v3$D%};nqxV`+-U-073f5x}}{_lADZ~uvFfBY-1{^?(E{lEVQ&iwo*bl-e{ z^36*q+uB9l&Mh=v5vG`bB+jhT|L7ZxzW51K-~58v@BWB7cB&cEiwJD$BE)N9l9~^V z=yd2O74Z8t!@qeP;XS7jH%jxHxrFr7H;}b<7wIdvku-k=5hDa4VHeZCj>Nt*Xx?}R 
zecwDt5p*gRkx(=Wg0hi6WOdviqkRGj`mS6*f#Yhn1fK&ADO%#Vx*ZNFTd)JP6DOaL z&}OXFHo+k^TAFZr8G-kOw5&KM<9=Cre93n7)iEikvVF(KC&SL(Q9MUnXop5-CXBTf zFlIYc)-u9ZhZXVh9t8sL*E27K{dr2XJY{@*SOXs))yIcNjq#y`1wNLx#V2x3_>9qZ zkBSF@>jNpB0BBjnz|1KP4qkb153YcBcpU;`+Yp>OOsnf<(&%RLXoG232Z7fQr}#m! zP9%=NDRGpB$4VZDL&_xVl4yMdpL6OA-f76qMMq;Dx_a8tJ=h~oD9}g48yFz;Iy%rM z3`p+nrSS>(4`FC<1Ox0HI~!VPbc7wXd*<{yW)~J2Z)dT7dJfwg%UE9F?YU{3oo6E& z8N}u0M%-;_!E?rl@7SaM_S|`Vd+scrudm?2+!P)$2H))M!8QJ^=QP9%3$uh?55|Xw zu(`T{^|@tSzjzgED;t=gF>cJvp|7eMRVm46&C0-1dkYpjT5)B08E57eaP`tv?C#t^ zb8|Pp%NTYq?Gk=lSez4vy|iGVFdwtYDVSuP&dXu(!R4LUNiP-Tpj zlNIbv0G=d{9T#HjQCi&*965BD^%iz4Vc*>Wf^Pr*eb~2euL!*Td-p<;K-JLHWOOx$ z8LiIP)*1!`u(>Tk$HpeitRl>^V#UU0W#b^8*e%Cc@);wtV0!NzJWS|G;7bW<9FUfS z1fz-s8?!9qi@c#Jjlc$4HjdD?bA}d6)7}~C4lGAkXwd#N*rN+GZD@M(J&O?7a0#DTAonGF#`BfMDUpte3l6ruu35Gl25@hmBpxQlU)eA z+)~X*SHqf!@ZQiD{mH| z=U!D0UtyAhre^WfHIJN9_@1gnTEj3Jrq7~ybRKy_lgRBKN7dwM^qtv7!{iD!@4dp6 zmp|hc;q>5#KjXpozu@ZA*SPZJC0_jUC;a>${}W&T?Z4wLVYfrjo%#M}%zyozc=7Gx z%?rq0*+PzRlyzYPX^WdkTi8VU!WpD5p5g5c!s`q&mo`v*_B_hAFN#yU3zu_jxpp79 zi)RtrH;0JM8N~E0Bdl`~UUd_&Dd>TDZWruIhvD8Z58t*Gg!gVCb@CeGySI@&cm*kq z>(Ft@Cb$A1rymG;Jzql47m5b%gq;&_I}>~^1fD&P5O~LwY#^!TAg)WO+dxLg7Dv@h za74`j`;|1rNGnV}D_m6SD?;xRwxj)$(l{t70}ooir?($0EUjQ{YAhbS5H9a%Y+=cy zVa`rf6QAr8Vyzth`BQnk`^B3{1>V~yT;frMz|&$G5_)F%=(r`JXGiFrz^9D1U#WRx zzh(fW3?raqkpL}+3>doR!^EQm#vWy`32cO`u-`6y7B(lRVIDIG({M)H=sq~b4Y2xg zB4H5r2}7{q^Q;Ly%cOBwB#hGXXn%~p@5K0dqaZI6O`T2X80tpPa3A`GtA%lc@1>db zGVTry3}R$x7~>9Nnd*U0d70 z8sBHJv{alV;K|?wUeB)J8IAAe=qPT{7QY^yz@OOby=?Et4kOLgjvkyH9>Ltmus8^M zmS!jHW1QmiI-1+iT3wC&_-LH;bVk0HD{>i4dwF}hBp)}H=CO5p9wXypSUGzRJ&fZc z)5g1Xb+Nf}JM{iIR8cukk zz}S)z*aAs}SupJ^h~V-&b_5}etHA`DC*!H>aY=Y6D8reSWhzI@lahvlloX_d%QXrU z1qerBg&9(W0~3c1;K06pI6%M&XIbn&xF7qt+{@d7+3or2D;(In4-))aDvC)Np^~GNK1L0r>8|$b1Y+p&q;2=Mjgpvv* z`5EQ)j6~=u@q4M5SwoqnWM;#LW(!3tJE+*5fQo|)RGr+QF2Ii#_$&Cdy$L?X>|epB z7aRe-5W-IYUql=XX?kJ_@T2XS#1edQ!hXJVf-e)6$=NVJnGK6{+8!gW^{Ham()R2K zHah}N1Rd>7DC>esSml?q%3z)MrfdpIVN2lImCz7NDn&CqQCbg|vPQVm^gJsY;ak%r 
z4sZ@*=bTNrl~1f9yKNE~4P&StIfKD-cX0moFIao@9d^F?BUbLb#Q3?JxbWl$0`HG_ z^80_q!=L_&mw)_MeEp|?#na#a89)5%f8r1S@Bf9Tw7y$^_zN!l_Xds{@p#4Js28i^D0NS#?m z76DnzSX;C65Un>~qyEZE6mHx_{Mb48w=cn?aSrZH^YCn1ViewhL*5j$f*T-Zn+*k< zY^YjiLd7(J9a<2i)IG&vA9C8xkkYWl5hcb})^Sw}NT^uihytxn*&2uBOvG!<9#=QR z5mf^mRMDp4sfxh+oRHf?)BEzc1U_L~+IL(EDr{TPi7BwOv4a`YhY4XPP6baXFd_t% zwGFW6h`cyS=(}IYA=!QYcJOT1Hsce7Q>~ssE>RbQ?2imYlCCmvpm)s?|6|BM`Z3>pr{jd|DN85Ac z@&tir%d$)!Wty0Qb@m*r(x+jUHbc{!#XB}yY6$dkLqSO{n!8)kEnEv@upa}xgdc%7 z*w>Fy0!XZ(A@N+>DMsL#nGuYQ^D#niVPOFqjI}EZGgz7$!^YeUF3`wsZe76b^)0Nl zcVH+x6H_^-uwC7N+dacrC@#lJX(g`IHREo58y;4-;%<2(uCl>i>+HkDiAgLHTmu9B zXzlDkZCxF@+uPAtR*8JtSbC5*;_OV3V{48wT2GCm9Tu`PaCvM1y$zL&>2 z9y&^!J91czuR?S^M7SN?zn9S4OW5tj9xlHU?2ZPxhjzD@bzHcZH{VxNRZX0XKsdkG z($XBl#Z|2NeT8)^#tBju5tsFdr&m<(p9{ZqzA|&{J zlBybz*3yHFt|4UjTv>f%$QqbJ*3caC!bQL>Y{YBgD%m+fnbs$OPmRW>{&(;NK!>&` zo}5nb=>~;E_bvGJX?q5=J$;se2)-l{e8vQy32o0jJ_F{7nJ`Pqf?3)rSY+hElD21+ zQvxd*pLJfj2s+FB3b8DB+no1X5{g2J@GBtripyYIQV!eFD%h9Sz@e-T&gBh+UL(BN zhI}d-5ZBs=Izp$9Fe@Dvj%cw{2Lzp_-DNS%YWbx{|E5nzyF{3`Y(jvFMq|YAAX0+G`!VkUt{PRW9-=r zC|f^^!u4%JZ%eefH;`LL^6VNCXI8~Zm|jNQ^b+gz-{F_Y^@OQK#E#D+dUO_1BXg`p z#Eq}g7B3+0%qwDNe0FMAq^{B#zvGbHOxDWQ{??s|-5sCD5|ZhOBNl zlng?lr0-9Ya}XpOiih$S{UVXtOlgHuM;E8EX}2d2&kP zK0G1Te$JHetK}VFT5xdpf=^(GI3`q>;6m7^XJq=u^7Ks2Aw^64biXv-|5Ap)lfyrK zu7uxwrAFWhlL`n!R5WRLdiacn_t`N!d@9LUE9=H+>y0l|1F=sh9EXi!A!V5YCC5`x zcQ1m5XDO6iOCaq~0D0FkXfe{-WX{5~Y!mLqYp_WjXMz}nH7(Dc@zy1M3a)vl;aa*5 z&&EseY~F=?-37RopM`VI3f?i7mVm9ECZa+EP*PfemgWW;UKe`WJJHj@>h4BAqpo0o zy#u{m@5RWlF#Y>5<|oIoFg=H*dB)kr)0m%`!oti1&MYnB(#jd!K64&77MC$sUyHu% zWQ@?NCUY_{pPPsI%sgzfCx6jBf@kdmxKdt^8;u>fJ3N6a<5Soe8^_G(h&ZobNqHH{ zN=wA6ob@oO6i3G(nsyhcr-3*%6*N0Kp_!J}FI;fEtPmYV*_fLd#>&bJx+p+2*v}C5BA~piy)@Jb2*TD%w%!vRyLD0C+icT;d2FNQSKuQ*VM)wQ#J55SBf7zWdF_V#l275+XMpy}&-C+XnfDl~|9>l=| z1m3>=tbHQ*4(uo7_I-sf_I%0uN(A42f>9WvA{^0GR#1Y0jxH=s%?LDt&B_9n7N!K? 
zn-jV%g&8f__(bTL(fSBHGsasTg6uFq_or;EAF)1Xti(~XRSUKHU++cO~e3<*9X#$4mLG?*l2z&JS*CbT_M+MX%F zXPQ|6v#dgx(fZ67MTIhbTLSbf{ti4sv9O%bW0h3ErmTup1H1A%I1zMi1fDm+C!GCJ zGrx)cts5xnnTK~WjWD7R?g=&UO0GvtQ7`gZ=df|-CroZXz~J&#EMI<#yWjp5FaGc! zcu3%V`?vpzAOG!t;pLzI6%T&u>FDLj+&aTkyrx=3)uGn5ol<8ec#RV|C2rBBE(}5o5Cm8<|GP z@Fao;gqlV`-xPehCgIz~^`1q946Gq~>^woai@2e)NFBY1h^7UYM3mEp(xJ~%Fb-l= zb!W8ofSj%ajoy|^D@dprvDnFJnL%2^oQ9{5gEIQ~;+Q%~5UJ zEjEJf{vmXyr=l?~7$c_=Fkg^?<-&YiY3{&oeFvWL2Yxm@jjNsgxHdG33nOEgYHP;` zOK4( z0{r&M$>V^$F!_Kwj;d+lxNt>!#u6z#W5^hpLyop5&zLK3X(vt~sbudA<+tDyVy*x_ zO@dGJE%>wuJ}qGmHiA!^wx=B$4Q(2qPDCtpXneXfKB4poKLZ+{0gcZfF&zfU88AGV z1;f-+FiOvb5#eV<_!(yv!8p4ZCbU0O0?&-VvtX>X6rfjF@urFhzv2p5(e$hcJ)81c zI8@YgLl9=jak9D}PT{$52+fBFf#;FZ1iy?9MCA1#Ccg(6HRC8~p2ozP zJGk@B-|+I6|A~j+)A0WM@A&yY{x5>>KXCuYUvT@o-{IExzr&4he~at1yvyJHg!Lyc zFn0YmIxg;_>dZFsme!HAxJDD(6irWnUBdJV;-?6^sU<`cauGDT@UeMBO)iTS$@@ab z<`B%78^8|VZ*UYo{Uh+`9)w5d5NiZpT@&!`oQ8YLB;1-N;odlbfVOG))DFTfs~*0^ z{4UjFP_hk&w2?O?b)0cT%?grQ*5av#5(J&3mMN5s?4WLTfwHARVSe`HtS(qEZpO2KY$A6rW1ju-;tg=Q9OD zPg#hzK{#v}11XDSC^}?8%cB_Dgq{kct)dglxfqJBKxUe{B6_cwFh7zKc5PZljp0N~)5IRm@CcW{V}umL*xTWoBk( zW@XUywkC1^D0(auUg*4T)xD^}vvu06_9 z@iz&dtJkmM^3AKba+}fe(rH{ge-aO_U&7nBuH${i-8UXR#gnJ6;?=j`#2X*d_P+iO zKKS^3eEZ{X;NwTH;mwn0ac9GN+*rK=57v(3-5ne8<)!oZ$&*)Ubg$vZufB!vK6-+8 zX>_mOxrN6!uj2thd+*#OTswXi7mlCCspBVb`0yc|Jaz<^Xnos;N3gA>4SN~G4uq1p zoUF%Z%ck*KYcC%64&kkJ>+vHR>6foQ#Cta{;FYt7@%YdVeEsx6e0pRTUS2baqy7Cj zJ2j27(=#|RF^RRUZD_XJ&`@bbN>_$tnF>`by^cyO_qs9d_hDl=gc;h>IKi}>5q6Nr z`odzQ3kp$B{ZG>poisHAjZVj-bW8~FifDDkv^y1VA~kR3o zUy$Gn(fmTI)*`fe143&yBD|J{LhwZizUZc%h;P}0_||<$()gszLGLiC2)t@qUNxg_ zEsyp4j-h@(fp_SH0=+syuaTvL(Ca&V4l9pd#rWO}XrI`Fn*PlM-$ArbpG5omix}Jg zkWu#zcAO)8b}{N+d>wBvLjIaD_h-NR@A%EX|L^$c|KtC|&;JMh;&=Z{82&T?rlu6 zOt7qY{07EeeuJQUhoE~KqYvN22xILqEpOn#WAxp7sFVQ;K-cxVXuWU~E$42bA5t2DxUA7J$Q>*zgw4_ybZp?l92EI)h?+C)E$sR8H+JzcOFYHu}Y=nDpb~QfWLhwqD%K9+Or>lwmtB+u(a~yuA>MKoG4}31VD?-81Dn71 zzhhp%#f(;(OSj#DAy&5O##Zd+&3a+)0bD(O8dtAe#Kp_!aO(6C9NxDBXZG#J({rcs 
zjjI>&G0UshuHfr*n#(!j^JT!E3Rhh@N{$x4_8m)>t|2k zjnf3)@qKuBU>je@PQ1Nu3+}9)!s%60xU^*}&TZU;y~|f%tfm?X8eTokH&dab?Uf;v zvjoZf9JG|GvC`$l#z>g4)`=;bot9@tFO|QV5R2p&Ak9b{CR9v>o0df*cE?gk;7M$i zXLTiFcm$rRpa>fNOc~8iZ!*H{u*2c^!xxJq#v7`tv4t(C>}d6Sk9dRdGnvf_$>FPl+3M^E?>*0K&-iIukJ z^rFn|hgSB(4Mbp|C7R+HmtXSZO~T8_>!tbmXnuae&rkRTrq>}bvmU|OO$gEWB8<9`jl9t}??#+4 zH?f_z_h-;MqJU3JLQm$Ho{XKsCyZ?co{QbWZc>iDg;eY$T z2*5w!Cx7=31m7R<gx){l!(aXl9(?fwoRhs|-}^fDzVkI~ zCGgfUzRo^<7c);7V_#-;CFI6gRxrXYBix4Xze?D>jiLK*(d=Hw;DguDfBy-3Za+Zh zjoU1@(01h-S}$H@xq*&LchGU+9@@^*{w_Yo$j!Gf#?p4=2I_X4Lv(5%O?xZMjpNW~ z26?SUXy`-GCuP;+F0@Q;!qomV7}|IM^&@N0Ff@Z$Qy-En;hGL*K6b9wOye_SiCTv| zEgMJPGx=pYWHFhP@ZPCzXu{ILVbnG@vq4TPmN$1#)*~Xrq-+3~+U=XV4a_Fm9!``$RfsS2> zbniu^Yac=#`w(p34`1s6gu725z3eK|EAJq^;Z?L>{63aG{Tnnt{%f?o^0ydx^>H&f5o(TpjMl5)$M`$H!`jdO8|Jm^%h5p#=rowoSLwoJG=mMzoj5Qt zjgvce;M%!UIDhg8_HJB@HGNC5t+yYq?%IW)Up$4MFe<)rbPt}MI*d>6-N0v$?&9Ou zA2L$DjyK<8qRO;lsDz!Q1b=qih5D=+Q&m zd;9=*9^bFnBH*VnKjca)I^;hx5*S?O=pFF|Y@e!=?J8-x< ziQ8QrcztLP=Tj*hjYe^$s}tK91^4xKVc&{j99chwv%46v_iV!X-5dG7*5km|jo3mk zP7U^BprH;ep#W+KyefSKYIQo)YBY#1$wGYLB6O%V7nX%cyTuGNH8+Q(=^gee)b*Vw?Ht3`}ArHN6y zlr451O)rP9Kc`HK95Fw>ZW+RoM_?D4DxtD5)(H5X+g>;hLo@|LEUjz;V`kL-Q}D_1 z;f@voUk{w^y>NB)!zra_08U!Avws9G#$6ZT=VIJ-kFJD^@N?1p+-yt*e%_Tc@X`4E zjJko<8xdT)h2YzU@P-|TY}$<&BW|4WHnHO%l8m^iXW)BoeA2f!4r(P zjJOA7aPuiFJ9Q0%yU!vsvKckQ+t9r7AUfB|+J8^5=K6pNYX1}8{`>zE zAOGs#@zKwIN0|LPe)B*6PyGIWW2F7Ze$b)pc}dO1VeWpqyILI z?)IyUV^Us0?`;C_=0mh!xrO$tH)wb_3A-z3IDH$B>#L6oxm$w{#j&f-S-HQ$4vF?He|uae5ZP?mqZw>GAGC zgc~{uy`_lOcR=qBBTr{$F|mP^bWnyxgq>JkKJOos&4HeQA+&XMBas%^F?mS5rRgQu zD0_l&lxQtjl&`~rd;?y{H8R%PmASRq6$unrYf(hNsXSfK_?AKw?1v^IkQ+jAbOgn* zF{qOh&{eO2rD+TAt6d0m@8|11h;YwggnEv^-+37Ru49M}TtH^|EmV!)N89!fFm&~2 z=zsVdG~E76)ZP3A+OPj5x-b3(TK9d1hRyFHz4j^68=o>Ayo>fT-^AdpA7fs()r9`a zN(?dXEVH^W?Tcc4x*oF)E!e$coKbfh_N<-8L`N(7BN0r56FAee6hB~0{PCH+ci&poj2aW z(^p@|qo+@C^WJTozi}C-_%}|SKZWbG$9q?<6DdSUPCgRQ$Cc7o5rxa(xpb+9;xSHMa0n*-k@JdC?u#$AD)Z`C^Z zXEq`*yZIUPb}G<|ZrO|2w*3nDk~!Hk`xA{ySK65OK3ZF6^%zPqV>deEIof8z2{#> 
z&HhV>Y(4?cs=e?`>_(XHHM9BvS~ed=@9vZ6+{TYLZbkLXH0oz(P(8I05jOCVfj-3h z`cboV7&V=Ph&2fGLWtsA069J)@N7GxBU#O&oNHUGy!J)bPl=tY|vpg2wr;#L6 zfy0lS;tIT&Rf?}>>F||ogR)TQi$yNX*8~`C6UZ^wp~%^evOqs{kx^8nSJLXHVW?jV zQ`07xTeiX0u?Mc6gG>&`5MOp4;i1#;_n(Bfm!(W9rfUcl-qI9X^OV`}XmBNAb?Z+jz`!nHTl^ zsk1movpaI?DE1uMg#!n6;mY~bczWwLUcGl8PaZwOoA11Zx8HvkAH4exK6&j;d~o$T zcD1))TPlXr4fVL*(uP|!kju3VxY*Q;8H)wW39qfO7}zhpg;kTdwRZ;&Y+i%@ z?iSQkr4S7T;bXK65_U0`RHX$enqHCst18u?j`6dRKx<-Yq5ZXs5fgYFw7o{g*&12J zn1J*!###tDZ84*(K#rxDkQ2*Ov9&Dc&sDNvkrh{Bd`%hNFv_BzEHZ^mW>v^Gc`^=9 zHdB$F+3Hjh)igk*(A-k80I9@Z*&&TBY??n`MZl+JG<+>>knpP_{4%uG1pkKgs8u@M zC=%Eadf6(Cvc{e)EGod0V{W-Y>7^^Fbo>!~5_6w}PajGC(f9;@veC1prU91PCYbA6 zVQy%Lh48bqbivx*11l|}lHjv3^4b}9Z7hzV<*>6j2tU^-ZEu-?Z<-Z#HM~=6;hWw7 z|C&v-y{!nX-+}PP-2~qr#J221e5-V5(DVd+f9%wF4!xsjrs*~ACH!Qz27%WotG67z zfRz^?VBPgM&@g)hHB$#MwEq@XUw#W)Uj76--~KN4fA}R1zyCd4{njsV_QUVt_!}SN z=?{K^@Bh<(#ZUk3zvE~B@?Y=+0o~vJ7C)rvefgW;;(NdTJ0;%!@VEbj@BGd0@%B%C zg`1y#fs=244f|ew7h4Fs^$%ai8p3XpkQ=}C6w7Zu!RYlz7{2y01_`$Qs}HdB%3XA` zbX>Yiz};tTy^Ge8%8;cZ^khB8!V>7%a1Jh8j;6MDfVj z-Y1%hN-WGR!&erm@s%YyyqIUf3xwVag-*Pv@?m~y7>ml&1Ye`F;g8xk2yJX7Dl)UM zHf@KaV?R87#}OPkkNC>#$jsbB-G;|Vt-g=Q)NO>O?jS;VB{si_>fK-Gb^Z)JC%!=c z*&kxy(l5|=<*(3l_1EaQ_$$W$UlN3Wjb#^pioQdipkv!>=-Bi!IyOE)*QN(d0Iy-* zL@0#yi6l0JVp!|)VZGCbO+Ffu+lvv#xps#YRYo1E^*VIf>=0@OamDT-dz>mk7Gk8#iL#*fMPG?Zjr9)ltTtOKY~`=$dWV zI=K#ex9-5H6Gw6E*naHay&0$X?Z7LSFW{ql_werByLfW%4&Hz7ZG7~#_wnTA2Y5t? zK0b8@dwJmwG&bQ3VRt-PgA=g~&Q>?zECILPYHGn%zJ{yoX0UOj z2g#5VKCeqz%TDIU1{^j7EhYpj$`K~;VuV zV>BY5)1p#T2@11wQIcN-8~=Zpjg@p_2sD!fWvZ6o zBn0IsgGB?8C=6B`3J5;Affp62u!J{rwzfig2pdh=U2^vRbv z`}xoD;4goVC%^nhT&Lk({q#$`_S3({`@j4RzW%G<;K%>`?+W;S{x3ZK^S|N8zh|WV z?LR8d^1e;Odyl}o``Pz#=ABQl|LHr}{_-1GfA3YSChVpNyA`y!FhsL z-b3GoJLtJ^8(kM}qT~Dxw4A=Clr~23PC{?s!ac0K@e~^#zmGk#GV>eXz>e2G#>BO! 
zXxx1onXNK6`7+uLUqNEaA-HDN!^z0%WaCjeyb{Kq5g2K1?tv8q$7*!$+(YQ?rs=Ff zWOM|co*sC+3B3LxM0kBHgq_wARF)fCqA_5xiiW3|gI>0pjbo7-MQj*l?A?l~X*RO* zh^3bCHpRwS8r2M9{GOG($SKm{r6n3duZ*Twf%ydlUf!JPy~KzsDZI*7S7X?#vo|@7M`^arPA6psiiswH1d~O<-qN2evfVV|8r|8yYh>F*txz z%O|mOY#OVER$$}WS?t}l1-m!SVDGBsxVm92K0I+8U);KdkFQ_HgY#$b+RKd0Z@-4y zx31yr@uN7rX%jZKbzpN<9rk1zvBek1{zMIq)wf_1O=`kr$Ihw*PPR7E1Z#1wxe+%; z2bDGSHZ5&NbAGfpFc#{rIbOOGF?nru5(ZN=%fwf2jeO@uj^NV4tFe1(C zS0~`hF#ggItDc#ow8$P==9@p0U985E5)BqHy1Z1PR*nKbu|0XVC$?8&b3rXJm)5WK zhoKEbp$o=P9!V%GZ&c9w1b%vgucE2}71d49*R?=T>oX91MuN}O)&p~AA1qR_dIkx; z5mYkn&Vi3nmlf1CItAB?8Ms!iq3x|xY%j2SGlH`caS6SRdlcx!w(M7+r`VoAkH#m1 zm239Q_1-q_KgJ^gxSs%&0m=tt&h9z%p1g_mFMkco&eOKezk=@Crfp_Rs$YpZ)DW;*-Dnt+Lp_5B}j_@WVg+ z8_U1r`~3X#-~9`|_N%|cqtAbg3-2=`zw$nIFv@OXjGdv?t)$VdxcV|i3A$n0<{+bJ z|Jl3fJ#&lJb_1ORT>GgjXgx`I@z}^H-gf*FMreG~w`hsC-c)ujo4xirw%z{_d!K%a ztvrsMd4SFXm(jZK0;;wgLtth*Y|B@na@jbnJg1hvF&Nv2P~OlBT}>BDCu3tb0{!EN zkFG*=a033$WpLE@!IbQT%9Tc;#fK$jvhJb*3rpAts>Ss5e}vxRf)W_m5Dt&8L{mox zs+mZl$pqmiWAf67S7qRj#AtaIyp&afuPjvKtBcF=mBo6zkZV!K=)EYLc&ofvq>U)^ zY_s$=$Tc+~&)SIs#{i1FV^D`zp)|3UuX!8O!eInQFC#g94^=XKOlDE+eh+O&K1Bxs z*Kz&_=(zkB=)Ca@)L;D)wU@t#np2;n{`mLNcKR>ScIId3x%Mkeyz)CtJp2X5E`Eyf zV{c;9skd?9!u!~N<}GYL`V{l}Ty9K6qgWMToFxRNtyauhtk^){ts~qf?3GyVv=Mej z#=KH=R2Z<-T8WJDys(rmZIM~vH1B_j3 z(;;kbNZ}lzcWH74N7rw`s?k?CWU4RBZyaUK`vl2h4nbX1>=-Mpn08k3dBQVl=rlKTa{WCJ4Y}nHCvB zFT&5AMTKyb6vI#R^DzE8S8Cv3f+=?*mVYFS^65bz_PirObe2QMP(s^NIVIP!;Mm-1u90_e4`nQWjI2aP66Ly znqO{(X>Jt_Sru10HrzgFJOOCDA(Z(e$}&S`JjdGb9Q<^&>he?_beRT})A-73o9DpS z)D8oqu7Th)(fG`aP?nw{SeMfH`Ut*$f{!NR7+ML(vQ=>W8TdBBzj_NoYj-f_{s;Kx zZ0~>qJsGfEMbnes+ggH78K<|O&^vextui?I@EMl#=sJD_)A!!TWV*!7*h%@{`59leV7qnFWg_yU>^pF;y*XPqphaO51Jcmpf=J7Z_=p<~Bc zq*m@mcyKeSR_;Z^%wcqFIg61acQAhL5qb_?MSLxz;P?jEM^?hpKLSJ7AkX&zjLicu z)b&D}?tmuV40WslWr;@Q`%+j^8Nw2C5V__kiYimQ2g1myaAJ|hh{dHcbH{)z+FrIs zkA-Ydmz0#EfcIOhss_tfO(Vv{B#%>)ytf2+b0Y}Slm;L2O0;-kaWTHSs1z?OslZp4 z81U5`v$E-rtdK078H-B!|8)uEC>#E?pwQNdVpl&(e4|i@R-!aM!`HomiD4HUT}R*> 
zI*ah~%ZN|iL}vXX)NX&1joSxkKK2>9Fa8Mq_kM-G*Z&4RulyC3-uWfEF8&NHCx48F z<6olf@=ujvD$`GYf$1CH#Kv>);MnC4aQ)6FxOL}aT)6fw=Cv^1^$`lAu>i)y9?be& z*y?p-uh)xxEE@^G%|0LFZwSjO?HHsL3^FpE9vQ;r^)ooKVFt&yti_|ld+_$@19o{O4&vL_&*0-H*Kp;^A)LE%824qB zOD2I+gFQIEVh|T6hjC)03p-n?vBF!45r+jULq3d!0_gR5QNx(-X9c(My;x{LR$T>S zWI4QqS(wm^)A}NWSg=e3A7idOv$K^H!H}N^eL=qR{LVrcS=1_2R_I`_G{env7Knxs zrT)v5oSM2u+8s+xJ!5S(WIs^Fz8F0dwY+(1Q%F=r5l{LN4?7U|7?H4*p~_T(+KNIn zX$#R%l8eT|e6$p+(8L?Dwy+d+D#q(_8``XXMrSVn8o@eO3!Wn zUBlOvfR*-E;qsuA1}ZyR71HP>-U{@VlvN;CZ$h5YiULMm0bdD?uf!$$n+Bm~+||(h zN&^HS;a3_?qAXsGGJ>xxQx9!*BU@_1kEUHgHoDP<>!- zkaF|klZ1*u?*cjrnx4}S(0ld~#&5ra6W{p-?)~x~aP5ab$GLBQ2Z!GM0EgfI2JZj# zZ}9rB{}HeL+V7Yfv1f(p)^v90&fh3z6f#|Kl7b_6gm9JH@lIi zck!BAkfpU^A)%MW`)467Z(*^HmZu~1N|CM7!OBLjvwsjxon6W^;#fLK+pANSkIK|l zBN&fCYqDT*o(eB6Q4xG)critJiyu1q@%4ID#s_!JUj=aHGZhN{)Kk)FMW>djBlcHkp)o&PQd zp8gUeZ~ZM+zx^+mdF7wcf90SvJ~r=EybO^oAK$j)A;!6F}!hfJDzUafE&ZZc*KbFuwyB1_AbTsfdO2T z&)03iY* zhjEvdcX#`4Y_D&|7M}F&$tc#xLzqoPu&J>MCuxl5r;QZl;!0>OW^4U zI|J>`Q9;`?8xe9lk%)zr=W-bWO~%rtX?4=Auk^kWLUS9Xh}FsFDM?1q5JC|Lf&mMH ze!?zfLPOMw=AePEtq4`6i)q#xw5oE^khKU+S=ne|lx^e<)>v4ECbbcD8Z&CjDv>U; zBEr@>WH2M{cB3jDSBx*6k@0(V%4R{ys#-XFVU+QGmlF8KU z!E+3*$A-;Hwny^)T-CE`x1w9F@Zpit#y@&%nKMEy1?|{+TTV-*y_`PK4L( zLTuw+BnZAF!Ivf|GS6aeN`|i-IF6cwER4Bz2T!7I|0y&cx`_7Ux6pCoKDtgmz}WRS zarTS9#^c}oD=vQV6YO~PO^jc-f)ii=Hm-l~=eYZ$U*R&X@5I}mDAU$1fAk&P`__-~ z_9{=!Hxc~iM;Kp}T?a?sI+(g=VD4Fkisn9=br19n-6*ear`0t< z6RCqLSVfp5QRE4ukk>7j5jEfLL>{mG5+;J&ax-$YW`fSZ=xN}!rF>?X;_ll#RTLz1=7Y(oAE^V9ZRRvYw4z`%Xlb9zt^P1gb{Q zpmxPY)K6YT?bLOoCTO{n*O8gMkD48?qUGEt7<}+kZ2j7Q!LG0WuNc4o_vkqHW92zt z%gJw`@5Bd~IsG0EUHAYuZ+?Q;AAc9`z5WGWedU{&R~3q)j)u}4_M$i7#B9)ugRuy% z&=gM4@bac2QBW}>xzIWyrzC3;aZ_iHR z&S)>LFiyV3IQ#zKFg{#1hR;^7!R?MN+@F}lTNlpa0pFe@OuV2Jl zm(Swfk$t#vXg4k&+J)1E;UU`O;nkBkvT_`I8Ev=JH)1pOdK)3LraFaX$uK4xtFdcr z5IctZF_uZ8Nrs;o^+>a17|oR2MLoWtVKD&S5E8G%X7Y(W*8*yHnhXk)CF@(56o>#VG;N-j#+z#74TX6rDvDm z8<~P@Y&Efl`g4V~f 
z_Y^YwP9wAb92$=@PM&^9i@Jx+uYQgTpZ^y3fAMd4<9GiP2S50(vHd;blkthq<>?#{vJY4;$P*D%LyL$<-nYiv^XOIgGi`%*~-P0yqp z=a-Z#9U1f4sOD?R;o|je?d(C_vpKdhIDT&S1(O3C)KEMDt=qOxWU+)Y~$?%1nr z_ES4}77Zg8(Yo>mS|)FycKjOBjJxRxLT~yOQfqFZddq#Z9(oHiPkxELpZzab|IWXn z|LV`sdgR+^+4D8@?0Fw!2j0gz!f*eTZ{Xs?&vEnV_i*>sFEB42iXjt?pg!zFcff_| zfERli9nbNNo%9EBh~V2F4q;Q!udE$6YOlmH#>w$a5-VHlG2BsumF?BIuzVQb-m?oo z*tG-iPb|lU=4u?H(Y)TMm>_I=Wv>c? zZ?K^Ty)sqH=|tK}0O<8fk8Fq$G(fOLH5ycxX;DkFinc7WI@h?Q8^!>|Pck>=LO20Y;dy)Ux%ftER~jc-4F@8J4sFkguhtk)=M3 zM8c1t*95=43=vl;s(g&9o&wZ(a**+4quQ6tMyD7ZQ8ik81*o@Xp~aMgE@J_@3BKmS zY}Dmtp{^hg^@T;KFVWESbm*@1qC@XSrlux+^guRcIw7Idfw=jJAItEW(!O|n=2(UBgVpNr5|B1_3cHtI{?;)mdKEdkS zuVTfe`~10!XxVujb(;^OYTX`$R&Ij3e>HraEX}J>nI3^5Ok?x2fso>BMTM&odQXe; zH6tNtPLD#LUIul11SJU`)5FlD+fd4eMjfw4aU_MJU|fl_`RAJGb+%N6;-?x zrBZ_fi}K6yLKY2=z?+vv>m&4}yi`DwB=i;)JCRN3FHT*{YBaZGIQVeO_!O?Dq2pKk1L)SRjJE9zWJFzxNdtqqkUv6|69$j7ZC9GsA7Q zm|!{7-HwOLM)BI{2p;rw;$R|(y&*TA5?CJ)bRQ9V?+=gQ4gSpS;UOF!ps{tdV?%2z zHg|UuY&}@p)rnQ@tr%}=zzAEE@p@WhVEQ&wB4uBo5v zKw(Q=Lo2AQZAORDAp+8697ud)|**jtjMJ${3-a9sOuy6 zXaj0Cn56=~XbReB2IT}_Ipgkg@EOGT2tF}BV{<2rO59xvQ+uBRK1=ryDwmGJ*0=o6 zz~>!jC75K?o!W>H<8GADi?7?MtS+3{ycbopzWVJ)_%Y3I>p|4+J%Q?dr;w)oHPQ5j zuDp&NZ~q9p-ue+Xz5H!j+1D{}@*$QUzl{w~zlPoKehcH5A7B}y>F~*`N<^K$bWhnW zZ7rd-o(8v$5L`{zP0B_>XRc%T#1-@rVBLq#DXTOq`*Z9)j^>?*QMY9usy6Os*@HUT zXyc9}Xx?>9DQ$aBpliQub$JdQdrm4##7CBIf~RXWZ1v-)Nc5vD*oiWK3(7nVD0S8H zn$)1oU58SZGJi94kq(qax@dA8$PaX&Fw{=7tA{F_R_v~Tc9+jMn@f|=wRw?FdBW*ZA z{_bC4o{xYFg#t)0);2J<4tl*<;|*XJTa9C(6b=Pq*hv7c=Z(LtREH57#wxcDlkpUW zX*vUqRan*8gxxKTI6?@VVI{lNSdTN!{FtWmN`Eij9~s35Bg^m>!S@QGc!R(@+S!Fo zwe?s{fUV&Po@T2uL5mxsnT!xFqbwsyiLPm^5XdDn7$@*nvP?x27-j4p_IWTI4PmKl zzer1sIIRdcEHo+$+-Am5Z7J+(6&!_yaOLJ9oL`7kQ3;X-CGafHg)6%d-XaYGtmMve zJq)xZ9g8g9>2^2~@OYK|YpeMhWF?bY78w_(j7qO-K%7OHI`*t*E5Vo>OGFSQgu?`I zz-@)kR*s;x3~5Ih>YRjKWg%+Jix^|)p*{E&bcbFY5s z(AXfo$84q3wWz6WM3RPH>2aXgU_v3GR_^vI$Cn5_*)>gyKu_Xsmhy~m4t)8%{zZ&S 
z#S(KB@P(mbqo@fIe4zx&!bt^uI$Gaz@KsdNSO~rf8P-DZ%{}Mqg0ZzndDb@Qkl+i?Y(`}D7M9J3uGxb4>=x8)+M|Fbvw07yzB}f^b$r-$woo9Fh!f2 zk|WDFLAU($RSX~J&m1|Ao&#sl!5H1L`#73*9zg@))HRqHcP2Zs479t7Ip8fS)hobL zV8_OO5#hFopnHk;{Q`cxu(%Y9_^~|4lf`Fc>ZA1di`k`_7@nJ+95X!1%phgGMqMRd z%AwuS@V-LeeI=&?FXWr>)jURK0&jkiP1(>}pqEYPw`ulyXHmw$$alV70a^ye75`a`UJ`RCaG&aZLy8-I&y-}wz5{P6Ga=CA${-};yT ziFt0HAKri;A;#LY%(~#`V{FMcP?xu|1zFFNxt=Y^cD~V#w7%5@-mHL!CNYpmp|^$+ zv$YXBnwqeWC+R3p&IQ`rrQR-F>+i;$?oNWQpJf0~1_p7nryExpcMmi+V@swMYZDo) z<=Ma4dl_LUxp9G3*av$QQtHP-AS@hH!s2VO$gX(@-( zREm(joKet?IpBX z7NB1DA{x!P=pZl~bVangJXBSbp~YrHeVKvK)1g{rL4)3bDndKLxE`x8BgNPow+CqN z2{hH!p`ON^N>?kbw`|B9N`zr>x=_OCqa(=VxP*<_5}KY=4tee)C7Y2bhsGz*`0`~Z zEUOE}cE((Suh_$A%&05iQxmWn-Y@?dd@`&>z*j-T6!29vv=V$Wp05k0|3BcfGwM2r z2)@xNc*bVnTd@X#m9qrNT0%sYq}qfefhP-)CRT4muZJ{y7Q^@yz9iq2yfvHa>2 zEW7j+!)Ko|dfr3Z?we@Xc9}8r0cP)ijHx^Cq4yMH(ZTb|67xMYuz{l&G0MoQl;f8% zLKqIv==x*{`h7II-N(?n^CA zrr;PDhn+yT_b!L4e;k34RR}Lzi`v!uQ8l?2p~3YqH4LMeP|IP7^{b#MeR!?-w{KeJ%U1K7zK_X@@;;aIPK2FC~2^ zK)AsP9BF95@s?H`Y;M5G&Nf_VYsEvtPRiZBew<`9Jl)ZOJ+%$kNE4YM zOea~y0R+5ucdKUyPw>5&9!}SQ^Mew{H=;f{W1~%O&9RFdX97ol6*N z8GnuZzFmWMlZJpRKt`h?4e0c<40S!{X zSHuWaB*P$hkCX`bXnjhr?lbU}MbZlR)n;8 zYwKO6jN@|-tyJLWS+)wnm9iG!T12MSJzI}&9f7w2nKheGy_R-0xkj1Vmf5%yqvvj6 z^4b#&ow|pn9T#a@`w5|asNQ&%#`gwh?tFxfBe&78=bX~HAlBBt_ax)zX~xg9=sUzh z(Dm~39{IX_f7em8Y(GS+JAm3vjIkSNm$bIbY66c27pKWZR;)#EYz_Pbom)C)`WV}~ zhGFX&g|lxA?tWUV*lgzztnCA+?BMr0`5t>#!PPSfOUp2{RUJ^p8CfIMP&2mbV%4aK zSHqfVfTy7YuDUjuVzp4aqbMZk@~vLvSv<%!x!5Qo@ zN0Vn+Sg67RHvTd@N2Vs0lpA1ny5VI*A7WjT6;$M2&#)m+Gi}L|@bdYeLN82FQCBz_ zZ)tbg1YWkD&@;_^<#khK-5%ffY7)Zx4)=-Dxng$FIa$T(ay>)~jEn*`< zxP!ph)zEt)>SV8w0r&uzHV$k*HC_MpaDi4=`adS+#Hl(fl&h+Yp*Iibf$ zYg23BVpR20>3y_0w>-n63AxI(%7Eg^QhwcJL7V{1L}RGt|Ciw@y~%V&fwM6(bnddq;Y<&VgTG8+X zyo8>A#WVurGs-cza)#iWMS^CRq4fnuCgJQKLwI~OTDKoW?}4-E+6 zjmo}_1mI;%Tzwlu=boTu=Q(7y9!JgA!>HeONO}I&BG2Af+NJRGw(W-*Mfag$BTaJM zE=JQGiq*xZ8Lt^zBQ(0`_&ORJK{qr7?*IWku##{iuzSZ~?-_xkdqe@8xwRLj<{lUt zyP&Tp>9!5S+rJWlLH<79zqOebT;BpK&ykhqO7<#=boL_F*#~z` 
zGs^uj6jc&-yyiK2J8~*$cRCAUH)nT?6xcm`c1Pezd|gMK#=<-!Ud++c^bCZa ziO{S3e?V_ZX@K{SJkv`P()DaC+Zk_{()6SQXC-WDT5;26ggbX2+Or#xrF#(S*@Fnp zFFt$}DMsC@@e8P(yo$DU4=}jvEi60s876Q33M+2?8bcRtck)E;udm0^?k;TS&rH)mR-_3%9>?nWHwe>y9=mI*(8aGw>EPdQj>S;N z*qw1WS&=IdGnx=GmLtHZ7u9Kz(3P`P@Z$=22s2Yb0qiAeW!hGliXR{#T)g1k3Vz>i zfvwVnpwG()D=QOclyP)2gF%_T#V9C{OG*a?i}JjVuP0qA-S&)z(J;JT8@!cj0g@Swuos|-uxug>Tm7Y2 z8r5Pbtin(rA0y#>jHik*8qUQse-1|MOIY}EKtuZ`__SGQEXqO)Tj`ck6HU*8SV=iT zB`U-S`8u5)9hL}UMOLKr9weM|&n0C+Qkfv0$ix-ft75|x=jS@v3qf6me2tdyD@B1; zmKM@tiHe2b%NE-s_;Pe6eaxJ{jSbgc#^#$(F>&%H>Sy=CxpW%3hGm55B!71sx)0ny|A~93+HwM^%}14a ztmzGVQBAPbY~GK$&GLNiAR0F9S3p;{emAPtY)6&Y-1HVCrZyoyIS0ES<7;qiH9{kV zFoEYM?7TEP_fo=I?5=|**VYFsVP|Zh&C0MHg3in$&o_0MdX%Q>p-DAEm1yGkyAdPY z>lxS64a42ltE^cl>leD3+m$UjlRX3Q)we>&h9%D+u%p?Pi`hMovIYfs3whlZ zl$2tzm^<(L1-$PUGVLrB(6LTr$-)yVH8d0ii`}lsjz7diAhU%Cw`4M@Ku-if0Uzsz zZ2psEBPq}Hs_Uf_uL)%aFBa#U@DgLKK<}%0W(9g*A@p8&20a;^yqM5iqK(i(Q^+;f zD(mUVz~oZj5OhIaquAW8XTIi5&%n1=Sv=I!z7w9Vz3}uL;<;rbHguL|eH&eS-oxnW zFED-QZ!mu2*BChUWAq&PKDv+p0If&AK<(jgq50(J=swG@9sMTSx4nnfb#I_~_I2JH zZ=hk@2bkyJO>eW>VX1V$O8D6bIy+5ImMiypdM^%C#D|@0;hZJLK zn2>XoXkcM0W+^CwpI=LvY>4SC1f&5$TO}gV0Ax3IWjYOCyG*Z9j)dCt$@&TDlwgrT zn6i{OyDxg`b{NplrDxxw*)rT3-FaMguh}DBGz2OrU+F&f+=A_ zYr=^3xE@VGHR{}XXbTiEmgl33$F6V*I{o?Rb!B6~m4zW+4u<`VxxPHcel^BDg_!WE zu$<6a9xOw*J`3&YEDRWo=;4juQsF?7&`XwTQLWP}9UH9`epIO(NNe3lSUre&gGewc zrD3Ko-xLlw5)@o0TR{Q;uATxU5JD{8*`EP^*A1mr*KT z_ENC8P)O@2{C`*<MK7TaF>Vb}!=V_aer4nwZ^zY23HrYu44%HwtiH$EQPhHn}F-2t+gw| zc4UsSJloVJYET-hf;yT)2~D^tkVK(B2@PYcqqPU=u}Rv@8pMW|!`a>gTXP32buF+r zw8F!4;~+3gz0tWSTVDAeW~abThN4t3(#kVg+MRS*uq11MVM>F2z86HIdWTl@p>z;fpD{~9@!~oM(b2EIz^z@bha!V=|=)IUH z9T;YOMOIfSwByAh2j&+$2|n7OT4rQ~A=A*Z4b{kF#4WJ56MRc~j|@Q{TY<{VG(7d| z5NO$oNC$1NYd`!Q`{3(7%=mu{ww@Dk3|~ZS_7U0+eTtDwKf>q*M%z>0MK3>Z-~ADq zcD|2>-5;R-z}L`l@N1~s{Vu9EzJj_L{?3|L(Yodh)J?yJ>UD2po=tC7rej$x0zM}! 
z4iBuf0~^8TWD)QMXnP@^pb)_qjPe)@AkN4s^p6t^(sS8D_3dwH!a!pa`WO-W36o*A z7!%au87Zt}GpUp^#%=?@KhED7s!E|Zoj^|}iJn^8U0t;Tyas-)HW5QrID~}9!%DQQOVWSmEyBoE^2pZ+0 z)}#@o?Ii2O=H>tw=LHvd%R&IMAV3Y_SH;ic@tCq2i`Qv`Ri>L6%3;%$!lqTjsa3(N zQ_(E*5iFarxmPV&TcR+>ykQ%cMcC4$nV|BF^ z({UrFgL+K+^_U6pW4{q=f>vyZI$Gff#)FboIz;ZVWo1G z3iO_VPebd|MCRb9OV%jxlc=k$V_ajUE^ngowGtMc1V_&hD!Year#ZQ4QSI9gVf+2p zvG2)ym^gI}b&Q$8;TbqPC*kg#fv0m7tzZ(Z)BCXI_#KQgzNSYvA~Cv|*0mC`@vUgx zaSDkwyWyW8$kuLGdV`~ks?w_}B~F`*Pw_apNdcWaqm$?w6q_4n#2%c4kFUi;qjT|f zIT>9YjIIuX&eqb4O2$`fLl+@MtE*{+v8oCBbUn(GG-;Y{Dc`#$oPa7AL$N=MB3~HA z-U#xY0cA{`eBD&t2vpO+;a83dmaZS6EZ+kZe$797w7u~-{Jw!R9v zmImb8+ED23Mv1QvWuXxm6XUSc_PmXo5o+5-<2!(0_Yrt|j>FY=8m{3>2&}q~^!9hr zc;s8?JVlc`^s%zzS<9B!(X{C`v}}JHt$W@_%l`M#wD(=qY?N(5*%zOr(7Xds><#?%58Tv_?><`f`Qs6w2TJz*de4JKuJ zm`Phk8#5wAVAk+|)${+?C-@o(Y1u(cpr)7{E5A$|ld)`d(n(Q881Qw*80S4U8|+4& zn{q~D!p>2qhDWR6>yX}F4T6M3jQ=mr$egUuGFleUwg|n_T+|SPbu2A3!Oltzx++!Z zv#T-Y)nSy8cBQ8btK2%Qc2;1vQjgXAKhwMh)4ZXlHFivu*sx0D!mJ^HS!)nej5AZT z)mfg4X^R0v>S8RVmG!GD&_v)>Wf!BCH+mDzthvOB>TEN*bWt?xBB(0odF0=T+LK6z z>QJ3-nA>-Q?^lN2NMuQ*tCZ#C^JxYzF3QFWi*xY8l03YaU5Eud*NX^08L=;u(sQ*I zS8AFPY>fd`B!~4%6GqD+_#(w@@2P$g%P+mO(ef4reW*I6H zeX!S#pn7m4S|)a(V`e`#oP8PnJ5Cdldl^x8AUMuwIXSnI{e+F`D4fhj}SCF`J1 z5bAut@_bY6kD|m&$a#V&a{Ez0p(tQ|DrCKq@sHU|A~J|L&(8BmOP1+lMFe7@JEROl zS;BL?NN>jitraiH3<{MI3)DtsoZU+TI!0Il-b<_(^NUoBv%Fq}od9n!EiZ?Se+lc4 z-r<0aiOtJ~JVc9=grh`Sd90%pHu@BgDj9D|;K|3*ILh}FD6(z{^y0}3!qGG;oqUZN z>A@{mIy1hSWBg;p75K@@$#dZI5X=GEUKCm7DdZUIkW1UkcXptF;8Xc!QPDAF=!&y$ z6THnk;cMHEAb}U?KZn4wtF-dRsN3@qnvZ@Pb%b8+zOSQ>psSgAfXviA)UJ7qwry`K z`{1?_gpJz>w^@FD`ZgL@-9_8XL$t4Xg61^@AptngK$w`d2AIo@FtcKr7CuSQN10alkzqnXg_Nv6@yDBMep z?j=wL2|X!Yp%B_>a%}`$8!to?OCA4SN}@FXho8{#S*`Gy%nJ1Uvb2&6BqroMc_r}Y z79&zriZ~-|tipt#(FCu>0-wvF7-^cXp-N_F@PbwIwbZ0CsAc6AuvHU$vJ#1ON+@#2$}^7(`CRodWSi|91yQAx--$~6d>%HikHt1g7MI1k>Ud;+)_J~iXDst}QqJS0kT zP@~C1wJHnMMT<~Vun_e{i_xmerjZq*Ut54-9goFJuv}Y2h-tB=LXQolCTw7fxwgcK zwZ%ScDhgq9VHlfBqS#m(#Vl=Ywa$fAC3tVG%~y!nbD?qa{a$HUg_n 
zMUyObqA|~o*5UvfS*l9BjQ@UukI-|~A|0XarDZ>jhCklS)r4R;mOzDx(UlcvNq#YH zuRwv`i#bJ%FC{E0yqM3pR9sGrFd;|JifU%%t@P9M0w}JWD{2Q#&lRQhg`svul-@=S z0jTlCP)Y#mp7rdOGv?}OdwSZQu~BAi5PJ1pJodugF@nU{TBMe5Ky+vgOmzb&3^YI; zX@Q}-7p7Xq$(9+$#|hZ#Ct$5&oQ-rtAL@XI@M{{|hJ82Q#^IMg!pMP(jH8UDqpJ~_ zTo3o?gz}8gKT3lenJYfAxgj10CTVlhp|Bj@r7H+J!jAFP)iJ18ot?0&Y!ujaz+Br3 zGb5*|x(P-$BnHB+B2J49r=b#bN+VZnlO3+B_P7nEDEkY#?E1uqhEFQ^Oz9-}1hgBN)XUMv)=|AHXLSDPY zybqMkph{_WyuZqMKihnM_$d=nHuMSJ^J%6TG5zPy>*j1vo@)S3>yCW>M<_}s z22(*YRfPx*)MWLcxRf!Q$znl1U!T&M!DBunF2N_mR^~J2E-0q$l`!I#K8v{eYUG$3 zkX_k~Jc3VIbRf*@mzaXNY8`A1+Zp%w(eh3pJbDEgM%re^*3P5fLhG?_A-(5Agx5Vq zeBuu8ksC-XzokI0bLV^L=J#5+zJdC6FQahv%f%rG%ZT37)j_IizW1dId;;TPZq^U?S`G(ImQub1%ihJy&iqKHOg zbA=~2PIF7}Se;0qnWa6MM61{w;n)+6qK9y63kJ~~2%<3%Km!3-%aWqq1)UDK2|Js? z02?jJ!Qw6_@U%2Nbs52@fu9%K%ZeCg^okiRi15M&_19xcw>;;Pm zy(|RPIS7>Ts9l1nb`cU~3(#oDMwhh!J(dD=XtU6x%g2yjjb(;1Oc*OL!FasZY{C@J z_beNjb%e$Sof}(J9&9P{U}J%YunS<1DvCp8NgOd|u-_QNcE-~KG=QB(BM#V%*kRFN zvq_Dqk|K;Vf({URBSqyHAOJh^)aWXzL`$w6jahEgWqD|QZlv@5NT_*DjJ#>RjI>cg zPlm8Oi#D=ky$nV02E!=lbuZNDkk67sh)Tp2(_2Whm|w_`OBjPo%~)7!!Qyh6)9Iw~ z$&UD})Mi!$f=^`+DMdpY(6|UY4-3Jk^%HuuK5dBBC%w8Uu|5KWmZYz0Wod)8u@BzP z<)~i13C^ZbM#DC!1I>&<{C=Fksp^M?mS=5T1xv#;>eC5KTFATUFWn2|2Hss_dh}C^om?_5mz0;5^WsW6;-@v>X2h; zqVW=X-ae>+GEni=f~K7$QjUMv^QPkMUoE;l?houOUJ3CQ`J`8eXVp)4`PO5vAp-mmXOel*=L) zO*&s_pJ9U9Pt$a{Trd-`Cc?NJTyprXqxl zIY`qL7s^uT+nMax)4m2t&OM#YQL8Y~>5gjKfyOHA1d{$9%#$ z*Wl(&&B(@5XrTdG1%P3~FRFk~=TFkcGSG*sP{F8M!5C=DG_r+nVQE8UO&7eaBZ&19 zP%X>(Go2`L)9^e^C~@-W;?KsG!c{*GTk~qzT4v#HUITMt7DG`of_R7!-c zq1nlt;#3{2t`_CeIU~<0Lvdw{qV!xVR>w#xLrP>kV<90YV;l2~Hl^gUE-7=HE6jwQ zO!w0(i#;qVt5BZLNr!|i^R$SNTR@vzM9@iDP^2Y5Gz5uit`w9g zN<)yEDJ4#S2*FrV*;Gna+L1<0uJN3)&6S#GUw{5R5rZVHPwI_yc1Z$~F{$#5Tfiqn zR_3OqNj<7#olio?1~03id~Rk2WA2OjCIx!(d~be%4GRhx%ZnIsiy3iC7;!aGf-lW; zP)pNg#I?04^Kc8j1IQ07LrHQ5`j%a&>^p`SqiGcZ)wrGJw&hhMX6_&`b_t%LGjR8v zgtv<(*nb{r#@nW?Z=sc@SHJ0XR8K!Z)ymtbpSZj3?^!^5rp^a5BzDjRwn&6BRbYWS!xRQ296EYh3e`!TpMmV*s 
zdZH>qnWh-!ni3dG)wC}i0v0pUGK3`*MoOYJVV7=T#ch;l6og)i@tQ50bh$?vbEDMt zI4`Vhx)X`UX{dfUd>&XBg=OJSWdTsW@5*u=oU}40o1s>8YZQk=+?;C!P2$CDaD zuo(MoMc7fU##-L|8;UBhzSxL~{89|%Xwb1(kLE>{=+2LzGbf6UtT0-#W2nvLOdRa8cT*6PFS7fk5Wp<;KaaI7P z_0!k_X;cI=sPNNv+;OOEArzW?$|99ga`lXJgrPvOh;Wqnt6}67@R<^Iu%sJd&9uN) z#c0~lk8t~P#Ji>#S34Mu8&N_?s~wFfbG5)2l%4yBU`vlOUQR+^GeH=wgdsYBa&H@S z&PM3?^QJ%>jNuL#6STe-n%&YV*xHw|jKbBu9JUVr4MtWemF)wtO3!QS04iH(k&V4H z$fdB*Bq_HoeRWB@{(Xq&L)%p2Fcqy-d z9~TmI#aKev<&>1MVX8nWW3q|Y%*EFePNb2P7|Jvwy{*c;Sw>SCh9Zw~l&O?FN+)E5 z7AU*%xx8MAj1!fz6{*_=H$_I~Xv@;@%J!lxN=F8vrwmfg$mjL^S(<#g9ofA9UX*8h zd5qu%W**IWDZla=_#6bbi{SIq_`-azNo47&c~3Mb3yKyvx>4XAL}7F#O6xYk*mDrJ zk<$pTxsBM`dx)*Rjo|oY_y$kGv-Ajj-G|`sIs{+mVT1?IBD4D592mE~i8P~dV*ECe z%Wj}XV(*HZs3!cXSKUF?%spi(`r6emV_rdVk+L7Fx|oGFr6yQZWg5O2f{)fyA#+j* zKD~i(Fk4Y(t3)}A&0>XzvC?gu1EAaHB=qKHcUXDiT=LMA(KW!&y*!~V55MnqAt=w> z>^8JlV$|Brd zk%xQZd3Z2UgzJNaxZGKYo9$&doyf!e9tX~)wKyFv#a`OuA)4@3f^KWE8EXnmSe9Fk zv64y*<~z}s=S6p(AMG@}j_d?lvIw=TDui>=@TiiE>~Ta0sZ^3Cz{o$hY|z}fk}-<@ zD9we}J*SuzF1LvBrG%AGqLfxyT{l-}AgLwP3T2oY>CXgrL)fht;DGb)oEuq8VAcdKYhDgJ&28iK6M z$w=wzgd;x0___j)rq$3_uVlO&LYc1vS_0DKYet2$mf)*_+Exv%uSu~*ORYSU8-ulZ z7$(MND^1SI*lKAe+!$Sz(%1ttL1(V(B2YVEAczg=W=7Zs{tcR4gs_vIRGD(;i_q)@ zc0Lr^`@5iA`)yrpeEuhsE(CYFkc%Ep<*@Rsd8xEP_qO5YF(f&tK&2>b~=lPYg zP*y!*S&*;BOS#3E&vU=9pqQl)3-hH{y#RSC6^cuB$~FKd);l*-Q#g@QU?)q7rFh@V zGrKC<?8*lG*-(rh%+b&NS|KpbrCH3Z+HoC3_xD&S4c%9k(3 z!HP)3l$a$`)@06Z4kKD#g$YHbN~kMcDCIdVrTrK}{9lAcMKFbO7A@^i<%%+@5*j=f z+JY=JLu-&RFaV7`2CXZJVoQi63{_=}@JgV>nj-jWm8F&{XP+J$Ha5kX#+GnFJF^Py_31evd$nuCo4_o@qNpx)45DUxs0y4LmuE747bQOij#XG1^_q$}5>_)+r{X#s+?z$|xnu7;WSAF47p; zKoWYnVyhB!WlCBlUMR5Rr6M!_jpdv57m%!ULK0uOPbW0isj)5udn+sB~~F zyM~l(3cmars>W|9k+*u~U7FtmnqCo4CR>oa`~nmZd_~2wjvS$ip^$Ij#ha_$FIxxv1LYZ2R&)ljqunoAI>EQZHX zh+t4fD3qa%k+iKb2dz!CxL!5Jr|noVWy0i)9&0yfv3`RJ>t>6wZcQoHOc}6ZScj_k zJR0ddY@D%T-AWCPZn5CTK`)N2F2R*8WjHbQRa}`}jQi`gxUsSjwp*0Q#B?m4Oo#M z!gxUtJp~>#7lasV*)Zg^A)M6$`(lD4FAY6y&!SBrK#-(jExdUfl_(??CR0BLzDP2r 
zjMy(I(GYyJ43-7Cg-XPlPb-l5n+u=quqM-+q}MP@wwo-IttE{pFj)yQS){)22 zK=BbY?%H;^YT98hh4Oswt89?JO50Pmf)wx-dl}<{SXdTEmbD3ao~2OB za`DxxVQkz8L*o``>o!AEy$R~fCX{A2^Lty_AZqw7XMP}?8j|AU}JIIXRBlKu_&r3exCr4!)Lq=VJ zk$^~DN&vD5ph~r63Yc^{0+6M=yc`uYHUZgl07{XMpF{3B=;UYRceI44{4M$00z2io zB0twNYT9Uo(m5gzPX%`J@HL)HAdyON2(p6D9dUM*WO+4(<|h`}EUA{-7Q z8H*ECEm*pADTbCU!|;j~=p7tJXIC#{xQCI|16?Hz(@hvNej5ouv$h10$}*%JI;6ZB z)YX*nBIl#6(}00tGcWc$boXXq#k2u^%kr^$Z6Vgq=3>LF3L9tD*i2*FwYe0V)~GSs zw*)l=UPt3XtXihW_8BksOb2mha~1ZksKoBk3hZBD!iiNj+}jbs#VH-G%@pFw#6p}N zoQG4rFW`KC0j>>c@nE?gZ%;e%q_+Z(nk~2)bmP3qjnm~W9Mw3ntDq7a3BGj|ajYs! zU`3V>Q$-m@+#0myH=sVh9?6^r_!qarvZw*YOQOhK;(>+{xKbBI#9N1SoF>3VL?W&j zf|!DAUL*rp%Z;qiMYND?!Y(JD(92h#H=h7pKnszwkXG>gp}JJ+#VW00HZtiwn^CyT z6^4^wbyu}Ax;C&h!o~P#=f`&bZ%4cdu6Q$oRb6mJ8lf`yQC#6ck=}zsy$=O?!p2Of z(cUTuIq9^}hkF@o+fhPrt7v(pjyPj&jMp`aQbJN=B&;m5&0!S#=7yV;%eIIzXP9PJDrVOEMT-%vpinGm|Ftlt{~;%TZGZXxkN%q}+sE zD8OhMSKt;W*rHWz;CWO!?4;3Wy^uu$WTA|3B#uBh1~cQgmT=avh-tpSzni7EqC8rM zN?Nb8sgKZaMTtKJ1FuJ*wHJ2Uu7)yDYWJdoW>`+hm+`$iJbps98i}O*pSdDQOIGg* z#nMbqUINmnth4u0{@mJne+s_&GIXUZf^0(ug|2oK`}?4ck3pB3Kv{AMs@NooB9kZ# zPog9?gVOkFUZ+`9R;^=WxeCaxhca2l2EjKH185E#6G zI1Mi|ep4|#0bhmyOfmK*m)}BS`5ny5%gbZ5EkscfV=kd5MfqI7M;HnCo)-mx8p4m! zSDu4O5rF+?_$ekQ&jbIY$j`{n%Sy!3WBlCg#P*D4lM;vh!GJPKT*eNG)jdBpwKOZH zC-9S^Y^PYq&)MQg$B8rGM=%~$;%#edE4n*6F+4bg;o)Hgdi|rL=pWqr%W=5oR|SF)>?#nT=)G zur(WN*S&~U6N|BI@I|Z`nul%c3vg&pIks<9WA#J{W+#kT*>A-1P76lrO&F~(tihWn>hSS}6h6O}#3v^`_~xlF-r4QJqxEJyT33OaqlLK9k%ybr zC3qNd;vy^DAy%qGWhU$`FklC7tex5jHmKs5%8FrCK`Vyx`_Nv{k4#}3{Dhuk5u@9J z1{BN>V(|-BsI%NK76;%lv*Jk1%``lFz9+G0ZW9>k&6OKnqcdP}9xEuJC(nHrFcpaP zEu@hwVr5-KD^X@}6wR&VAq!H

MXQiN@xb>-b=#RaY|38VNI#mmddEW)H&XO`(#; z;f^*Tl<9&q)PVd_CyLAYb1a2~pMbB($Y^OzG6K^?9Cb92dPdA@6w?4zwm7XMj&c^Q zO`s=*ao1eM|3R=Z78}AHG{Y7`tpVDYKu&sKWw?oeE~5;ikwMfli$u*dpmH-}i|JH) z7-3xmogKN)dSG*u9@sh9En$3JEYVegyiD|eJ`GHaPV*nIBj}z3?nRzUS=LFStMW06 zG#*O?bZj*82+I;(g_1@bKA8a(RaWa(#-3)9iq*;BdYKdurVKpa5r<22ie7X-V?OGd?Ss|tSmqA2 zr9qy*6dRCw6uJ06+#M+Lbfb{Qm*?(7u6qFa-Vqe~$Dj$X7*+?EmLCV%7`QRuXAML~S9o2Yn zAdLrmoOrlHi@WQxaC_APJYHXnJF5zCcUpt1Vtq|{xRofxE5UMH(-+{FJ`cyuWjIoz z!+|^#4ly?DE~&zX+;&Xm3}dWl2CYR?@DY4^M%(fQjFSs!GYccoF7UvR6NDkp2dgHC z08J^yn_dyW1bVcR#f07xLQjrj zEYk5Iwo+7XrERgI8Tk7ZP88@Ij8=?=IyTeS-6BhjC5}ScUy(V?LL0NNVp@Yx%d|{!0 zwe+y_<22!yWL=4+Qiv0F$!gi5teRGyfWsZ;?|S(+oKRIb*!a-43Uv5taXIGmb<0ZE z?zT>ZmJZSGS{V1)7^Iq^X4EZoMPZ`#`kK1fK%^A7mk@j^zW-ta8%Y*<25z*ulqssx zxgo<{WaX6v8^>f?dW`Gf@yGc89mwVXUQ%RaBUnk}b@6-!u%tYK9AgUk)>`D4tC3}{ z!4gvq7MtoBcUzES>q3F6k6<1`sec3&p;6dqg2C#QsBT$}o`J1cF}WYp>rP-0X<<&RN%^Z-g$@c`10!tYiD667$6Ldo>$1u2pVB^QbLjxG2;R*Pbu?&rlV06WD zemsWu#uivrI{2m6IckG7;eeUYbi~=hS1^(-dI7q^#R$1_v3$G&9eoS1e8Pl{n?hK( z#)@6rT{v_&fYs}Y2|G15ZZE`|b@`Z?F2ttw1{~b$!l@GuoIB&d-d&~Gz0HK}vp&p@ z`mwq{h{0+rwhq=|*Jur6t{-PMMR9c2i{tC;xVFoN*N)rq#q9uoaMO)%oYdnR$1He% zw-s-$G2>oaF5by;e zG3YIo$Wzf;m<(RZ&c*y}T3;58FPp~4N-M@E(37J;Pxf0VCiIFcY{+Kh$u2R`;B0fH ztP;5zLQh?Rf>H|#)RnC84wRcBFjx}=tefC-qOgob>m)E~9TjfmNilj+XbD2a7+YeI z;W2&`7(B?=^JjQ0G%|`BX_b~VG}daAyP9DMb-@^HMVY%AYFAPjW>e;iqtq8uK&N)| z=Us%IBS6^6Gduo0Sx`YtNl&wr9$11-hMY(*>=K%jEZroaTOje3kW*GOd1iG=M~|5C zA7NL(h9Vy?X6NCBtXv-R@dD4=OSwE}Y%B^0yHc8?#qNYJ7*g(iW#Ye#nd4D9@a77^ zS0z2}b3;;OmE%yHHtyqjBc!#oENNi!*|?}HLs(c^iLa_m_-d&c3uxT3he>$pAR@iP zD2-Gj*Ad2Iix2rc=2(5mwR(6y*r@RR3+Od6HO|QE&e&dRaX?2~R5Rx4EH-#TVg7%@ zSlMlluO%h*wuVWqN&%ln$7`>Z>rF#u1D34|Vv#n41!ZB(XOZIqZ4?W2aV)CHAj{N% zyvjBd+Pi6_y}TZMDEIYKP6iRG8bf2pYAhSyjm=w6;lQCA*nj9QCfA%oWA_#WYGz=s zS_5nKMp){$!_u%DmCXm>Xg>;f_bK@L&ci==8Nre3h_1MYc~+~LFj5mt@?0*x!_>?)p*4%1 z<~F!_(+15pnxYrZc#z;r!p@6T;r7s4mY`IV4Zk-JD+s%Z869S4J(!trV%Nqn_H7Md z?+!aQtt-RSSRt18WuvQM9(o$*W3pe5)gx9+4r(#8q7WC)+i>B6fyIg)Tg$L@oeM8t zpTg;V-MDnP3wO^p9F=e0S_NaS`R 
zn%jl&;wHEkrD4t`^opWztJ0{<3&UCzgwK>h%v*zah&OMXmdBVh2R?ZiJ~yPr!^)^L zn~|$#lw~9K5-YHPPZlSbpCi4yMTDNL)22oifhV&$3K+k0O7#4m?3iDHtYR9HwEjhU zWYJIsjCm>p@=DAsR;aYnV;n$kk&Rzx(Xi66;uKV{;*_({APQKq)s7I#Juybw2rK5? z(3ldL4rdLZz!XHDAw>I2qKpyP;BRGNtaaBg7AN_;jJ~uQ4Iw8BBnaqA?9v-c*h!=l zv-{KWbqeU_p3}){GqMV?vQFJIqk9fH>4jAaAt$}93wTtD*quBJWkVrl0pYihU&|sO z3pM4?8LhB7-0;YF)_7b2n~X87VI7d^lNlOaN(v*S%$i9kuw&ztu7=AUK&cp9MI|a+ zVVFW01?a`@I2`pojI8y@bw-hGlR3Rn<+-K3rUmYX4qBBTFY>%DG_YZDhG|_cEGFRR z23Nb;K=8Z}a`N6~(Fp-x4&khpSz56aOtSE3B&y8zP@u=xDsfk)sU>Ni4tEfGLbRAx zpHu2!BjLhJDmUg8u@t&!pKgBc#X?zur96o&eGPI=O|-yvHauNWJC{Po*lP~-!JisI zUHddfSMI{rUFUH0jK~!cn zwrt#l!L}~M*@7o5ZlwHCnr0GCwoH}0flRSD6}%AT)&hhXBUi0;VQRGrn>K_nGw#Cb zWe)VU=3x0q7N#c(F*#g{bt`PxJn6)tElFHG+Kmeb+Hh)54Q`yR#ii3$y!%!Z4<8tD z>$(B2J*vgqPy2A|LKE&^X~TnS4S4HO7hb*FigzA$;@UAc-nbsdy`#nW1=Fb@2|QV~t40B<9X76)rci zTq7BsFMFTbyfgu$f%YX3D^e=2!0$!ci&Sg@-y%jjvAwL~GJ>z1$8v&@M}lr~Apw{# zSx!rq){;su_=X7&Mr}S)#)$v@2)ycV$vOvxaVYh_mERS_Z z#@NX`XIa$4ABrfe70Z-JWf~akfW%kGLma48tC|Lt{Bn6u<3^gHKDCf9{ z=TF(r(MV%*pv0esrM3$ZnMd3?in2r_mQ)5YpWpkcnzqV@Lk5P+LIz7%4{~L>d7cZ2 zzcT+>CK%hh0?2A zP)PWR@y)?a#`sB*adpo>mR?>N)*?erMp}!^l*6(d3Mw&Xz>noKn;ls)iBi3&$$EK|Ybhc%qF1-jFro-60#*ZU=!?<~&8jo)_;P&|_ zj_uIm>M=duec6GxA6DR#H=OwBsTbdPwHB{jPvf-*ZTRfNalCq~8jsEg@aUuyPmWdM z?COPh<4_sCcg2dIT+rityL0jB$`|qT$#Q%>lZ%i1YP`kP_K~U@57omsw|E#QRjaX9 zy&QdIU09JGz*1i;ma4lks2V|2Q8zOA)u>ekkyf+e(eYffB@LCEASSKWr1>PFfHuZKL>V?~tlUos>p&g-Cm;g zEfDxI>drmeW9%y+;8^D8mGS#!JeCp?&w#=2FUXa@BZHx>$StuElor~ck%qvC#L8Lf z3_$A-ql`rtW-N~;VU%Mq3JX7W5Yp1)D8pb%eQ8FeAS04YcWZ$m+)S{=k!zsM$+Wmi z#yOf?5sj|UB(o+6R#tWySND9li9GnvR$xc7dj`5c&U2;7N~eSj*Lg9wfH9R|<56Ju zV$QSZ&*O_(Im*WZyT$p1d|#9VogR8d!%ByXX(xeHLe{X$FxAwuZb+v<95SOw2H1<; z$(9A_Of8}b!Y-7Cj#gV>a`T)p=JT~>%hWVR+R+QK5grR;POw9*T$%yM@#1-)Q1}-2pa)r<%Xrz^q zg#uh27b*f%`CPtNSI798oI61RJF!3+*C$pe;A?JfMn^{n`uh45$c@tA1b9P3gdK}OZ*+7R z%a)H~#rPN|R*qx!)D$+(%wo&zdTigc1v_`{#NLAkux0x;96E3i12nzfR6Saq3A9?O z(d=#@>?3gVMk|f@ppF<|tIEf+^?t107RI`*VeHyhh3RQ8dV37$A1KG< 
zVbAVboI2TwvuB!c@@4Q+RL=QOF`T^Zo{?xw867}+}xqV)!7_;a)h>b(u^N%Da4OxmOmtjKS|p0 zhAo1JrA@e1IEveAY+Om34ozsfeiZ-+vlSpga zh-q1w7$HLHYJ@d4i0T{Quc(5vJcimNO}c+2Iy?Fl=*=x6DfZ1$!-~if<@NJ~!!X*N zC^eW^`ATVW#fteY;!&PmePv0u0zX-$<%Mi{rbRQ#XT{9b5DXf;kfX*6x%?i>S96Lm zZ!yclVti$>hCfRSR9O_@iQ#D&w+x{;tc*-%Mkjqdjq*sGwikrfB;CHrkk58Qi_3%!qlp#RP~7 zi_6(q$etPYF!BX%?i5tvdK7pwSYY%c%MwJ6*$|BbP-T}m$+o5xM2w4{l9xt?Z2g+;?7`%RXcnz8dhS1VKgj7=tj4rn_yD1_I zjMP>WbP)yGGIU0wuAgbjXb&K-tWp8rygUQGDub8vO+23#%$NDOB_1p)rR|kxkY%W6 zqt(o~-G-dXPDbw@lm-T2ubxDtVIV5 zW;-hC_rTD&AJ*0*uy-7XOL?|;4#B~Tn3s~)Yj($w^G#UR5_=6Yi1 z#@NZw6Pc+oH`Y$Z?a4r68DA&Q-6WF!sntC%&wFM6*dxmeE8r8r$>3!v(&}}0cPr(& z)k$Z>bKuPx-rU!vmv(%`7$zrIVn%spw+5TnZNScrTd{k~4(#5!8@u-I#h$~5aN_(q zoIQ6A-OX(nY3e|&IfPby26d(k{M7kUw*@6ZD=MlCXk1~zn!Qo#>w*`+H_q$#;5Z}go}y0dw%21{bt^X1wxYYT z3SDJ=Xf7N=V`(26b)BdwPNPP{#)p9OlrY+>GYFKV;Vx!lSDHX&X&5!}PHbMk7dvK-)F_L#A{$l)I-lot@b38sci*}b=i6svdZl=94nzAz0ic0DVu^@s;6PfOu`UfPQxFAHM$(e$T%Ci38=%zzS0HFJWkE_`aQHD7aXp+>0>g( zLW-T#cQ~9d6H;Pz5@Dq$^?5I>08PsC&!0n1>`saRP{!E_>|~aPjOml*oqBtF|7db@ z92^|{qt!hx^7ZA*mt!TZZfbG@Gt*O8yLJ|vHf_ZAEnBf?`z{>Zy&s479m3ILCvfcS zS?oV^3Ma2!#_3BJm9cxfW;de2l|YB987WIf+1W`KcA|D=H+r`)5^nIJXQB+dPc-uP z`p`OBh^FB}bQ6Yc%M56r)L`403?9F?39r7l9=D$M;Qkx!c<_2Nw(Tjw+6^z@)QMs| zx@X2)Pwn{TM=^ZolNi4B4Ie)Jh7I5PZY93;9Vgy+V!->ahVkHX5Zk7Uapgz?7j}hk zYSxU;&bHwvM?3KOstmr#kNx2q z9BydGx@ZUbjRc==5Ur&{Xf+O^!#aREO*5kTN%%?<@DYA-T^*dcVdxjR73jsBP3UMC z#qRCLaO3)Yw6%52JzuI$@p=jL7+H8z%QExIGaTA^l=kkAMwGeMIl{dsF7KEBMQtI}jA`nxa>FH${iZ6i@M@U)kZxLgaEYGy4)c9 zRR-zG?1&{Ykh)l-462s$XI4kp{pa!=deU(t5ms#OrL1hiE(`N{Tujgl?8NLWtS2t7 z58+sh6`$u%2JtGB_E|qzWbH`-pR7hC_8ev1^Yi<5#Bx0dFnV;t8KVloUTkCA<$ zp2q5kqbw(+~}~osC2lbnyn5YiV$) zMz~s+qJ7OK3~btgrWKR$H@4CMwJWdJ77QUkfzewn2t>om7*C10^7v}*nQ*Q0JklFZq0H#SlHzhK%rEEp)GN@N zPw2@KRP#%`cuB?gpiN*Q!MD)ZK{epcs#RD+t6RT*odUb9+h}&XcVqwF{WyB)C{7+d z#mIRUXD?sD;S1++^6pKXesCLCA3Z=X|NfaH$I%e0MciEnpRW#uRQMbpEw9dpNS_~r zYdko3wh8NY#4xqlkG?e(XjoQ+>OK{Gb&Ij~P(98*oWkWN8}anx12}nS5J#@pW9u;! 
zj$X3j&ch%+`Faz+_h~CWeKUb?zn#E0UJc-zZ-(*VD`tH0nFT-jo(tdp$c@LB^muY2 zfKw|BxIE^8X=}s05!px8e1aMp6=j zj46{%X7n__GFG0VGTNU?qVL?m<;8{kn6S%H>5)eW6&bB_56WdZrD7dTuLScJ$C))%I=1m@wbd$(>Q`Cw)&9Iij_^PQ&tu( zk|oroXq1@|GFbTefMJ>5CC}w#I-L~hh>+e^WgH#js@UA07G;=8mOQJQdwwZqC%}^^ zyCgrKF;)dFZAf6}lwf35)~@p=@+>yRHzAvQ3E<-7J{+16czUJKL?7yoA^0c%zk6sfRT zVfDI|V?mjQ;8W4|nDqF$UPOn`wnw6H66$eEjGkg7wilx5$)hs4HKt; zp`~jLmB=pC@w}DOkj41SP?n%7nd{AcsWe1*Cun-r%0i=so*t-!Ln!tSF}e@X{Q6Pg z8bG0=7bX0>#77AFSD-L3@&BXit>d#g*6wfWEv3a>f+QhA+}#~w#NFKpNpN>}cc)Nl zKuf7Yaf`b<4yAs2&ROre_D$M8&-4ENxITN|nmc>;d}pnhYi2OWGzCK}GBDga2czt! zVXR{T)Lmvli)v4YHdoJ=&{#3!p0AdVEqS&v2@o~ z1jR&QITfIdwG(vB+%VGA8SnGv8#9IHE=Dl+Gr+7BQ?Y78AOge3z(0BnLbEjxk~aP;h4tlk-g#5^rTB=^DE%?7w|&L0;)^1}D0J@EZeCtNw= zjGHIiapQz1u6^K)E2quy{YfL7-))Fb)>&bDiXk=!nPHQgDb7cSn=p=k>LeOeOVbcgttWonq;b2STkF`Ldi5s$58Db2lAXY6BNfS~L zI5q?^=82eU5($U#Ua;r;t;cu~dXAXb-wI|UX{E-w!d}}OM&le{Jkbd*c7e#vnu`k; zzDHtGGMrtd+^v+bu~RM1s#5b@68_Cg>Ev@m!|U z$%t#usOw+(bX0M#(X#d&C0-@uUg`aw%GNU8WW3Aw52OkjN!V%9{TP~3wGn7GGFw5Z zC%jhDpH0xoHIi$lY%@w=*b)NL-!(={AA>bywuGTF^-U6{-`6n28^fr8hGQeEV z5{5Pys&7Y|=MCe)SUAQ{gIP$bvPt-B<4o|LmNoilTB7#^8SL?Zjw|cev_*)Wy9RdA znHXs9f_~=C7;5jWY(eRokdANyFEnR5j9fk0V6aRyC7E0u!^e+hS&fAHBrPS>wB)%l z7&vAGRp?m4SRHxm1O9I&{NH3LdJWd^i3FmoQ>j{;kJo_zvkkc-+Y(3~pB@$a z@KNe)C=* zjN-a^xWhj%0KqR|C(5oz-Tm{iD7zlm{jKb>k)N*`oR~d(Hs&puhlPs?yA{i@a?NV2 zU%vsHH*LlC?YppT_daYoZ~&W69>x59+mXI<5qwkQ5tI~~)Zm zf$_1C&5iBhG7j$kgW()H5U!C!F(p(v_5%_AKJMJD2z}@{*aBI5{igyL#`W8Q2+Y*R#OWbj6 zx*bmCyW?brFLs9nV56%)HdzH=r%@trsIqO6`r;i&R7Rx zw}uTrrf}-}U*s8Mv*z+E6WZ5fQ2a1(r#kRDyR~EB0w6KOQTltClW~>;dP}iec zBg952b319HXypb;S_&<#R8XJ4dePd6`)v{g3}UUZZUl$o=n-RFroJ%=f4g z2NA|&#!sNS)mLCA`^EgV1QY9sRCc9q@EY|r404pN98)t3j8iA**Z{mgT7xQTu zAhd@QdZQ<5V$4K!jGUkbwTa`E&@tA-L_=K+Q5%UN<3#OgU=l@*G0WP{hu`7q0v$Ry zQG3$Wp=(52&(~!=J^`N{uZ84%%EF~Gb##(GAwF6U?-O)y^_{>*hmF-h>Dy!Z4xfZ~ z#u%f&rX3qOabh0mt3fq8&K-T&c=y%tRifWSKlCFA`)LQFpKb_ka1;g^#bYRK?r`%| z+W8EOwaJ0HLjg2hW>f7ggpSX0OcLmgnqb14(HbUJP73&}C`U!@^(Z~*lvITj=-;78 
z_J2eV@Wk>;062S8gWUGm~K#6$(RlXQWXjnz_0|!*43oLL)IeFbIA5|F~zw zVf%+WP%t|bY3X)I&2xm?R5e)p^oQ-_LGTRL#HVUZmcmjX*yxm zSa&Q`bHO|v2h29}L85LTLdFLnStkMUdWo1~5CJm3$!M@Buh$5gSKl8<0MP#S9fn^02*m!Q@y5VW1ma-4(nn@Ls9^V6pTPtZ zmE8OMtXNVRU8MzmlOPk|NhtG&88YhH12_pef8%`_oDenFhatzmL;CP!lv-^2aQ;SZ zT|L6fT!EWvI+mzlNyj4WMB$0zvmx|sMDek1@qd=7V(QxZN-}YO+307`SoBplfT2$? zi~}NR&7APAt~uV*vA`=Mv{m5iI~H#Zl^xLdc9zX;(9J@_}ttv zTDm3(EDb|FjMLFnU^iB4JXQKAmeE8^GS$a;U0Pdh4UC#N0fu%qFr(88WPSAr3dBSc zS(rcrCanLmK<^}Df{*u)^aCqw;I-*VQIjca>awzq^gj+D@MH}>IS$gKve!i)Nl+he zh5i~2ygp8B?CkOOCxTEodE#B(`~BFEzpu$4{NAVf>!%xvfdKbF-VS%MQU;a(lU~fm6wj`Gx9LKFb@mn&%xq_gdKsmW{r%*uEVBHo3MHFX6)Lv z3w!tO!~VSou>bf6SbF#nV%M#PPGTJNQj=gzTRhP}09up1*)qCf91C@lwI$S9Nhdhk zLz9JMLa2Xx{ReE`u?j(97Vw;^4YxQ-^blRR`e`CM!44k&#yomq$A;P1vtkBv{oPSG z)dhtSme?@E9~%q9ux&;tRuF2d;&iYrc@lOf>El?YJq{*YVOyj&4rl1&!eT32U+0b= z7I@-^X+ikZKNO!hB;lNC9=e^$yZ6NXqu%rnANS%~~(6Lbn)=Gfm_^EIw_-rS5!F0GS%m$ew)FK#D zOs2p=*B!be2{NM~IJ@%((V}fKVcB{IV#D5pIC%0D%i39KnI$n<`t5AxhLy(zm@Kz% zZAsfpu*rh0&eH#DV@c>)!ATZ#wK3;SF3TOU;@gQz zcl+Vh-ZHBCH{f~_sC$yAWw8Y5qf`3pdhD#gt@pclo$=Z`@9@|gZ@k+Z@AQ2i{bg|s zSy^ZTm7Tr;%q*;w#W-E~Kgq^ZQb&}LZsnZdTR?&8>E0}n1>}`ysNJ=KrzC~8NFG~y@rfMul}R(8e#W3<5fcPjiD3p z)<|8dK6|{bW`$vve$eoVfd-Y`SpO)Dql&Z5EJVu2eaPH=5Dv-H(O(u>aQ1^CdxSeUciy)c%bnrLba9dl!-8)#$PBn^zyRmX%$nwX%gLEvdX-@*tc zcIMDA<27J8+j_ZUYHSoHhXp~;(i8?(W>kceV90YFBOdu(mUeUk3iw1Asl<`YEHSqs z_;gG#aD)aMld*W4fPZ^175oT&^c!n|KBFxNdJEcJOR9Vu^d9YiciBk4JBAH|nv6rrpAmn1aa{<)d=Ri}LrLmmXb-W|gw4L}P zU0`hC%$v_ixjCi7)mc;>;U}_O`tJmK)?#^G_&k5SjKE6rus}~17?7D6@|-{}I5_yP z5!W8*$x$AMg-0SJJeJBY5iyL!m?WgfCnGyG4S6}4$e*5r0s?Q|!dY0bkm`=mTeE5< zwr<*tjq5imHg?y}J=nYN0QMa@g8e5?VAqKgn73^!{ALtFFD@2(X{neH8;j9_Q(?rT zbzBr2qe3vwSciqH3j=!#7L*ap&5W>q#ZoMrKNtRfF7OUAfnBf;w1XzXDa#R_aTbV3 zb3s^=8>WU?VMewO;=K)!;c15pTUO%9olo)P?it)Uy#;qpuEW)X)A7yLIQ+0B0XMg% z;@VmQZI(YyX4&D>A{$)a;Ec~_>fmtL2<&zqh2zdPIO`UIk1a#-txFotTSw!Fwj;iA z@xV1tZ+xI(hNBa#ut(hjtJEf8tCc<0S=(Zzu?1F}5xVLQD9{c-wpKKv$HgGVAPc@a ziSRLqgU$HKFduG*Df+(f*Pns_BR^P-v4Jtm+=9R}G7n)3&kAAUg7LPla0-vYqRm^7 
zoSRE1xv=c1UZ`B`SjKh)p4d0Bw#pJl3oz*0e^l$&{(7IrI&Tm6;3Ur~*fhRmYG~ zV_8u~(_*R?ICzsPPzD!NA?YKkSrD(j?0@^;(F4EN-t9|Z^7!(xq@O6cbyR%s_U((l zvS{a^ffzDkIMgPLr;VMY?Ea_BH3uXTX+xbbz{Cx`^&K$M(O*$wx`DA6>k$b3pcq(3r$WmskapJ&?@Ta2 z@9_qBcY-0_Q#YZVv!Ft=!$>n)JUtH#FrNZ7?^En9$*jHs|+fs$-0i0c@xE(ET`Jf{`B7jj8sGbqPK##zaie z)4&+5u}WyM?igAd!@$}ER&Fdy4;KY^ei6a2^K^!;xgm5+^kHOWLhu=5l8GMl`PiH; zL9UOC>`ExAn2o0a;X6is5(W&Xg_qn+s=W6Iz23tOm2D&67-EdqhnV8cVHS9Mgf$^( zi+8E|dXI5H?{RDx#=8-G-n=dZpJo8B4Hd6`1cn&JV3>I#8xPuCyDW@#%4dTz0~(&9 z_LfpHkF|rE7Hy7&C*`OcZ&q0-P_<^As^wCtK4ri{K*w`|pE8)iR!ydt$#gTxvy+5k z8GI19Nxz)LKUJQbON53~?L`rOajd8*iAcyyMOt1K@}^J2teG<~Z}x1gTDBAm=gq^q zwQI0p!v?Hdw;o$JZ^fSNdvNObhuF98D2|=^6noBmgoOw8AZqm@n5D*2zI$WhWIq;i z2>R1Pjq&$|QG5iPQ=*{hKr3Z57Q-eCfTsRfsu)WYw_?PmLn2odmt^#8U?9Fn3HOPEek_%`Sc3ZezgyIVjZ87m)DH8EJurhZzep6!@$*GPh&UK$)GM#5P)gsLtSkv8E-aEyePwj1mTJCo70T!#KIFq?{rmVp?kLzQRE zO5rvcu7qCVw0t;vcq$E_Kus2GRg@mVWh3fLmgIMFQr1+nrkXJ_HiLnoF=XDTp@}iH zsnlfMHvxc>*T?%uRs@j_ehaaC4s;W;G?4@EF?$*0;&<5!40(U?|Fh>~2R`pWD=e$N z$ltKAvV)0*wX&>{u7MG>^b9ag>!lr5n}7i$#^C)S!>JgCD5)}U%b*0+o8-?4%zEVu(WkhS&vSxOFhp zUE}$B5_E$z;9jr{nR`zmY1?5KC*(qFY61+y(lEw35W172U_3P*o*9M6-MkkI4jhF~ zLINhz@r;#ytSMrg{3m0g8KFg38Q4;>Dd3w3HJ$MoqdktwPaRq&x`dk}rpB=@C&$4r zG8Fz%p$Lo#gKv1CA|eb4L{nRHSUOt6h~K4WtVc1`qxfDn400`HM3?1kU}}xAng+@o z-TqXteTK=>^4iM!d#?`E!)t>K2|g1lT(g((v!d#=!`ox*@y=Lhyfe<74T3k7p9*}U z_6F)lQticIxJ431*jNGYyw2Z(C@MIwYeSLjps+bHu2k*3+y1{_}MS9<%2VrMkspa zq(LjnAA@}8`>oCKKEcqRl}0n1ty@kuENGQ&l1ryC(L%>WDYO>pac5w4$~hp&$p z;?kiR_$P{BFV?EtW22=j7MPi% z$jlP6Y^NYY+Y@;jQ<1M7hb$_*c+*^X8zjP+g&yh5>*|$;WCAaMiokZ94P1?UVP)VC zePe%&F!aDkD{qXmpRDWyVd)zL2mh&X^qx#85s*}Q%DhkMld~6f&wIh$0p|ARY{4gC z{6uwS+48}|hENqz84c=-{=@oX@Tfrqk{U*iACB>?EP7^otORCCMKHCsfEg8@xt%Rc ztqBW(nu9BB92qPx*;-Kgza#{La#S{$H5X<0*YSO&}Os!a6me$ZWvw$|CtFEi3 zj1&(iY=+Pl4;(QPvMS8`gNINR4pxqR2{cs<@_tzwfAn~DWmzXlDAtjU z?P=i!aJIDiR{Xn`3=3WpGpbb^DqjoM4-@_`MzrU8ww6#gF~Fd4YItJ+ZS#PE=p}mx z4;+C$W3;GB^zqtQeY~wJ(DT82W}bLO#{s>xZ1JjrE8eps%sir?PJrn}PlrwVLKsFB 
z(s5*Cf^RBDy0a3yQ4R83Yic$|I>kcGErAVtFb3EKK_@H|Anmzdhrl!@{LE>EO{w}s z?HQRHv7BY2Aqpp|Jxex%Ql^r_r*C4d%sLq`LJRK>*2EkAHSqcXExa~B2d@v(r?n^e z1{*Pq@#aujJ;oAm$&&NZ*GH?{d#neQpC~>*^wkbR|4CsOWEhR1W(hCVUN+TU0VcT4 zhNkC2^ctz*h!M0;qt(q6=uPntrF>T{5~vz=5=AHdFTD9*hJepn5)#LbRlp~Z6PWen z+zI&pX=i&Zt{er72?=pX%Sb|IZYpw%a#1+92(uT=#p1<_v1a*dY+SVgo7Zf{zFh~f zW7l5nI&c7ojvvQiTGtFH9paQjGl&&%#aCio!J0 zEToubAV@D3k&cCYHPzMU6?{867LP`jXs0kMBkxrLql&gCK?bL z6Z&C40Rfkbu|C-t?Vf?5 z_6Znd6@wu*u{@8%xXIa=7(5;0{4*J87$1-f>-0H@U4H<9i#8#V@SAh;bEItC5A&#W z7zf3|DJ}z1GZ(>nY8W)EEMY+9W#C`~Q`r}Vz_amkf!ox{@Qn;&Oodm7FQ0RQsk04i z2{s=-mq6gnU9%LKv!)?1HdFzgw&^5Z2Ll+|7-O6+mEpuO7*B<$$LEa*P+g-*&@s?q z`Op@#(Nk=$PEIgSSUJ;+-)r1fM6~9WR3$f#|0rqqz}e0&OnU-e}uQs=aAcdo!WoHV?f< zX*y!qI1Bn}9m-G_1ct=GXG$QIoNAClCJ@VHXld=m=15raIjxrq0(f|MDEW2*xgJaV z|5AEleI?&6D>DPra5o6bkq=JdgO5(*!o>^Ndh{@yr{!VLWN*A(~5-nxMAyxW$0{fM&7KsYz19lkQWR6rD+&EJrE=2PKD<3 zU}!Dzhuh8!80NSleqJ!X_-YG2I8cal`wMV(dlpWuj>U!D+4$jT0nV%m#wV+M@y#Yb zTv+XhV>2}|!@C#uWLx3u^-=hK^JIKH&m2c+jgO@m<7Ad5Zf|u&=@B;^o~DLfVOluu zM;mNrkF$n;_=2s}DMD-?!M4pP2FnbRvCuLe3ur?#tsM}k>5UlU6y(}ZN2+-mqS>;= zm_;GME(S9KauH?h3ts}ybDS;Qb=_cL=nZuvZ}c}J>>L8omx{hWt=ec-UTg0# zuMgCX`(muYyBMbRI)+Sq6>3KBVuE>pj5d6i(FdcA-p5d#K6r1;yBMr76k`p?W0H+N zOxX}wI@3lvTTw-^Vi2-6w2QVZTS=0aJt36fy{F{9{R4m!{{*7|Q|7Ol({h`bo6~Na zDq9`O(b&uk#^x4Eh>{b)$$KT)T0%Z6BhCUmOItf-moZUxR)n36iwA9+s{%V+OWAnH z0{zENQkJ_Kp>NLr%?d+wOweZ>E!s#;hL$oV>}>+?ZA~k@u4#>4MAH}>s2fP)7Q;mDDr_~fHcaQ5su ze0=sZod5PKtlhH>rv6^&ZNZz%%>`qlqhL@}2m>lP{SE72ymuF@jva;J_8qX^x*g7| z*1>Jo0vPxO!*OajCWl2Mzo-Z;wWT=o(Qz32`9eE62@zj?42QD^pm%U31}}=mfIK^l z&N9QqRBIF*n1hPWn|SuD0lz=*#EbT7G~GOlr`6w}?cO=mTtA88%R6xGt0ma8$rE$) zM`8a4BkWyfgmastaA<}f76<9#NWK?N7W!d-t`!c=pMdWUnxOh~cYL|X45vI?amF+X zpIBz%lu0~3whqD}V^{1lpNu62e#p=YMutfk@*Uh!=D8b>UqM#XfmydFGkwR7Wnbvo z^@oo2d(gJ&11-i_lh-ib;!SAU^ycv`OtgCslUxR3qJ4j8*baf3`9KUMCHj{!Ek?kvkUO}+7!i6`FBvO#ZM zJG^h?4lTbFSSHScMbcu7@y(&lO(*nnF^JF`I|qyFN9X&QfMSBf_mb7 
zXe7?T1X^LO*aA$9$cB0945aNmjujWKAaVOaBy8P>%)N)(sLs8CSsELB$%?SES$`tYpM+m{RvQ~>YJ#q34PjPLo)-Vf6u_wfPgb+S&}DY zi-;;^aE#7ER+G`1WR9Vub*nMH8=DbPn#=A%-RH>UML2O+A~kWNUIDbS$VwE4D_04;08@SXAD;}q6IL5 zyLT}1rq6~OFOsU|6xa#;B)xz)4S%4Kv5BGrWfc@rZxRB$9>9sJ>w%se71eM*nbLVjG)3f-LarT>U zFmwHCjAMcHu`^}CN5g!{ z+6H1jE6qY}S1hE0E40c+xs4jv= z7$T63nY*zMtgNRJKHeB=F&RT#rm~^%#oJUYgFM}#=HUjHNH!X5`E6*|oT&0-OFvOL z)=o~aB4G54^e}wnU`9V^Xpg{H?Lp`_suzZgV-S3LRs&#QGZ0$lqVnFt(22b;O0O3z zyoO_v13~EA4~A|7q2ur#bR7Fa*Qp=W2*NRz??BDE560QNkMY+1Fy3+ie`i08))PC- z_s|ln>k)tkV_{%C5vFGP{B0(%wvr9rEos4}Ar=*=YLHcKMD9}pOTeY9ULwz{x@LNw zmr#`A%aG6Xe6A-1esb^29w7ofBVH3tePfJN*T(SivWz-yzJ)vb8@u2=eFwa6;D7-p zRBRR=7-H>>;Q~7cf7(z#40D=_F`iKv@0&=)mIO7wWK8f+f{tIbGL6hIBniGVRw8cQ zAp|Yk3+%|KLcI)VtlfW#;y=cmIvcrxrf?J?14 zA`Gn!*eK}1E6@Y>9@5{cPuNl28EZ3i6zCaS7!i8<(9zR^B^9Ovt*#@>QI@|~fzO%B z+@ANDDf$mpr{Wujw`Gp5GEY~vK!LI{%Ycc>)U~$tXv9uW(87kn-o{fd`7h)CF-TDZPtM@$ufD=p7r(`+&pt=yl4TfAJFA`$1>M=xVZLPp z40rE<_QCxyJpKV}4t@yhEr(&SXakI9ErVI!3|JC)E=dV+i3vkMZZga#J7fQ${b;JK zz`ot9;TGn?meU_j+ZQ3}+G%)vwh5kJY=zt5Ik3ug#)5tMIQr#o+^zW@O%2zm4zHoE z@;XXye2L~;pQGdEIdtB+fZ_`Wv36EC^5QL#9jA{i%O~UD27jz6v_jrgbu3G;(GPUu+b{~APF$CvQLvY+Z5T8s*$H(3oI6_G6HuJ+yyI8Cx{ATDTp+GMe z(+rA`VvvGV#{i^x`Xa_=Dzd$jQ9#&bItL;~%MziZ^bk$^6shkDPYc4_CIF-D0x*D| zdEe0&eH^{Z4pFQcaxws7W6CmX%U1bB022TU|!<iFp3{!g(BgenZduIT~>%50i8gCI)1lgo_pkpx*+Li-h=B5S%$6?U0 z=nDhq5is){3mv<`RDFFg((FwPH+!AOH=t(O8|pUwp>015lk7)9-%bsNcH^OMF%Ehr z(jCIqe61z&!~ zBr*e5v6+aVkceBh9-#}C!7Yc@*ux7W2$XRw)5(bmm>d?h=OTW3Ho`KK6{{N*PZyUE21jp47+C4U*3B9& zlbzt^>w=)D0NBy$8dCkK>x_ezo+cX)HdanHFtd_eOhVF7M`^$u9at8&*2+v3D{EWG za$2Sq4j89lfWad)@s6y@^4>_i`Ti)vZwy|4UqVgk_j`AU4&EX7-W@KRh*R;6vQu*S z-dFd+yOPSH<%j-y!5C~5P4Fcs;8U~BL9a zgq7KzSe|Q(&GS95ex5h-A|_!?nj?0^dg5e21P(if;Ipty9PtUmF&|$Xo*aU$o>^FE zlaEa8Jml!lLB8R9WSV3m(>)kjzEd#Gdn)qW0+4Luh8Pn&B$_!RjS3^m$`{T?9xx{G z#t?Y#JNl!yE5n61fs-qSxe$1?(x#JrXklC+8~-@cS~%I*!_k(ih*r-pEEu~_9>&J~ zJF#~A2CUn%2Aj65#KuiauwnB;Y}m38>o+gL#;r@SYu{Sz*h|H}YbmzwU53rO7h}hv 
z71*?QF?JnWg{_B{V&j2@*mZK15*rUL!uo^rvHH*~tUOeNRflF^&EeVDaBKlKA76s4 z$CqQ<(G}QvYz@DMv40JA?&aUuwHDiU@VH|gcJJPdt=l$W%hnCpyk$Lu=UX;n`}VDT zZ725a+k^f4_cHb|_Ve*R?A^Op2|1VNZE0(R_f<D+;#+WHtfgDwR=#shOurRW^6owg7y1RxalBf z?>vb`2R_Dv{Su#G?WwPE;L960d+i=RzH|)-zxW2*&t1gUuP$Tbx7V@e+nZSZ-CeBt zz65J8m0{hba;%s5wiIhF-o=W~Z(!NEt62HvO)Niu73;pZh8^{5?I}Yr|mOb0B zW$!lZJ-#2uJ~^qx@w2CK^7D`I;l;B!^Y!OA_0@Tt{PHYLvVQJ6c@W1wI*Of#wqwIi zmdT+VICx?o4u5a}XU?6%-orbwar;`VSi2Of)~~>}U0bl1e{aX`?bx_^9oDYnJ+g5P zwzBNDZQaE36CP~Erp?>1VbgZ3Ubhtsm##(8oMo8K1|omfQcR7EI^Pg3abl zu-LO3W(NV+&d5SoQaU1&GLV#=kMx2<_=g1{ zH7OD`rI*lLcNyz8q#=nGGk&fuyk|JVKGzA>@wNyk48iOpD{%b!hd6y0H(>!2_~ z7t_OC5NN55tf`jRH7^0%r>7#DE$^O?cx?Cb$3}Nse3~4J(;-3F?e2zU_Qu#96^7-L zlTm1&gLKnEWO?ODGK?9FRGB^K5$VVGbW zfRRqY=u3sy+ldO#Z8G|~xzpY{Let$D7CxS^_hjpCOABYg7TVqsP6VFJpOyV?qf--5 z)>4P!#wrxom7~0_3{|zosI0z?s_Gl4sk_7TyNr9NZ!AY`Ln&(NOHk8Tf=a$t-EbGB zH8)VvcpIhlH&EVq6SW=pP~LnK_v)^oy!9rETd(78>s8!oxr)1t;?^4|k!WT#-ax4Y zKU-0Mi{Htpy@iUZoBaE?P%VGEsst64B`B{bMp?Nk%FFqo)< z@o}YcR6SS4^9nv!{xamef}g8EX;~R=-@AvKcZ+fD_C4IVSB`sC^|)P8hb#9gaQRL- zez;qS>m{|gSz3=Q-kmC*5SK*^|(~pgsW9;xKh!IAIh3>r}jST zI)6m_;}>Y|euT2dHe4;Q!}kQ$cNI;zRNaBA4Uh0$RTqA!dxUFE&vCQ;XWZ)i1-I`1 zhU@o##nt=2;D?T%aH;hf3O=x)E0^s)})^ zx$rC924B00Yd7xT`yX!N zt8cI3^NT;=?1jtt^o#G6`0PtQzIX*6eSR6Ae|Hb3KfeWs$w}xv!W?f6w^WjsC5Nx~ z1P{EgH3j{20tvow3^k6$D2o*IGPm)jjQ7GMBR48NH<%EDaY=bdBQWe}ePkDPnJVNU z{d%+oh6cuPa&#r&RA3Xp{ZrlbsJ|XtE32Xu6%}Fr{P~K)Teofrubm8w;Oh>T!gObn`mpef(=`9 zuw+XlHlNDDzAu-vWln{6s1=Ilr=XlR_SgSB#1B`_;@QJy;MZS(Uw#Mv{1f=?7qr~F z4gCHS{`|2UKRvC%j;$G(TjYvmGkvgYc@z$>iN}#u(I|}8Lsp;x0!$6yG*%l0zCl%8@`(N`b4r?_B$U@*3a#bRS*CJJ0)kY%5UEQc%@Nj`KuZ2CWt*mP6Kr3NyMbOd8NvE=d6IH!~H3B1pQQlO;3Rz8XRilQLp|-IS zwe_W_t1m`FLm6rb!?o+PCyfQB_;PzR>tqEC|3YiTU(2|x;iClYHF0{YHC#A`)5>DRr2%YC}yK}r??om z2#f1?igB~JLV@0`@>;@PDR z{9IKNZdEkmR#^j~(8BA}iRQNZXl(94MRgPI5_)&}`t9;Y0;qvNX~x~kcEYC{#f;lk z9k^cEhMRSrxYhgsw_6|MPU};Ycm0gAj%TQQ_!GK*`Zu~?{0mLpPjS19Zie4^yR;fN z?h@d|YyfKb8~MN8xpND5@7>~W=aJy6sH;LvQyr>U&ZUH1aV3Gpkn?huaYa2F0hV(q 
zk8(eh5P0`^&F@yQVXL}_iW*+$8os~sjsm*d#n*7{)+Jt->%4~7aqG^tmm%jj2s~9> zCv2|}d_P?M9@nn_pg`~D%^SFJgRkGbjq5jVDe${|t_Hc7rT*==f_{O{7T@6p#+$rd<8-&3I5g1__FYD>~ zQr-E$&{PzkCrmB95fz_y^XUipgckMU#jo(q<*WGM%Wtsq^ci?8 zSp=6I8)0>17i>Q{4zm-7;QZlf_<#5*0`?z3*yiQ%DoBJ!N+=R$W+Egl0twl%RF*NA zmJx^8AaBG6Psa4L7#!QZ5p}mNqvrNyG_Xawe)A&EetQC+d~*cHPH*S$S%G86wxN{@ z1%Et1J1bdRYZdU{KY$lM<6qqm@cWbd__4blRo6bjk4@j9@`pXRczh-ftPH~ZWK*ol z@x-akStv>|MQ(x-;sXs4=WPpLZA(OIPsUu^Y^-(5#qpRdoa65+U73mxldZ5LXd({B zJ2431z)UP~oR1>Ag~+l^L7sawW(7oIHtpt&IDgEDb-^@0ZKT+bLW;FMt*$##{DU!h zN(i()r(%HnRJ={a_x6-Xyh|HA&^-{tT_-DPDe7*nF!1q!xtBYvotI(4TnVbb9>~=;5IO=oMtxHS z>YDk!1{L(G8te}_D$0FSV{eTRzV z?mYrb1wB!Ew*(+1mAHJnl+a_`y-U@17dMIt$kGZFSJW^F8UpY--+QI3iYl)XKU7px zaq+o|THGkF;p;UhspNe`5LOU$ckWf;`t1sSe=VU(#mM8$@&+~#52ysG2p|58`nJb} zP#f-03Er+GKr5SZv!Vs}YCEaIT2a;d0QZ0T9glwg18onVvcYLVDSu08DgPdS?`;CQ zqOuz0gnBU>gW?J{F11v>4K*yQdMb1RQ9xG4YgxgvA>=9uy=sE4nhLO-jY3M(CAR+{V4q8@P7!d)&Tzjg7!9mJu5uMln_1?b|mMuw5tMt`Kxr zZd@k#E)#5*aOKKnD!pq8^i+zEaa~m34JyKG`1;$+xNz}1e172@CC-x*=P!Ow@G&l2 zqjQNwpW%jhbBH0{8fk&I$JpZSagOM%E{lr{`>Ggo_e?w1Hp4i&l+}!_RYh^mw&Ye4P z;J`i{J$?|UK0K}rSbX;R7x?1)t2lG%23CCd86vmsgztgMe%TIXl`~hY8P29b439U6{xcuE0Xkx{BNF{RV{4tc3TtN*hRV!`IW7?t) zTB-Wt%eeFX30(PnGp?OqiG|rVi1QkT-HXC-Vc!yLpO=FCczeuBn~YFLdnCJrAju>Y z%ROgcv*#Qf@Gr#IxzlhVHwXt~CgDJ=4t9hZ;c#*Q)`x^+_0$5|*!jpXnt^=hcr0V% zu_-$gOVb<)Q)A@v{V6_bh;!0J>SP~8O$mdCZx{?F2V*FsH{tgdANQUTiuc|8F^not z&BY57oZX;HK$?1a!Q9mi<~H`Uw61U<_>{S#&N7Y43IgrT=FMMOOV!s@sYGLIjS{tu6+Bj>zNM1tp$v5`w=G6^&FURE(8WmlbtXjI{*j%cx{U6QxWZ17Lc7J+O6pn&yeD|{;t#y|?Z3GH_-9_{E`pyht8PXyfq9SM zE3R%KAgM6xn^E1~N%*y)s-=zUtceX#E9&c8QC-!@>%ypSWEnK0vbh1Z9nGj~BXn7= zVsj+~dKGd#c^%7nO)IK+oe8>1K3_%13H)kU{*^5MQocu^SSo6dQN}1&g6fRWtq;VYcKa1j^z_}s-yw7XZ}a$_j6( z*-`D$=Bm5ny@@```g=nR!qLmh&P%bhMrK|xwvdc`Z-hl>ATd4n|3FXH1k;~nfS^Fh z&HJCw>p6<$O-!UM%gVy^>C+XJC$@I&+O^oab*s`@eC+5Ee0t_Xocr`Md?CcXd;`ZX z-@=N|zeViP58$+8H>~&W!=#NHV7_S+qCWTlX@`#@aqR|pB%~l~<{V_CB%vrf4b!qx zFry$Fd6`s}$x+Cr;#;{iAKTW?#=#wnaq+`l=&ZVq=WW&a?S32Z{73xp=rR8F=P&p# 
zXj}jI2|rQAUA}$^7cQN{H#q;t)T!+_c3=~}K7A0y7d}Jz#jjC!?HZb^DdX>a zi7gun;A}Srjy9uk>hKzDS~?f0LBUuvEg#F`^O0edfhC?Ru)=jAb_UMJzVK8WO9;nj zdBHf7<$=SAcGwYNkG+Wz*c_dN&HT(PuLLae&BN-DY%B}%!_FLk?3w9~C8=5{iXDlp z@Ue*Wo`mSYDe&@+g0X81#(0KdSYR{;1Vy6vTC^NJ0pzI8;kzK-iM>36`ENgngt-Vgtc{4NDY5S z4FM>suc1X{dmCG8m5_6JPGD9?c-0eLjqTNFXruaSAq3khXmbflg0HcyoSz{G3B$VP zQU#3FJl3`nlx>wNl}F%JQazT7g`$P7lwc(+Qy?esBLw+aDwZmEUP*XXso+;%Uq=8o z@K{d>inZl$XQhwBleamtfZ!$5b7f2+Hj+`jo@p=HG=AfD710{Q9wrvd!@JzH>z53i|@O|_uQ&! z!7Z`61ZO#c)&Afqo>95|^y|OT!Sf1&@NQKDZu9k<1m``zSKxTBp&6xZRDsQnxKdq# zyNwN~YHuf8Yk1unQPz`NUAhkG@I7vEn|%Laz3ul(h873fJ< zK?Rju6|ZkC8@LAA+9rOcPRgAPi}D`n)=(N9mEtZfyRAgo%cvCPO?XzbOsWaJ@-kXy zUejBI*tKg{si3Liq;KyQ?J&W2opJ5PRYFc+M~i!dc36&7d)KdCBlxc2haaxs+i$-k z^uEN|^XGByi!X8R%WswbJqNEayfHunZw=MQ8^et7+6Ys;KFShriq(B-bKjph1p_Aq zp_iGJ8-eFemFEpJYd@IVPC-O$7828PRPzoUWU&D|#ojtOIV#Yb8rZXfN{_u2g{J~w zOiVP=(o(4Ma+C!>7cX9d)vMQH)21!hwQDyHA32OuADqP5k3PZqa~E*_5-sgD0`L4q zMD0BY$4y&dy=@27<}HB1vXuy=>Wf;t5xJ|^DU;!&3A@y^L@X%C#gd|2ES{B*{LDmT zCPZL>g)uGhyVWjcl<%{fyaOU z`8$66*RN=M-i0T>x8dRQT6A?+qmz~Qw}-9xrM(&d>3$5}h4}r~7wCM{iq#vIK+i}U zAz=|XarhG|u_Z|N&O^Fo92UDw!6UiwH-)x$G$jU z?2nJe_PBJcjm*GWUh`EkDOeaZ6-Ay_*qP4vf~ zmP-`Id8cCN)MWIX8iU?`LFnT(8N)ohF^-|;a zUr~cS(5s^5t)_~U@9eQz5|y;DO66o_lTg7=1-)v%R!3MjHr6Zf6X>Svi$wRSi`Pfkw4=yM*?&vW_4jsAzGo5qj5XU2l|C;|}fY z9V)cjCDnvqE$&sdpoGvXX?jQiJ;1GoZo;nvH>(IDMsZaezlR{KY@q_|z;#0J8Wr2E zx;EUdZN;4$LX0r0BCOi(KSkH$7rb`egj@ryEQ2cY7LT{&x6o4Gtf7r1^lmrR<8nnQ zZr1X5H#O0QHW1V`RA$Y1@vr~j$-n-zgD7-*Sx-gjTX;E0m^lj=Tss6sB*n%Z3()PQrcMoUgbSrZ+?!du$GV%=vB~R z72mt7fJ~rw``&F_xp@^o(B|HF+0S?NI@RA*RsY^CB?Nlc2|!g}pV;0@m#FrnD~#|v z`#JeR%X{J5>#+9-#w+j19w_>FbLMsO7@^?=Uqq;~AZS>)%(Yd` zu}w^fK~`o83JYY#l$lt*Y&AA+*iMzFS_MRx?(9s=S(cCG%c+bCUYKmJYl z0r;IqK7*hC`)~aE-#_sCfBwK9fBXx|i#+fN!jT{OQ@Hm6_g_52{8g)?)*#z>HC7od!%2@tIOq|GlTmK?tiT6niX8CiOe=gg*A*Wxiov$zFsuuR#DTc! 
z*c6eD%?W8(n-GN!>A{#EW{DN?CRmrKgDq*gD3Tr6ri?&Rh#_V#%YkQZCWcH7!I-Hj z7&SEwLjvP5z&`?m{ev;gmzI_`c8HxVM%dUulSI1q)XbR5j85HP)e?0BLNlM+=o)Ya64rj^|QActaCNbxI#z z1!`YLjr8Y_?WbCsg*%2~0hs47HVs0L7YE`3hb1fSR{Ryt7@Qo-t|T570ZMD0m5H#gzYqld~M zMo+(<612V&GJqii9{29i&eGPFmdW??Z#OidxV(xguZ;Guny`}ox@z1K&{6eCAD;|x zhzh(*#doia!S~;-Bm`+G%bFjcyp77L^%-v0KfsOZ4nnO1ck9}fez@C&oy5&*Rfyud zUCY<%I&iO^K?oMtx1pp#lw&LI5_WfJoo`X?-Bf_b-@xBg*3^d5#uh%VqSYn5YwA(T zsNr*W%V^oFYti}mF&_W=Guod&K{bI_-qOG_Xhm~JD_T0+(Aw3;V=I++0~;1z4_<4z z=F-^I@jeg*FO8cL^;FW*wZUTzkCj!t{*_dB71Doqi`VZa;dg@#l2}{mXH_*w6$El= zsa)e*1Q=~BRi4BRD!uE3pPb*4$5eXKzt^MqZcy;Y zDsyVrtd{lfc4OzxgE(~LI8J_e3?HBUkP`JGKKuSMwtoF3qIYb9_qvr(PfvwGb}n3I z&qw&GbqM7#EHw+6S-F^zmy4MNnV7$_5c5{$BWHRPrsp%VBd~BrG8WHF#=6D1gk2$a zuAPAo_O3+nr89U?djl_MtA6gP$8V3?f!|&z^!k%PqjON0CdXg-_%}Hx-vofa5OBZn zLCkmCf}Orw@v+}}oc2h^<(x=- zH`@~zS9s&|RUSA!(-NOA3&OsFIBZNz!ST%b*cwcg7aWCMX>r(|9f%!y?%0%RhE?$z zSQDp?#o-f>=cj?fL=OZ}+Kb2}dRQ+|b+{qb80=pONq3^}(;;6++L< z%@`F_ZdFYL1r!Tb`i2{RN6! zo>TQb!ri6^xYN*~va*#_Y>e9k-kq8b!mbl{sSxET;JZz0e7BzJka3UjyDRp#R%L%h z@!jU{DQRdyWosuY8bv+U;x2z*DM47y*BV;d2`O6O$|{ucf30ih?_-p)Ov|X&OL%R{ zUfNqxdNQEV*w#o8HmRb$fiP@FTX!?h>sh{3dyQqNlK#~iHdqa!0(t%UxT;3LC(yge z>nDAAH&I?8=}jybHc$dS>3fv{jgpc(Y+!ET<}HHmA93sUHQc#-QyJ`#zP)SLuJ9-W z8>&a^)JnzjQ?z*tqc3Wk@=UqdTGZ-e|lvKohU^n}|0D84!FXcypM! 
zvbALI(YAPRtONR~yP}uAsVxjGs0ge*U|~Oniq8#E@mWY9Y-AZ974%fIF=VMJEp1%{ z2C0Cj1Z`~(^kmdFH6lR5CJ%-~aPvOj`r}629Px0wj7xBS2 z-y(necDNN5!6GjoI!S48m_7^POV=Q7;W9+zfgni{{P5$`$i4Z%H9CrzavM zBN7F<30OQW8EXpCv8FH`TNdVH>(T-o*t`HYzd4TT+vo7Cr5L|<*5Q|q7X0VMkF>PE z;l-bS-~}P~lwf=EE71MqPquO|Xr-RwRz)YSS9arS^&?!Z`Vp5)e!(|C{DMzTKgO?k%&69_!(z(Cmh2f)_L z2j()b(T3$n;5j+Ua#5<4Y>muKF?ON``VJp}HwM0jR|oXMtAw7pn-Qv7*upnc5ilBQ zeVPcore-QN!l;SRYo?8DZsW0?Hn_8vR+S2n(cDcy5_&E7YtZqy9`~O$;=yyqvwA#u zT8GX@)o3FC8wkF7+S`V1g0h=dxI>^v_0S?pk>F{dnxn<7Vdbi>lhhPZ4ODZ4Tmuz{ zL?dA+QBT!TNBdPP9qv_Sv{I~?gkE)ZC0bgV@ZiCHrEl+_(34}&0EYDE-ID=~;(NGP zN<|`|ph6O*B!d_wRRoOW=@D`&=t)XQy%J?qU-hkB{C^*!sq29Ppxf0=ih8@-_z-1; zUU|pQw4gttxb+EcH}L*a`uG}OLa&uj>qJRoH;Nm&l=C|T-aQ3&?SCn~V%pSu1fD8r zZv}46?QER*dQGDOy?d21f=pG)sHDAZXlX+wVSkrUub^tLZEHq3p;sycQ+zD_vz4^7 z(x+D;{dl~F{~WokXE{q>Z3FLxdNxE21Yc8YwGs_f`Hd_eiTZkJc&O}TRGm>(b%!?h z29I}ntqDaczVfoWC@a0gvMHu2FCpBnk=hK%)nGw{FR6 zFTr>92EO_32VDH-2b{Zb3C>=@c;o%i%2YLFk%9q}@cJObzrgqQNGrTM#twbQJE513 zp&9he>|kQ!1`7vo7??XDDj`#Wo~&P{+PY4lCo69V^d=#2YB0hhL;r$aXqe>dsW$V; zNKd8$%fg)5MTFFHY*Mbs9YL zsqhx8M%1juh|McNZqW?PoG~2>=M-Y~@_AUis0i74$w)|!L}q3prllugVHSZmEf?zx z^9jA#*ueLlIJ%21#aFm}^AcLCSpiFV1Kesx!@c{seWM*eT$`(?b0794e~vFQ&*N0U zMw|~>j<4gVZ`XVcwry1*A(Q~i+UVucx@5m=d$iM@H5*qRiB)lt#3vvJrN zAB zwieLTn}`7;Wsixsc$4(Pn}bzh?O{yt5Ja>=&8=+pTd3;9auRxta%`nCkQ<&VPoURI z*tK@k0(aH2;<|{ z|4x-8Jo9fzI!q7rMA7v?PeAui=+)AaRntaQRZ>;Fgr20X2=qETJN|-R5BNm+{nejG z&8DQ#^f6;h32S4HKO5;7$+h(PGcV1|sq-mOxlj}+H5qpF$T-}w-=9rvlE zXi;lhaZ9W$)m=&JbAs-e79 zS}7k@RM75LSKs3`Cgf;!D`X_Mf@R1?sisqo=8H8S@0zIiH$|9nyP_o>RwvIlg289xOVM?D~s4|ElGc%%; z-OI8Gp`yZRShR4ivg+}sja#v2_aPiPd=e)=_z>qlKZno0x_~u@4!}Jz8E%Q0@XeZu zh@zF4O3Rl}undVgvyqoyg!!{)VBxGhtXVPxD;5?aJ0lKhw1U~$sVF2^i*m9sFQ)*@ z^5(LAhPv<&lBtibvs$FTR@_tfs_-r$S&F5pHsT6#4NXNSsyxr_@4d#}yHFMHm%UKy{aaAzn!s z;S~op?{JKE^Th}UH^R;d>V%pxA?FenhQNe)xcU1l3tgI8$r9c6gdWS1>dxBM5>|E= zm}sbpq2q_(?LqJ0wf?W-onf!yz0t4Y{ZX&Ld5Q&99)Ti#d4xs_;nyUvqxBTvHMddO z(b@{|n%W64C8z>A8C|r$1YQ$i)=FF3^+eQO1Hsq$7x=mfzNQBPJysxsuaWPq?-b~% 
zMsaHiz1pTSs!T$af2)o5s;!NmX(Aj2a0H(mdla8yajRscP>uv6lI-1q4NGODDVCerR|SV-ui&=?;_w- z0F)4AS4{gWQO4IwMd{VGqNKK&f1B2}QbvgDc-}z8)`pt)Zj?8wpm(pTR_V(t<*}ls zju2}g#OesWR@Aq(pp-VYvWfpY9Zz|E71dxdN+n-UM!X3-$-}H|ln~fepq1CWkquwX zOZ!?~OVw6Kh`kH}ohZE8+IvdW*Ow?!M>y8V`>FhDYvnn4AIqwqN|Aq8)VPf5mP$@w zsie2uAn?T6-gpT>v9n@rRU^3)J>V0S*Q54s+`NrzH*Vw7m7BQm^$&3O3BeohjiA~a zgE#xDz7`@fz@~L z*{&{}+4Kk-@~g2h`ZjiFHDP;7CDtb1#NwEXm=paGR%M>Uj>3yLz2pacz4ki3TJ{CL zUvV0@Hy=gifqkexz7h8h&Bo=eX=wgn89tg5O*N;5{9t>m%S*tTj5I8bNX5>aC72VQ zhd>igMB8{H+sPg4{M~VWdKfOu_s78uW9&;ez(<7Lheh_-7^#C<9uu%A#tXA^G7yzj z1jD%L7!x-O!(!)Pc=Q~M3Ym`KlhbHr<1vCD9qr@`H9J>~wRMD!iyLeM0^vij`bS5> zFDz79kW*PciRGqDq?gr59Bg1|Yfd$z@y2)+Zw`44eaF6uLF#W|@c1|2 z6=*|SQ-#*HMzm8IbhI}UJhf=;AnZCC(AwFEwyp-WQQe8cYwZ@L$JYou1$bSwl@F@W zMn%{8xR&6nqs48&WGLTVQQ>j3&p+QD(oB1A9>WV+k~EyAY4iCQLU-KMfgzl-Il&LLaU_t zA&>WQv$hMj>hDwGJtp{mMCF5@Q2p=)Y9IWFiZ;H7V7p1XEB$ zNEnvzy9Ij1RGXqYE2!@3_?v1wx>459ObGM$D$uK=Etft$+ExOun6|y9i49fDrh8}`L z1bShr5nNWVh=@o;N5>#JIR$xnxyr(zOBOH0#trMSed{(HI&c`LKRAOg&R@it4?jV{ ztobl^^M_|l7N%y+LF}|8NXuS~{LDotOs1-vu?WlN&B5};GqG&JG|VDUrX@!pFEs|4 zSxJaZjYr0eLgX))kD2r5Vd0Xcm@|JVRxaO+?W^}=|Ek0IaKkBF+<5`t?Y)At>n~wj z&KWFCIF608zs9lErTAn)Db5u9fYXKNaCG`XoL;mCUu->qOUIAn=DCk?^UJfi^W6p9 z`Sx>^U%r6aE9Y_V`~ehyx)q(*j^kPRIefZv5i*1Q5a#BENbex5C|rys*)y>)B@fA7 z0Z4ImLz2}btP8WpnJjmFJ(cA;@Gm>lTO z#>xSfmX^w(g|l=~uBAFC zDzYv8($3%YkSg{O&$|c?6@>2Ayc~d_Jyq8 z`xC0U$4ctTo%(KquZybh0ZKZa5PXE*BU;|hCsc!-1Y@fba$eeepPze(G9HT+=!v~$ z@NbI6Evpkd-HwLthp1>Kgk`{mpu1O9LnzXUODO*jZ9A=dr3_+F?bf$8QAIbPriBWw zy-8(fYf4c;J1Zem3F`^G=GJPow24BN)F0`uE1@cuC?*8&DpB8XkFUvKlBl{nO7uK0 z$J#nRCIsbmvAsRuYm$rrmXknFmD~3>_+<8mguqWVxIxQHtNSk- z)^-<;9zH_oeTcK4eohGPhNIJDI8Tm+cX%!Wl4l}3br$XFGUO)|U`6gcEYF{X)k_v& z*|OP~Ju3_8NuemDwJpd@L`-}rvS;TZWp+L?=FdP5)!>rVD+#`pSigJ=KH7c;m-b%3 zjh&z4^Tmg8YUW;?UGgD5TXGKP*M5hu4;1604d3JJ(u??P{>S)g^@sRw>rqsl{RAC1 zzrd4*@A0(j8ag|!;K8H2c<_*RtLj@6U-<}4x6k8Y$py4t{RG!OJA@B+Z$o@QG$JR* zBPBc=*>PDYOi4safIH$`%&{cW9ml8p;oHSQ_94_}P&@P@9d2ONC; 
zF@+6IAQhf$v=kH?3=a=CSectE+Y>t4vh1u?%P848+Q8V#h`<|zexu&QJ0o60uR(+! zqxUF6PvcDtobU<;jOhhWKXcyrw5QE=R994VtSGIl2%@~2sPr0WeVRM#UgqGjl2F~X z-j`8YNsl1xUMf4Wu>w39xR9efC(wKHYb&~5G@zMkujwJJ)T1i2Kd#~X2wHwm_agxw z&pY_NRGiICtcNz)K3NXQX(m9(?9RClUqqtzy0Xd@fi?<@IoHO-xb4x{A()mS%5>N{0R zk2dsPV>^LG1=jTh&4gO>!=DHzs=L-le7!?SR=(SEA7x$7P|ep%3BB7?bvFpNTauGPmJm^pcDr72QPe=+%qYP59k!fZKtOR%hHyO2W7KPV%kJqu7 zs!f!(s5p5~u>w1RVk2K`YA)gD%Lq*Yo%Hny7zKbY`(g=ONjFl>;7}zsOMag!#O|t~ zr|Rbu=&90JWHeWfN)F$xo49)I8sBr5agXpTMq)}9Mo-YidxO;Qc0WSzy_ewYHvzBq z)1af#BKRbg#ehmr-4GM?Y*`_EU}VY4WaOme>BS~yLfNrOfu4<$uF{jT5*!+$*xLvJ zA1hjTco^d26Of*siRse|l*I+stXYk9Ygg0S?jiIJ;r!Y2_~gUS5FQ)@8~z>dDe0IR zIg1LVfXZzarc?1PrA=L#KNA}lEWnCow5+SZ~V@dfe7 zT#&1*_ce3g0xT$;hl6W&;gfC0@#%&W_;B$dT-$aLx3_(bJB)k#E}`l;rT5_ye7@{c ze7Explzny`r5CQEyP_7qJ?aMj^&Efvb|3ifWBmDhCmyyJSQ6kr@$!g;@c}4$#5U7&9Emb)r3}v4#@_?B}>>F^8tUCI*ff zfY%4Tssi3H#*kikd(bjvuKWpAwdj2~L8s zxwD+sxdPn}_&1tZd78`k8wkWUzE0TH*RX=IGB)vCMv=wZ3h=6`?)|0oYH8bQ#NyV{ zX4Be9pI$?QDqTf@CyK8}=~WOCwGEAEXlYf_J7mPOhIX-@3aXAFxo(y9&8ncKEay?I zWp#50YTF*5w*3)m7?lKF8C76q#{+`=N8JDU4?O(o5B{J3;qhN+y8n!pwG($5+Hr?! 
z?lzTRDZy7kt6P5m1&TYK^0@~pCD`_a3h+6KsSrz=A1P@pGOAnFbYFpAX=4ZKx*nst zjjE8)Dx%w7ZojlD^=ju#>Z{#6%09N8Mg8BFJD)Ik!8k) ztEbE)WME1{rW96G-g^l^Npe<&%%>LciPgPx=cY2M+XFvQdP?8kE!DyVHwnJX+0wq*; z7j~?`2P>A~^s3$1w_q1ysK9*fJP_sOf?0{)SXSrPQxJAbPV@Q#)!!=j1Ua- zoJ@u12}56BxP*qnkFfI%no3z8iRjp9O!o04;H(KeTf)v3&UOy4=TYWMTUc8{$4D1L z#ty|hL*K#cLtjU)!M)H+pvUMv_BD()?uRi(eK1txRrrS45ZQDCoQC<#O zG<7ym)io1z1lGfPf=ogbov1vOg?&aPC*bQ*dYw;d_?jp|e&-`v*Jt$v11+;akFRym z`bsWeTXz-i^ShosClLAf+o?7MdfgrT9vM85!4z69v0h@AdiwOl;);Thj`9X7nR-_8 z`g+MNL9w|K${+zl1~w`i2}!EG z8Y;iq_J<0D>K{Hs%d?*p;63^6-}vzl-bw!gUi|(aJoxcfG(C7q%PT1?gjvU9+?7QH zx?iC3(XXg@_zTK-E`5Gw9nY00Z-0)8cEYmdF@gFBRkX`h?Svga+wg!&kPdKI|eU5oqo|BkM%Ds*;I(RWlR(aQG;D4S@F8wtJ^ z$@3)m8eR@;NWP^cHw*YAk-2A9hccCo*0-$up3=uB%1;(6C@v-hX?6b|;J8i@-ci7J z^VS_?ILBRM#H)5s62WRH@2U`H=7Ouv=;c|56XT@blSF`{(OQ~TC19tTc~Tf zjNSTl)`Kuw{~h>6+M$&IYVRWSSV1~u;6nP>6e~-+`+!P;H+?G=S=STE!)sO)oYHUi zV*@LCJDw7151-egi%L#Hz$Z3W6kQv^*UtC05{Rt?Ve5T9N2S$E*43JS=nU$f8?Wk)b?AqEH1Rq0ySJx!!wONUJ zs@*zX51C@7jK;S$p-mdHZk8(>ujY1MD>fo6JhrlNk^VeU^#VPywW8LVMbS}pw^8YJ z3h=tBSstqBQW__!yB2vL)ty*iMw>Y>1$*IWn z%R@FnS`a=5%Lu)>@rBqlo$yVYfkQKv;j_h?aqsZQDF6IR;LqRipa1-VU;q0Pez^S& z?MkuIpY;6qHWXKWhuWrVyg{y`r1(pG|Mf{6*s&7ZS1rWh&FgSx&o1m-x(K_L&BfyU zI4md#!RonwD2PhkLkesvL!&$S6z= z4Mz~wUTj<(ffocPC#pJ@QO`yzj*hZeDm!ygGOJ|{}A?V$>KlFHI3^KQFX5hS%mqC;#ERRq<- zBdW(ow9Xy>fL<4&r|Mjn%2-W+{hgewq^`V7i4f>DH>rRpgBYUr1bRKdlcPYdiq=+A zRfCzDQ0pm$Co*LrIIgK*`Un6xL4n*vbIlu!h>J` zB=mrvsPuj%@O~5U{Ruq&_1|cDN}xTUn!Ep;;8R6e_m4^%OVxv)QA^cVOVwA^^#aul znZ{N@yDR;BWi7PR51ym`5#QfNyDOHKs;-9CysDAboc~vKBkeh1UtKSAqNTsG7Rm}a zGPqM$MGMPDp;^F2g(qo1EY~I~Jy|KIg(^?dkYwbxPV(`TzB-Bm+Ejs_SX@zh-E8E_gAzw(-uqr9zWkoV0Lw|p_An1dV!wUUj7}zuco#{0iT3Q z^~q9HcL=x}EFV#RcUd;Kl@QA-qr2B{Tv2SU%-)dxJsHi-%FdxEb5`VaWCtBXXIR)%>6v>&bCNw$GG`$wF5@rg$^HXk zZDlH&Z2cJWaxFappFl4%Dh!E2#;K-?OaQxIcELgY*%a$+1YO1^$bMuflBL_LNsOV-)N5ZrmL}g_l zCMOd`%N8M{a5jR&axv967g2r%Nc1g2qE{|beE6B*eB_7ZpwK@R+fwJ?XvTcTY-~@> z!=*#VPwXPs>2@8Cu;r8DDJ`3;;o_W^e7-GJ>o 
zR^iyeE!ebt7S_$r!``(8ICWqewyqAtig{s34w;12`9?Uo&K4hUx53$+j<~SLAG>FJ zV@+x-@}n~l96t@7d8?qAvKBf;2cbP{FVs@kVVM6S40I;&9A;p+*BlJ@FT`M7e3k#j{_U@%Tj}l^|{KW0kU# zbe3i+$+icqJha8#w6t9WXeTYweI6fDLH_ux6%X&%E7tbDz?W7`W`f9cw5DcRK8m1| zj`5fFR%T@MK(B?CPJq_~Jy|f}pU{*3J(-CiQ_m!1rDRD^E-n-3m8sw-fGZ&sC2x=C zCFLq5C_t3N-;zpM__JPto-6`T($YmJJwxY<-|>{d`-#94rT53n>5Web!qyl6qN@81 zm5lQHgdY`ONgIJCwv~_TAO4KG`!5t#C!@bIe^*9vC5@%Hse>S-&Fzvj7nzP$i?TY} zZCdlPT1i)_K@~w)OJyg(lYTr&Jg%&%fUJxnA^n|=jn!ywp^B!`>w47AMog?P%Y@Kt z<9Q>&RNqAVN$5#3dIuqQzq^jgyI$F9L7>+ueRx!M0=o`rsM_TC&k$8uqO!sS<$c~W zPae??$bG}_ZjuCNg0D_;{Rmkl;az5bP`Qikl{ve`B{HS$@6lbcx&l2ps&e`M&f~j% z`xbI?vQ+|2Hi5EtfSEN}rEi9TBQ^2*dm|N_`^x)jRD6Wq_=zT%WGs~=0QzR`gq|%@ zvt}VOHcf$^y@R8YvLb^Rs%dCCgr2OfqJmy{L$00Rw9%e4siLHmez=1Pg#`O z%=s^{_t-&f-M10@j&H(|5BS+bi*Rt?Y#iA;16$TcWAS`@@-m9beSl~Ch7v7rC z3-4+6f=ieMD_;egc(b<)@E#I6GBPXucA~)exb0ybL!igX@Ygi6T0Ew*Q$ep?0pG8G zcGBu~;`wiFcu4q3|6R*{>DwddsOTO(ZNj5xO?WJdk&hogtY-yj#Ea)`JW?qVc->u% zO1@qLRh=wcT1W8pD7=66>508WZ=~JmDs5=*cuR z0iUcaEFmi=i~18KC`Va!MM+}jYZ9fjx%bLt1_V_eZLW;k-fQkcMdxEQK7N7DAAhB5 z`17xM+&}*txc|$)(e&dVsDA#Z5_M01McpIG*L#j8D!{Iv|AU95WBc>p(a`-IH3VTL z)m=Hiw}jtc+T2bx*-e$#iQBcZG!#L{qX4g@nl@aP$EqdhsP3xjWIiy@YpWF4DQn`% z$|rWZD~(y~&~ce4Du?@QML&tLZIb++?0 z+Ex|NsNg!w`MRj_^8XRtRP3S%A3dnYvnS0g&vu?S@^b_xzh4G6WWYqMbSwXE2OG8a z4we(aCw+aQ_&w!Pw5hA8WqA+72QsU-d$=XBMdHPFi`rtEs zc=;OEY~P8cE0&`mF9)+{6(E1kG~~>khV-HwWYFHmr)3~MGY@I=79nBY3WUvAgMiGX zm=aL{Kfg4jN9I%Ir6Yf8Jm!SOV#oAC%nl2|%*YTVPw~RMoD6*O!Et=>!3oS=G#5+P zF2EPxom2WlWYK{~&wy(;TXE#XMI7UAI`Y8*>^Z!G;LF8<6InR%aTX4ripQ!=9w?e? zN^9o=w_~X79E^6F2X+6Y&b+neWQsZ}vPRypEe{%4G3piIbvfDm$vY?xzh(f8K*(WN+o&BzxR}2eMlQE*0xnT+Nnkaddli9^(t%I13d}p(`%u1 zYi*&*BlP~Zw$i^xmDofp$M`4odVu$T0LXLYIX+fkSRuN^}-6(B)fSS%nc<|y^{7j`M;CuA*ztH&TCsaM6y?y>4RR8#2G!kx2PkvG? 
z@5A5zi=TN9{>pg#>wnQq3*6ZK3=LgRP~F;1_)+Dxck_F@X>(ioH>vIz75rbysagem zWs-A9=vCBIE2FkzZDj#iv9*%MBfyhT61eNxXtc>dR4dD`QyG{N@HG*5l5{SqNnQ8Z z*bq`(RA+5%ROPJ|=pgXAI%^cQ_lV{7=wS^W^S+SdLzZbz$m9Fnl~n2iJ>CzGo9L)o z2tgT;B5)c(`r}Q84@Ag1X6rXC4LqN?kA@oG;^;lfh;D$_BQ!P$#^QNS(T&KNF zC-hhsWG^4q1#5dpSlT(mz|0P66n6cFYbjIIUVU#YdTC5DhoyrLY+VDW_#82Dk`>aj zidfl_2|byS@lWVEDsyjTaRI4_kz#Eb(NrEONh!$5nueK$3$S?MDrNfFh7IenXYV$| zCq%;C-3?ip(-9t$fr5f{gvn=Ez2_8mfA$p)oWF=u7rsK#y!pt?%BIqrj%-@n^jU;l z5pC=AX^2Xr;wxH+n0c!ZvurE8@|VFYb0M6Avfwi%1x2X~X=}5PKP4Kg5;L$aGZk|Z zf)N+s2se8@1Wa*8Y-|`#5PY&h%Fd$)kT#!P zOu(E%+U=z)5xx5aJhq>M+1gJqZpKcGP2YqG!5c8jVG$;IuEa$5`55IfjkY!gWBuY_ z8JCXWoI)hz6(A-#1u^llw6!wTZz_Qosx0Ub86J(`pfFlm8#p>SDR$P>%0f|i7JO_- z@EP$qL`N56Ei9o$o2%vQ1P$H{W3A0F$Y3Jg96y-K?j3^fRrDJ523{Te2417edy^{f zZLMCg3o}DK0oU}9pt#?l8nn32R-RD0Pv|{(+DzcFQao=$H&t5qQ-bR$Ezpnkxc{7B zd{#}sHR351-?QIZcvK}MKX}oE4!%$N^d3H=B^Gc}4YYSuD$ylYimKv%M-3iOX>_x4 zw2ShP{4rJrS<qDj_1nYMMEv4hN@1w1Q>*$L<1WKSroK} z*R1t^3!eS@6iqg32 zaxVpXqV`mId;+{%FM%gBIBw$3Jp!=guCnjO)$2bXEj?M8VkywGv6Btk?Fc?PSbJB3 z&mN=I4bgi5m0q7w=%r`o32Uby*f<4Zl57^NYl)Qfd_>14(IcyX*R#TkG9!b~3l9rJ zRCpk*T>zrPf)E!Gg>-^uS|-(1;UX+uv=-~vZNa83+p%uzR)oaG!7U&d!3mkjo4+0l z*B-{o4Ig0Bu1^%;oV|1lAAk1)a^@{SeC{+>`e{fe_>zk9kyx0II6@?P=0Zd+SdY*Z z`w+hR2&T^32+z3L2#A=2xUdD77Pc4*BKfx?W?)G~I@af8Vp(o1Qlni^m@fk@zHoK7 zLI{8Fo`XkGM+MtNpQWvlBi%;DL`0OvTAPG1##o8FQ9oBWL3#WFGwt;m0q)Vatawntu=m z`TH<2W<7?u%)xNGh0yg~2kpQGQ1eQMnrAGGLsQ|KRe;#SnMmUQo|KW!MkfZ5QL=w9 zuUl{sA|t{PL+Aws1i*z5vl7tB)|^iEl=8H*wzjZxafKlprXjkMpk{50Q8xBab8^LS z9*0`kV4R~XG@RX`?e2w zT16_bHr}{hyy5RZA-o8zc3RsGD!tBUR0PlE$d;TIxBV&M^-}{L5qc8$f2KwLk+%3b zRUV<&Lg+o9N_<3sK4!?E#Y2AoBdSUzs4~RXcC}MwurkPKZF37Ng!JVx1bPBHWk7?f zMHZ`UR#t+M`MFf3o&3!$0zI*}sy@9&QGL?iRY%~l(G$2yl$D9X`)5$uF)C5zU##=yr$2o z{JK!i|D{Cc(Nf7t4xVb|W9hr2m93RQ3zemnrSqi^udbfgiq}Y4i%09{1$|)4qj#Htd?|_Lq7D%Tei;hhs^rRo}f3C8k zttCn?jEW-=Q4tI(zF69=w1k(?E1FN}t;G8E8?j^eZmin45&n_U2#QZbbk$?h4L-dkaTD{St}OX2Lr(mdb86;TNt=5NTt=3|*ye=rhrSG!`_ 
zdOu|6IUz2_3K7BP*s>}H=Z;6?vtuFn;9w%Q?#xBex;e<%y$6Yeoy+D=V72Bm=*>O} zjl}I3?YjbFTo*&ncRh@!ZiCj8r5Np*2Az;JIHycU*z`F_nK=iUg@s7TOrtW6rK*$p z%&Ohyl&%D->(Jm3_;`B2PBve+w}!K;Bc;3@ENyMra8b$=bZSN>7-eb>b!Rt>bMe4f z*$K_g6+>-YFw)iyBduLAmhe;ebU+^?O8v3D3BNb+ruN%-TmKEbLg<-A8ljeeYw2o7 zGc8cFK#!`gjd1FIB=)um?X;zB1Xnw4Y6qd$DWkUpo8;bg(B?`A>>mBrO8eGIHPeFb zpQ$EZ(0WouK7LLWNVO;GL7=DVBp2%>3Xh+q-RW%SceRNs;NNPef}_%FP&&LN7mpQ1 zEbdF_H8j@n`8vhYc69J}H~$Si<@gu$2(N!YPc{$wpP_nQpjRb%dsULpM>||qkLo(Y zjw-IUsU4L9yy{xqB^V{2PiAkF%X)XRC;$Oi-SGgGoexn>ut~pNRp(P>6t=4M2^HIq zyk@`gTK$HWCqJW>R<@c^+4WSJnpVa0`p1Oc)1UEBMuDjSU(hDEK6<1myfVVAvR)F8 zRm(zE*6_CycFHKN611}~Lw2iERyk%vQ7_l6Np-BH;(PeG8-M)yEB^KG-|+1DLnS?_ zmDgJ}-b5=&IoJ0=X&$c#r4~9?{}H z;AfQJ=ey;*S}PU!b zCe_|;LhrUfulP37SZ_=%OkrU~=W1=KKu@}VELkV4Y#d-=Z4W&oGmKDEM=xWWK-jp1 zE6~%{vxla(IkIvHnwSKBP&H!P13f!F7U+da1(eal$Z#Gbs7PXwoScT7oIK2&Nu{-9 z0oJTtitW2LV&&T92n-29Qd&Opi&kRE@`G5oWFM9+-GL*=&f(*8mvQFoCCp#48Iyfu zkQh&emso(fL;^3h2;sT&5L&PVQwvwXzhE_jW^P9OoZU#z--4vL#R&7uK`ud;ADxdB z-)Iy@CSgWg0;UrjGjh}UJrVEa%9OO;QL~edOt!y;ru8c!Ufj3g}eGrx8its2Wc)IH&JJl2WwxnS9-W<%@T!7?l z%Mp9{FsAG}3FlR(;J);8SfzakP=M z3sxa(;RPu_;H|YL; zUaW0pC6yDETyU9@E1UWV^pwF00iN{TN&E#p$$^u;v2tbLLKJc%6|?k-s-~Wm zqvL)ne*F0fp1pX4`w!Y6`IrJcSy4w8jg`Kxwi1pkV*y_mzpq>7b~melC*W)2-;5$nV<_z0 zB4Fbf1W8{J=;chC{r`rZwypv_nffI2J%gokMiW*kDae+6F=iDIXmhb)(=u$|xegmQ zuA(B0L{@qMmM_|l6^r&`*7Wt*wc{wxocSE*&R)jS#XB&?D-k~KiBydoH7w;J(b(-G;P0)Lkv1bYS|(9H)SlYJ2uI2HZ@ zfpGTmfhUzxbP=uNg1MNSpAC<^3`{A=gF}2GrczBF`1A{u@g_ccVTEVL$&f z7{(mNB+tFjvD*c+$;V+CbOK|XmSKQpE_D26!zE=Y5*Kbl){?czp(@UpF;`hEH9j#x z0bXQSD581ILV`jN5-N>K9Q^%*VapcF+?2qRjk#qIgYc6@A!G?D9cAFc0-Cmt&~R|Y zC@Tl5I%h)82}3LiKu2!^&<7(Nyr9GH8sp}KS9R2=^!lQg#`}0vuP@#<>4VqJUxj(B z2`e3MezyFLw405OsImllgkAR&Dz>Kt3ss&dysjVl+)pipoXWxq@C0%KI|X!vUnf;x z7ZqSPU%$`yJ>X}a{Me!-Axk`x0gbL2RcHCj85m-3A9NF%5BT~cv01GO^c3JRBr#b= zY@2DzYv4 z(25$F^IF@2^6F-kRW~WKG-P3druHuUL?Hc1fIa!?ceFfs%Ioqo9{l=m!tOC{Q5}}F zbh8nmoqh1r%T$#6w5`A58P(f!ey-!^KT-ck1-;U?hbU=%fO6ViQHhfD+#)vj36BpS 
zqqbFcJ5xbVSqo25crSrR@C)!{A7BCA|9~FVoj|W%`pjg?8P#*AG$xEzs%Qb;1FHJ_ zY~dF7Po;4uR$4&SIwywi!0GHO-)gI1m7)LW%(ua?v~v{&h%VZ zJJ8A5TEfV}1SV8`R&+r&viJtUCt+ph08>jF^s=P2wRKe~Jq;aejMp?)pcfe}(EHoo z%1Rx!{GdQDj35b#WQB|lL2y(E!eSzkkdlln1$sqTymSFJZd#3P+ty>v%H@a*4#)J& z8Cbh$8)gw=OJ;7snUf#k>_=y@ap_Kk`((n$E(JkTX2XB#0tCdYhELpDcqgobZ{h|_ zOD;p7v?@gm-!d{L<6l9Zh(}M#0w42aW;$$eXhOpPapjTi5Pl$NJ5fJ1q|dnaRk>h($tD zFcPvNX@?^bm=+DM#8?F8{fmR)50?_jynxa*WEDo zH~`0~C!y!D1%pfqG1|TW*5Qj0S-251*6zd1m76eq!BS)u%tU&2E>hTN#K*)SiolBu z4MkKqVHY071}2IC4}r6bCoC)}*DY+6JuVz<9jN*oVC(1tBO5!Y85(1(xfQgWJ)ll? zH^I>p;~YFOn(A((jXTwyAI7-{U<^Y)EEYzD>JSGj46-xC5XVV)%XkQ0GkOm z1as8NKn9^xPZiccE89v{*71zMd)BIiqVireqw6PCbker=s62t*qu&UwAR)QZQC+X+3{;766V zBG7ATsZ~^7YYP=;TO*&7X=kjw9c_GHivqfTMx#v2`XA7v#Z~(4{tkhigxFe9cr{dW zwe`&eUJC&ub6cAg=t-1T)Y2vsdaa#!`qOX942(xS-lt_1W%isvYkB@F%G&PZPGbj3 z39>3etxD$H5@<4QO=fVkzxYkb?-Mmx$q?w>YwlK3V2YbNmE62`s>nyb{mJY3D;m1G zQQjac!2Dn6spcK4pjSiS$xdcsZzZ>}K?Z-Mv7l0xDQ6GlnljqEcZ34tf zKCY+wqh%)aI$FvV=t+p(73ej}0tbX$v(jxrXv^GO*=s`GY3oPTNuNH%FohXoRB@CN0DG-151j9URF*aR0<)muw$jrG{@-i z`pC+gg@~wlr6aIswOtkTY@wy4t?FlzRg)t_6`LCo9Yv*=O6cXGXx1!3Z!y-cUyU8x zH)GNKc?j_F#loTmSi5ioW~9x-fi1`J(aF!Sam_AF^-hD2Ll*r1KgRw7I*u!i+J((_ z(1~Mau`Fgr%gnYc$&xM0Y?+yvnVB((!^{rzB$-JjU=%a6OiA|L`?TBRneV&*{g>9e zs=BJW)m>HRsl5-pr}t_M>a!8UNAAV&q=Oiq%ox1~aY@@THf1xWOkRW3skFY6$7AgH zc&y#B8k=`*LDINnjGZ(I17i~~cE&tRSh5CFHf%@2$_@_0DMjzI9_WJVm?_HscZzm{kg{um->Md7NTS4dD4 zU&qbT>&PVJatOV=ifn=`3%OKl51>aymtRZhF{JJUz0%uRDB|k^+F1e5q?O{aB~?l! 
znt>vIo-D5F_R6XH%BUU-3E1MIG?bTRp@LD&LRehH&oUM27U;>=O66Jdvt7Rry>u#* zbSjffqxAmYpm$xJcCY?H=vjdGtLo!4{`P(t*j-T?c`3(q!X%Byr{VXj=>(pFDv$9S z{hn89KqjDFrQ*w|yoK!QdIGPG zz$-I0SaAHKpd42;u9o&zV{~PWf6FaGA%Umq4T@@S;buV*u4m_%hKxUM@H-peJ=8VD z3cV|86i^55-wpIMP4J|yAsT5@KxL4EUmYiRZVJd?vi>b|bvtn3 z*b6wg_j$y}FGWn>#f(jqo}1CH-x2ihxd)>M9mLGI16Yu-1B;W^WAWH!STtom=Fgfz zRYWzE8jA^wCSdEC-I%s^0g@LHn$sp>#+Hp3y=(~r6XFm>7|z+b4WC{69Y3buz~VKl zFnH)-#10vZDO9ElrccD&$rF$?G=a+n*t6vTUVQdt?B2Nx^A;|^WWp$MYAQz0pO5L= zcVp77qZqsUS;TIA11X2TMdGf{5WC?c3|dG_JMLL{4nBl{5hoEf`gwTt--j-N%i&0M z7e0I;CNJKGEeDTd>;3~+xp6C2Z`g{(OO|2&+_{)ObqXe?jHRuo4JYhI3wmm}NJym3 zABFw{1{>G~1cn&ptu92tA(ZODQ3#-Qb?^>EM^9gLmB}AiTFB{$vkHUXS5U!M6MMiPD z0bW`e0Z2QWRgr<*${gBQDg=Tmmk`UXp{*t8@&vuQbQIPTXoOzboop2A^+UibR=O#5 zaAsl1rD9O1pI#{~ZaG0(sxA?PT~%c+3m73-Als{M6Pbjb>gv=`am9o(z)0a)IwMQr zu>j}g7;CF3A2T!6IoT3|-c5Dspe_94s+1m8-Tg)iYY28%uU;oC)Q|5b;c?R_IYIB* zO=VrlGmRL(-^j-AYS6fzVXUu#AC+KcVJV7fZ%c0diPAejUd>&elY4~QU8I-Sn}!P2 z*WE0uL;kJ5QFP}o+S=RPUP2(|(9-5p>1kv}h3T|xqPUXhzM62Xr-HtXd@8=2@+xGP zmYL+BSJN~PF+pvG-hHK~FrAR8@&rBAef|e}S4~m@WurD@s&lWKzH&Fy|BZ|+s1?4<3P6qJDZ7r6*f!g`QRMmGM|9J!^x8mQsBRfl%P9DCNVg zI1MFKl@{m`x(sz*R#qJ)Cr~;nwc=X4uxPqZ>$u#X3A|r^`IklQ5qfh7y`F=6BW74% ziklb&^@)IYL;%6(1OMJ;#EDO=N#0i)>aXRMAnosC0$GVLhsPuN=@X~`!Jvjux5t;fcRi?Dd|49uN1 z4b$dN#rOpiku-lC=I&UAH79mp_Kwv^Ubz6V^9etyw6O$V?DT1fr-C|q;i4HcdFbE~ zOddB0@q>qB*0>2+NZ?JU#haE$=)|XD!^)jFcH{!K?>&OKt2SZMl1-So`3UCizlfC0 zCy;vhbxb~d8OeLUMbh>k5WD;n44?ZUqT-)N)Ub=_mv9L&v1j1fdmB3Y&xKvsEDWEr z9qaa7z}_Qgu!|3Y?K`(%{iY2B-crn)I|p-T&fs~QU{QFZMsb;BZ0#tjyyVfe)BOh$ zc$TJj!A`T6se)GZcwv!!VDCm{?hydnu%74~8AH1ogLbsGt;2euGgV&a$iY;e32+&b zOjS1?E~BTyK6wfpQc~fho7o zlZ(n~g05VOFAJ3wv|fd@&$${2z`{f5DRMPFG*fkWYEjgFsyMaS@}ZWWpUcQIDla=b z`~N5Oe!oJ6L^bz^pl0H_v9p5Rb>-6|%*=Kwxf^NO$j&Q49s!g|C|#rNR71u!LhlA) zXW*Bf!{1qlD%x7r<)y3T^?uiro5eLeXLo3IZxduSxKU1U@t86xH-aytvWDu85GyXn zwUP>46X?oojWsT#MJ`vXZAA?-xUabG4$5x-iTp|eF*CztP|-A<0^ik}7VurWulOt( zXJ~0@YfUF^HTp;)XIn-;6jC{Bss4Crs!vlw0IDw8+Pr*1P1lBiD!Sj<*sAc#=m4tt 
z@RHKAw%(Q&ss2u_w!fKWW!Viv?fQMdT`>TZ@)Pv5tf9KUU6+S?UUPvjkFd>5{~6h4 ze6Kn}tE0JKt$vulnC?lM`tv`Ni__}k_o4R#X48#D4xUWgvvm*5MB zSGXU%LVQhKUr2Z;85V*Dk$pxXBsvy8LH!9mUvziyHqeVt8f&1Z*1>QRQVkg)At7*a zab>|AgK?t-y)j6ROT_pwV=*IjD(23afyMmi>o%>zp8Y#;@X&tj+`bFFBKjjGb}H5_ z*o8^43os&TCWc0=MDKv*=pVWaBYG~!sF+2V(r+=A52qrGTaFzGi*aE5a%`Wp5bIJG zVs0uSv1kRREM83uyBtY#QjxMS74x>N#HN!65I2)HbP126%3Qj2H%jC%(m^^FL$ep&yXA>Qf{w`T}v2KE!|#uVC=VSCKIGUGy4q0nYyG(1X7x zI&M7{Za#-?ho8gY6VGG+{*&0bbsx5F+=i8_mt)?7S(rmOPZ~cCDPwsaQ&LPzZEWmF zBv9=s3-t)DE6saAP@u_&rw31PScpmc9zcoi<{OIc9zn2`+Uq?K9eVabtH@qx6Vo3Z zdMoeVNObKT3&(+@;T|^`Zb_-|m^26O(-**L(kysQnFrq~bI~JZ0$j$Af$ivlXcB3Q zNBeh%|FlS4;f<}XD(Qq@M&&gYLMlDptXYI!c2zcQTN=0X7)E9l6$8~?E|py#qk!;x zV08&ZrH8Mm%SO$uJXF?XqfoP?sdF=*_KIS`j<(qZm0n3nrWuKmn?ozcD9})}0@+tu zSyjO23V7+p-lnm@t244}ty*oR^d5$tb=^{*chx}e$~9U;13D=>LCz9VcM5G6^nUw; z%8yXX$}J|KtB^zMnMT`rGqZr7orh~wfxl7dT_yCgxWD8!6&ypEP?QS(kK8icD71jj z1VNZleH(eV|3WFDXTWz4$gQUutRy7Ms&J#U+H_UPskwtH+GMrx7Ss}&rIiMFHG1Ez zJIEm5Z)Rs0#iyCgt`Yv4g#8*VuNh;Te$6!Qs3WrK^Q83D5F|S*wO6QzNIoAT%2jBB zpt%pdB8`wKpvudqLLhrmeP9T>ml2%$n)il=HI!sM{_B6kSIxA~_6m?fp{p7E|5qkgD z>}o&bm!JQMf0=G8KVZ(X`RGO9#SnHqNAxw&3mXuPsKLDu6delh03U>e1tWqyYY^2p z(R57q4(vm8EON)n;1q+($^@4+g4fIAQjz;3hIHag$G%4 z#!bY`B`dM>@F`50vlIy_GqG;V5iD7;3#rpqValYXm^xuGCXAVlnG+Xc@r)IiJ#ikD z-6pJDvH|mEuEX5fo3U!`5p3N19M&Fw1y)UAZjGijr8e={R5$_Xw(&nP{H6>|R@g5j;&Ea0zVLM;ci zyf-T86a>Di@|#@Vd;q-~wcM9dF)FzP)&8xz0y92WjaX9nQhI61lAWPeWJ^Cy{T2cJ zni_93xAAYk;NQRfOegmv<`H^*hW9tniyqzwz2gR=*N6e=J7OTk$smOHv?NL~oqS-dK>uMIv2x=joVoNdPCfS)PMmuMhmT#vuAN7* zdBaYuS-BQV7cDXhZ`!me7&k7(Bn?QQ#U4RB8%Om#h5%REsOVn3;TIS{>mNdut;u`> z`F{w#$T0X4fDXQXu+f;pkY4B-)gSF5`{HTZ-F5_C=b@v}VNg6e4N8Q~@D$jM7!RjW zQxPze3UuXGM6BM5prz{(vS>Ym=dXs}oW%%VwhaCYrondNU^MF89f7k3Q0b{tGHqf7 zm0aa@0*{ap^s1@y2)(Ro!H+=UwhTfovxfT!zRVh_J}NN+uh4W`$zUO++T-z+v^3Rx z!dKPiQSDK=@yV_k)ii%@j?zcb(n{57K92l+buzwTqM(4Epb^pycaxC2e$9aI+6_$%l+Li!%l${?33wK(tFSaw zq(PaKa|%mQR8fmUsxVpIjPzXGxSomMsRT6|bnm>S3=90TYJ|;Mn#<{1|bGBM?v6#f=_;#FThUoIU|F=FY&vrHip) 
zqvpFkh*L+-AvI+d0-S>p?iz(?w-}5F9f|S%k}+fGXv`cu5~+O#W9Gn-SeZNntEVi( zyoswYZQ@!?o4y58XR3vBGZrjbM_{Zb2_*qCDI|nIovoJnk0g{K$!T8wum>9PR z%VuoD>iN5{YT-6o?&(;xU=`*sSc+vUs3=osVdBK;NJ^Z5S+uuvR&2wv?I*C|$cxx@ z@dF%u{Y&h6<{Rq7C>k+hg zJtCKHLCosy2w%P)q03exY~^C~-m(bp^M)Z}Q5>O{i>%T-WKr>DRk6TSQR(qU&Y(Tb zsL3(#%OuD$st7)9C-^c5zKr|e%OUg%>a%FgG6_8bkoKmO@2jd4^a@Z?o`d`%Sw~tI z*;s-ukD!yXQ-g&Roz>2ob%t6V6(vp~;; zk`54himMM4o?vH*KN!Eu{&KtKj!nzXqYba1MZZJ1+@eaYin)3R^m#YGA~zB9p6P6 zm0&qT!_U$PxHQ5xlfcU;ChQ2kwEPlW&B`PEO3Vn2tYUt4VIlI%N)7n(ON(%m4}k*0 zFRze_T)FdjzLYIl%bNs9ZUI#}&!_3WVw99dUZ`XD?_B?bHkRtz3_1JVwA`wjit6VC zDuGX}v(^R-0Z$4~A;3x9N$m-AR^Z9<3WC~a0imUSPoJko4Rv6ypw*Sd)u zsXR08I-LO2Xp)<<>3jLgJV) z=-H<){DVUf-McRmlgDGm?1fmmbRE`GO>En|7kjoJM8dF92=xg-l)pcQ#zZ1%a33TO z>5qvcMi5q`kTh~E#wO3g9+)T_N2or}UU_}2Rm_2C<_HW*d?W>k!_r}b z1bg=G!|olsuyy?gtX{GNi{{bR&YX_%Q^q49c@$xnfRS-=h*##6aP#uu%LZS$~f#}wI2s#sb9eNHzhu%ZcX~0Ny(_u2vNgdQ=S+8OSE^vNe3nK#!m^&|`s8-5%|3h7@0og8QlLG8tKn+`25}vjCV5 z%!Fexk1tc@b#)#}D{@gti>2($CQZ43N82hDXK9&LrewlRfMaCbPd1!vZ|npEEn!j8Zb*d7L+QG1L(c&z5lxW@P1 z%%Z(5E~Q$nML|V1GV_X!%DZwU9lxv7GOeu|BW_ZuWz^ln&C1)jR#;7iSBBrxi*O~A zcDS(0bVycRT{h!-aTPMkczkIst`d40gR3EF#r1!h+ks{|qGDv1mLh}DOV8)^;xaQo&&+;SR$Y$#;yj)&o;ywDnV&`AU8icjiUQ@) zqsmjmhFZcc)d+gH5Oh>pCBPM^CM=)$iWyGHy8uB>>oP_17e3@cw!vJO__w1Yu1?TP+?>mm0m2B-e5Snhoh5?7o0sq zjJ+M6FouOK(&T7UV}{k@3VIp~I%Mc@xVd|wW2dfgbaF*RL@%VIOu=$OZylkxd&fb{ znm!L9fmAAfzK9C*!>~b-NQjNb=u!PJE;*hyaT3N(oJ0A!3}Ys2;OlLeGj10x?p|yj zyA>PbS7O76Mc6oEA=Zyrf)xW7V&%YPSUGGtmXBG2*`uao+>k^}CNO4=PsX&dahNkP zfzX?ey&G0y-QtB?rmuVdli53us|M_6|JO>Dh%8D~BwB1W{3ks-;}j2^Pht&XsO~!Laf2hkZ~KT>A_~m!1RAF=h}|o*FR* zqf6gdbmJ127z6?Pt=OXs#D$F>y6;~^ZSfGkn@YE-dR#SC$M#*I%U22ceGvHJ3b-G|k z$Ys*b@S7%Su!MBpqF>QyNb0=Pp!8PLobbL?b>y!w(C-OH;mFVv!HRC04LB1 zhz5#ghBO{WFkVYbhvv%o{brh(ha#h(2x)l*xSEzl;Bnh^o{QXKV?(dgj^5B*7=&JC z?Vm_1y=BJP{(7ST*RlwvqH0=H<^8x1_QR*#-RV zJUv_I;h#2L}hln-9NiJ1)LH-Q_Pbs}L4qBa&uhDY;fxK*WPm!{- zbUl$;vo>fbtSV1{vnsoX)txm2M1fC=v8vo+a}5B?(hT&JT1?3etkBcn;42A@xvJ-=vd=1cEWgU*}elaX3jDFTpL979%-Ow 
z?-GIz-8|sr5p0y+uy`rG|3J^G^jutAOl~({PY*cQbc3_KEyD)(-MgT7&nPTexB%O? z5PCazW59qx@bL7eS_wvUOceSIh{3Rt1Cca-6vj@QfU#4jV*JFpm@;+=W+tw|@}%w9 zG-?mF58s15gZ5!huRYimu^R_^AH=@C2e5m<9&G8i85;+0!m@#DF>k;U%o#ijiwM6N zaig$o>O?H&=T1&e!n|1vXlu74b>23lE;)qMHD@tz&+C}H?FGy@@D`>Ycn52r`x-~y z`zN0J_&2=u$qiioBmVs@&0#Dsg z6uG&C9z*p~QYtF2$_!#cH~xL-WoHqXgr3#j3VJy?mij!az5NgLEa0x4yPmUrWP=z*nhp*yZKS)*eQmLet4Nn}%_TGcY}V2^Pm~po-g!P5pLZPhUbK z`T&lE9>S^6<2V&`3eUwnkJHiTa6IM&c17;Qw&<oI?4%m*3!#7|~@9Ee$X$2P2 zCNE~8o|U>3Q>LxQ*wk&9wfb4i+W0C`H@t$xW#=$y%gdO)=Pj&0^DXwg{BN9k>leIs z`6|BrIuBo7&cNl@|G?Mp-o#IzXW^|EzQ=2qe!$_q?_u`zgV?p>O?>`Y7CyQBD=xqP z89sRXeZ2P4n|R^E%Xs$mvp99|2o7x9ik%zRV&mGCShR#HbM9W~k+M8QciL9@7@W zb>?D3uG@r|ts4=tZZ0}Zh(Pr6QMg76np;+gJgTxh!Yh{$%O=1CJt@1a8Ulrpsi-CV z2)zvLBlt2EJYEeQ*?KRbmnZ1a-U@sLf*w^~excx}t`ix^$x}U(I?EDvgkLsaXE8EW z8BefDu_$h4C`}Y2P4@PNX~f85;TH6)Dla#e-zn2#aRt9LDmDYQ`=LpJH1(qi+T0sl z-cVL$f=+Rr>&5~z{vZ_p;C{_RaV1?-K~nh zc{N-wu0?KXJt}U&Kuq@4nswz`fdzWm+>yiM(~D(?HM5#PR*M{^mMSbW(7TyWi%g5H zxS3BYD}_g!omWM>P0O8Ip>erXezdrm1qB9r*E7SEI}uDa9`s_%;O9MdseN>7<@WH$vqSx{>yV=XP+P^9844HH_g=lxN~ z>n%km=+)P%fr4tUTCfxJZlJ1yDzDUHht-$QQnzQZwn{dtZYvs|-{;$eo z@PVg%f$E~8l=v&wEH@*p6hlXH89NL~$)gcVfi`aZ1Z>^51AF!!Ky=SOXb=`N82%xB z%t(wj9qeK2=!J!<(~C$Pw2<)^>n ztIvPKWfEJ7auQcJK6%k(PJ@SAkV*l00II65UIR{5n()MmX4IZJnt@nL9h!5MR%UZ?vVq~ zC3+}2MG<&>SUC?$LbpDIUcY$Q4jl`-5fk8+G8cg}S0Q-rdW0?{%vSG0ueEzHV9P;7 zuG$9I8H?dPe>Hp-u1CPq&4^gN4H0YCp~u|ucqSzjkB#nzs5O0Xt(H&Raw;(mIisq} zsijpU;Iaw4Y(h-S9D*+Ueq`~r8aA>7JKp>PqzS3LDobQn5@xC+ETfVuWSPMcrATcijYGcRvKf8w8x{^fVo$soSGkyvn$8Q`^=w7~VIK_i{8E3B+EExB0*Qh7WV1-ZDcEYh^O1$=-N zRTdGzl_;(%HWs(Ku8hI|q1N)E4BY**3U#-XaYfyP1fLsJewrVH7F2l@X>-+$MH!LH zilx-jaqo7aX_Nl@UNJ$JK?Qg7zrbfvcT{?nw8Z6Vuuxw=OG~egt>*nuU6J_!cuFE` z>6mV5#4`1|nKx)}dA(IX$p@b1Gqxm&<%3DtX0HB@H5=9-cGO4$Z#aQB9B~Pnu#Dn@ zkvw_~c5dH^GiT01iI5wFM)!wLP&7Jpal+GWy1~}LgV0-z!NW$H!MTE7i0l@j7Zel% zcQ-GLj2!`AA7^y#)Dm`foeL-**@W zdX2!Ska3t2HVgCpmtuwYTCDfphMm3#am4>5&IAx>e#h~g&q2K4yBDtn?8U3W`|xte 
ze!LiV1TRLO!lfueG5QqF_CA3_y^df{|6|xT{0x>2KZ(hM4r27MgGfv|iP4ju#iaRf zAbIx77>3OkDg1rmuPr^EZ8p`K#Z>`nB)ixzqo`hi_%z4(4L-8a9- zo3DL=mo9yPw_g7YAAj&2zWC$^eDmc$@$Fat#AhFWgZJL~2(P{R23~l9z&m%&^hZ2! z-~hI7-;Q%@go8!*BLl1k789h~?WbX!}tN+I0-!w6u{Mc45HY;~2Q-1cvN6iGJI5 z!DsnYv`qF!v&5&+duudq-2N5$e0t|sKfzn82c<$SLLVj2znDtTKKERbZz22eJ^R6$pH4J+(m3WK3aX z(&{Nso?2<8>hkjPOju#}{{=lO_$;vd@3=w0T^HmSR@mL3tyNs-|J7^7&CG0ECHSuA z6q$Ottm;3JUU?5`q+Axkr&d_aj8;;A2bK5!M%mrJky~>cH>vh+lxliM%ao3)+smuD zhvM7ngnSQK<#h&rSyX$b^|pv=k8vZv*tF!z)@BsbQq#^#-Q|{-Bb(66EiFZ9O*NrM zRjBDN^K$sWAe5Z&2hl5{}OE8;n?zf+FtTEbCtla!>P zqB0xhROThLoyvJA@G1SfmI75R_ur{2An;1@=ba)dy=+rYX9;UPo<-qV!Bhh>AHEWvc^9p(G)d^kwF-;w>enY+%fgqc}F;B6jpWhxPr>V)>Bsm^t(;#tb=u zg!nTUJ@Ey^j=P8<$!9Py=>*11e*rTVy^U$}UZ-_`2dme9ghPA2#&f5B!YA)!;j7Ep zc>j%G@ZK9g;QhD1$Ez=0#z!A~i*LUA1^@c#D*pN7ANcND0`JqW@&0?4@y6?KVz{YKjOrf~7|FdxTak0i}Ao`(bIl zF9W>mw6iw|GSf<1sXlfV8>)46J~l}O3M+AwYVQV@8B~6n zBSV>4it6s6fbXlg2a{kRud0r=mylDJ6-E|SUv6a$uSpGIS7BOiWoa`C3aON--YP3l zR#S!S!hEW{JOe#-TF$4+E2}LxDzBub)I@1jk(udEQ9zYlR+)!NKHQXPC9gm#>k29> zb5KjDRS|C0RXMb(`LwFJgjOc1_<*XZ$VMF>iY9oksKc?s0x~sP{0Ds1mDf$k-d2_= zZty)>xK*3W{h7u_8*5u=saGs5$tL&+x?DbZ^DaI@)w04E;= zJNUt;y*na1hoE1l7{qp=zwSa?+jRowI?lyn=fzm#z6NVOmSVlvbZqoW#x}2b?D2`m zLBAv%@=L)U?@2fqG#`h|SS)5`7G@ z(T6d(_Yn*t_y&zUjUfqVkvRSmQYODh_*}x23C|*R+y%_#`tsSY;NaFzaOTjLICty| zJb&g(+SwoR?i=6XgLl5gcVGX8U;cFiKmYS8e)z{P`0C5=@$tu>;r;hN#G7xvg_mD` z85b{J#Oc$gap=$??Ao;pn>TO9nl)>%NF9*p%)yk@sTeyh7p*q)upD)j~orfnJ1B2ld9t}G_T)Oh%(lurfZ2HH-c5nimMy9}R z%v3lfO@>|KB-kZSgX@$92w%Pxeb(yO~A>$any{`*D+?OOujR5O`U% zk_9DYw1XA2<0a;GMsB`o%~k!H0TDr^=^d|?)Z+JoN|P5)x%6&Op=D~8J%-lRfT0=F zlzbq)q#8F1rQ|Bih>Ox&eq)Mhm z&LiN8XlV-z3A+N+-o1_6fAh173#pJ*jll0;Rzww8W>j8waSl}qP?u@n_x}%4yHx@2lj-kUwATZdXJT%dU=E(>O3L^CUFnHiF_Zgxy7WNrPTm;UJ_t!&`(d)p2&8t2!vfo6EO8uzrEalU?lk~w z{CZ+TNI16i45M=EgY7*>VN1v)Y!6s~J)!HcCvr2kM{mc5n7vpXa{x;s_hEX_Zj22& zgoNN@7#>W}g|IM39>w5(r!jok1;oWYi;=XvaYK${eB2q#9s4{MCO?bWvB$AA=`8jx zeis`TUc#aohp}x!nJC9SRPU6UsBiOxrw*lVTwQI3-=~4r{Y15`*JWueLvB^l%JQq}W0-e@% 
zG5YrF$8#BI8Y?upk6;%NLg;Zlkm}FT+ZQ%oesBm1ha-WfbWvTS`k{O8!LS`bl{Y93 zc5!3TBV`&K#-_r4+;q66E7|y-fkrfH>1ynJ?Ot> zKS8$z{;OBQpOB8+xDUOzA42TuBXC}xgvTd4AY^?{T)%sRg)yJ1q7=DREG#uF@O;we z)~Vz14P@05Qn&KBv_xiIu8C}}D{^laAh(`&xQ^L*dc&43I&efl*(JyP%z}t|X)jP+UP}#|Kw=T?Hzs z^zzE`QCLyHeFREz7J*l6>h-E?i)m$f9`mX4a<3CO8K|i#q}r3Z(lE3P+`d(Udw0sH z_zG!fD{#B61Qmp1evZYKYRXPc-KnUnq57*K7^|~sZK?kFK&z^x?PXNb)=ClHyIq7o z@0Rj73rv-0qPi-d$M8E+fvRP>u#lf$kcC`=FPjiIz>~El@X|BX=yV-QxUh2L8Vnm1 zi=lDDF(QEigJmv(z!UhCt8&$4b0fp3^!(AIhYNhXe5vki;M~m)9`?5Ibm#(a$My(u?}9Lw&Ist*8j*xw&yKd} z-NpfNUHp*JJronVgkYv!6z25^#{w4Ybv=WzZ+LGUjO&YCBYI)mkb&6LKM9-qOvk3E zC0G}}2FrriV2=Mfr222cq=2265U?9b{(CTa;PV*O>pVt8pT>xoGZ@gcR%C%?|#BJUw?-$zW53sUA~NW-g^hHyhcm=+*zDDbDY3CjD7p|Ve8hd z26)zbyy?@YW6~sxt(B@1@Dx&fia|pLBOs8LmgiO}-!)0M>P?mMJ%GS-3kpSNH!nEy zx^xSQK$nnc*v1Ti7K#w~xZLyBUNYZ}_T0>jwip?OrMM>RoL@vWM;lur7?z@t zHo4-?J(LrEIs7hZxwP3ixhSC3&7;*-$K=BDQdHE{(7slnq@oPfw`y_g&)WpyJrfo6 zw78W;sJT^+JAc*?kKFfHEvk8aH6f5XifeeZ{IDpXa`;u1!+grJ~QM$m!# zZc+W05{^Yw;#K9jxO=-4x9bT}+G1mYE3)oGP)oh0?Jd{H3nd~@pT3+2z|)+#%AcsD zvC5RK@znxeVG*x$p;FoNTI6Jy`aG#U0Z*;?*VC0n`VTB$PpHKYr^_3P;e=kC>dsm2 zl17il?Afz1bLLFM^y+0?6`>ai2WJm->SB*)+H``eyAPHuU16Xng=ZcA8LnYazCN(E zwS`UBu5h;L1}~@X2={Y9OemFIKu3glw?eqrGl=qRiwLJSh_Y{ws4i{Mzk4Ud5{#o= zx?*~OBj$xWVo@(UY>o@SzHx)Fi(hg>p9HK9V?>Oiq8o=fktvuQLW}6P5XnAkG2VAO zCiow~#NbmHA8`(&B2Qpg_#q5u#77@Re8fS-h3vuD$OD)?@D%0^IEJ}>4`A`2gIE=N z1e?a5#?Gl1uxIA8IKJi$yngN*Tsm_Zm*4&o*M7~%%`3V1`KLedO5|7y4}^?0*pVe;h3NJ&XCz>}?&veQy3Z`g?8 zh>VVcUw|Kh7h*!THY6g#C_MMzFxdJ8qMKg`?81AYOK4AY3hRaLy#~Q!AFmwH*Nahb4M}g;&x1bV?VCnqb(w&3TRjJ2r7%c zEuqq*g{-4OqjeSZtbnsZEw^3@uK;;k)^VSN+TuUS)r$}T3u@+hU~4t_J4z&H3UYblDC9Zo<-?pWaT`l z+tYexcCJ|pdWE#QRfJFN-8)`6rJ7;dzJq zDoV3W-JFzP6|HeCAD*fU6aY2UO!aq~&|NLIme;h|rbDzEFt}bsg;$`+z7M?|V{dcw zc;Dq(Lcmk^C1scS?ZzKix^@Ky5_kh+hfwj2H2w1gKDn(0ESoBS;gJ!>RW`g`?o=#fX)~gV1vFM9nsgd4F-F(!sy_3m@~)@OJkj}G07jB62q}( zL~ktV(;rL1 zsIV}2g@(hyFNnYkhJ8eDbPA3}x99=1v_sK>%Qg{x(UJDnhDxvd&{41-H3{}(rlH5g zIdGe{2o96yp~v(k2&Rnj%;nZsnaVc$YB@JH}J{sX3)xJ 
z<A9+N%QX!m*#-O^gjg=Yr^)99!Ax$uA+<;F<ba@Oxub`re=cUNJ zued^)UkFHoKChUH+;mFLK@QK4lwLt;kr|R!d#4V!srddR{wiygDTZ((V2Y`#G#W-3 zSOh(F7%wX?ptUWf+N)$#pqAIYii*FYA`f-6v@+P$REvuG+A>;HD#kwvJRVz7#_OQE z!h&?82y3bf2+3k&jcaT2P2He|xe17Zoi&OGNsR_6D&+Rk926HbigF1(Dp>{yP@0S#E}%DFnS(EA;5N7B5|b!9#~2NTZy>Xm3@Y*Qu)` zo_eMeo@&(*UOqvD-bxG_Jj7Bs??W##Dhz)9-ssk?3!EJ7;AqnYZjN2x;@Acuf!)wM z+75kVYzV;i7|_!meL`%}Kg6D@uOoVScfiQV?wA?xiPcksu_VzCb9)70end3pg~nh} zRDa9}>xm@;;;^KD3g$;m!h)b#SQ)Vf3j)?+PQVu0+g(WZ*o+kKjTqy#3bT4|!<5K1 zm=?7T3kPh+%AvclZp2<}9!Vf2?#J4pn{jaJahzOy5qlRL$F@aBaAMEPc=z>x;JdG{ z4 zBAk+@!gc&ycuZLY_teGkoV5Zz^VT3>(FTOC+K!&<_9Ef%Ma1qugQ>5y|Mac^~?0 zKZwBxP9y&GtLVS~EPC%fh~a0?W8@2OqW}5l(d*0+to!~WT>q;Ag}3uibn7O;M-@<8 zNa&RzpK8wnG%7)1}z$(K~qbNpEC|iHZ4Xz zZ~lB*r7JntuwnNmj7*6~gXTP@NdxpA+M74~;G;Y)o zCwU#Lz*AV2-E+@9hXxHApi!enMsaIWZ>c<8mxc`+qG{8nX1_I>H*b#N!-tz{3PDkz zRHIbmCOWPm_U=1i)SaNG<=*`VxxEn@G;E9$=bpu(Q|I`-A47vi&A5%xsJSH?-j62S z*YFYSI&cElGpX9B@}7VBt^50QOk+g!>_w}2mw>9mho63qhRq(~XEZ^B#{3RiDw^oC zxomFPH)+N!1Kx>KQ~4Q<(5P8+eEj(r$mO{xE-%Biw42ztm7;+5u0dn|)*y_+c2N&dCX}X84(=u~4FcX)~sq@~4qy{y~9g;LyVt?K?RTdR@@6RVP9(m}Fdq zK|_ZbDX2Vd0$->KXrPpy9b8?U(X~?tbm`a{ojN=YC;O-2>qSc&(go3B-O)S39??{J zy#gIDD9jCs{Q{8G*Art0xMSK#FHGp`im6c{gqR=3diY~e??G5JU@+kohuKtlb3&(K zLEvI64P1+9E{ie2X)#h}YGb2`FVgJ>b-+vkQOgW4rvrb~gu+`WwY7;K4 zx`a2kzJW7KPT}I(3%IoNMZA3U4ZQQ>r}*+SWm-+cFTZEt-~YOTe+zd1_z6FJ_Z`0d z`U`yV`N#P9^85JU{de%zTW{i(S6;!h&t1alGv{#h*h%c;O}~A|PHfz?8LKsy#-c^0 zzCgemrw+(t#u(@+A*hs|)#COW(BE|MR>P3BrB=xV!lP(w`MQUHfJr1^uX?<&7_ZHBXl;)p{*||p z{NaZf_3ran`@_40UO5VBU5js}8R+E^a(Q*7gkC8vZ!xVe6&+z$bgK|Ws_U!FrJYmd z{7v3;LoJ1kH+jQHElVr-Cd?j(tgZkY0Z6Wj@{2Vwa6c z6DG1yH8jz;ci;a4UU_-BQFtRqjx;b6^rX18Pm!FQY@nxojT<*M>(-@p+qP}n4EPQn z6xjIx4fOUS(+WMRJY9b)^bQ<5jr~VYb6vm_pa`)Bf+d678XDlW;OiFHd+-!)WYC7@ zlwjtZ#rKbCYzab4EyB0|1U~%aGYbq25bp!889zhIrrdUaIc@Gd9{(4n&X~z<`kY4i z_J<#+;((uj`xQOh-O#u>zgJ^@SN;|Or-lBfW&QmR-$ivz87ixaFnNjvdWz;P8e_%E zrKqc~Mon!c&ucMe&X|fOx{loc)mI;x`n}4^0)i?RfBgP48uDBzn(&%wIe*>^0*scH 
z_sQM6)dr9=W=!BYwtU8S-+fB;XQ>;kEXy$=MQ8`ocO(4l9GBPV#)GlrW~pVij;TS4`;}f?56g zVoJ~eO!JS!OuzAr=@{)i8F3D&h_#)L@m@1i3G1e=#g;jn@yg+s@bcjo@#g8*@%ihY;g_#}!IfX={r`OpKmF@B{O}{; z_m3Y9?7sW@OMLz1r&MZ^t-UTF|ehDKFUqH;(1L#SWH-H6jz~NKyU%Lr`8@3{3(;kFu-;bWBP9gEb zmoVz`Q7ruO6t4YEuu;7g*JqnB_Oz~uz@zQ0q1vm?N9k=IL)BJXOPf_wgc5GQ_|gRn zw4~e^V<#tL>g>swFk>8?yzH_4&=zF#CSSF2^#izh`g!Bvu|qg?>>&CM>|-f}+}^8K z4AoZ_@^kYrbFlTK`$ed1x@vNgr1b21$s@e@5pI<^W$&Wf8->NpE{3& zhmKMq3Vcn_kO17Z_b|5ZJb-#ry( zE5)X5Tlt#jfalP~!yWq$9>Afahgsl*&9)}Z8l(S!KE&f){Q1`{D!uV&*8Dyk`I{ek zq&a^7{a4!BI;y%FOrODXr|ZQ1pMLr-!DN}MMB}5+pF3qhCk3Zv^JYAkyl$n0RTb6Z zpZDr$lS^1e#-arkzo7TaFW=!FL0GP?GG*%DSA-f`=Q6&os3@QXCX8vDWwFaEicnfs zXrN~vevE9w?|-59>y1CKaP?C39We;~M+`R5lf4~1HW{netwpasy-m3bU>OYLvVk(I zJk!A$&$M?!+m6og3yL8f*PGD->YS`PCpBgm=mq)_c(!QYz8z1_qj-wY>(sFgI(BFY zo32lxbGsIB?A8(Cw}$aOg5@m_TbeaT#_} z*=%371$)=+#&gG?!%OF0!o?F8@$!Y2@byPu;`>j(!N0!!5x@QT3x59LpZNZpZ}A=D z$AA1l*nN$!zxW(qe0G`Y?mc`+S^v)4ui~{=U%*Q*K1ZwcEY6-ihhs+xynTnUW9MEf zylq&yYAx-*T51=Xt|=;9)_OcMIX5FAA;I*~8y-8tC_ANmx7gYc_yzbAdf}$E)=jCR zyu4v6l@}TXTlK}Gt?fiB+p%vfIuA-j$04I(lQ0PmW2VD#>`Zv1E`rzerDn86=n}$i z^ZK< zLINxFN_m4%o1KbA%~|Mp^B*|69~HF3s+X&vEy~w8-imCL))m9e+uanThD{pdw<~|3 zu%yUTQ0EusAu>AB6hMLMyKlcmZgvjFC8rqJwP^APmyNM|*B)bWrSPQYq|~giQs}x^ z>-4m4-L86C9dE76i;Igh+nY3Lf)A;5)ycd?ixvikf}@}(YpXiGhoQIs=vidsSD?6} z4n-xEw1hR5ef(~MTA^BTGxAYXUT0KZdUg>N7L^?LHEP@pGiJ>*pVPScqb8qXDc_%4 zREqMNdcLO;=~S2Fr_jC%jEuM5|IoA=tKLube|bg4n4CJz0IvbR>o@=Sp7yo?&Tg(2 z=rMF&t7>bISCEg2stWWU*q{I3avpyF;}`t(_Z`fbIn{C<7>%V2xlcV4?%lgXF#bj8 zQRz{k3w&RE`JtJaPO1CL38Se~CK%8Ocx%@zH%rCkkKRRHtD?6YZeBB!BmHviH_TtL7=4EiG)m8EZ)eY&i@EdXner#mA?TTgjJ939@N|0@Mh^o& zZ~vZ{zj!SM436c~LC{ko%TR>Sjs*nx!Opfj+P7(J>=2EiM&{#xKLlnQO6W#U|{f@_O_2 zxA67X-{OU@W$&es0$Mesf$NBSTaQgHq96NR#`}Q8hj_td#Nwb(y;VsdmKJ(_A zCh!RpCK&4^TWc&W3y_vldulFM!-Y<`q_q`#M)ie;!Y3pQwtN_LcJ+WmP#9cd`oSS) zfa!qTrSAxI8=M5Y_=)JI7&R5P$+O@*X(2pku7oQst=r7i2wAxs{dSxp)SkuQ0~gV6 z&shvQZ~>!Ezk+G6evI+Yy^S%1-jD<5(0k`m#GZN{qb|Hc=p9Dzrgezfy9Lqvwxah* 
z#tW<9c_I;QH+iD}xp-VdCW;8EQYyQWnqsc!)6x=zgrI;|MBu6OuwIuCV%aPVmArY! zj2~^Ws)XKmKM`0Ze1g}Pp{Sa6*d!*+Aoy|#M%gk8^a_fL`1Gd5A^37~vsp-FsboJJ z;G>T|GGO!a@Iv#Zgb|^lh>D6f4H%`RrKUcwU%!4v$q8O6xK2(^50s#;hwAUN@BR1R zH^=AFGH0ZxnYuqgPVc#V`LY>ZqxJtU=}%?IseZiJf<;i z82`8W-xZW0PxEUOmf^sW6XyL5soq|E^$iRf9*2g_9_6`ehF|`mdMYk6x%;y63z1(^ zhDpkHQZQC~zsJC(FZY}N-dedi2vV14T4?li|GAg_(0#99D{*(p2 zQ)LvPEU_9;N=ozj`9%hJC1rfD713o7dbtd1eV&2d4NZu275~2a8x{z9!z|EKLq`0V zB&^@G5wRmj!q4CTfzngfq6Y13{P1)KcRbbJ6|FkB&}zhB-lDY_I5Yu4VV252lUtth zSgp6tw70G4Dk=cF#gY>z5~$Hz88Ag55h3_cns??1o7Sj3CR9b zgo7|8Aqlf5O~=9+i?L?e2An;44zIrY8s6j6`8^hlcX`p@z5F3Q{On_V`OQ~+qQ6hD zUB;K6eu6JP`IvDTpMCrhKKkHYy#3ajc<-I}@%EeV;*Hl{$E&Zrj2B;c9?xDpN7$Xf zi4(_g_^|rn?Z@_QJF#(tMq5}qAJ5~{W5x{S#+zb*XL@}zr0x_dV1i)Jn3xBaR=^Yd ztn0pkL2&S-rFHW{XE$$j_YZ+%R3A9?9fGdCXlbJcqC@{U+Sv){5;qB*2|b&XIfUH` z_$}NF4_aFH1)C7E{t)`?J&!?$pU05nFJkD4mk@XEb&PxQ10+BDHWJUgY5*8}?0F2N z+8cQAEQTDrh(SkBW8~SBNV;?!gU@V1USr>T*z2EAZuUJs+ANsF)U4R=1$40L9hCn4}w=z-!#1F)n{`+4TM|zg3RR;tUkm z6{7Ob3gnb!n`4?k(t>uYDQ{#8@Cu5kB#R4>!h)y*BIsS_byde=)v*Z}&8dtU5gLtX z>jW@?@6%5|eNbSnz*0!*33|F7)_w21^A1dM0zx+J<_(NX8EX`!_DShk>+~LmUKW++ zf7j_X#g=`?crN}%L3usZ*=-b;R&bxbqvd<1=kS~;{mO8;^xb*6qxREK~6&d(t<`rVxWT`MqL&dj0{D_L0TI3WI;$}uBatOY$DdP;} z^x(M6`}{^mI*)IFW{(oQT$WW;qO7ulkSaFkN%!o3)Y$v9ltq{^J=L6N{buzqwKn!# zym*0jvkVg_@qcJP;M2bz&Ysl*@ zND6vVbW(Qpgj^k?vQnL(3sF@ig(piZYg^1E9R$Cl?g@d<3O&nk#57!&(j)i;y+x~* z8sH69rWHbOV(JvE-LM{EbX6Ku5vWjkRO@YoY2qT6Foh9ppTC$qFo&@(AyV-J)_afCK`k6hhac>erdab80^>!BfWYe zF*F8Kh7QH_QE`~fr_^#PtesnS;Kk=&#`|v(TCcs0ci(!?0p*f((L`IiX0=S<>&lP6CYX|JdK zu3fvab;~wvSho?YR;z~fCaW%5Lf9h+P^rQcwIgG(9zu&?L7R^B`69GeTJZW-(d!N?KH8^$noehY6^ij8;(}~GS+y5I ztM?&b-9bd`JcC{bpT$7I?%8)R`lTnYPxt@;|MW>jK~xWsckR;Is&SmHkGQc=xzxLZWST7rT}GkXt^p1Fmc9YG-yUJ@FxHCo6k^v zw}!yuduV^v;E`6GhFmH=Q=fOgkO+FE<)!9no}HVC$x|nyQ4@<|`QW1uP{zV%6bykf zGA_YDMeCb3Z8BC>4N8wc{D=*x>R9y4hvh>edrnG)fD?qK8NheyQcqLUP%SvQuW~X2vX`tcQdcZq<2@B zmVaHnfrc&A`q}_dy?W8A7UJbM-{M){QK{}VQT6m1FObV@1K3~9`Xxsnqg`iHm<@W 
zj|$)o@Y2iA8MyU~2{%el+caHcU45mgmz%D8;{Nyg@~e+ceO*;m5o+lmN|f}Vh*phCnEx4KK560c9bMmrvD`{&-V0hw4j7&608%+(MIZ*Na{ zd3j>Q@F8&PVS`R>pF+oHTA_RUE@;`bCG6Wf!rwUvUbbFv>FkIuE!)7QOBeV$*wFHJ zf}dk&c-ysyzkO%)@o>iwuMqTijz&=HAjEX;kG|aoV2E=+40ek`Y)~W;d-uZFp#w2$ zaDU97J{89gAENc7{XBOOmo8qyOV7Q8S6+A(ufF&yUVHg9y!z5Bc>R@E@z(2a;LTTF zEUAf#v_IGi9S^p~@YeDSxm1CL zw7|*%$M?|==T+pH!r!EsrO=d=(H<)iC~xYloD58|S}KC?BdYUk7ETNFs75AF!K&43 zj8#=`$KQVY4Ss%pW}mtbNzG_oA$23QrUcG+3slWUA4=UIu1YzSAaB%gcRW2p;cz*c4lFDjJou0bS5PaYN^b?g^6*5#G z$?uc^>Od90mtTMh*1Afbs}HD%^9l=$t!>nl=auK{?q7crP-V2{8p&XRzxDTC{^&z0 zy<$vDwQOrl<^1WVpV8%<F9^YTDhQQ8&~uhFb2z4Z^wY4BjHanhDCgtzB2xceaC57khX)xS;DZ?a-Na*QRZ2ICN+Ym#*#M zN$`2wcS4A>BYHXcBecP&pwOipMA~}J;IXwI9 zMO=9H0?u7Lk2B}b;`F(*I7!f*IB|-~?kJVr!3VI@uretKbxC=6q=luGb{xSrhUzX6 zqedqpJ~09DqvA0#e&hp-8x}%k7epHy9$`+nfEN-Oj==Cxcn12x*~<;K9&T_BiG*En z6k5Ca;b}%UP4v@iFl_n^gVV6l=n+2wE~8W7Gi4#d=B-2YvTcZ7z5_j1?ndvm2QhT# zIm8`&8KaK9gM<_BVf6XSnDO>MkU(`e^!T$FdHQ*bx%euQFTRer<1d&ux zcMj1zr=d-%3)(F1hK}31qVtKa=yawt+8^$K!7t0+UNzlOO6zDh7=>IfyhRIJZwYl- zDZW#R(mT9SYpEnE^H5w{jHz=89ktvNd|&_c74piJw1CH|0fZJ<{rHM&2?!R9MgkcN zLSbP(pZMx`N6Va@Nw8UoK|ejK zZBtl3tC1iiP;q_Bmd!>1?%uW2^aT_M4(#8D0@}!Iwcs+29kbNQsm^ZSk&`H>s5KTg zr>M+;$}~*0c$CY=25O~MwJ0pBL~%tmrp=yT(Beb_8jLHj((xgCwp6ppL+&w)J5fOz^aicJ7;7~Z(I-)%R=g`d- zww=15OWStn)an^@XxS3&pL_z{+dKpN4s8j&j_|VYPQ}+10rpOCf7$_lZ9Eau)gK|Y z{_wZ)MwC|=5(bULvGvN#Y46@Dk#XoRVymo+d<()Vv~rO8FjUInjh(Frnug8UzOq7j$;AL3<}V zbfLBH9vFsBRDA6jUBfiwtS35z_kvBop>T~K3(w>!@S8Xbev{|IZ|Zym&R9YKu156Y zO-R^%7D@YGz{uS%V&wkUG3LyNnEA$c1l?O0e*7YmpL-RPUV0m;uY82bFTRh-FT9Hh z&%KUBD%*rJd*QyUFIr9Qh|X)fqU-*yXm_F`S{-YPHU~Rk*lQ!G^R61WmC}-y)E83G zF>b31i(b>JG71Dc({-g3<+saFb*~Z$^ve00TkvxUsV2=)fA=q{Kb{xDEj?Ryp3qhI5~(PyHy|LJ zs_xCQE>P~|{asjIem_lACC1B^%ic0Rp!F8iGO0#oZwq+LIN4u9u@Qm(;m03SA*o)` za^4iOv}y!WE4M=MR|8RFD(Fu?|HRnEDXA9g+2YaW`0CrQP*GKmCCe6@Z3+)hLgN1V z<&WQ|daGdhIf$leXyX5{#v?5(4I~PIPhD5aXisaZ%Q0(a>Vth>efbHkY$?^9GBg($ z*i}`QavRkxAy;2tjVdZWYo}!04@xIxnGPV!JQM}J3_@SYUZwOj4dn7wi;<8tg5@#} 
zbLY=Cvkgjg`1#A$s@t9F^!(uI(}VW5qYoZ=${CM6?ToH=ffzSwA@&|TjediMnU;=F zGjc${3o;`#ynTI*?;kp7Fj7X3fv=|*q1PJSJ9a|1_8ri*T|0E3!t26q3P*15(YXuU zx)FG`ws580b?amU*S1dZ>gWm2P9E^=>VYtiPz<5cNf;K7gpmoDJaIDS&Yepuvl<&V zZ^HVGo3L)fCT!Zg72CG&FymeYEA^n?vuBTimnxoRNd&Y14Qg^f~Dt3Terfdgs(sagp>O^O~GW|>7! z{Tu`Mu#m!Q)2#y@Yugf!v~PvRZJ)stb~b3`(gW>%ywN>83|(o7yF^j}_Ui}xL4)8! z$cFK09-KN2KD@#Gr%pr6!lf9zW-~@^J&2UU&mv*pbBNvZJcjMPgi$A6NAiWYFz&gx zG3BNAFzby^u<*TaG5_r^G2z))G4kkf3_7q2{SPdM^P)(!nb{fLH`}7y!S3jAvJ;*m z^jhz4hXF4P!gc-$a8h|C>Yqm&t0*SyEYPF%rOGRk5+w8rje;w}{FU?2;1L!Qs=Xs; z4)gsbD6KCu;LD}ztNpu<3aJ2|z8(+i?bHfaR#9r|Zd6G%0y|txQFw-?wYpt%AfG0Kg?|=WB1tsT!8dRuGPsim_!R2b~Dj_2Kr)`kKXEa6it$V1y2V+-X zed`@G7A%c5R_J>Rbe2X8D=0^dN;GMr^2^GRSx|^E6D?3vU%mG}BJ}t@H0nZ;Nf@W9 zbMk#Ce(}}U<~O-}dhj)ECso+5wEk(zKgfrH&Y2zzf}ZyO^sgUHwZgawDTKcTYF~cy zCBd3y(y%wugNxgw@T|*EsnRs?KPf5UA;9rr#C`vH{h#&EKf8?Dnu-TXPsjc6k8cd* zr0%TX)9bogDq@o_Pl-zjeL~NgKhH81fEuL;Jmt~TG@raKn!xGGbxq*@8JIoxS0pfN6(&=)BgSs>hlCV zP15b^?T+@{JED23$I$Re7L2EOqd(OgjoJ}_-P@y$ha+sm{bAoT1l^;9&^;y;w*8{u z8aohf2}96zKu&~x`RI4lfA$NAl0v&9CT_I5&>qwVm_(Kcwi zzdiatHvl*IS9+@$WrAD18Z2l}8O409E-EDiUD;iwR-*E|RbrH$p!e*n7mUg?^?HoS zvnL}ZH3d`WrNT4V3+s1oFiLOL`c-InA9_CiKG=6~9~Jx_-nfaDLdN$-@}VosX>FaA zN|Vx)ZB<8NEAX@w7-ivfy{z_CfV1|~({X~NKFie8$+qTbY-h2tfKrnJ-}m2tXWlDt zKJmm8rkZ2N&Rtl$ZUeXLe^~(a{l9)ie!0feRuWQG$SQAgkvSJoQn?cApZ z5J9d9rp=m*)90VXNhw-O17BdOe{V#chQi0;|TC_GceM1}?M-t!4?hEJd! z%U*joJpK%!_p}`g=s;S?Q)Z5gsGfaH%WXiAGW#mqilyZ-G%NyMv}U$8-4GfafTZ}5 zh#fi@Ap!nyw6R5dTH8*9UB{=}pwm-rVArk-?AvvN1H-Ys4QyL=hF#n4=-Q?`f_!NQ z!+ImYF9eYhJPGaiW zOGr6!5hWj~z*Y+iFSvVZ+C;OoFWE-@ZLuJ0X zE86dDi>LNIjVJd%jgAL9BlgvixGCrnaAj)2Wfb1FfUl&k)P$6tz$fTg?QQw3QryVB zhWM0`55U(@V{f&7e;C@h>Elh;mS3;`h90y|4b@*yYC^%|n>26oKzV%p>Bm%B*(Uje zY^)0HWZoZngq4)bk3as1&p-bhKmGI*ifQ+x%B)sa$A0q3CuX}WueK?)&2(s{QpzV- zq~KIvCp9M+3Vyn0tjDX)ta*z^X(3e~X93r$^_y`so7TVf4u8un6qMJRsUVGNlhSM4 z%ydza?UntF={Jb`EMWWg$DeWQPoT7_9yx{OD6PJYIg3`9_cnaw2^>3jk>{|$q?H;! 
zjY>}SeT3e-AAgGLnK^uqIwupfgkI{*Sv z#+a@tnx?bI%cNM#H{Iz znKKQjqzGk$1uhNz7sS-~pb!|Xq1W24<8-`2Fcs)jIWAlDaJ}6Fz>TsbB78WMoKOmH z)aWErXQ%!@0-o%wX}I9|3XTYYZ?GTiUG4E?`<7_jvI&9L1Wli5ibky(p=tZ3XxxsH zze5u=>C_C3I`ii4%tFsr~TGSa&=< z-Ud%i?to|J^5L|h1KRCqjkZU4%^2N|*dX@hSfmkq0^WZ^uY|xWGjl8yQSBA;wUnQr zS5TEtD_e}pJLP5s#<8#d(Xg3 z4=;62mf90QO-7cY5;Ly0fZ!`BDWxLR@G8q#UB#tKmsqGLvHNQCrrj`Z@-V6 zqH+{f)bV$gqnwhixat=2N~`((3C8;g2&4cDs09D{+cnE}Ld`cgf>!trD(QRkdAy>i zvL5GOc#Y>ros^qn_L3FIF0DXDQ7Ohxr;-yyn?8cq-@j}=v#5fA%_%@--7SoxBQo}v zp?-YnJRf;QMJD0E=Iz@|!U5BgOVC6_M`0cBkI%pQl1h%siC`(OETPh)_2>Sk%^UF{ z^PvG=dgcw<+)M&-+ylS~SRa4#K9((CXrQKe_PH}=jBLFo8{)FEN{L7d;p*o2;61~eMO1dy#*5>p* zbmNvyraaDDun0qjjv(v=Ju*vWGuRvcK~C@u>_X_ZM}tS7>Q0r{9geO+n3OsnYd7p5 zgod*qhnaNd)_!JMhHCf}ffp7YVL)i-U<(IF($n7;0|ySk@S(#HLLcqk!wsFn2d!eW1*yz(ALr07x=td)sUpQ_wfkJzuh)aq$Eu^D) z!|J9@9-WMF$>T6Fg%F%L**x*3$^=9~ON0IudeRGIvZxP-wrQV^)qC__*&Y=VsXD>V zT5qRTSSw%yN0cJ1GKh8!}h!pI`IbR#2cwAVQAM3&7E4Hh3jKz?A#p9Tpz{L0j<#~npU*G z1DuAqz$Mlf4nu|MbUV`xolkUu&0$*%eQp?di&43iJp?_1 zjY_WM{=_n++^70HHDm~UQhGWzzdR2$cdJl+r^dWO50>dm25y@Q!B0D zC-7M-@3j=*6goyr0j;pG$UrYGJqwkU)l?GdD^|> zcouSBh9&?nP~B&}nFJ`0)>vuc)z9xrCLzu3*QqYEizlh_v zJO_Ea{(3M`#VOl~mKtH9L73xG>fLdPDTek`M!Ym$}V_JBn`gE+ip4|KM z7XSM_9#_fpN~mUBH>yriOvqL8nk!#k1(#BIYRR`kPr$Rr!((o*Gyb5$`~CVK27W8o zuEC_KEdMj+(AxGTr=kcu@+w$^0=(cKXv^|RkrvfX(CdoFp6W~j#$W+$?TpzA3Ard! 
z2`IQlMnxNmsEYZBNNrcI<}gad06sk;;qB*x?zV)clM@2DPqU^)MMT5X-3w0kPH=JP z0XHW%xY)6RIO&0ax&^GPUasLZBYdoy*wEn-=_Tkq91`dL+pZfl& z{?0O7Phk=X5PELD?r7VsEfpRi*Rmm+JVQm-n$TB@NM%EziSmz&mP3sBfBts_e%ILjYpeg7qp*BxXtT=&P%$&b`w8uzb$Nz zbVavQw7G{nz~-PG;$Dm=@_5tN)3Ta|h!PYLdPTR&2`@pf9EH^uTWeHa4R3xzuZY`f z|EytQFEp^LRDQMkaw?QuGn}osMsTb|UU?w_S%u6ZS{T_W!Y@w)%L%?hMyAH#7Euir zD?wLm^lMt*k7FWxh;z|Q1y%qos1PQ-< zg0JfK-+UN==Tz^hH+l97DsR!w@*HzpzTQuDSbB>}P&xXzo?f6a%e91}&hIUh)ZFI# zDj22wKSflIvevm&fP)ZV>Ixbl0|)}Xq+4)u5M5I29LlrStH3~O%H6M`j#;9mq@ z9cqpQ5qR|-YoGiOq$7@}GtD5`tup!j=zZ6n-YP2xJ%?QBqay{Ur+BM-*j;VLF!p;&J zjgf9iG>~?UD(^Ql_BM_7cj?NNn7eQ>#!Z+?!KRrTBgratRq-bP-Kg}sBC=-}3>e&% zE}(fUw58n~G9n4nXDz_U_@oD|9-%DI>Tf2HX&K7GZ6>o(MLi=(_E)`}8JgA0)5iy% zUOw>lVS#iZ=oHQ_aCdcwAGbwCL}BRQp-4<1=!}{o%w%U72?CrViQA?2xEz(F{TBEn zQ4J;$cuAwUBmk`{%oXC(HMGt8KCAN6XK7u%)%^o3P2qx` z9s&k>RD1rk`yT#YXxp_N9&f99yvAtsbR#r+hW725`|7S8msEICca5p;nslcMRM--J zj!n_5hZJ5jJnr)t+J?18*Iu0oy>77W*A0$??BFuomEi-I5dr8iG62pA{%}h4Lyxfm zaGw+cuheLS%pZZsC1Vh>a5P+}4Mdl5{%AkJ37uyWaEm&_VS@u4b~wOpe|Oj(?TRi` zd7ZV~(+$Hf#)77T1)5K4jlaDQy<$SItggb$w@`k&9NFc{z(N&RQ)JRgmDJMmRu{2w z6HtVlfTy`u73K96W(q>7#9S(Y0xEza!Y+dU4x0~!LIaFEP zr$PSZw3N601}X?8!A&qLrEC-Iw4Pg1Wq>EZXUJ)VAheuUit$|>5eJyU?y@fyjzH6YTdOQKIlGj5~K~Ty1R@YV-(B)F~ zN$u70x@##LE7&RHvVd2?hn%3NZMAd)0;80kIwwC2H_MD>mZYH8ZR(yPg=Ynxx-Av+ zVP~?c+|Qq91)gcZNKeD{G^#u*ysK#zrFS*$IyP+Cg2l^MqEG)J2nf=!&j?dCgMxi1 zPVA`m1Ux%}?+G+${zNuwC8-PFtPexW@^0 z2fCsA;m+uEq?2jH=)AKVVlRvYpRu%17U-49)>6rp5OkVjp@Pbw{5BzQ*AkVqvu4x< zEo>ESttOt)NDhIooQ0r-c2-eH_!VoNwnvbYx+|v&(6Xee3^}UTsn9ShDzefdQ>Rx< zn^s-Tf=OjpTFio2S%n;9Lkmz}Pw23)3ux+-r)>%~MkuVxPO!5oLa9SpS=rm7Vtx;* zI6+R9SKBM7DD(6A8ML?pU!jgErkW-Asx*h~t-G}Ef73=%l@Vse0v~N{)h$|8!Y-2* z)e1h@T|rL(lp>^zCU$pZ6pGWk9MuY#XnQcuNpn~IRORn~a0I;tQlHG;2@4-X|675K`lt59A= z6<$RsOJNZ@s^hERH7F`6FiSyD(35Jrb-Vh(kTQ#96|9s2h3b%?tI>7ly0v~!$Lcsi zQmLT?x~%Lpv$W*L6a4bHPa`k*aI8>ug#~!(j4a@p(K%9j26~DtsyqwyRG)X{#uZ$r zO;&y0m7CYF|L|d?&YVs6*#|+iwwi-m4mB*ym#WVmF}>_CWSA|X_Xrxa>*9c3{Rtfc 
zuV|*9#MX>YQ@sjZ#Yf#gUxtPbR6e^_LI7x+uTmDTh$eg+Z|!Q#~$7HQQ__HfDQ*cpwoWt z-(ib{XA{BaEER!f(4xhajlFvxdIVmj><|H0LKteKg`g+cD9#oGf;Hhpem&&jKglmGG%A=*ixy%Ssj1jO=VRmr{BHpVi(9 zXo8;gwLm8|C)nxr!xqX(;A5UEi?%1Yf+)Id>KUPI8|rW(G(-&sS**BanS2?~HxgIZS8<_eS*#^%=X_g^L-C_71ex*Pz@GS-SV??XlXMGOYb+L&?*zYN;PlI$fBsIBOv+REBP>} zy?w_dC{T;8z^6Wb1^oT8x0)p_w;+#Dt1^uP8kwPi{pu^J^i2X^86Q4UeAaq6DKdo= z+O6A`WCF^da_`SNV|S~0y#+m~JHbwMc(?D=n)m4a0-pN#<>Y1%lu~p29|T=xWjP;e zmAuA;bvYj{{4TP!)}*0U;ALdV+WrT6%C2%fgElvv>r{GYE?ht?9i)Gd*c3&j$MP6R zmW6oHwc4QnKnKLe*-wS-WG349v2rcj4lO8RFwa*Gag`j2) z>-+TCR?z8v%E;TRS1)*Zd704f74(!fMbHc8IrHZ^bM|mS>&|V_=!vETUK0Y3pld~I z+lJAOfRoa@Z)*+oXlWbT5PEh^(Zs1an!7%NM?Igwf=`=Qh?A?&F=Q^c>+i)d}9yxqqq~oTqreezF_7PPRvqAC#ZWOFU+RD`!^0SUTdKIx_IidYbIldA#4 zI>0*8)td=DNS+VM}G;)CjS}M?LWgk{`+Gd5JRG(m`W3<1rvXa(T1(E8L z>P~fh0;7)AQlDe3CoG@>%#jk*I9fGkl+$Xe-mR(*HB@3+l6hubFel)ylBiY1jb zRGk$jAwfm$Ei*<}3N%xeoPaLoZ!47juc{{SWX}m3{syI_(%9V`Jpc;%8>y<4$f2f= zaOT5B>&kQ^7?xL-5r~z>{{F=P_&ce}iZz0Q*Gjfjb#>M{K7me+98!39@9}*6S&u*e zQfjF>6V{<+8qHxDV#d!Q;H2*|Ui0+aEpw>WFM{pZL!FQ?%8uo^9UAzn!Y@PzNwui^1pyN# z;Qa@BQhcE-;7TtQO0^ai7J$%TKZG$td3+e*W`clI7nTq~lk28av-mmxEZA<=y|3(!)1yS zocP*)Y7caqYKP9#3B3ihw`*u=x7wiF9)fRwXLLKnIBEmiBaU#|>w@GL#{D<+2)wd; zjJvdWx2ueTlbtOh>@>8jkn4h;U{`mq2H*elTP#|>5TQ{aXsmSbjR^b34Ujq~6SS%w+}!55Q~;Bf{VKgnms~w%;S{cLL&t}`|MM~s@$}`Tf3or@x>PgT55f_ zc1%{N?7tl^DA zhYm%v<}L8ZqmT3bjWK@WRIJ~))g=AgfB2Y50&4owDb$azvAU zl$unV*2@UMfBt$Eis?w*Wg29_I1NEW^)x&_7W98SgI@rW=i3 zH{nC03bl9sBmlC|uqn?WLw)y}w0HzR|Mojo9wA2%UZ--+C(tMG+*z};C^Q_;x~{oh z4j(;?`rCE5efJiBJO2mITQenxFz42CoT9P18Qou(6;^%VM<2hB=`*Lo-O~k4nl&`x z?dxH>zr6bTOH^^1qeEF#cwZ5;3c*jnliF0K9XKHk)iAgZf^E)bM6k$9yaiHbAYdhGXlI_5fb1{xM_3ehdvBrFx^v6Z9HA!(|&PIa=FB?HL_;6LjXyXzKC=y~b$f)RfR;I5)#19*?3` zaBFmkrWzaU2*+VP7|sTGT3Vs!64!$Y&xycugliHNU9uP4Q@r6Z!3&S|;iP4LY(-xw99E=X4BS%;%3tl$&eqy*2L zIb&X1->-+5vA@MsoP^%tqesmD)c!+<4)U|}af5|QjchtzBQvx#1N$5B8WL{fCr&{r zL385FIllKkgdV_2@Dc=@V;rF;<);>34Oh#fLes2fzgF&KdjAhQJ@)Pu=_N<{Hak93m-VpGtDo>Wy3O(fuG_bQmkINewX;jPwShQlf 
znN=__G#q|`;!H4kMQa=ANEg|eqNo!_4DW!%QG{MVa1_EKqv7MPUdz5_T0(!qD_n31 z3MN4O2sw8%FGi=%?eR?OC-GFvW_afDMri+3Q*?T!1>+HPYW*0xb$A-KUE0CEdnfpM zQ&#$U8{2Ayr&@o530PAX7*22zc7BARZ*U;(AyuGkuKq8fDA-x!e}QM}(u~^Tc4NB< zRH{COV5iW!fau}j0TA*IXiVTSx~hAM8ZUVB z*z=}wq{?`xdrFs>E^rv+0FQX8H>2uY;55P+&Qy0UBQ5sUHQt%PvxDnsg)3Z>y$L;k zc#QX_(({4WR4>|FPk7Cw?VaHUhiQ)JK7(MJZwseY_Hf!{j~)m4xyPL0c$_Ly&^yNE zAwqArE5<%Q4h4i>oq=8{p;toaF$lkEs=pdRkD!yi)wo)%SKn3ljdHyI@q6Y5S9tk* z;`o_kIC$&;b{*V>$ulQo&E_?tQdQ9A z9XdiK$nVg!X%hoIWed4@;hgC-+k^nna)ueHGX;|-P2oM{joo|pp`7;d#F?{vFCo#S z8OCwn;p3-p5JOEOuy*~L2M|n78O;aE0W&fFu07kS z+>>$U{3+xWWmDdVj6c0o1x@<1j(|AlaE%}UjbKp-# z_y^M+vb>shgkUe0XB!0gx1$Jq5`zb|Mp7c}t#42SeK3`aS|81fXFf)$g$4P;$K90> z>x8yXJ&sn7H$%Ipnxk9$C*atnCA{t1Ai%L5{2kiBv)fbX(eZIOb$A@kom-+u*H-A> z<}q|`^$0pVMIg3%0(PC+!QIgwfnJ^nrCn9Mp`~$ys*W~T_E(`VtNK7GLB)UIC)FqQ zCzYrSCOrOu)%-uQ0|! zZLtm_{59rSsA>2}^!YA@QQ~ap(d}wh!;XQ*2kl=Hi&fhu94vx#5;IYLGUVEt)54ysM z%Fh0<9qbOm;}#Ws&6;AC&Dic8RwQj^ltYlTKxSXyN*b#Ypn`agk#(9M`cR8+1*+=r(tfHQuF`}O6YN*oc-i3wO$d%N$jr_|MNJ)w%PUPM=mJ_%bw|-; z-mk8p%QE{qW;YsH(1^>g9f(J56XMXi4#X`q`(ZPF3eb9fpq|Kgx%L>bxKlN zT}740hhuFuZLMYGg%U}8_T{JC&(AF_q-D+FaphcQo9;2{{H(e`HDs75FL_XxCu^IX zr<{TRsmHSd?>bf8)wCP5wHAe^kr~P@sF5hs=gcy~$=g4`G^(2Vyikgq!0r@V9a)Ya z$AJE=5Es`P4gA8Q;7j!-Ldt@M5O%?SENr%&(e0VX(4}QlIJRy9xAraI)u{#ix;%oQ zZjZvhV>1MHYL1Yuk08SKaYWiZjxd`?5Xge<->ES|Y#v1*j|+Eb1^=#3!n4C;aD1u> zoLeYc7#4wmP^DW} z*BE|RLQ%uyOe+o5mtdq0(yE6Ps1*MJqn5JW_k$o62o+|3kR?KcLrqRREklC|STlZ) zDo?@f0VedFsDgQ7?R(gxRfkq+{!}wGd4fQDydj!V0k?Rj`G5N8wWgIN^cr^&@c7!6 zuL-?ow6x88G{<9}kD*g!C-fNX43Ah3c*fJFKBV+|jHJq=rFBc7s$;l~a)o;m!8fJ{ zJjS|F;d#Jgq9;|JH-qnGc%}Nle@+0rX8RcE+0G>-*Sf-c7x(Sw|2X7{9tUWj4>-b^ z3eRC5V>h9D#ZF4(<)nDRgTX-|LlPk71Rm}@=XCNr20~nloT7d)z#HKP;~;= zS6^w4g)%d;M%xsEp6dCm{qWR)QL6bQXz7$MQK{t{-!J412`R1XJys=XwZ2L{mYbJ{ zsj1VbK2^9I;+t>3Gsy{x2(b!+rj$S{E+LEww}Vt#jm!*ajj3u*(7M$TEQAF2X?C-s zvP#0KmI_9#qGbdWEpBl+Rp3d&f!~Mk-MM!^@~H-ksRGNYYPr9RU@Wrq!{h(VBG}YF z@DQPDv93*U{Nx#TY2(dycq8z;a?)&^6`W=n1W$SkA;pgi6YYyMH-+jlZCMnyT 
z{5(Q$vZ=?^Z&ni4ni|4M4MYqL$Fx2mlzc&5hE(S!I31&^GteX0RPU!o5RIDAvX<9Y z`S-N1annY){K-eAo=#CjpeZVO-PKh^-BgUyBly%Q*)p%KWzwJj)aTvrrjnI~o0*ve zUIwm7=?Q@9pqz2d(%6xmj~i*(m^O2!DU+rVgJm=*#2@Zn-4N5)5m7N6;O)~4A)(C) z^$uvD32OX9E$S(RAn~K+aJTOYr;bmSR#vQ02wM} zW>I<;g=PVtASl%qWa{B$Q!OC~s_xLAc2~!#u_in$lE9p_tFaQ<#GF|_VyBbdz|2~*9mqz z9pSix|989W?O5axds1RZ(hfLHTZ9jfke+n=?j@0~gy*ART~f6BtAf$D@_ z!)A@Je#-{DfBAjd74-unBpGTc{O-GNO~FzKYJ!ytos^x{1thi1YMWHuqmR)j#mqWk0hu6@I+1BxA&(fmub7XsE z<S4Fi~LVH_|biyxJ;Hh=iOBgamf|5L#4AMGd)l&X% zjW8)N2|cXm{+C~VG3VTRuB}xGO5pm{S86okHRJVCmy+Wrj+&|nf$jLoW9H$dsJ*43 zYGxOILUuALbC9xk%YU7ms~t+TsrJ9xR+!pq$e{=V)6l&@(; z4k!5ZJ3^`4EVh)&PvBEV^l7)cKYy2c_&l9{s)Hzv=8B%(*wiR%X>A`z zOW&u^Hnbf&M07x>=uW0@o-=K((=cZ^kC4hE@a`)-LC@u3=#6(b&~qJ6FixQQBlz42 zKA-7?-Yh?O%=U)!9D;MXE4;Q5di$v2_weVyq?<+nNEC@OKByo&};F?WBBZguZ)_K$`kAazN+e49#@HD$4^s9X(ly3 z@ETLuH+$5CQE{}d2aX&kYzShiyzD$GDXy1R)Z*&R4D|5uLZb&c_M}2Bb$qktF2WB# z|7Pm{vZNYo>#3mYvHQSbG}gr5f*w_63ID%FizxC6iV+>t+kEH71ml$}*Kqafb@N)# zi|*CSs65qwsiU%#UgP^Iqi|(!HFc@3mEs>iesB8n=|QDByJLi&b>Hz* z$B{)q)l$i;QA+Up^ovg)?6-y>=;qxl_BNx}NXJH!ep}^okQ-;-G8W`k-ut--#^y~^x-{!RZ z9S|Ay6#Djm3Jpw`5h|7-Z#VcjcZWMwmTLzBryZ?ox0VR$_BbKc1U}CYdaWA3kKhZW zg7bd50sN`)9V#(mi7J+#HD%L^u4^|jb!Z#34R4Dsy}H0bEw)1);WXUo zfweWzyAM7$L66qfYXYt6M0Wy@ARAAmHjzLi^t@;Iz;~`cychVwWq~UkmpY=y27b;i zzIUGsoC!I%{RHG;PZJ&oJ>h>e0D*@BG5*qci@m*9ZJ<|sKeTQ-Am4{x&7XBBsiEz= zRf*gDQ(wILzv@lesLeaJz|qwSO&%eL9%|9+-M80&DLsKqN>55dedz={fzR46Pbx&J zPD;!QJ#ABR3BBJ6y#H;<{rF?0l`5rur9!$7gTsf9n(0EVb3iBuqb#i&Nc39WTcq@+ zQV|*$@qJ(Nc&nwA$}6v^Vgc3I(@JOpc)`xJ*b-VM1hD&Y;>>xJRo2ncR~g`GDYd7* zen0t3mDk@!X-zFH@o`I?9lz7jV<#*&njlu4 z-Z%gF(Y(JIAsQYLMSE`wWW&f0*HDipiuT z{*y8Ju_qog7E?n31w6H`9yw-d^wC4?;L(G;A8Jj4i1L~W11r_begDIEn73f=g9eR< zW5b4ZsIMm^i`56R=z+EU-^si!N>9McG99axKk$E{m!6$wpqG}OgKO6_sPtx-vT49a z0pu6pN_*Q8L1FFTAN(YI{2pbweF6gpwMByns-$2q7c+N-YsZ$fvn}A(rWxD_wLn@@ zE1(3rFc#bpg2|%Z2r@=wx271-qb2$}K8A?SjS$|kA^K4Lg%NfEgjXOT=1W)#e2UO6 z%?Q9ow84!L(TT6A8pHTGS_V=@3WQRVeq4rhX@Ssggk)#c9nv~GwnVfOL2UOpyt_OC 
zcYZ$~o0bT4Yy-dlhqb?ex3XHh{_)-2-QC^Y9c;S0yVDlC3oIlR!9qo_JC3bKJ)W`= z#ZD9fYy8HTYfs$raL)6-@BhF2T;4VBSZlMsYm6D!ObbUzY6JlFKnlN=U?>v9d=MM# zjg;t6WT(cXfEKi*Aji}f)>S*Y)F{W6e1A39Yw8I$!mxsHlYK2`RM5f-gxV)%s1MXJ zcv(R>5P$`=+|h|KaPxCF(37PV@SOdra(wuQ7WAm}Tq9iJMzFgnu|S$TJhB`eku~zd zo6rj=4Mb>7xPe||LnIP96A8U^LXTE=IAJxCFr&TAS6v=~CzY2!As2;H@=>I^yqN{a zn?VIgrDvcwhZc815sH@3$}TTJ*2-+8ot%cub25-~8Ns)Xz+BHCx$BBizOkH9fr=X{ zQNN`D^&9Fi=g!&SKSc-hXju;(b%cT5&-@aYIEbHqKWu=f_>Q3=XHt7V{QM)n{qZ~e zc=QlnfAe)*bm>J@F;u(#NkjkfuYchA=bz&jm7fnoT~Y)(*;duZ34V&7fBxCPPk^&3 z?&X(XHv6plQz$>8rUOvFI%Ut+^6j_ZF<*Q^PxH|V7E*dre=RMo29|2sSAto49RW}k z{&VeovpnbVC!R9OP92#AKiS+L4;{vL-~ULvr|vNPax)e#T8eA0--Ii#z7Csj+-_9f zQ_sFYW&0UE{o-4r_GE28`BIG>UzpKmYbS-g)l>TzdIc$F3z{zVhZj4Ddcv zJtCpEahtlwI27x31fbNN6r&nHZrX7x6_rB;D%t7R+;@QBJN2|POnEm?FI-MV`yEyD zN1uLz=~Q|Gp{=*nx&@P_<OBa%PFb_x=3dpbEjgIa&%bZCf>Ix z$(z9BK7z6)!3AyE0jN*$LNzUR1yy1>pRZgEEVReP@%|``=Q24IMakjFjtfRQfu0^4 zjEuNYq{oG!ASW4x`ROPv%|%5;5h|+6_+eENarDwW<0Dm_9^wl;4j_Y-*ea|yl0 zMaWy6kL=|RYg=$0E$@}ZD7lt#eK9IGRiJ7!-_sVpx6Otp|S$scRy)F#hW9%wM{(sE%xrc_t?GFQg7Dh2C@+CXVH+|&r&SgBR9~l-Tr;;V73G(FZ?et;s_F=5&Zffb@8taWv(HgE zAEd>l4J7oo-n@hR9d`8^D%LOf9N+zT*kpm3IzwGh_+zWSetd2lo?bq*-@2ZQY0dpy z^``v6n4v}>DPvua;TIUlr2~Af9_~2fthM;zr$e-?{6RbTJrydSkMO&8!*x8*QEuIJ z>&-|S{kSPJj_UN38An411;DSr)A(J-urm91m-_ok$9XNOJqtalJPkb)@HF|60iJEK z73kh0@E8VoRDEVxUB4oHhv0wvo%gVC@d83kVx)vFEDyzH$tWm^q{<6NZDRyVD*RE~ z5Q^4zby-P|HNY#03qWbKH_9SuGb3G4LC`f4CN+eS6k9{A>gzlSE;j?c8Ywe@iuSZ6 z$qij;-snm9!Ju?M49N;aUxq)rXnT87z0ppq+mY;!Rw|v&bWgM;yK}>QyPro7qm0nhU4uT^?jM8)Sr+p>(4D6IGSSrt-b}Q5ko-wque1C2!jBz7kuwJ#OvL-}h(=;e%((#xXK%R%AzJQPmM zqqQZ-rg5FtwrCdB-fY5e4r5*c3Knp=v>vSl_0{TNO`D;ORVBNu!)-X$3zEKq`0zg6~Qe#SrjWYnV|Dz#`J%Im%I#+&$zWt2in9hEn1p{g`sa>2FN zU2j@%HS|n%dafRXx;jKt>ACf*TB}qQQhMsXqLCQ~j~vFlC5w-Z+Iado+IK3>wdbB| zy2L1Q^7GAbuP?v-hQ}N!sg$JPx*Pa@`vG?C#_RF@PX`V71U2R*3VPZnC8)ldrb99TrfELavaHFEjtx08;BzQFg>0_s_f*ekCrJ|U-g@U9ELtMy zN#zyt`saVB2V++XyoOu8~Twk=N_w z`F&}+pJTmK0F383JZ6AMz_jxIO*~hv%?(MOXeKzDQhiaK=uP+qp*kx96&bR`K}Zbu 
zK{DZ2n44jS)2ZH0U^C6&{o$z7bA-CgWaMTOdZBRlC(r_@@@R3*^oN2TRh}}exKlNI z#!}hw2S5ozHTuFM*AuQe?(iz|g-?l}fnG$T!`2FTNxex(?n^=1P+HOvw4_vdx$2vz zdOX?MNrYa1t8GC)^or*ep?DrGEp2VVq5@@#a4&XIzYc#;?y0H`8n>(=bz7?SPz>l=Ahkj%H`jhErcc>qF3RzmIyTdFa1*kE# z>eBMmb5A0#I1e-C&Ljvnnz-Vs%Z1^`|PoOwjo6Go~S-f-A@EAbx^i-f11zOe)+A^`;K4o-Me?2bJc)hms_@Mp_2Os zx81(Wly@d56{_R2kzY`R%dWVJU)+PHQ?aeTTej*XY`b|UZXo<@C*`~L+-LgcX?(7_ zsz~W+x%a_GkY8Lvn>ia-u3wK0e6EWwzKr+e{Y)L9W0ZxI-j{^%H-~=0mDi{~&Cvzr zy6ZO>MX56P@>=E@cy2>etLa9g`FR!B5!fwl9X#$(bh~!nW71E}Uf4gOkgFS>eeore zdj7)$pJ2#{k^TE!`F>Uz&?!5Ml8_33*IMYgIOF=wwAcjK*9X5f!@$1!{u^9;#igik zX~e2i))-ZI-3`}c$+9JUPmU2B<0gzVcKM61H2Om0nLndKR2ng*E_33zY4^Ok{Lb)5!SI!NpQ-e zr7ei`qOEmDX}GIVd4gMgvH4(H^}yHyZwyIuMjJudK}d~axk*K#=;AfB zt!N?i>ImN!f^Z1c;qWXk^bl+UXbbPt&GlhfZWx~Jj-F(KlKTY8COt2&)0r$_5`x@r zCJbAsDy0UcO4aaEM_a7^g)P}(Xf23EWqJg%Vgif>&dW$dy*fx&QPI)zIyy!7x9~Em zYU?PW3BBwrLNAO;kH4Be{PIZMN$F`eFxp&~U{|>FbM=U&(xa_+=MRJ%q36cWz>}YW zM}Zf-i+vGT6@%>d+Kqj7UfJXsW$2S;!lgZB$<26oD%icE*({CWAvnV*aF1sO)=RbIsBzk+c!mEOi`)KS^hY^^rHtKU+Ox=nRx+0u-T zt?d}Jts6tO^dcQ zRh!h_h7FF^TPZ!wTPx_Pqp^Uh5jVCWL{L?|;5SlNw45VHjWNJ8O3A7xhV0_>nX^sn z>GhkWz8ugqA$zH1zeSxje-V|>VFNxXJy}~VFTP@(0gh2_TI%`wJ1a{yu2^%5ft^xE zeahz%{I1_D!0H*Cal^*Vgw&@5%?~(j?b(idGS*#v9Tk&OJgGn2muAS>)|<9F&SRW^ z@g*h$%fjW$xvjsCE1r7xc>_?5&3*07zau-p;MlWmzvUK!?;tJoXC@)(l~=FlFN)xJGPf)-DsJ$zJ2G-{M^(LSqjeqJuR(jQwV+z zi|gp0XMrb+t92z%(2zRKhW0Mq!P`nc@bhv0_JKEct%~X7ws!+GN4ozE|(734)?OWT> zOW+N=X)s3KI25Pee-h2@SA^RUg6s&Pclb}xQ<%g91fO6h@M-(|!{6i8zrBL1uDuGg z=FT#9*8Ia~ELpagDr7y?*B4O7V<|n=;|Xl1o_eYoB52{Ip@LF!Qgwo(tc+SnF>*A{J^w<}M^1{)+n;vH0?+ila{x%-n?7egt?Z{}REFRuYpZD?-`jt{ z3?rL8UkxUDHob2*%wM?FG=wNE{bvM`IwpTkunB;dt-G4{a5b&5S8-jSQr%!nN4L4J z3oY);e|>`>QJQ<33-BugG7G(el2RP`{dY`KXKw+W JQHL84}RQ$BkckS8R4?P!x zq6ja)_PWW=BJge9ev<=gjH{{SWP7Co)%QpMdUyyut$0TD*Pd z4tztatWg}FeZikpsYk}=yoMB>g`OkSn4#8NrN!EBU}sgH6sfYtyuWY1f!;gsy-lS@ z=q+1h`sS5YWTLb@iNYiaxdlN;NO49QzjMVEfhegAGOf8z#ROhO94%j*4{B(`>SEk! 
zA>GidmRf?RiwdPTg$gg;1)T|0N(4~{Vb_;NTS^f15^$q=(MkDUm`3$7v&aWivRyDa z(+OjEY-+X>#?gk3X4%QPRCR>alma(QDe}X(TptW(nc=+F5URyNf+JPk*j#_K6HrZ5 zjXi|o2&%lnsjgItgdWx9uuLkyTwf|pFSJw9b@BXR`TiK3Lojo_FWbkV?s%>&ZxfZD zZ78YZbz9<{9cnO%R-F(u4J@gCXvz#hRa%H?3C}79hZ8ZsyTPQ&nNrl%BJ-2@7x##smuh8=t^uik>5Z4)p z#GV8P^oINaz05Hgju@MP?D1L1o|uKKshP-{P8iO}M$YscVt*#^ zjxvt$%i0e+j^^dI+f8FTw8CaGU~sl^Hfd%)6xz>ldaXzvG1vn)NMtn z;D7jmz&>=C$CS>9kmj{?O+AC|t?TI;vbUC9VdJ1(}IrP&Jep$cemz|37 z;34e$_!HBh@Z*tROg)}*?kPU!`d8ooXkaIWCyT52?$A+wR=+U#*%GjVmsI1YpMQ-{ zloFm6`0Il|nc37n=lP!!fCqT517Ce@8ZD&Wl;2M+z$&Mi(KK{Tb$lOxs^d!Qu8hau z^7B7pV5az-p*b{^W%)8m}j|j4T znw5_60m1hEJ|!EV+Utj&>hTm(d8(Hb^xl^8d+%L>?`^8Re_@$Xdc_1^CW=erDN);fEFTokZ z_@x~|yE=-{8An??gdpo81jl4JVPY1cM$j#(^uvm3Kb%}2gvBN9SW)GTQyPP?tkMfh zD!ee6WoHz)Vs1qUrc&MY5K#72%j(7D`3vjXv&cA*2RzIX2&qcGwMr2)N~)Ers7@j<^J7p>yIYo>fXbq5)K)3omb$Ri6L<~BpqH4E0{_q;xcc%} zUa(U}5iVV*@-+6=J&d+C$`c-Oo^a*os6KkG{m}Es7x)N0{xJe70}$2_j@XXae&|v0 z4NgJ|?QQCaRHThcL;4uTSORc-CNd^wAZ;@DPfbJSG%CP;=o!T~uK>A=sQ6aoBKOo> z6rIOkzspNev927I*HobHhFa4~+qj9Ac3USp1iTv^z#D(_XiVHb25av-17G8NQ~z_Q zAACO%e1{3W!?du1-av)-13@PhXu7og^c?~41HL`@t!cGYsGpZ$=TLd-d`yt=AHLuw zKnXY&f&!7?r%)$k?GvC>&O%QalWm%Kfl8rc+P3=yyzi-!1UZG)f8>{2OV#PwYwA80 ztOlMurV$wGrt$;J3w%e8{&H-ez^4%qwy{FcI{4F3{QNuc)6c&V_&*zX$a)HTvbL(r zldV-K@qm($9z05*Q*jDB2R{D_-+uoSt?B24Bti9^GAjRIWH{dnC(sFa+GnAt5g}4~ zQg?!%=IH&3_xqIZNl6eilujW9_nsgpWyj^fbwN)`Z$IO$ciwYA?}N7py?C1jy)gH%!j+#;g*5T2@z#lGT*uBs8St##6yfBlJ#f2*BBGAvm`^1ZOt*;j~&W zoX7Q3>wIu>tq+!#xL|Gpqcjlnt0FOqws;W1HmV}YRMkx zO!q~5syE-C8@g$m+j*>>&{p@EHiEr1-3JY6KB&(OLQ_Eu>hcp%TbzM<{({uksY7!u zfmes}ng*0sR2ik`7ZL~;AKC#yPfCyWR+9m_hY)Px1YWc!+y%WPH)!~oEA75puA2d# z1A1NvsPIQ8?hxp!jB=!+}Lz9p^JQ*n?laZ_#O$d%nMe2l9Bu`F3%9IqO zPt7pU%bTgz+aeS#EJnf7LIb`0(;Z5$dL0!W?QQLKHE7z@gpQj!(S7qE483(2hTk#_ z<8B*+skcwW+&gDr-maOrxV^y@D;LilN3g~NoKz-JYnT6L|$QwVxe zdMc~FU24HQN?@vQmO7>?luSU-6J!*Ep1>pU2}ar$5C^J0?GvB`MLXooju{oaRMsj! 
zT}$0nRDUPPeIqy#L|O`bKm72cDX(kk8d};`-@`|a8nCIAR!c!o>dMh#%lAhhd`|$X zW3bfTw?F)3p!Wk|rzcC6ADfQKKbW-k8vgbL%X~yAe$QtY zl+BVbS5{>UJ|!dFci?lQLO&-|wJlH!dP+v>P^wClpt*VZIrE(TRMO^LLQT;7OxaPs z`^KogeN=E3da}G$^?gj0r}{wKNbxDls0+;J{F}*6OSSF$_yFFQ!s`z~?w$9QXh2<8 z93dFmOfiDvd+&4q`|q;cKdJiufu&2Apr|Mx**S4YNee`IRXigWl{NehHzuN~Jr2!X z@u+W2fRo0@sxz=U4p*tJF3yd>@HX8|#ubK8b@^a2<>AaCU(BW|Sy2~?CA4Fs2)yaE zvvW(hpLTU-juRFYIpMT=Z=6x@fir8}aCUD8`SSxg{Sx#O(H0IVqU#JoZ; z%qjGxI&{Xwbcg*NLvT)|1)fEPIJ4Lt6M4-^xy~3zbvT&N8$}pSFABhn;s8^3IDzn( zRUSaa=#4S7)MIj7F}cJK(+JLS#r_z{=kB3Glhol6 z2|cUy+y%Y(0np3lv3}@z@t4P&zd(MaJ_xB%yZ)F}z&MraSRkdi@*ktKPkO$`?DE4vZGM3(#{HcN-w4Ok|6w;@KRl! z>3B@2z5C$-!bNIOkdYcB_%xp`Ro}-1;zv@8gpwu?-%n6|Mc92zJ1kf#r1&(-L(lLf zA*%XD!A^EqYE*#!`13FDE#LFLPiV(~I&7X(_SjnP{j|mcup=BT!+IYoClDUsXDeVT zG{vN#C)g>zR0rmRf2cbtzfZpS+=O5#wI`(~g(vXI?y8PZeHAVAtc6urb*Cvf74Ltb zS<%#i`BR4~H2Vm~_ugT-e=`1Mpr`33GP7clo*9BT3Y(NPFJ$HfBDXLQRgK{oG$IpS zgR|kJQ4nRJPACrIGQ^>LI%)N~X$dD#RZT7oz~o#{M?D?kG=nyFNtqAE5>hkry|An* z1j{S^v6A+6O_du~l{(?1GAEqd;EMAaU2#=U5H4!*!D$svSX<+POFP4HX;%~h=!f}K zhO-E|m8EVtz0McQdG4w8o;bfd6l-eTF~5N7j}Tl~=!ALs&X|?$gxNVRn3dy<3B3O_ zKFiWdZ!E3$#q>M^GvA5o(GLr2gE6}*5YsCHF|NP|eN?4gRDInkZd7*!9p7Ue)nxR49C{n_&{kJ~x~f9dsGpwcu9@8)w6TnV&~xMGX`$yzI16|V=y|}m#2Z0XvbbT0YKuZlR}5l%V-Y(j7BPJ> zh#3-#Xh!TX!f-U9H=fX&O6W~bMaGOwhhclcL_>Qk=0gNGCcS^lW0&(olFr3JUW@qheL>Ps1O zWplqh1}Z^Iee(n#Z3~RQ^1f1k*1lSGCkP6f*3t@kTB@ARRST`PwHAI-eyZ1#(o=a| z-`?BWXW3gRJi*O&MpkHB^?b(WG6Xu=N!90RU8+d1aKP?cV;!ZSq{hBhy&l2zHOqd% za}E%O`>4*O-jrhg+aIYe3B1nieAgHUI5L1e>Ov6d={v`5Rhcflk+! 
zk~Hw*_NVF=L!g?8j0iqGoB9q4dPc?ZGZDPL;Cqx3)iq3V0|HdaQC3%~Q3_S>L(mia z^nT1bt+K4H=GOg$KVV<+Z>puWxl(@L{2MI$#i3T5SS zNT$e%Pjp9Co=uKtrJczal&;&LvU4hAkL_8 z#R`JyG%l}T*)>JZSWTFnRP2nC3A?p5?l`&J3FkC=Pz}1{^jc?})8dT_JA!atYarIr zDxXqC$QIHX6P9xbz?odGuJXj0Eg@J|Mo0>dMXs1?6e1x=>o}Pza3t-xT6~B1H-hvM zbgJi514c`#2ih~dP*0`Tp6P+X#X;!E@kJd!uqLX#uF7=uHkP2hp~56JP!|@Hk*2l| z#U&+(Pf3Jlptl2hzJwlCo=dPBoK1EWw|;v|_)%e+I=xKV+Z;D|3wQu3o-Cu(p1@?_MjI@pr{&i?Pl`|Ak=pu_cJ{!Rd|!uuGuBp9KYpTp zyvC<2qw+ug@;lY^Z^zW*CscrfsVuJQ4ppxwn=9L`=QA+obLsg6E_G&>&DAsN*#t{# zzXd<7>v>gH;M3C4(4lTO->)@c}AmDZc%* zv<7xu7x=Uk^gbhamDlex0#DhTKmPQyKS9qnUJSInAJUqCOi+IK(Z^JH?-6?HuJUgK zy!WZ}=FFan*cdvcm;hvCgdrm{2(fXlh)?i9dX^t@=q$^sgV5MUdpjiCA3X%XAgUns zk5dck&=ePp&-FIIn?a>CGuIsp_~l$)>WRfvQ!7h6v8>b|6Ipgj5)nvm3pP znme<~1y}b4QssH$x?ushsy7H1G=*Yio-0<<0-r?4tt6lp^So0l+;M($AWkWF#@bpB zoI*u-S{0A8oFI65wToF^*cM2I7-q^Yr2<@4>5e6(&REFjSxCSwAy60RIb#mh-&BHm zMy@LsR0bORJ29V1mG*o>o)<X{ma zZ4y$(Wgu;0CekO-)=tYp&WsG?&(1{g!h8a+5P1uWkh7#1m1j4i`KnHIZR|qNmQM7k z58l>34Bs{iqqmR8q&p^K{sZ%I@{_0Fl9w;TwtwG*+upq$ckbJTCqH=tKM{Il9e#%9 zQvDf6erNo~pv^V)dxW1_bPxUf6TgTA;1Swef^N$;^(S*Ool!N2{l}jU;c&mAQhqku z4^Q1tv`=BXnJ5PKxl>tfD@D8jr)(^??7(vBt2cc3aMMji*Ao;4Q4LSC$L)RX`7S!2 z&-oo;_a(2brJ$#lMOC(|(p~E|v~Bl&CAbnC1H%sf7jSHtNrVIu!Hu?2>hC9l>!_xI zlmh$SVM}#PkolG{J9zY0{78kZbW*aknkz%Cw;G3g1XSIcH+2O=jHd_!aMK&1N%RE8#883B=CX|746ILLUM8d5|eysb%T+e6N2nqe|~qP z(9xX%r@k~QAKJX$cqjBy<*AR|aH_b;c|Mq38i)nu!B|AV&ChqoLPBMAsRvH04#1KE zsy-^c`Gm|V6&|>#Jpfm91>rOTZ8ZUO{g6-ty$hNMtY$A<+8Tok>Z5R4nVV67D+nva z1%%i+^*&fz?S@mS63?u1rb?tWZt%eQjkLOqvua#$Sw{dt8o^}{&TjC+x$Qwzc>c7- zKIZg2GdiONwUwI9tp=rK#R%67Xn`IC zo*SGrH%6p4++%#;7Uc<-2&%hicep2b!#ROAI7w4I`ofzamdbO_bBAZKFZ`-Q;aeRB zpPDd)w#Fi;F$}@{p$KaWLv&9xVh79K#?#)?){aj{D&dzkB@5Zp(~vzg1^M#{KaIjz zN{hRu0+r`BplMwPIyZKpiz=^Yb36Jrbz{WVQJAn}3TECp3u_)(O*?xLHomz9cfPwD z_q=y6_U^kE&mMS|p!)^j7wi`T?PhD*+^xoP35#E)e~ITQT2ivxnZIA z9@U=aGI{6Se-VIhW9sy=h=~hATznuB5`zf5AS5L#0+E{+iK^;klvl)}qAD6rBeQ&| ze7rE2s%;dN&iHIEjLA?Vf)}P2_|YnQVs4=i=JCt9fJ*JOnn0Xd6@Z1@KRcD+DsaX5 
z4gR>2FxoUE8fTR`<20(cOPf7$S(_(qEkRb{g0rb+E~p8|>9n(JE1Yp^u@laqy1TSB z0O!{caCPoDx7rz(&>FAn^vC+H09@JbYqrm=p=GXjWm#{WPB5NP>4b|q{ctX~&uQ|; zS@oV&jILNkb-1#GcDdRca|z!0++ITayR_CDi)*~GpxO(w%3Lvla2(C|sUdReWZh24 zw-VrjUR$aQnv-2o7we3MByTjN2BEVw6+>F9&{XTNx5}?q$Ec}PUcNfSrX?dJITpT& z5d>TS!AAgw(9TBt!zIcW&e1;b%Zfroc`AZR6X1~(#Gvh^+H=jN^3xdHs!;e;g~6*T z1irPQRC>{f=!ir_XE-fxIHC#sm|;@u`Ge0V5Atw79u*(@?T32PLcWQLwTA zd20$$ab7hVu4+W<4UOp7(ny=zOz^c~*tVgVv~w~R-n9VdK6wsqczq-8c;`+$xbI;+ z_#Wef2l4#J&l7h1_EB;C&i_%{8T1{c+B^E|FD3-N!^Y+ggx()YPqFo;8*%vP5d%Ge zNQ%uuYT?3#228S*17Wv)`*x$)HgDc+5&&pun8xL*+(77QUW}V=y2+H$y4|mkZPkbi zb#B(R6`B)8lLYB{y1rEC`fIMnPe%?L$XU<{dRh(ypWU{h?iPZ`@z5I>f{QIP5H?mB z>X>Z^5%i?sGzFxfckt(5@da(TfF|I5t}Z8p-gk$O8sI5R%P-65m;k6z8y0pJcm`sE zRX=ofU%^(-VWDSXW+7*xHxP1m-EIqdpYpl%YzBM;m>{TQ1EFVQAoR49wf%}f{F2*W ze6BS00^uLfdt3IF&{MpB-~g@nr=}4^^Jcui?|ox!KTw??L;p73d;e{WpD+p$k%5F> z7*$>*;^KmlloUn)1|vBo1o`WIwl{jIY{sO!ViIlb1lq5$1mAcC z?cnqr56sGT!+e5oHPzCpQXeWRFU(J;6(vM2qAFY8=7X($p}3gfx|D!AhhRFdR$!x@ zr5#;LK%PR_oK2N?0oB~4RBM;f5?@5vT`E;4kP&VdRy*P94lisO8iGxOgK&M1AFgb5 zXIWQVNr;|CkeyBYdu~0i&85PqLc;Hi8mdSF^<>)X)9Ssjy4n>d6NslY`(h>Ux3bY2 z%eb6T;(`eUUKpD0hE4)+NS+6V5$v5Au4t#qYoXn5PxC=rh96q80?=9*h2iaGXsOfO z8OqeGbnu?I zM8uCtLBhCfq)#g(@Cs0{C?CZuXm1I}6z3<+ON8W!754}a`z55_u`0xe%_B&;VCUpJ%SK?2) zPkqq{KH6H;D8| zb)%5N`+_!1Fw?q}orT+fgWgvJk_DU%3p5M0f$J8010f{4Y3I3+-4)EfRsKA+3=?F6 zik8||%6NfJ2}K1zZQFesv7u1ogzYLL_*m#!OKZE*d+wmMTFlqEl#*)R48s89flnvg0XE#05;MpZyOSbE1R5g zb(=dbB2dp|+4H%)qQjTgIuPg6MxRdTo~r$=emIjbJg?0U=XC~Pb)74g*ScUe;kc-l zDz?N2V+p&Ve6F#j-WXRAh(Wnt=%C$KN9C?ee{^K|p`BoADGVa?^3c^(g4(J|R9Drb zs3J#yMwklxgQl!jVlwznX!uLqgEok;6xNA}PT zq;^%nw>TC~St0N!i9eV6ca(aM zYVY?W1fGIcSI|?3<)07Zr=wJR$3aiAaq}hv7j;EZ_YkFp60`)S#fujkdn>3}Fey}4 zP!iB|Tn!EuZUg7acT z&{$jnPs5_L&%(~y+do53`&Ir#A#ge%C{^{Jpr@svr15>*Y%9*(>m{^XE}8ASJSOVjV~5@7I+qP7J37LcOvNNo+pBy>h#p$ zA<(H`pTa^5)iGq4iDlMP^PIoMdUoWx8Q_ zyrZw&RDSVi7YATYNf2fi1!8__2xjK_U~aw-78UtoIu+Z39A_*fEKZ_Dy>5^@ZW``^ zJ4gFtdyg}=brJ?NN~c&dKzG^)%CTD%EBUo0hX=Muh)xNmW_C#DkG<8ob0rj`-hKdOXk 
zH(PUkP~oNeqAAe>EmXnn`GjDR4~8_;;&zmwxlXpWfzYc%b#0^Rs9aLlfTY4=WYg+q zH#8!tss?eizKON3xGxGIkh}x*FhDnUC1+MzpM$hvDa* zjA7@l!I+CrLBsM%NEuU)ptekeb!8!ZP!1vn=OANh4eC!GjgjlupzFegs62TD3YN8@ zXk{}>PHI8f=^dy!e-PTP9*xe8W6`x`6gsyK!;o7>V$7Z6G4;L~Sp48pT=>L=*#6g> zaL>Q*!ToPPK$Z6h9{9%tc<`J05zKeo-Xx~J};PRa`Ha|}J}ulHkE&{^nNz?}$svbQ!F!twU@M9?$!dxV~Vr|v0= zeYCFXd#A9#GsDif?EoL`tDq?0$>OSRk0AU|Do@aR{{yqFc{kqw;4O?9JCu-fXSgz4 z2tH3lM*ATmifS*xA1Ub}h)eQ8RGb@}CT4jMbe@===ZQ%fgdT0|%zSUmEh6{|{V|bH zoS5p2#ic=5Ru+iaRC;Q>SeEC6i(6c=Ww<-;80U$5#`xea!f)?5AKX6L1NTgi!1ghr zxU$2KYRnZ^w76i~V3rvbg1bkC;2J8s4J~fiPT1W%$Okv~dSY{%6Ydz{i=BhraoaF2 z+&v}$w+y9%>>ym53Da&ub8G~zZ*gW>S99*p;bDYs1g`E1zRW!69n~+N_uS7c4UvPFlg7bL2_jJ{(UrrbLlOCMc{i~n*7ZhGS;-1YWdxaaMAaPM39 zVeh~8V$Z*Kfv+bMMbyqOh4vY+80HFV)Wp zY66a2ly|=<1*F9vvm110t z7e{_RdaNNskW-7Zg_{j+OARW_F@E6(0?#HFHS7JM`DQKjEO-V&&w|jXK^{BSAffuX zeoHI6YgApoinHob(E3qbeE9b{NRX@Ju`;N9e~7C12STkszL5%)V&ru-9L;W9=vj~s zRCfbapWSD}*jED1wAym{QGX+bwsqV>PoO&u*N1_7sGWeSJFBlP(9RA}8X z&h`^yg&ivIL+mFA9YOegNcesDzG=kxVBfzmc6=WKf}9Z+=|M#ofbb|!BqRqSI^G9i z(QZgg^GAAaFw$}Y2)ztv%%II%T1jP>=Z@)wnqpce;a2RAF$qo>pGbAY^#xQt^K)Ej zZC!9yttYmPj>OHQys>Mn7j7TyhI>c*;jtOvxOZ|8c2AAM&T-K=r_vRt6F^tB5^%!; zu&vV_w+`~7D)b@Pg0QoXAf%ex-tB_BM)+Xw*Z}P4al-Yq$v06oZfPg*8W~O89^{3` z=Vsu>Zi0{yyt&K6T;t|pA-JB3?~*2WoKLl=hL3ZpEYGfx{dJ)Q_raMpURYi3fmId0 zSWy*#rHn;2{+L|kgfRt-Vk*5%FSI21p)t-M^|794PNnk8cSQ@|Z&Q&UMt7B>x3LK= zH7%&AszYsEBPwefP}$Io+=^<%<`kf?xg7~*)d(*vM|f!s;_KUyGiWr5M^8iUut~JG z6Hzp7Cd#JHN8W^4NF6x^DWj(&b27JQFGt0))6jAD#Tb3nwHR{oWf*+%MHs#AN=)6f z0kdx0f|=X5Vmf2;wk?=_>kfkNX3V*D6DDoB0^>JcjQP86z}jbaW6k5Y;=I@H!`ZLj zg+-5DiSfHu5Poy1^yXpd<129fa~ELK-#20ByE`3SRo=N1cf7R=xBvS#JVfQE1fdps zzy0)<@ zLk$xvR;>6_d5zkT(zD7^0JZnEb%J`vnX_jZiz|>x-3ffMwgc<+Y*^sgP+vZU`t8|p z>~p|S5VX(}qy#(*Jps?c&xQevfG0o^OoE-QVB z>2#(gcBAEuqQdjSZM4<9sS0Q1wJ&pifmQg~8#vby?`X`jIMpMa-*rX%xlVL@mWpE@xsPo0$2 zK=|3ERe##H&{4yLx}_*&YmWhsDo=HBN+j^jcRv`QX{5}rgr3@)4*&A20p1U^yNW|c z)iuVkZQ*BN*AKw&q#_42W+(<)T?;(hIB`7m1Uf-ap>3o72t3n>L8YhRZnhCajS&Le 
zF>7nU_X&abXZTt5=jfB?7^^Gb$=ZHI*a>(xRKI7`-u`z@KfMv7x{Z}h&x%BITM34Z zY({lmHj+|WtjqT`l#ht^wa3i57^|pJ23!a-Bi5KR_;Muv+cxZAk z?i}fjTZVYy657?PsQ_;v+-~W0!o6d>4HR$Z@f(M@V>8v}?wL_|U~v*Qjq<>C!)U+9 z1>pJ-fw;Kc$I;=LN^dbiwnlvoE4(nL)D`3Sp2rsYpgYwQ_0gKt$R9O?pC&+R;txP` zHkDqU51I<1F`}grg9x?y%1Sgg)H@~$@1KM`ySNM)02D#%zGaojZD>VWeJf(Cnh;UR zsBS|-+fd{VpNjH{3s5s_C2|P7vX)2~3?+V!YDa~-O=Y`$Ov>dsz| zrt_~w<9XMh?cy8IeBlNR-*5+}-~JFL+_VeRcHV_4H{Fg=*KWqJE3d`mO*df{x2JC2 ziPiT#hKpW)4Hv%p0@lC#0@ggc2@~&JhWU>zCitdd;p6kM^r@v-^2Aase_{oeJ-i%C z?pum^ch19wk6eiF@dH8kD}Mg%7Y217m!{QL-Bkp_-!&{vV{wldAZlGBF*a{jreX(} z1Q(665VWM$WIL_WTC`}9fs*=J_1VL+AZR@VqWmnZ$VXv*FRfK|?Lh8?!6;Pje z;)!GSR@PY8(sSsZ->{5@*w1`FwgE#{*MjapLXXdPJn(D`1fNn+eaUq}&qB_E&e~Lk z)-7xVA|)-5f>Yl;wdx9dDkI=2zWtu>gG%l2&-yz6M}Pa>K<_7R3w8>-emwXbA>cXe zvaW4I&tPF^mjc;|pl6kywYL^}8n!0r33SIp&xQkZe}4L~$8}lgd1?EZ+&IS5i6g*g3VnIIqSVn})>TwsG;evCkiy zI@}!4o1B142o=HX@=7OM$~Y~{31{Rw;lgrPT-)k}ox>xrV{jxkwR+?7awl3`C)_t7 z052>`z-ud#asPM^Dn=*VJ=_UTP7lQ6Qv<_>35w|6ZS;-0adyr(zz zjPt-Yr8i6;(8FwxY$5vkFl3opM$YEXh?QOeX0{`Q{7RK?1!%M0t|0!MN@4RYN{(x zTT^YoS4qW}Us_JJSB$Kh24pq1Bd)p$q2&#TsBK4L`!Hk;nS|o8b7*ZBA$RILWKN%l zf<>!QbNU6SJL?J*pK=ijPPrImXI+J+%QmBdpli5z1Dd(sb@dLkTyY}?Z@ddLc0Yxw zx8I8?gx+MrZ~Uem7<1iLOxtn`7TmfAi*COUi+4SUbN})x*3#l#_4Z3R=Z)Ji;^sM+ zbk8JA*fSE7ADW2C4^P3kz2h-<_c)BceJn=p7>$uPj=-9G)__RBQ7Sq0&2xZH!_a;u z@EBbG>F8l&XQl9LQUOisymRNx1~Lk%Dgj1aPkyHTdiCX3;NnC;@tEwb0QEa9nk}zT zcNK+>30yi~&=d>>N?T3<)v*gNywF%zK`%c)-!xL_T*1vMJ%!Fyi>|$&mCnVwNMOnCaQaB;zO(2Ei z=0V=LnpX09LP@Q#Tf2O58NqiUA$Sh2c{M?`ZIB<<*Sio}E>wI@cw$xro?8%ur)GuV zxp`rDWD)^3)CrGFbiet_W@8)TYg-Ur-Hf=FK}hc#i|pZ(Q9OPQiYL!U@!Z8IT(le|D^Eq~ z$!8;H#hFN6bSg4epNDcPyXJK_q3s%iZvA$&U%L}Sw(UXhrn}L%buT90{UjDY`aBjr z_%s&meH^n1xhdOj!}J?(!@}G4VCkI?V9A{iVc9(oW6dLvVeN~L;>=fX!LaSKFv@zM(~9)sSJB})vTlu$rW61c27v^G>W zS9VpE*n)}dt;$Ims*%Fl*byT}m}_YN^5x4-Ssk;jw~9f72ATT5E3drLSm*0+*nrJj zw-~@#*h%TxzIw-lZeV=-9e)xG^u9g@J{!m2N6^{UU4id-m8U*)f|=H>y%lI=H>K`O zzde>wIRWzf9|&?OJwi|M!;iGEKmCluNBG$v75IMV--rs4p%hmdGi_3XafiC9*zPL! 
z*pJ+{_EsV2THpzU_IU8V1|W*ii;3|@LV_Rn z(ca3=t}1cI*#z8`gxlImSDaVtfvda1OkCFFYe08xX8r2D&^87%&!gz9uJD!;0g%{>U;4gEcanERfJUTs|R+kpJo9c3e zJN8WUp;Gjt#r476qh0XG9F|$;kH=37#Iu)0qqXn1*;?w59~1HPs(&83Aa`3PMw6Fq(6s z&{toG-u4DmSCykqlcm?v;%eTEnmVKvmLR#b5;?7%NN(;xcug~6T6&PwGYV-#C!mOy zws`6SN5zvbC9#@EM%-a8`)HP#pkX^-4$EWzTsB1T)P9U*WH4_H|@oU z+aAV{oeyHO}Apst-G-3&bu*d$E{enYY!Ihz7MM& zc@#?@x)Tc@xekN2&BV}KN1*rC4s_ktfzCTR(Xz7zEjwD!xTOgV*EgW?>ITfcZ4SO7 z_73-t!uUh!{X*!`-ZFk5;FKHhH`VJAc#7X?c?CTS8rA3N_0y01qEh|5{@2%zK~G&s z^pYYwzLCK;NN8J)8@7AOK&AKBzy8(0MV9rgx85@8=Cxnfy5o*J%>IERKoqjN0;5re z{gYkG&I)9LpF#~AwsGS>g`oGn0iU4vKf!0OA-F>)^*Zn}y`@#%K zn>2AO!o&RuIDf>(_#-)qz>A^E3wJ|wq&wl~fusa4q$T^nX%Q9A$|4t>TIzyxYCLd$ ztvl9t1meaKakybn7_MmY#FdTixUS6$>j*5>sogrn1JBG3z-!B*aW7%Csl^GKXiK-! zV(uE_i>K$O;E5T@xV_H@4^0Th1GKQW40gdS!@aS&#{<{5InkO1;Qn#0xQB}F{xME? zc9s`jUmT3r7X{$e`M!8!k`o>p>x8GL`{S`Gfw*h9C!U&}ggb@?;Cv~^CW3RY8=u7; zH>jSEO7elJUiiyNp?L1R2)uYrEDk(Whj;c?a&H4+nxyfC&j2%YKP1f3u1V!aIXI?_YXnH7fi>`=7jgrY8;5G+kXZ)ZJf>#C^o zXmhFbs%tb!cpb9KDiKfcWfOWCor4hF)Q;GWKE!tqM?&vtD5W~ZQO;CyB^1|+aJS_TOLN=cG}xL zPZNBvW7(rGVBX#LVg6lr6LfcB;azuN*}eA}mAB&IM|k`m%zJPH8rDxh=Z!o8#fRt&IVReR^Rgr^5elDG@cF5KJyoNx_cPtsh^%~udJ-B=|IqFY3uoPOk;0l zSv877<#bFy(>_~Y_}F8Q9V;&YzWL^xri{Jbz^*GM)WXc?wD#2wN0Z9a2o2j|*~Y+a z3%&1^Mdn!iXXvRF_uC&18O3Imn}wYXfljbdNP!)TelW_`3V1*Ma0EXcascm$KqtWU zFMlTR6oMW(_dk@Lg`YwyQqUU+I14sG@OT*7V+MrBLGPH|?T4PRxz^SScnsTsA>cWd z2M9Z*igJv`5cC|Kl^u0?>W(Dn*)Y&kPCb>|uOKL8b9wB858lDdnG=^2W`=&?Y!5Oi*XKWZAo)CvS zy19?Ax@V}fX{5NGpjywRMqE6Bpl^P+`y#Y^-3@V8~bczK>Do}cNAzs~o@)03U> z;%r~MI5!y2Ob^DLgI#gkAP?;9@xmpgPPm|q%Cg5B4^1Tq2m4_&p?4FN=KXWM@X|%G z`1?(n`1rAUyz^iUUcI#(cb%DtYsUNIEUHX(p*dByyUEWaK3!fPh`Dtk7+)TQLAkzY zOV{M$zGz7fL}x}gy0fFuEUQb!SEW>3#W5JvRgZ?IIx{S-y1ELLRW*cOJqj7I`9(;o zYeYsjVb{@v_@1GN=^lpYu3^X+ITgvnryz0k48)C{gOuqjP`KheRIXi*%5yiM^qdV; zdN&Yyo6vmi?F8SgXt?GUG+loOM(lc$@f7-YK8oSDJ%urMJcEUgzK+vh{5MuU_6%m- zaXaSh+KH79-h&j7bRS60&DMabT#k9(k4D^nep=W}g1AJ<|J^Y&z4;(etR(4k46Y#XIIC}IjVfM2D znxOP6zlFd3LMz2D$^#O1a+>q_0lK`flW%!*6m5*XXZ_JrH2E`?DC@PNP 
zk6svZa{Q5&>W+jsCnPd5Qk_v;;DhG+C^(&7?TfRjy>L}~Fs|(i#?7PRv2jo+u4?wg z4~^&R1oeQ?o0LNhZpDg;rUs9*gM=0J3770u_xvx;o+$<*xBWR zom6vs396?i`r)}5VR&X{tm%(;U8@VW_PXPxWeIqgz}q|82`>}U@#U4s=;%glODCe+dJ#s88{XQ7%n_53GIWyZsuDkXHr3url&`)B)oZUt=^5)$ zbmlcEJAV@`?Jf+t`5_FwS)G$GF|6Bj@}Y^YU;M4Fs-$}5ljrzVoO*kjt~?& z{+lfDQ63}oEXYViqx3BB4*l?>spAv8)QDko=m|!Glh*CFV5X&Qe6YY0)U;1P6Eqb% zZp&EPtFkJmdOur6KoTIo=P{*|`ccPKo=e47EUTrC+cFBfZW=KR*ev+|90P$j5PE7c zweXSZQ)7g>o_x(?vc4K+p%&Hee>!4DT4*$d8F``oQi+7$4+PW?S~3n1U`Kepp9wX^ zQK~m3ef@>@RWYzpLyZ?|$dFBTz>N14^!i77eD=-v_~a{ETRx9TcV*H`sVmF(1fHNL zi!0zs4RRmX4-ka=HOhj}Gowftw!4aBIRJVVbp6oN92WxJ#{}NT2dMN2JcXdAK9&Qa z=U5l;wA3+&8r--41AP4HK3?k`UgO`GF=H4?3B9U{cr?~0qPij+_0~Xmtk%;h`CccyLMt zK^216mM7xPHPLu+aR}}k>x+jcMdIn133z2;27wigd-@2jDNcB2br@cp;m>udxP@uB zcYHY2q7UvGAB;z5hv4t0CF8jz{&;S&H(p;8j8~TX;GZW4;-hoo@ZlLL_~+7C>?OqR z9qx=fdYrM9D)9~~%I6nl;-+>_(>G9ES9Xu0DqI|f$7lIq&je@eo#KX9&q%~8XQkkg z6_L1mUKn;vjlkty9t>ZcS?7*bRbH4^tbBpWDC2=~d77Zy-;pXmJ%}nV0L`?wT{%(c z%#A`zRtOrY^y(>(oAN@@Tc3-uLp%84mYFfQwGHhkt7&F5qKH;EHa7=(ZS9DwZz1@4 z5Y^I$xb|U4?i`8Ko-s%pHH{W`4svI$K*`cGQLyYRWG_7%g{NMInu|B1VciZ?U$Gs% z+aEOQu7}IsosVGT?q@ND;G4GhMa;hMC9Hhg_Z{J(_?Gbaj(|Bxm|3+Y*g49m z0Y%``s0?mX0e{c;t`QcRyyqMKK-iHL%GRu`%}RT(_=!IvKe3$Rus%C}{wBLM|7PmH z$3Gci_cQOu>#3tNLuyj$O@LDtXT?|A$LIW<&!nZ2l4{-zB{wyRNxx7-hJbg_ES0cS z8J0i%lq#i!fKZVqK2<;8*^rkM}B4$DhguIT9bs@k`UCD1fZin61}bQ=x&ZfcS|C=TT|e4X^S7$wfSIO zi@UM0w~vj(w!vY9T`+Fw3B@gAXlsXuq?{d1!5;zBn(JHrbv&I-Z{jK9#{K0GcMx6|I<+UHL2d1F_fFZNJn-a{q$ zz(nW%=Ly2obA#~Y94f>qezqZHgZ%;@zqAJW-qOkDjVDj2+a7milU1TulJp zf{L0JGeV=B&`Zk6Ms{5-(wjSw(A)!cS&3~PjKpSIUPj80@kk##0|hgeqIkhc$eVu> zvKE|*(o-)%)1_Nbb>T+jp1BTnSM5a4O%I@L(>-XU`fKHK$juL8>RnG`-rnahd(YEY z_`qK<)0`<{C-Z_gggrQ(}@>n#{`)n({ecLBPuJslJ7Itx|nha&g#GGt$oi>ynt zk#=D^63s5Vs@}gD>s{ztT#u;^_I>AHNHFzdA-|{NF%tAlz*H{{p@L zlNboU<3k`DxPGD-2tB*~KcT1n>ib7K%+UNB$6zMd^|!PdYpNO0WG@LCK~DfZ8!vA735-iJkV`CZD4M@d!`>WX4eTNH}gLVq-r2B4uV5Ot-&s4ovkV`VIy zE~RW(htx(z1DXl79XQcFva)!c@prcR`G3`1OFFXHRF5L@4g 
zl-^NDA2}I06R7s4EJW7SCCHt75~@zQ7_ArGfSPl!L(%D1qVAHdXjk3dRXb33`Aul# zvitg77`bUT!S*nw-Sz+$-uomb?x3=}b1%kk+kv@v-Hi!1ZXxi_$KZ`;W9UsMqv84q z$iA2mzAPWf7pEclLaMy;k`cQ$7IA09BKG7M#4L|Q%z|+AUDyqN<>gErO3%WN3PN>w zztZ+PEbp)XSKu2M|LK^8;6Uh|2!6+J|8Fr6dMAp3z_XA$e%k`iv954H&w|fPJftPV zLQjnkf}Ux8{lQU3XOa;Rbl=MQ3V2#-l!Tzi7&v6?c<5>VUZuX51sC)LI|Dp^_J{eI zngp->J6V-yfJaa|>~epDi2%q@jy{d?wUZPHhO)S_x3aUcwE~>YuB>fMIjMOxP6Ry% z@IF75Sm1cr*%0(pcW2>tqA+mlS9Sx#0X@R+6IomhMcao@xxQ@Cd=%%VAulZoMHyiz z&kaFUK`^Qc15uIh$7K*|O2P@h7&u+h;EY?wM&N}tMR@J>QbI2dS2Q{q@ZCEz4R=gP zz~-I+T-WT59lZgxlwP>K*M-W>1#h1gg||+R#H-5#aNh(%hQPaXm=m6x7mRD{|IM1H!T3KE(pbgW4Jxq8IMiz!h>qSSQ3N>CU{`) zXjeQvD-gSe5|$lK*xu%X>ua5GYmXaVUX*}m=Y-=9TIn5KPS`}a?II+fqpEy)c^dW* ziuVwd+XlN}J@0un?eTdHnxn%9i*mg%zd*CQ`D0#LD5h&#Nm|=s>AvVr_QuehFmz`G zV^nz(h8D+RXlVl4v%^pw?}euHKr=UPWeU|^O%BEl=|n?yCCW=H9hq0E)ls<$MU~}9 zD#%A#Z9Nj}T9Dk*i=>u8NFeYM8oQ9zJp!qNMkAGqFKxsWWYX#u&0T}qH5Z_1?Nz8h zYdz}Dxfbo0Z9^07t{~WS$riLJp4`sxBm@#wyy4Y)%Ty`M4&r9P6WH- zV<6}hcAr3J!Dpdof%hl1*B^r3_p-HwoR-G!_5;w`TMIqY&>^rr7OyM|z@t+LIfChdF>ZK*mh;60w7bhf@y`|Ec>knW z{Ch#1Zzs=!!&)sz{>Zi$Z;pFWRy~jp{3jbw^uC z5+)DpL}w%IZFxB=%av=7q1N2GYNQtCA+D$xiB_v$3oi{fNIz{aDV_+U%wAO#_}c0P*+ujvZ6fXq{kyGH3H>@38QXSiItdeM(=eQBuQe?Q z&8bv+RD8uzE~v{2$Jm|*4C`t}V+~6~tr&6jjTm-Zq@SLGkaXE-vn~t!x z(Fi#^0>Nt;r-UP9bvS~Thah-KFoNa+F8$N$45y{cfPA<*!_EwCMf; zy#EG1#X!g%zu$t+9=FgN7$*YY!1zCb-tpi&(Y_OfEpt5dG^^VGfZl*Gs*EtvWZ_!3 zs?P$@g3-d!l;cv6JErm!0@QKWR+ww*zC7nA!b=So3UyJj*^zB`7_|;Np!Y2_QITd$ zJIHhXl-PjRv7MqV^aMK7MWr8j7J7o4wYA6Zw~(_-Ga-cKV@OtX!x9?*tUAzqSbX8US4i**UATuov`PuO(A^0jw5>Ztgk1`6E zvOGd>$FM-$JR}eg&Pc=V330e-a3F3Q6^UC%Mqz7j5Y{!iap{lyro`ZZX<>MNSsebk zHVw}%^20k+d2e6niMK9t!#~dR!#~am!;7nf@c3LG0xt;9E(pc}DvJY`C1T%233&Is z7`%O6INmtT5C1$P7|+airW*9eU*`JZj$T5q&jnA-2*E#2&cvS4-h`MFo|+$K8YOP- zamDs7Dz-}X6Z9}0mY<&KNAMD^GySn^I6+A@x24q`Tiboikhp7Td2gQ(iLE2UaAC6# z&Zzar8TC<^Lv<(U&0)+f4#tcEe@rd#!{nj>OfC(^g4PtwYfZz9rW8zVU^J#BSR`r6RiP=lIE2lT3{t5HVFTVO11AyO);klECRr35L4BHqz2kts=bU}LT}I*qz;{k+zE40I%_4W7M+g#S*uX8;8e7ndI^SId;^AF 
zv;m!GUXJdwu0YS)OEKhp!tkQ2F!IXlsratPi1k}A?uMNhyXi($op~~9&tHtRHC>2W zo{#X85)rvJ7CvkI;CFHW{FVnGU`Y`C76ib59%H&c{3rP$WHO<*R?z#=0lj|kseaEu zkJ9hg-&MEgi2qaYohU5m2F5_O_x}(0DJ337I6!@|*?XM#|Zl9QrSmlSnRaVWjt4D>AQY-k9afcN=VN@exE>7ys` z{h)EU>bk<~33#@jo>ZQej`RM2o=tHt$Z1_!WIp>!O3z_!ZCGWe_~dg(Xc8vHKfXUZZe@qOE^9nyT(Kidj7a`Tomq^l!y&&-nh2e3%3l5#O`r1xOJE>o?e)M zzn+qcmrshsXB)Ec+2%-mz9k%=Z%V{_m&M|lrCxY=hC3~$6W&-IkFT%E#^+Zj;iD@P z@Q?F@@XQ(~ymFQco;}40FQ4Is*VhK&=|$dzoEvrzbH~%uL-F1j1$bk5CN?!W;bvOa zZM43(5BI^NGt+SQ=vZuOcE%PSyL%Y#u__Xe&+syqc-L@WY;N(O((}Nkb}wvb_r#6E zLUC2QFV3j+#EK#hT3bKN%k#qG(hw{xVU&epURf~aRnqd-MPgBN98MmZi}R<}Vdc;Q z%l zwxk3pWfjP8?>0$Am0vH0DlewG74dbQrk*cd_IB`iqz{{9)L!ny1t=!?3TCcE$-I+L zvFJ23pL78_&$tvFr(T5KwU=S&c~@ZA1y>S=>oAxx^s?*Fcj>j5xM?SbU%dhKXP=Ii zOV*(3f~g3dTR=6J1ivLA2s$YU-pjn;%O5TO1%dFL?GHaHJf8_Z@EPNcfYG$@C$~`N z9dZDVLEbqO-?8}p{~3A%%l$X#{l6$a8z%za@i7o+ivI>Yd;Cvzd;QQG2(^JwIv#Se zqCfnoecJv5U^*_9XP`F#Zg%~I!Q<+`r~83KY^aMzxYP#QA+O*=o#Qq)qTSFl+Zh23;>?BwAQ*>wKq`R*=-Fu zQwVwr+1yV){Txe|twd>g6-p~8I%;cCQB{u2tW@OYrl6&%41;@XF}SM&olW_0dT3@e zo>-8I7giPEftgf#L)~%T4DF-U9pr}{eG%9-A{n<0ji=fQ$Bsc%atjji*K-na^xisr zb9)-T*q(r|wq@hpOX6vDqGJU$-cO2j1mom;NIchczi-QUR{ul-9!Cw>tI@6Lhr%pk+^?a4E9c; z-5p8gN4tA7t?X;dL-FV|XWTzgt+%nbwU2hZ)dx5A1Y=8g5U%eG!s$gy9q)x@1#VbT z?uSJszL=Bejm71mSX>c?1r>x}br|MU24PlJFc!AN;G*d@IB#k#Ru3syfC z(uMd&DzDlO#ME^ly0#s$O+B=?qme_kS--sz&Zbr*F=b`q@Q_yqe*=WCF z6*5=#B5+m)0_H^{a0vl7-yPnwyx=p#2Y%E1;5*S5{$mNh5q|LQ^F-U4R_eT;jzQ1F z0O(2esbjIgrKKPyU}@cM>m^tSj5;L-z^C%6)3bn7qlT91mZJJT?N^E_Ep;7TPuo8VYHAriq_k0L zrR`5jz_7N~!p^QAQ+oYjIw!dh5GRXAmR1B~Syea|Q{l}n_QRAscT6et z#Ny^CoHeEZr;p0RNyBrouqzFdYhy61Fc@vhw-@J%`UFqZBzmJPj^IlQL_=W;Mh$95 zQ(d)zUTKw}*MOp`I^eZ|+2L+h8QN4>hohB={oh2|{is^^Qf#(8k)UP}pt=w)u>tYPL_)3hp z>S{EvJrnh3oQ&#Im!a&WImll%8u1IN;m02gk0}J+3>SD!@qpI^FL;mhhWAKs`0)qE zug@1g-Ck&0*-D*v*rD+H!`AOPfX5AO_bWXu^|C+``0TcQsf+-p;}(Q^X{r78T7rup zXZH&@+Sbb+(})WTBi&17?KuLIJx>4=;Iv=mZRibhE&V*7$1I6{jkzzJA_lg8X0{aO16B$g5Ilxj*7iYq^%MssKch=ry=W|f|nUlSnT 
z5Nv{;8ao6#3&o#R|Hgd+%XdFWo5QKKSdBaQuA*p*KNeY+dlrbNunnRpI#Jwp9H3Oc9PgSBT$VuLJ(tjE`N!uBL1~H z5w9%`Hw`2&)4Dx5I{^2L^2Tkn=$l*oaZ`6Ru5Sy&)qMW5YiMr^ov^gX9rN=&v5c^r zU!uv(gD|f=fY5Wp)FMx;?1;yCw<*J3Md1Q*`-~#DoroJM&}7Qg3>^k2{?M$b9G!`v)dL-x}Kg*>lSu;4FsA!SJ&5h z7Lqzgp6&jxTdMU@@qrfcXNJ4W~r*T1An`sLT(xh^OY za=eZjLj=7;grC%&>C(buDkG>0fKrA(@m@btCCbh!?0DYeq4%fYxeBGZ(g=_*6?~S@ z`AnY>$coPhtEp*1jt3yS6{&w%{vuXMy(_k0}H>jdiwRp=T}aK!tZa z^z6QY&^rIziU5`2}FD5s^f>Wyev9#D9E2|?glNNUl*O%5tU_Pzwf|?Ml>P*Dyo>Z(Jl#WI1Ntjw2g^5*> zm{1*q(Pfe7%?U(%x*uv2JW&?yhN37plq3eAtG*b+`#Q}y+)}}=rjg5fRMjb?aw!rE zijmpWj=1I?#8KHLwhuuJ)m~I%FJjw0v9S{&qmtaMjv?9d!S`rlSww9mxZ2L4uGCsf{*~B zmw+WW=w+7{VuFue+AnbECFtm-^HomQ5;O%OZEL>;tAL^Mx{d{vg`}>h^XxHuExiO8 z?bCf#UZL{ZC-~^PdTHGPQ}?k7P=M6+bRC_meOl_c6di?^X+0H`q{NJ}J3`e)=$Rx1 zyq>P9^K=b8x9+KHNCC{9b+i2t%npV{7{rq12TUg4&EJ z7JQ}?vaWd?^uAK?d`IS&@9{04>1*2S=_r1$EK&?FLN_YpSiy* zjKuRR0`Su5KKT1pq1bn00=~N|ANw{W;FAs6cynzu_7ZeYPIJc_%Y*Rv6em1P2tF~# z1<$PT#DUG}IQ&2%KHM3D{kNv!z^%DBcvmG3-cpNK)+Auh7%$vE&JV9H%_i95u(8!0 zJBRtnoga zO^qwIbOoBU_Sf|W;j9`rT-+Xuvs)5yMr$%=7x-aLQ2?PAg#}e%SXdK*<*kWW)D(v$ zZ7GCaDrVHjV_bOz#*{~4R9Q6o@NGGa2PgLc`Vpwkr>iO$ZQB#jH zg0Gw*yQ^QFq}+VuG_@hVxtq`%g5-{&NNDXN_z1n$!HDY^j>KLnvJq2F3vLRR$-^fj zb;J~8jhT*;Df7{>{4`WfUx4C?Gf**oCMsskLh-a&$elEUc6bizR;)qMym=^HG#9xu zCm?(J2qaHvL*(!h1P@I`;IJt8^7HiQqr&TPgG-wW+?uKM>fGT~O`BWk1&<0hG)%7t zjUMUtXXqK$>3@u)R5kWx?W|s!3&ZBevrDCU7i_F5v{19#dI?n87NE4xt_v7?X>kC*aXF+54*|L3oeI|*hy@&SMX(0tTEd?YkbxhaMTpsp)=$OuvMLo#t zYm|gqSC9Vut5G|Lexeosh4=rF>hafK4D@u4uB`+GhX})i%GAR3-S_O_HC>L?)fr{? 
z&G-0)0Q;W6JE(3Yj30l{bNpx&o~`pcWQLc~-V$_52CA^ovooLBFq4uKcwZ52N}Xl0 z$MBiHWqifw`-0CVg=c|hR2)O=g5ELsX`k$?>}|io6YTW9tQP!&j`H#f z)Jpx8Y(_f}xvwaGYeZ8qLGJsOYC^2Xm*Md62Q3-Qhwk$7gd z7ti&>(~EuZj|-v*$Y}gyT_B!3%^mMvlY#x$<>38GvvKbPFWfrF8TU^JHn82;?u)CN zJk6xvo4Vbwsm&R?hWg^(kzROZeiYtV?C9Y9&y|sQY?3pTpF19&=s_TQP(m*TvrB?8lQF+02D2(7F}o%X z3tQ4Kt1$(mi^DNEFBpSzgQ@g_(UqkXRH5j|2tZ|wGpbX3P?jEw!JTzzX=&oGO)bhQ zYEW5Ki;B`xRPmQ5)l3H7h_nvb+u=wd_!66Y5z|O}+epPn_(iu4L4x}14V{R@zHvwy zHi7Ge*QhBdn>-hdbC#lf;!G5eos5$46Hqc?A_~S&MDF-W$e%Kcs&653rcOuJlnF=~ zKLlwLJCQi93gN>t;6Er5K0N{O+~Lk270*UbcvgGDtIP|Y#h!32bVluj zY9fzdA?buOLGKvW`k|+n0APWqb;Zt|I}IQ#tn`mr+sdLW2&S;+^}(z zfu3L{XvoGMA-KNziWZpA`#~L;xs>9Q(v!+NW^ZMq2|^7m)0oOthCK}%>=qU!OI)%0kG%ZABjTyH7&W7FolIJ+$OVik66`!fg zQ$5}Rsyu#<2M9bt@6TzY{-@Qo+XMI6_+X!6Ki++x%I`zP`{)_eg@~vyWMrkAE-O`4 z)u^msR92(9TA9tuP*RqItlR`R-8(A>4=t?1woxhAL?v+ZxL`cIC?5Yjw-|p}6i2%n zg2$&t;K^Cxcy4hREo>}4z9t@@ZjQt!o5S$^&P@F4qA>jR%wVd(Ks+@s5U(tY#9L=X z;LSCGc$yG=ePs;ZJ1-5NUY&yvu8hX(XZz#Pg&ufubrhan7=YbloUnx|PLl)OGcM4y z&~EAp#HIDFxV*^;>lin*I1^@Zczt0So|)u_$Ehg)N}K)8DFooMU_3qD2lotf#_l1W zcxqal>AR_xc4Z@PLP|Pk2!orG3EUSyhtm05iFAT=4 z@<>cCjl|TlD9ox!!sN<$45Pa1&hSSkqboDe0IxeU04>R$s84V~U5Yo#QUcLQi#wPf zzUuYL`GHqg)}W@m6g5?4$S%xBLQyfYx@k8%X>VzB6B;`a*U(Aub(#9TNCGgbc@SdS zh9ItsU>rOase{KLYuI=LyOJ?eP&{fPibjk@!SLb8A3hAZBSs)^>^Kxno`IYR(~vn~ z3X(>RK>WxK#E-5)^r&ov42>f2sPH;H2s`fx7}typ~`=~wBOg)7uQ{Po!Pcv6NI$XHMCDJ?bA#91R1>q zBMVUNQ&_m^rLq<*7Kl2grLL!AHuPNAUw^&1hE;O5ypHR9-A~&pqnX&0gj9A+_f#39 z1bOTS-GlJaSXtGP34Cgt&}Stzr6FQ!xX`se`|MM=xw+zsD=s6z)CltqZKOUsT)i5<$+@F3V*t!1MXJ43>Ua36OTCBzq*;qpE8>vkN?=3hvpeJAn zdR%_H?)02|f$FuLwLxSm^zs@T{dh zrttcKci>~ipVLMuj)&jzF%Wuct$lyr$NjM%`)GCBJKNyv=Y@!fFy!UtptQJ*-@jT^ z(`^ZSRT?}{SB`?xEI2)~s2oqPYQcuSXlxxBja}0s@#3jjc=wW0{QK-&yme*)-Z`@n zFE5G1<1>Bm`Y9p!^qM3bxGn3Lj_2ox;HlZZcz#I$UR>demsZh|&hW?1J}2Bc!o$p&`{RSmaOW1sdort&PK+(s0ZwjiAzt$Ly+j zOeu}V#FA(X&n56^XS>q<(ZOg>4?uTz2!<9$qAk@2O^I%(h;~9@G;LvFGR6!aWTu9! 
zsHozHUX4ac?dozAQg(->r6Q-j7m0+OMr0(^wIh+R69D6y2)_CbgmZsrZL3K!pVBiN z$z4N`F=#kSMo&U!_b?O;9>o}e+`hrc9l{tk3`Jwdqjd62TH|rZ9ybXILkA$10kivVIlXknw{@4WL4rc9Y)_9-+6hF*e=EwA}8CQqJhGB2yF z&Rf5JJxWSS41_v6JMr$j?;2o9xs{ifn``OVMHgLUfGZekz73tDV`XJ!=D5l$YqRFt zuwc{|NoDT8|9%50J-gD`%huY=%j3t7H{~w4-~tpD6~T$eX3_e-rRQMzt=n$Icq;Gf zH*CQ8i2(3G55E%#D7DTWGTEE$e7>%(F1)5Q_UyR_8*b37an3mTlvB~s*=5#8jvR#} zs^25b)?K*{#f8OYe;@zWufOp(Q)j68bybgOhN~SU^tk+v_xgqaJErca+$`|E=ud;M zklH&2J*qr4W_-r$32+vA18waw==CeTfzT7=1UnlRcD6q6SmT9Jcpn?k+4$tZAF%s> z0iU4v!G{M-BGM0(LZ0Web+p0F-38v>UPw+#GSI7%+hUZJRiM1G9M$z|+2v>Y$f9c8 zIi(P{Pt3ut$!S!5>4aVm{zl;a>zo{Xd|3rPIIocEEeg-f@y1_Q`r@NYV{zc>IDD}w z5npYI$C2A|uAw*o9F4*1~fIXvPaBGhjo|+Mg=jR0D zfzi%*m}>7;g7mLT!?9<$FYfB|$0LN^gX2T6XQV&w80=}1o@(Nv4IO@js2?t<@xZzD z0cP&oxrKh1Q_S_6C@igu!;F$pOe&zwt%}2Fs=LtzVN`jc=t@&!QD0M!H?$}QJ-HER z&k99bW)SM*Tu?*kRYbd>GAR(Fds@)j*^ZhTTHJCaAy9{9%I(@}q!koU?Uf^0!_X>P zkW|x!+G2o*KCF2Am$v#6t6TkHXO0R+7+p%MZfra29_4W6^|J{7y zH65c~f{7*t(icK=U#RSZ4?g%Obti=-D=N@vav!COSGh+XdBoVug$oy=ySv+fQkCPf zwo+klzx}p3Z`ZC}rovqJz2%l$j5<@xp|-EN<{EQc>w=q}^X|LvHc(Xko7dJF?M$=kE!gWNt4Wb)%kiRJ)7>M=eq2&OU?CV zoi#a;%Im%U?QefG=V<%MC!ahvN%=z$J!I;yG;>@}PmeiQ&+#2Wq{fKvcx~0seMgXe zO%?YQp=LwC8wfqC^c0S=3c^gvP|zc|R8ME)SW9nzBZz7JrK(ey$<0|_pVz>!&=c@% zORa6ZaD>LoDjbPGk0lNGFJUqy4+wQnQ5g8CVM9Ts*V)qzCw|869-asY2t;ynDhdmV zO#+aL%1V^+J6KUm_)+aSJ-DC>w@t{!j?t-jctJ6qJGqqhvjlIRlaJ?DX~t+y(6P=|JV@h8tj2> zw9EI64a5C};9aA`v2$1?whxcR70uqbsELZNE*NX-BCxzV3@hp)v9d89i)v$NZNo9C zI2_}PBQS#C8WCf!yFWf|XMld?_qR^(qq6uzjN^m9koKYF)jlPCb45!6) zjLcBI9>J#;r0OcspVyme5>N^R(UXy|)kj9fV^GA=yk=5{OBRl=dEE5QrI6g^6uck%VNHlwNxc!rKZF*pv*f zx-hs_d%?Nd70xwMcn;V(l@oa71Rj;1SFtyI^8FByO`ATUi_klapZVXfMBlMyi~-O) z(Mzxp81#iz$ku8(oK{@F-~yYJlfa|0f~AfNE;^quqle%deCY2IEb7`=}--gx8K zGwJ;a@`8iPTKEbqy0+@fOg3k!w*J)i24;kv!me9et2q9c>iMMFPE=24tSk3R*{Siw zDm(}9bPmt|FTgVj?}X6%BTQO&#);r}{PywSQ_Jl~{b45k=Dx1pUP8|W&Tj7T^zuPu zWHd4}vrI!qrD@3}@M;9T3OGHyxElA&EW*~|ad=>U0bX5OjStqf(!RFfnH6z(jaK(x zYYXtoiX^JM1pMQaBz$;bF5W&b3x7W=39p?V!DSS;6Y6Fe*7`{PRqIymwwO 
zo?qyN$EJGW`FT-zd`c+pALEbP`do4E#1Pz0HFi_4FSd30U~8u*?wb^emsX_WiFsjo ze0CUaAL@p!tu6+9w@~Hn9_dXb8ID(%M-X=Y1fet4qbL5lA_h-RPa^PQ@gO00@0dXB z>~_W7BLcB|bTIZ#h{5g&DcCtG0oQf~;i}FMoLcFFwd#o67KhVYYvpGh^v3JuI1ivt@MCvl`C8;2)Z&?xEH$-cAoGp@@Pod`$@NMCBJu=KKxA^I}uCG{w{ z`1tsk{dPt(DKIG`&38L+;vZRIs}40ku5HNBy=Ain4%z25Yu1>C3!U@80}q%oI;Q6k z*aS5lv-OiRXU;U|>6pq1@Omym$SOB|*>y}WohMl8v(m9EuDHV7Psas*y>|;qDOzm{ zda~tO*EtG3x894CjZ~s(4ep1YLfvDIhu-nvv&U>0wKf2H$A^WU4O`X`ya(T#;&|Zc z{SOQazW)@*AV|=i$m*Wh@_vW|TzJ+bBSV z@?G+KD77c>IX$te5sxja$M&&F*fA~+Ppr(t+m|U?; z%)pe|RE#W&!r+_`^kn#>J;fKT89``B_AyGYJ=Kd#57Z?3p(-;Lo7hBFWm; zQ0bLcl%cY|9;t;Th$$?imF++jm0dU?7ggUzFndO z2g=JARauL$vMPkqI)_))81N+$b}1dbgdd^T+>E3SLbIt1K{XlhEQui80^w2W2hTDe zc$9g{K zQCUWAmaP+9z1(oM)bS#(-PORAAOIK|^KjIl8ZY zOte&Y7z;eL%38>2IdI#C1)<$?BaGTTp0fTfk;eDMnOR#$_c*miV6chr-zo*;?d;| zxO-+kZkd#br%x)vn-^8%(~X_@+qs2!c4Z3gpAm*f=fvWfMaj5tN-!Rr9)Ud*BXRe* zNbDXTVN%KO9!)hhDuUsQmzGB2z-7s_!AbbwoOHZ*W;#ARF9okIsZD(!0Jl9M|?l z;D%v|xMpY)E*q4H3;WV=R&NGYwx(cVV+!UpreRJ?7G}5PU~)|gMioa9d=aK4wVsSVL> z8jV5tHMS$3mQ~3`6Y4rlJzr97C(@e;y!OG!?HG*Irgp>#c%>BxEiFeF|A>*a&oOm` zUUQqN<4dB-i*Kwa1Z#QyQiN2d!KW|+o_T>(b3yPcRsuSnzF?Tonwy)1{O>Ll?`3z^wUo_0MxVD@_H7P zef{;<&986G6|y?nGh1_puU(e)tg%>?8a>AoK{nUMf9TDm;aNr|v** zZXO5<3PDOr8cK>vP_1q(m6dRMa8W%TTvUrabBb`^{5%4$8UMJX7H?iyhBwbI!qY1f zaSs9YYC>{Si zJqmwYm55iCM46m=FD-S$M;9d#Qh|74W*GL24Z=-b&e+oKgsmOURD15Yr7r-t4i3T2 zK_S@KOk3L*j7O%W;^}$3&!`~WF~XM?IvIaolSc)ajF*>1;+gqj#>VdKRe}N^+%|;j zQwow0-PYlW9eupsuqd2cs?_oBxNSlz_RP$~j;`jS1+p5}*^j1ZK@2cn@U z12ZR#GU=w&a$8CGRnXoRQ&E)E)FCP-kM^`4v2C4*Z|g#Aa|fcS_M&KcqpMnxP{aMz z9Z0I{LRw=t(wYdgdLFB+CiKc2!D~fURUwKHjpMbFn_3u6NN%h{dTT9`8cPsSo`!(D zC`|V}g+2Be+Su2~gT! 
z!C&l4F1f_)SBtCNFPO?os{^vml}*%sskKv1IR$NPZ3Z@a>AK^_jWfsh?Ac@PYgL{A zuJdjEq*R;Ur{H}4`RAj!Sgoo~SiO3+sf*KbYj0)stQytz1Y@fT^||O=!C9Y^Y`VV8 zf|&p(C<}T5r`|(RQIS!91qB5-@4WNO{dF&MO|DzuohVKOyn*E%(EB4yUp^_gZw0+S z;`seqS9!h9w@Sg7H5$my}At$1{46ZXt1!V@bh@bAmo@#tG7=ePjuof2WPvplgNg{m(GcMOZiwszXrF~Rr?t?%DYO~P{v zLh;Dt5Ii*_91l-%!Yj-DuN7C8E*&!(GcmC;0e#s)=*|vC8{yZQ;)Nz!T=m~;%?L(SaxiML z;xMYa1tWSp&{$J}igNzSRLkO45HK}VRwalmDWfWDp+f6Ma!VKD>e~siW`e4j7Pgh} zYeQs3GeOs6>h==Uu)%X9sBFSZiV;>)f@oeZrlJaQl~qWrsYP-vuU%V-jD|`iRpueC zB#l-&5%I-INGeN3N?97xE3=VXL(tWfqOw|`t1{14Lls)9d}QTSm^FR&AEfJF91lIE zm$IPJOG;_rwp|~%U+V)E-SID#QyKdbIPA+_M@vCPpwP<#N-rradoMvs$8-ky!Pq10I#JKm|i+hp|bWI**@J>YCc8rJ!i0qwELR@nIF;@xU9X3I{^bhCT22>kJIl z4=QH_l#b;AR*V)2({X%6_!DWok;#A8%}k4z7wg$*F+{7g&g^Yat% z?=zC90Hg8jyhuDbJ(~N{@bv6>ytXO>Z=RBc`zH9|_F*2hw^6ulSTHs;yW;M#(RgBB zI(8CZw+{C=>ECZ?cE@8gGO%l?AMPCHhR0_I;LX!A@YB3fD`nZX>%JA+)d_{PVQ?+_C;-S5Gqr`(N>;|DWe7( z=v5MWwFHvMvqxK+TV8=ETHI8EEQc01tE~qqRCCI)7hc|k@X8iMRJ9^g$0N%DMb?HdI~|9MChe3lFCYvP+Wkdk~}07rX!&s1!=_@$giettu01T zO$nnEM`vY0LSJJ08n~`6seM^k z9sd%129~qy$HU7m_0qNUQdxUH3qhSHkl3YODyMUGp33PQ?UVXbpFNec&!^*Bx94ly zUR!V)2qo7w`#sAAh(XpMHD* zpD_f!!M%NOC-B^i+9UMb2t8K;(8a*d*DnAG2}vl*FM`wU6SDEpf>J!QvJ(Hguno_w z%E6P%lkvvc`387zUtEee&dkMg%hPCQlW_mkcWC+t0xukzO=TB6Y(tJx`Vd$re3wk#!^v+5`GbQV5&F%vN#IQEzQ8a zlVh;8%Z=e`CIxzAW+EP%nt{C&(s0L^1Uxu97uR?DV?$2>?w*v4O|;PG*0|xCVM(}U zVKX+)ZN+6X+Hk@2E}S`~8_P#FV`gVLCbtw~VnYtbR;OV^Nu24nGPEQb-MJwcR1j{K zol<_8RC@6~s7wk%b!IFk4eLg4dlP|DX6h2k7)o$hLWv!ho{gBye594tpoq}R>*zy# zZ7V{{8xT>`g0Si)gjI39s*W(MLy{~k0hd@-j@ZINB+}BQ5}xUVXkt+@Vsdg2o1KM> zl0p>Ll%s%_wWy{TCDlbJBkU@;PlK^5Rj)xyTvu0b`Y>s@j#{@gWT%QzQC5k0v*-N@ zc#fB2hnhD2Xx{iUJI9Vhfg3?2$s zRb>!xu7aJ53!L3O;NszF;OFe-0snwtq$H)m>ApGnxNlAtA=iM{&Z@%GgxJfc=Hk`U zvkAK#{Pnaf0w)l^?3w)aJ2V}~E^nG%MF=fqM~CE|f8NqBZqvay@b zFOH} z6T8OwQH}ZGz8SQ?vqE@}Xl(0orvmiEog;a_2~mV=HeOksgntpxkIyHJCsWyt3&-y9 z;do?r9QIC%G|5D7>J8`h60mzpGS;_wV8@tf+%qi=JE=_9b%$Zy&_rB4p$Jz_uEAw9 zJ8<5#Zk#%?3rk0|U}kp}CbyPgN^21&HRWM+Whw?2Mx!@B%)qWEKO7y|A*Q2pOL`z` 
z61-6!>w~J)2#g=pis8MTCXKvcQ${tVZY*j^DlD!*N;)lQS`IQwt5HCUn?v{|H+Leg zu??|Jw7B(6h^=ct0s)mu=%o`UOv@hIf{AhQbuv58coVj zPE}V?P2g2BDhNF)y2^4wkD(DGdJa98k|HS0c4=uDa&z-Ae(ZP$(~faUD*}gr^#5`9 zAAnIDS-Uqb69oe0oO2GNnbAly$~mJPKmvqB5F+QCa|VNR!~q*@48{gyf=LcwY~!5Q zUVGO$2XNed{^w~?-nL%s-TU7E_kH)iXZN_ftE;MehN+*P(-m5I_j4-+A2xg%(`f3L z&|%}JDR{9AU75r?QfsruFRd0#3o;i2*pFidCNyg%mE-!mIM z>rx;hG?<@xSSOFUWm%>(pQ?x7i-I5XGq1|8&e8RtQ-P_#k5JP) z)t#pFMDhLYZ&Z7-|IWLpDXqZ3UVULtwI}M1YLDR4oDh_7w{QP}@bvJ3-T5UpoSqer zm)F?v{4x`sUz&_J*IDuU>J+@TQjQnKPmHa!9_Er`Jr6Fu?VY=6AAGz1ssdPoTBi!1URM{ZxYjMHdwSBR#)d3HVaKzDZ0XR4^Ktf+%JUmQ7>ah`c zer^KJ&IrWI%i?f&j2Dh~h2r#-Se%*^je8pg;6Vw)cU5`d#{9ujPcR;A3&u6Y{cu~2 zTw_EC9_Wb0z3qv(bwm=b9UduF2_!UV#~Hg=UN@G+}6Z462d? zMB#a2NHSjr?xBs@%Zrq7H{1!ip^ixNAAqzFSJW5SFluN6vNJQa_4~~N7IqY*iQ6pc zNQyThJTe|(aRx+~(-2|HM5ur+C@TlSxp@f8&KC8Tjz9@PgGJ$m2FtTa!UAdTih5T99z(%@skxq};KD|DB~yS<;89A9#)jI{n4hNdE8x(04T3_| z!L0%Sjn`2CBkWW@mRDdRNVsKQZkbL%uwGu9b?NKjIaQup1yO>5TXjrm@H{^gufdKC zc2F>l=a|O4JSLR5ui(V%DzK^YJWp3?1P||v`I*i-2^N;)mMg@tbA=rwJV$sDVycdR z=L&jPQg}R8u1{Cz^q}jh_&yc?Ow=7+0gnFVXP@J%&%e<8TXvktu>#*GABs8@=RQ^5 zM;~#i`M=>as>b*JhWe6nIQQ;{0rGhr1bqGZY)|&>t`cXbnilHv`Q9Mdy|ytQudL6+ zGfNG4b5kZhx~>#|zq$|~UsH^ab`;{B_33zSUZRAMu{b^^PLx|Db{6|%XQ@B-)CXz6 zdunC#vb5N+(fx&H^Q0obK|LaMce_OmW52GrjQ4EPqjp&Z7AI@XXY3oRskN zQ32Yqb{E`V-&?LBAZm8So(5O!ZT1kw=Y+jOop5jcKs+!^6lF(*gsbs5JR%0?rY7OV z`AK+wK_p&U8iOO_y>YB72&bpU;l(9ZE$n`Jb^;#lh`}vIF4#LPK%f_d+o~OLZRr5) zmhkqTkx{r?pm#${Jhn6^;g(6I*g0ny){UveyrBh{-jI*Z$_$JuvWTaky~vD~tOV4h zgop>AR=hxfmmlqp+z1!#6_JGz&d3fK3|oi`YI7}^Hhv6Bi%XCyA*xxR!?8kE0Z_Vy zixCK@OlgQql2A1v34sE%0HX;3qT2Y}&P#ygD;{V}Mwp~WixNu^rDqU@W)cvZ`8-Y_ zM^6>-as4neJ4?(GPN#9Di4<6sOL~gY3}d1RNeRh_h>S!~XfS;Iec|Tm4mS@s&D~|c zq^ww)HsL^5(4#BpaWHbXI-;@RDKM!tngR)p=M^aQ#&o7BWu{C)i*t23f0l!c^?CIB ztXIKCZ@fP9@f?l!q5z}Pcx?qV1zhzRn2+a}m;1aY*2#PdhAhW@RX3gG*-1jk5gMvI zr_bGc@4XsKSO@d64nm8N;r;8u$@9#|bA$%BtcT@Uj&&0<+%k>(yf*Wxv5o94S3qQ0 z-YfGl&!4#h-XEzvrm6HRxdL5JSI{HCKKt-veE!iV`0^9^44;Yu{ajS)mtWyK0p9mt 
z%V%Y;v7BSRPe16+&n?y6C!c(bzyIw$d?j^#C*T}jSq+a~62i)7cCzcOfv>N?p1{AV zkElNRx6o#62pH|oE=j{H8*}jD8Y|w}mX8mvEyrKC6ydL%^YOQB1^DZxEWEhbfYXyA zad=c19%u{|Rp*EM8~wD^P#&KijOXUZ;YfQF4z@&LPqo0R+6nj9Ibv_aKpY?Ifn#Ib zak4W&fai_-MS-0X=)E#eRN90f9BcEy-r9lKRbemrJ#alc8}bL?&LN(7fFWs%Kkjex z!mespJUA>EXS!1HcvlKuns3E(b48(w%6n9 zbD=ZtYxcoCO>Velhy%74^~0_8-nhQT54R1E#jT@ExMoBOZk<$y+h#OieMcFVjV#5& z;iVe*##g3ebg=~;0=`lC$rzp$izaIbs*JwK=4&CNJdnee3;Xp$Zpc7UdxMb`?uLf^ zR4kuA8;y-kNK4OxNubB!7KEOry3+XeCL5AdB-|B{B}jU7idg`cj39wtXmS#w%qGMN z1d|wkN_d-^o&lREM=Lu<(z*d>6d0Pgc9?{$3{%-b!Pig{dS-Jnl9J*P8xxJNkT3-L z2f@e37w+zEaCLVU(7DLg1s@gGRW}X#|e`Iy5$V)}emyib3YMy@x?cq?LV?D5cuZiZG#)Ew zd2U%B&odv>6s)*d0-?u;@+ytjVt#JbF#*H-U>z(^*zg=-!*aZDmB#!Ef&>`1ET@3X zEy2Qa9OA>CX->K0XCa7GIo8Ros^d!D6L`uM?DT%6^XgcE@;`yz*Pnf%floQ%rw5;c z-p3z&Am6LFgty;HT|a*ODJC`!LzJT%e6Q*UR{~VU6Kc1ZxhZAF>1%%!>(BgroMTH$3 z<$}{=JaKxgANGr)+t=WQyE$7TeSm<^1veLY;EoD!Z4uDjQtsYbPwZMABdX@L@hRY;_e1#?B+0* zVZPW=?u-q^E&{zc+&4J~cTFn5UDGRZ>$DnN-BpE+oz+;;A&PHU5vJ5+VO(h{MvKzx zC@_i}B+Ly%d7_uV%?0@}-pGsg)Sl;M1`vD>0zo$nEl$VQwJRme7>~4!Y=NGDiqK0- z)jAKfmk0@Bgpf9|9yhO!$z}3wiF0QWdm17@ocg;OLT;U<$b9WN}x;jgE zn=eiL_i*cJY^ZEZdSk=m09?*mSh8fvZ{yk602N@Eu7w@qoTE=&lxTyR2?^%qAZhmA zGC$An*s((^!v;TV)+{ZJ_rO8bZ2T<8`(@o6BS>fcRH5qLHf`FZl~?ydC;0e%unr!t zU%y`a-Z;K*!GZ<9mF4(90)gP;F~3J%kNX^}r^fFQC<<1uzy7*5XP9~DYuB#TAj?Ht zRT`bwS1=?rmFoK=@bNsarLNc0uaw>sdipfF0-u5&!&*J;=-g-87XmSEmEz+$!cqCB z64HMB(FgeQ(~ogU>iJ1PxMB)&W(G`m` z67rs%5rwnUqXl~DI59dA4>o(@Xonx3ni_&f+nw>mWPdy*A@IRrF}SV36T55OB;@qO zGt>RFS45th5{N^s?gC3LALWBv3q5gDz9;Uj3B%5EQDb=nG~hkj9wwkm#^V#?aC~&A zs6G$5ekfjC8jJU~Tk+TJ83Mf+oRDjsm>h{0mnY+-oZnsNi(8A`aCe=A%){m0CPv}p zOe1b@4#c`bCv2~gkhk50TgRs3wuyzfeR?(SoZpJ;XEbBu_*yI;QG%HbxtLI4#ppsK z+Vc&vO-4&*B+3%IVV5h=D~R)!5Z4(wLH63*+-!m2$ck*-ux%6O%$WykR-X3m3~eC- zOA3+&Sf+FVRJs7oD%(^ENiAjxLsO8RYC)PPJi1j>p7s)P0Un2yP|2|lTRNxJ7-2H8 zlS1-~3N)E5h>VJXpML;6M6tOG&|ExS;N; zhL@ZxFDsSi-wiy4zMif(rKkujhHz>CE`R78AWV2rIdHHq@0D+}VhZ@9#=gK2W0kZMZ)HpjrlS+tS;I#-h4u`T~W#Mp**jt79?Of4&de=0p3$hO^vpC3xP#vd0vB`gM+)f=cB%_)xl|Y 
zJkRS8s00p`9k0)HLWf(XDZdi@6!i422b$jXaQmZkzf*j24}6BFlpWO^oyUZoQhzGc zRq2Ex{R^%9C;0x0&+(H$?;n5v7;8Hxp)fcKLt+d_a&(86K+eUk7aRn9eF;4IF4(!D zjpI8!&4!b6GjPxFKpYqsg@;9v9h;blS6AlZ?e*FC>sFhDiUv`BL3m`WD-L#eYn>Ml zGzkDZLUDAopM<4@@#shaQKLWZD!0eI)qQb(W++}*5Q(QG{5>?%S6~>8n+qh=EFOTJ z6<0}l30Wq3;KJ+(oEjgD`|CY$OQ9p~EceAd)gidMItaIvy5rh>d)W@eff4?AUKHtL zogtzY-Ed!n1I|nd!Ko>}cxk1C%JU;}YDO5ImvH^e)EKR|_s}RW++I3Z!rQ^vJIovV z+eB543cy36ME8%2$JR1;Y^(Cat!)PE9G{7uQ;J0CRpO5M!?1I4JFcGAjOCizE5y{A zOpGZqVpLut+C}NLWXGW-(GR(?-pCMDmlN%V!Z>&2gbqYOgcCACT`+1$9&X*S4a=7< zM}|O;N{=f(7)7NKc1Bw&jGUr&dHE@;>=Sqy;+X=33;{&Cs5YCZJBGttpRZfZQSlkY zlVLCKK_1i^>EkRL&8*7cLAKZtANf`LRVK0H-VlT93{=!O_ZIeJuep*cnS18 z1&W$WsOu>(EF<(T(~TQ_pv!%AtinvaO`0@GTLVv-nq$hw#fHu~xGKct6fQLNZ~czWeU`t!^3t!sD}N&;B;6gN>P{rna#GPMI6!gCPQq z{MVlg6s4|9AIb0J;lBOz{Y330o}?`dj}s_)n=6V5Ho z5S5pRlhciOQo__zlTCPaX+GZHSd14ICE&RQ5qM*@0Z+~d#^X~XwN8z*lO??D2kNx2wtxdz%8WuO$qRj!VR@CO@sy;l|1V*xlleM>+#>WPA`F9_20yQvkOh z2B)V=$UVjlds+tJF;R8|-l0+cI5@%!d*z z-5P>xYyGghD;0ZZ7vsJKwW9JGaH~M?#swp=W#%xfnoy6$9YZjug<)I6STnRvSVTAig8bp>?Ex1L zQE^@_aPe`4n{4@<&RxLf>FyyeiqF+URGgV1_2OoS;D@)^?RF)^m^u{t&e^hhod7oOjgesEPrLxjri{}UkLWI{PaA*q5Z0x*$ z?yG5Zte@c_!9lgiscXDfmgUMUoGQk8>C8`sN+>GzNB}Uu3R^i1j$ol8rOG9Q_}vh; z1RIZ87lFuh!j|DO?}6obT?H-H$9zm@{e)h9eZ97?U-dYVZ`K~L{j zf}j5QcW~?B3VvURfBnVfXLnS4;@=DWE`9wiE)ss1)0svnGB2U`#by7Tz~pr$4F2)U zi}+5!H*NHA1o!WQnot8O0um86zy*#H%5s)Q?>_zI-@l&*zW)4odTmQC-rAOj=a#49 z!qRj+GcR4jMw5Weh;uU}oSYJYC#Hqs*?A0Qqj6z=Do%C9;pEsTJUulY$3*!(F)>)e z)M&gg$Al**#)wDYrT$jrYN+N#S^KcmQrLb-*3fjyO6#0>?YUaeQnT4h-`V zW#}wQkI(Elwk{02BqTjP(TtsSKDefI5N@q=6HxhL?+9N3ToBIAj>litS@Fgy6VA_% z#rXvZI5RU^dk*=;^mrU@=d-_1JTlw|N7@2Jk%}Um6py{F{QELR7nM>Tz&770V z&Ia3W&*{|QWot%vX9b_-8BAuxCL|&vItIa^;S#zA!pqwio?hMpF%P&?%`tTK>ULL8 zXSfUWJlq6qZUQ}5fg9aL;O8QMb90w6p73+`gO`)!@eqaQIS9VqgT59tlJOhUZv@(4pev zeG)1RCwa`ec%OX6r_S?w1R3|K9tk$8GQxy_AxNpxGvkoq!#eITm=FFKII9X1CoFL+7U>>IHf%o5mo^k~{y({R^^^oi73VKv= z1RI^O>j}N@{p-%Q_2s5ERI0cb| zJmB1apa#5NeFvg%Z+rCX)ffGH3-sQ*Ap?KAAro(HNyA?@XXB-188|;jz&0rfCnm(< 
z@o5R-@pyD}sHmk-fuT)QR1BV&Y7jMO#fh#UJT)~6&&)JR226fH_D z7|%*r{LCDIUaK2!Dei*@hI-<;d1-iTT$}(e1iLETaZ7;%?ksb`&Qe$GsSCvI6+XDL zCIlNZ2V-5j1GX2q;U>PgyUb0}y>WVqL7Ve?Vqy@U5D>q*Is?xyOVgf1o|qhsg95!{ zV?%JdD+=cY+~;SSMZG5Bt_E-1Bx>^7N>}Zf-aQ?0xVw5|h(wob*q ztut`<`f0dn#UyN-KMosawqxa_7AzQDi>X74(OH{^5f#~}Oix4=$L-m|QEcF|Jx63l zI3P3175T|wnA*{TgZJ&mn2~J)I02h95))OPK#$Mh*xSnIaO|Mqu`MHAQ+mlpqt+1- z5fy>Jpg{Qe`ohCgpe2xV6PUS5XzJ?hra{b0Am%A)gr6HbBV_9-A+9^&rYSrDp1Ze% zv4kH%=ng+odqJZ5f~1}>PbWl((u?(Rf+5fw#^4~dRn-e!gcOu@B%DGW0d;xzAAJAe z6^vjY*a!u>T3?U*?9hmdi_;nijSu40J{8cAAw#sq2e=$O8$FHB^jMzHxA^RiMraT; zZ2VMWTpgLlMoc4g)at`rSC0T=n8#;lv>7vI{1)EQlqzC*hMzW@O?#flYw(&Z$7`}2 zLr|s@B!nJ24`^zL3ZG#TFzT5epS7_r!j<<*D6%X=d#X(WhIv>QJ4aYI%TY1%^ASV@ zJ0Z>Qkoj2`^AXUkt*shhxO{$ebhK8EdH7wk9zsR|j^%ic_oveIu7@6Bt^7*RQ=sd4 ztXF&*;4UBk&M!lcKvSSoPS6o}1fRAqTQzp?D=I$1Pm~_d{rJVV_~D~(@XP1lpuMFE ziGFTqiA}@M2pggVdam{k=+oC8z55P8f4lyo@+8dd*9&&2e(eg-pw;{*SZayd}kQIdrqYsMW-H;pYBH^wF^39Q0G@}c5+;Rh|r7>$^tSGnc&{jfMtw*-oQ>Bq{ zrUt`W{%`{WBy82dC84UTv$&JMP86I5HBoWyZt!yT6!(I+K#kyY7ofShN`3*Lhd|9$ zR35`wjzIVF@P?11`3pD$y*v>tbtlR-%z=JL3-CaSD7|D?dsw|)ksBC-&axV4uf3&! 
z`v;X60jC}OYxlC$W8+h5jg5*ckLZCyrALLs#>7U!#Y9yo$Q4nz!pR$NyrCU)EjiXj z7_tnH2^%UHp69bZ!b>SjZrM{wMaA>1mmp!|<$dwFT6}!Gmd=g~_MEbQ0;i*+Lu>pz zR?h}`k4)p@qAI+ts;c@;J#rCI0!XPpcGjr(b5M;j@5YTAwfkbn2I0+fRB^1A_dzJK zJUdF*A;RjK~yL7=n8y#==F3x{PeECM_0h({(m_y>4YAi<0J8#k0W;f{JyX05IQ9DlsHJOwW=x8RM{Hc^Vvq5wVc{zeO4UKlNk z%Ln@!2H??=uDCEeTw5gcfuUa5S?PqktDUg7Nub#1C=40uS`N+!2(jdpsl|7amplpyIX^N=3w2Vwm0Y=BfARr(>03{)*7MhA; z)1J#YOPY(;$>8DcB+zpdz&XK3VCU`Pt%a^sbZ!DZhOVwsp3m*vy##jNe(?5`t(>R2 ziwN*Tbbv3Sz1@)P?GBrl3rgj$V^x4V3OyW<<2(oz{vnuCGqhXjX)NPR+h4W!;PQVO z^(6gQX+(M>oM;3ejSZg-Q<(x4P35KO)73r|jXs?XR+Uj(1zMU)W1}Si7_KVgIhOBf zDvj4tb*VI7Tit_7Q}ysOF~8cX&&z$DSLIY4%*S-z8@GfF(+L)pmk{DU>!K5MG2-iIbd1*tFHvAUww|R*mL{d7r?up;o31{&>9tuVUb2yode+| zVJ&-Z9qjtTUR0jlRsCScMF{NW59FI`QbpZ~(p#T~zwgM$vx}2)ZoY)Bt26P&x*Q1` zEqG+CzbK_(JUKH-+=Tmv`Qgq+Z|ol(hQpJ>@j$x=_O-d;>G|<^afuNpC;H>*S)q7u zLn_`}Zomt3V{me;kGMBpnjeK1=EsR@3&g>WFg(IV0mk^tu^Wze`Qh|bsc*O^?r-tO zo}t0Gvo1iwS`XY@>V^j-96mZWO2X_w+}G@j1Ea#Ud%0Ia>JwAq@zflnfLfq9Nq{J@ zzJDm+ncGjwJK!D(XK$(T!}V4E*i_<)>xPL+otuwcQ!}xmIuvW04Y+Yi1$M6(i#rIt zr4w=8qKVizrxPotbztGd5tuc$852iTV@z`iM$}}Zsl<%>>{wK#1a;5HjrT%Mq#Ls1 z{LoUJj}=Sjpk?Gx7}HWz zq9gDGeC}@Dz;kmStnGWT6T;P7%6P-wQ{2l79^T$?^P&5|&C?4$UcLf5Zv=?y3-k6u zLP!8iVF5@D_Cca!f22FxV|b(=7G#^zl^lr*FK5&Q1Y>dYNG-G_3W%Pc1$5GMwO0jc zFA2H~2^t$98zZ-L8XF>w4NRrc)V_ig8>BMT(Dg>D;Px-iJz8U}GHw5AQ`?SAkFO%**q7=>3uZ8T6E^JbM2|{QGZ2 zRevk@Ax`j7@%`|P9A8#`1fB*$?hELYGsOM$vrqBaw*tK{KL?(AZ!+Ha!7Hr$pny(E+%SUY zNqD%!2M=*^gArl4r!f$B)cE3tqQMegyW+9VC>(4H66pDhnhV37)$X{h!ckLwoPze4 zgxtr+2aDQF6lLm#+lLIm^~L>hd#x9)6?ksUcg5yXZ>+EM!mVSHaBW*WmX!NrZBqia zwrAninT@!8!6@7~uLC>gj=_c*?N~ao6?4Wkips0jR9;(M9)?z?p}sH)Rn{<+BzOz( zyipkCj*Kuj6q+KmXAB)3!;o2!38T#-;7gUTmdiKUkeWgOS`m{N2mc^;OnAdfpye%~ z@^TlLiDL6`an%6G@YYq7ovVP39Tx7Q&OE&&gmrZn7<$1?%DH%ZYUA%*z1-pED|Jce z>g6S{6SWuZ697X%5DbC-Nb>c7(Z>~5Ul&+h>`~}77?Vtq*i@Z|nd$K;l`ywR%1^GT z7cvoc-BB)^{0{=VpTyazAt?ASji1H_MPnmoBhwojp(?}k`to|1sdVO7rqb9bX*|b< 
z&DjPF{iyC#Pc6eeHb@$;)AL#iNHqO5SRYN@3(H*TJdMEM=Ti`(se7XFocjE10{_0r&UvS!GM`5qjElJ}N;fJdW2B;8FE`^64k|@XL?! z+1HZx)d2Lmtn~W!73e*;AWJ}# zrS;stx;96E7mueGCgbhRd3a@w4S(BFh_^SSOX!-6*H#$unkcv9UA_WEzB$b(zzf8& zNwL^1VdO&sszdDoI5Q z>x`v&195e8Ft#>_VRcmywhT+c>iR@%7@mQxV@t7ld^I+7)njF60~U1DVEXWKOdM8% z(e?Qf+Ge7@$bhQsXp|WJQ4r&SvUp$QhD*p8=8np&WVE%`Ah#d`@#aLBGP_qlF^R%U zw`L&KY(ZQ?EP}&=wF&;55=NEBP6iLx%Rpn#t2>98cy!OC(8l1o3EaBJ+%cV?bo2Ir zlK|0a@Ibf>c942p;4k0`mryx2AW-fn0!1-#$O;WclG8xM%U?fJKRYD%wL`vZf3!#Y zU`~20Mnt$H*SR+eL;TRy*g&rR20Xgx4-I)54Rw%WqyHm-{gDwwDjyp&k7;b|YM;h> z^~W^*wPny6ex|8w=&#Q_3S2Z*mjW1%>CDf(EK9(sG?pb$)HzkR zf)|guWg6?GvkdE2FryPl%u8q~AgX)dIp$S$>LI6&>8k9Nz}FLc%6o!~{%6p;Qhw#% zNqtJWT_pSjdOv-45x-oNdl%@bXMEaLTnlkITjR6t1qr_V@(XA|cCW53#`#4yJkk}8H#X+u z#EfX1Ut+==8*=d4+AO@cwFqym%f@>fbMfTVFq{I@vT2ryCCK(HBQZQ?X1+(fhF{3dTQ=9V8RiA?qWvOT_OF?x`3<`{X$cgnp zL97>YBR!B6?g3kb4~A9dBPY|0)Ql7uY-S`{Q(&}maR8gPUY#+?fPesB`1*OmQy}Ky zE~?Iz-VHoHqoezHOUNsstFyq+O_UqMRwqwSxJsz%?!_r(PH=V^1pBM(v}rdk;{Nvi z5$@)Kw5SM}0|QYQ5ryK=Fj$-&k?A-H?GfRqmi_F3eULfO4z)h^5*GNNGbsS|k)Fs7 z_r;iD0zEM{ae_~4>TKie)c94L9N^|M5LBa9))Ovlth9fZu8!HL^;>1?dOg!wUX|4w z8?`c??^!SFQ`aUq{>WHXU7HZ0sd5AbjmHEFx2#{~SNFg)l~)0ct_K?Tna1-fT>(v9 zSHbCr9|*QT_Dh#IOyysG@gf2DFDKkoT|EKFbIKI}uLLi>n(GNam3F24S{^>5<8wWM z-VXx09|d}v%Ra+g73vaxe8$HP4K+yl%P&61rvkmtzx+hbe+7K}86Fzng;nv%=nRNK zp_@PaoCaX9OMeWs?~6hG>@l!cfAkaZ_3PUgc26%($MI?r93a(Jm?15h5UT!NGP1=ncf4hCw(tBNB(&JVfESYv9{c=Z1Zat~e}^Jl^hwC;2>a zdZ6|i$#ZkU@Wd<$ohJxnhkD`uwg^#s;kdTa8+#`t;l7E9xUn@9*SADqMX`jwb&)EHvkz9PB0H}KwSU+ zFi4#?e_s@ZgdkIZY8lWUH4;X5c=)1Lq<*clGlmEIqDItRM~E|~CI+A})D5Lcp=fWe zkyhNzHlj>xzT^ZCxRr!|{L&2$HcXlx8f^Hg;acS@7~Q11>Bw3lI}=s6(JOE&>-8qzDX?aABbtFG;9;cyy#dN>p7-h^V)(`i!gyWGhu>!YPJUS*sLgFyl`r&~VQHY~`@#wffJS>Xw zK2eMhkMhLvPJan)1Mv7{35#33a8Io>_7C+C&_&`&E-NKU@r7BTczHntPEYg@=!M|X zE-xIO5Qy7FCGHv)fO`dS*Hua=J~C_Q&?-XbE={v8pB(^U9(y ztuP7`3S!Wf8IG3pa170eK&8na#fjd?jdeqMga9wp8JVJxl6(gvD?SvpMOnzO7+^_B zhAGvgE$C^q3iKqrHKwE>Iyy!hlH%jVP73GCofO^Bb7x0}yNAF|!dg!s4SJqle(?6; 
zGdcmC0MFOgPuvIIp04nef7GCX{ZQoRk7g%tH1%~tv0XouI=R9qf8p{2{m_sQhwAV! zi<;sViNjKZjzav?%D^)556dq*8~ zGeT=9G(8yDNUmgTxXh=A4vp!`c%H`!LNw-QBUh&1r>SfAEJss-psB68hJu#5e$TSX zn6KwOD^p-p#%rj0xaGCfmQ(Qbvp{IfPp~kLx*zs>@|foc7v|@8%46;mNZkK-&{KY8 z`27oT3V8o1_>}8m_FsUe^FPX`dSWl~ncg2jPwi{R;#7s2G86#*EXwa^33)Gm^0_Fz zPjO+@D%_P z98OFM!})n8oSc-Xbu{d5@|U^4)8ot6CsdaihWP6D2h(EI4f z!FXzdCtg{=rRD>1dYUM@HU}Jdg7*GA=p+Y zVQ-@-zVaZ<%Jo54x)&zpgxoN-?rN{Ag z0yq~Jg3%iR-hSGUlK=^ALj`ytQr_Ri5uwh5kTlovGT0N${A{EIk$UefKZm*oU)RWMa6T^w3KJImPWbsa){o-~ z)f6~2_K%;9z~nq#HO{ZJv{airOk?K^KNp>OIrffka#Ni(%a$#Z@?74EJ-b)-KZjp; zPjOG+DOb>=>%sTmfnU$_$_c#h1YqAQ=zT9CJHy<|;OiOYav(EZo5dla@lO)+{)1qY zH2IS8(5$K0l#z~^A#qrekd9@>OmsyjqcS8KF>YL8$Q=&SW;_QCg58<@AArW z=!){3R1{`gVKF8lQ@(r-7Ul31zK4QKJ;f&^A}l;yTNuDi8+a_rPPr(&-+(7zTq;i z3>xDiP!}78n)q1ch6W))l)6fVD&T}O|I8d2Tr4ckN$3edA$>qRb1w|TXQ!v!x>LZ+?z(&kvt5}9J zHu%n6f{o=bT)3dYna5OQJkNV!UQQQNYK`yJc;=aBw9m%@#4#~38pJtUgV3WX=-J!b zYu6{>ICxo|r*WnT>*gvjybkN;f(yJy4xZ+@g$oyIpJnml#oBkr0l%v3zk{CM|0@8y zr=|Uw>%m9YgYQ3q-=Dcw<_gf`Gd*@_(DgmJ>bV{VEo;yAj%#t)bImd!i zq6#0I9IdIumsi>F_QnFdx+)WIZ^*@KYiv@F0Z&Ny``Bb7b~OpqMukWiEon1SaiWV; z*dnmEE*Sfpg1Z+a9T|WZ1;mHP`s3b42b`Q3foJBJaG=FkTUq(QC~usY5{gGhd*RRs zC!Fc@#?zApb~F6(yr{f0(>(ClR1X}KFn8xLd)zhB9b3!#VMnDi?iwD3JBEki)|Oyg z-xP>VRo+-r;e$2R!B|-8kJ&|m7;bdJ$TV*Zw*{ag&J}rKgHgm4lEYn*6)Z~6#~!J^ z17QhrL2+6<$_p}JOEYWh+nEJ&CR2*G$Uu6U05I8v=*TE-Id)f14>)NvCoVs~y9_&q zv>aH>o?1_FA8$_t`gqDek{2Q*3=SLIAF<8@km}=#>_8tR4<3y8UVV@=*b(hM0oW3r zge`tiSQQ+FxslPxA0RLc4?DVLeL|6Ssn12=5a!IwedgsX7Y=HtiY7Fe|EHhW$-(D_SN1=LA4B6m2H%y=De(R8 z!tb}dvfmSWKMCjvJLQC*f*yykXakqUzu_N}pMS&`Uw1FG`pc&uWB=0GSX`6=vqL`^ z`uD|TQP6YER&)g?qS`qKdA?zq;*0k4h22y0<8Yd5;?0TImXhK)yjNGI;^h@KQGHo> zX>}n^%rWAnt8?(yZQ1zdXgR)lv=o26H5+ek%Mk@=#G$TmoL!hK05ss0H7R&k(m%Rx z2)?+vL4)2qn{)BjhIIU8Q#Q^_kH-jdpmr!$lkT%omOCX>`QNX>!f^9G2pVJ@tKXwA~pm%n8M7i=#vl z2H=zc@AMphJUZPE_ej~Bng(H0Ngv!kOv2?6;kc&88`m}XNXfrE(4I_?S$MQ59IsHH9ehC6B>?|=mfMzM5Em=2#>9>rL zF!aaog=T^`!bY2l_IqbzWCK^mhR8Hdui_LhDkds2E)>9LQB*=yP&77tPLrY%Vguk3 
z>TK|AP&Bn#GQ(2BiEHNZJe`o@mOpk*L8B|saJm<-t?r9kuHDD{Pd)XNmd1PJF+)o= z{Y@=f#m249F(2=f*XA+d#SmSu*3|M*Jg135HcGS-XT#cX3)UuDF+C~~b^f6!4~ar$OfpiWorJi# z!0z3v^YE%DvA5OyIt&Cpg?UY>?$x7hI7t?9V1!-Vr| zEqG#)8K)!!etf-Hx~#nbMfTtR8e?@XC__}_4mwTGoD+Lj;H1t z@ya?YUR^JWQm*~@v{-F8%ZbitT$q=NmlmbrnYoELF)-EQ6?GHaEZUR2`)_RG;;~-jeZAr1=Y95dRJ z!%-{nYd0idXm|vwr2M#mP^^tI;6_sh7KKD$n4=@od)Xo1(+!mpQWr!;!4w+-qbU*L zavUEQ2SZXEQlf%j8FUp!hPY#?IRdl7y)ZLM)bDUS8Mcosh z+Yu}TD8Wc$eVmxj>kuRic?lO9Luo=zf6Ql!tWQ1DQ^qy>ST`Fq!)^jm-5-@4%ksWi z7lF)Yf$F?^=Evt=yf4dz#J$r$9|NklU=sm&5(D*-rp4wN(f28)5 z^Rp`N2|PN}_4yU_wC8yYZ!d?qf8<(62lGq0OW%Gi@cRn?ko&^B?@Ls<0edn_@lbXh z?n$k{>WCB!b@oMk-vL@@M#LZ|c*`Grk3oZB_sZIAytpzQ&n?OV*U~%E8IOabBLqxl zoS9|9$?0i0FeVXCEzHF;D+_Q~l-j&Y%88$TZ{{9^6~P9Vmz}vABQKI z@#rKg9+{Aaqf>2oc10PUSy3#ZrU4I(j+b-;o|@04tZX>HAVphh>eS>|33Frc)HD-b zUYv^uTLZDH)(sDi^uw`9p*S-;TvTN+UfZ02zu%aK*ES~Mwe=?KlhF3=y1qEjHc-@R z0G^r^i2H{1!@+UxI5|r`hp5Bj0%oqLvNUTTHV^T{@`8a_l5dYKqV(1d@yDzTXH3uV z#I)=Hj7jxEtI-X0i2}R00Vs%;EX+>E9Ub*pTb7RYSU)t!`=iKf5bE9C1y=D`92ARH zp$S+iim%G9H?sTmM}eCsnj&M-YA~ZJF$w9hF-SI=5MwYRE+HA|T%0sE5{1#BXiqhu zGbtX;Qs$P9aRR*`glnRhF0+jrE-%F)%5)yFAt+aKX2o`ow@W;-OjmnRW zn#ziRV54VVwTLHI8(|#}J@k-v%y5n!1>Y`_E+7f_%v zT;_eTtO7aJ8=p54Y`j0JHJ<0^<9EX6b!ti-|3DDl{9OFZe6B|Y$g%`7A0K>2g>>mh zInU3|UUBwF{|j_fbW*Lh&vV}iG?=d^^c2vPt2ADVex*EwpB`R4T@OD6It5FXrC+)z z_pe;if4G8Ql5!WNJe~XM_?vG;;r;lfgts5!;ujwSpL~w9vsdC8a{<<-WMOk&ITo98 z(Hszq7-wm74i0b`)E{onP8jIm4!a|hBJuFJVB9AP=>C=n>>nA8107L#e70G_M=Kti znug<3M2*eK#gWMt9PCQQ!(B!k7;nO{nVC2>FB?xUFUIj%Hk_WHgR@J^aCCY$_K&mR z*o<79Sx|y!SJ&X|vI;yjF%|c=CE%XnkvPWXp(KobY;ug&*>QGuGEPp4!Q)es@XC@r zoSK-7T{WKC9NU9qLh$IsUp+YVyQTvl|-CE+~%bi^2%HJxZgUk>l@x3}0syC&Z!DYC}$@4JHmi7NwVz*$ur^ ztAw`ZR2X6tM6m^FGb^|l0HLSNu5fpTr#BZ47zlrNSA+-@{k{COxw3rj7cSs4hX*1* zA^;^mPN){R%}r0jO~b1&BP|A10fUh1KM-YMo@k4Vz_h3YERIRUask|2scW?SWy=>( z)C2{hSwJ^NAUq{28x6@Oxu;kpbG)E24GHndNHz!zO-aa4j760wz#-m&SkovWoHUMZ zMc4f-j~auu-d3 zj@g6B^8^MJt`h0Bk{ESqZ{QLws zLwlwX(yWj9xMDPWx(OP>oQr|dcptnzKR3S{uE?@~|9$}xXJT+%9|wjr6#p0S2)SQ> 
z<$&#O5bBQ!%I~SHF2k%R(Ddm&U7t@uOz%D6_h;S>JZ}HkzyFT2HzbS|=f3i9zx@iI zef=rE`sP#o^P+^e^6!3O(K>8Ow4pOF2=h}cxV5Gg9T7%E2=v%dIM`(n92_0sz*j&X zn;MCulOwUeJsc0V$KdezL_9n;8c)s@B_^uv$V5?mv$JtVRNWy_YR5$l9-WbjqchDo zy)Y9`E-4b&72*ETCOkMk4JYOn;E2HRVKQD=nu?c~iE3P!f>)MG2rWwR;gJE_(pE=0BeApA89VD-@W}XR z>>2KjdzuH~#FSu>wW)Y|UX+BsA)*Lf1>6p}r_m8NRrbT(!(8QhzPNX^53Xwvz@_)W ziXvw$%5%V?dGpbLLd2YC&ecqA~+OY9s)ehwvaELQ^(w# zo#E&>2=>kn=<759_VN$m>gs|(4==lamj^grlOaPS3R9P-l#Q&VsWGvhcvD7ztI)I5Ep2aI@m^McH_Cx(TNj zS@G2J9GqI1k4IuLX8XQt;wZ3m%^ug_9G*v_(P>wfkxB`fl;wI zJ~0hD1X%Ypx#E$remFiM7)QqY;M_b>g42CPsk-8h+CjLXqCd8m_QEw)SK-Dc2i(~n zg6oF{VQZZ~mJ~W-e$D{Q%N>Mi=?)m5;(*a+M~pByqB+qKLyfMeOBjT*$iB!6vO__5 zKcolr(T1vUs#&=?4mCMh$P)Fz@pvYyMVooSg#}X5x|Kk9NT`IhK9|SexoJJJ1fG+< zJqB`#8@`LPpKSZ~!odFh5a!_yn?SNCI1(lP;i#67b*TKK4Kt^q)n-Gr{A1>a`XODE zUrB5T3goyz{_58Ybh9K>E_SrT^;M}jJEaka+e)!8B~)PUk6Ke8+OiTbGQ_0)~HA<8b3i=oHQ_j9-BQ8!lt4%X(q7kalLr4 zp1|NS8?OR@+A2V(W9Cz3sr0B~R9LJxef@0o1cAODnjRzycs+qayAq68mwrp*zAC5E zc|CSUsFjmx`ugj}IoK~KSs|HgC$y*~n5c>pM8jvvuEHyV;%0PtibN-8r)r0g+?b2L^(JjGB_Nrj$R1&^+9`iGVK1ctq{+N z!aCF$jl*3DIJY1}1Kv~fGjVKc3ih_e;>Z+>gs|Co{pv~yRf}*&!1vNnh|(%#SokprT6rz3cR|t7O!os#2Z^n z@zUCC?b`2bEy256itvUUKReHW3k#C*`bq(DXAq8$55pM=c^?~Zz@ElX?5qsL)!74a zPlJmnKnZ0>ddW5rPtFR*xw(-NDu>~Lk)F7#$r;yG_Qlq+tFWo853X(W!8J{R*j(q0 z_0^tOS>}!<#U7ZG?~Z9X9vGkDff1ti8WSDRnB<7+7<&{3+o3SD56WVkVDaq(Yp4r~ zOp&O}wjf`WlQqSRlvDwqs5`#8QQJyr784x}e_ua%NjR%s&coGGoCny$(eA45cXD2S zAu(T5=VRX=30?xdKtJS%1fw!CO5mG}ahbVjvu2=G6k2&i6l!8(&}KBEHX#9NkpikX z19D^IknQJ=LMJveuH4|~Bs}>vc3}_WqTI@dvbuqrk8DNKT@e$|{HCOE70<&Eo zM9O`{36w2dox+?f;4>pz)N5DUC}~a7XhhL5r2SExO$f~lQ`aK|^w#tHQ2AMw*V5mwy0*G!I_+2a_aVfT6LQLVO#AlR?|-*XrAO=T z1o_wdd)4RpGgtZa`L6^$I@9zDuP6BQO7D-rr_zCi@Lk?v*h`a)6pcn{qjrP zH+wD?rDb5LEeCVN$EIf?)jJRogZ&WV6a+(v0mG{*v2u1j>|S1*fm1URwAm7;XQhei z%g3`zb8&u20S=5w!b9Uscydub-nym=AKx(yuWzr#n*y}AuOA_bt{%tc=i#C05{_;e zikGh)iKo`r;^|EdII^f1PpxaflWXenOHWo)CJ2524iu)JvP<*hzDayi8p5C zx?!?FuQSUN!&975pEwZJvHdY5svk;1d!tCg+Jf*wNC_B-0%I7;b5fAc;VC9jMyVF0 
z@ul0Mps4T|)+S0=8yXZ0A8$?}pPP_L`B7ZH04`24Qwg8%_vWUqcO z4R(gr(;dl94oLHGLS0-0hR4NVlGHgyLeX}!NkZIM3BAptO0B4kjF(U}87c7w49U(x zZgeP8o$W9x&J8QGB5`$T3RV|cG1T83#r^xB(cKw^a@}UPL1=Jy!sy^240rNGu3cY5 z_3DiTe?R2O=g7&-LWVUR9U}-mX)FT0pG4Y;uF#(3X`s_VkBwIk3DsDYsdH=~G&Wd$ zgV#6Wo?uYvH2pQ$;FR%tsy=S@WoQZ_SE@^&roV5MuD_1n^!KUC>94J}Ow)f>?(3ne zuV0nnz3J~oU!Hk*%yQijq^jdJ2)Y02KmXia51;++zQC_Upjm^LHy7enj^EoZesw0^-fG3`8%%gktoYsPM*G@!45@1a?kXUha#T*{&FG9gHzH4n%fBlW`Dg;_Xox z-4|tHy-^u85c$FO$POKhtXLmZhNUz_7${xz7}gh>AyZRFXhA1WA#RnnKIACL=Y*8%bWhk>=hDx!w}8 z=VoDecr2!RL}Ej*5i?;lBVxQ~L@sdT=o>pW(50m!>~QFt9%Mgt8Y^ufFs^ zq3HpkjEzy{U zt9(CQx`-dX{t6dGy?!fS?RQ`N1J}%4iyY@*l!(}9Hz%P@+GJUHG%CUqM93JhWpWGF zPOZS21!b^1yTGh<4xE~iCgH6Ck4{X$;{v~DmsJYzB)sh~IJqzd=T>Lpsg;Fz zd|5f3THS=B3nbj#){5t^Yr_jSjm9(AiC;enFWfR4CpV160UCtl-M)~0OdPnRi?~hGI{jjydRaB2N)|3nM zhWKMfdVee`cE|J#C+%5Yhxn*eM>HD;q9(CFs^UfE#oCL)>y4^7XXJ)BAUDbr<>?8i zEKEmQS~4u@7HuH`?cKF$-Alh4MTNyg#lYXoM_UAxS$cUq1|WbVj?tsLjy}ZNZ^vjSNGx0IoAG7}w;c;+DKLEH{Ot+Qkl4;qK_nPC<>A zA8Ogz;qHNee!UUu>xqQ8aF`9@NH<0xEjkccG2s&SCZIaZfYqiF?9Oe%+Q3+}+YdyB z+{4H)Kdg;U!f^Y+h?3)QFIQw+BwRP9BGZzI@uNDWD554L0&{Q3&PPOe>p2zw; z`fDp=!>93_0++trl}ufSuu#Udp4X?TI@Nc`dYQ&8kD0Dq)uGb4rE#nOJ{9m3^!_{G z>sjtf;JXt1eg{2TO*!Ub(RgFWuCIbK6Jb*&C+f^)ts6#4&h!TQiD?{(;I?YKcT*AG z-ky%vHk$Cx)^xnRF;iQ`_0q0Qug zkpjI&f!>h#ekhIVgWO;{`2y`w6zPcE2p8nV2VjUms34oOEs~IAF>8y08YH~slJChW zDTs?pK%id$eB3-VC-j`$9O3Nkf?oaX(El>@LPh1}3~)rdizik_#9?iO0ZSv2FeN%( zU>Ae-Fn=sbiO1f`BK&2+G+bAjifYfRknJa-Xm2|d*bhLRqZ^t$+>z%v5dK$*dK^3e zmV_9jOUP|aPC|ZKIC>-d`nvVk>sgLl zrm17@E3na+u0KcP=iz?OyaW*U71U_ls_(O>(o?P=))Qv`sp}QrmEcF$!;gcKd-|pC z1#Xx92MNQK>(jM--(AADpL5vEulVrNclhw)x0pU<4q^kN5$@}U)Zh?|O0-~1q7`HF z$}ys>1RIu5M14g(#t$>Y?xC>}I61?j4NEyR-h{oQBJogX5>Cv_$K&%Wae7`Qo>)?W zcWxevkM3>9OWPaq_^N80A@pt)(A_l;&xx|TaNAP6wQmRBd*n8}`^YW$@c6Cx;OI5@ z+rf2sZP!dZcf(jbwW$Rsme-0(tjF^kYw_}yN<6lSc`!yK+GA*ZUo?xlYcdRwu(m%c z1U4xz%$K!dpz|GeUeFQ9nB)s+QD=Kf`U{pAJVeDW}EcK1RQvYbo@Cn7Z;BYki z`Jma?9TOq~u(P}X2U_c}I@gF{;jSnS9E4og{umt?j0F<5&Jv}!A}J9=qaqPz-ybo) 
z-pDW;CDaud$HpVeoB=~v9HQi&%$}YC`A|#~Roa>qkGjx63>OtTJ2e>%)T%Aif+=4ksm9pRId+hM;9z3Tv~X~6(B4MJ*%Sl;-^#{&J9FlYR-Sn|&W@cK1P$+z zd3lcq4<7t&3ZAN;tGDnTInIvb@R-K2ed<+~+^RRGsr)nrXqs9MpUy$aydELLfy}%< z-@D6UG-?c=wvw=v=hQwPt8(0O;RG7X_kPo@Ekm(-&Uma_)rn;b8nI(>6YS2+ zPQ~fj9DJOI)3b8%@c3jrI@yBzI}F%2E)|ESRIv@ZQn8@zLXZ@xhth`0U*M_~^`?`1{ct@YddSc=gT&cwR!^ zvzvzF+089Dx2hH==NI7E>~uW2BnNM7Eyp|CCH!4s!MO!-czKnCw-U;-v*N_WNbMy; zdxr(#{^n5JH#7iuH4MQ1(T;NOff9~~;r=#vY|6L8>MZtRI$>jp3)U35W3Fuw=I6L! zW~L)1raE9uvbfn5?G{(G7zU#$aUkmA>`@Wb7scUywBadP!F`bzEebEe7Zn+a7*b$E zj)b%(6W7Q~gV~l2QyO3FlZtrxL4`#{!Pn1UTkhS<#Z7~r2j5N0_gYAJ+gFs{AW><) zeR{#*;D{1O4~%yY!Q#Mp%ngdg#J~uQiHt8%)?xkc-(xxo8mhm8F`Hmzsif@zEWlG~khP1fB*x{WCr8 z{-YavO0CiOVje<)Q@;oZHZ&?Y4#K5z+87~3<2m(iTQ&EV^Hgap%W@o0%-2BDIE76= z*`LOSM&R(81RZB|aM~Jw_yiDpgE^>q%a$!#egaSJv*9rhA<03*901KUE_T3cbFqUL zUU)&fFQyUh{H%m2#}(3ej!u|!Mg?b?aKQ!6xM2Q|KKe-eE(j;iyx<}X{QMk*&CkG9 zRJce2?}5ks*3q*!O8pZ;gb*I zlV=X%<7Xbm=NAs)?~mVyzaG64Z$Ee)-g#gP-nwV077o9D(>T1ceH6|tD#K%Qb45w! z)Us~oYp#2zbidt+;r7dBS~U~Yyh#*300ozw?YGhHzy(+iWb{Lo@_ zLPO#pG$c5nHjc9!`k*klHwwZANJ#4lYq&G=OyMZcwMbZ-21|+&DH6(B1bE332J$68 zTq7@18klcjpf=OOO+crVo{xus&)o$+ex7i3ae(`PzVPXF72@q3kUhW|LkD|fk|@1S zzc6%0L}PS994ZBDm2qL(OpH`fh$WGssELlk^sF4rGFvgy$qS?VyWsk06CO!S#qGgS znBwh+T2XSfp}|P#*9%d7?GV)OD(%f@xmJOgF&<@^DJV>iMs;!mMyF(AetHpBrDb7r zZXOn7WMI4~z-IY;g-LP9G$&!&lrCu+zY5<7yno8xWnIU%M-f7oBsKzNY(Q+VY;1g1 zM^JEP1w+@oygV&WZEdagFrAHyiw&q}ObU8zII4m3nDbL*Bj(2?Rn(8Lv%s#<{sXrxq)q@f^XzYw;Xc zh~a0~pHs%qLuJNvf`IqH7n8Fb)i9MX6(Av{K9~MGr76|NEoXfYY*eOdezH=(teeN2 zO`_HbCE)uAKVJF-|NKG1 z^{>9c<~egwVKKoLABD!!Tuf*v#p&JKF|Ean8&)-9&-LSQ|BlhPX;vZZ4t2$2_wZoc z-5icXovB&}#`$Gscx6*Po>^Umho@xXvvAaUw2Kx zdpo;ueoYn5EXl+9)dhIx`fB|BmLYg`lLfDDNyguAOvU@xoABDkC_KL^45#P#;rKK^ z>}_+F`yGtq(*tm7PB4y57xmZSg*%7(;_l(0xUta_%W~|nreqLSm3m^9%^jTvM~pQN z#Izhwj7=SkVM+Z_pX7jw=>8~==!?oo32TE1y?!W)bV5O#7qSupP?(W~+^iI&SPU?w zn6%~IQ!L^sX-F}pXmdJ)LPOx@d7#SIXrl@d~MFykT5QEIbXrw2_A~zu(Y3|-=jWS?Hq7_|! 
zkr?IdgU-S3SnD5*m41O3@8ydzUVc~|ZNNOQAWU%eMz-Bm2puR2(3FTGb23I&mZG{S z6M5ETq5QIT=ZD~*kk3XlzhVC3p4YP~oGPyNJy{d#pwQ_$n0 z2F=aQ+CGg?Q{gh5ja^MgcfQbe@812p`%%toQE5`a@;t{Wswr(eM-LteE%Kz5qj(d`BBu~k3W2auP=R$ zpDulcYZffRa9a+>6jfklX9pIJD#rfnr(@aFLfpM$DmF}S$E{1IW6PvE*gY~a9t7XR zST)N}O9?jVISu;Qd`)_*kI#;i1iV|Ir)p;i;__*ntog-7w;vq$ikrys@J=Z@grvq$jZxuf{t)IPj__*T3n(0k>MnfR+H#rN)* zfS0y4;v8R0v?d=fZO+H@>uq>;RT5s`Y{vUHq~p!42E4K%9xtwm#j`77#N%*kZYUm` z6^wI>V{vkJ7>=?Rw=)2{1l)JDa7>;jmgZcA6-9$EJADA!V|t?{q8COc^v1+YXN*l7 zgwg4)XqND{B&t910_;#2a21ODd!sP0KMEs6;l=u(GRKI5oD^6j#5ANN!NkL6otok)>7$txw#-vpcfk8jbL|Y1lae3#nT%F z-d<=855P2297cxuqdw3Vg+89h2?>NPE)rIOmDOxOMp7co0$>w#OJT#I7&GdiOz=$%q_-5&4!Bq*;<+u^M2t zSx_k8o0(UD?L%s?)S7{f#U)r>R)VFKWf+&8fd!*SN;8qhA(ZVb8p#Dl(BJhkpLR&Ak0{=YJ|KN zol1|B`&B+JHN|`imNb@8kYvN*JrorcX-h?^MF$8y8b1$@xyS&a$9;y9>fU&)!gcPe z&p;z+SdKv9J<{}3?0T9?Q>u^9(wn+BK8vKOaGT1L3YGinzWI51%trHWV0Kkd1msJow}{KsE^f{Qn2N&aSk8H%pXKu&m=l9_A=MLc07mnh?7f<1@ z&z-^B=TGCUr%&LWr;p-e0paJ*?#E|O-ig0HycsXtHV?1eHWP11i2KHMt$1;ZgtXf# z@WRGCoLy$Y^J{E)cSk8+-;l4pV)BW(Cb_pb0dtsyv7w^u!X>1Q#bYy~aIh-?4|n-s z-#A~~*6fWPRi4;T>V$F*dO;MjQHLoV6dO7rA1zKyQ@Q4UHBz6h{w0 zK~OIg1&Ye^?S%rr{wRuY7qu0FA^B-YPd6f2pl3`M*jcTj_NeeuVKJm2E-VTmegWEi zS{H$y_TE{69$)9^EolKh-U#ybMx=l&HY^BnL4HW~_eFU^BI+VSFebzeOU+@J78i_q zQG$j3KFEoRfHg55*|t>V37qmorR79N!X!YA6=0?~JEJNlPE=tm#)U_sBOnl!a=i{w zit8+?SRv;YM#rMj!w0eb9g$O5W~JM3)AXsL>bi#{Yp|oT)0jd1*FA)VIAnvN@yD+0@ZrN+1EX?dM*xlL zgK6xTU|u!=9xGsQMg|)^pLu=o!3SCz&l4cZI2%I02&povNwu65?}rVW5AGFQsixSG z!E;nVG~NS2Nsv-GFvKJ*2m`7*UYjb3YKrRg`0?Y~Gc&ceAJv>5B!mRP#rvbuWkaX& z*_&D+S*_&4XLNk_$j`xjHh9*>KNfuEsE&D_rmG$%cQgso;yf>;omZ7QVr~K>$ zF#j$1opON%meUVeQNUB)6MD-39r*t28~_|ax!^Jb+J5-hk(B zUx>3iCgSXd7F^ieh-cPU;mvEC@Xiel+5qU+w-)0s+skoyVl*Ba8-=5jjku%17ds?e zy}8y^LfHW9A1C2>t2b_IbirM%F4#HD6tjrUIm)aXk3f!=;zzZ|8Jut>B ziqFyulM7tXmNF1SlLw(9&R$eqKNN@eM_Gge@&o!KJHP=Yi9x8&HY3L-3eOndJ)br! z14$_2;b;Hj?+{e>H^B^wi&WEewCV^j0co>F? 
z${HFPh~>sG++LD`8_R5%6d#OMV;stj3CI?eN8sgJOvn|bR%WxILO?XEqyVKBQJlen z$PNv~UghsO2k*r~ztbS~$_<>+agJIge}fnhG6k8c3MBo&KQiP zWdWE|9B5M}YM$P9BtK8L4dbJ&LgsR9^_ z_7*b&g{sLeIHLR`1|-EFwh@CLB5EM z2tq=j4+R0&U@24@3*I432^!}y{q%e z{|J76Z3eFUB?2VaBKmY+5iHD`!u@c!@0P za!OF2mxGz(E3j^1D<(Eoz;0is0r!rI#-XWZoLQ8MBQtDxd{HqTn_GydSJmU}N>O>T zeQLcx>84Tm_`zv-=l*H<`>`$f`nlb>`0~T}?AZtL$%RAsL_*w;Up$UaUpaxV-a3VE z|8f%FzI6oOzi|*hy#64*e)cZ>O%&q$2iM}w+o#~^bxnA5b_ov8EWpv(xi~Q|180}p zMDb+__zLjov`E}J)EhTed*J#Z-lFazv3pb$Zmjpgjg7(BROX4*d4sW`*ach4J+Q9W z6$>*5Vtk?`?0=!&p$*6wFiF8DEoHq(m zqmh?kgf-0wOInIH&5Y0TQp7Ft7uh7xiiwOwpsz1{+&tmwCa`mNhZD!-dGfW5Uft01 z_J+SNS3mYfe6SzVf&(zrl#B_Y*d_}=$47=^X-pV48>6r;Edp~aA)-LTQJWHl>a;kd z27ANg=>xNGxRg(T)x=H+TekwtNEJ^*ab^ZeQ&Q2Knu*zEHQ3nHf-N<5*icx2RXLdw z+8Zz@KOYlqxpHkAN-P#cxw|0EX)w~<+z=lcjm+!ID$ zBKW5?Y>w9xE)WfuF(K(Z`h7#{*~n@9@zV6?*dS=CVJTDl>bx@MQ^rt~rqY;K)lG%h z)7VgGJ?rdg`uk7-psBj`=V|(T>UoaFMy~SoJpb?R<^SKt&!BhxzgbV`J@<6pzutNO z|B3(aLXWP1$Nk?y??;yZ4!`^)Vg1kYM@FExW#K9`7ZhRYh$?JbItgPNtI$|ff{M(1 zv{sd3*_=i!o?eTFybRdw=}5$VT+Mh&ia^GK16|2DJwG33msa51iYh$2t{yLJZN^L2 z4#itLN8y9Leth@L9r*D0HvIkJ75G5F`O=OioLX9nV{`NI$h34k*kzFWNWhu- zDL6IPh({*J;g(u~UX?d)s`0~}Euq-g8G{`|oUx(A8H+ReV}Z4gsJlT}E8tsE;EXxx z1JGp@;H3{nm(>wN<9nk%wwJ(efcQW(#P>&eIA=K6AuHf2Sp9n=J!~+FEfFZqG$1SO zvdR9V&xY$?(`Exv;o3=ilwrrHRrvT4OK<6#)E8!)d=SBJXNf;S`jKCn2 zM~afm%EXuqE1Hwz(HIwwY9CiL_`6|LVkjD7g3yv`M3bm4Yk)6uB~;CJ^hEk#FT^@| zAv3{%Ap*JPvSJJ=DnM>lIsO6Pnf{t8o!3^TuHEzA)HRr{ z&#xM?GIigoysAs3v(YJIS(UHnXV4oPF7xT@{D0f{nXlyPzLfLcdOAN_Pgnmgf8;%( zcO~%Yy(jSW&h!5Qo|OObCw%?Al#$;i!`olJ|0m{6T8Qy29hlWM5}g9RvYZSQW#*tH zrwHRlwP4MP(HK8EA2})Uusbv*6{qHBin`0hk?AHJoS2NWOG@zMqEZ~5mLWjO!MQaR zcyW6JUc0Fk@7_5YAMKxoH}}lNhex;K$7go~uO7zFFCW9#&pe9H&OL(9&x^WyNx&y* z7he>W_u@YM`qKTl^z?1`@bE^wzi$EFyJsw3yP*LuZmq)$o9glS;$rL@m#Fo^?r96r zhM*i67l)mVLAasPM|(l{&2>JaN`pn|xnq5aBUTqWVpD|&Hp#ig*$$YLY=@3G@gzI# zIo`aNK zO`?L#64IJ1soEnI!dEsQ05sV^Kx6 zsLCYFkK>K#yypLx#(UR0@2RKj!ADoX>*@SX{+;&(p9ZhX z|BiasgYJ)@cj>2}aY?@VFTVc;-+nLuzVda>6zEl#RAchE5dya+6iU0uwq>ETpd3>s 
zbYkVoPK+3yg8~r>c26$J!pobB@cOnQTv(eUz{|yjRh2lue2A#MN<6)?5>KuulTfq@ zuV3GWzuek|H*cMY*Y21qVeTS)aqMbbd~zqgd-5K9_4salef9x-cm6^AaN!~R{Ne%p zc;P;Q-JO!>CVckjM!b99BD{U~6uf^=JKo*dgumX}iof1G0xxW+*3{;G?U8t(BN}%P z4HVe9W1FZq`i7i=*jh9gH`cmhTe$<)77h?~=Y;E>7 zMwwGln;=k)4ntvl1j^ZIkra=y#l@IZF$B{ps<5=N86Bc1IaaR8KM;Eg>Tru82a^KA z&>9tmasg&xhA6vS0i8(nf}AwuXQd%8J4?1XD6*!bLhiXW+a`79VMc8}W|U=NR(cv1 z<>q33Sqa)LCX@$yqd3?HDS;j+up|gD6H%U@39HSFOi_@Ta<6UmL$x0gO>vJ~t%>tL z;=jlbSv%Aw9o%tV0;D8(--mL3 zray9iA9`2ccTeZ%{WHH3{OJ5{d%A+20-y4W65f9E^N;xU*I)4U#Y_0|tM4#v@@xTJ zK4wiHht9Fhs4Oc&rY#f2`Q?~7Z5mdunu;+Um1wA{g59YF7X0P9GW`A4A$WOX4xSRl z_tc_7JR{IMCn4^WON;Qtayec<1g~8?08!51g4!zahK$Gt6)0>B7dTke5ft>L)8BT5Tzuc;P= zSMGxAnu2k4Lok+?cwu_RV2p^j!?5^X=tyHm{?VV znRSg=KCA_cN(-^LNFbGO#kh!ItP6_9wxC$7h>6EcvkAjeB>YT>MNVomiba_fi9##P zv>`V)2f2Cq$QLP}FClEPIT;m}B#aqSfax`*SX5kug_&8HWJuK3*J}%LMMta;%0k_c z7U+!Z#4zOBjL6EgA}c!^nd#YR8_}V$jR;`#{Y~3k2AarJ3muT zSN|4QN~0_I(f@ ziG2AzR}@!Z>B0rL=ISMwKBW~c_4TlOcv1r1zNTDT?c}-D={P+%4Nok{!}F_a@Wi4b zoS0|B%Udf&kqyI}Hw?ua*S6u=4MTBmeFI*;t_^>^Yck%tVX*6EFV?5s8Jq54t z8jn}*9D~2?nTU7yOu}F8?!p_ljl-)qjK*u%kHcSX>BRYUe5rEP>xN6CNBJr@e@1_lPL%ncnV^G1%P}gKLL)U}NzhEYInS4Ha%!UFIRc z;3~)-XqS+-C9xl>Bke@t^+i=wAJoPTK!ZU57;BI6sD8)~w?}HI6LJh;C`wB}fz^Oa zfeF{9GYeo+Mcr{AGJzM9U_gL`seCtW_Y1n+;V$6w63BT9^n4}6?FODF!UTkdkRas7 z#v&^q2sH^wXh=yxHOJdUNm!c{gE|RQn^H_@%ge>sl44A&9)cN#Sy+{ujGJpRFh4C6 zi&7)8A|VQk!a^`BJQ$tfp%@ww1Dl&CvcdzAmlO?iWEd<-iCSk%T2>ZvGP6-E;44ed zM48o!(zH~xhyq+%R*M}~4OneW#k@!#+*fPGV_j9)T9$@fm)=N?@Ia>e-pWe9E{_pej=!(&wS6W7g06qNy#7jZPWC%JF`xjJj7ItMXU6 zfBn7xpJM91uH-$zN9SkLJ3r@t;{0xuUkQ8)epiCvZ)rb%FTcO<@bj-<;o_xF@Wtov zWA2Pe=xiT~<%_#8YibJ`tMXA*l#8KNL$GztGTd|f2F#t-in_8wf!@R>7|)Cy|@TR=H%hnygZy(T!8Z%Ih>^) zk4w2HHJtW{I?f)|@eU}B?1#dzKFA2}2Sbn( z3T*~act-8r5H<<@tO5$Iir*227sf}S)?!4RD6V>=0hOsHl$w*!nvsgB znMT}Rla6EUC3tvLDRx$8;HJ_HEQ<@r>;NB3@$tjdCf^fdG47hP2PK>L=*eGw9 zefpy`$A}z`3(UxdMc~;sVUh;D?xrpclz`Ko@pUg7uMK9=dT@Ua1RBT9sW+4nWL5z% zS3%*pID(0gU_(-KM>!anicd|o;^1HA;ecYMaf}-q3%6>86sEIILX2gZMqqNwadLdF 
z#(lnwo72g-uZ(r^UO48CQ{*_HnCIzgdKvGJ1A;krPMzZvF*bG@>tLF?XO5%e)H@D@ z=FAG-3za7qH&83QaM1GV)vL99YBiPr(@YP#p8oHkM^|9`cYY=CT?u->K~8tvopupF zT>4V{GyM9~SNP}8-(b<4saQ07A|`d#qp~Pje$&!kiqbKnz8u$VT#UWDHelwYT9oIR zVaGR{JvPUHr&p%p+;S_M_p`GYVkV*sY457Ja#ZG;^f*gtBTZp06l7 z7k6g_2Kgc~*iReyn-Ls@yx?FIhlOcy8i0sy>1!aoH+_ljH<;}LlhpWEX9HJEbL0QVYWv&CWpi!*S;Ty%QfbQBx1Nz z0HOywA}%%(8M&Fr%*;S}b{_K0R#C6n$hBr6Q^NETn*|lI0ho~zg5xc@cyeeC_El$M zT%Z%$B)o6UwV*651=;f1v$AqAYE0*Ek*xfj<-ejd{|>8`EXK;kb1kQh+_fBY zW>jI$^hVenpKZm-`6fKOHbcV96g)gBQClhbzV<{Mn2>_g3$t)`Wv&+DJ|k-G?1~|H zW_2xI-drzUgBLbb;(~;#XBKDTaZ!TLt}nnF*H__H37KENt`aY9E5-Ah%ccGb36=Bk z&_pZtjkn;;qGBmiDDca~fr%2rw#Nwc!f^Yr2;ABnfn9t*?x=X&C<<>!wHH?9_1D7L zmAUp*&o>< zeUTN?-CLWRnt*gsTULQx>Tkf~doI$n>1NTf30hd|;pqc6cLL7~d|@{`G5Ac6>*e|S z`@k#E8~(vQ0zF@tLjqA88HwuXXbGz$Q4`JmNMw6?Xs>c?wOT~^8Bm&TLQa7dxmgwz zgal)__;sb3cyh`J91-B{t1rPF#aXz&AP?`fwBu}HEw)6OF>0_ECdR~KyoAoXGt00z zFc#UOIAi77+3Be$EzC!u)rRW)Lgb{f{%qLTikmprL z1|dv^MPnLgCve?4!cWZ#p|Omb8b_7KSs;WLjg3+nKO2p!xp4hFWh}2C&HMx%RV(xB zr_24{WP0$af8#&$|JR^*=|_RyFZk*EpYhA3U-9GjKVrtzY4W=rE#898G4)u!XcD$< zn2&WUrs0Mii*U#7OXS~nE_Ur&3%f(pjd*l+BA(rlEvhXQ_l=FlT_d6eLdke|aypLA z&cdN-795#n!vkGL?CY}O;M8nUX%Y@^t-(th1#p||aAbO#Hml>Y+39#n!qqo-G~p#K zN42s@lwq!@$!r{%k%>oVWaGGm$B)g;#=$8j0bUYzwgd_6!fD30idyl{Kig8L&U+6`s4SY(LG<2!2^(kkFt1QbRIYZH@<8t}Y*{59aY zyL)OXk7M)b9GBo6T)#*b|y~cmE%-S zCH7Y~V_viYGh>o4Pk_8MECP$9Op&*%)4Fk(Ys!{M<4 z8`qN4hnsXY9gJ|LN>gyM742Ahw zmGS-vMWt%l7zuHP)ZC|0Niuw<5qiwa?}}yrZ!%ph@E5h&6 zPq_5mFSz&}Uz8-@{ZIeIgelWdR#kzSbEjhYDuLaGC0Mm;9%jxOizUmZVAZ;5Sh8{= zmaUlvySv*XaqrkLJT9T>iAAEqCK_;XTDrh51qUW&;L!9;?3-Z1-th+9(-DUUCZr1d zQgC8^mMFdwJh`+0Pc13JLtSP(Ea&!(HR7n0d3jSKPHM`|j6G6L18GMh9-3^!WApPR z#7z~bCgYK5CfpZse65uV8 zkalWHZ%ns}%CkD6EvY}6;`(X5w0V94z0lsMiE}`CRDTqQ_eEYI-_;GH1$ylv5oq@gLYr40rkheRDJKgRwj@**XP~k$7d2^F7#d>4m>3%> zqZ739IRYhN|*=VD4b--ad)^Pj{UAxCwk_ur3yQ~PWL4DYzo34h3HEjfmzG|m&HBBQ$N zxgH!Byrl9{z+eL+hzK_R_|wwTe(M!x-3m}_2rSEE)ltE+Jmvxcv^U>;Q-h*<7YA1; zQFRbfszK6NR&DutsPqa83pLfI(p6p#Q&G!S(O91XDvirw)z#H$^-uv*_5DvV1wG~b 
zUi7ZM(|_j*dh|a6AN}|6lYBq%cL03{=6$6qjZ@)SfF5>e4mjVW`cV##U=ch@b) z!e#TYVcQyPx@Ik=%$DQ+6Jj>w)L(?ImHY zz%D2Vp1wW^l8`ncBmkD^aFis)qbN8KWu9Id@Mc6OVrhZ}3u29!V@$e!hpg=2`_W>+eoae zt-+4+0&K}n!`gye+}Jt-n`@ddNq{vZ-H68OJk&_IOz7oh+K`=@jm)fEYk8 zaj{ROez-U&A;*pdHUxr>@FD29OcFaSxHJ;a5k7(;3O#9Y9DfM!GDHK;ySI7?MZ<7cE=qf*lw z@0lQLYHHH=Ctgi-76gac0~t@Y>myh`Txr*fZXU2PR6mJHv$Y>q~HAv4Bn#+L1Zw zq6*VQ6(-`y%q;BdG~&Td6Hd&@!RZD0n!>w(TmtTDi^7f?U)<0XikpT;;>PB1+{<%g z<8a4_AlyGb7I%({(9W+Z^u@e1S4=esK!2o1d!a~xmnR@F8IuHZ7MSRl zfoHO$AYOi?At4ghdi(qq$|@)HI84PyRGy!|9|D7d5fDJ&1tTFM6lqZr$d8Uhc~~f# zLqjnpFbqqREVwSO44X3xFgrN~6U`~;uvsvxsRk1pYcZ*z4wH(CF{Qi`Z9{4>qNy2; zwbdxEC`MsLf!td%5+z)Y766x~B%!mm98-riV0Fg`+&r=g*9|Gb3Q=)ubBnMvH3##u z@-b9CUs<{VmFX!cN=-o_$L|U3ENK}?vt}SKI~#eT>}t}?m{wkl&BKS``jI0U z7{Ug_P6)1GLRhf}n2m+!Rk+FVdQ^KH%SX5{^i)cYpyMkenSbuwxtg*hTvhl>BRr`j zx#9?~N8=blsxK-^=5J_d_$`d3Vr3fZWN$C;m!FUKrHty2%e||{&1aTe357srCk7$S z&&5VfRmf+6G<_Za6HGx*@9I0zdr#2ybOpZu1b+XNDC9f7Ch}JS->*Ml=$Mf(=Gc%Y zA#UBs;TS(-8WyZvjty6D$Cm4E#JcS_VBNJhWBHEjVR!3rLN6Hyre|T-*koMW7=r7X z!UcZu*xhLrx8a_#7VH|GjN3;>;_zG>jxWl>{SzeI9TSH=or$=2bSxg8B4O;9Xi;wj zpji}PiYT}k9G`E+{>cX1+8Qc#8FBA;i_{$}A#D=wX^X{mbsnPr!m&>j-<_kQabrsi zR*1UmjI+ZeqdjJ4I%96G8)jvBV07X@j5BzkJ;5CA4MvJIBt0=q6;G@z@FeD*FVCCb(ch>&v z`5o8C^Wfs3o*o+T*m)5kp>0TDFd~D)5E~o{b3~*-H3|jM5vYre#)!x$OpA`kGJ^#> z3d^v)qyqEPY?zmxiTN4X*dUE-X>$XX46DPMq1BjMU5v_9Bl1&C+B3PT;sVrGR-wMS z5tStk7*be`vW#>z7v*4>gvDKzC0JZtitF22u&$yK8%u^@Lq;)HW)-0+J|0QVj)>|z z5OIUOkQ5{#x2*sr`ErfId<-wmLv6YVWdc1;ZEMTQkWhCBuBooWe4_;oz5$qDTPG5S zT@~Gc)3g%*BTif}y&D_d#vil-pbBMassYf{d3DSmx!TvKu`XpQT^WDuG*y>sNHnJD z>r{DYDi4h+jHbVq{@zqxp3`4nzu)tkG%e)$QP1bl5>9Z0m9kuGhgqOl&8jdhqXeF|1@T#t?0w`0xL z9ayntJ38f$lHKnc0D87xQj-vkjv3F7uZXOKq&i4A>wrlXdvuxY zF)7U%Es>(`!Uv*0bRcTN?NJ}+gsK=9Wcl@l&8IIi0tO(}yB9Kp*i&ndoUlR22y;eu ze4vE2$*@_BNHQkFl93_6OGR>uSyOmM*+#`g!&`vI1q7(-l)BRc&(BZ7+CYB<1_mI= zKLD{I;YbJwMtVdPO5+nzogksB(S+eKv6v~UZbm{PmS<*RqbP+X`2|>2T#B_>1-QDr z8mp=*F+DdO%gb_QzX;=Osc5&^&}>LXd0YZ2Bvh>(QjhY|Mhq!$L{&+tguj_+Dk{Lp 
z(n5?YE5MTGI$S+sBz80o!}f|=ska8r0<4H#)wQ;z9xEVXgSa8=kQE*)b?44}H zE>V9sHV0sPwVS9rKOCAZ5S%GcoER;6Vq|N?4MW4QrOF4_HU#0e5z(S7L*&>`dy&tE zia>0t4i`vxV}^vb;|%@Kk=PgQiTyA#z8{80^+sJpZ`4E&KuMT@&A%Tqefz=cB_V~A z9n9`_C=m6R6X5`pUq9ps@Ji(`fR(UINr5#h6G;L&syl7DD6<(c64C_*2L7h-^w8^m zjwcGwSAgeF-~}Q$I0#XZ5r_;5f;ltml%Oj+4{fF-OwP1mxrCrg zD@w4ct`=)6t8sNfIkxAQVUfUao~XA4DHg0qv0`nq4cFR=u&bmA)8j43j*UWLaUL4# z<*$~6zaXpb45AvjdrgngC6@W{;h(z?GB=yfj_ zsu{T=&L+=6B?=S#u`1~B2d)Q~KK)9j&#N!1woKE5M0Ii~Q}ycW=RTbchsOKUoBo^a4ID5Fl-Pa3I39(3%-+Mz#Gp5d-fhDU}V#@3}Xl`pqwzM6) zdncse_KpPHI6M+JkBG#Ltr56Q)Y~m1VsYmf6Lxl(aow;4+%VjP+eT*y=u)w^Cj%28Ebg|fn8RFoH?QouJ{!qc|& zG>k6JLPwebbE4#rmn9c_LJ7*wrbS;5$lcTX!;+x zGJRg=Q=rrvdu3JKe2D!=rs~z#snXbJ^yxg-o2p;sQ+ZVa#0X-|dy0-9ZB{P@!^_(9;;*3qu5eB|Wr z0^cA%1aq2Zh84v_Dllf^1ax#wKyGO{3~A{Cz5gG3{{i0EeV_T_V_9W-Veh>+07NAT z5^Nwrkf;O!u=if4aAr8Y_b!ck@70oQS+?UMS(a?cj!T@fX`7qf2*({cINXwr{SB$KXW6*FUyCOvj09s5uJ~w`TrsrDc~p2N zD!Vc~F_MFa`ZDksmt6}a5t<3O*^`Zno+MoMrQ(b;0V}nevB$Cz3;Ok#)ybH6n=q~4 zipk1t7?afRz_=+6!<>dIwj!ucKttBuuxD>TN5M8UrQfBL?X{{ZFJl85Ds!o3^r)f2 ztF~0bWHO`Dpo6)}sPwUwhauc)f27uXK;%W@9@} zlf%z$g!Y8;eyeKl6hv^KzN)}oUN)Ss7*$9mfE?6?~Z z8F#}`mIBRPpMiPfHdNfR1Lqg_^J?Wy8@X)xKW}2Wfq%{$^B2rt(w@ltYD6oT=nGup zpSzi*DTrFx5WSwXxBDdc$&C>mYE7Pm9Pf4@s{2skqsHy~k%&H?`hGOj19Q1t(%aTo{GHE61cm%&^I;?eN8Qjbq1_^eol`^3A_gf z^6~IcAs%5qJFVlgQk>~XRGN!i^yT1^KObk^IXKvqtW+xOZ%oAjdpZuaWa3C`29;k1 z&biZZxib~lx{~q2Y&l+>(-4{jqkRj`w#VV&!9qMVn2!hgX>YsIxqc$91+#F8a6QR@g_fN4a2IVx zYvyMOz0WAmt*y)3ibh=)6`oeKpCLD(06~C42>en~Zs?D1 zOu1O)0+vvxYDB-53s_(v7p-g~HKS^;3H5@G4p~P+ma8|2O#iuK$o^$LX`&+gR$--v z3PxE+&LjFg6@2QkZjb2mQ*X%V?IqNGM2A3JJ;!K3N=TC#wN^KJK!pFVhkX9%lG#w6*{6Gb&{MnFM;>{NdQNZ7Vhqc4BekQB~NaMZ#5)NR9pWedhN>oHQk0ewa5F;Kn@-6dP$&bbH9%)8;r-2`XOMyk5I zVWF+9NhRpyX=YiUQM#Mer!uoQqPaW`ZB{BgQzi6919Vh+Qeh#ZAy?Bjnkp+$R$8n8 zOu!axXQP1^U5=-!JW+=E1qCQ5E`qM45RTG3D$fFp8ff`Dy*NbZ&C!1D_WH56yI%p` zc%UCMJ!1&k+YxN6#~{_&J}SE-?oK?|*NZ2;ZhXF_1y33(aiz|PlLXkV##-$6dT5iK znBe{<8tO4*HARM$udaa0YDJ*U4Zp_=Pr!%jRvT&@b(HY6a5R`P+-$>fj~kDMdvU3; 
z8Ky~)>wnxRxLuMiD)ZJK%+4ajZ&Li*Nhgk9iBQbmPU5*u9hoPF3*492Y`RZ#4xxi&!E^PJUS3})arm{R5 zFtWacx=u7u)wtb`t0DVQU(39DE)wdz>|Z^X=yOufRYKi|EK~Ory}cT;o|@9^MQzvk z?=7Og2TAq&ica~iqf@>|B?-Ns{QXbycR&3Z{+{y_Gc!oyy}DuB7A0d-lCdd{&W+&9 zDJzAotqrw~HW&!Lb@vSwK=OX7z59lWa4D39t37$l0^H~?!TCTgE(VKm)?a|5&MX{j z&%u&47Sookn5~Y%Qe7&R8?vy{l!-(3G@S9J<9cr{9-An~74GYpBMA$}4Op&?p(0Gd zIbRlzw8i6)Lo$KNgNm^wi2&t3O_O7j$>wFX)$J_8{r{cR1J zGUVmvC@L;m;YF)Fv9%vpTx2OJWq$<(UP(zYO3G+&HAUz&mSNbe!H7|dW1T)6_V}>U z;l+ti50+_lN87!a2=$}a8%BSy7h^+x=j~p>FpLwvPV8xH#58T|erqKawHXuULU=S9I80WwHqhEqAxdqPt-Tr51fR`b z1E1Z7aFs~`-xaqTmztXJfTtZ7+8eRgZox=nCAwQI@Y=0ttFK2#y%SDrJDLs6IJkI_ zSET$eH>x}e@+Z`Jf5)V);48U}<@S!g`Bk_{hkkkZwm?Tx8XZfB+L4|U1_SK{my7zA zHg_uYq8l5_4X|g=9;FSPT=;+XXMd(N0+u#@($?vS~*wQrzo!Ic6#y*Up3UvEKf3%QD4+` z)KOpLeL1d-wI`oJ-jkI6#u75vg0v}=_K31vo>3ybI^=Uo$PF#$^zSKD=tbv02R_M2 z1;)?t51b10#;0eHkeY^#+qYrkj_ueO6NArg-h^$*Nl2%oGg34((R~>k8?f$7khYX? zx;9XZtNlg1Nm6mRJ&98;4;JD~ppf&qIN>e8DPJ*;xeBnqH46*2WX#)Au&XJXuM4o; zn2DXW3E0;hkJG+PTnguNnuXnUF_}O6zX|Swt#IaUK}+^#*mJg^A!8$IlJ9~oeLWho0qR-Eg`tl3@Jk{0; z>_p*-D$LK%R~}rgEiXl7aW3jgvJp0wVbW&CSZx(fb@$+$zX#W->`ps9xZF2@{e3+c z3HUKcfQ|GIqQ5tUFqPSGPcOpG4tS_Qms?u#VrUT0y1EoVU0^=a*^Os{19-fv8^;WK zT;^x`LbnsYcW?|Z_Bzl*mFY295TcE1r#)^VxSHy1ur<`8wXGSgREnPZ8Vu6b9`U*H z5as=Krvv-zP1xCF!5*g_-8^=8T^)Q@8+^3EtyMO(+nTU2Gfh1wx=xgxw7dH|1%PrZ zswp8Cry6o`in|6R+ORoz#hK!IRaF$RX zHZB7tNN=U6-%2&0lQkJr>a5u%>UjZ8hDZ_@S^i}=>7B;RQ%lbCxl<`&@i_0{$0Oq8@47S zQsJdxOCqNfExDQs4ej$*ExtpIGv7Ki2LB%BTu<3Jl# zpd*VQEx_)EH1wCOpBz6+z~IE+^(BScHvTbTe)(N=hhw=0zt zt%P1<)<)Pe)}uP*E|}uw@r`#Ae(TUAJu;{aYxCE`M!VKxsep~D&TKF#gMk{Vj07G( zFw;nDTUuI*^wbQ3E#;QVQ`MbJrM6apCmCIiCn~S7uu!S(HR&qgAcQ>n5{yaT)s}k9 zH8tRD?;tIE5SOUxF4A_M4f=6nydU#@0gMF$80rdOq`M0vJwXg~2QkzgLcOsPUZV*k zWdzHrcH3wzt6BT!$IAlRscxxW;q16y~-z3#KYcFsIkTo1TK&9a|AH z=+NI#2anl+*}-li?kB6z69p$XBLOFjllze=bsM*bT&$vUqG2NdklqmLP*Uns_a)@D zI?9R6%R|8>WIt+ORhbt6NzV+KmyjDkg^wBnPdT>Q09f{?KHOTiyWK`gzhD8Qz***{ zSB5$?l{_a`wif`(vDAp3S7ED$C_eRb%eHDy5(%}BuAGOg7d^6z3`!8)e>eIV|K38r 
zA2sEBxIJ%&-al|E;1lQ#jE-R&VRz@IEx2pTHr%m!3pT}5z~tpaTU(2SygY2r%0w37 zw{FRvjQuVG%~y!Mw660FDVV8C!U$E@n1vv!O5ije)3vEsY|O?2t?Wc~Dt0+aFjJq6 zsk(HG5Lh#H$y}G~bACX(g+PnJh+#X14O=in1-OSmoUtbHy{#Ck+=1STE$AYwd_RBfNy2BX1L$vp1kVL%NmSp_Jd%X5Ou&HT4cylV}a%*1?)d@Re;x zy*3GrmLfD(8)4MSfC3hnm622A$z~PMo61pIUX0A_JR~M(AU5vfHhC)aKB*i}Rd@ot z;^JZjn%dGL)KwH>knq@TtHxnU=bi$BzuiM_m!$GX{_h2W% zGwtiZWS|2>olcDOby973(Oh2(Z(S3Db*&ipb#t3>Jhtl~9vYj%C7%m>jYT+6nU2?c z8}NfUFJALov1Ba95FuWjumjbJaWHP@G&Tt#Z575HZpE@TH`T*lPy0$0+ErVFy*{t< zAnzypdvVHGfHS%@+-#`8aYGJv>iEOdY=J*L5pyj~y!*t~Mq25qsUYW9UVI!X1XN3@ z+CF(L01$g8uVv5ySywJ>ftd;bnHM-p$a?}k8LLi0_8~x2k0BvSNkAt1RF5OaR7X#i zWopf^08+ZHNtZEIO-a9CSw~c#+ILqilaysgj~-Q)iTxD&E1}+uGX473zpiwPlXWEJ z_)>-_@5#LSStP_Ns}@$)mGe?Ri+l$4bIP`B5efM&qB9zP%HM$N{FKw5@jbD)T|Irc zo3PukJqDX%6R&--UX@ ztuFme!j8a8y90GxuQ6`}8gkd8F7vZ!NL!~o(ae~#4poKQVYil|zE%&T4EJuThS^w! zD%zPUvpmv~pwbp0zc3HU>Dh>hmm#BW8#(2+_IS}17gCNVHN9eOMd=yJ%TQNZh_F1w zCp{Gd+-9ZPiqq|GELv(X&M#QHmrdAom~-2(zt@A~{bAgko5s%8di0x0&{I{4P=g+W zJuZxl_R;qCqlb2Oq;CRa;So#+da#%F>r{UimA4JMY720>sRWO z;{Q3jh!3Z{xNIxJbYVOui?eaQhqh13=%N63i*w2lQxWB-{Nk=cL_#Vx$-KHh>EN%9z%1_xc+?AC))xyZ%hl#BD#XN2%06UXg`R9PIXS5e zFrbF?+ZE8t_UdRbq7tLQBl{8SD=?M8PNPG90Hr)qJzv>gRhBX@e414G!jEf$+%60kWg8Cw!k zu{n|Va&Z|-ELF%O7jpDEq*at+U4Koun%SecBVE(u-QM09Bq zIiH9SGpLD2pgaz3w4C137d{ZW|og8)276FcNZ2nfIVB^-d+NqUxmoEP-dHg|!fDwbCRd{~k2t5s2A@UM4}9 zc_$1R_n@jQ1&uZ%s;w4i^i+D5T9`~0LeGdAvj(QhVw4o+AT2!&F^TEe7MHp@FzCm? 
zyIti8@Wj%J%8Le`6kc1jIyCaj?9u9#0YIm7vT&-V8kZUxaf;Bp(9ncS&KArV$}rtz z!F+oohO6{A)#<{`PzPR{?!_xpVXQP85Vn?MH0;LI*cisgW-!z{fxh4nMh1t`EikF)_xLi?#$@Bs$2sSFk>qo0U%6d1hr+G`42R12vN8X)sG zZro7HxnwLm`JSbWOr@Fd2oI7_;nU^w4*R1bhNLc^>`X=mhWUWNeN} z!sgf%+_g0hcWsNumShT;at$&n%285Ng=`9yb)JeicnLO-CJ7y732^elZ!b;Yd?GqZ z65!_4SCPz2f~zDJ9Yx#GUPxf&ZH2ou4xaMu=&B^dN;kq!i|Vh4M~GI;Qyhm*&UaR9 zM?kZcP>e+%m10*#0)e>|Em@nC$_Zz|cG%LW>X?>9!Y!Q^mLRLkS`RB#p8(H9D{D!O zSXo=n9caiQ90@@S!B|(8jHc>x+7KPI1_LTBR%og63`P?zt^pQ(1?^})ao z4<2_oagnz7NOJ>@cC=y2pu;l3_r^po{^H(K_%BzF;h7#M_Bg9?XtD=0!~Il!{TK=j zVQg>$A*zyKPXK*gE)4iu(d(|qkh2#1T&*}q^|w3NiUC~#F6(H&DjV=Xr44hLRG*a^ z^t3jhv)P7#!wyGXEgEZTXmcCkXt2Xw--<3u{>h>YyvBX}cxDh^3AN)~b0y}giZE-{ zVN`3&j+qH?aXTr9Y3uZHYjhN_q0uH=0=4`~;uf=)hPbeW!% z+sOzn67Rh8j#8l_`T_dhCu3o!<#CoK z5`>9pE#$hno6(rP0S;Qqj?!4P6>a5|_LA09=GzEG58>HS91ADm=q%if_M%OEEz`|t z$&oSdXkjy`@Uk{4qodUFwZKl`R+V-q0ry#DzyMpe!0#@??+#QGdNnduUiKY4*A1xQ z`_=h7&}b+`L$x$O)+s8_P-TVAC2fiMBuBH`lxA^gTQ-?D6=NEg1 z@p5^q!#nYMKW$}SJ5JB^V4#DR)K&u@;pA>?Mpw8Co!y=2 z^tQv}w8P_Rh2PtTu*-=-R~sffnz6IVhQ~X@c%sdZCpIJ2)lj#MR*JHp+ar3JIv>4_gjj3!J=yO+ zwchO!Z7Wr<$+nW8v~BeA+o#dT{G`t&%c8%p+rN*0>X>Ws332PY=J&|UY-)M|$*H;6 z7Mn)3mx?WM=~z$rCFT|rdIsc?4JoB1gkCw;RcCCbV%m(VjIFR_Y=bd%Gb)odLYuH2 zhUAUPdv!V6(3CG=j6ox9ZGF}T=6V81rW-ijs6eYO;~vG9)@9sHm~DZLQ^`gF7*(5; z(aCsswv5%Xx>Xx%CFlflwORKNx_7}uOIw{T;Jb&%-N@r^q`IT6&H60txp%`+vJEv< zh>iL@SZQ6Q-JRKNfr-{qM}=qPFR+Z4Cu5ow7nUG1JsXMf$%u=IU#+c;w8{IV$_v%j zs^xg1@C0}osyR`4HB?QlwRLdu*JH3ki+u(?&d~0@+2+N|Rn2(OV#mwXE%~c0>u8H=OzertXGi+u( zob7gmI=uv+2QH5b?QR!bZVv)37lzy0u)y3;u~c_G5@`zx}hV#lH|D@A&^Oe)bEc zcjVmMZe(T^BPJn>Ha8O+wx{4Tn_>}@k%w%J9)%_g;tTSTTv~*6`sDS5+y>}VHp7^< z4TjXM&?RkxHgTge)f0RMu473j*l0(snHyl1+EhZRI_j|sP1RQOvD7a@?GK8H}0%-ulvQt74N0SlK~3CO1W zO;nqksW3$u-bLuGM@#VzR23)EW){L!Q36AS2Gs;!RTZH}2nq1C#!3{HmLWYepKznh zCG`_x6PY`Z5VuWvX02Lz@k!-)Vr$iMJOQ5ABC#{_s6`JI)4bP((JBLGt1Gd~q{V|B z9$crjeWs zvC`?m)xka-^|fQ(YQmD)fc;g~*hBlJ5E6 z=x(=hDqPfkNkoI@_K1dxC@3lGlaS@n@KKMi?o&b_Dk1NwDWRUX?Dvzv7X2Ayf9iYc 
z_UiQZkoDxf=zV?Cy7Ha;Q~OwpsF3fG-_tLD&g7qU*RB;56lsx^R)DzVJjA8uV{1|t zlJiSZY_358-QV`?ENn_o!8$|AMpPzmVs0k*1bCZK9=9G9@f)B?*no<-yHOT%2lPpI zp@#4h@Kq6BG9qy;q12GGg}~d0`s}T=pHzA>a&h`+2s5#xQvS9XO}X2s=I()2Dk^AI zErg-~Pxc|ev(pw=Qw5r+{&WfJl&O^0Rb_IUoJ~}kge|RbbvpOYX&o)Mu_PU(g_$sD zD_|w;Oa?Oy1|y7SBQ%v7lxoV6onL^2qzuFmcyaXlN%2WYirazs*sVxRh*MNvbR%T7 zc}ld(lX5&!j8ZmawOY~A(t?1KD$D1>va1E-)jCX?G`JcF;DoIi2Xz)a(B{Ej+Tt@+ z7M$RAhpVfx*V&5GV*_~l$UZ!@a|YK(h6p?l4m8)}jI9=5AjrPv^y72&4%}y{#Xf5- z25V~&XsAc9qXQwYUjbf#`|fnO5%P4xNq{*CxsDDyraT=u)*Zx!fgbE@szWF-9!oj- zxYuCARjVDr@)A_#Wxzv=-QVs&xQ%L$unlrskArr%jn+8SiTnD-@y^+6q=-P{A6B6! z{|WdajX_oDN!+elqHk&yED|bk)c2zSbh|o=o=T|4h=xWq7^2sWo>#4;gbLW`^NWVA z>`yX!dkI-zT^4OkC2s#L@>+dQU3UBacdvG+g| zcMqp`qICN@l*fEVc`$Sp?QnGll^t!Ul(Siq)+zS1DR&bbMcdF?v;}tBQ7Pk-&i~T% zL`E?-6K2(fomI-_sPbfcsjR5aAuw}kp|hzJ)9!{o@eUqy9na@8(D8T%+HO-$EKIox zFyVPsH%q$luXd_1WQ(^cvqqadfGrYR5iiH()s772)ti>Jb) zt>p(-MnM)^OXy{$XCNml8~M3;C?qJ03yV-vR17T@o&Zm&yddzTJCfV&Mz7C@J;5N3 zbo#NMO6)?Y3)lPlaMIz#C3^?Xw{{>{T7rO7D(DUHQi-W&ze6J&j=X6!LX{y0xs=3*gCiHf+!sBwlM@V+MJO~kxo$Y>f zx&s8B8(wES`e|i%b_Q^git$LB1IH?jIF(g|`?AXLw6Pwi_`P)Jq$5<4hsBN-47Cw- zb!PZF97-*3lY?;OF{DY$H?BQQ$rHIDRL?S*>ts?dtQ(N&cc4U%XT z^+^#85p`Zd)>roty*~+gt?n;+8#UxT6)rNB3J0lBQI|!Bx*v7_(a=;wJx27g4rsH8S!J1SFmK$p4=<+QpLNgJV0-v(pm4j41Gpf+zCY6&aJ zy1Xs0NreRGt1{QaL?tN7td=0F&R7p)@@HX4x&!7^+FydrlyWEG_E{KH?xv!nT~6CT zi+wj0<{c>Bu?_{>*73Mopv&O#v*Mx6NrEOf4Z6Z?=y}r`#M0_@sHByZaq*01lQOV? zOm(!OVoRk$LaeLW>*Dr?$nu_2c|mKNm63_u>>L#2<=+BcSxG5$TCHMh1*B5bD9><^ z2g~&O{h0Q6aIhqOGY3gWcU2 z4|QX)yAQ|4CUI_f3|GUw_{!`~yx!=-hpsSw(AkeKG`8V60`%cPCstf-nD96daJL{B za3kz?!Qa}hK+j8M81w|u)!~J|sfFNcgRk0%$r=kDq@YXfRmx71_)GNLnAy+2dqssXl<^=Sc4tcXLgIoi(FW%6Jm^0LQAeONiK{Aj?72H zNR8V86%7k{PkldndGvbGDo}-{MD)BGDsbezX!ywf)b*uiRqZ*UE>n*cecb5tk@e&} z<^AZ8bBkUU{aviZuQ$}MI5Pbup|=M{Qzf(%8pUNLNalULjbbN;@GGd$AtkQ>nY^bH za&rj1ymXWkC}r7_>9?{XHxs)2EM_Jua#B&AolNkgLX($*itGfK3R6&3oPwItG$k!X z$uQ&*P+2>m$=nV@P8>{mT#u>Gj#bi_OIym+XT?C5zJ&@d237g-Fz3cXo4ysL1Zyz? 
zTTF|s%T9zYHwC(a3}^{EO;H|nylIV8cVyl-b=rT$C)6 z6yT{%RAf+4RpqI`Q&pZcp^!FN(xKf?*ac`~dkMSApdTxP-FRTnBAz>X01qxN;LK1D z)!Q)kQjv`Wy$A+9=;`ufpr;$X{k<3+9l=;{KW2Idu+%?eK z51MLmiFSQbTY|af8tn5ru`lF@uhs-pK^DKS95fNcUPlL&9u;4!3q39#%T0CZbE2Xw z9W^-#I8{@FZw31Cx0CzvfB8r7WMvgra&xg*T8Ja9jqv4UBFxX|E772xs<5q{%CDml z{Wcp;^pEn6=C#E8Ual#*{l(@g(31tL&{KgG4Ir5pV90yX_EwffLq^?JLWP8YDtceH zm*=VLsc?}{;im2{`n~8-x0NO)>a%nu)aB9V6Rr5R0?rEHCdS^h5+1lyd!is=9P}$X9wMN-GV>*Jx2{GNFX;uQTekFp!Q0+AqCUb1Svw zWvC=T^qO+$%1fXnaI|HG(3BORyeJ3dg_#5qrDL%GDGTLb#20dYC&{rxDl4c{y z$j4N7D)`jbWfkSf%E?yB>jZQv^aOY+^rCxS+!{Sa8X*_Z{}-2_e6@Ls*jlN)kijkm zc+zQHM*A2h#C8q!;_U1=o<6!l=&cZb^Efloi>ni~tUV!2_I6=%YyguZg9`M5-68Z1 z_G5@Pc(kuy8E{~Z;A`ivP_Rrx#n*>N`-gGf-lhyVFkEfGNV63?-3}ZIbz-8v7Vd&P zv}b0Zi7K|LtQ;ZQ*|5ilA)Zsf*MSaq6O7hU6s2uNS7sU>Yih$gEdhMc+J$GT8*sd& z3@0pRtnhOk(VH=sS%hPzW{g#s(Le?0Y&2uO!-)$M)1(QnDsn(>Z^24rd=N|BHO1mw8VU|3RA*ntBFZmlbA{-kZ6jO-#kJ!D(;JsF)v z_Adhq$a=C|Qti?4qaXc9QHb*BNh#it2F_po>Q|NPP+eEr0!l#h(2)@u3WHp_wuIDllkPDRl6rF_D`j+Hrk-gqOPZ7i=Tu6Y zH)ExeI)c(tWrW#60GaiQtTY&P%2d)M#N|3hgFKgl%Q*d&c|9RpQ4EtNl7@;R!mR+> zQprNXwU}7~9Ra4VjHoR=FEBYTX@gg(r16%$Re>k31$x<(umU)NUbM=KhMv^es(ovt zyPC;OD3upV&kKGpO|+$6uNQs18TtsS{@y;!3=ZPN*eIS}*^4inIe{1UFXQIaD9&_u z;RcsKxU`4^!vokiF@~j~VT^b8U}T^l0|R}$;RY}`GKBE(Ag3dk?Hxq0z6s6zZXEn> z=AG@>*Wt!Fs=?Dd)}DY@=?lA$YV$%vGp^M%U?#s9)!7+vwcF9zhb1Sy@QiTbhGq&PQZ7aUg(2EDGE{yQI>#a3n zuD%Xer)GHfM*263;1j*|cLbhPY21!Exye;!B%qN#uF{cTdOZBepZtl^NLY#pq#K&_ za**#;fGD9pCQ(A#_Q~THWq$%Yv7oX)Y2+(Vl;Nf1wfbCJnHMmgJb6-iEhCwzjh>}} zv}`Ycl6^|ZC@%tF0k1Temgnt?N|c9jt6jtdgaU2pJ)-s%mSL&nVc{|_WyjL{Lf|Ps zi0bE%8eKW23cPbT!hD%8yE ztbYVNft^g%8e6rAiV8fniHZPEZc@3x8yjf@eLkhd{OIrqh6jf*G%$!cLhtP8D4yT9 z7jK?8ikA=U#Y0nLxZD%Q^}zu=wR<R zLIVRBCfvsQ2N3EB5?T&~+8kKw4B=$YFi!OhVrQoxQ;qdFZfnG&%}%^z@4!W46-Efj zuKH>W`r9!;@OArK2zPqn^SV%1TMdg&0}Yj|H7OO{MP)cfJAAXN8~3(*@MN@-}%mWZvCpuda}LR zKUd%-Ju;*Zt862oz9;L-cB&1QeMJujppNDuA@9k+2C|JFRq1!nRr*a)x$Vk*d@)t#P7PE7-qoQ|p_^1e`24zQYpm!8Ya0yXBxAfpWxNHLXl zM7i-Kbp|7}OlefCs!U0PrJBmKiu0CL;QcD}lGUO2oDH^&BX 
ztuKrleZ6>kaRE>BhIn9h4#xvw>}hjhv9%2@O$BWFQrN5pIPA>`27>6}`r+OlrL1s> z#~A79$7HA%yF&w5^3f){+}PRCfqS{`BizrE%`LdM+KO|wdYpASamMGz$zV6;I=tw2 zy3y(OQ~hbQI4{{C^|{$k|xCs60&{t&`~nLfSiYfs=P$$$v)Kq z5v0kAKv&&P-c$Fj&I=#~dZG|zd_c9FQLMO>^U8YinPu=)^?O(Y->*0168xDwr-gI! zGc1?m&Ru&5J90n-k=}%|a*d++gcGH7f5jE0DAtsrfDUqi61|#`*!Gw$`jz-D$857wW9K*{74j@C=e+- zk{e3Ocw`z3bRpOoK#0>}TB0MNFwXUKhPcJOu zDgktk^!C|X(M`n{9O&Vc@C$|!qRJbg!W;5;W1_1Uqh3G8 z9c?)5cHu;011{O>aJkBY$J^WSDwX2JW;^bsnmpDS#B_%TLq47-f!65_qpPzQes2(d zmk(i&7o&b3RtVU8M@R5BZ`3c%&*5=e?F02zTyiwyg0%{lv<5suP?HFR8!s=pk0q&Y zlvbc}g{c`80y(+q_wpB4ekjyX$BX;tKp+E(%HWy;4;i39Kq4LeB}7%JD(&FGg9^yR zUP_1^RYzix!2?vqCa_&zURKUSJ+2Bqfv5~@puQGGDCZ}y1-4SjA;*jkfu5X`+E-T% z+2()zkN=@GgOTH}#s51Y#`b5wjPw^$E=69Z3yZr^UZICpCoMQ+EX^9uM?_HtU5|(} z9a&YZDaX1}DZ^T=sVt{8)XE5>VvV#iqKGui)j(gVRZ^OXNGjl3g`5I9HDx*1lT<-x zWZtr{tEsfBQxu&jxrmyJ0F98N>eJ{&Wk#%RL=g(X)r=@R6?Q84O!Dw<&P#coKu=(& zrfP4aRi0E_-yB)!B2!6s%M%V51`{AQKRh~!D;ld!N2MEA} zt`3|T?!l#zKAh=r;uKZV(V7}u^?GoFAbVlwB3_-G!J9p!c){w#>x~|z;qg&(HG0dn z@K@ImrXFVsBMgs^=u3ZYLpS zeo{fR7Qem_=tbI&{v1F3MWo1OVqzX8#X4v;yPH1ExjhRJYWDzjgdAZ2V&qE$PbADlg zqO=O>oyBGrt#;N|`>%?9lrppO3JpriD^OCQMVYp8H3?l^B~Oa9CiF~Fu_CsYc2iH7 zi9MEbK!K(}RV=g$KNWfcEh*27OZd1v?~|Y>Dlac5S81jqDo;5tskP>>f~}z*?SxsV zJFGxYM#u2d=DL(;&UT=KHnp4c3++zqao7ovPCPI?fEz7sxLMnbr=4Cr*w%p?p#UDG zih6Zu6z}#=;RpUv{Q2-MyyptxQn?tL4Tn%^Nw6h7PJ?%Km;}5yo z5b7bgNBhtj4xpc(sk^Nmdpf&tttX7H0z zw+gt3iCBHT>U5)Rt%TUcef#z)5Kti~&^mJD$gLV$sT7i7p(I2Fit>`>Qj{RIq!Lm~ zD(XqzlS&bRp(v_9_=7)CYSCqVQE^g!sDkcxdn+}pQkf#_ZrHHlmI_oYs~WPMRAxws zMHOhvHj)BEQFm&&qFO^M%2RByx}Mlvfv^Bk$`7$a{C3>6VT+P? 
zZ`_KzH*Cc{8@FRU*V(uwmg_UOF$v4{TVwb-lFA@{+vAkn5|e0iINNm_;G#wJ;fEP*BQNf=;!x zk}@xC^wKjk5F7t7@KoqUS74~nlO`&+YHRrmAS$oL(F!kZV1Vk#Ckl@WsDp~4ofgw0 z&$J*km0;mfgo;n1@Kbu5WYDygCBQK z<4=c{@T2ZgJZrP#WK|tjS{>Nc<;8H&1%H8i&2dq(ixseXK=w-e9ycj83QiSck7f*~jR z2|s_k1LM3QXZhLA4EEsRu|eD*g!WMFUZo;`s@94p_+#^?Jd*II|64}QgETIYlzAyd zQGh36C&Dx(r1C(8Ty)nib(u_6H5lD@RfVRgDpf&6Z!fA(Rb&z}uiDj5vdgj$)e5T- ztwy7dCn4*r`;l|Iyd|JFuB5Q*w(;xhy6&P63ptL}|7n5A-oo;R*CCQj1I&sudV&mXwtuRR#);i#+!t zTIJndYbz>GX_L1)pnz;&S8r3EEaoBf1biL>L7F`{+uGnHpxgwW+u=l@xfy<48TyO! z@DT0i8~z@=Rp-X<1}5>t&T+idf=D+%<%+d;9QSco=VW_TWl= z3(gRt$Ee6w20~cu3t%$r!)O<+EG^q;sGEwf8-am7wA1=_QU&^)4!8=-vDaY3Q!N3! z>FdF}3$u7-z>hDCc`4bg-a?kVr{>4utkcy7mL8QLE z9Uih=g`5fz6-3ccQA5^O_w`ADxgB=$zPwh~lTd4QZ$GX~)%PUSb5+-i-iI1;4!57D z?BkOlxfZ{U5Tz$7kJpS`HvCg8?>a%?Sx{PNfu^(yx(W+)+DJ3wz+ zmEG19+_No((u8W$pd;tVJ2W3RiR9`?1;nG9kJLR6NjYa z6s6ovPwQK4Hp3znFH&xJ8}!8DimgsbOZ^q4r$X=cwt3R~LWQ1`XIiSO(9~jAhHvWd zxRrE!JxaMzYfB4So9$?8Zb2IX;WAfYn5yy$ZRV48cDzfNy;awNH|t&aJVCUV_S~V( z$3(ve7k19zbYCxS_VnY)&;VZR8^ue(0ld&Nj7NJ0aDhru+V@>2;4XLfU|%qRVYd^5 z0Uv^X5Bdgr5vCO#=5X`DpdT;vcHy1*A^hhH2l1B|58?B@ z?RaL$gZsz3aIoKvNp~amaGwi}jW|w7PuEy6VXMJmuS*$f{z!B%X*6ur^IC)7uR8>Ha%J*wAQOJ=`*xp# zLDPh?!fKQkSHYyOgQ3zwi%SG16^TzG*x8S!UJ_%_V*(fQY;^PUMj7EMCehAL1&=cT^J(V)Wq?BZ(Oz)q9 zo;sR}3O!MI@@rtLuZN@EsX))^>QLrgRC&?>w$)D9wX~w6#et6IR=5o&9JJNp)y^(_ zzTQc7H;gx|4m@SG<7RUkMk%Ems>{*Y(~iNQ2QG^d-Bv3WxR3h=NAc#~19)xsUc9z% zA0C^T!hHnVW8q#rJ2Hj``1+{^%;UmV5#ll@rnIkDJKhm(zVd|_%HPjz+UaJ3b))*1}+ zyFB4&$AeA}ZdO#{b7m`k(;LRuq#N4*CYI9W#pRP=hXJo#xPHGvvFM@0#Da{7E^xd*L{$3{L3E8c<5bS6AHvYgN6%45^e=T9_E6zJSn6 z%PGNzZ3$Fc@rvyf1$OtQZA#v?F_L!^Xm=5AcPqIacizM0gw-ddlnq9w?DLaSvA@bx z1tS$8m7v(`d$uM}dBzijiP#>Wf}}KpDz6Zw<+Py&=?Ox=PS{B~o?4k96%jdkxkyM# z{6y$QS7Jz06{S3H6{NMbwMto@z)l5TOB(^_aw#LL2=K((`r2LaJDEKGZvJ{a-q3*G zu(jcjdM5Eobu*r6cH*R^2BU^b^!Xg<>knXDR2pq^m%9VrmS#+Nyf`{OhGX;NxUy#s zH>bvMlNR<$(2q-9KHQ+PyU^c*m7XBxXnm*px-r(*gW*sZW4(Qt7#YSe)!i`Fu>q{~`f+i508cK?;l(}6c=gCJJhgit9vYp+YvVid!SGIewW9}L3-sVAeulH{ zZj2f&SZZj*l17II^%i`kz7;=59!6L@V( 
zx(O(03@b{{Mf)d1IQgYAgU8-gWx|8Bi%;sR@SEWg+{bgfx4r?_x&4aMfl(^F-tJC} zP^FD@_oAEN3K3ufp&ad?G_cZPDi+ufzSZpm{L>h$hk|P{c0H`;8^v(WBaFAuuucPNxu&B$_>Foefm&>}hA74UUU!6w3 zr>-9zvcKqk3G`I(MKAxPh(3RbXl1z;zs``q3I9fNP4fSr(v&RC9)qQ_9Zhvz&{s6V zpsjyLbiTaXqxH}0d zNvX*Aj8tsUrV6+eWyX~CqEmsIs5r&W#?i)dJua8mGL?+1w_4Umi+wkbea}`&DF>8o zcVIIWUs^^ktaXj3w$@XwN_pPLHMf#db1O|IZ z#Kp-;T$!50;Xdw%3hvTyFD~^4aoXp?TzwU~n@s3zucg}S!s7e{rY8omI5~n{6C*e} zH;ao4JMqx&{rJ*_>v-$P8N4yS4=;zu@OEGr-|8O6S3M!T*xZ5VyZUjOR(z$!jsvZB zES6W`sHqw!Y8vp^^gOR5@eRDNRduP#20IL*_KXo*t3prgt+W?Zp?ABz73hi5Q+i<(7AT`DS6QpjOv~vI z=n35HGIbJu?XYv&+~Rg{(yaZm5HnrR5!7wfG9;&_-D#SS+_oSl<2b!!n&{&NJ zySwnn_z0Fd2}fTCM!EtR>FrfUS#f#Y=p-~lVs|+WNP9r;cd#>n;jSR&`}%QeavBdU z@5Qr64&lMsNjx$(gsbj$JjQLVI9jmZ*?<{O6J}|h7YMxNxd|Lvn!};_8SEb$!&Omw zr_SN+OV{xBiL-cRVGmv&ox!ugK0M}dE8WjN$N2}_{kY=t;bLz$4tYCp!t24Ma1WMj zjd<<+Mbb?a(kj~I!V=o?;*yEznKkIG#it#&L^!9@WBXBFSP!e&P4KxNX0F z893YgWJM)(Mok2I8)NebyaH@bF2<(#Jlsw2t&h!8z<2kKG~B&CMad1E-x!mIdk8Lp z-Flg)6%}bStP&$&jE3HNX+KEtiCvaQHx?FGpt0Ej zOO^FkpeGhr8YW9chEjX`&)8e(Yb!lZs%k8#Z?q9|EpQNQt<6$H%c+2m3eG{**U~D} zHl+qvdSrCb?h3F52&-kPv_l>z4zxAnT*!wb-Zq>LxN$WYz>D+q_}sn~Tr(9K-#+gx20=9GjiOAsNbQY!t_*r*MSOyLazCJg{#+onAkl!LfLyS&fs!bo)uuI}1Rz(t^^ zTvw~VrfW)XE&esdt)hoj=oMyH!(7>h`Wg=^D{L_88W8RpM}O}GEY%ih4HZgd#=0HJ zW!Mm#kBxB!*pgU;jS2aadOUX^ExiKbPf!&7K3{KMsKmi?J??_j2 z{dNIk1bhO#^#rDpG9Lv%6>O? z*VZQ423JQ1m7a9w7lr0UzsH4z&UPH`_TuzF2-hZtadBt>=fWYJ_xkbl$OPWrwGYot z&ft7+4`$tM7$6iwZU_3P@Orqf5Fr}w>Vlt2u#4OEg}SjYJcix9RB!$u?j4`Ta|^rh z_Pzsnd2k$Wb&cS=lY8;b#4fxtI)`WX?585!he!7xP^LEts)zUQ$8#r7;@+K0cyvX; zcN~we?876=yKrNA5*J4X@$k|-9$8w%rIAq_(Fq0znM6yU^R|M^~r|fxd3|!a-#um;O*#vAd3%DzsYknC@`ksmWP< zdFOt7ZTBHO*EfOZ2d41S#3J54cnV)Wae;YWv9T}hKZLJbxP*6Z+=~|u9l`^P3wY|# z0laYXIG#9s05|s#e7l!$d0`Hx3BWURv)J9=gT12z*g4pZSt`-Rz96PToj7}Fg%?t! 
z@u#pZTGgqq*Pyo+pLRqlnq&r;n>>WvbPG!IYGKs2pw`lk!dwFy>O7d6*^4%3J9LIs z=*6a1B0j@_t%>Edv{ZWW#n?*qw<#e4z|Bdty@`cXeR)_PmrL+5V{>jnFk*#84Myhg zA^7g5+FQS>`b7P0Bpf9pDljzyK&m}?HswaD#!a-!GL>Zly-20T$I#ofJwbuq=56t) z&>9tsTPcGNtd{2q^kk|G6)yvbCMEtOdn;}9#M(;d_o}LDg0r6BYea*M$@#`wn*u!t zA?P6Z>;$4fPqDT>A3A-V1fQSa^P`7qZN~4wzK|1#!akfI8pNY>J8{Mx!b729JU=vr z7se*=^xy#Qb+zM6V?Fj(SK%C0-AQS0*WtosXD9l6RE^ys^n`-wrRr;K;5pS=P*YV2 zYqbV`n;AQ;b$GOU5T9FE#w*NMm-gdJhfm|H$1dPYQcer9nu-a2_2@7}n9 zw=bT@(*)O(hgb0Y*^_wT;yFBb={z1ga|#!hcjFkrw`XDu2Nq_P3XtWgQ7nxTy8aG~ z^@VWsz+PTRqUk<{-Wu%I;$LGZl^MJ+=ckTP=~XEfS6kWui@p^FIR?~NoS2|eZ)#FET;qYOcKYA4VXn}Xn&EVwzz1TN9iKWS5 zOpWxRw>yASCy&X`s6se_oWO3i9)lqZW$9Y1#itj)(!P=xra&)0vl>NYnXar6=E@cn z=UJfFHezUC7K1~>${3(Zla;ts3u05v*p^g@9jQjdrke?@3T&eS+>)#z^a#L|3T#c& zU|YHtTT;uhi4fed3O^;|^CK!y07xsmIjMjuF;9V@)Cg~k&8CIUxdlEc7u=edh0XCY zzgjs$*xk8Wp0|Eml2WUyRAj8y-bw@M^_)*g&O~EN+b!rtTU_atA)~!WuMB~n3Os?I zKu@){0=+6iuZ9Y*P88oN^qOt;u+#Q7*W2J|wWF2U*5**`t%vaP5PHEt2$Qs-XNP)m z&g;bGj&?joYx*!1)9HW*#~pTD@VoHX{3uSg*5eg!^ZoHTyf;3DXQ=KT?g`==E#%Ff zKHMA_#`)ep91R9BUR#ThQ4gn~909AIugzGYLOf5^`G_}wr~8NT_~-=oQh8pUp2i!O zF5`>WZ{VFr9>G^1yooR0ypAv3yoL{7coOeE{Rn>e)@%6jSKq=nUwZ*xdj1(aee*tC zJboM(j-SMl{VUkNcQ1A=%wsQMy?1&F)5HDf3kNVhF^mHT_D4v!D(z$<)fxi62n^St zw-%pnM7NQY;%9+gL6(*FwhsCVyHc)KR#=0Q!fFKkL%go1V5%}j8Yd^DTM?I5jpS?_ zcBGgIJ~QGd6XPi0KF^RO&HgaX4+L>M;KoHN%9m-0&)KOS ztF3sVt_5Fg_2X-fE_|WA3lICdaMI_&_4!FWbZ8%5zH$j~+;=a&^vHww+LMpsJFh&4 z-~Elx1@vk9xjV{feKzU&k z%(@OV)C4GEI#FwJp*X)9uJ&$BOwLf~R7aqflwm_$sul6+wMfZnKvGs6lCqh(HpFMu zU`K`-+a)ti*p^|$=Cn#|PS;^8b8~71Hl~yjfW;92GQ|RKCj2C~rmqRw|@-14LhWggG!GoZv&?rq@shiR#Jw*x@+SOY>7!mrP+#FDl)0D zReNQK$`k0RBdthzo-UMJYE;vu6CuZo-|MnQ*BMr^6qlBXUI%8)IvneCW7gS>prI0h zoMQA9XfR*fi2JGbUR>Hk@D1ack$${5GmMAFdvI^BACGqW@M^dluhR;@6B@t|{e$?^ z?rHqry>s}Zv7LC8+dMiufyWQ+!Q-b63f@3;zC-ko#3lQdY&B#8MR6#5`GDUUo645jh1&?h6!7lo70WhNbqf- z+S`y)j*SH0CZ+&zGhc5?q5_mk5GuczOf3>~^@z`;HI{aOakRg&w7Ft+rOYojLjj*C zzD;qW`XbO%fVT?0^_!(na4aouf&x96mS_y9Z{i0e>;!ZGPeHK0lA`uxs&+>cbr%gi 
z>2s?-^v7T{QtDPIPl1!Eg(*YC*VNR&My1ze7vRy}wza_FY@x#IfVZO!ogGf}cwFe^ zd|#U#GkouGYYPt1YCp#154X2sXK_AGQX$SY*)YOsC%@=mV-p5iT5xh?3{NdBrr3MHHJOrCX_d2L9| zwj(jUp3^3X@=MC3`b(=JtZEUPX(jY5*phC-_N*#wV{R73mtnw`4E<_Wt|~tbtu8@G z>$`;ta7Ts)aalU-NGp%D0VMP`#ph#dQsHWupHzBeDA3y&E0r12j!(++sPJfS@7WZm zKu>ZbAt+ti1ctdq6=-O-!(3%iz!#lTgDd5F0y@>+3izbCia<|&fVVskM0u!@fRoUY zvOWP{odUgPrKg6o-GO#zD?*(f1l>+5yjJuQcKyvZj5SqZ*<8*lj? zc&yfl>&(4o4QA`jT-S=lzAhY{9>b~eQJkhFz7`JQslhJ1J`%)hK@Yw$F@%?UJ8`|C z8ZS{(0XmA_Qa(}Ie5rLjsd%JV)Buev|P@3Ne zO)=S^YlpegiIRdkG&gh-dh@U~HWGR!tSi-YA-B+lf)Woha$1m@)u@0^)Lv3{J#B6S z!A1CG(*oxchRhu~HOSDmBDu5yvANX*qMkNa%KNm~k|yPUIzq1;TT{!ijquwlwZ934 z1fDd2E~M%!!j`0bY^2)zm!PMpJ*6f$QCTL_^qeBpH8d(C9jlcXl4{wW3|TLr6X=QJ z6Y#0fi#)`;QW#*w9E7DT5HWsO&`H4Td@~;Auy^J7(s;vO~3OC_7w+=s;cH_sxZB%H@_=d9zZ+A7}*{)_Bswe!cWjNl|hLb&B91DAK z#BIk(yBSXgTJc9G4g$|UiT`-yAigyr{@hM{I5>v4yZiC-@E9H( z9mRp(5Uw2Dix(fbiLbx*Dt_y|ckw&lejh*hCgJw(*YMrH+Hg71Ux zP~}needGQ2@Wrox6;HhQ0?uB!g2Ts-WB=a$*fYC;T~jldbA46|x_9TjP8c{@MmF73VsBtRbD=U)L8D31$(KIwS)810) zW#u}Nl-7vCQZI4}oJwZowIC(C5y?`E`1#|H;J5eg!RPzJ zgj+Wr3%KxkTFs{&ZFnIV#7(ywPfkqW{J;S2TiJ()&!52?Pd|xozWo+{{NcCpyTADn ze)Qo7_};hQ!-oXj``>y8?|<-3eEXws<2%3kA-=`wH>d)?^4>dm?)BGj@xB{4eDWmr zuB>2*aGjf)!o&Ei#-Rd=^q)b*t-|7NcvKDLcX-4GYyk&1^P7-(xD9o~< zIJX}9atADxE@(=dVXf*=px4~m8c}*hB|cW-wBxfSYnUGVj zKrbeTfXkw?%dlX3PBpXYR_@5Q5Pn8Q>1|2XVrv>zp8~ydY)`F-fG>%*m(bgq#7r)r z`ipdP+Z3-hUrEKr9m&|l6z~c3Hmx>a`M95MBDTe+!C?X3zuNvXk=E@z_h zqM@fgoJgnDKwDOf%JLEzId9hMQDu@w!B*7OHlV4|j#hgs+FM)D-e5y#lMUVVHRvbw zyrsFg(rL$Eo!f=K+!@Au4jn%9RpY-M9s}Nf0yw(|f8edh54|@0&E6^e@%$)0-{r;? 
zuM>xS9$cKB$AeT-=LdbbOeOeqR|j702@q^8cqrt-vm?Fu;;vadLjXR}AI2jSgE$fL z;`w8Tae87L*9ode&Yj2WPd$lmeBn)e|6A|jN8kB2zW?nH@SDuyC31(-}w%{MT`5+yYJ$)&wUQ}-hV$1o;Z%31nA`4G)Biq(BI#Ku7FQbw>x(( z-MXw)!6z9(UF2e0i?#T4BXS+8&|927LEzOZ&@0JnKxJ73dfKuUSd0z@dNN#GCB@CU z!creW*N&{bHssUdW(xFZb8|{uNH3t8%eNz?um#CQcElGn(%OpBvvO+Xv|1_S+nQ-% znz1#5(4*D8WpPtA5u2;1zA~kUhk!38tqj}KO0bz0cT++xHWPl*ghgqnOtmK|N>7@u zsJ2$Mw(AMKt+6T4P^Q*3G{90#iz{Vx%81JHFma>uT#d}EEF?&|o;(gRDN%V!TFL5D z*)#;7wzL>J8D2_T0keV7Gh6tJP=f|*q-RE3eFHqrP4G9@qldP3tfd~44K?U0%EsgU zKK#zE0sPUJ8}HPY0*d;NoHY!^|-LiWc_If*ruQWB9v+`|!c=ATE1dINTAl=zz&}j=?DT92GLExb_Tq(^X*vPxl_5U1axXyo~rbuYnuGG z7Hje8M%0flCyO&G^k{GMnxHFfQS7a*%+C9}71Ps8%CK>jqV%$gI*=i@w#zo!|1h@q`C%rNe7ZJI^m_Ym$rM-EJZFUX{sVaNvTa%1b`o_zO`73PbWSu zVd2%td8xfE&S@g_8Wr%#*n0YkW;(f6I=TkTPVYfSyO*q>(lgW!p{TM8Sp;5Ish6O0 zBb6XbCHT@fpThaHGA=9aKzx1+;_{lQ=xPy{+kn^{8+K$-@d^C0YZUnH$gROPLT^W| zs6RbnsOMD5`m}_ePU+1dHM>&Qw3D z!cmJacN37uzTk`j_=!xvxg4i z{K;duapfXje&%U><4a${_dbxayl*SO6NM+x`|d~Ip{*tOIQ{mw-^cs!zsLL*zVVH3 z;KdhTz`fV6DJwyJMG3lVt1w(wjVUU+g~l2zHCb_@%Z*drKAi0Kd~yZDIqR$%vo?|oNMc}jloBL#ZjA@sie!MBv@H@^NgJpbIYxPI{> zj_=uvBa6E*KQ@lR-X3(cIZ&(9qcy(*i>@Fk7eU)CzzOXn*J!sE{|e%E=}1F1yH$oDv^$N;{EL9*Dq?AWScI zBQei`^inqp3?V|$iKN0-Bo^2e#TP@n8<*ce=rtj}pk7gX@r7JgU`0x4J(VF9A8qb- z<_^Lymg+Bt$}c8egYBsTy;7yuMoelkwk73Zb6h4BUnZyN*c9`z%~gQ6F*2I+miQzX zYHF47^)y;NVW(G`qi9!~qLdWmBV|Vn64q}(!kzaZW7AfYC8t4~l>=jbA!h8(VRd`J&s6|8ig- z@Wey-ZwoW{zr+3bi$Fhrat`2sj?Li@+CBJfw-@hrc<~Tb-*SBuMhU=hjTQcyYD`dB z&G!bW?uM~4GK`tN9?T5(W6#7Sc8^bDFD>Stg+(mw-ihV?dvW~85u84L9H-Bm#-+=b z@E|SfE6+cVuYT!my!XyG@X@zFP!yhGYncK$HKk(XgZI9PFMshZJn`rwxN!0$PVU}| z6FZkNH#&yl-d?z!ZD^=8!dGs={=gui7b&|F=qaERsi(@lwOET!IphJ~%JdfWq@7+1 z)t>zp^h~-I1$x?28^%T!&>bFFg2N?vRz%PYx zOrzz^rxi{s?ohy&kZ(tPUX!Bu5(=9MK54>IuYfPHxB=-rV1dbn9oc5uTiRUa4yr$? 
z;D{9f${+@`x&pl&silZbE8(;NTa$7k0}`x4?^b07p{IaPx~0V>q18fkY& z=;yn#NzM> z#;Ne8#zwJgW)4eJvsj#-#nQqO=6CMI&gEqsI(P`jjvT}BQ>SqD!g<_x;|894>PdY5 z_0QpJU->fLC)89`_u+Rw!293&W<=r9)_&{VcktCOe+jR>^dj!RaUG`#&?7r{fqN~u`4j3z)cl*N<=BW)gRflSc^|DRxiR|$`C7@-!*?4W%;dy zo`6q!W;DX0cPRE&pf@xygMt20n9WsKmtX8fUXcfRgk0TWOT7y8QkhAG4n^@L6*-Vt*g}ijK zV+4R{I_yXn=tXR8C3eH#JhON8l+H8BC>2Z)-d@#3UfEOoJwS z8||zSdOhu}z7iD#p4OnHpD#msS~`sJDX5M~M)i&)R1tc*6lPW?stG+?ULJe}g&3gP zn=Z-2WOfox6MC->`0?eL0bI3L@mQ!hVAV*J^9=L`x&C zmzCl-8l3n+O9=nTJC5J7`SFDkBc3kP;Y4vM`gxq@l2TNblt5EljMk<`4EFV6gf@4I zz>^#q8phbz7^bQ8X2z$mFiUGo`?Ycm7$Gl&!IrCuV(_IBU2IRl~x3iPi0p~(3O;jsrZ5l@bXH!kju>EdpTud zbvuz-=tgFlk2aSuq}5F;aUr$Tg_M$ZLeD|C*{S&KgkFoH_!0^mk!NrtF0U4e`E^uz z7TV!z#1ecvsQR`G{0P2?)vX}-XmeAGuq`?77WAY{PaT9n;I~1__T=%9TVs%#laE$M z2h7GQRO*e;==CTwQso)7FzPCx%gBT^F&&L@X-e9XGoVjPgMrX%rM+#*%t3(rm?$j9 zN@*!}XJuf2Nj8p|N^zg70hilsxYlmR;rd!EH8o(er3K-JCKzdx8{^_JQC5so^)*<| z&cbW;?RdAL6My8M!1wFB@TRVoDz6TEwHkC#N!C_h1j9cX0m-4pZ$NIeHW)PE+ljJ&S8sui$|P9>9}N zK7m(Wc^PlK@jAZnh0o(FUw#{3{?Zo}JNrcf@wFFTz|)UBg8Q#t#if&{aPrV0EY412 za%2c2T_FTJ+;BKrVYl1iuo8L`Gs=Y|!cL&4*4nNLJ@tB9i?#Ul;uD}(QRt+_btup) zE3hd84#+bz1bW?}QA~`_!ECCQ9vL0T$@ieNq8r6!A>}SFOcydo9j7?U*%IV!*72m;0RT^21tE2wmKEv}dQo zpPGO@r6qX0z76kq2JwxCAU^CEz!TakyyW%cM0+b*iu0i@DpJZGE#_*3y22PF=!S+z z(LXSR!6Dk)sVR(8{-UX#RW{# zmXG%JU^d)~fetU6t*vNkZblt{38#i4N>8q<+rg*A8uixV(~jGqw`cL}E$B&wMk#q? 
zHM?kWorGRJ@3~=2PRvK3ms{vT0l_AjlkY`Vo)_5!Uv6<1vI@D3$}fY^%hz>N+4%@L z0#P3(@H&xJ=0TQLrtS#%ikyh2-Ayg0^(}EAxuliLnw1KU#6nu%>>9*mRU@Ua9vP)g zRE~9s%Q7LB;FDe(QnnYL$!T&SfhW-WxWRI?;)^^bEe0|1sjybpqqW%?Dc7@@QEIM4 zIW4JyYR^o#RVAgOF)0I$$(b;7UXz`H((E*ta&yp9P=runIYx@Ln5#5nCxNtVwJL*m zp7FbJs?&`FE(a#9X3RF!Vvtk6)d+{V0-Dq~H05Q&k+=g#Ypr;tXBe+|`|ws^Sb1{U zft&&ysMKPTu(ai7!&qDny~c>H&Je<(UJMNkQ|S$2fbbg`A6I}kF)@MZ$w@5C%wX^Q z0xldmhD(Q!;xLup0ou`{d-vk_ffXD(Z~(`S9wGc_bx)thc>+z~clq)q#r9sk!s(@p zxW>FpMR<hiT%i%%ug>rzG#<^1x}IcSR7QI;(#8^cg{2{A4MQj>3nP#7*`-0MydVkkl& zmGF~Hr|L^Aq1r2TBDu7U0BpvNT(P-L+>Q_={Nkwk5^`&4lkG?^mVUh!1$ts}<1=+g zD=;B3s{&gSbFnQ^6rX@kny*B++mrIWSnP;Rfw8h0o^~H}Is(jyCLeN}IhV47b)HP-j7(tr`n;RXEYoj1%>ZxYE*rm-@zVFClz}s_}@$grzzw zoK%7hgsP*toxtluZ{Hv-?Fhj~l{Ye?Ku>@-#p&$i1QrRUmHBxbU)qW52an*|fx|dL z=$)X=mEqoxD0X*04pQ|UIdTNYjvZ3~c;duyoH}^|r#U^t=}7^jC_{ndzI`~fi^_3s z7RxiVwgWww>+Qxae-I;04z$N>0|s|cc`jL<9aAUoGZ#TP)it{>U?Qj<$dOVyVp z6&som^7TEmwgNjp0VfL2OXU}VUM3Y^t|^2xjfdcCLuy3_Qp&{Y+7-2zT-=QKJR6}` zi{t`PdUh(vMkM9cQ0>v`u0k(0--!5(GNl%GdrBU*C1n$QVsoR*_oOB_7F)L`qNGrR zaG(bUgBd!Lkt$D*5~Bt>Dm{H!3H0fiFr}nJpOlQ!gm@HXrXVjT6*;+S$Rhl*(~?n` zMgXQ}pfW8RwIvns)iq$G!-EB1Cyvto9r1VKh|hx)onBn+3*+)oFIGYUEE0r=J6f@; z#*9h576+S}@YK*4E;ZP3)7F491{3yHSHoLciWYN?0=!V~0D1^J0p92kE$-+DhQ~%R zF*b%d0&ihz0!tHP*h>f=nViPWl|y*^#A#e5xGwD9hl_;Wk)=hfEbqZSf^21F1qb+^ zOeGIe2_9M@5V`Jwy~{YTdpA}HPigZfqa*L)F?LD2z^PG82K|_5b7G*W8BW5vRUV6J zslsqiuVQP((#li`xyY0Y?bgg%ti`7ntC!&~ei5;^0zGYU2g>u?m8L6YWQ>&O3H0RY zXsz~6>|H*LDvK5CN=p4Gl8Ou}y?iRYT-w`QPSdkGkWc7|)s=F+RNCOI@-C!v{mhC0 zRUcKLt_!*P5P?T%8p9~98$_Ntgmf*HAEB3_bs=5TK|neQ$2LXjCFa*7h0`RGIIXBb zfnGw6MJd;d&#FX99<6Rl5q6|8Q}bzcbEx>TuyqxDqWF}jq-}}ECh3`xo`-O-7xDmb zt-*i_+Q~AiJOz51GF0Z}!JL~1lZ>*GmWtx6G~{F^BReMz1todReB|ZjqBu7n+QL$p zWq?uws@`CMn^teyNsHSR#L-X?XS##9+8x3iqr&Y8&wl!k|K zj<)t;Lhorp?duzwGMM?1S` zb{0Eno0lfWu{1u0g^@vwg?(7)4`RaUK#2Cd-Bv@TXN0b}2;(Ee_nTJcHq##ldzZxy&}S_h$^py za4RF^iVFP5%k>ZhUX+!0)A|N!b%RuVUSv}d3jESbeG2%bTra0GNW06|RC;;lFfuEt z{3={XDQ{OQGbD5L9+cJuQDpHWt+Yh}Up#GYa(*45XJu9+F~^MLJTuaZs*sQ+?f6O& 
zn_h&N)I4mb+S|SgzDQ%`M5X_&JRvPEE)_19A9c1yWhiKkQ4g)LlJi>VwB@KM$b-J1 z00!xHmYIdJ%uM8^BqKjP73Br_(3O-xS4uc)D`BEqtTI%g)>w@y+T1DvtC^O$kM?$v zD(^&pFV6IYaop`v+U*^)*5fRd+*DRN7OVBxS!==lUO&D-k-~add70d*&Chb8#njE$v#(oqWBd2#Y~SIEM+mU*d0hT}i)NslZsh#yBs{LD!(S7N2sg zUXaRD(Ky|^bdf*UZr<-5N_#y`aij}coyDg>ug21fqesrr-rBIPyx51LJQtOYo08B^ zLUy5ySzOSGvXT%AivkMx3d=(%&`51At!=UQHt2bgQtGC{^C7v6wziz=PU}{bo#!bZj7rWV#K&`Zv@D$q;JF%o*!grBIsa>Qj6BQ~u7JCbv;LmDiv zLQftIDd3ZbfN$NAh??34v^XLoCw+SfDzucn7FuK@zbFHL9jee!Q;Qk`Pat5gtwL9W4ReIu!C(*7$wF!UFJAwDxI&s5l!-!Ukn({KZydDe^Is*fu@U;l93`wyPEF(CJu7(r_!&HM`~)62c@hsEKZ&z@_7P-DSY*!6 zE->e@Fc-=B**VNj&tQIL2GgSIXlM5>%wabnI?>mQvFAbMJA;4UeEyR;al zf-D#`MbMNL@*MjWrT2Hdt^|0Z^wboU7wMI|7Hjb-#wSA0qd>2`z;O$D_0^rUxei#2 z%{X%SH0o{5SZ62?LPLA2p}nmv>4Ls2jLOn5O7i?DW)|lAkxv*(lNJShlG<+M>$?fD zFx6Wxat$G5RniI@x{#ssaVq6|w7jNHE|&_9h`miOZ$(N;6VghWkzQg)n!t~0Pik=m zdU2VR1fGfWx_<<{?S$U8giHl`>j}N}+Y@llwm58wNkmbZ79L+0j3x^-RA(~8lu5?d z)9a{Si%?!z2puh^A+HeC`6aMq6~LOuX?`(PAeWbyD`V=(fTH>e4XX48R8pB$6Pz{G zCbTr#&}OZHx4H&F+TM9nEgo(4<2PJA_z%HRyl%ANvdN4KjrDlk@5e=g^PN@?UazXh zgN@CYGjSi4l?ZnRFiLAXHZX{Z;V}%4jG~Xg8<-x$IDt1s@J&=NNanV=ha+V2vb=;FOYKdc6afL62$RpTD`b##aczi zTKsE>)$8(?%Jmq5US)}w9HYf0^vVkx6zIv5(NyRiK6DDLEge`_ONiB$1kq651AFBV z8nuJ4mh}<_UC`zSxD(paTpvofPHq8Jp|%GV)g#EF(o3hU&6W{bbOFN7uK-W1ZMNQr z!m1$h%>lmeroDBeq`C`*Vs8n)1ggBW;%21K<|gOYBD1uC0JI_@+rVWeBcD)cy+SB#4-t(YP(Jyq41B=9DC z`Y<&xgoz>A+M!|ejtygQY7Aq9-sISb(v)NufwgaJ9LEW>tMj|?=%Hh{|L8HCpshVJ zJdU$-yKrXTL9FcDgT*O=Zj^xI`ZE&~+y?IOb+&AnAUofDz)F^ zK_}ttG*!Tsoq|ci`ebJi_7Z+Sg*k+NG5UIfypW`8zk_k?zEkXwawD|sqO-b zy35pd(E7SiU?c!derOwdQAF^i>)H`73Xhpy)JWLL<0h++RcceLZA@A@QV6|-Obsn< z5q2aNAU3&>z{{o5lXAUuY>r7%o`Wm)c6)3J(y83qI=ra0H7bpib-GGrv=zNp1HGmK zg)(Y#ZXPNKH%loMTu~{3R|IQrK5DqEzN`!_RC@JXzq-5>HMGSRJppJopw3o+4ZpYePBJp(!H|u9O@s=2qZUXEz?KYQcdTE9MEwi&Rn*rDd4a=rC=zAWVxKu+?Ic zz?tdo$9OP|>Ha}Xiqccs=#62Bpc|#7oubNHB&<%(FXAlo_{1bm(XQS&co0_)9>4)Y z^4i!OZcgvSdBX7g-u*beXBo@VlVflYQ-ed?@35jI$7rD^xJ;gXG1E`z`Ft4j`7zw? 
zg3D-vpLV_`BLfXtIcVZ{QB_pPU&=z17Z;$nFU;%d7yR!NZ0srofBEx|N%#-{AcIeF zar9H?qtxKfekLY4644K7R@YIti{4%$`n7rt3E7AGp6pxp6aBur?597KYM4l<^U=%J z;@^0vml!XpL(A7-EcHTP?4nq6Fk4_KZAGgs0IR8$BF2v8T}KcO^wzS>~tN$aMK=iR!zfgnnGP!KTPF)w2pnq)AXXiG>9BsH`2>GmG*ksdfHok z0GX9ketIuK=t92HOE9{TNAP8qJCJAePzkmpRm-e!C_OZ!OfMzhO6XaTU1md8iItXC zL+BZhoK=a~lv2bHe6cd@RDuex|$laHrvru--wcoY*;h1;i2l=tF6X#ZYkz$RvZj?aJ9b=vsG1?CL|}@+R)>4 zAnfkIcy~A9GoZj}Ngy>giLpT{GFsfx(Gg5g?afV%V>iJi3h(ID6!s1c;n?&vuC1)# z#^FOaF+Pd&{bM-SGlHAT2e|woF0UNGsoi_9m+Q@m!XxZP`uZ@);|+$x81L=HSSW-c zj~C+tJ!c18dJ~!oOJU*XXei-lrlmI()9RO%qP(;SVJbahMX9h5L+!N>4}L*iH${GY8pzjvQU|mL-6IpT2O#`-ta9I4hbw7Wc^f0uC=OVmF~D!0RXA26(K#a7Ys zE?~NSKN5fc_kVv2QnHRZuO8zkKaoZ?kx*f)Ml>`fZinGo{M(L?f4extKl{hfb3>Q! zWVXRr(gsI;0FBlT)SBBdKfND=10z^>s^=0O7=H*)&OU)>m!8IxyPn2_JDk(q69?1-fRG8e5TBT#fVsGg1h>g!Br;$&gbr)?QM+ zQiHoAiE1w{mC$4ENL~eBA~aMl9bVdKsf18c553gnYPBdWEI@8%1~TONw^V$&%u<4? zGA|dF0;)Y)I{}`Hmbin;P^O(06M7qM=&Y%NhicDnt%lR6M-$ahRbCFEnFd=?4!UWD z!(}BHwOMd!Vh9h?u0AwAfujW8VxSWfT>*>`Ktnw}7?H{Z+SvY~0SpfeQIQQQ&=Wg4 zN((wWG>DaZ3Ai4DubZ$7b#@}`XA*clUN43Ty?!b_uRH_9%+Evkc{pEhFrl)z1lp2P6!W|2 z3JUZhuv7EaHMSZmL8Uf8N!m>AiG@v&*F zJ3DY456?c1=XO1V7nh&IbNimfGY6i-Ge=*-BL|)iwiT z-89MxzS8PZ6ju+Sz|xPx>ON*K@=QS#nu91ZccH}6r9iLP5>V7$mfl4udXxsrsf7(l zFBY|DRg_+dp@kN=MzOb&@fj6}PcI|*N@#Hl2){gL4z|UoE704vs`fT*jY9#Ii$G6r zwm@fyJPJ}C7$g-M73HPK&(1<-Y6>z^l98R7iu{apl;mVVOBHC&&qH%Lx2Gy}5q=)3 zy@6UQ25W0LtwC?K1)~i%4AoiDUQ~dl^fdTskwf`~N`Kvb);b()YsEvfr;m<}<9Qc=Bt*oS9AlK0WuhWTus{?_K zcDS3H(N=3iBhS6TQVnCd1{FocC@1&|seHR>jpdTM9eV0zCZU2ug^Gj$iSOStjux|KoqeXFvN{6c!dL;8WxCpZ~n_#JU0=Th@{CQ&~^;anC*X;4`23 zjPfA%AN=44%KCE5|MuVh8yXrKlyxNUyz@?Fde>cdDeE6Pbm$i7R6s|c>stKVj(-Vy z`a&1oOS@90XSewXJvRn>=P)Asn5(iG`sH=?ftQpMsmu_y z7oS#2@D(x(n0dFLrz*XTTVj!sQ-BsHt!JH0>6xLCiVP#6XQ+gRb~Gz99Vv+kO6E}Y zWfOV@*_kMz;?w2kpq3W5iAtxHcGX3j*G*d+;CACSD~7Ah=r`!F)ZB>0hI-8EjW}$m z!->jT+}rHJ6?;4G4FvJn$S58o^e$7;Ep&Hbv^R)Js=5iPkb$0F^fQO4)COpOM>rkg zbWmz^hexn9JdV9%lUS4*-L^J#Rn;O;TL-_(f$knZZL1Gcy4y$|dzrr5dV*lu&^pDlIlP7HMf|3WQFaIH4>TAPTfLZQ6u8?zjW;#Jb=A 
z{oluL{Kjt}m&cTKRp_Mxm|kgSM_0S(-0LN7TE+`T52+q65|yBW)Xb3Sy?DxmgVF?FEzXQ1+=V1ipA}u zl?|Kpm};uW6u~z@08X0>IAm|ex%w7-wQCX|bj{$Mo*6vl=)|i-<9LQj?WxHbTo@U~ zK){7i*oT4sZYsI~3{t%f_ezcGFx49spA0(C+fO^zLu*Ex%XOsob*Ke^W=jA7|MW>j zK~%pN?e#W{@)#pFJKQ{eFyKaaryIke04;3@Rv$EGa5MR#r9w1ZZ@qu!~NK z=yEc3{>qgrit5_EdpC0Uy~-mw1x^xTSGRB9uBb6tCoeBg$@S~kE1;6cbNp@Wx2psJvW)E z^K0>MJ3dzJoC@@ouM&D*1$x>%Dbs6*q1cJWYEgL|s4;XP+&PKEhfZQ$xONUR&iy#i ze-0PMF5&9b6+E=(K|FQzDLi%ZSv+#=X7_J<69EazwV`b(##(GYnCwv;M?JKCE z+OyE|s*2CtGKcE61ynocU}>2~b?X%08-~7q5CsMwa%pc1&72Z?Sqk(Vw7528meiq` z_BI-NiCH=%W$F~@#ibQ9BhcGSTe~AZ<5!>;lb8mxJhPU{Mn+rF5l{lX3Y|vjol#s| zL`9KI}Q*qN4;+B zBP>o)H9g`E;*0GAc*i}87n=PzT&TrUp*}oJh&?hkfnx;MP){d<{as4)kAYskCh(*T zZ=`1cL*ZV|^Y!2$`Ua`?m;{vDoFn3Z~lh+wkj%5Z0+I0hZSY__~VZ&@Bi-a{;pzI zT`remfdzJAfu--TJcL~opD4wa7XC1%q$o;I_ASTy=%bGm^%thCm)GyV|30d!s_@)% z&nf$s_2gVE7K;K%qVAD)fw{o?Fn9)EBj(-r|AP7Xt1Nq+=jFQ3sUk-NXUplFEOJ6JCbQ_3B9CDX~L4NC_SYH zR|X*17Oyl?)*3Bnb-7`zspfp7J6Z)*Tctry=oM4#WmD-TA|ovwx!HNh&dj0B%|af* zS4ikdg@?98x}}xEsw+olV;y>#dkL5$ejoPO>T#~agPZOyJl;+_>kQypO9u|s)Z#dA z)F)=9aixC%hx&TZPn8$!4^qi>6MB8Lu~K<4jFEonhBk=MU@rnxfjyi`P3u@7gy|qv zUuPEq*9kXmsJqn;Uq>5yq--x3RNC3~dE5&0x_pEj=RN$qUZ#uI-s5z^+fH@Y+76ea zLz$Olt<5cH1$O;uEyKuszb#LCvx)F?m{=xbzyHyZSji&3x?QtGNI43%IoMI1WtT zz{1#d1bxS7ot9xE^vrcLsA`%=m3@Jhb{3}Q8QA^%;2%5zW5WpawgKpDy|lSuWNX`$ z_IvpRU=EdDuBL_4MikNBmYJPMFRVgxt}z0=tV)7Ti+7)W^-%5kaoXL9%ibWa2f{cir@tRvLxkQC;W)zO-L$~|E{yww=;twd z0|9gfyzn!_gzYe2PZNft{A?j-8@k+->IAXREvl}As?G_g-GTO&Hn<2pXOjc2mUeiZ ze4mQ1Rct?jS64%oM|EVDE_wu>^iwX^YLQP%pGRw-OYmp&2f#R#XVUqzo=OIayiu+H0>VHM#;Wzdy2n6&6z4D?doGjZ|>hY&Hd)Z@>Mv zVs~G8{5hC@x&Mpw#pVXhrGCGTFa+esZoVtYq2kP>Tka z3O+T|&l$Z<^!gI=T3vSgzSrW{75tmZczXO3@0mY~^4wOGW;Qb^p0ZjLrB|bOqLxgl zGkY*Gyo@KGcpmFY@@WT)JkV1Pj<|Q@>hi;Q{`_lr;Q~Q-@;N+l;blB?{Y^YZ06t6* zUOhmyxAYJehAv?+c#8BqjdtfgSgcd1ZkR`neGzre-LN#zak>+2ffX2RBQVh3YOUQU zuj!)VYiAO8TBoA)atOU#Z8M5Z4rr=f$S#%23?q_rBDJ{5Ih6!p8RFB6sP-bz+mVZ3LN5{9;*yb-Ukr!KOPgCq=;>iFo2h0vCG_M5%uLTfCRJVrp_fnK6=dX~AS)O7 
zIk`&tQ%-&^3Q7x6rYnO+Uyg!;EP}5XjkL%^&COWp4C0tSgd1a%I5#wa0|5_?_Yiut zvU|H+IMN@&nc+b!QiYB7bP;$V^b%-&J-rI-qUq*wwo$%Xk-xmJyd*D zcai~Lq^wR-fY)O@chJZ)PCwx?L1YTCdOUO2| z-tFrO{61;9y6pDlYw`bY@OzZ*lDrgm&7MMOHW`vGod=pwp4|d%z8!U@4y9pooyCon z<+FJ8l{c`iyu1?zT?oZlb~vjCabfpEcOGIC!OQ4$AA+@Z2GzD%R5vZ4$+Zvl?Ym)Wm_nUn9@Wj0sJ4%x z*yyLaa}jP{Dv+32in#OwrLA6! zwAYgwT&4=W#I!8fY3HmBO@x*KMvFzUw$fhD$k#cUSt!iSLtaLv(j~1pU51%TMGjSi zRBGfCdWD2uQAG(#3BJ6-Tq?d&n9EDyG+Qv@aAMBs!KvXfoERU%>B&JH@9n}Kw*xDE z0UR3Y#>wFU?56sg><(hECxkwMp3-(qDlPiZ)6;`+PZ;6eurh?xKz}cKLqT*Cc-`F2 zODpRm(0ncz0)$!@Rb1HFjy?jfyS*JqapChN=s0XpNHI>Yz27f>FMxzJgXPgKOeU% zo9}$*JNPgE<-h#OO)a)o+Utor`>VhDE5(}1YZVLxrThW%$D% z{!oFafJ+6FtSfMpvcrsw3`|Z=-s;UEP!iSo&2N75)@wE7cxof(XlP5QujM$B60)x3 z?aEIL_47w>EBlWA{GSy6()(-iFGu7%l>8WaWjS_~Gb?ghAdQoC1p>Wxn01Z(;qS!N zi;v)`C!fW-GHn;iD>_j`58P}SzzqWJxr=Y%;iE6$>GN;k$;)rzL4xnxzK3xCiD&T` zq4(&q7jR+Elh`?Q857-S5Of_tbHf7aMCmomp|)iS4&MRPx9ubVccHeGKqT~Z)*gb- zhZ3`&*|`e7HWV0~oNq;m$q9WO->1z@EwCsySD=@aEjG7;YOjPAR~jj26MESMUZfUR zpeOcrM?wnfXyuxyex$t0$eUamDa#FF=Ia6~x}uys6j1S1WM`o)Jr#w?i6~4>Mo~sO zigU71T$qOvf=^qiLAeYfp#}0JYMVy_;6u#5SOXab~f0s;A+82 ze-LNJMzE9iZH~)_y9qV`yg)<0+Ajf}fUmck>Mk5YI2=T%y9-@iex;3FFYRQ%uM_>j zkODoI$4zV60YB5()=K5lj1ET&Jgs&FcplvZq2EDu*Vsr!*PuMQvDwyuX3jS;>uc+7 zrHu-&#$-`OjET$RYGIk&&K8+nAy}4pP0OekE>OsNVGd zKla`NPO|el^KQ*dGgVg?Gj>&17d108<1jO7NYZG;Xj!(*6vvTc#}HG%F+1znjxjL@ zyoS&w+2Ce3$!@YqOwac`xB8As?U9q%`4VS;b4Ev(p{now*K=Mt?{iu4b`uO+VIwmk zW16{-U}&1TPUqCTU?g>E!fq?bwgO4u(lMqL@2hQFKZ~{(?<3PbI)~O5gKO*k%e1|C zA9H!@`djwHS>bP^@38>AJ1#tqTzuN-e%6+lHM}D=jgj7YKKot@-*%}@q9e}(3|n* z$KQq5Jn%L=c^BWIfYWZ&ZT~vGL(K~*EKs$hz zu60B4HL1H=uAubd?XwJbC%Jx>>TrW_)L6U2h-F6*NcI}xttXNqZ4giy_jEGL2ElhTE6H?Gu6SDy^HhdL^GmPcM+EhDX$YbzYFbQKiVTf z|b8R{)J^6SYE?{ z^_^H|7&u22H&0b3pPpuTIx&Xnsd3CsjAD6g6dO~MSecr_^oW$)AjXG>jX`}>W~Zho zgLKuR4)phb;R$+Y5gnRYV(nfFkLi==(Q4b?a4x|t(vQJXDH+bmoPpwkF&?` z!s}l1W+UP%$#(BSyk!Z^%}eOX?Z%X37X}Bl!s=J7&-`z6;%B8%VZK8r{;OnIZU_x)5mUAo$cn zBT2=VgjK1g96_UOPYFe98XN?lg8)xsh45(a>_)z|&1fgqN$7RvIiG7q*zH5i?LyMk zfL6Z^eK9`5J2i@* 
zICvOuJbeattS#W=`~)t|&EoF04XVDQIJZ}!>u&6#tEt7#ojb9yvCgT^v5egW+_B|# z9G+XjE{5eBi_2J~N}ZaWLGRoI1{bC=LuEHNG=fj^o{1nq*oXZdc-;=T zoHi^h&hl|m@7to6)qKV_T?q$dg2c_BRJ_hqV`g|-yiMhSdVu97fJ~U0;iAmkM`prh zE6mJgZ7;sB;_^D}XP&?KK4d2F%;RO6n&&sy6~o-L;%jW(rekl}{~yNh^?UrA@aw+o z;uGj>Q7;Vn0(vq_7-(ChFj&F-)Ee%*^bqd9=W&dWj}dyEdysD5g_iUh+FCX+-**xR zCa&P}`XhMFJwJ;FPrd|?o_hrzI`v{adEu3K&9&F#wb!Wz?|2$7rYgMW^viMi*o(1i z`5uP27qGVe5M~zc#uAm@{QA9^UA@~-dR-$&(K&hq>7Jd0+bUXmHjwC4+1>(@T?~(VPK2XRz65uh*b3*BvgTkGB+{=L^TMx^oX- z`~gCbuc>=F3nLsWUMVg>*3i=V0$|LZcjz-K=!G0g~WLn(x?-jk+CWKz~s#oEYpZuh; zPL&f{e%ssLW`wCGFicZTtH$Xq?xv<%T=l7a&1+tRm%Z#|hQIc;uQhsR=v-=`{0D#V z2l(5+{ad{0O>Z)qyJ+}JO|?zq|LR!ntEnnHbls1A>|@3{l_$#76h_;B#JtYB$&19x3{5Qh$)L|0EIN_qy4 zBiptY>FiF1K>IN_d0jmiB-)18F}{2ZS;pdt)-hzdr_q!ff-l}q__Y&wDY!x@ zG`I`N2jn%4N+EAY4MSY@%$Q@SI6OXue49dC)!eqDy{*j%bDI-M#0b4~z=N#MiL}RI zc$P}1BTB`WYQ_+i-WZ{`J~fTQgw@GIhjHZKLF{5^y0pH6xv4QMcIC0VBZrd%y|}>8 zbECTry9vJ23=OXxI*4n>jxqE6;!MJOs;5&J5YDhf4IFIFJf^U&NGl%u@2`me6 zRC7~fLl_#P%A6SDuhayl1iU_~y1`-e_VyVKi`6i^}cXxLi?bD=cjsC_AW%CSU zRXdxbV$JgNsJBCkpG}RlQv`cHl|m{SF}@2wLt3xPY4lccak{|pTEN?ayc@tXHxw%x zRamIctJ>bl1RV9cP@6gRj*vPs6^{U;!O2zYs-`9~wbj#ontg6{uHxERovW&<$^eq! 
zr6w!I`{+1btN55pmo6F5Gn=s}34u&7)v@Z@_Q4N+&{!wa{VBPD8b?=FR^k(%_=Ew$ z_rCYNMh&ugJ>83@x}Let<#OS7e&=_L`zyW=bKX2gUQ<)EdBt1y!yBI^Jx=O<^E6kD=BD4CWQyjxxNR$Dw`42)yGMpO{9jy%i;WgQwBjc@UY_J;)J!J^4Kt z?>LA%_C61y!S zr?Jj(_U^l0kIMw%o&%3!V*WBl=PzP%rtm)w51l_LW7#Zxz{rorty1 z5rWIyXE!F-P9Q(9hD;1s5w^*sP3ZGBuwAA!N@KC!%xnQS$uF2+RG*R8f znl$=b3@rqoy5Q-%5b!)sCsm%^XdgE>H&@KIXUtr=x#0&v?+ah}g7Jb2M(TN?c5`MZ zE0gNcHWPYk$EW>O2K)HqkK?h&9y7X}X}hL6Uh7n|`!|2{H-@^>e$RjY^Ktd+Rin+G z+5~Ffr=EHWFMjchjeflXtcn(NZZ)6L^`*Q_2j~nzwgy_4Ys_JN9`>y?F59w_(r0$1pf?)~LCiTD=4P)2GqF@V0&Q5X0J4hO{ea>R96ZGGc9Nw>OJ) z-wN_WRA~!`80M~{e{v_{RC%#XKa#BqbK4P2v?JWyVf4(fd6hyw2#dpSq?@X@d5l`z z)~-Hdd|5$HU9D81A>heVR5(GO7h$)Z;Bzy?3!}9uy6G`=G5i}OICe9vJ-)FEXZIh( z?F?1VQ0*y2(=6dMs_p44&b4>pj^Qy}qf%4S&|||RMti>FON%(Xx{ST_HLAH~PUq>< z3~9%y@J8llFiPN!PN?>E1Pddh1l};_2l_D9-HxHIR!lIAo#^kuR9_#43GIG{==}s= zhdu`towCT}8M;c%33R5S%L;Za1YL6qE&QzNi6YReGk=t@3sc>xJ)N59g!n!9-7dHZ zI=Pds_HVQN>_u#Q#>|zQ8-5V=;{DJCf!f$!~af4i|wfO-1qrwtegYEo10eCIoj zE@}s44tVF40q2^I4ojpcp9Buy~t7&WINhW(mQ$@ z?E?%ad-kKXdmqNfPhfIx_Zi{rNrtbx z7w^FO+!f4^pU33L8LUw0ojvz*>^t~8hJp8DVEhchbrz!wm(V+T60Jj2lzn>%yA?y} zwT@_$8g&ccSdes&?V$B&GI(8Cm>QKK4Ju|V2+@hpaLA3pT+=T*FQah5h}PrD!XAS$_avPnm*CpiIL7$ z3=?`Ioo(n#x1dL3dxWFtXiA_vn?pWFu(eXn(3z?(BULBZC7T(-CXG-&DMd%% zN!!r!qSMMWLe9ml`MwzVCnk`;FtI zj1;a){m9<@<~JMr>u;p4W>R&^t|FH@l5$fwHxqcK>0DBON@Sq!Zn9zs2}%N~?oH5> zDJg=KoX)$-A2MxIx$R&6)8oYD_ItPZQnShd=ybW4Sm~H>c)zQ4DH9PbyLH zFD}Qs1-&2DRP<8#4%Ifp)mN9fOv>oW=JJvIf&UOMX$4E#;KYYhZ z+6RxKeds7UM~)MIhtNUz4fGwxNH>+u+?_Z=$Q@j`j`hjQSQ@{GRj%8;b}tU@ydTrU zr!d-o42v_Dv1^0NRESfYpPapdL4t5->H-F)&Y@q42c}M=ZTOG@z1-klBdk@IG(j)h zzky`uBI2zxXd7C`#?d=4yYm!U`{$AFnnbF76p@x*#L~S8B|D7p)*ESoJEU6NFdE$< zhPEEmT3rV8{E;T?J$MAYgwPG8#}6!*DpEt-fX4-&)5e#+k@HTY*+yF?g^}KFLVOf! 
zvokobrjf&T8Q?p*wiCN&=CL|DfrZW<9PJyzsop`H=kk@gMO>U;!l98d9GsZK>5ZL6 zrsBPX-`c_g=4WOw!H{Wqd<^}g!{{3sLf_D!5&HBoyc#BG$9lRk+C`Puna6Mk)tj2o zcsyuzdC_7em~9R!Q$L#GQ7ZB*QVdz8TBYO+Cs2|EV=~@k^dpuxMb*Vl4UZ#6?*_q6 zWnt>qs&Lj#*cl4X{*20#3wr+&`$5oC!N0;+HM`L^rFxR;(>*H$6a)m4C!c)MXrA)N zfBeS=6m<>(N1!n0wV%$V?P|a*Gr^{NP#K@@Tg`N|ED)K>P*bx^Pdz$h3U38HDLyGY zm5FM*5|XM+Pg!0BWGOw_`Sa(EdwJjc-e)YEW?oBH3|e!YK9`wTL1maGu+8na?1!@g z@(LAg#v>@`-SG%|@?#VRgj{-*K8()nAlkFN$Yt6Y5veaeBc#p_LT~s8!_H&qo;XQm zbrfyA`_VzgH!^sfA?qnj(#QHv5C+FFHgtwBFGJbM3mED@iV*^Dbm$~T=(B`f-@plU z^&Ugt@M&~2+#Q*@#PIe4dZ-@zr_T_2htblrn+oqB!`oE@e4XP5(WDw&LNC>|i2T4R z)(&04-V@ghrPn^VfLzZMqJ&-|H-LCX;A@93nkM*CMp99mCwv2XYPzDH8FNdkMk8f4 zT~V2y%D?0);BRV*!%OI?iH6VXLcs4qm};+?;cahcJEjS}g{es+>E`aGWvae4>|0pE z+SnvkhsUwfH;A1*eOSunvEJ2*sw9X}HPuJb$5@(VmDR<#HfpwIXk|qovM@Sgi$Nn+<-S8%?Pu zG$rX&aO$txL>Jr)$b}<_g&4jDLx==|2>Am>f(0L8r!p@UxGU*`ptn`wDHokw;qAY~ zeh~Cj9;T2|=7c|v!3PuWhrN}g|iU+kl)cO9) zfB7%pr>+D--M;W_T+zq4$xGe8dQu zO)Cz$Wdh&Ub%LG%{)RWaVROwb`%z7>`wqUn2|i;5?z!_3bmvFWLD;o551=j8hqg2Y zPqquKdBRR(=XA6qPcP{jJ7%PQ>Yg}3HFgR^gwy!!c?=8^UcE;!N`OsJwT;n7#xJ0E zW9fdPWAvxflf<+%vFA%Ss9eQ~Aq&hgx5kRLpR=B{1H^fR38 z+llPJE(|Z8ruti_+FPf}+lj8xU0B(F5xc4OddJt1>7GKSYl5)rMhv08W~ja1jfxC)M-%kgTG33kp;}6}3h+H{`29X3$DUMFM|-Q0 zb8c>W3QKddSYQ~sNB}PI*MFA4S{fRm4`aHw2P18H%rg|;IWvtT>uWg3kob@)E(ncf z)y9rb8sYH#@EE43497ZqG2GgPfwn^1LDeX$J6H!p)=qggjZ8d(CIT<*bRcK5qnDqh z%kD<9-im;w9(G$jd|@BL(XbJ&M(AMyZWD50LN28K#U8KWez#Pe%Sf~!*s1JGu4O4a ztuqy#fTyX{*}ueo5cJIORt5NK${|ye6or$5fWlF!Ey3@(%Ee^ndcjC#W2Rb@nab&F zU;CPI?+R}PxndP42&u2Hl%i@@wN4<_vfyW$ny*L&3#`iCqB1rJz``)>7Qn(x^Mx*GiNa{PL(!shT-8+42_=Q{CV^e zetlyMQztn;b_Ro^RCB$DF+}ibx}Cr~f?UUbv=M%ji+4ihg}qdHBlDMd{NeRJz6JV`{1% zsh~(WWQ-4?v2q6n`Y@?VhDjye9Ks}hVzA!`L1*}48Rc)>XPmxsX%TzZRTYG( zsI8qNAg2ZfF-b^H^bTN@;qXAd1HI`ydQv%bwd9ab7s}w&d?ZEXnQv)EN2Ueo7(r%l zL{mcpI$a*jHl;8Yj3HTPfvt20YHQ2k3A*7^4+|< zP4$`SMmW0(IaA@u1w`e$`81U6t#ZWo9y+SI{#nGE5~_d~as$t(u<5q#$*Dwd*rYFjJZ; z!C3n$6qj1nwUqBq`>VWB_o91J|77zXHC1R_+^$b1;7aK!R90>CE&I2!g4W|-ncuZB 
z!Ig{G`407LdN(@q1RsHy&$l9-5&fWsSw%^iZ(5irKVt+hB1OQel*y^fxKqe4UAQ>~}eUOJn(0X(Uo zFcp$iQkDv^C7Cq9*VDt$v%iO6SKGH7I@+=r=$c4Ftk zA{AkwYuIp)`nt9oo%_|_wLP2XA0kr#U8=>XXfVp(=*mzW730Y)LLl$!t{6T}1xh04mdVwx0?+VJT|M{s%3i(X`` zSEFP>Qm*wnm!<-#xlUz`DsvN5iq)LLTiv^AU5mp{neI{0lTuSfhL+9j&H|AMD4kCg z4_YThSzM+kg(v9hK1^84bq;f#zBjYH(FCrhQm6vxtp>^emd3xCdSZxn`t#ART)Ydp zG~d-6)m>f+ud~p6t1W|cJKyV$B;xr9d`UM-`o<1X!5zi`!8bB<8sk)Z)617JF@F)m z3`>WnFA#WlV0z_R0eZ8S7=jXJ1YQ3a)gR~k3BF+}Ks8a(xTe}M2~s7vsf(2M1#k?&ujGCYK-6+w6>T6<@Z=^Q8UM$z0lgc#%WNOK2* zF)F=q3*3G}&qL_B!f12`(V$fFR)(yhCWg0r2<9=gb&>w<3~dP_xte*XHdD|O93s8| zngU@o8wm-bXk)0E&u5X%r_qv2p*fvEi;@szQs|>Z9--o!A05NO*f^Far?ALSc7m#I zgs>YRSk>LDHIwH48KVh_3hxtyX1E@az=R^U!+(lL=U1V5~2qY^bk~*Is|HK z(Nt?ey4H$hWgWcROVO}>8!UB|2J{>ZfenBWS^}S!%1+QT!RKOVY=TeFGb<}h=ruH2 zVYM~FzPYT*4OMoijIcQM({ovI*_Ao32F7M6D^pXF;$@i>m0UeGREDS8Qkl-7>?z99 zA~mRE)w@CSN;fYR^(TMwC&s=4mkC^RUguIfITK=NwL9-YXL}Di=qkTUF>1H2G)8_77XdOPtP*nK)K*7jd~t$Dh4}%(BIflY z8;K(wBJ_eGqzR;SDnU>YPV{6d&d@i86ul#xGAbRW1{k{b^f7epMt`mq?ae8)Qo*$n za2dia5sMSoDGys~hvhR~8i0On8`P?qg16CJc(#$%>CH21c=}Gp}!& zc@Ev9Ij?)Y*?kn>^A9rf+9piRV@(s-w%)6`ulCz|?OXQ48vj;mrzgJwy}Qw&^81bs zhOv3H^|z5LNd&SUG$!g$-&6%_x(=Q$2TJDFsn8}5Q_&sA=*&s_X-q9%BKap%35U%#7h6!cD`ZRj9+Cr@Bv z^)hlp2ap}ui){Zca74)d~GK}%nghkL(Bjp&wtU#j; z%^p9}z93TG0Ad73b0mx;0VUYAq?*vuOeiK|Xm3fPzcq(Gs<|$Ps~rqi^9)m4s1Vap z?n?!!o(>vkH%tW=qWkGSx`&{1%Y%MHsrh{#W51~K!YS0YGi+5e8kZf`>MGbOD&a1# zg0Hd${@QvfKnonU2G|@@Xa(R2Vse$wIjHWOo33zHz!MDJgqvA`A$QQd497h5fGR@> zW%bxl?+vy0JAC-?=EJqs1P0TedHH1mzzyuRDO33vnyAO z5O;R%P6K@MndLhOG={7M+wf)>J4(Pw2}<4dk1^yO6*Mnke(f6O*6zX}fj2UD&PX&c zvTz(&3PidMQPPmWqavBPhF?lju&TkWW$#QpL$zR3=AA8la?N ziV~y|f=pe!LIN69oGKj5CL>OIqrK5+HlzJQT<=wxn$=3xrpg2=IRem0n0Xr-;phE? 
zY)*unZh}nZafPtdwyE^w3Q-lV7nkwrngQO6l7kvGy}CC-Im&yEFpLew;)o`wN~!MD z*jY7P>dL03CEss)xKw~lNbsK-u9+`zF>o|rT$^cvLohI{7<^lyVw%>Q`?4MTOP9jromUl~efHqK#~&>LSkiJ6rPRCJf9 z(yn4@_uW|7c?~nGcQCvq1nCoVw_{X2EU5AXy#c~cl@&e1RCKDPojOP7_8FD$oubk^ zhT*w0gx+EFPoKop#uc=W9Y#yR2FA+EsI7Gm(nE6j7pMZ(g;`8_ttPK zIaT7Mb6K?Ha!6%!NK^S5A^&FMZS~T?3Ns;B3`&`fGl5$SLi0M>-#mBmwM{c2SbTl0E52`SFTPjJ7vG0@eRC?)`EK@^ ziq9*%1-ySN)Az_95yDU3`sI7Bp{YF$Zzcq5%7MC!1+{IpsOhOj)j$Qxhe}a4Qi}44 z3Y1K)pTXD?fwXd#kh{dt^eW~Ez198qVP4Q%yMy4nN(Fcklk>M@dj2A&<}PBK3a_B@ z&Ju8E82Vnq%qAE|rq5FKsjToQhGtKrkLu2FhR?a7{l>uGg<9KHGy**>kgoYo_Dy>>h+@|96<+RqG>0+ zI}|||rzxtqrhpd-hN^Lf$q|OO1xrIQ>5M-(_QNLTd z`jM*EO7Jq1ENE(EU1`l5u5N6tot?<)2f+9~3Qc-c)~REU9z9xkrVJ}QTmoJ(^kn7> zytT$uR{U~HiEV|K%!Gud#oGm;Vqoc9CU}a^Ywn}<=6o>-bzO6RZQlwd^Sq{QT`o4Q z-+G@W(6*kp80zM6rahNpy=DJ8(_cxhZ&rVWGk08sCly9R!inmn1@#?PRCU*)tgjq9 zhIU}v=ysHhmK1z;H`U#)+cCfU5?1!zNoQzEMYeYEe#~xA<*i)CG<{;>0;U$Y-|`jA zE?y?|ZZ|3`MyAgZdJJ``?k46gQ0bk);P?p)sj)Mm*Qcg1RC#?<$I&rz5SjknNcV0S zl^4xQIM6naCWf~HULY|9U#u72s4}XwaVl?xCzORNK-dLZ4B$BgI&a)?JAqfI#314A zel*y;bPrtK5EfT9@PZe=6g&6qW2oD;`Esj5qZOUq?Pyi)EW_4jhMzgN50j}JhNDRW zFlH#fyoYKr5<`Eo8GY&l8w?@Ous6fdG3>AtXoQuAuyVT)AiP2hF_o1$Ky?-+^g`TE z6(vDVgC2&tT&IDFG%A}aGQtd>n+UXIFpN}KP>UOxP{ItIBbgMxmn_mMryz)ORAsI5 zTpoFWjq3$(wK6wanhWGyC(j#@E4b2X31$Y+Hfyhq!d@;LKLi3_tF{YzoTu_Te*8q? 
ziMnB0csBIRm%kWJCIn1yY=wdBX6nqetzatNZeCC4E(VWj#p`t3&5kuq+lt|)?OT_( zp09Yj?$KN>zMrkv*}806@wslVfN6+;bP^cil<9%LsGl)}{7T znQ;a4RDC-SJYaxubpAZX=+n!WFvs-^RE5)v7cnw<5|eZ1Fg8OK$?$f0_njD;AqZzr zAWe0bXIPu=T0(}NYM(<)`YL6GA!ecn=U>Ixn6kuob zL{M+{8Oa9hUP8~M(b3$fYj83=bRyE!jFpYu_?b8VEcP8x#udHng>haBSy!ln2#7W* zBm%94N-9nG@tL0ma49up<wGwu1t=yLHKxccm;hH~s zWD|PNdN;LDc+AWV#eh;@TZPvOy;XLp=`H_{b2VI7v4?t@$Tv1N{-05qdiwO_>1XY` z-}^TH3IBjS_Z>iWFV$U-4W(UHZ0ok-wmuuS4cc(qkd4rjTTn90@OJ0nYuI`CZtOkr z01lpcl+%X{@GTHj3%l>a@}9dfyLJUr%M5FmZZ~{&?{!QpQu(c1!X%fC_IfeV^bvya3T9Srr{X(=!Re!D?cYHAzy|WYD}1RJkjzgZqH?@=FWljF z!YvOC0;Dijp!0+a?hY10TDv<&z(oq6^Jst|rIsT6jNTX?0?%bYuQ`*)?U%3Ojc?TxNp)p?-@+nP|TfBE^%FhT+LCpl4!bG_$28!KWpH3?zm2%_#5I%CLG9x#hU?$iRdUAE0$+vUg zwyZHwkvuCEn9orO=Fvh=wYM6;lcx!Em0{)xRJq`0syRdL^1Cpu#cgeQZp(6?9JdpA zc}|ZVJ^38alRpDJnfW4Vs-6+*YNqydYR`7d|6^V41J%AzZ4Nb+ZB@bl8BI^0pjY_G z{Vt*R_t^E&UQ`TQP(i?z4%ksb;Fa`q+TXZ2brq(AH7Gf7@;)3mc|Q)Fc?d_(KaK;Z z9>&I@d$4|xe&AmEeOTCeC*}-zUBdLzMFV;&_gmV14GRo`1-!}m3)s2uKJ46kA64FY zEKwaw?Tyc#ArwzyaO$W5y^f*X$aXJNYZz?>7w4uVNXYOzo&Oi!wL5}Lq z=}$1MO&Vo*HdmBz3lMY#g=eIdYV;V;vpN;tdSG+<(AhhHCtmm>yzCXP!uZ6L@zM)+ z8HR@Hj;5MQ72In~TqEPKLc$DTl4?Ol03+y{sa)hO1e{#$_p-?(^63_2H=B19m!k=4 zW?5SCe6brfq;xaui>|H2nJRnJF*&NZEImWO<&7)Ftk@HDtfXxh2~Tmsg?}VnH-KD zqtd&nnc#Z{dS(b$9PZr;-~QuGe-~4Uo71h8D*stcPuGo~UOsD5dOIK3hq6H{q31x^ zkPF*K+*EZAlnfDi@?k=6#7VD5$*GHv;Q0B6aQMuFICT0!0`NiXId(sGAEh68fc~HX zx#gWyee{L3t61NA4_2rE1;ROoxpONFdzY_Zag`zQ@>NXEp2x)OIgC!7!p!pR7??PU z-qC~T9NtUcV<^3rj(Nlx)&>&;^nL{5-G#6=oP#f#C+w0`a&cqSGaJ>NDl6=qw>mW5 ztq(O;FQKQlc_CPAe(n=0fY0uQKNQEz!ZKd?l9%D~)jQGI)nh0;qg;z2t;)bOeuUgC z6EiXp3ryUvP0EijXd}#&5Ww^fLP_4%`bse$yV5 z%!KG%H^b#i*Pf4aS09JqckKMbICA!3!tdb%_>RyI-;cGucN28iu_5?v+==x)*RgZ| zeORQLTOkN{A9w(}sPLB8?!w&SWlYaq!16i)xq8JoZg~0xdPWYQwU?po;4ZZGts&hx zk3>#M1cnh!4I-TECF~g9232X%VgS!Z&^1!oH98r_5_S!CDm((NzR}NVzyP6*p{};6 z8(N*!13SarRHhvVkDkEY_uh}Cl{Eu)tx{5iN@urPZ4*S@okn?Bh6*Vwm81#)L5KSZ zR05Qyh6>zVX9CQ~RxF4al?Kmtvxd}ktuvIJQI5Fj3a@!>!v#6IQN~DD*_~8dYgggk 
zQp%l2sHHMgWi2Tr6AYYsH^ODJ!fvUBqrMjQx*FK(s$s3I zf~BUy=(}4}TaFq_C93Ny`JL6E&QeXUA^hr$_JbND<5_kCc$>b(%vw@Y;pqi4mv8xh zj7#0=@32+f{Xg}iq{odc|0m9@-g^Yw`W@IYk({UPh#6z z65ChfD7o|g7vjqGCvoxGlQ@6J^Ks(BBZlJJcl>_rF`#!JHuhh~o`d&c-{A+b#*lVx z_uT~D{e<8>*n8+f96t6K4jy?FyZ7CX)s1Te-zAJpoW#)haZE2>z#x@gAC+F4(n)o% zAd#Oj%JQ1BV~Dj35qN!6dY$lw^Kkf5RCi4U*wIa>X;AMvsyj^uz6N`c>%*KX(Ll&h zd<}L2Aliby!ExTxDeT^R0NuU)2K1!l1fZ^-ZUb)lu8u+_0D&XV(Qnj%5>`@VCftl= zsWO2~+vz6c1X43?Rk3UR4L6h?&z~=NR!Wk}Et6-cNrje7DV=*9(P#w0a1h>LfFZq? z;CI8~a-iNpWhS@TP*-n3ZFManR*%NoI#}!KV5zM|eGS1?O^8)hpuVac4RmXfH&mCw zLa(bSMQv>B-F}YICyHX+KCg6jyNEvi~?Ue|-Mm zGkqpLf12=7Q)|i9`%mDuK@Y=PKT5{vgx^({qckmt@JVq6G@-gh(cMn!~-G!C)JE`pM zz{c+DgdM@i=?c}|{L*DiOrOE%F>lLa&*yi^1wtGZaBBSO8vw$8Zbh8wfoqJ&R4<&ZzRZpN-IS`(kLxc41_E z77I&j=&;Z`+Rvy|pfZySoH@cvO+J)?)lh|I zSiI@FhN!}T$GX6%RF$4l3csxG1TyBPv4Gbw6T3Q%p)-$}cXr4f$S97`AYDE6#%Io_M%mr9^BT~@3*Zux(Vo3`~w zsJKz}e@@eLNyVxkUJ6g)M-QI^$s1x@PslcGqff2((OxV%1k4knLw^< zj_c;p(l&umQqYhU(fHRM*rPX^IM7ODJ*Orq@uJRaaLTX@{yRt58LF31Ai6 zR!ZMdSC7($MwHu~D78DVov_{0#j$;f;VTth$wm`O_9n3HC{^B> zJa*hZfbEwiaNC6$l)U=`AH!RJ?t^&wPyQ?(dGfWm^S&43+|?%ty@#ms?!lpx58>RE z7hx~M-1R+oVR7ve=9VvDVdWwg)IM+VcB;FJSW-K^WsQ$Pl{cl(_86xp2)(1|8rXwO z$1;`OEP*#^)YuAmt=+RoUj|~ z)ScZGCiG$mMp8(mT9M9nAd@4++PbLFlr@>kPL&u0S|Rk5Vk6ABtbF+bpsDUu3#;y0 zO1~b9Mq!LC5e&lb^BJA?m8GPip`oD2>If|Yri$UD;8g%vh2h2XRfJk)Ip?=}1-DgH zqPl{|RuES7it=idQoZe{u0yG%5x3P_QSSDkiVCrWpu5fCMwvH&+iWhB5P;il4(uQh zcksSS8?C6YHlVV;7L`1ofmaDDy@B^@F`SSi;1u!-gpGury}lZDekPRz zE=&@70@oJkX;wIsYkuoXWG?H4Qx3f^ed$a1{O3P!G@meELQ@@Ts$n9PVYxW*r>RhM zE{%Pm<4tAqm9Koo$Y!jm1_@W9Ql)k>yRMn0YpMB6PfyQ{c4~6vyi*@#V}K+jTcE2i z^w0g=&ut#5k6iSDZ`qHj>2LIHeD^z2dw_EJ?0)GTDBszPihW6x9co62{8$PlrxGZ+ zJ&SF34PwW=v-CBTeBdLW!h1gaN&L(^-jAnV`7;c2Ux7>4UVsB9?lIKfm3v;!bw5S; zJ&cuISFo^pJC)uQED?UQi{~*$u#HcjBm^&Eo<7F;0VO6KI>6~6^p6}wd#_a9BB3{f zSb7}cQFs{6 zdi`s~DXK4qlUYJ6S8z4RRr@?aPf(MJ6ZG1t>{N3r*d?ggf+2;S9wYryL%oFx zh)Rs=%k*jkPI3XOoWLrhml9IjQC_y4(;bFuzPxOQvAwFYyx=@W$5s(^TV3mG%6SdK 
zuBuGn6R<2)Z3I}O6Sr9iGnW^o1f8H);`CyNj{tOf3J`RAvEA##4!+Q3ZWk)3GAjwc zDuPQ&(n4=w*xSgdRr9q4x71V_02B~yT$URZ9-KPr>uw4?O-$e!UIYhI-4z2v8I+Y^ zQ>oXLDOuZP=2ZFe)PPpwQmC(Qamb~4jfSR>U)wc?g}|s`Z_Pnu4N!q_nS@_*2T-oR=$E>6_m4rW;U}70=(YA4k)3CZ(rm4p2tM-LJla zs>2CX9E)Jbi7>XGP2#rm&DeICYVTSG+aByg>Ekmfd)|5hdLQ_e&*07P`XHYF(%0k4 z^%vme1*yDi^t%o4ow@uZb{)JMOFJ)NY2zYRc3vUiZpYO88H`LG!^G?YRqs#uS{Oe^|}wMF``?Y+mAB48|7X<%KZc#;a9?S+q^*n&s|ui0xS(jQ67$< zoT{b5?M4-qjLHM`nN-1{$_}H#!>sh6*Vn1yqsG{7BNS|wdNdMt0;1;Ur>Beu%6OtS zO>;MZNB_3H=P(Y(@enfIqbdx~o{Z>hZ> zchlcTT>%RjR{om*-}CeZ)Sd~W`ho`)7v0!#C4?P!MX}=^0`S2&wmpv_?o&f3d&vSy zKK!xIQt5pHuYKds;l9USjQV*l~GvApLZ<~PoBdI^iW2)o^vvAX9f72jpd zE}zBJg8JMZ$He?8%&n=n##wX^?=vballh%&Sd#(D_6E(Buj z@B}jqX`76YR%Ljm!jrla?23VBv-#k32KnHI;PwQm&|-);rI2dLA(L&RqGMRwYC6MM zDm+ytn3W6apzja*VYOL#ehaGk@>fv-sq9UoUsdqstl$e&O;FWvzFPC8gxHSlhF5JT z)V33H+e=VeMioQX^cml7cpX(>4V7QD;hX2HD1VmMQt1_-SIP5K(W_BcS%t zH*#Lfg$k1fetx+X4GfJfoN9f8;KvYoc6wTG{^r9~FmuU-p0Uh*idB&b4h@1Ml_%I~ zupXsTmr5(nlB}%50+qID>D*?@b~E+5t}Pe*w66fF@V9uI zAXuDwO4rtXYm5tXcu9@9d&~YanfWC0`Q!ipC;kEZe&!?^t}-gT%YvHg^{BkpiHZk3 zD1XF-(kDFF{-P*KU)hSX*9=MNeHw3i$9wU}3to+LS0BdVGuN^I#5JrPV7R*T9A?+f zVtVB?rj}1(V&Md)7EfVvL4ML$9-Lw*JGS5O&Y|7N_pTw=vx;VduDNX%&8>yzDaq^v z66sMy5(Dr@RB@4mJCHWQ+6H@=prg7|`@2S^doP5vbqr^>0?*5^HV}*>5>;m77Nj$3 zULxQLa&lEVsBABLqZZW|2q+jZQl!@qbb?)jt&t&fJ!+_MYUwJ6E2oMp-F6$w3AYNu zt?~xsZX@)z(QiWym0fLV2`puFO}Sjhsp<8CAJ^4T_0>}K34S#MUoAskc^yMud5tk& z0Sm)tO9ewtdVM9;S2e*$NLAN2pu)m1mmzmGVOHx8pe`6joj;690=&Hd}Fjj+<~gqefqklQ%lNCnuy^>qYYt&6IVUPI+mZFiv7re-k?G}5PM zW{t<{W@f@qwl$>HvRvV;V50t>pZ)A-4b>)B`JHL%t|sVdKec}pAmz#psHw{16ut^_ z=5b2#ul+S7q|Cgw$_&kLSEG*=t2PZ1{?@m?^@hqcA*}(xb#9$c;jL-KYV?-~#!b)h6$DtnELK#ocEyvvv~WOUE&~a18yE2QWbI9@&ka(Y>7R;dBrB#`mFf zcsE-6*OBd6L8@Z`NvS$cW4TGhsqB`VQ3beDZoh&H!D(Yc*BPDz9x-xm#5=r!{Dx*Vi!o zRI?R&0R2D$zop8x2rC0jPJSm;c{MH{s)A8eMVe3-OQL~pA@u6J{O$xUfR zd-QfJ?K*+Eb%v`;M=-Ih1fYk}KXm{dLpu%d33{EJZ|z$(x{Y4KlDe8g`#9C28kGq(Rfen5p^?Ba%GwmBG7PWb%dc>> zo+{1KU^9letnr3WtTrm`s03`sbmR0Z( 
zTvUmmvOJlR5|~O(qoZjENrk{FZxitT?9cwpP;UaC8M3Oqqt2u8YKtMIfq8T;ZPT>` z$mc!pc{eI8e)ea7_E}|py3c+4_8Hp*)z`oN^*6w6ewJJIV{G~p8NbL>df)pN{ulln zS3h!cI(8pAi-nCN zm|i}JvH5)rS$7+Q^Yo7Iroy9|8`_|%+sn{(+2}r&Y@J3TKZ$5&6n?6=U}^}>9W!X@ znnOH4fpB^R{zN}~@m?c)N+3>k7wt5F=LzKD@n;FU=w?`}lu%N6RCffP^5HeA53NJM z^TN$gPGxtocr%()S!A+Oa;lNtY*R0ePob~*L zs;`B^Y9ahAXe977{)N4fZl&|PsioKO`?0vZuzUP)_~wuQKkwG9jkkIMk({~Uk&w|{F4ex&U> zPD;J?}M8O zJWT{c)!r6|wX&z5e%b(uz<2%nbwh2b>4~OhIN99XY_zMBI+GGpIIGGF!OJ|KKq^R? zHLPZtp7O&<(W$KPwXc0GUh|sQV10evSf}%7&~3BQVq#+AM$ND)X{2)1d_`ff#<5j1 z7@besHRi5NVBPweZrP8aNsWJ7&{L{`f56}2Pw?hnzY`DqQXlU4U<=NCB#1*F_hRp- zJlOpy7uG-P#PXM%nEwqYroLiB$>Ppqm|Uc~n_NTh=rX#77SY->gIw1%G98m>X(QzF zqll-K6rc~GL@y%E{ahZPife~6NSKAP@Wt9T)4Eg18Ts%8x)#{I&1iHd47a+P(BO!{ zVvi7Ve$-febODblk0EVcV*z+h0WTCrR6`PG(wh|$QgzBkLddDjodLc=#f5rGgc(jb z+-ksVF+$if!fiVtBxP4YRaefJLeQ(`dNnJl~_z`RY+8I;?UeFpG0R~I)5mNHY>odC4BxM-{^4k@MV6n0DXNqMRER!|eH ze)*Sw+1N*FQQ(nDk*QozO7S;;^EZvg&H|<&C0Lr_tN^QKC?`&wxDlG_z9uIpjqL)T zsRE_?l5i$R?IE&A8_LpQyA07Fyj=p39!d;c8TdS{VtA4jTn z6v^B$VlDlITrYx6T}BoZZ-iju)I+#AsMs0_H9O%Z_&I}V19XL?0Ve3;XmG|*Ptesh zhEUTGKy|%$(>-)I!ADp%x?yFw<_UxmiZvmoWSrToq2ScM&MdDp(>bXFe=gU`7oi0K zm1j{2RWS4^EmNqv0Y6(1} z@^a7hg z*f;?w@P(=JLO~B=AwQDg5aJrZ+^=->5hNJ0Ch5_56rOAh&bDTF_#JxN`8VzAhJ)X+ zwIh$ZR>Clw#Ps2PMkj8=L^wsTo1`Ea`6L}uQPOy!9m{NM*SmkMg{mi;F%JzsnVH7w9~@fUdSd#>Oo zf1(d>d3zt;`N2NC?GxR2*B1uxp5LCryT37mxBl)pp8nk-y!v;0P%<>N!Ekno;i}Z# z6f&I?NahK)7%EPXbB7CAjpcSf!RAe(L6s360*>=G)y5KZ4bC{~Y)a=I zp|UHu#m;cHF=!|}!A>DzYcu-&8fXb>Gl$BRf-IZfoDMMJoPS7hip~qkOa(Y!|g`xZ^IWMo`3t@Vp zJ)OMJ&aRd!O|`ZbPOWr-Pl`_=u8Y8PS8p>Ep9#Jg0cSYj7$N8aoJtwG7}h!oJO=^i zpn`KW)-Xh^g2z^c03jG)nCsK85aiV8^IGhcs39C{3B}sV zaw9#Hh0_MZ%h1TFjZ?W@;cAVP-wp$QUQPp4e<1=dOvr`kL58zlsX|VTkd?r5S*i{9 zSP3~>C4$yU#0k7eods@(8lVC|G7n!2zHr{2_u}1we%sF zkg{tf^xBN6Cz2!Z1UsrYFIAip0=QxZ@J#3#fD_zQ8_Q{}RVmyHpexkOO4Ydy*j3g# z>5d|G7eScnPOviutr6H-1vx4@s=7j2b@id`FjAR^7^XQ0h}s$pfmcWMR*mgD2snCa zMK#ZDAxs-lR$YtjDkv8OsRBzmE#tJD>jlRu&R16!yrzm_5SMGJs!>~2g*r8rsHUrV 
z2_dMqbZSmwmvi2sDb<_G>72Dxb95g87m&K+x&T92KiyMT4tG7L3}@Y{pintoLoIxb zweV8q`5E3OEDebByKuMysBg)kVt5qWCZ|x+Ka7gO2{aB*VOuJP(p(0v(S9^^QDt{! zV9O=pOhg&d3V1V7LP25a*=7R9w5@>o=OCi(f~C2ijuWVg0ksuy z+P)QXCM0$JV(6J>&TD!zD9g59=a&6=n*Ln+ixr-jzrknT_bA@;nnir%r>F6Ww~XV{ zKR<>~|J(>Z`}Q&X+S|wR$+r*TV{hxjN8irywnb%kS#|#JMTnuSKiX1Uzsox<3vI;SzMwYs^F z8xC&}euk$_sWg?H!dN8%P*!7QNKrZ`jR`^MaeapBF5Z-cmx@O1)G7$7GJ>nTf)JyZ zmJ@tjE>(^>0M$K zn*g+O-d0;%XhTOgI_E1Z2^E~eRw+3z!&IN)nx|9Qc`T}ss3zp<;HL|C!G?PH?G`xP z^|0A%5wp7xvHSV`MqqF2N8{WoN+*|4lBe1mT0&L#6mHA)qiT2(jkEKxO-;cuK7!c7 z1i}Lyh%_e{&PI^+1&tvqb3PyP;Q(^U5L%J}q?-xCWEfpkdVR4bdeZR0L^JxDljtVw z+6cQWA)3lk1v2#Ab5PJDgp7YGHz~rOX6WDjb^#!!vMYwm&Fp{w@Bh6y_Y7FfV~Wez zZe}KsG!+<4IGV>l7r=^5>rCHjy8hO4+_E2Y)1Q%Ecmnm6Hb3D;u&IwMA*ZY;X8ya{Mn9?;vP#hjZbtZO(bUCdr!iHZTAMovr!Q=% zyJRMd9HC}_ZnGJRQDI?JR1~16VG^U!I04~gXj*5$ZhI*eTv=hdy{r=3=sN_$+5+S% z8*R9apxQz3RXbd$b2F@_I+WMCyr>~)qzJ327Ui|-@5*Jvxm;IM4+{Y)0LmN8zN>_& zywSpWOD(+ycDi%3_SIcqP;&yEmm#d%nAXB?t4E;Gf&hURvNj-SZ-m=tgUw+<)agOU zL6zqY!`U?i-`ZZ(&aI=QeGKl|{n*h#rPnoqs%gS-Yz{T_>i!`FrbprJ$sv%4ArX$C zEfg&bL>>vFlgh3&MdjB*;L+P!xQ%e^OSWL3Ig7z$7Q@LD1_{01Yzpmpg0C&b?>~iH zHi;vL_whmcwt+j9nS6^+*A1=B!YMTiqb3a+}zNVSS z6e}|wW5UZkekF-D{B)<4x_|kuQ5+C`Q-T36&R`Ka~&Ew-g zKY>sF;v_!*?nV5@d)M%#_bhRKmd+?jx$LCw1iL~rlN6zsG)5w`dE*3L)KGZ!4u-2# zB(*jwIKhvKPT*69V@<0q9#qyjP*v|R%IZ`>QElNomo*lx2JCTq)b1{cSTapjkt6UJ z!gh8Vpfggl(_33L=#EDEYB$DyoZ)LJ0cR?^?Ofi*>1`^*BbThNu~4@tg*Ak3jBpF?jx&2=s4 zY0sjwHI4RcGrDtWoH$76DKjnUCiH0x0-?k}hj>8$-WH{&^0C?3Sxixhs>#a2!UE>z z=CHK1WUMcqs;k%=-}pwOE#6khY1ssm=GBl`wY6&Jr+kO%m7z3X>ba3%sIFRCrIk`& zUfs)coky<9jYl7S)M#3x<}c>hu)$!^$i&ju*JoVk1uuBPjnG=>FwavA!{YVDQ*&OX z{dHZ_wkq6P_QRUKQ~fOp&nWO$_{xVK$EV)155NAitN8r87V!D^&f)X#o5z=bX_>x) zFT8smpL_QVzVMzIl=!6X)UM7*;@NDdHzo2k2IbM{V=jiX1YSLXS7-B5(Rt`zBXl*r zhQL$!D&PrpDzB>{=xQ1qu-II%xqR>hB1X-u#-q)&5_Vl(Mn#2sQ3!Y%IjpNoKbgTtU^fzm0lg8XXo`@Zd5v5*iInT z_yZ_&QgQl&Xk=*Tpwg)i$FR-Phzf$ON}#Lbeumo&udTPE&O%t$Yu<)>eui2uTkRg> zbJR9)dm}+C@X6J$RgHt`PF~aNbim`V!A;P4=w3%7d<0&QpbPWy2kZPAD(6>WS=kHA#5(+hVUP+rmM)B~k>O|S 
zq<0g3U5O~VQuH)cAH7?^%Qf@6&ZDKL8_7Yw@FOEg_IDxMr)<(W0*?UX_uiAwqL=fh z44$%n~k5>SAiO6d%|>3!96_8o5HOKmDSbsJL;3$txp@ziv+$x}N$&8sPq^5Cfn zi_T+0#)O2XYCt?bK7OM&hj~8r;xO0ARWq;N8%jwfD-NYi5WeeO?=srtX@50gd9HiZ zva-1dwlaM--HTZZEmN7H2`=6DR@JHNX4tWj~7X2kqPcfbV_Bd@BDEU;p?E z@oPW3A76au27Z&u?u#E-#Fsw2gx~(q2EO{io%pQ}uHnn?U&NQ+H-{3Bk)l0y19%Em z8ysOn!8K?=9+lHk-3fLzR)&aFaZ+*`q(>pFV5f?S$~vkn0gsT7vNQV5Iy`WC0|-!s z#G6};vb$V6m7U7&HpiLB7!8f7>{M|<05(yncs&e-Yif*UBx+w*UaBr+70;@r-A)j0 zuPXSq>S~lXP?=EyGz1uW(@Utp9El_r}!(I)J$@_q@lnoSqznOM}H=XF2Xc5(1Y;g7`!V> z@aW`D~5+)q^XjEx znhB~;e)5w>_cil)P30y$i_a>KU@s`TW>sWw=S`)doUvO5v$vek6e!Ul?VCRuvUnjfKGD zv{tYt>}pKl8ETeJWvh_BhTiCp!xe2oohM8s$xuH?00!d(Ekk@?giz!U1w(iT)u7YG zaFww2cr^9G$9X?J$S^xZS2L54*NK?lft1gMq}PG4*9O1e25*?4jB}K4B2`ZqW0#S(MRwNayrOxwY{?qO_QSt z?plNG$U)Q{+Yj5Zod_RTMP_mm?ft{(85%<;RbXbY4~Ng5DLhwtr#aQEpm(Ez2UmP= zQ|Za{qW|Sz{-ps9xj>>WUm8AhU|_&V6D5#nUTNEyjTz@DhRor^hi~+{ z(0&3_7o!6u8&&xE_~VZo{b!Z_-Gr4~{c{z@76Vz=FWyJ6)cu@4f8I!taPi_r9655t z$o&_MMsfD++07#h_aso6P&2c{$V@e7!q2>S6LRMAE&I1NegFLKjfPeHyL|gk@ZW#) zb@;7cz8zovl@s{wPaMLRKfM=U`RrcKAHdf>b^zb}_&)r>ukOPieQXa(T)qU=SRqZj z-5sMkQ>ymBjf9>BMMp?b1ywTqD|REC74WF?Y7`a{c#TdE&*evuiYnfmMmi_J6<}wy zyHoj{YG*lBvlNB4Dx2}B3t2URwqtuCpPi;f(9?rjsz%2GA4A&G%1V@SfB6pnitnhc zL8(#^SsJjNDo?rMYJCBt49^}*!Wt&D5^0n%Of92|s}%HZxPYy2oK7`c!|=Vv&amA< zmCNwHir+<@uZibqAvBv%?N7jx$f3R|i`p<1ay&yoHKWlJfrE-rWtT3WJjfrE5Imgs z`NQz5p>ZI}&^=7|A{y`@Nzk_h{AdpOkcjvZjnU&lG$&(-MuJEt;zm0AU^Ipxm0c>= zjuau@$xya07BfKCmukUaDvg1b7W5O6eXaC-5`*nrr@_K|x)5EQN9g2n)So+xZAT8l zdSo90dv_u}yMWa6Jerr65no$GczqVT?z_|Y4ZTrpLda=(GXNC{jJaY{`JDbnreeAo z$TTl#yyPV>d6wc*XsWC$rl~=(z-TH*?fbw358MEcX*%DhKJ_W1T+Rfe_BRs<2u4zj z0%=hSuqQ)VhlnUtzbp{qVi@ix=UrZ_U4+k~BLYf~59n_Q;-bUsrB79V%Zei+j; zrRp`6IP359SNNMhcqjhkOON7@zj!yk@#|;sJHLGv-~8$s{O*^|;&*@T6n_6R$MA=r zK8*kVtNT%+ab@M^fZ76{%IVB;X3Zui3STQ~ZJXX`cqQkn=+y+ELRy=XiYCmLm7!L1 zCP%ob`xwJmDLhqB5OPv>#)xEuo-!G$K-x-Grk)aIgq?t=E@FaSg#kS(J_C5lkU|## z2|ObQo?P9G2)y#TI+WJd8fAOiREc4=(d{T_$Z4UvtM^io1!CAHU^g;MC!lL@ctPRa 
z03Kmq;|NkY@;E}ShCc{3eyYY`3YApEwNb*ZxgFKXJgTB;*pjUV_!@k1IQ&g;QWZM= zQMd>=xjPtxHx!32+{92nVTAiZDhdry(-aINMHQN%YD*DhafbKt1OXTDAsz~#J()y4 z97RX03GIYlC&8AB$BeSOzLdh)6#CLF7)fO?%8+*`m%%`5nxQH|*p)+XXFD=fFp>2& z1gUIlZ@+?)BgbK32y5TH8~)Xui0#>j;GunRAKVGg!8NQtdat4EjOR)3Cz+**%JvG4 zR~Y&kNgSV1H|p_F3^L`!+p2U-XbB!>{yTw9J!VvXr}YAYx`q{p!Ge}q#%A`;D2B`v zPdsr0wz`LDWqzjVe1e|3qG_A`+;q)s0PQ zv}9Z1r+ZL(Ex}n~d@47X0%=nO(ZMzK>%2uow zf#Hwfr>rhFcRZzuMGwiJ5 zJ(e>hFH@~D=W7}nh8ymrE2I_h6w)%BH-HyHjcTq5y7~~q+HeNdAu2^duc-}HEgdLt zZbMltgStd3tW7yX4K@ana0HWZhNz%JO>hx(Zh}tk;rHW>H5;A!y;Q;8rj(HsH5o}G z70DpM@HrB1L6}M|-okZMaBayH+6cWaD!tZd80qE+(&-3t*(loc33Rlk(8KR@tT}_} zmORFDISjRB2)r!%dRo!b*NrSyS9sShcut+8(z_GoCoiG#%mvi$KM4EbBM9Gq9?rAJ zVLi4N-b1^v^57=)^hD`h=TyI|3MN#wabqRTgo4zO2`RaO0VJ3e(N&D}4UI2R>k&_m=%|rYBEN zp^BW1F5v(4H~8jL#jq;|o>3cn19%0{J>yh)8o;K$k+5_bAzYL%a9ZP3Xncxxc_H1C!q}`r ztd1Nq?ODWH;#56e)Y}+>Zl-@yCKP2rF%;f5Dm;4S_8rE6JX$a48D3G2^2&mnLo8~z z!>XDpo~y=i!LOPju6kwI+&*J`ScS56RH2nrbQX62QZ&0~x|DuUi{cPsjP+R!`Di!MSS zy|#hq{-f|6zX0!81g3Xz61V?r{O%h7x9C8u;>2!jM~~8(4*<~7x<|0 zIQMKg2o_RLCaCmstJc-rR%`;5pqI^Njs4_OX99&x=aq8Odb4suWxmDN(>cs#olA)c zbZzr|QkkaWQVzXB(MYLrJ~fMy72lW6r)6{9)~N;|((y`+s&ff;YD0SR`}(S!Ih>Ga2o3=bd*N3Rb|pWk0;>uVO&YT=>Vo$LD|YVSMP$MSSwX z1^m}n?Zuz|{2Bb!hcDyrKY1Vi?-w4%|M}{}_`BbK6o2Lhq zdbOZO;8oSy=>=t1R#}ho>UyJLZ%u;@c8?F?c%l%>5_Sq@)m_Y}jqUD2rY(n5K8+k- zz;t&Oo&>?;sz!ya66N&_XRFJwy=)t{@3;-6JCw0lVJ&?-LAPxOs!PiVLRDxGf~wpg z>?$hDP*q*d{Y%hbA>?WZz^YQz)>omys-%>+!DiDK5mv6NgUura*GS0OsPwFa8C9Lj zhYEL?9z}&CfJy?e&f!6$%MY8BZ$l7`HjUkpg2k1E#ZRDxvZ#rrQBU<{C0y#739gn- zR1;j~RC|>nD#CDzfNX}HVX8F}Ak1P${qUxzPT#!TP80f zm+-e2loxq>I?=Jk0U%MZF@#T9^qS3|*#OR+aIy)uGl}k4Co>4NeE|UCmV(nycJS-RayR)DwbrE;j*3@VN;*wNG?5 z!QyP@HiFKVK}{fq>Tm+p@i^)eN!XHkI8vRkwsbMvZABf!Q)@J1z|GEm8sj`q#0P&o zf=G&?tU_Ui#ib+jXufluW7a2$dD zQAFB$jPZOs2*oxo=lVyG@9RfbZy$y_d+FWi>hD75KnMEiBLkh780f}$e>X-4doeLQ zh}pprPDjx@JB7}bIdrWpqK%&3xrFS#HMH*EKl^aiCpP6M@sz_OvO;dPlo=2`m$v0~XV%p81X97v*HOutm|5E6gYEsjC z?zzVZU)9!6ruF*FCRk+}4@93|jj~nNx3$v4G=;#op!cJho;rQ=h3An=|M&mmk6!*0 
z_`;FX_`;zh_{!14_}spo_~PMR_~P*meDdTxK5%XnAG&t}UwF+fl&Ec5jk<{$UJO0E zq3#sMsu6E>y_I3G!)SLGWXRT(VmO=6p}niK(Cmb;)32DIXLKK9n3bl=i>KnS`xzor z-Br7&%sjQ&R<{jh1XNko4(upXNOT*@jf9?skXBw(R*t#~f{x*@#*eM5sYDGGo}gEy zV{1wnhEn}mwo!F%M}vd=a^C8$W|&$5ht~qT%L0e9fr_x!XbPht5FL$nxEo#Y@H6@d zdk@1}hnwK{5Qbjfr_YVbfFIStFsl6#)cWJ7^CygHtv`m^U<{T}6YNxWu4s;cYa{G( zXbfi=)@B*jw!j`?ILlDf6_3Faj=~p7aa$gZ*aq36u=Zpk#GF><>JN`nw)N za7ye)>_kD~MhwnrX$#5h*@{A9%t zGl4GXX`kYSmt;Ll(4TKx98+ws-Yr}1A; zoxpD}n)%AX{q()~&0~A;m6PlE;;BV^?({TD3e8Ol;47#-K~E}A>dtB?JgGaYgMbKx zHX9kYA}dALUg#@pb|Etpe9F`57Pvf4DlMuoYYi%`6{vAkqt;!E(z+cClNq*FY)2`f zr@?qC7~U43SMb{M!cd7uLeEMnxLQF^p)Hp!1fR1M{xrdrWY}#fK}BN;D(xkx_LicQ z(;W>Z3|T9vC~Dwptb>=ouC6)@JPgadRC>|6MuZurdo32Y9F1rs@M?W_g3f{JfD3gh zcMSL#+WOHL2s1>Dp@ClSZ$d*b$>k>4g3Yi|;WdUcgkT1)a1I`V&qc_&6I6Wsaqz@a z2xq%tO?03tH;jhmgM{4yl+3SV`|Wq3_MsP`@v)a;+Zk0bTt>$dZnJ9QaeQ0tRW1LRX z=O#xmGsWfEA^I?;r@1^kj`77wjI2y!SiUrcsl{pBapxsIP<*jByQ1kgRoFA1v%*XP zIOcRK^i=j`HWSfQs!oba;jH#E!&;SpX)qr%d=(f}&~Khcu4n7Hl-|vt_k+S){UDq7 z{e!Hy(nQT=^cj={)x6KGl`5*C)n}E`lL?IGJ(#9rr1aE8=a&5_rm|&yxBMG?@BdKg z{ZIVm)33wt+<6y%|LPU|{_VHpk1t)pADlUluOB^uuO2*v|9W5_zJ72wzIkY)7^ zcf|^?nqZK+E3~_dBbm++b_`iN)rG9k>_nlg6rTFZwzm>?sTRbdQ8*oTJ_J-`<=c6l z3hb!bhHaGulZ7vULR$i_ylk5>2u}fc+^hrp7@Nn7f;JA=&*G&YI;$tmOT#GY<(BK^RzuLu@bq5@y<#^No!ks*O;>H^)@B^V&=bh? 
zd~Ge$D+bp;SEg6oJ?>ws^mKkx(J8#u@ygOHEACZc-p3cd@C9QZv$>2+_fQovZBxwrI8S{8R|~GDq=~6vV-VOEMA+cf|W;YPsSwxa~w%BTPcztV~l19%nmN~%1KPa)XVl$nEKYf8{n zm7`Xb6jX(EbzEP&19en+7H<{oZ8nr8OW|m(M@5K=p=ldDeT}GSEk}7PVK^H_YC_FL zYT>q5!B52*vRDzeyXZ~?3A>2hf-u8hrFxPYlzVJ-T&_dNQHP+t76Ciq=v2mHKj9ZM zy6ZU@+S&-b#zYgHYA;Sz8_f`QIrt+vhO;?1!&G|&o+H(Y%481pgX3tudJVlVc{!Zd zt|M^YLsWIUP_nQW?#Ew^=nG!Y<>OR;kHGfC>rngD8{vA@&%*osSE1qL?Xb?wB0M&N z%=jpBqf~mMeaJIZ?U)!v=hzs!#~7+k&7*yO8JUG8v@9+mx3Y|ml|_cIbLgdt8(f{j z@WvD-*QX5FZ7fVU@0KML6DFuj5Lq?=b)$Q&7fy0v8~Yi*VLSv>QVz6>3DN2 z57TrF6RuzW@|TUzFQsSRtNEF9kD3=mH4wV)=a&8OroX(tb*00A8dv?(U*nH|`ls;K zyRPB)@4OR#LdEyj*YC$)-Ej~8^x`%A(YZVDho`UL4^Lgf?;Sgb5_P{ap(j_DEK8$n zv+S$+{)>Vz*tOwO2weYTmP(4_W;A8;yCaO7qErJd%TN?;52j>Ysn+-9- zEJ8)+cUs_cHy{{jM8H=Mx1$_kFOPBZoD7e{4l5%30f;z)^caHf7(Br+9FZU#Qngfl z?q~|0Xqw?|n)7M+2|Q;L=hJPlW!h1j?}BA$0=DH1I1U~`+hfmz_4X^Mx^NAaYmec! z)7Mb4bO2b7B z*)`8Dpm}{aV*3vvaQG;k2M@x3l^$PQBg8F~@9{0>v;Z7)x)pjVhf_GHnpm}M)3A+Z zm}!Q#YX4^TgeWd2(=d+adE~0;eXc&Mg2~(7_O=`Q3T!IVyBYN4dS;8&*FR^vFNL?d zXN{s}Laz9JWuN`*XN~ilgCgDR8d@jlX>_(*_9L1eJ>zlY-}xW^4*%`7KZP%!yNs`$ zyB&XY;S&D*{3ZO&<-72|@3*{s<$;CVH-_Bj4>bo5!b?R@b@K(Jl2t1pkFxG4= zkwQy0ZzT3q`X_^^3c-{?WH3BJ-D+YQ%ni)DnI;8$6( z!x(Z=l@uzsQ-N!x2>rJj*Ni{NIzsyRD61fI)B zRoCJ~vO9=Ss~wHea=2sF1Y0H59aW;6%FoqkBnmJR2DrTNd))loUSn8Ae>lppHVHq$ z=c9XLE%cPpHqW2V!=B8bA(ex*wF^}_0-@0~h}|90#o{`~Yg{MCia z_@7tqz+X`P{n;IN;ZL|+Vo^2}n;UkeZYS(QktQ@H)5zomw?ZRhWkN~ksQ5OM1gI=e zzrISrqnvUorzx+fG+f=pwi9?pKiN$e3mYV7tuCZVn zYB{Z|K_f#_tCh=EZfhh6trf6)>flSc5Eu%;)?%u$W_Eo{l#$$EybwN!BofkQR$`s)$(*wN&2BgXTDjqznQh))#3P#nQ<6T<3s5s4#^B=}mW z08<1ZRkJUXLxA85^}*3Kfrj20)bvlmxv&$yJx5T#wjYke1l1jnB6j==>@#Z! zPt77VNx%({AwMz2Fn5I0adc12AvZx4IJbewp)&}byM~4{cc6@tKJesA;Cu2ZgdTeW zP1o*&@Azp%E?hv%ombF&@hmduPonkIA@m&Ihtb2kFiik1?pek42d?v(Dg+RPC+rzF z|H({9>7`b8ui~;hLC=JX2_W@W$Ye4%_E89|A5FDqGr=GQrBJaLdV-dELzt$0i$St? 
z@7^2xE2KVp^ym#Rsi%mUlt7?W#fi>k-n)5k=Cbh;ZJIeRrKjVi^oq9^+vh&_IpZFb zGw=tQYHx2Ty&uu^C(xgZkAU$M{L{Da`>%REK6mT_K7Z&KzO0IiLkIDVg9q?ChY#To zPMyS`-f;zgeb06L->Y|{#4cs$2_T@dyLb{U>6|f^Y^(asDy&u69U<4z)oFN^0Bll% z0I%1`q@k{1w{6>w5`ykFD!c7U2vAWCrGQe>Po;J*FR$doSZUZ*e?W`i|lK|`hvv0e|dBOx>my3sTh zKxiR|`jG}Cb~d9bT>)F95`jP+BCZA`eQqi~0?*?`)a`@I-T-&PhSC_rty~G}=j#zU z8s@YX*(1#;6P^jD8*!HhF^?Zn{%Az}VMGYO zus?!Okkh6FBFYb#N^#m^q{nYcv>?UsH=bxiEH{8q`!HPXeQ@;+z&$#OhM^JICKnM| zJH*ek3*M1wBnUi(v8j;>_(^p$G0k^1!aknVs%WSz^?*MwKWk zQ`F}&KbsmS3%puy4kly*-TV&JWJTv!c&l|L9CZy7(mF=9x3|jlenit(%STYHsg&89 zUo8IMl~3b~$1mbbhfm;3`}X6v4(`WS4;Ng`R=#xf0KR$Z6#n-7Rh0MxVMJpIB%521 z&8c#t9U6H|NdgLeW;X}vp{KJMDw!}E86Ik6u^m)#8f~nQ^`sglrR6A5lF#xgBZYgZ zfTz*Q%Bu|k*3{JV53ZE#obn;6=O5o<0FD5&G!SyiPHZ(AZ(5a=*k+}=t0(XpU~@O3 z!DWHPS&v3v1FWHX)cVR{^H)%TRm0mTnE?Lpk@N1Q((397zSAckhDK4nz#219U%LJWVSXilW)gdTrf z;+Z^Rt=))s_aW5VX{3Mm_UGXn>VkK02*KeAga*eD9~nbtY69u;QKUx4k)4>PkD+~P z3Y}xKNKdaIbnqyg7cQYn^8NJfsCe}Gu)X?qsC?-wQ1$o=VZZZHG@ibj(0c^o$6o-) zqYoqeg2#}4Zp z=!!#0z4(HbK&5@nvOWF1%}N3jj!!@Rv{Cs{3`c=SwY@4wRsvEbA~3hfbv{*M2s9>) zG_Mp==Di8X0;JCE;*XZjA;2nP(Y2%$S65e!doWG+t8E|s=tno#e7|YnaGl=_Z*LYt z>+>tTl~}lCKfG-+5C8N%jpM@S<)8i@zt11|FC96L-#&B_zp?iKKDWM(&+T5vXZCO4 z69;zU6MU{eM+NweeaBFeP|d8ys>lm)1<*;&$))hr@K_mA;)y1>JRa24)-!~yFvgda zvMVL<)X%lXYQuIaxe|?8A<)%Wu%oITJ1Xl?R$Y$@jb64nCPg)&S6fH$)e~ZboVxqaj`c zN0LAb)gs`xP~};WbUV<(`;YS6afYQ)yA4r4L*sZgLc?~dDi>^vm8hK|T&O`s zwghFV5;%v-;9n+am#bhuY=v{x21lGKlghD);EQ_*Qm>%rMH3aDpqKP0drA|cA%?n9 zhRsb>da-7t!YO())m{oQhP^StkKcW~BZEjs649)8zU^;kJT~7=P|dd=tkF<3skCb(Rx+Nn2m`NTzh?eH0Vb@xGhbaN}R{K{UGWYw_PP;`YXD4MD_MSG{(;k6p-E)t8vMgVC*ZKZ49 zM&H4ZwPZUVa00M`>P@4JDYMCTLQNg})xlpK`^)MYQEg-~QF{MwsLZ;E{f-~TQ-Q$4A=Yq>^gOf_o>urQL zV1p;@K%mJ@AT}TnvLhOB8fAIS9yeONKE!$6CITuJ;IY0sL_570Iob#BJnwnB67JnL z1P{4TF~*mxqXc!6W$^8Cz`^y7!%n!)`cXMugT^>RVct)C(<6Ql!&)Cf8bF*OZrmG1 zJV1y=GYHVbO>Jn3wGq4-vpZt%=`#9BrLxwD)!+*WHVJ z{{ZsC1IPf6KybfM{pCjc2)S|O=`FKk$SzJHGc$$e#d#!79759_7ZADU4uq)GJdZpC z-wU6Foo;#F<7jx`VbtCC5UQVe9Cc4Uf%?ZDg6o0nh~Ie$ofpqwoY0#)vKLE-cH*AL 
z1wFNeD*U3}0G~0@zrxyL00=B*c4D~{kNKj@isz-~q>QwGG0b-D+I2%A>D-!s#VcN6 zjEQ0HBPAxd$jx<9ak`%R#LCQYve;Cip`IWre-`L&22m5BQh$QDOu%~Z!3T|PrW!S& zR;=b!rl3oH9mLW<9GaBe!t)Odw#Khhu`_h*W(Kp?!woP zUclEk4&!&$58!LN_v3Si_Tf`hd7mNxKefIKzrMa7B}M~df==Gnp-@(3cv{Y(DcJ-! z0a0hEH!3N%?_jv0@U^rs0$E8ZA3%n!<%C{Yy%o1n!QEEdU?k~m48~CF4xonct8)bi zK|d-i4g%6nRj7oWHqylf=t!l|t<>(h zCUoYKXy=bndq)>K`uov7MEDJLqJ6Xny%Qto7^m`^8b)r80Gt^|W^M|JjU_}W<3mUH zBXH^{{O3<2aODyL*RLUP-@OPv@F4t;KL*dso`Uz4FGA?4=OOmMJ;*S$?LT`QvnLK< z`S32R9$dryPux%F898a69pY}z82>hbX97TwdFxx>YGgc7lM^-M)jsBSSuyyuO*!V& z+(fSaw(oo2`wX>b0!#Y{kgAMOS>Wqm|9ZUm#VM^ zR16>G?bDz{8c#zRQeOJfml|Mw*~?yLgw!h2`-4CD1LGX#edw5XzVn?%S);kG7^?b= zYI>v3|C!Hx##m==FaFH3kAM8*c=x;Cy}5MDemLXbSNR;j%V+C*{NZ7E``I`AG~R#p zPW<}G+wt|oXYt$1d+{5qy9m1teCptCs=mGW;>KS58lBK9$1iMVG1&QW3Rgxc# z3@w$nZhPqt!mjlDpjQ}Y)_Z4=Ng*RDmwk z*xaZk?CKb5TBy7PJ3-D$*fn|uxiCZ35S%>T5sVt{3di7zMBygzeDNea(FA<4B)rk4 z&Hk-1qt}QA0}l#3^caB{3HYhV2qYg>7^iVA3xol$4e_WKL7&wKf#XRBd`(n-T@JXq z>*1SnAh|b&-izI6oN7QtS1FnnLq=tZGgSv)kSdZt2nni?W-5b(ac)8{5a#vc1algZ z-ce)@9!34y2I`L-MdaFbRIlxWb7=!!hJRj$QK3W@nU*%RQuTGDn$exr0Nl;!$hV`T zvj-i0y=WikM#oSGx(U94fnE#^4Pa<$4E^+x*=Yn9??TUo)0jMc1Z&6lVb75b>^{7T2cEcx zLBqFhsJ%_rZJjZ!7!IZhG{x)m;+r7Swyj{2nPqvV{d3#5n!vVopJL$3%xlYv_tm{_ zy)NKCS{P%$m{XE|A)T4OM zU3cKuE?mNYJ$?pX-m?!MUSGh6S7!0~g++X2aTUM5u@fbQa5ig%vYIMuubUxcb!|1u zDhN1Ac2!uY98Wc}h47a1l{KiKx~nGOl&JGIhOH%aR+Krss3Gtw2`yWq1+G+<0F0u% z(M{mFjWH+c?aHS|*lnu2Mi14UmkLY3^G9GK!f-pm>5exW?h@#lQdCS?!Y@rw zF?>v9IL~$59-^xZEkYmvl4JT`>f^++U~ zaD*ygYpz0Lo?&c9E$Y(cXh>9`bDZ}OsiC^GAmnG*OyCK6NdtO5G*Okt{eo@+&O{#W z{z*jk9z)6S6iW6UK;WsD!gBY6NZk7{n)dF4Z*T~%jzL8FxL?;0;vGH6bSv>{JG$Dt z3~%jdLt9TfdImbs*Vl%j-cF1U^<#Lb5B-C^7#kbG`u>3q+=?lU0LzR%p7 zF?p7m+iqsX--UV4#pf%wn|+4=*G!*Ku1_yFmregsP3O?J{5RkI7XF%7c>gzl4X=CO z&)_Fs`Y7If?Fv45@d`eF`Yb+q@+dxYcprXaWgTCdUBD-o76iQ>7EjjmJ3&Hya^mBgp)jHrNR^ zXM&(hwIa}zhb!2OSWBldA4ulmCHz9^R``=?gqo?Ml3Z?TK`=%bh7tr_oZur2c|YM8 z6Nnu#cPGvO9kj{K-_2JdTt9i5LWo?^B5ju zXdCnUxIKtwU!3Ppqdw7w=H-L14Xk0u;09_=-3|9+uSEUbFGS 
z>|M`8$?R@ecOF5@fg^~IjH08X6aDRN=ZT!+SA!d^d*9?8n&YLzq5u7;~o& zVCnQetex72UG$wN_Tc^}A2h;SJygO)@xxkZuwr8G87tnW*q>!LUHR{2&v{pyc9U&4 z-TkI3|J7!m<=^IP|G~E9gJ>eq>G{#mkzDiU^JGHsS;yVXpFLYJE@=55cosDt`rn-X z1*dQM(=X!%zx5$J|5x9Er+)fnc*EV-@qzPK@hfL<$ES~-#OGIc<4XkJFV9XCpqFSS zVB9WLRuO3B6-INELMW@o#T6)}vMXaqtFdP*jOHgbD6eTid7TZlRBh!NP>1lV@e!I- zS%O`CB1Lu7f(C+1shH}$8b4Pdvk!K)DwJm9`DJt zBHlNI)~R`9#-`!Uw!=z=7tZ$~-ZlVNJcnp&AK{pWn=o`Y=iqH208=^mIPXhl;inq( zb9*2{7)A+0&impq1fn4%!V$uXiZ2vEz~e%b2I*m#swOB&flRfsE*Dy;NSj@1=jTMo zr?R|4ZEYk#5BU(|F$pT%lt01q=TP50gz%nYsO()r<-`F*?tdwETzUl7M_z@RbN9o! zd;m4WbFdt^fLcOu$GLmqy7N&451m4Mau!`(-56@mqo;@7+lrx{4vh44VYIIYBUE_9 zqf~o@-qhq6rs-1?Bbc5Z#RQ=@u{es!)d@_mPh)O<7K^(Uv9f;+3kOy)b9fo^Cw5|y z@LM^t4=bnkV)gW1Y@FSXU1tvxd4|E`VmI4@rK8 zFku-C6OzFYntA`paIm5$NbcZx`X#*~XBz72W-u_IF~izZ+vzd832+J*!x)@W zSUZWq=`pIjDNHZVV{T~w5*hMJyd$!YV`9)uX$xae}ZT^mY<@ zd#L{QpF4#8rw`(h7d~b@RE39)_RNz-;ekLF9InH}+=I3bU z=g{zszDUR@H;nQAW6=~GEj-Vjb<`c&jlh|+Xu5hgs*jyU#o3E!y#Ha8oj3!_(gy6y zd*C>I6(vW`qvqa6;JfExB=;UghA{7?(i`sDgx+8qx&}MZH$)IB{rk`$`sNqVziT(T zHr9|^TS9v0GBUfC(YA91-7AZjT3W#heQj|bYs<4(-MwHyZ<)S+cnuq>v^c&8dl=5{ zBlr#wdWX**!XZNMd4!%GLc@w5X2bQwYUV$=nV-in&6scG=38mnvp#C^e$U$QuQnZB z?EgA5k1D>GfBjj0SkskE6y5ZjFmK0n^EP#m{JRC@31BKj5zOS8*E($x_6o5^0eIgB zy>IcvDn{jb|Av#-eEpL+{n@wU>SOOMNXWD;UP^P&O2*ywV-UAUsljrmH2q+7_uLxmvEPM@B=V9gPl`(YDS) zS8Z&q-Gv$gui8=Y8mET}&x<-Ifkzc+CH(9jLeCc_@Pe=fRE8IZJrsq5VXc#}am7={ z__TI8)mi7lIx^EsNKP)m(=&{~z&O%tyV11w0DS8^k=%Cxo~c=6xUN3a0av~U&X#t< zu8pB?D_nG?ce1s#p)u1AOSS`+9G&}GvxFqQp8M1{FGkmpmqv|pR5i9uMrDe#IgRQ}7M7kq*t-Vd>YIRld>+k*Ps6>F_kH99 ztVa*Qea9tK-*E>@u3kmy!w;b9;_dJ)uA!xW1l`?T80aoQuSd`u?nL)+HwK6M&^CbiZT5s}SuyeeD`zPd*Rs3+Lb&8ArC4p=)=iF`Aj6 z*E>My4RxYtv={xO18AEbhi7F4q4O7EJ$*YYcU*`2B`=2KWiNpHg%2YB=-o&jKZ^dN zCox9QP8>dk$x|mWed;h47}~BMUBlkP8#u%e_vrE6IDT>uPM+R}Q-t1$QwQ+m3m)Bk zxcFp!-$Sc)T5%II_ci^;*>2pY{y)i^V;SAbRj6uV+lADcdYqx*;9{!pup8T)Bg2j)&4%46c zDa?HC^;r7s)7bMHZ^7~Z{vKTZv-jiC-~A=L`r~iGBj-<`M9OaaHYN4kf$FLX0WAtA#-O?`x$m+sNF 
zaD<9UZSCky33w7oLaqfNhJz7RSmZj9Z0|v`vmebpL&%TLVCKLH4DULOA%=Y&Yx^+3 z`e2FYVM*j+B|NRoc{CDe zjRbTf!{hpv3@i+borI$=PSDYV1f7?l^f1(oB(i9ss*J@mh!ULX_FlAf4Is#SX&IbB zuwxWq!Z)$L53%`WB)a<1KRANk?sh`26$5?k7#Qfl07KjU5pEkFLT-K}wi!%h?S%$ag&K|@mLhpqyEP|eCnl1KcCY#13J*>sP^#Izlm;U|i`|jk% z3jI%SQt>N%W3Ok;78bPK^k;4S7n+WF?&F?wtGS_QWAUo3{_kbFmicX%-(bN_jaG1J zI#rIzHE(`d2jpnu;ZK!*_~w0s$cyy-5gy)K5hU8v6W zqGogn;e!_t+;axuou^RNHi_DyB_vK?fqnlmH0<05_v#+_hUU?b>Opm)4Ye&Ds7bd| z5#~{o&7+!LmFIef%#DPq%NWC!Dl^qVr&8tma5|4jDuV<8+1lQXSh5A#JXISNX)Dj$ z*EWjoPTgxiQUqRWdlz~;JB=nPgKDZm=nW2Z8SV0hM~5&lIfnG&EW&3`pyuKwG(7MW z(r@|z!aws~)V<)<@IC$nyjLzEdgcsLSMNgj;V0mK{)-WP@r%*=_(Pbze99=tJ9=U- zPM_Lm0Ppx#$^umYEOxGwV0kn7`O^V-I;@Fa5vR z|Gmt--m~uJ#!Z-8%ulNMA~W`R#tPLt#r|yj!CPg|z13W6-s4U8+FW7ocQe1~asSp< zbdlnFDSU&4s)<5)YF1JdO6$I5OgAgZ%yXD0DERCl}z1{WfLax8<9Qf5{C` zQR(gZmDge8W3R*V$6t$;U;k+=ec=rl`PElq>eqe}i(h&pj(y`7as78cR8V>viA*Do z)ew5MRC{$wB-(bSwoBAG-p zJ>8r_M#&5~)wmgHg03Z>Lvvdm&F$^T_Vl8)ZxHSMLxy(_P~8kqFnpUr-`F&I={*y3 z=$=?W=j0;Vrk2q$yN>SVz35v%gznXY7~XRNBlOPsJ;)Mb`BCmi&r$K^>8aicGS{-*y%j;u$2&Wa$akQldl0P*fjb1jo<4MSb)lm*kHM}Ej4`|&;V}dKofsPK#mG=U zhDL@kJU@f31G~_C?L4Y4pF_nR_rv|v(+IupoeXhb3+H|J!FS~XqUQEHd9`TNY{zN6oJ9?Rp*V^s5|sWH#6*@&Du zZ%&Knzsuvazm71s$xTy(-$J9`XSS%(vbpq6jD>VvxiK<+VV9y~c<%3h8{ejzYyROM z3-|mT9(3bA4CiMrYJe;=kI}jH9hukq*0&3v@mYS~UJP z=9M`$`oXt=p2NIhx#o?~R(}Vr*ff0+<_DFl(VzC^YE86CqYFRw#rIXmc~dO$yT7@ffI#I)fe{ zU}k1YX68(#l1Z5 z(D()G_V-udqssMfggo~DZ^*qS+yCJUpS+IT`2Zs7c)9=ZUR5{s3b+KO^54p_ zNyz&i?f>W}qI`dSe?EM>g8BQr`G5Wps*mpBNAc+W@_iowQW51*tUM=B`xy`JXMBkq zqnukg&p+me@T(u5=U;!WJpWU^=h61Zd{UnO8TZNR55AXwu7rZ?O8o!&ColPh91#CK zJY@Cn(OZ8hCq{kf!%+WZM;>R$3>c{te#WDd>0&DQYktPkBNgG#xi7P5ewUOLeQrp{F$(U^1IwdVPZW z26wET(E+DYcLF>{vJK{BD-3*(G1US+-zOo%UFsu^s0k8&j4}dIM~hrGxdU}$JCGUa zL7KN6>9JlE&+bx6uYC7G>OvjmmC4?diJq z0c@UJ#HvIqR`EDihZ_k(FVYG0tpwXfLM)%>sVE#po{hh~xf53(Jd3Aac^OZ=@HC!! 
z?g>2e!adx6a0Ac1^fX?2?ioDy+|zjCm8bFKhn~kPFTQ{eJ@+DBec@HS^2$f>{D zC-EX*`@*}Qz$@>)gJ(bXQ9SqNNAS#tU&B+cy@DrSei8Rycoz4bc?wTFxQDw>+{P1k zZ{gnEn|SKp9X!v+@7=kLCm-C$r`~x7Pd@WBZr{5{C3y>1u3y8o8`p8|`VBmO`6|wy zyNDAf&pwQEIC<(kPMto_?O7Z@ahh=oCr+Khkz+@(@4$W>ICuyL4;@yy?j(*JK7_-E z4l)j??ZJZwarEd>96EdiM~|Q6_ddYI%hz!1G(Y?7MI1VElFuDy9L3RN$NAYO`MP5` zarz7%3p2VF8&)~7EH*u1`Ywy7$IL6~Yd}QT#}}oI&$Rj4%A2_R>}$Ak=UH67{S0p2e-RI!|1h3^?QOjLkx$_{ZtuVFVcdKEbzHyy zJg(k(8rPqA1{bg1#;Nm<L2IoWZrD$8m?} z;=zF@AgE+KROm;1w1YsE&)0iVBq1)}pdOs}f(<=w;O}SrbBoT0+h2^P}EwsH{? zG&I1#*ITrDm}RY8+FYGp51qvX6V;xZ@A3JA@CO*7AfZP9h67Z4Ap~M!Mg+d7M2t!& zfnc(U>aT^-Mi{mslSbzUW)$LM)awFBbY(;Xh z5`5@8@e@|GQuF;AL#E~YF9z#MRU!+x|h zCJ>7R(bd_GJ$v^+7A7}Q@mn1ZRRKz3S`%GBO>I32WN6d29HeLDAUmfJnOXU2yDc*p zTefDXv}Rk=(p3VCHFPI3MN-WPU%y`E+modTWE6t5Pm6nzaxH11WOmWk^lb=4VyI^& zon26(h8<<+7W1>VsRXxa8JWn)%ECq&kD8f-ikb%6|J``$wYPBL@!KdYuSZn_-&2arY%^vIUTDvrePD0-{kP3vAqWlUl=(><+M|qv0?L8m4k8< zx10Gr8+m+^CQB`|usIFuw`B0WxhSpC!xe6VC*A>*mzA{1htdW+%C%1DSQdk+UbGI( zAlfzrYp@x5cU-kyYw0w!Ha`j~8?Z@MIFtpxa(Vu;@_7Du-uOL|R!c4IuyLardn*$T zWa5EjHebnZzA_16D_@_Tor`q-zDJL(UArEcnb~mC;Y4{)+ISshWjVF5zu5%4g^o_I zQ|a5ZJcs73+u+)i0ss1RMAoGvz99ptP2148B@4}IS?J8l#&C8HmUx`|wiV!@L~arG zmXu+CWfe|RR9(@Taa(J{WuqO(Lw+2Z?84#O`*HfcCvf;n&tcbBUck(kpT_uaJ%`@k zCg8sQ6xzT10Lf3^MCvoQ(emXd(fGBe5dG?X#J=$qT7Tz7^nU9ltdb!rs@GO*ZTe=l zE=6umCJGB=z1LzCm6oEsx`rxFk7^d0I+Gn08WXBC7XEERt;Gqg$4|TFfh(1y^5O;H z38-ugr#1_(Icwreof$eXMY-f$A`2qWGI7hz|UMROPyszftwpdpcjmhaK>J-RpnD6k7AP!@<` zt0#y}-U#x;&1&e1VPFoRe{wI6NH$8li|5TgO!$0L= zX?uIrYbL3+mA#QC`M+O?6;)rx_?5guj^G!B<1g^ovkx$K>>zp%>_*qVUFcH zW)iKFW9XO~L(9kj60*WtOA_8_04|>c9?y))1#|?kR1#|P!3ykT)|Je@l6?W5WH*kESMy3%WT;E-cqn z=Iu)pjl-yisk|Hp0?oW(3*2iqBeZH05{xEB>zXZSU%wSyJXgK^%<+v|uybPu4iJV1 zSpn_m_wLEd!GYoeoGdQJweo5_Q{I4QDzvytk#*8+#Ide8j+~gqzK>qP!dITg6k#{` z+s~kv*0uec&oZ7v{PTB_B=p+8_yA2`zK_V~ZX)p6>xg~%E}FmoBvw5drjn^TE;5iW z>qZpjQRPXJPS%RjOG|jw5O_5v8|o~smC5~eCTJZ&n1XSbqX{$+V)}3hwY0Bg4YDwf z^xoD&Q(X&PbshBe8kmhHINdH<+JMTe8{{@bB@m8>2s{CuAKthZzJyP0{Ykzq8KTvV 
zAe@LI9HaFO6M~^Am0?7+1$t-~J*>rDGD)A$IcSfaw8t(&Par7AW{MJoEQsbftuCQg zA;VD!>nv9QInEH$%wBBs#IV5@K!z`gs@5Uoh1-zkiebIgPtf_WjoVB!VQ2Fp)8WHb zvjdx1qpxy>v5FQuYwu}f9e5n82KHjxp2x85_LJCn`wj|j-^Er+{8b&JDA{!a)|<~D z``{Ur9DfWYI}RehwF`UsI#Ff9ImIAh(L?A-dztilsRm$fA3ZIh8je+`0k_ilq5!C# zQ14ao!2|Mv+aSfrCIVeOm_AOQzZvl_WQ?gmNsbz#qjH zS!FLHU8yQjSu%b!lxRkNSq;@%4pmSY$_cW{8Xa;aQ3k;$YHcmm$vUc$wCobp8QidY zQ_xw1aQIWmDXyhT$WnPBMG;COT><45D%5oZ-YUL#D?u7%1s7}Wfyo`hIx54}8`H6d zVB3_oO+ixB-U@)WaG&aszf+EBU3xyY71yEK9E8!=f*Kp4rw=fKDoPE!zw9aAb6wE- z+L7PDvRGwDA;G70rC<-YD`l8nT2DCVVzW|wc@Lo{s!r6LBn}akxmhW=-<)2WMu>}= z6O|{A3-meZIAu5M8wq zsZ|@$!f0Q!37x#Yy}Z_ggy=LC@UHb~IJhAlM|kcI^ZO5GZNp)@%*$ovxJ8xxbU`Iv zE^EMxwMN{k)!~d@iydJn77tEg;e~TpAlN3q^E`UK^$ePR>j9bxK2dk^cWi2` zT$r{f#2dyR^AUDlgj+*MbSL0%^26Wgr<~_&l0ihN_#}-|l(Uq582qC?1$X8ISMC_3QYd z>J1}6K=s*_&9Zm2yvZ9H2 z;SA8)hP}KgB8XG@`KkJb2YWFyJpr9Tr&iK4P$B3|Cc;m0HyT;7mMRspX06y!QE!`+ zwH5dY^u*qZ3S3#DU_DjdMuLwp+bW4KHYAFRS613v0iIahtjui+dYgGOZQ^Ih z=m4F~gAM6eR>WyWfj8gjpEefw-9dbMzz6#%%V#5oGhKNRX|A? 
zh}x4?14Z%4B3iPXf~dSTY5CZaUyY&$C(Pkax~CT2U(Kj>wV>M3gmOy)b=)^a`gxzV zq0}5l8Kc}Bh1S~)SGo_lIP+ykM$yLxj^s9@>)Di%l=fg4o|Rx zx>;|)WvdIPcyAw^8^_GuqnP@_J@in)rM`Y2Np2Hgx~HHQ{?t`?KYbOx&t66F(~raV z$#d|0>Ks<3Wo%_3%TT?v`T047UM{zk*@XotrBszc$K^Hkw6_MTJqt8cNVW9XW`e>% zc$DZ&sI=OlAFN$>QpbTjlgqQZSc6=3VH!5yfC2`2~){M3AdOR zo`ehjHZK~-8_~b3AJINq+fFZBtxhid@$4 zn{BkQyiqa;x*UQ&&*J0bUSv5uSZ8ygetst!j$c6j=_|-Qb`^Ce9$?kreylxu70yq5 z85M-ws=j%w>Y2mFou^TJ=Oq+B^&u2L{R)cCUqepUDC!!!U}|i^(JNOKbOdMuHU&Ke zE$&G>AD{ghA6>D$qRSpKiv$zk$=}ku^;7Pxq=u4bTHF4EyV0?05pBzJXkVH|8v$t%(Soj7 z$D5JXGdrh%@o1?6f!-#i^wKl4l)Y4KVzh~yH*Hjw_R-*awVEEGCyHv5B)cV`^t@Rj zO&!QBsX=yrCGrYukWYw}RA^948+vKjsCzX=+^EpuZi5RaTt19Xbz@l9=8GWV3ov{L^E+flaky?btlU&>a=F)%yv{*Y$$cOsQi2= zCE%(Fx++TmrL?nocG_A#p3c~2C+G;vY(Ac2^AmbrwP<(w_#C2lohcEZ-x zg#$OQt4@a>QM!pL`;`p;_yrLtX|ahiF`z3dZiQotUJnT-gDO^3-a|F_5Ongiyk25u z$T6RI^u7;5{J58Xf%%X~hFO{M+R)necDG@kkaY2;FdFqr*@>l0c;a62a&y!TF3^-!`L^)p zw|m3Tm|Q3*sYgBmwqZ*?@``FvSlWQHY9lgo%8`*rb+)yDQHY8DMYLefSQpRFPV~qRA3ymvm~75vJTc zm{{SM*RF$$&?;<>(Xd()3C@8n|okG8jf$yz-cP{Gray6dCXVx zOK`oo64y#=aGl`0QAOZY>G6D(0oUuaSausRJRHN&;|mxk^b7$H*Top6{TVRdR z>ZEjt%!Oc`vBS5+3qyMa%ssWxPnpnoz6YK?Nz`_m(YB)-8S&QOlaCjzBKen`qhH zsM9%BTTX@9g(51s9J`Bvb93uKE}@ub4Pny)bt6p`o=E zp0OdEdg|_qh5ZHbrP|yGA{o0x9Q}k_dF1DhWZpxt{o+^evz~=;fYB^DYMxd9zjkc0Eq$s+TB}FA%GanB_j1+i zvUmfTS?NU^J5WH#Z6Mgvb1IZ_Dga|bRV(m4SwoyA5f*DXBOG=c9{ zf|%A*>}O`Nq+F|H8FZn}>_&d2mhh{@R$AO*!qyyUhgsV1JRPWWcB0PF4NGJa&c=DF zyB^wJ{v92KuVn__rg2#K+G;}(1r5voRx@l+=znC$t4gO za()DOk}^)(7!~+o=ii{Z(JZk!SK;>j6Qc9-5&sbJ|9(|_w6eP zee5dYZ(ql%!V;+o2)u`|%O#=n^72toP>7psb_}6|}ik6#~5))JrEteZ3lp ztfh^WA1i~8rTg0LauG&iUBie*B5EB{>9mlQ-CeY|E&|WPeR<3mpz;b+0j6kS+iXZJ zCE=WP!#(FjL#hO+d0O8c0eJVspdEF>J{Cm#ayNp_yt!zlQyzYgKSCQDr*cdn;)^56 z+OyH;M~n(Gl(Zu{OzS)7hOylQS10YLwF+8I8JxOmST)=?)k9u(RA$S3qN9W3|+-WqolYlDqg9Zq0fEXDmOHpg30*f)#S ziGHL{>_ExUbEvuT1hyQz0L|SuPY+6ijkeO>CtE(e!mpJ#Jba&o`o9bwee`|_^-p#ly@}Af zc?nZAVr8$5VPn^H~OESjh)! 
zkK0YOw(`1c0y2Y^Oxmkt{p8%7Yz4fWtSmw=SC!>-0#t7LoI;l4Iz4~GAe})9n{&#L zQB;F+Esx0)L)-8Sobfi)QpuMaoG8}V)ZFcQM;O&6AC5hC7gry=h6YQJV6LDus=y|K zB#pge}@f_0^(2M>6#k*VH3RhUG}lswl7=SuL;-dF8y9Oine_ zWJ_+TvbdY5Hn$ZwsLlyTtPiGO4|KjB0&fKFra6j)3Dmp0q4jpdkr+j&ozP1SL+ePP zw9bK?5)BH?GveN~I#r2+|QEf7cOgjZMu|cuv(^p?YymyvJ-56*@gEWfd^6V$uaA3!urdl`$Xh{|AN%3vR@rohh2t)B`__T7Y@T~_&`y>+*{(6rbHKkcn|ijZoo zLVVJL_9Gog>~Dp6gthIiZbV0t2sILFRF6q_01?`i2yLw>yrd_B7Eb_8Vrj!>_&aO} zFNG01+=#$#-k|dV#D}ON9i`A43wW_AP;D+ng{cBMyABPkHLO~ykp{C`nz2r2LY2Xc z3Zn%T1}nD?l(R-IA^h@eZWPc)XM5tvaL14nOdvZDMY@+jf^rkv%qxg8o5hdq$vd?}xE(2(HN~I7e8p2L{kIK81@jpki23NVvpr{`S&ZzykEXgQ~bUxOe3)4n;(v85zPJ9e=E>i zQF@P4>5UM20~j3dR!K8TCc|11SSqnx-1~5K0 z44c)=3X#w=Qw{LuFk9`=>5V8UDOESnqvaT-z3vh81bA7rT%z=3!T4NRONA;)Zbqf_ z#A@;Jt-M)t^9mH$MCD~s)#c^nAd~xfIl0Kr&Osi_bNV)MGDQk{xwJ~o5ETk5$)b9* zQeqlcn1HjYY&xNi0n|D}sI>Z!-(W$d#mDmD#a3EijVp>+*BIdvQAs7zX-zj#jck)e zt;zs~KzYCEc=$Q~mJ!t0QYh92Q148r&&{iQUiO?G<<;Dy?ksDp414T*iReX4UH|1 zl3H5vay|0PG)N~TGIC3hT~MkL^%nCUmMrBtvb-wKZH62RD+95*kH$g%W;_C&Y$cbb zG#|+Il8}ALuoR)i_0efbkGynHS!4x2Zk=R8Vs0j454p(AG&VuLF20@(e(BuBtCfsiBDZa)2DBt^;5LBr6nb* zMtlUkd>&|VQ5i~#%TdOYRasSw+B%t2t7l=Qg*91ePpF9OPJ+uvh{+)10KrE1M52UE zL~WDF6cUM~8ptTm$!k`|<^>U;`ctzn;!*f$kG#!%e@_^#3$0XpjYuxguD0>}B9(~F z@-yeD;$~84S?Gi{ZddBiAM_Ehq89mCfrx6yYozk)pzLn-5q42MG|^Jnm+WvI3&3`e z_IkS=jSF5>y3$b_$%k{ugo0=>DpGZ@61oj8fw>082AMmoC-@9%ZCZ`iNYI;k4lD$r z6*UB44b@8pFLD_zaiPJ5LL;}dzXiS!@(JHEXA{c(T}Y3#VO_LI4Nl&iXu*1KBMMuF zP}JUwlD1ChdIzEJ8${heA2hUshVc=Y$46ls8-;yh0!Kdl5y)z;vdG~-GyVrd1~khc zWC_`qaen`u@wX4-pBMta6+h!M_(gaI$622glG|E5IHL_Dm+hs*Mn+LvO7DSW-5ITcJ5e2b5jx)Y3Sy8 zT3JlqK}BFg4J*#9tZb}bw@$5JE4{D(C-m|N5&@p{cFMZQdAwOgk%+Zjv0+OKOqB&y$Sb<#Y+AP$Q#* zpVb15yA{Sjt5SMfXwf!Pm2S%?@CZtQo(y!B0neq?T4WWJ6Npr-`K459Mr4)L(e~C- zg*KqZ!uzRX3e7{iknG!mc<)ZMjqXRs=q~u^BpR$y1-s0`dfN1Ieiq?YR4B$=q% zye`UZDaH@ z##o`vQ*8?P4)XYpQSC|l-ubj_TujTuW9bEWoRE85fsYC>yAan2z$+O=xR_mvv!!)7 z!h3a&PGfYm1wE&x(fr~Ogx@*?|3}XtO!e3F(Mwc%^xnBFI0|^YDETZPC4^l$C1Yik 
zwA*RXK*ggs^9R|Su(`Z&c>HRJNI-^+$nX$Gls9!e(WoMkY(lEBMQ!DA8CNIM%_P+8 z2}UoW=Z%KpiUwehx?xWeq8+rr-7eZt8!fLMu0|s|mpc#{qq-PxgrkW!mh|8zq~V;3 zFdTwgRHOjcA3~B4ZuL6R7&If;q=#)l3on6ZKIumD-F76-grV;&M5%uxsyYkc-Rnki zUnTTYc6cU2us2I@6xE(V3$ym&Y?1~&O!eHdFdFMkw8pYttqHX{8J5$42BQ;Ni=P(7 zjcTh46@+dP?XdKssWDceCS=j_7Q|vGWkpaNr3HyMp{lh5m2K^WULVY(Bed^BsHZy8 zb88$Mfn|CMuAMs&J9`!juYVZlzW6m9{@7=*|MgE{$BQ4q_LpD5%(Ks6{K;o9^YZK1 z@y=(l?~7l>-uFI_-EY2yb~%*F6W~cjX<AQj-{WaX&d$ZdR|v{{vuRiUW78iiD21!XnZ z!kZ=4Ie?zAdDK(UmFpcS)!7vQOLYzuQpGh$k_v;HYKzyf!H$izoLgwC%-%TeKKEhl zI&xl(hbySjB9ou1W2I~4wbuFDp!E@at`-HoI(tB^|C}YcB>9`tbBeJouZ-o!LFgH& z#;E#a_5^{ql|U@1mR0@C$S!F>PPq<6b#|0!JgBk8q4m&u+nZ^>Q&e_@U5y#r3TjlF zp4^myGn=z;ehXC}f9GQvg}6c$dzt6% z3eV?Np3lov$QO&MaIvfoCn!{A?IsL%(fRG~N9^((B2Vl_?Ab$Tdg&Ba6%uw*Knf_o z3#ssmSR+QJq#Ho6a^`>fC+&b8`8aNuXa1wZqIs%l| z$5dxhicg&vg0DeiLVbgS3fPM}trhjsR%O)@czRf!9yFL7(C{}GdpxKL`B5JB6Lu-& zM1v@6ZA4W^D{8yjp&#gld3XTk!G1W0hiP#~VH_HQZDAhnBZuKRcNYG~A4lN&4Y;me zgXiuY7;auh>4nqCKXn?FS8u`g>Kh2Y{Vsg3eHiY$PayW#1vDPnkD+6GF@A76cAPwb zcfRx))j1-Rdj!1y6MBz&fGfnYXmSsJaX^w>(Tud=jRe2z-5tgIY`tiZ+6N_oy> z1ul*CviMX9p;Yt`cw%qG_LUMY&7DJ--+36NHAZ9?Q(X}tb!Hz5s*EVtI8dQ+vdqV! 
zw@0Ybs`6sK@VGDSt^YM-EZ<7V^Eq^enN;8+eu9x03~uYPGZ^E-EB<5GIC^`$k$vvp)iBC`R}s;Ue_B zu@rpqMuZ8sSW5?M|27p(?LA1fc2N;frKMWo=WD!tkB71H9!VcX5XSf!GQF&c;B2C$ zjeFrvc;JdTVD=gKdIwx#2`3zW2O`M`LNYg(kPAj5gl&ZI3$Of5poFC{J?@0B$%3K7 zJ!n7G4$Th2^RN?^{j^t0T2wV|Lt%IWG&2<_7|DnAfD=W%tigvYsPR|9WUqr+R|~sd zOYjkfRC;#S$QG(RY3Gw!DdiU45L9(lti_9s0hHOoD07HywxiNq2ffpvhVSScPQJ#0 zTCW!-s?sW#6?Igpwarn~Hz!~s7|cDi>@4c8J%)-amr#EBI;w9!g@)%|hwh~}V0i9jSnuA0@4`7m4)4Xl zsr?u^vIDcn_uvy>{VXIs)c+gs3UimxrwDsO4_5u`U>=`ha#gf~}>s$Y-Tz47b7(?PcXX1aqVh_IMxk{si*W z8Z0KH$uUuhRBP>O3TG)RQ=`+5!peGOZL@i7ImOikUj@Hk#`sxT-n>d_R+$2*Hiu}l zHQ2yoNy{lwLvS{;VvyM+8)#!??4O$PLFFc~mFIFB@9k`!w|t73JVpl3&juN?@*B2R z&b@-&!;o1ok}+N4H=&o2u@WoLD^!YHu4$^dRYA{W7PZHFm(X*X?6BAK`qQc#v&v!K zQViRMLb%ox5PZ3?uc6{wmj?fa3`91hAxW^c5rVx0;PARlm|`q2cJS|g(yL489i!Si zPUxNB_SDuKoXW_>@yuKtW1M7#a)xj}#R~9vK@m=uR^cdPKh@@v(TJgl8|^(Yv@G{v zRY`d{ib`1UjZWu%!_uy%7c~J1uLj za)TKq1XxW6?aRnG(vpPg(m~`7PGfyjGtzndwP6o!t^rj}7F3T{jsL6nhSa=Ihsy(< z%}iw*fsZQJ!5T8y+J;DH7XmFUi1+p&IXnbE0T>t?hIe8NM_+saH$V4TIOgY|pPhka z_f9k{EkM6>H;j7@!hGN~%ts%G`TSj2uRaC)wfpcsejTCX$I*8D5c-ep!T9mrm^`ru zAN|^ARcFX=w)L%`ZUuevf9V*J;Vq9n`2_lRETDCM3e9s9Xr7rslGZ!i(*qH2}#TK1KM0yC&X1DQXlQyENsw!w$ z!4wwDN_nNI)lebIxJiO9mr6|r@Rn2(EWBxDaIXwMQSO9=TRPi{`MxTaU1@t3WtT@= zmsiM2oCUs=YN}3WQNXJp?4&ods@|Yd{g+lWAd=|7=+q9lLoKwGHbTjOt+}E@M^J5)PKT{By-g|%fnPS&UMW>&wOU^~ zEj#Z6r6)-feiM2bncG$de-mtvz$X<`c6OEmUb+nY&dFxwvtpSGsbG^$Evmg%nlKDT zGp{`b2JaUe&%32k2VGVLbeoD`*-!%a`f_;Il~Cp7!@D6DzV%rM()z~MrlXM%>{z!I zeT3i8+D(|G%A4c<(uS?rK``#wn2v*6vv4#m2S>ML;~2qrG>u!vX#((Eb^*@j7UDE3 zy|b+FPUV;3SV1}V^4!j{%#GI9W5{g8s+xudRMxQ|K3v?Ujs>%tmevx8(sH)IKy|0} zhvA}K4RrUx-A3y=!kb`toR)JC(XL(vz-UJgB5hs&B|?N=ps5{xg3r(Q`yK^ta6r@{ zZE%G9(Pl!FwMDqG39+Uom0iU~J7^71`T2bCc>`*8MVv}8LaQthCNKnmu|OCJY0vW$ zdQ=xt86+Eb!QbM9tDRQ4!vx2G4Gry7u-nT4jqQJ zvl~^cB&rDbGOFFISQAR4lF&2^r`r#o&jW|uLM0R=yklr???f!sh|ZoK{`N+sdb<(r z?Lu_07pLz(p?YGU_~@I6@H=A*3$Tq4hI6xU&rHKNzX1Q@9{6a{T!f$d_(gb5T|nUY zDWr}ZLeJ6t7(TiO6UTOA>cl?0@wLyXUfy44T6w7Vq;q41dkT8;{MmbV(Yd^U#`zf} 
zW~Y#xnL>1Qlx3h7?#2{6GB%GY&qh^icbQb1UL+d8&Rxrh^7@(0X4Shd&|6UePSk7k zgb^)VW;SG*(X=gF39lU0i6)P2ql&Al(ZI#?C({VUl2z8})MD+jpp+zI~2r9kjP~3V3-`QvyKA5+vWA}Sw^BZ3Cn<*W)VL0FFpgA+NG8mc%q z)u0u{0>e5Jp(|tfs`$5w-`~qhvWxB~iGpe))~4mE_Bol6BH)s9mqF#bNnj_lQmE8q z`rRffxs4KA2{oQC0bg!$xl(?rN1RGot!zl>rEgn-oeBjxUJpL@NQq|j*x^I2GU|w=<0ShXCA3`0Xe34$CGh060Y;cWNOcV$+0n=7{Xiim`MyA73l*ibkMMhGr^6wFE|^3#LMu&(gd*Gz z#tFGNlD;T|KqUAgRC|7z_@3~=7PYIH9-cUBGag?Q<6lyHCP)@fJ!Bokq^&BFdUNP~-Nhv{|8$7aqc|rJZ&$5=Dx? zwY{Yktu4)jU@MwB+Hn2uofRZelD+luchE65jySDvYA97`$ucWL?AcaXOPO|U8*A+927?0K*~hLSoM_=q zR;=JBDU+n#xtf5iYcLXcdejnd4i776!b4_UlvYb(TO%r|rs@rL-srs9^UKvbC8G37 zYcnzBr zGh)m|=A1Yhg=Oth~{N6-2+3~orr2vy!Fp*KORJGEg8 zW@&eqH*Ldi+T4A#yL&ceV#n5P*umephk)NttAA+oHby26^7+GC^Km?_2uHRSVfWSo ztdfk!78g&rMvHnDOj)nOM$j0k?zFUJ8ebU3ND3|j$w5$A2(Ff?Ii#rcLWH1?01FaW zkRkIfALc&K63|I&6C&~=)s2m1=Ku#AKIs1 zgXa2ucn+RI9p7t|*(pA^N~aZ#$B;-R(MI?s6LGY*HlwM%1y4M6UjayX^!^WjhPOWY zHWgqGnrMAnhK7+EVkI%fN@KnYtqX&M(-@k!Poib_EL!$0pkx0c2KFst;^2179NK~T zBYUuT>;OLTr7tL`tqgKr>9+U)dO}!kR-VPlrBol$* z45U<;2?&!jjEZ_Q>ZqpjN@~=Ab6+rl27`@issUMLl}Mvyt}r`MsI#MEW*6T4+8?5C z$8nsz_Zpu5i^Z&-#MscxhO7vX3eM4sM*GDjGiP%~;gaSDrOIH=}i zOkg=JZFZRkX$4hk++H57ZWWKcj0&)rFqeFJ8+n6o$q+j$ZFtf#kwyrvfbUT<27%rd z0&(SeJ|}jV3Q^ihMODif$HF4%z${VwnY64*aSQkeQ~{iXTK1J8(34qO0=*L2MHvPp zZKw4OIt&es(8bW5QQcb|kA|?YZ17y0sthn>*FwLk0+uxuaIUT-^vdC8cnCr7>Js=@ z7b3JKACcABh_A^+Gc9l1dV+6b2KuS&h8QCow_=PTgPA7?!WlldK(H-r$;2$7H_vmj zo!i}8GqIOY+{5R0Z_FeVv$20u4i0S2!`>}nWUcl$or3it$uQ5M!j z-!PK>BWM~LM;AY%eRKwGjOMXfG>^`qX>b~@)?R1`Q!Q^WOQf0VjfyBvXtLn=o4erR zF}M<~RCEj~KUq&hI!HXUy&m3>l6cfh!zArYW;k7Dc&Jc}+Dce0 z4YaVdR8|6x>cqubN+n3JJE&B6!6?y9R#{ceffC+i_2W}0o|?z1WFJ-y?MD5rw@`ZP zO&DH&7kNh?$HrtYa$ARyGqVRdM=zrE?h7cp`~gO~+{pw| zZ7pc-=5K6C;<0--RpL;EBViHarH_6Tjnm_B^>m;U=} zhS0k-!W(uRgS)0Mv3m})`<5_&Uf#j9)^=3F}qnIrn;#1oUk~g^M%)tHn_O99=TQ3 z3VKDf!cw8NP4B|t8_(kS6R%+W$YqQkdK?FDzKAD2_GSFuKl~7{f9ZGeg+KWl{K>!k z7~lPupX0G-K8De~r_nmK3l3V~bN4@tS3mOyICWV##*J6r z#=gg%z~YG;7~g-9*K;ow`*sZNKa1||N6^gAFvUC3J-Y`M9{<+D8l*D>c*QkTvNEiO 
z+e#{0ITz9^yD43U^lTyQvQz>FY0KNNnf8_-d^8kAsXf^jrI$%N%X7PhYFehF$&eoD zex>8rYqeug2@izSv!XUVVK%%6KiSsgz4(+#^+E9)X#NC6GuYpf~EF zHRxavrOz^8H4=hUwB`a0Ok1mASxexpsfJ@!HC(G|sP=eWS62{xr37CQf~)foUX_K| z>TO7^%|Oe;c0QT5Ci8U%XmMp0M;}#R-zGjrm=C9CVuIynmhck*F4O96SCNfnTHkp- zHp%BD=BNNyRn^v@vbGk%XdDKc6Go?(g*vW!Y6W68LM7Haf)JHigvuf(5dX&&f8)_eco*MAB!IpB_eRk_x-O7m=}E#K*hQKHiJ|$pH*b4`66+5JQW@7~MXO>0Ps!-@Aml{Zx7f zcVqGJUVQS4pOuePBwa}@Ab&Ar^N4@k$PAAY*RP;uW(?7>egucQ5a?~Cs%NDV4Z`82 z%5z&`_gG=3f_6|z&n?c;uJby$T}t6SYRj|P9MJ0@&a>UH0c&Yx*REcz^5021C{cQ% z{3OeYSfzEeE~}{aath1UI5tsvX*oqIsx>k`(n8B>L?eMWJh_CUXRcuPp|jX^=o|(n zcOud{gm}j&D-f1F0?<#8#9R7Uw*5*G))R1bvbHjnUIC$3X|SM(Ac=I3Qi1hh=lMGr zJ8%&l3&+qra~Mt2hp_+Z^LXX+zlZ1F{RW=>Q({p|1I+=Dj|?w>>J^d2Ne zmT>K*cktAS&|B281!QbQFr@oGx zZ+{Wbe(u|N>fNtn-?gWaq-vZX1Z9D%>`H@Dd%3i_g_T;Bw70lY5`2hd7x?LvO_h{c z(u*oRwW8=G`?Iw3tqd|=8N@Bq%Vg$7CRLtfu*o3!q%$ZzL)rpmEMtKJpR9RPR9vdy zCxgCa?5^b3Q`1pPN>N!^sqT%+sv6iRj%0U?#3MrEUpT)#7 zpdtK zkdP^C9jqwZ`Fp$hI|nz34Nj*LOvfZup3Lc;-nb1j+@B)ohBl|6pXzsb!zMznstV;5 zm8`Wrv=3gGoj$5P9~!J;a}%s}l2mg&2zOCoP?0r`Orv#Tjuv)Efi6lgK7yS9PT5sL zQlh1AR9V;*U)wS;hOV(0^i0m9V`3KFbIYtH521f~FAL2QYrbW4Za;v5J;%|wv=7du z9f<5ZM0GO;XR?QY9U#~UJE}%AVQX*dfrDXf63b01+|&k3(@M&CN1_=%!q6RWhKtY^ z)oK>IP9SK(JnGNhMS06O z8vImA$tY|xzAfZOV{;=Cw6ii~=i2?d3O159O4ObDgL3ab84uq3G<+xb!fql*QUwoIF={Bh^+M z3p*BR9sMiwFllK8cw%d%Cr_3El`(Y!ymf0=RGwH{mDXJ)wcJSCyOKe8gJeqHoUzgw zLBPqtT>;*ftO7a_A8NE_1wE@f42wI8k?EZnn%ark-N&)x@CCFE&g1CCySVfGn>ha1 zU33i2(&k1K^mHZ{>NS$pTZc*+sZiU1+~Nu}&>DLP$C1S&7+gGtq3x$x5g$P7I{_{9|`9nB* z{dL;mw+O>e;idP!h0px{U*ns9`5*YqAN~X0{r*4U+yC}+eB&Q~geN}vHJo|&qu6)# zDd@uOsCC6rP;Wy~odsnL7BrZoosyQ96~Wdl8JCv_omG@yJ|BD3Tgkd2(36l(p+}0ZxTIWdrDt3eoMcLFXwa&P%jI&bL<1U`tz@(+ zwUt^%_{HXi{f}LwOp^NITi*~n@ zk9AWO_ODOF0AD{sSdQ^;=?od8ixA)qu;N`+Sy_SFx;p-VdicXJ*4_c=EVKgLnmv-b zDG5s;24{>mv$$0!i=H+87ah`6-R(b@{|6x;~9wt;bU3{9$N9UMnf|0r6ACeTe- z_DnA*YuvYe4|;a&MaSHBw9W27iU3K@?Lyby6X-i}0p8vF(RBO_ytKPELeJkis8&L= z(>~i6_NHDH7TOyVEw8?%LxqvBGbP(#p!L)3@uD! 
zbZH9X+ov$Ki}sdEZ+7ppQhRfUb`g5-5_RuCgxG8Jbs>ADrTEhA{MJ^(3YE}ytEvf2sQ;e znRZ6-iPDp;C_RDRN(L2LT!L@o)=Xp&bUKThH+8X^qLp1#si0TKN=RpsG-i_3IEqMP z7Y?3$9FO090mm=hQ$4S@pZ^%1|LEuO^oQQX(etXRSExBu*aVrKtkbWI<{?7{1p+U6Uj5woW6^ zIZrUst?W334ua6%F$Z_k41(>;aHjZt`vMY!J20^0B&LsD#oX!Jxcd4RaQ~g};46Rk zL;Nw---}=SE~d`jLF@8y^zS{3mg)V-*ZGiHX+mzb38f7-x~~w7cAx6Rk*qEmIR$Xi ziIr5AhWbZK03{yn%aEbc zvMN*)s0D?^&>MISWyq1$j+6{DldML(e=4eK)!1H9gPS*QLKH)S`g z1f!z(9YzNn)fQNC8erZ+mAA1Dj!M}t6x&MZH8MH~yjDKmI!?8>xD&m*_Mvyr0kqHWL~?uqjk7z^ zec&WIkDf(h|54bf1RM7sgR^@~*^W@#5S;N27Ao4?RL2MF;2Pot91FgNAaqfM+Nlf; zRE4IVQ5d_2p-px|M^M*N{gqJdm3jlH2}V)v38TstL8T{-N^c4ko)pU63AIdQwMRN| z0;rX?DT2>t7t7{Q1+U&7M2*!CEkUWVyV2kcp_bsR=4V%jqNt!{EpKc@VMjZ(^YgIn z+zsu(D0Bp+(;ri#*8^dhO%jBxD{d`=U_fYb-ZgYdgtabJWt^5oWaQQIA(UwVAsB7EbW}f z;{NUU*k?aUt`TVuzej!72mc<)y%i03^yCpVwlyNw97iB330vK2=7rs1;Q27X!kXOf z62%(8;`SvZn^3dS)fNzTT`Rv!){!^Vr|#Z%F3Ku>AX<0Fj(`hq7IhUFa8HlDZIY6}H*ErowM0I#Hm^`Q^?DX~!oS{}sEFHnQ8_(nK{y)G+KJy(U zdKNIWcoGNBKZWs~=iy8Y6L^!b#RlmlhT(0V!tT@eapu;Cv2f%T`WH^a8XZBja~XP1 z7aHu%Fb6vLSU0RmmfxWz#0kc(LsxL=t*_zJKlpbXd+y^n#N+JRdl5~GC(z0`aN}i` zX~MO03Pu?iZI58ziHoWSS~?*zXeC#YkIDi@29@r9OIkJoCq1m$s*O~#%%rCgdRsQD zp(dNRY@r2~VJkVvDv(~+0)AEj|Caf<#cE%s>1E{=p@3j@QSg{74zzW4V{l|tB^J=J z0+aPkB(Xr9M#eZ+puRyvMOc6>{2s|xT~Sen&K|lkw;g($8CvPZCr7NjcN`iUY-I*$ zww6P?t_+sdyq>EFJE}bw!RJ~d+Xi^n)>G}(5PVd9gr9eH9(+`M{xxX`tl5On+6{=T z+lUw;CK09Di%{)_X?a5uvd`DWxS!zjDFU*Q`^_pgp>gd-tg5M|(yOgOO>M2JnN{+D za2O7ULnZ!{%qd>4AK_>cp=cB0R8{e|UL-ntk?QV8bKel!21n68JVD@1p<|3eut~HN zaw)=4qK%eTLR4Pw@@@ieKiZaeDd_dk)^_hYjOHB&5#7z$e+038M-W=rO)E=VOX&HU z`ruAjuLOcx*1YVi18Rda?l=)jx>Tg83FGd*B{(1;Cw-XxP%myk%je{1~ z=7ZTAgVE-J-pq?@cf#m)!%F36;cN6x0ihoiVZyO72~Be|k1@W|F~r*07l^>kbL@?T zaQ5;A0`Hgj>HE@kAkg_0{;wZW-U*H5Au>qT&UohAe+2*SJ19JP5;;6qC5O+W>c-P3 zz4J86pLha>E0>WvbQHZy+aIdE2`UvTy*)G7zC4Y6dk8)%zE6JPU4A5?rq;$#U-$5z z?5sdfl%T|sV~1$JlZaD^20~tyOh9H`SZ&$DM-%`6Y*9%>K~y>d&%m3)s36zW(u}3; z%kcVrYM6>72Q~8MkTgRw&asj=M+Vi#I!PTxrKc<|fhTiqCCPvQPo*rP+S?=`1f=U!uX<1P;t~9?`?9dvFsIIF=1ud#5KTkvky>}~%y64m_Tz>FT 
z96Wy?$1XjEr2`kyH@*vPeXNXWcY`wK%|i#{Z&J}YvWSCcZ{b_t`v?5apZyDgM>QSl z#>CFEnA~*^-BVQ5%@Z&Nx)ErdLaLwVnLxbu%qK8^=mxr{k7ID@92M~_8V7cv&f3V& zrQ=|^awd8(y89Fcs3L=+3Qym|^4$+(`Nk{Qefu@coPQFr8J6|&{b*h|iTShl5$s<^ zji(jmX1Wk7E679#Dro6N;pOD5*i%Ukx_)yGO3Lf0+HzH|cg8m9Y@mwUx>XG>76mOs zSJrRZtU5im$t(($_@Gz;FSn?Iz$;X}FN^%k;$O02Vwwz**|d==F(2_%LQN6X+bq!VwE{g0p=Z;v>{J=hkX;GQ=5m;6 zXYH#5eDx|;tS-SvyX#(4Pw3SUdX?OkF!C6ggkhSRv+E_;0)$zRw$;zy?RyyVxP+|D z;a#(lHhCk0j4)v*V2nPDRrPgMtc@#C#UEiX=v4yH#>N!`pF&0?;JsG-y|BE zdr%i?BJ7eVBm64DEvSmLqB7Ko3aZ-bPz0KI9Q9EuzhD3rJ|AiVVbq1Py^#@tVL$G;*aJWNiyqJw(HF(1?P&Hb75!Lf3^OfDvC|r>+E?&J%%t-bVfs6o$ z8thjO7yfz3AlaAI+aLViAHaU=D$)+^L*|}+C_jA>4fmge_W75gf9@HWE*;ZC#+FXHI6XE1U23I+~cMc>ieXxn!c zNkX&#;AKpoy@#HCk74N0H4GlOf)n@O#Qdo{2y{!{ydIc?EvPoQRjR10yi!Oa&`hJTD^WHR;`0)^*Z?0tVfWKON1D!>T4@dQ%x&D z#iy4v??xSxi6~m=&0P+&THqxS4Fhy!{~$UChtV}Og6@%V^o$Ak zR^TTPY#$z{3O6krjVSNM(gwfI_GzwV{tc{7xy5wa~~S_ z9fo^q9(7cembL-7+6G}IN0qOqNP?i}i<7;CBe$_O!8n5Qz@h-&^_6HXFxw7gVg zweb{+gAo+dl2$je@FiRLo)%OF6Q~TwQB4I`MN3;p0LlPvBVlO^CSmo*VGFIag-Y1N z2{>4L+Ja%;2qA(mg5UkaKV0FC8b9(&{QT#V*Z5cX@vkHuJi$kmCt^?1NnQQ=SE0FZ z0tKfJq2}yqG(2_*`X}zg_WTR5KYbtOvnSwRm_ys_l$tL)wv%dac}ziXFIC?1_IcH| z_a>nyczyq2&+rGKHY?a$F}J_M-b4Elic^_}{i=7?=Hd-NYb#l3Oja%7W=1R#R|{{; zGM+{$;19v~XsC*;XDdB}k`79GXH{p#hRtelvZQ$bFZPzu+q#(wg*Gi?TdqpbDbOpd z&>+9O0VZbr&ocBtF6G61O7fDuX3;Ryv<}+_#=h4e(>7Pgc z~lcm$u1R@X=P`4sr77-a-q5#g85kc0&pgxwYeJC{K2 zVK@mo$HTA_c=ETC&~vlAc?mv&pTw%#%2L$VRzllQ1B0%C{#%cr&xxVlb|k_cv^PhQ zi2Bjg7{~GB$I#Q)i_YFYboTe7qkjOM1B0{-LkfPs4nnSf2+fqr&AkK4{z|k^^+_~y zKSp3i84)VRKxaQZ9kj3AL$LP_!_qqhJ^yZK?0~(UH73=bEhRm+-Bft3u(tL>Pvxgg zbU{m{*U&x!9}BW(cnQY&gJ_sJ02d2kX2&dQ7mveDyW>B94~9dRP_^q6j8uJPOl#K$tR2O^Mray)kuTtDY)5sf9rZlU z`cxaLQO!~$eEK?0TzeXKUiluLY9p8;`_birAN(aU{#0j-f$0U-ytn7!DsQymF&oZZV$7wx-s4$0MPYqy< z_H}sAS%iA$p$)X7#*rXAsnn?Uu0Q{Av`-(!zALX{^vFE~hY!KqzXL9+xxLq3z}>gL zhEM(O-{S3W{S~H<-Gn|$XVW->Dq8{#t|Ut8?T{=mo43*47M8E{;!^3Q7geIx>P7p= zEL`z6lr-2dvHcKUdHeGin%xVnBMOr@g^{@fXr|DXUPkW{fh_$mxPx>@9)k;nqi26CfC+2UliY$n@3f`g#JgK@C>Uq{>TAOH*xp 
zjm=F6(#^=CK?Z5twK>$#6{~^3q`IujD~CS445rPcRD7kdtSg6gZ6(21LqLhjYk;3t zS3Qbt>e8OxioK$%d4#MwI*a<%y!%E=U2s{Y~p(h~#bP4q8tIA-| z)WT-c!(!0FZ8O13h1b)Xz{2Dpn&Y&#Q6EMIyK!LuZgh0Cp`*JC?Gn9Ie2=29kJ0}@ zwD$BN+1^Df+=G^$el&LVA<^E=g4lx?Biz*kZ)-a|%`!!;9qwisb0CPVb=wNDa8UB#~*z6`$P*t^KxyU7vSSZj~`W8 zutnvGtu?WLOJ-!f$-~1_w$Ayeh4Y0H(^O%9DfzDmezNtt2Bg@%wyK;}KS10jhLa1YX+yqAV{x zBl8#)8#}ij!wZL~(vD+t_bCh!c=LxYVus~pC&72_-YfX-KmQmX`rLOgb?`AvvvNFn z`z3S|n9ZZx5$&Er)8IU2_MAfB^d2?&zf9{;$wf`!RzwJt?xmv$4=y1xxX5xkh6-B* zhEN-<(JsUZ!f5X@`j^k6ef}JR1N#u@+m2xW5{}a%fAs5riWlGeJ^cPZ{v69^?!gov zge5TuJEPVXS2^_xtBlAksY78oL8G@L+SZThJx6io#*?^o|0T4tj5fI9h;>gQ-ZKrI zhb~6vL+9uYTKhTdI`TLs`MYa$US5YvY-7ceFP$2hS!%Gc?8{bq-X)}MUy=lSJdzQ8 zB}uSsorIyN_*#t?)eRcet||kYC9k2h-O6>8@u#Bp%+l1sN>HtJYyfURk-s5G`94X` zt0}LACc6~+j1uTJ7elw9nD8rC1D97RlI5_$dGqgaSkX<0?a44V)$e{4TqimEf~m5b-$B6!v1r%qaTW z;^=KlVEgI@?JLpU!RS=c+SNttyAqA<9S9{;2&bATw_8<4l|Vw$ zP_-b)2(`(a+!lDbA877S1DxGdOO9k4oV2hGDlr*+Y)kUw(NfC3gRnF5HDf4F_v=GZkOx_+^-PpM-AVFl>9zAV^D7H?<3;@lIOW zWzyCHqn8q_F)tc%&iPNdHfFY51)bg>OFWbQ!ySn58d&zC|TTv zwDv(1_fMmWH&{LaT+&0k-8qPg)*e*0bfBcA8HH_)$nQ*{peu!v_9SYWV)(}Q{zO6M z*YKgz`{|DrlGF$~dF=gPDDXY~)<@yxxwi~c4G#_R_fEn$JCDHpB4TqgICc^P+MLH@-{&P7Pv)R(G1#wr^ny{YQ4;*{^(BrJfhttiD)TYdL_G!w?t>@TB8-A631N z_SWWi!RRzWFH`s|^;CLwQ~^fx4fJ4fd6A0Fpj6%?g=c21t}&QVT2-sEaBSF``EV*( zJ{3v@3d(8~{3HdE%p8z8ozf001Ag{Oz8kvM&Z-&tkq~c7dxwvJtU$Pc+ zsVrac(1zI@E;!vj1j8}a;TECdZtE5YGKFYMKROA7`Tb`xx#u*-cb~!TGqR~J zo5bv~>v-j}zlR4@dw=pTzr=wnFTl|}4hzBOObx>n=zu5Dhj250&(Iu(r*^PHx`@3e zE@5b4AKF+c4=x^L9HMv_qTQau;*rbf7~ex+CeTUZEh-#qwi0Sv;aHajJHxR){b8iR z$#88*SK;26fmM<>&dY;|$jW678-gsrE#Ux~f*!Qd)*jqGgNgnQ?3kaxSJ_upoMgAZf{3ZTbs(3;^9ei2Sc!doA%Zn3Bv55Ri(-k=;^Huw7448sj*|%GE!8nD+YqzU`d*}O?nZj4rGp}j3rx1Po9A^THvjOWDc#HSr(HwIF)6!xNpfkYkbi+&QCCe5xN##n*76`?VYGEbZJ%m)}5JFVxjRUjTeflQ$QSI+O z{{*&k|In4EapCEY;J1G8ulS>X`6(_w_ZCJET)^#*d=`5y-A4~W(lxse9g{mS&T_MO z@H7nWsG9gM$wup}A#_s>j_x~)(@(vD$6op*8mIT6bLlW512d>6^t^|bPST_s~@Hy)n?ZeT&Y1cQf;2N_B@VXeg@&TaTM1(u_>zp 
zMO2Dqvcx4Jlh1QoKy@Y=RRnrv<>jcYt%Y8%r?T@X*a_%lT3MW~L@n^e`&hu|mre~r z&q#$`r)4E1iw;SG*PMJ_#|-51y2_Naef#z+U>c++pU@MlYm^0|>{4F1uVsZ*QwMEX zH8cg4(C1dcltu7n)WWj07UoSgurREfSN4sYYoOc2if|J_xv2z(&3Q0y$yQ<6ngeTE zE^O&}urh3{U@hsnFssOcC4}U?U zG1k?B!#n4(fBPKzI$O|~jG~jUYi(;n3!}BYnNqd|EmVJvElp}JX;6A71AaLDemGbd zZC<|`&LPnAMhUxz;gE&6Su=Yg3An>?)%zJF_+0)dydf5taEdox3Qhvi&TtTV4#Llw zY^C(4<&|DsT1qX$NZ@H16;z2;R9SV&Zq@cy)7sC!2cV&HGjxxmPGH1>SRL;{O}rab ze0?P?b`9-leN!v+jXiwd5S1Pa?c8w~&fSOM)+>m-{yD@y^jQ?{x`5g}k0Ey9DHwL1 zLFvpP7*1YC_=PuMd+J44?>z_Hg-sF<_kSiYk+(^Yhp57z@{alC&++qL{ebBFH!}&XmwbiMd`^b41u0TZ$wp{MrA6NG)|)KvI;6x@1qPqDXP?> ztVX8>!^(6i>DiT#8%EZ^Q0;y!tT{zhsxjUhZB^q#^;W9Bng-R-UQsPmC+n&5tO|N= zv0%I*n_Ku=R<6=km246hGKd%*LF>RYBAsJ6b>n$F_T(El|KKAy^TexII(-B0{lQ=1 z`~Ut+yz~2iiJ|@HaQ2xuaOtIwWAEb+FtGDDI;MA{lj`csji=DuHvx^!uQHzK9Whvg z?dV@Pg0X!U@%T&c;`)a_kM^ZwNKWiRxPJzvCNGL~c9a=hDAl`BYv;W)xfgyaJPCVp z2#q7V@yy#_!-F@!jL-eS->d0uy-Rc^LAsi74@S11P;2P649vq#tL#toAeic-IOsuR z_cX$-BWR?$>z&@OI$HeAqu77;4o+Qt2CaQi zlIO6A?(NFsSJ2zttJI^E5sgu1X;@Hi(!nH4f(rO#+#gk+p4U}R+iobTgE7Ajrrdg1 za~srPW(iYHJ@h%X(B;%Xn_B}-UKJV$?wZ0r8;MjA&Uo8f*Dk zb^Oe_T9_K@VbN+})-+(1?;-Fal%i1jXf-mYYBgtBRxGja{@j)z24x=pzgU!sg@a z2|TAi3@7)4p*RABp3h4lO0t7sOs#9{kk=57t_W3PlwhQ_brEtFg3pxdggHq7(w0_J zsnw=rI7pclA+zm8l{Rne;1Bd-h}3bS73PI zB^YnsLGk!J>iZ|5?-+!ky$|(Wy{PW#Lq%UdVc3nY{`sG)*%s=bfJ{M;+ehG&oKgZl zd0fD!LSFaq_5AbwpAu8*ad{p0f3?ysDCypR@OOVhYrBm8nQ2TN*oWrDDRi*%>YSUz z?dP82tEA;nx=mKv59K3&M3O!N-}2twDwB$dR<^-d13j&+!J?tHHDP>m6x}`Dko9XH z_1X&bq`|t5mPdM5W%fW;{>qA6a>H%TEJ8+J1qv%xpeJL^)D)?Wo2l?*=0JuTn7kz| z3z>Y6v|UM}fFQ4*Gmt>NVI=`+ISaGQ{j8~H0p|C)JhCuVP_;qDd2`BE)?!JrBI_BN zphcGiw|&Z<_Rk;0sar4OtKa)aeBlrO27mNVKf)hV;r-6v{|64UAj+iP~(iC%;-ce)oo6d9>p3v+Q+t|d3YIt_6e%Mam4$Vaqj*{@Zvk)#Eb8K3t#>I zzhYwlB{cXtP-#m->tp2}?SUuRuX6pRsOU!K4kFnxfg!5Ork)v$EgZ(o?$f-kZ^7(u zMKx7rrQU`X!CcmSpsRd8oe+qLmb1=(zO;LN| z2kTmK#a+mvT{^W4?Qe!Oxrn-yF@<`Q0a}xu6}BE)Grz}b!YV)M9&)p`WC8YBt%RNj 
zO#v^Od@e++2K1)FIJ$EI$M$YVXEOm83#p#mcsz_K_d}6@nn&v8dt81G>|Qsmh}te8YqOb({d65=a;%zE*k15^`eX<<}LeCg!Q|Y_43{9jJI&LLoFtd&@G!lBo zL?86DK04_QrWF-9nL37{7yGdHH2R);9q!xD!%b^zefA@;J@-*KpZW+47w(~A@i+=5 z_M+y<<8VCr8f>q>joKGph4#6ZP<7#P6iqIoj%v@)JpxnbAhcZrRG9;)>X9*pJ^0FB z{2$O$6K+@jsbMTD5B@sj`G;+Pig@ymo1gyhhuq3*eu1C8{}Wo%U*daz`HTm>Lj{_%XLIi(ojoGCW0qC*YG|DzrS6v@H3>WoltW4qsrjl@qJsns$h*v)S4gC54{DoRo?(BmP;V{+T^0~Vh*n1W? zKJq!7AvktW+RAzCx$q1gy!}ml z>|1}0+i!ds9aMkXP%BlQQ?0aCqOqtbs52wfK7^*B1qHoO=L|w!bC^AH6>og`k8$Ct zH}R+c_ABgt>?zfzS8NEQ#6Sy86}Mj&8b5j&J)$C;hgqQ=N9W)I&R%&6dyid4TmKwm z7Ft^bdF52n`IS`HrL1tMzz8BqK_$sWWdLzybpQL4Q;N^)rusKX)|^U|l$NXH1d{Pu;3rU(dAyySJ$U)WSFmT-9#~iz z3Q+A7Qlj*>rE<%r1tDaHS zJHqV%zWRf|P^sHLkXgUx7~%8lF>@oKoFnnR(clmW>TtWTBc| ztY>V^DyG$y#dTJOvZx91n^#uQPp37Kj34RS@*pdERW(@9*f9h*uUVbJp*k^SWxg5$ zPg2br&AcgCDLPnI#Lfl?Jc%ePR$rJFt-VVnrVX$Hbn=>ajxS-?sjK+*5B>#z`M-XJ zfBYF#qJW;|qlDC9?4=#N_WFA``~1hS``R;Dx^M@h2hU>Sz!@BR><+3-Zdh2ZYP7WT zEO%mW9kid(o&}YZ;MBdhaR2S!#?0|sFeL^N?VE)u*bEJ=aE---a=jh-wX|~qDyfkb z=miPAK*tO^XAk3JU;jQ%-Te@L@9%zy8?Ss?&FRh81Za_?RFli-p{m=?3ij}YC$K<3 zpP&*QnAkzEkHg^Lap_!C(Nx(4tK<;OEv|yBhPEvyAKS8X7};t;|H9%z1-vR++REx` z)N82T?RHjX5e2yZ{(kiJ^`W=7_k*oGo{~kp=mbqvZFQ1my|`4ZKqoo)atXd7T1%NC zCz-KF$0qUmhdzSE`6bn3D{4>5iOx*mJyd#+prBu#U@xMQFtoKB-qub;I=kUyp>-2-exBSQ0T<+Z zCB3>N-E>nydOaQlX_X@cW7NsQZgIiKaNDRZEe_tKZrJ&_iGQnwja)%BXjR&V)L`LI z3bt4a+_c1QT4R~V>uT+TlhAYVSX_jYr)L=Mz7g0N{<$TzoID5Hj(u?KI|kp;3$W}w z3CrFyh+MskHY&TumtKeY`aSrceiiW--hkuEJ(Mi%Mak4Ws>UZ#H#~}l!6DSk+P3|} zs2UjK{t&+M{{?#T=c5hqf+Tnh_)TF5I}PzC%9mr_w&CzUO@`E{wtD zXh1w_Lrb%tD!`9Gz=he_88p)BNQy3%H;=HBaa&RVHB?`chFmg!$XXWbH)UYMW=VY{ zi7N$o6}%a%kxknxOF2r~qZQ!EIt_fC3{1`{_@EP`scS@KQmJY%E9lkMYt=k)Q4VU| zWgjb0LQj<4BiPC6d^Wm+a8o<2-!Ocsc6d{rXd9Wwp^LZisjvSb{*e;<+yCufv2^Mh znx}SSic0CJPkkNFz4vY0_~_@caN#a?UcQg9L+5bd@;$_QCltK2HXkI>fee&3N#aj; zia^?qmdS(INwv4{@=NI3eh$s!`!GSf8SNZ{&J~5$5mL~U?`dwcWM&)|)({0ZLv#-E{k;g}k>;%pkBy*_~RcVEG!dmlm{;TLXYrQ>ge)tf+B ztw|+Gl_`6Y9$(Vro9#a2P-*An6{%@!c@HxrXXoUq)KM~Myqbz!Le{hh1Vd<{DCr*< 
z#PG-{23TH)hK3aM+NtuSGebH!qzz7{tX5JXmJ}ADn4g=Ok;%`^qq;9a2De32fOAXR z@$pZ*g8`Ovo6N>w*^p}{c2h^_7|0F-c_AUw)s)SZ#|cGAKr2vt6tcLKgpt-%ranrT z`Fi;*NmMGsq|`$4+*&1npR_RxK=~fEFC8b6_<&*N_e!jCIgAMU?4)V~)s+pSeQofV z>foWB@tJFI|LSSnzI*~raW_4^mp(rVoxYZZU9#f}!TCW9k*+#^iI!iiv61cso{hDK zohQtZOuyXGFaBvC+H7Xn?~Fb8F5LI7UrNp=%xw847n(@*Mn;@J(8F55L-`t}UH9@vbaE@B$UVF%JYL=$w8fGU+Ai&a z@g{g1Tj6W&fRE7iwfDl`IiPX{278AP9vDJ=bPThnPht7;WrU_@;GSTuJii<6sa^0c zABJ7#24A{@=&gHjUc8Q`CtgA$t&)}Mp?Ycuswd}AJ3;7;QrQg&@CH!RKZuIKG1T@G zdVlrb6sQzq9tmhVsg$3LKV*Ag0M zC(*Tc7yL84nRai7pSCtRKaIAzNkj)&X^m3ljdvr`*MeY2iYh3Ki#KoJr~G*NUiJT9 z{sc>N!w9&wXm9plu#dK1w4Yy&+(N26f=?RMH*d|r`VE_u(vyku0zDbTEb2tkD20+8Xp!U*_8=tXdv$}J zsze8^WN~2`lWXgh<_^D1lMAaQNZo{=)f=QD3(+~WAi+w^#|kdhJBE(21=Shx+B;vt zKm8bZ^Gm;jvAw6UNbp^H@nd-JkN*y@e)$h^`q?*e^!{r&^2E#7f9)wOp1cfwpz#Cf znQ3q9sM^fFI1Ij&%7olJz88bLE}(7p1iI%>A>6%)E}01w@1fI)z(_zEJP9?Rxxx}a z^Vkl)W)8L_;l=P%@rla&2rcflSKh@p{@cG}`)rBgf zB)kZsN@qob*{M=+$x^L4lS~t{s62PFNWTmvk(BVV`dnuAN)Ek(qHbbC{M>W`{&s+a-nv&^|MYOV2(*dqWE`HG(89Vsvr_iFrbA zViC^Cc{t~Gz_t4@baQ**+;IvH*3PEUU9c03`mq`4#>Szi71Rw4Ktt%&^0UiF2tC%O zU;Vqk{g;dD-Q|mB;;X5xc67O9Ujfr{mCzXp}_h5fB$E6Y@b8Z;xvzM1|BNC z_`yAJEKMV@n^tOZ0^x}PB&G@WiFUXKQ}FjU!$;`(nv*;iNj!ez27dGt(tvgszxoN5 zXZw)|8PU_`$6$Ai@C##+K<(-5Qj5OG)FJ_%S_FnxLvpMY)7oayPRY$B1B_+7od7R~ z%1*X2pLSbzu^N&hsg6|Mw}(nk8o?#kn`BHjIs6DF+R@lKte_{^R%&@8t5wxl#_TKu zc2;`+ptRw!B9k$C@kZDQL1`lnHg&8lKhRC3*F>A!Gm0eD+Kp#l#}~fyJ^c6o_!XXh z^Rqa7+|@`@Bbyf`M3WIci;XJ4%~i8DZSlSpTg-UUqkEEPE^}NsI}5TxWlwa zPBna{!A|hgIe1%#R7RA>@%^kYx1(|N033-?s^~db{p|#B6P-^KCSMa8TnUsKgDhur zi1seR5+8yw(hqBD1Vg*ey`ZQY)&e7 zihv%fvE}W%uxIZ9?AW;nQ`55q-Y}9WDrZ045>=E`G}75oT`kLOmeAVL&eAd$$}*nm z8LZ$ku{DeED=1JMm&Z<@#k~j5Ak5D<=}ok&gcHSyC^2akk zk9>J(Q`HJID{H38lAL%}sw`7WJAA$U ztkq=hDxv515_E33+;+8EzRPW)&5~5^2KedcBYGX0v<>Lf)u6Ag1mnhPjPbbIG!^Kg zLY*QYC%iVa@;%K|h)q<8QBjFu+U5jrl2lyHGzg{=2sWk=ZfQeIW{Pz7p^3-QJk*C4 zs-c+!Gr0fyJ&Y_(plfm%U811o7gWO2*1271UZ6#poJDA9H|@XYLmcWMvZqub#c zU4(UX8s@P{n5h_z!y_;ZjY2y-j=J$_=qIM|jlcgpHFQM%Bf%aDIW=_d5%>glva5FF 
zais`X+VW&qRNx=~^WUL!*F1dV14!Ki9~hS4%KgP!SKxc}NG@wGqxYkc<~{}V^9J&QBVi#h;z%P^WocEQ(1 z%i287@7sxbYXapOFDvm@)R}092|Zc6Hm{cFF5F4WyZ~!_5QY$cW3(4>+T`E<%YP^I zK8$yN@2^y*=D|JZ5aR0=j^Dt-kt+&(_GmZnEvoMEdDQWDODEvy>;e+3yz2=VSxo)W z)G}GPzp$8UkHE_>Ch%&_uu^qKn|hFJ=|)%YFcy|~PDu&8kti6IeWXzQrx>rh!;soEY(O65BxjlC>hB#R(P zZa`TOek0Pewjet%gHX@nvE}m^O9|(4Mj5o^L0w%b>~mDBaI? z%2X#6fY$}1%K^L3N4pV*pC?dOH}f~Q(c-pI>G6gm0QKEe41Kh=U7c`IbvbB%_@pis% zl!{>3??Z3MhlHOtIp~Ei5mTTGQrQF(2_)Lu)zr8+)n*?HKwnQEx(C~E@XQkKy?P!O z?jFL_@)Y_fCosG;kHLjS4Dsgap?z&%T0(4o3eovl#1gr53A@25=y}sPMrQHNAN=jF+tLU!+5aK`5|}BfDP_m_4FG;XJ3c0_|K7j; z3$5!2f^)QSgtKRH3Vy0R=k_IN_Z@_mHE#8Zqo_W52$ehcqJGD2)XYvmGd)a;-HnEx zE?$QYG&CmA5b?tl_TcEbBmCqaVehU=IgYE@?A zT8#x|)mkby+Ed<`W-1bs-NSO{hSS4JkqXajW%=?1VfRKbFh0j}mO@0DOX6*;yr{hB zA{uFpn|jC5JGmXrgR|Iq@+z)9_ZD9H%(rm)#ZTej&DZduul*T*_n+R!m;d75@u}ba zYux!+R@rS z4re%pvU&rGs~S*TS%(~2S;@YVS6Yh(i&q`DjNMChPpF>pNxGIpC(h%<*-O~7{}}oQ z$JI)MF1Mcw%L*-lRae(QODn)DM{zOLQ(+PEXk{gZl}tU80p@ErZp6ln43yQ@BFTH| z+@;HCYwv=S*Uaz`deVz4&=YqkLP>0?bS%{ABt4ikeMrjtO>_fkgthcAm(!-#(S}zO zh7G*_e6LLlvqJ-mQ-j9#DB?{4*j;*-!+J!*4kTmJoTcS=RUzOLuvYMQlp`E5qq*6Q zRLTLLPlHvW&RS?Kubw}QJKI^#dfr| zrC8JJU~|!@JL#)El%4@MtUfmZMvEE&YL?f(iZP0YIz|h$W zXLFO9vEgyif;w232~I0N)5y=1w9_^!yMSJgl+J)wjSiiZ+TD5$n(HdiW2nXmp*PS_ zh9ONYb~+t6PTRZRYQnfqk0I77Jv=6vQWuVf5K6F?OQzI1vyJVoXyr{GZ)-)*;0W4V zdoe;awzy{+Z@l*$e*bU3fUD1+!Q_qwj4$uR5TPf-KgMVGVshyadKQ<^wlIqpN!dO( zizYtTG{+j3wQhj$^AdW_iCI_~X8vuY^3x4X5_(f`FuwCQ|A_x2Uc{bCo19D!`*+ez z9uuIcp?3^XbMlxx|GybBa9HAx{_&p?o0&v_3f(z704FW1gTS*>k=l0?Zih~w`ut^7 zT)K$L#~wrFsmD-#;3TSe+%>f7^^=pR?C(cyZ!fA_notuDp-eI&hn?7eWCyk{jndkN zG1Qf$(u-j)&qFL4Ru)q$Lk3jiu~?6l#_gP(Y^+}|%Q>!Ai~mRxf%WS*s5DYCEJY;< zWhgr<;?bLaIMcwkyMggTSJ1!XEc%vCVw3<(_RkZ1 zjR>|5Bh)bgf7>`LVd)+2f;&nG*~4@yji}bU3B3rkjtIrVI1Zn`g&LC=vV5b}8%J}` z1e*E?>qI}yRDzZ;VLdQ|#S@R?D}VYAs#E3luYDhHe)F$!?1?wgwRi^Mo+V80e+-R0 z23M>T7JrKHlpf?}s?G$Dp$)#qF4USmsJD1fD(nAN)+4{79#wjWdc8Z*LqPYy5~RCn z9mLG?0h~C04JXcB!Tj<*I=F5sXdhjijW$`Y7EiCPu0@5+buOzQ^h#A5;iFCwY0Hyk 
zD7P}!Nrn~Pn}ZKCF-f8V8CD|e(m1FZ9}QiRJ^-uBuExz36Kvb4(z1)PQ9*TPbsKqq z2GQQtjJ|fJ^Zw^N!r^c%HU?!4!ybt z+FBikl+C5$)6`&@igPbd=&VkI8J^rBhaH`Sa8rj(yPME9 z(27p}mhRCx3@_}&sk;~Ph3|a90C;>VbqTgLq9fxqfbAJ zJ8!*>+i$;x+n;(1H{X2=S3mg?+bpo#C?WmhyM){$WC_eibDz08Z)wQdr zyL1C}N6$h>wWpn#Ma9?zN(QL-hDK1+*@@bwBx-p5%1w2&lv?z3%X#t>coB>ZbYS}; z6{>?3oRzhd zvx~>C;LG3p2YmGF-^ZOdzlOKI`B(V-cmECd-})`pSrYG^g)h|$SG0|)tpnaf2bELSPcqjosszCiM0mJ&)zR$Iv%2 zgFvJSI-?zR4F)wPvZ|Ukv$9%^$tx`_|Dc^v)SX%&TrEGj73()|p}kF~DtDrroRGoc zYJC*RtYW77Q~M^>A)@0wTvSnlEjineQ(j0&Ygl1;k?KgIy{`qG{cY&zX+^3z27kzn zL}LW~!(B);2Vu0;!W}Rm+3KNa@xbM;hs#?>73ccDn>x?vwy!hIJ5FX2cgvQoLW#Y1 z5(Ehl9Rx^#=#{7>*n96)5-EzRR`1Pfwk3Bc4FzEKI#H=rNt=+J zUI+YqzPwt-3!rAilB0;0R7P@@jF?LaeH3rAg-e%4+1Y)85|ZQl$MjdC~-BR$(^s#bOCXuZSMMf~c#e5NgZvpf8}6A!x1_x^An-TRi%v*#cN_8vj&+zi@gCeb=Mhxp(W8v7;?>l{I-wIAWuK15o3 z;g7e%ODh{_>3}5?LrpXaEd@?Vp{s~D(B=kE7!UF`gi@bK9uP)ZgAb(ssk+ypeQdU-wxqTNh`M7Lg1f@b0V+o`*wjeDM$3_zTW}^Y=S|u_>@oq&i zY!o|>!wA3Agx-!uI7#+m#Xg7REpntud-K@D@uinudWqupcX;fv)jGC9@Kzgb4@FLh z-OohQP&Tm!|e!ljN;U_kFoR2+gLvRF1DX} zA1fC=!t{|hFtBnOom&oKnu0j6{TO<;9hMdocD{N8gDYn+`N|b+KlL73CifySwt`U4 zJT%TGLO~1iNNK{BD(ZaF>TIq|%RX(C=I;s=7K%_Ek_$F@s+h4A0`xpUCiBa*o0$p{Zs zqQF!gf0sCe>p&sxbGg2rLaXE$(_$09%d`R+@+(AwTQiE)78I(iD6bK9ryF4lw!#x> zhqEDp8k-+F5_6Th1_i|xQvHzB^lWJb_>tgk5G5jCS|--tP1Y4L%ACz{BlomY^Dp93_-)V{eag)8(PIt`vIyEGtVwWkoWK`h2<) zal(YKBnZz~1f!`K24ewCW|E%CWkmnXAYAbnDn%VuzYAKQ9ohyrGy*ZBVMduK?2Qem zGLzCOC{h$Gg|-^O#vru(qsw)qJ259jqLPceacT{%u8zdVrqD^t{Dm@+-IiikPV|>4 zkk5`?SW-!4W+ccX;pNe@i|h-ev}~~=!I4Vdf4h}V7Js!?agTKiQw9e)42W1 z*Rk)?J~S`w#?0k+aPHgB@X^2h9*6FJh`!?|u=4(=*ms?l=j<8uuPmWuVJkX!>_N-o zE;P>XfS=^&XzPWqtp~xDPWYlt@X;m*D0;p~1Wr+)n*vrF4#LQ$2?wAeF>Aw7R0c@S zK`)9UKIDbm$fD@wMg7QNFiDAru(2_Wb)>nCiAJQbWyGRfJhc!#TBb@0-s-V42C;f^zBT}h(=XXvoyEh!6M!$?i53Q>I&WOcjC}Cb zcX0ZxJ6Jk?6$?kMV*1eQSfcQ?FC0SO(osz8Ig81?XVJ5;AH7=+U~v0!3f((6@!prX zdjC&&`{AE4b>IrdcArPzwxdXlufP)TM{T4V#d?Rd_b!$8HH-E(wMd1W3KKGAW>S?O zvYIdqo-P>Nop3je(}KpKwj^M5^pf^E;BT76!0ZuB?l=u=bP%zj?U*2aUZw56_W7R> 
z9bUoxAO9UkuY8C)KLcMrNvv2dZJ&{8Y9Xi4>Fhy{O*fMAC|I<&n@G;jtWU$bw0z{0 zt0CiSiZ%68wwE%a4+aW$4Q;o&&MPHP$mV;*zDO}gnp;@L>yp8i&cKu+iJhp7ww8pq zhIaN@(w(6CCnWr*@baKupjwxa#bB7@bC5KRq-iLJFUA5WZYK8!Qw=g{l}DeJSiNHR+F{D%%VL zh0N(v(y|sorAUQNoe7gMpQM+CD*mjf&Y)Fh0CiQuYA=T`poFjf7WJrDe2HOBX4J9a@x@%w@|il1uYAxw$3C&nsplMdaqu+T!H4P2W@mAXqMy216@}Lhg+Uv3qiBC{pLjd_~>7>D=SOeCxCGmbLg;VVJ zE8uMr#V&)dEoe?4FVTvU)($AT+K>|oB8&I)2F8#zG>$YjL3b$>yOe=G6b^8p_4cAP zoLTdYVk*I+YAIZqVs_RwZHC_HQ(%@G7>Hwp`~Q6^(>B3+9!lp;r( z*rp;jyNazRk3A+t?{N+SksMSM?_L!=QRPg`;fbQq83<(w<}+Y#E}&+zK)-{9o+uhBMt1R)Aga9{~dlY200zYC4K2`WcKnwQJ1GNIhy zMy0s{-qs1y{vh0~w8Vja=-l0KHcr4E8-vl^1G8rcU1NtavwRV~QwI@fn}#{q1HG>k zo{kyJAASSJ-@1>XDI9nuP`NQC0ps8&iTFE)X{w4UVsvaqu2 zq!Sm5y5aK^9v!zsPLjb~Z6Plfsw5PMyY!mPXM_Eg)Ht^fDM-GKxz0b0IH6 z5i%JaviWmfQ7LIcAac31tGkSZQAWCxl@&orAyg8XwF+K*5{RiJ1Ge&P)RX%98vJOo z*1(~wBd=R~ImTa0FYgUBRhuzr)S{`7gNm z`>!#7^&0lvxQnYl{5KrB`6Z4}=+1un3C@1;00-{e#n8D+*z(r<7(4wc!qfBciN%hN zF2q_|kZ5k9=tU*rYasQx8zQh$WX#b74Dl9dB26T{7}ONR>M#QhY0wZ1!Q>0U=n8W1 zg-}83D+DelDq4{CmyJQFFzQ2bJb;RpxU~PTfS*y$&&y(qZ^PXR4$A;ifWkp^6dURru4?am8Qy$d0Dt7+hWA+6L($pfBxmd~$N(O<2nnoO}< zN8(#UpzF|I!U1eK{w8)^yoLQ&KjZt}M%%(M#K!ls?U5EbTG{Fw zhfqQx$*!!0%0_yxZGgXhQc4>i9oUZO;0huGJJ2}13nR;C__v%TWE{iJW7o0g_=jj7 z*on#Qr_nU9NYU$pxq*Th?nAJ53ug9TM#t1X$@<27rcq_EA&robnq7!Yj+>iMJkhY)^~%jxW!e35{^%bUY^p5d<|`^B}B{ZM4`z3OhU~SG+13D zIkM!8T)en032S(r#I<|+ndf*Nh0uvTl_DdKSd{vORQB4Da-^8JGI3cq{h(FDqS_GcC)#}eqJ>c)@l+f1<+Uti-&O1 z;DDjF8ftwx++HIhQ9@Qg2dh<%fWH=DYLe4N3v4Vw&{K`}goVz;0Iy4qU{HnDb_=@u zoM`Rj?+TQmsZ~XHkcP+Fhufg_TTtwEqu3v$*twzbvjqdtg}ex~xG}RdfOu~dGKyS& zC2cG%tJp+bR$WOy&OfBN2?Z*ZR46A;Yea@phh(`5n=4dEDpw+j(J84^iA~}RVwqBE zsb$qj;bSSJzEqiLmAo%QT6qO8AW5*K0tI|uA>UI#kt{4G(w51kXd(ujLN1oW3u%9N zp-FU_$~@@hIWVirF+1818wF0QsesX1cFNw5lM8955L35|KqPX_cev><}Dn%bssl=_cy%p@DF(VoA2@7Z-0xo|L`MDe({i$ z_W`!N_9lAv9YlP34lNT??7)MFHa8$b zrmB`^$ocvT3YR7jgUQS045Nm(RsImEFNorhUrHWYLGqRbTqt2jCA<0T>+TrZuWjKd;uHH29odDh`NI^;5xASj5a^ha1kcel0(;{qtg#X30(~$=hDmR8 
z=$bo(nSGbgIlUjlTTh~)ZJuog4ee8G6L5*5R^cv^VF$F{c7*%3Vr1z!5(8V&GB8Jx zGtdQpU>hrtDsm4k;|?m5mKC` zvXokzw8EUh_vFjesGv}a5~rKevL(?IN7%%5d*)fO_wwog5IbRM#X^-JJyF_ISl23o zhscmD`h-~B7xQ^yBeAfz;@F@#iBa#c!7KK_GB}%QS#{JLmCsDkGeYGykot5`JGIc* zwb0qLFxqtRg~jwm4ZH#NMfTlz+=4(rgILteG2DQzwg$8{xfqyTm>6onV7Cj2m>!9^ z0X^LgjEn@)-Rr>U1cPI<8cod#(qX0$y-pMu709;Np)}HfauQyJM>ID~q>A3LIHtF> zqp>?8t1o1-@)sZvdP&|(b*Yds^#%Q6+# zllESs&~21yD29B#oT5l!+{li;v4R6nPBD~MBb{G~3twO1~3e}o2sH+R$tk+^_u!93e3w<5GOrrAm)-go-7|oruZEg$ZrhD-9 zH`npyckg5Cp)oYiHeu=FUfloNAMp9#{(?8Z{TlCm`)hpi!=G^P`@iDWH~)a^-~Ar9 z|LHII=+A$}weNn1i=REju1i-jxML4GX6Df`Hi6du0VFy)(A3tBP@;vz(E?jbH}q}& zQ1p(&w0r=T!{<@8vLEuLB@|6hK)JLHRZ}yNwf8|5ZiSlGteUrFp*YI;y_O35>vM7N zcu9axC_PpZqL_YgL(QP1%YF_`W)6nxeeU1x39BwIL(?ZS|8BE$vfsavI4% z?7T0Rjzv`}G?L(nxjL2J#3)Z2YqCOXFiSSqY^jIM<%OM;Cl)*EJz-cV>?*5Y61tw5 zo%}yX(LKErbNkLBKC~5c2QQ&_@i@+X^lQ9w^>duR{XKs3Z~s>ky}D4BlnUP6NU>`f zKz+C!(cUSznulNt^}tEn87JAz9eW!)FMf<|r$0dV(rNaYqqMra(7ojZVnaI->{{S^ z#wmEC6uc4C(b}5A1Je7$gWGAX&th=t7#c^mqkVE0Jc%*bNOY~k+n}|F**7Bywhxo^ zT4C_FKNHI45itFLQ&sEQHOkG9STT!Srki2^f(rUT}~E@O0>2j4V5@qBk~=J z(-vY=un;@3SS7?yx+Uoq<`+m=mc>a6adJW=oDiZXHZ@mN5MokNrQ>UM23TJx02hO? 
zo&DD6wLl-VQuIi99zE1-8jlb?4YX`VmyW?-2b;GBZoirEU`Kn02hnH^A|WjXdcD{- z--D5X7-lCDSl-fsm94E9=(3`t*@*V$8f=+QU}7SI$;lXYtnhp6x1ps)gTcXCJmv@y z%{BD&K0ArRiz1Sn+${!@pBK)JiTPHHPl-u+8%ju2#fmCa>h%hTJ#csQp@xE|9hgS#%3;)>egpcmZ=&kNB~%_gfvO`%U_5&s zrOUgJ-aCrSwgJfcCLr%0N9FJssz-)V*+vqi;At3y^ltuNw4M3@EsBeF%2&^yt^D5k zI3H7aNk)DXU#Fo}U?8xPc5CZlFq;vM*T550K%o;y1eM67;3Zd+tSd=a6?w=lNP$LK z1W%nBVW%0bAs;$PkXl|_vAI;N#EDKVCaFb47pa7vdG-a89<2|>P0-t;_DJkjZSGUj zoZo*Hy=SF4qBYV4d0LvNw=9xZh-4AsG>(>*wS!j3g=(FNl*fTV_aJJq3bAwff^d;s z{p?eAUzCK`08e9=ls5hFrT4LT_!1($vxxQ2V_@q+xZB3y@1Di(S8w3d^{;U0)8Avy zl~3^SFaI0we(`(h?5sW74R@>?_D~xgK{uKQXZamALQml-uZ_UfK1(~hAKTAfm+bDG zu)6an(KL0Gw6_l}lLtvo%rg{pJkm z-uI>KSnjS66CR_g?;;M0%p0v6?PRBp%Jp6Lan2 zNTQgIDv?AlpMoedo`_@x!B7Ld4Sv{sPSgjSF!@REEOBV3*{b1isc3C$5RVzqnyA6VNC2aQehl||u(Z&GgL}HMb1{ytRtlIk!$KM`qSR|e9y@z6BfP@npm^CCIU-ox z(TCxY2Ix)n=c2N`NG#gYNU_t9xKv1|XP1kDI`s1X_6~Lu3(_e>n@NQ0lq%#oJjkqf zBGcwUK5dA;eGr?}7HlZhVpEwAJ~b&%2%d(dr$tJo9w~AIQWa(-D{A@LdZgC4u|aD^ z645=)XhVwHg!~!@(p2m$q=4euT2#}{_Vza8^6A5f`9084Y;{Hp+FRooogC!<b(9Od080#ikKt=e0u>u#x1f9GHxnP6{BY*5WoJ7PG<|)WYObLs46a z8gmV5YcyzPXR%oFA+IV#nX-~LrxaO=5{dBgOEOSWkPNF{hLBCqX2ftuGlEV#WYU3g zk;X^V!B`bO5zU1yeeU_S6tY!;6J;L7;)SrgkD6G-kTR)I>;yH4ChU?Ry%%0sD{Xoe z89&7Ce_>t4O07a;K%%_|+M3l2+B#FcboxR|t7~=A_C}i!inq}EBuD~L+NDO)(j3Wd z2Tr~9F;2aGN2-6(KCuG}`_H3kWD)MxF)SQ@4fBUz$KvVtFn#DX9C+hn{PX_-2DTkQ zplui~il!}?K+pIVs4b*xg99ZxCyI5nf+E9lY)E>4+srFiJoz3Lj$g;{?l%w`+>Q2` zBWN7n2`{P6-!a4c%NSTZipjm_vE|5Xe0~pu?l5g@E0m^`M*(kWo&j zlUIVIf^sP_u07rjgSQpdNIz=49fEr3Dwmi1CkQ?1qJ_ECxs#>npm;TOwWWy ztwt~yM659`k)AZ4#{g{()I&v@Q+X(O6g!o(noY^RtU;~ci29%jW(r_K+=e#V+Kx^S z5(zPfr$juWL~myet*wqCr^5K42@BKpn4hwsx1$Op{YLEE-iX<$5H4R>!NN=%mbdlc z!Ws4t3gDqVgLtgo?0?aX`yHVYlC(S(FRV=H1h-NAg|1k5)G6oiV$t5 z?Naj}mXi>Px7ozo%u-qt(v`on9U8wEQ%4V@ZFxJgZ4{|mD^lz1DDZ}nVs_z0nI0QU zb=Xuwn_EV~t;gA3Qm-z0-yZHD%3GkD+f;v ztKZ;*JNNP0yVtS*)EUg~Si!*bG&)Cz(K67FSWho}?cH#7(;{_`qJCjF8ZW&E_vH^z za^O62_MAlewtXmnZjo0fMXXV)$iY@J78e-A}3OpEKG 
z6>#(aaH866LlrH8oFXT#m5%i12)HmaGl196@51>rizE+PUb_TWag(LCp!s>(hP*e2@KlbC`ELnyA5WY4#kBf5Ebf+rSrBj6j1Q8 zvU8->SV48d){4aov4|m7ZiUzhkrS29M0?`Nrzm!U@K!goihW*>QXGlYMnd#N>T$6x zrAn=Xg+VfsXot>hgUV1RH7$j&&f$gC<)_#+vNfTjZxo}m+mYxU$Bx72vF+eF>^Sin zu6_C~h87P>MS%94zJ}R7r=;p<;htG(Zf|VYY3zFS11z3;8&~f98n=J_7usP4CVv7k z+CzQ4Pii__kVIrfp{gE*8YhY@yxmu{Cv%NJWByf z5%~x67@Sfvu;%5Bct)IgUYoR{Y&geUgli7APv zdF>dy#1S(#A$&i1{BgXn_C-nH)LJ^MqE*3LE&W-<{y`@|+RMqwMLEgOUTkkWi>p`8@Mj@|*hN7qU=@9J#5J@)So;lzT5wc)%t{cOnO>j7>QBj@_C;!lJlY@Vh3&rY6 zUMM*-%Sw=5T!0Lb`@Xu8Rw01sU>AA z&U-g;{LHJ^y0TNM9@aB9N{ic%M$%rSyAPhuKA1cDVH%l%ZTBJAPP_`;$qTf%uOfBZ zP897u3ONUB`j&kt+)Af}y$?5mya-41E1o?#lb&>?u5Xa$?t}m?9K3)@3e*1AKgK7&`4=o5xrk8P5RCR9 zG-iIsHFmlf6J&H3rD`k6bo|cig0!C$-f*|HL0PQe&L4jl2VTF6?Wf+w{J~4udHO91 z?^TSioIu;;3Wl~HWxt$5ZMX+zCO^{34Je?HR@M5@HMxT{-Yg{=6&s7wN)(WroM;`{ zg7fd*#pKRY@U~8(b!;d5(j0X5800z|((`C(le3VLo`bb7C*kF!RIH=b-IP@*wWron z;9lA&)dv=}N?v?iF*~ln;=BPnmu%R(62y`HUAT66JKns!8z&A-;f?dl zIJ~n5vx5#iR;FZUR#l=d;DE1{irg8cRp>&`P!vPMyx$XrQjrgZJOf>wUM%k(g_%g2 zQBi=bDjCtd2x&z*C=%IF^a@lMsxY-{0jJ)74Z)FaWYAg_a8P>2CZSGr;6=HPf@Z=7 zM!Zde^2)0zdMcz)_|jxrqzloL*Wks98az{7hnL+^lnl+Fl&xZL7U}FLo6P}a)iuD- zK8Di9KIHqu*mi0ku6=S1{adCw?8BOeRdC@+<$;i zKKl}%+P9@?f(*xR~LL-Es%Ou)2#1+|Bc zLQhLvbMh=qr(T8O&{<^Dn&mGag!0s7RFd@acI-yc)GYE_+ffpr;Q4$cIuf{(wt$o; zL{COro6p-!vEm+YBu%vA=!q44`1Vm8JUWZ2Mhi-Odi1vVFx3@?(Z%4SFGGs9niO4u z)bes{&dEZ0PCD&K7HqXD_-rISPc7!A24NEWE=lkCw6(dkw}oXD{JD%&n1z?tzQBR- z^Z#766?^lfGq%5wwl+N4-XPk46+KbzQB=cEN=lM8HD^-rq_poMHI%Uy2D*Xv-eH(* z3|hQzbvM8hN=Qis=C|)5*=@z#(k?VHkO+GlZRwFVB!}Aukr z$rVPq+6;|I7;CUeTRUo8QAB#CFn8p2%pJXo*FXIZ_kQ>*ntCUt*O}-XDm7;4YMm&r zHc6YE#e0evgv+#csI3%F3SVMs4`P#hFw5U_ zw|N}#o*C&ZZ$8J$MiQP#w=Yt0tzEwfn=^7(tt`cF&H7|KPcht>oQ7n6$D$_2DqU0b zD1MJ}9g4*YkyTluJBr;Sx)aw-%-ubjzZ0S-9V`6#-y=0W4f3i=iS$J3Cn=9zIr{_! 
zuTKY+%l*sB^wAmBqFo2Vz@S^0Q9 zf(aX<6u=1Yw{?3kIuXP0h#y@YW{eEg)3!P>+;70-h>2pSCBT}#W1_2m9Nhvs^=h`LYG>Whvb5EzBj|E~#!kMQ($vnoWtMifW3UiUU!NbXs2_dYjq7lB_|b#5$1H z){hKEbj{=vvi+?jz#x)sAt?K&k;#r;Gd+iMq_yK$kHg*30+GJT;0a=Q>k>lU-4r`^ zvTzKBmKF>yO=Ea*3N6z!h>wk7{*@&>{KG?h`G;@t-rZZc_4%h1wfngH#doy2-{RK8 zhxp`|_i^vRJ$&)yeNrCFq{yX9pVjS<#kO z1e#mnXla9~y#t2+0ciP|%83b7&&)zKvj9CUS_#FxbbbZpJNKht*B%rslJffcq>~qN zA8Ch{R^h0l=+)8UT2M**S;n7p-7ciYT2VPPimn}VIC3az~+y@6>5amY=^$q0Z|LL z+DNh_fhA2!%e02UMl?a-q5zZFe~E_ z>{tq6eAtYs5ep^;Yp^(L!GT>KY?-QNQ%VB3LYsT~Xd5n{8^xy|y@FfstzgSIU*Ds^ cXh#YDAG?u@-rPidr2qf`07*qoM6N<$g2t{?CjbBd diff --git a/docs/source/tutorial_hello_world.rst b/docs/tutorial_hello_world.rst similarity index 100% rename from docs/source/tutorial_hello_world.rst rename to docs/tutorial_hello_world.rst From 88d474323b1c5e9760ce22e504344de4600a42d4 Mon Sep 17 00:00:00 2001 From: Sam Wu Date: Mon, 27 Mar 2023 17:14:36 -0600 Subject: [PATCH 25/31] Separate bibtex requirement from rocm-docs-core (#656) * separate bibtex requirement from rocm-docs-core * point requirements to source rocm-docs-core repo --- docs/.sphinx/requirements.in | 1 + docs/.sphinx/requirements.txt | 18 ++++++++++++++++++ docs/conf.py | 5 +++-- 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/docs/.sphinx/requirements.in b/docs/.sphinx/requirements.in index 2dfc7b076b..36a9a45775 100644 --- a/docs/.sphinx/requirements.in +++ b/docs/.sphinx/requirements.in @@ -1 +1,2 @@ git+https://github.com/RadeonOpenCompute/rocm-docs-core.git +sphinxcontrib-bibtex==2.5.0 diff --git a/docs/.sphinx/requirements.txt b/docs/.sphinx/requirements.txt index e2b7935903..8618920ea6 100644 --- a/docs/.sphinx/requirements.txt +++ b/docs/.sphinx/requirements.txt @@ -46,9 +46,11 @@ docutils==0.16 # via # breathe # myst-parser + # pybtex-docutils # pydata-sphinx-theme # rocm-docs-core # sphinx + # sphinxcontrib-bibtex executing==1.2.0 # via stack-data fastjsonschema==2.16.3 @@ -94,6 +96,8 @@ jupyter-core==5.3.0 # ipykernel # 
jupyter-client # nbformat +latexcodec==2.0.1 + # via pybtex linkify-it-py==1.0.3 # via myst-parser markdown-it-py==2.2.0 @@ -150,6 +154,12 @@ ptyprocess==0.7.0 # via pexpect pure-eval==0.2.2 # via stack-data +pybtex==0.24.0 + # via + # pybtex-docutils + # sphinxcontrib-bibtex +pybtex-docutils==1.0.2 + # via sphinxcontrib-bibtex pycparser==2.21 # via cffi pydata-sphinx-theme==0.13.1 @@ -175,6 +185,7 @@ pyyaml==6.0 # jupyter-cache # myst-nb # myst-parser + # pybtex # sphinx-external-toc pyzmq==25.0.2 # via @@ -189,6 +200,8 @@ rocm-docs-core @ git+https://github.com/RadeonOpenCompute/rocm-docs-core.git six==1.16.0 # via # asttokens + # latexcodec + # pybtex # python-dateutil smmap==5.0.0 # via gitdb @@ -208,6 +221,7 @@ sphinx==4.3.1 # sphinx-design # sphinx-external-toc # sphinx-notfound-page + # sphinxcontrib-bibtex sphinx-book-theme==1.0.0rc2 # via rocm-docs-core sphinx-copybutton==0.5.1 @@ -220,6 +234,10 @@ sphinx-notfound-page==0.8.3 # via rocm-docs-core sphinxcontrib-applehelp==1.0.4 # via sphinx +sphinxcontrib-bibtex==2.5.0 + # via + # -r requirements.in + # rocm-docs-core sphinxcontrib-devhelp==1.0.2 # via sphinx sphinxcontrib-htmlhelp==2.0.1 diff --git a/docs/conf.py b/docs/conf.py index 9b43b7155f..3ec81ee9df 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -18,7 +18,8 @@ mathjax3_config = { } } -bibtex_bibfiles = ['refs.bib'] - for sphinx_var in ROCmDocs.SPHINX_VARS: globals()[sphinx_var] = getattr(docs_core, sphinx_var) + +extensions += ['sphinxcontrib.bibtex'] +bibtex_bibfiles = ['refs.bib'] From 4e097ad283d5a2d977501c61d0f8c3081dfa35f6 Mon Sep 17 00:00:00 2001 From: Haocong WANG Date: Thu, 30 Mar 2023 03:07:33 +0800 Subject: [PATCH 26/31] Add CMake Option "USE_OPT_NAVI3X" (#647) * Add CMake Option "USE_OPT_NAVI3X" * remove navi3x opt compile option from cmake script --- CMakeLists.txt | 7 +++++++ script/cmake-ck-dev.sh | 4 ++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f861e30203..c9fb6b4552 100644 
--- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -22,6 +22,7 @@ include(TargetFlags) list(APPEND CMAKE_PREFIX_PATH ${CMAKE_INSTALL_PREFIX} ${CMAKE_INSTALL_PREFIX}/llvm ${CMAKE_INSTALL_PREFIX}/hip /opt/rocm /opt/rocm/llvm /opt/rocm/hip) option(USE_BITINT_EXTENSION_INT4, "Whether to enable clang's BitInt extension to provide int4 data type." OFF) +option(USE_OPT_NAVI3X, "Whether to enable LDS cumode and Wavefront32 mode for NAVI3X silicons." OFF) if(USE_BITINT_EXTENSION_INT4) add_compile_definitions(CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4) @@ -29,6 +30,12 @@ if(USE_BITINT_EXTENSION_INT4) message("CK compiled with USE_BITINT_EXTENSION_INT4 set to ${USE_BITINT_EXTENSION_INT4}") endif() +if(USE_OPT_NAVI3X) + add_compile_options(-mcumode) + add_compile_options(-mno-wavefrontsize64) + message("CK compiled with USE_OPT_NAVI3X set to ${USE_OPT_NAVI3X}") +endif() + ## Threads set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) diff --git a/script/cmake-ck-dev.sh b/script/cmake-ck-dev.sh index 375ff49310..8f462237f5 100755 --- a/script/cmake-ck-dev.sh +++ b/script/cmake-ck-dev.sh @@ -8,8 +8,8 @@ MY_PROJECT_SOURCE=$1 cmake \ -D CMAKE_PREFIX_PATH=/opt/rocm \ -D CMAKE_CXX_COMPILER=/opt/rocm/bin/hipcc \ --D CMAKE_CXX_FLAGS="-std=c++17 -O3 -ftemplate-backtrace-limit=0 -fPIE -mcumode \ --mno-wavefrontsize64 -Wno-gnu-line-marker -save-temps=$PWD" \ +-D CMAKE_CXX_FLAGS="-std=c++17 -O3 -ftemplate-backtrace-limit=0 -fPIE -Wno-gnu-line-marker \ +-save-temps=$PWD" \ -D CMAKE_BUILD_TYPE=Release \ -D BUILD_DEV=ON \ -D GPU_TARGETS="gfx908;gfx90a" \ From 389e84a83b1cc3a0ebfe20f7932ab1928f93bad1 Mon Sep 17 00:00:00 2001 From: rocking5566 Date: Thu, 30 Mar 2023 03:50:23 +0800 Subject: [PATCH 27/31] Conv + quantization + tanh (#645) * Rename file. 
Prepare to support another activation * Add comment for quantization * Extract out_elementop * Add tanh example * Add conv + bias + tanh quantization instance * Add missing parameter * Refine cmake * Add external api and client example * Extract variable in example * Fix the comment --------- Co-authored-by: zjing14 --- client_example/09_quantization/CMakeLists.txt | 6 + ...2d_fwd_bias_relu_perlayer_quantization.cpp | 99 +++++---- ..._fwd_bias_tanh_perchannel_quantization.cpp | 209 ++++++++++++++++++ ...2d_fwd_bias_tanh_perlayer_quantization.cpp | 201 +++++++++++++++++ .../conv2d_fwd_perlayer_quantization.cpp | 99 +++++---- .../40_conv2d_fwd_quantization/CMakeLists.txt | 5 + ...bias_relu_perchannel_quantization_int8.cpp | 8 +- ...l_bias_relu_perlayer_quantization_int8.cpp | 9 +- ...bias_tanh_perchannel_quantization_int8.cpp | 87 ++++++++ ...l_bias_tanh_perlayer_quantization_int8.cpp | 85 +++++++ ...2d_fwd_dl_perchannel_quantization_int8.cpp | 6 +- ...nv2d_fwd_dl_perlayer_quantization_int8.cpp | 7 +- ...bias_relu_perchannel_quantization_int8.cpp | 8 +- ...l_bias_relu_perlayer_quantization_int8.cpp | 9 +- ...d_fwd_xdl_perchannel_quantization_int8.cpp | 6 +- ...v2d_fwd_xdl_perlayer_quantization_int8.cpp | 7 +- ..._bias_perchannel_quantization_example.inc} | 3 +- ...wd_bias_perlayer_quantization_example.inc} | 3 +- ...2d_fwd_perchannel_quantization_example.inc | 3 +- ...nv2d_fwd_perlayer_quantization_example.inc | 3 +- .../gpu/element/quantization_operation.hpp | 131 ++++++++++- .../element/unary_element_wise_operation.hpp | 13 ++ include/ck/utility/math_v2.hpp | 18 ++ .../device_operation_instance_factory.hpp | 9 + ...n_bias_forward_perchannel_quantization.hpp | 94 ++++++++ ...ion_bias_forward_perlayer_quantization.hpp | 92 ++++++++ .../conv2d_fwd/conv2d_quantization_common.hpp | 9 + ..._perchannel_quantization_int8_instance.cpp | 36 +++ ...as_perlayer_quantization_int8_instance.cpp | 37 ++++ ..._perchannel_quantization_int8_instance.cpp | 35 +++ 
...as_perlayer_quantization_int8_instance.cpp | 37 ++++ 31 files changed, 1252 insertions(+), 122 deletions(-) create mode 100644 client_example/09_quantization/conv2d_fwd_bias_tanh_perchannel_quantization.cpp create mode 100644 client_example/09_quantization/conv2d_fwd_bias_tanh_perlayer_quantization.cpp create mode 100644 example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_tanh_perchannel_quantization_int8.cpp create mode 100644 example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_tanh_perlayer_quantization_int8.cpp rename example/40_conv2d_fwd_quantization/{run_conv2d_fwd_bias_relu_perchannel_quantization_example.inc => run_conv2d_fwd_bias_perchannel_quantization_example.inc} (98%) rename example/40_conv2d_fwd_quantization/{run_conv2d_fwd_bias_relu_perlayer_quantization_example.inc => run_conv2d_fwd_bias_perlayer_quantization_example.inc} (98%) diff --git a/client_example/09_quantization/CMakeLists.txt b/client_example/09_quantization/CMakeLists.txt index a4dd80cd3f..2b7d6fc806 100644 --- a/client_example/09_quantization/CMakeLists.txt +++ b/client_example/09_quantization/CMakeLists.txt @@ -1,6 +1,12 @@ +add_executable(client_conv2d_fwd_bias_tanh_perchannel_quantization conv2d_fwd_bias_tanh_perchannel_quantization.cpp) +target_link_libraries(client_conv2d_fwd_bias_tanh_perchannel_quantization PRIVATE composable_kernel::device_operations) + add_executable(client_conv2d_fwd_bias_relu_perchannel_quantization conv2d_fwd_bias_relu_perchannel_quantization.cpp) target_link_libraries(client_conv2d_fwd_bias_relu_perchannel_quantization PRIVATE composable_kernel::device_operations) +add_executable(client_conv2d_fwd_bias_tanh_perlayer_quantization conv2d_fwd_bias_tanh_perlayer_quantization.cpp) +target_link_libraries(client_conv2d_fwd_bias_tanh_perlayer_quantization PRIVATE composable_kernel::device_operations) + add_executable(client_conv2d_fwd_bias_relu_perlayer_quantization conv2d_fwd_bias_relu_perlayer_quantization.cpp) 
target_link_libraries(client_conv2d_fwd_bias_relu_perlayer_quantization PRIVATE composable_kernel::device_operations) diff --git a/client_example/09_quantization/conv2d_fwd_bias_relu_perlayer_quantization.cpp b/client_example/09_quantization/conv2d_fwd_bias_relu_perlayer_quantization.cpp index 7cbbd28322..b8e6a493ef 100644 --- a/client_example/09_quantization/conv2d_fwd_bias_relu_perlayer_quantization.cpp +++ b/client_example/09_quantization/conv2d_fwd_bias_relu_perlayer_quantization.cpp @@ -26,15 +26,16 @@ using OutElementOp = ck::tensor_operation::element_wise::Add_Activation_Mul_Clam static constexpr ck::index_t NumDimSpatial = 2; static constexpr ck::index_t G = 1; -static constexpr ck::index_t N = 4; // batch size -static constexpr ck::index_t K = 64; // output channel -static constexpr ck::index_t C = 192; // input channel -static constexpr ck::index_t Y = 3; // filter H -static constexpr ck::index_t X = 3; // filter W -static constexpr ck::index_t Hi = 71; // input H -static constexpr ck::index_t Wi = 71; // input W -static constexpr ck::index_t Ho = 36; // output H -static constexpr ck::index_t Wo = 36; // output W +static constexpr ck::index_t N = 4; // batch size +static constexpr ck::index_t K = 64; // output channel +static constexpr ck::index_t C = 192; // input channel +static constexpr ck::index_t Y = 3; // filter H +static constexpr ck::index_t X = 3; // filter W +static constexpr ck::index_t Hi = 71; // input H +static constexpr ck::index_t Wi = 71; // input W +static constexpr ck::index_t Ho = 36; // output H +static constexpr ck::index_t Wo = 36; // output W +static constexpr float requant_scale = 0.5f; // requantize qAcc to qz struct SimpleDeviceMem { @@ -102,26 +103,27 @@ int main(int argc, char* argv[]) for(int i = 0; i < op_ptrs.size(); ++i) { - auto& op_ptr = op_ptrs[i]; - auto argument_ptr = op_ptr->MakeArgumentPointer(in.GetDeviceBuffer(), - wei.GetDeviceBuffer(), - {bias.GetDeviceBuffer()}, - out.GetDeviceBuffer(), - in_lengths, - 
in_strides, - weight_lengths, - weight_strides, - {bias_lengths}, - {bias_strides}, - out_lengths, - out_strides, - conv_strides, - conv_dilations, - in_left_pad, - in_right_pad, - PassThrough{}, - PassThrough{}, - OutElementOp{0.5f, ActivationOp{}}); + auto& op_ptr = op_ptrs[i]; + auto argument_ptr = + op_ptr->MakeArgumentPointer(in.GetDeviceBuffer(), + wei.GetDeviceBuffer(), + {bias.GetDeviceBuffer()}, + out.GetDeviceBuffer(), + in_lengths, + in_strides, + weight_lengths, + weight_strides, + {bias_lengths}, + {bias_strides}, + out_lengths, + out_strides, + conv_strides, + conv_dilations, + in_left_pad, + in_right_pad, + PassThrough{}, + PassThrough{}, + OutElementOp{requant_scale, ActivationOp{}}); auto invoker_ptr = op_ptr->MakeInvokerPointer(); std::string op_name = op_ptr->GetTypeString(); @@ -165,25 +167,26 @@ int main(int argc, char* argv[]) auto& op_ptr = op_ptrs[best_op_id]; std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString() << std::endl; - auto argument_ptr = op_ptr->MakeArgumentPointer(in.GetDeviceBuffer(), - wei.GetDeviceBuffer(), - {bias.GetDeviceBuffer()}, - out.GetDeviceBuffer(), - in_lengths, - in_strides, - weight_lengths, - weight_strides, - {bias_lengths}, - {bias_strides}, - out_lengths, - out_strides, - conv_strides, - conv_dilations, - in_left_pad, - in_right_pad, - PassThrough{}, - PassThrough{}, - OutElementOp{0.5f, ActivationOp{}}); + auto argument_ptr = + op_ptr->MakeArgumentPointer(in.GetDeviceBuffer(), + wei.GetDeviceBuffer(), + {bias.GetDeviceBuffer()}, + out.GetDeviceBuffer(), + in_lengths, + in_strides, + weight_lengths, + weight_strides, + {bias_lengths}, + {bias_strides}, + out_lengths, + out_strides, + conv_strides, + conv_dilations, + in_left_pad, + in_right_pad, + PassThrough{}, + PassThrough{}, + OutElementOp{requant_scale, ActivationOp{}}); auto invoker_ptr = op_ptr->MakeInvokerPointer(); diff --git a/client_example/09_quantization/conv2d_fwd_bias_tanh_perchannel_quantization.cpp 
b/client_example/09_quantization/conv2d_fwd_bias_tanh_perchannel_quantization.cpp new file mode 100644 index 0000000000..7a216f027f --- /dev/null +++ b/client_example/09_quantization/conv2d_fwd_bias_tanh_perchannel_quantization.cpp @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. + +#include +#include +#include + +#include "ck/ck.hpp" +#include "ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perchannel_quantization.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/device_conv_fwd.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +using InDataType = int8_t; +using WeiDataType = int8_t; +using BiasDataType = int32_t; +using RequantScaleDataType = float; +using OutDataType = int8_t; + +using InLayout = ck::tensor_layout::convolution::GNHWC; +using WeiLayout = ck::tensor_layout::convolution::GKYXC; +using BiasLayout = ck::tensor_layout::convolution::G_K; +using RequantScaleLayout = ck::tensor_layout::convolution::G_K; +using OutLayout = ck::tensor_layout::convolution::GNHWK; +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using ActivationOp = ck::tensor_operation::element_wise::TanH; +using OutElementOp = + ck::tensor_operation::element_wise::Add_Mul2_Activation_Mul_Clamp; + +static constexpr ck::index_t NumDimSpatial = 2; +static constexpr ck::index_t G = 1; +static constexpr ck::index_t N = 4; // batch size +static constexpr ck::index_t K = 64; // output channel +static constexpr ck::index_t C = 192; // input channel +static constexpr ck::index_t Y = 3; // filter H +static constexpr ck::index_t X = 3; // filter W +static constexpr ck::index_t Hi = 71; // input H +static constexpr ck::index_t Wi = 71; // input W +static constexpr ck::index_t Ho = 36; // output H +static constexpr ck::index_t Wo = 36; // output W +static constexpr float sz_inv 
= 0.5f; // inverse of scale_z + +struct SimpleDeviceMem +{ + SimpleDeviceMem() = delete; + + SimpleDeviceMem(std::size_t mem_size) : p_mem_{} + { + (void)hipMalloc(static_cast(&p_mem_), mem_size); + } + + void* GetDeviceBuffer() { return p_mem_; } + + ~SimpleDeviceMem() { (void)hipFree(p_mem_); } + + void* p_mem_; +}; + +int main(int argc, char* argv[]) +{ + std::array in_lengths{G, N, C, Hi, Wi}; + std::array in_strides{N * Hi * Wi * C, Hi * Wi * C, 1, Wi * C, C}; + std::array weight_lengths{G, K, C, Y, X}; + std::array weight_strides{K * Y * X * C, Y * X * C, 1, X * C, C}; + std::array bias_lengths{G, N, K, Ho, Wo}; + std::array bias_strides{K, 0, 1, 0, 0}; + std::array requant_scale_lengths{G, N, K, Ho, Wo}; + std::array requant_scale_strides{K, 0, 1, 0, 0}; + std::array out_lengths{G, N, K, Ho, Wo}; + std::array out_strides{N * Ho * Wo * K, Ho * Wo * K, 1, Wo * K, K}; + std::array in_left_pad{1, 1}; + std::array in_right_pad{1, 1}; + std::array conv_strides{2, 2}; + std::array conv_dilations{1, 1}; + + SimpleDeviceMem in(sizeof(InDataType) * N * Hi * Wi * C); + SimpleDeviceMem wei(sizeof(WeiDataType) * K * Y * X * C); + SimpleDeviceMem bias(sizeof(BiasDataType) * K * Y * X * C); + SimpleDeviceMem requant_scale(sizeof(RequantScaleDataType) * K * Y * X * C); + SimpleDeviceMem out(sizeof(OutDataType) * N * Ho * Wo * K); + + using DeviceOp = ck::tensor_operation::device::DeviceGroupedConvFwdMultipleD< + NumDimSpatial, + InLayout, + WeiLayout, + ck::Tuple, + OutLayout, + InDataType, + WeiDataType, + ck::Tuple, + OutDataType, + PassThrough, + PassThrough, + OutElementOp>; + // get device op instances + const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory< + DeviceOp>::GetInstances(); + + std::cout << "found " << op_ptrs.size() << " instances" << std::endl; + + std::string best_op_name; + int best_op_id = -1; + float best_avg_time = std::numeric_limits::max(); + float best_gb_per_sec = 0; + float best_tflops = 0; + + // profile 
device operation instances + std::cout << "Run all instances and do timing" << std::endl; + + for(int i = 0; i < op_ptrs.size(); ++i) + { + auto& op_ptr = op_ptrs[i]; + auto argument_ptr = + op_ptr->MakeArgumentPointer(in.GetDeviceBuffer(), + wei.GetDeviceBuffer(), + {bias.GetDeviceBuffer(), requant_scale.GetDeviceBuffer()}, + out.GetDeviceBuffer(), + in_lengths, + in_strides, + weight_lengths, + weight_strides, + {bias_lengths, requant_scale_lengths}, + {bias_strides, requant_scale_strides}, + out_lengths, + out_strides, + conv_strides, + conv_dilations, + in_left_pad, + in_right_pad, + PassThrough{}, + PassThrough{}, + OutElementOp{sz_inv, ActivationOp{}}); + + auto invoker_ptr = op_ptr->MakeInvokerPointer(); + std::string op_name = op_ptr->GetTypeString(); + + if(op_ptr->IsSupportedArgument(argument_ptr.get())) + { + float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true}); + + std::size_t flop = G * 2 * N * K * C * Ho * Wo * Y * X; + std::size_t num_bytes = + G * sizeof(InDataType) * N * Hi * Wi * C + G * sizeof(WeiDataType) * K * Y * X * C + + G * sizeof(BiasDataType) * K + G * sizeof(RequantScaleDataType) * K + + G * sizeof(OutDataType) * N * Ho * Wo * K; + + float tflops = static_cast(flop) / 1.E9 / avg_time; + float gb_per_sec = num_bytes / 1.E6 / avg_time; + + std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << tflops << " TFlops, " + << gb_per_sec << " GB/s, " << op_name << std::endl; + + if(tflops > best_tflops) + { + best_op_id = i; + best_op_name = op_name; + best_avg_time = avg_time; + best_gb_per_sec = gb_per_sec; + best_tflops = tflops; + } + } + else + { + std::cout << op_name << " does not support this problem" << std::endl; + } + } + + // run the best intance + if(best_op_id != -1) + { + std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops + << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl; + + auto& op_ptr = op_ptrs[best_op_id]; + std::cout << 
"Run the best instance without timing: " << op_ptr->GetTypeString() + << std::endl; + auto argument_ptr = + op_ptr->MakeArgumentPointer(in.GetDeviceBuffer(), + wei.GetDeviceBuffer(), + {bias.GetDeviceBuffer(), requant_scale.GetDeviceBuffer()}, + out.GetDeviceBuffer(), + in_lengths, + in_strides, + weight_lengths, + weight_strides, + {bias_lengths, requant_scale_lengths}, + {bias_strides, requant_scale_strides}, + out_lengths, + out_strides, + conv_strides, + conv_dilations, + in_left_pad, + in_right_pad, + PassThrough{}, + PassThrough{}, + OutElementOp{sz_inv, ActivationOp{}}); + + auto invoker_ptr = op_ptr->MakeInvokerPointer(); + + if(op_ptr->IsSupportedArgument(argument_ptr.get())) + { + invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false}); + } + + std::cout << "Done" << std::endl; + } + + return 0; +} \ No newline at end of file diff --git a/client_example/09_quantization/conv2d_fwd_bias_tanh_perlayer_quantization.cpp b/client_example/09_quantization/conv2d_fwd_bias_tanh_perlayer_quantization.cpp new file mode 100644 index 0000000000..7637f5c785 --- /dev/null +++ b/client_example/09_quantization/conv2d_fwd_bias_tanh_perlayer_quantization.cpp @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include +#include +#include + +#include "ck/ck.hpp" +#include "ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perlayer_quantization.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/device_conv_fwd.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +using InDataType = int8_t; +using WeiDataType = int8_t; +using BiasDataType = int32_t; +using OutDataType = int8_t; + +using InLayout = ck::tensor_layout::convolution::GNHWC; +using WeiLayout = ck::tensor_layout::convolution::GKYXC; +using BiasLayout = ck::tensor_layout::convolution::G_K; +using OutLayout = ck::tensor_layout::convolution::GNHWK; +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using ActivationOp = ck::tensor_operation::element_wise::TanH; +using OutElementOp = ck::tensor_operation::element_wise::Add_Mul_Activation_Mul_Clamp; + +static constexpr ck::index_t NumDimSpatial = 2; +static constexpr ck::index_t G = 1; +static constexpr ck::index_t N = 4; // batch size +static constexpr ck::index_t K = 64; // output channel +static constexpr ck::index_t C = 192; // input channel +static constexpr ck::index_t Y = 3; // filter H +static constexpr ck::index_t X = 3; // filter W +static constexpr ck::index_t Hi = 71; // input H +static constexpr ck::index_t Wi = 71; // input W +static constexpr ck::index_t Ho = 36; // output H +static constexpr ck::index_t Wo = 36; // output W +static constexpr float sacc = 0.5f; // scale of acc +static constexpr float sz_inv = 0.5f; // inverse of scale_z + +struct SimpleDeviceMem +{ + SimpleDeviceMem() = delete; + + SimpleDeviceMem(std::size_t mem_size) : p_mem_{} + { + (void)hipMalloc(static_cast(&p_mem_), mem_size); + } + + void* GetDeviceBuffer() { return p_mem_; } + + ~SimpleDeviceMem() { (void)hipFree(p_mem_); } + + void* p_mem_; +}; + +int main(int argc, char* argv[]) +{ + std::array in_lengths{G, N, C, Hi, Wi}; + std::array 
in_strides{N * Hi * Wi * C, Hi * Wi * C, 1, Wi * C, C}; + std::array weight_lengths{G, K, C, Y, X}; + std::array weight_strides{K * Y * X * C, Y * X * C, 1, X * C, C}; + std::array bias_lengths{G, N, K, Ho, Wo}; + std::array bias_strides{K, 0, 1, 0, 0}; + std::array out_lengths{G, N, K, Ho, Wo}; + std::array out_strides{N * Ho * Wo * K, Ho * Wo * K, 1, Wo * K, K}; + std::array in_left_pad{1, 1}; + std::array in_right_pad{1, 1}; + std::array conv_strides{2, 2}; + std::array conv_dilations{1, 1}; + + SimpleDeviceMem in(sizeof(InDataType) * N * Hi * Wi * C); + SimpleDeviceMem wei(sizeof(WeiDataType) * K * Y * X * C); + SimpleDeviceMem bias(sizeof(BiasDataType) * K * Y * X * C); + SimpleDeviceMem out(sizeof(OutDataType) * N * Ho * Wo * K); + + using DeviceOp = + ck::tensor_operation::device::DeviceGroupedConvFwdMultipleD, + OutLayout, + InDataType, + WeiDataType, + ck::Tuple, + OutDataType, + PassThrough, + PassThrough, + OutElementOp>; + // get device op instances + const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory< + DeviceOp>::GetInstances(); + + std::cout << "found " << op_ptrs.size() << " instances" << std::endl; + + std::string best_op_name; + int best_op_id = -1; + float best_avg_time = std::numeric_limits::max(); + float best_gb_per_sec = 0; + float best_tflops = 0; + + // profile device operation instances + std::cout << "Run all instances and do timing" << std::endl; + + for(int i = 0; i < op_ptrs.size(); ++i) + { + auto& op_ptr = op_ptrs[i]; + auto argument_ptr = op_ptr->MakeArgumentPointer(in.GetDeviceBuffer(), + wei.GetDeviceBuffer(), + {bias.GetDeviceBuffer()}, + out.GetDeviceBuffer(), + in_lengths, + in_strides, + weight_lengths, + weight_strides, + {bias_lengths}, + {bias_strides}, + out_lengths, + out_strides, + conv_strides, + conv_dilations, + in_left_pad, + in_right_pad, + PassThrough{}, + PassThrough{}, + OutElementOp{sacc, sz_inv, ActivationOp{}}); + + auto invoker_ptr = op_ptr->MakeInvokerPointer(); + 
std::string op_name = op_ptr->GetTypeString(); + + if(op_ptr->IsSupportedArgument(argument_ptr.get())) + { + float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true}); + + std::size_t flop = G * 2 * N * K * C * Ho * Wo * Y * X; + std::size_t num_bytes = + G * sizeof(InDataType) * N * Hi * Wi * C + G * sizeof(WeiDataType) * K * Y * X * C + + G * sizeof(BiasDataType) * K + G * sizeof(OutDataType) * N * Ho * Wo * K; + + float tflops = static_cast(flop) / 1.E9 / avg_time; + float gb_per_sec = num_bytes / 1.E6 / avg_time; + + std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << tflops << " TFlops, " + << gb_per_sec << " GB/s, " << op_name << std::endl; + + if(tflops > best_tflops) + { + best_op_id = i; + best_op_name = op_name; + best_avg_time = avg_time; + best_gb_per_sec = gb_per_sec; + best_tflops = tflops; + } + } + else + { + std::cout << op_name << " does not support this problem" << std::endl; + } + } + + // run the best intance + if(best_op_id != -1) + { + std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops + << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl; + + auto& op_ptr = op_ptrs[best_op_id]; + std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString() + << std::endl; + auto argument_ptr = op_ptr->MakeArgumentPointer(in.GetDeviceBuffer(), + wei.GetDeviceBuffer(), + {bias.GetDeviceBuffer()}, + out.GetDeviceBuffer(), + in_lengths, + in_strides, + weight_lengths, + weight_strides, + {bias_lengths}, + {bias_strides}, + out_lengths, + out_strides, + conv_strides, + conv_dilations, + in_left_pad, + in_right_pad, + PassThrough{}, + PassThrough{}, + OutElementOp{sacc, sz_inv, ActivationOp{}}); + + auto invoker_ptr = op_ptr->MakeInvokerPointer(); + + if(op_ptr->IsSupportedArgument(argument_ptr.get())) + { + invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false}); + } + + std::cout << "Done" << std::endl; + } + + return 0; +} \ No 
newline at end of file diff --git a/client_example/09_quantization/conv2d_fwd_perlayer_quantization.cpp b/client_example/09_quantization/conv2d_fwd_perlayer_quantization.cpp index daeff4ff4f..f7c46a95fe 100644 --- a/client_example/09_quantization/conv2d_fwd_perlayer_quantization.cpp +++ b/client_example/09_quantization/conv2d_fwd_perlayer_quantization.cpp @@ -24,15 +24,16 @@ using OutElementOp = ck::tensor_operation::element_wise::Activation_Mul_ClampMakeArgumentPointer(in.GetDeviceBuffer(), - wei.GetDeviceBuffer(), - {}, - out.GetDeviceBuffer(), - in_lengths, - in_strides, - weight_lengths, - weight_strides, - {}, - {}, - out_lengths, - out_strides, - conv_strides, - conv_dilations, - in_left_pad, - in_right_pad, - PassThrough{}, - PassThrough{}, - OutElementOp{0.5f, ActivationOp{}}); + auto& op_ptr = op_ptrs[i]; + auto argument_ptr = + op_ptr->MakeArgumentPointer(in.GetDeviceBuffer(), + wei.GetDeviceBuffer(), + {}, + out.GetDeviceBuffer(), + in_lengths, + in_strides, + weight_lengths, + weight_strides, + {}, + {}, + out_lengths, + out_strides, + conv_strides, + conv_dilations, + in_left_pad, + in_right_pad, + PassThrough{}, + PassThrough{}, + OutElementOp{requant_scale, ActivationOp{}}); auto invoker_ptr = op_ptr->MakeInvokerPointer(); std::string op_name = op_ptr->GetTypeString(); @@ -158,25 +160,26 @@ int main(int argc, char* argv[]) auto& op_ptr = op_ptrs[best_op_id]; std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString() << std::endl; - auto argument_ptr = op_ptr->MakeArgumentPointer(in.GetDeviceBuffer(), - wei.GetDeviceBuffer(), - {}, - out.GetDeviceBuffer(), - in_lengths, - in_strides, - weight_lengths, - weight_strides, - {}, - {}, - out_lengths, - out_strides, - conv_strides, - conv_dilations, - in_left_pad, - in_right_pad, - PassThrough{}, - PassThrough{}, - OutElementOp{0.5f, ActivationOp{}}); + auto argument_ptr = + op_ptr->MakeArgumentPointer(in.GetDeviceBuffer(), + wei.GetDeviceBuffer(), + {}, + out.GetDeviceBuffer(), + 
in_lengths, + in_strides, + weight_lengths, + weight_strides, + {}, + {}, + out_lengths, + out_strides, + conv_strides, + conv_dilations, + in_left_pad, + in_right_pad, + PassThrough{}, + PassThrough{}, + OutElementOp{requant_scale, ActivationOp{}}); auto invoker_ptr = op_ptr->MakeInvokerPointer(); diff --git a/example/40_conv2d_fwd_quantization/CMakeLists.txt b/example/40_conv2d_fwd_quantization/CMakeLists.txt index c3540d6ee6..0a314cd74c 100644 --- a/example/40_conv2d_fwd_quantization/CMakeLists.txt +++ b/example/40_conv2d_fwd_quantization/CMakeLists.txt @@ -14,3 +14,8 @@ add_example_executable(example_conv2d_fwd_xdl_bias_relu_perlayer_quantization_in add_example_executable(example_conv2d_fwd_dl_bias_relu_perchannel_quantization_int8 conv2d_fwd_dl_bias_relu_perchannel_quantization_int8.cpp) add_example_executable(example_conv2d_fwd_xdl_bias_relu_perchannel_quantization_int8 conv2d_fwd_xdl_bias_relu_perchannel_quantization_int8.cpp) +# Conv + bias + tanh perlayer quantization +add_example_executable(example_conv2d_fwd_dl_bias_tanh_perlayer_quantization_int8 conv2d_fwd_dl_bias_tanh_perlayer_quantization_int8.cpp) + +# Conv + bias + tanh perchannel quantization +add_example_executable(example_conv2d_fwd_dl_bias_tanh_perchannel_quantization_int8 conv2d_fwd_dl_bias_tanh_perchannel_quantization_int8.cpp) diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_relu_perchannel_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_relu_perchannel_quantization_int8.cpp index df10e80396..5c445d9c50 100644 --- a/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_relu_perchannel_quantization_int8.cpp +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_relu_perchannel_quantization_int8.cpp @@ -76,6 +76,10 @@ using DeviceGroupedConvNDFwdInstance = 5, // CThreadTransferSrcDstVectorDim 4>; // CThreadTransferDstScalarPerVector -#include "run_conv2d_fwd_bias_relu_perchannel_quantization_example.inc" +#include 
"run_conv2d_fwd_bias_perchannel_quantization_example.inc" -int main() { run_conv2d_fwd_bias_relu_perchannel_quantization_example(); }; +int main() +{ + const auto out_element_op = OutElementOp{ActivationOp{}}; + run_conv2d_fwd_bias_perchannel_quantization_example(out_element_op); +}; diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_relu_perlayer_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_relu_perlayer_quantization_int8.cpp index 18f9197b9c..0ff85f008f 100644 --- a/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_relu_perlayer_quantization_int8.cpp +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_relu_perlayer_quantization_int8.cpp @@ -74,6 +74,11 @@ using DeviceGroupedConvNDFwdInstance = 5, // CThreadTransferSrcDstVectorDim 4>; // CThreadTransferDstScalarPerVector -#include "run_conv2d_fwd_bias_relu_perlayer_quantization_example.inc" +#include "run_conv2d_fwd_bias_perlayer_quantization_example.inc" -int main() { run_conv2d_fwd_bias_relu_perlayer_quantization_example(); } +int main() +{ + float requant_scale = 0.5f; + const auto out_element_op = OutElementOp{requant_scale, ActivationOp{}}; + run_conv2d_fwd_bias_perlayer_quantization_example(out_element_op); +} diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_tanh_perchannel_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_tanh_perchannel_quantization_int8.cpp new file mode 100644 index 0000000000..f8f996d17e --- /dev/null +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_tanh_perchannel_quantization_int8.cpp @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "common.hpp" +#include "ck/tensor_operation/gpu/device/device_grouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp" + +using InDataType = int8_t; +using WeiDataType = int8_t; +using BiasDataType = int32_t; +using RequantScaleDataType = float; +using AccDataType = int32_t; +using OutDataType = int8_t; + +template +using S = ck::Sequence; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using InElementOp = PassThrough; +using WeiElementOp = PassThrough; +using ActivationOp = ck::tensor_operation::element_wise::TanH; +using OutElementOp = + ck::tensor_operation::element_wise::Add_Mul2_Activation_Mul_Clamp; + +static constexpr auto ConvSpec = + ck::tensor_operation::device::ConvolutionForwardSpecialization::Default; + +static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +template +using DeviceGroupedConvNDFwdInstance = + ck::tensor_operation::device::DeviceGroupedConvFwdDlMultipleD_NHWC_KYXC_NHWK< + NDimSpatial, + InDataType, + WeiDataType, + ck::Tuple, + OutDataType, + AccDataType, + InLayout, + WeiLayout, + ck::Tuple, + OutLayout, + InElementOp, + WeiElementOp, + OutElementOp, + ConvSpec, // ConvForwardSpecialization + GemmSpec, // GemmSpecialization + 256, // BlockSize + 128, // MPerBlock + 128, // NPerBlock + 16, // K0PerBlock + 4, // K1 + 4, // M1PerThread + 4, // N1PerThread + 1, // KPerThread + S<8, 2>, // M1N1ThreadClusterM1Xs + S<8, 2>, // M1N1ThreadClusterN1Xs + S<8, 1, 1, 4>, // ABlockTransferThreadSliceLengths_K0_M0_M1_K1 + S<2, 1, 128, 1>, // ABlockTransferThreadClusterLengths_K0_M0_M1_K1 + S<1, 2, 0, 3>, // ABlockTransferThreadClusterArrangeOrder + S<1, 2, 0, 3>, // ABlockTransferSrcAccessOrder + S<4, 1, 1, 4>, // ABlockTransferSrcVectorTensorLengths_K0_M0_M1_K1 + S<1, 2, 0, 3>, // ABlockTransferSrcVectorTensorContiguousDimOrder + S<1, 1, 1, 4>, // ABlockTransferDstVectorTensorLengths_K0_M0_M1_K1 + S<8, 1, 1, 4>, // BBlockTransferThreadSliceLengths_K0_N0_N1_K1 + S<2, 1, 128, 1>, 
// BBlockTransferThreadClusterLengths_K0_N0_N1_K1 + S<1, 2, 0, 3>, // BBlockTransferThreadClusterArrangeOrder + S<1, 2, 0, 3>, // BBlockTransferSrcAccessOrder + S<4, 1, 1, 4>, // BBlockTransferSrcVectorTensorLengths_K0_N0_N1_K1 + S<1, 2, 0, 3>, // BBlockTransferSrcVectorTensorContiguousDimOrder + S<1, 1, 1, 4>, // BBlockTransferDstVectorTensorLengths_K0_N0_N1_K1 + S<0, 1, 2, 3, 4, 5>, // CThreadTransferSrcDstAccessOrder + 5, // CThreadTransferSrcDstVectorDim + 4>; // CThreadTransferDstScalarPerVector + +#include "run_conv2d_fwd_bias_perchannel_quantization_example.inc" + +int main() +{ + float scale_z_inv = 0.5f; + const auto out_element_op = OutElementOp{scale_z_inv, ActivationOp{}}; + run_conv2d_fwd_bias_perchannel_quantization_example(out_element_op); +}; diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_tanh_perlayer_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_tanh_perlayer_quantization_int8.cpp new file mode 100644 index 0000000000..3b25fec0c4 --- /dev/null +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_bias_tanh_perlayer_quantization_int8.cpp @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include "common.hpp" +#include "ck/tensor_operation/gpu/device/device_grouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp" + +using InDataType = int8_t; +using WeiDataType = int8_t; +using BiasDataType = int32_t; +using AccDataType = int32_t; +using OutDataType = int8_t; + +template +using S = ck::Sequence; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using InElementOp = PassThrough; +using WeiElementOp = PassThrough; +using ActivationOp = ck::tensor_operation::element_wise::TanH; +using OutElementOp = ck::tensor_operation::element_wise::Add_Mul_Activation_Mul_Clamp; + +static constexpr auto ConvSpec = + ck::tensor_operation::device::ConvolutionForwardSpecialization::Default; + +static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +template +using DeviceGroupedConvNDFwdInstance = + ck::tensor_operation::device::DeviceGroupedConvFwdDlMultipleD_NHWC_KYXC_NHWK< + NDimSpatial, + InDataType, + WeiDataType, + ck::Tuple, + OutDataType, + AccDataType, + InLayout, + WeiLayout, + ck::Tuple, + OutLayout, + InElementOp, + WeiElementOp, + OutElementOp, + ConvSpec, // ConvForwardSpecialization + GemmSpec, // GemmSpecialization + 256, // BlockSize + 128, // MPerBlock + 128, // NPerBlock + 16, // K0PerBlock + 4, // K1 + 4, // M1PerThread + 4, // N1PerThread + 1, // KPerThread + S<8, 2>, // M1N1ThreadClusterM1Xs + S<8, 2>, // M1N1ThreadClusterN1Xs + S<8, 1, 1, 4>, // ABlockTransferThreadSliceLengths_K0_M0_M1_K1 + S<2, 1, 128, 1>, // ABlockTransferThreadClusterLengths_K0_M0_M1_K1 + S<1, 2, 0, 3>, // ABlockTransferThreadClusterArrangeOrder + S<1, 2, 0, 3>, // ABlockTransferSrcAccessOrder + S<4, 1, 1, 4>, // ABlockTransferSrcVectorTensorLengths_K0_M0_M1_K1 + S<1, 2, 0, 3>, // ABlockTransferSrcVectorTensorContiguousDimOrder + S<1, 1, 1, 4>, // ABlockTransferDstVectorTensorLengths_K0_M0_M1_K1 + S<8, 1, 1, 4>, // BBlockTransferThreadSliceLengths_K0_N0_N1_K1 + S<2, 1, 128, 1>, // 
BBlockTransferThreadClusterLengths_K0_N0_N1_K1 + S<1, 2, 0, 3>, // BBlockTransferThreadClusterArrangeOrder + S<1, 2, 0, 3>, // BBlockTransferSrcAccessOrder + S<4, 1, 1, 4>, // BBlockTransferSrcVectorTensorLengths_K0_N0_N1_K1 + S<1, 2, 0, 3>, // BBlockTransferSrcVectorTensorContiguousDimOrder + S<1, 1, 1, 4>, // BBlockTransferDstVectorTensorLengths_K0_N0_N1_K1 + S<0, 1, 2, 3, 4, 5>, // CThreadTransferSrcDstAccessOrder + 5, // CThreadTransferSrcDstVectorDim + 4>; // CThreadTransferDstScalarPerVector + +#include "run_conv2d_fwd_bias_perlayer_quantization_example.inc" + +int main() +{ + float scale_acc = 0.5f; + float scale_z_inv = 0.5f; + const auto out_element_op = OutElementOp{scale_z_inv, scale_acc, ActivationOp{}}; + run_conv2d_fwd_bias_perlayer_quantization_example(out_element_op); +} diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_perchannel_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_perchannel_quantization_int8.cpp index afff7f8b69..a98a1e240b 100644 --- a/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_perchannel_quantization_int8.cpp +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_perchannel_quantization_int8.cpp @@ -76,4 +76,8 @@ using DeviceGroupedConvNDFwdInstance = #include "run_conv2d_fwd_perchannel_quantization_example.inc" -int main() { run_conv2d_fwd_perchannel_quantization_example(); } +int main() +{ + const auto out_element_op = OutElementOp{ActivationOp{}}; + run_conv2d_fwd_perchannel_quantization_example(out_element_op); +} diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_perlayer_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_perlayer_quantization_int8.cpp index a38fe2a6c3..262594d58b 100644 --- a/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_perlayer_quantization_int8.cpp +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_dl_perlayer_quantization_int8.cpp @@ -71,4 +71,9 @@ using DeviceGroupedConvNDFwdInstance = #include 
"run_conv2d_fwd_perlayer_quantization_example.inc" -int main() { run_conv2d_fwd_perlayer_quantization_example(); } +int main() +{ + float requant_scale = 0.5f; + const auto out_element_op = OutElementOp{requant_scale, ActivationOp{}}; + run_conv2d_fwd_perlayer_quantization_example(out_element_op); +} diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perchannel_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perchannel_quantization_int8.cpp index ba6990d938..6b22055053 100644 --- a/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perchannel_quantization_int8.cpp +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perchannel_quantization_int8.cpp @@ -80,6 +80,10 @@ using DeviceGroupedConvNDFwdInstance = S<1, 64, 1, 4>, 8>; -#include "run_conv2d_fwd_bias_relu_perchannel_quantization_example.inc" +#include "run_conv2d_fwd_bias_perchannel_quantization_example.inc" -int main() { run_conv2d_fwd_bias_relu_perchannel_quantization_example(); }; +int main() +{ + const auto out_element_op = OutElementOp{ActivationOp{}}; + run_conv2d_fwd_bias_perchannel_quantization_example(out_element_op); +}; diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perlayer_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perlayer_quantization_int8.cpp index 690d70e112..1ac8679743 100644 --- a/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perlayer_quantization_int8.cpp +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_bias_relu_perlayer_quantization_int8.cpp @@ -78,6 +78,11 @@ using DeviceGroupedConvNDFwdInstance = S<1, 64, 1, 4>, 8>; -#include "run_conv2d_fwd_bias_relu_perlayer_quantization_example.inc" +#include "run_conv2d_fwd_bias_perlayer_quantization_example.inc" -int main() { run_conv2d_fwd_bias_relu_perlayer_quantization_example(); } +int main() +{ + float requant_scale = 0.5f; + const auto out_element_op = 
OutElementOp{requant_scale, ActivationOp{}}; + run_conv2d_fwd_bias_perlayer_quantization_example(out_element_op); +} diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_perchannel_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_perchannel_quantization_int8.cpp index dd755ff065..f28abe5ebc 100644 --- a/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_perchannel_quantization_int8.cpp +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_perchannel_quantization_int8.cpp @@ -80,4 +80,8 @@ using DeviceGroupedConvNDFwdInstance = #include "run_conv2d_fwd_perchannel_quantization_example.inc" -int main() { run_conv2d_fwd_perchannel_quantization_example(); } +int main() +{ + const auto out_element_op = OutElementOp{ActivationOp{}}; + run_conv2d_fwd_perchannel_quantization_example(out_element_op); +} diff --git a/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_perlayer_quantization_int8.cpp b/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_perlayer_quantization_int8.cpp index 48617e4775..f468e8adcd 100644 --- a/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_perlayer_quantization_int8.cpp +++ b/example/40_conv2d_fwd_quantization/conv2d_fwd_xdl_perlayer_quantization_int8.cpp @@ -75,4 +75,9 @@ using DeviceGroupedConvNDFwdInstance = #include "run_conv2d_fwd_perlayer_quantization_example.inc" -int main() { run_conv2d_fwd_perlayer_quantization_example(); } +int main() +{ + float requant_scale = 0.5f; + const auto out_element_op = OutElementOp{requant_scale, ActivationOp{}}; + run_conv2d_fwd_perlayer_quantization_example(out_element_op); +} diff --git a/example/40_conv2d_fwd_quantization/run_conv2d_fwd_bias_relu_perchannel_quantization_example.inc b/example/40_conv2d_fwd_quantization/run_conv2d_fwd_bias_perchannel_quantization_example.inc similarity index 98% rename from example/40_conv2d_fwd_quantization/run_conv2d_fwd_bias_relu_perchannel_quantization_example.inc rename to 
example/40_conv2d_fwd_quantization/run_conv2d_fwd_bias_perchannel_quantization_example.inc index 822a1ed8b5..1587c614da 100644 --- a/example/40_conv2d_fwd_quantization/run_conv2d_fwd_bias_relu_perchannel_quantization_example.inc +++ b/example/40_conv2d_fwd_quantization/run_conv2d_fwd_bias_perchannel_quantization_example.inc @@ -167,7 +167,7 @@ bool run_grouped_conv_fwd(bool do_verification, return (pass ? 0 : 1); } -int run_conv2d_fwd_bias_relu_perchannel_quantization_example() +int run_conv2d_fwd_bias_perchannel_quantization_example(const OutElementOp& out_element_op) { bool do_verification = true; bool time_kernel = true; @@ -189,7 +189,6 @@ int run_conv2d_fwd_bias_relu_perchannel_quantization_example() const auto in_element_op = InElementOp{}; const auto wei_element_op = WeiElementOp{}; - const auto out_element_op = OutElementOp{ActivationOp{}}; using InLayout = ck::tensor_layout::convolution::GNHWC; using WeiLayout = ck::tensor_layout::convolution::GKYXC; diff --git a/example/40_conv2d_fwd_quantization/run_conv2d_fwd_bias_relu_perlayer_quantization_example.inc b/example/40_conv2d_fwd_quantization/run_conv2d_fwd_bias_perlayer_quantization_example.inc similarity index 98% rename from example/40_conv2d_fwd_quantization/run_conv2d_fwd_bias_relu_perlayer_quantization_example.inc rename to example/40_conv2d_fwd_quantization/run_conv2d_fwd_bias_perlayer_quantization_example.inc index 00cbaa09ee..455e0804d4 100644 --- a/example/40_conv2d_fwd_quantization/run_conv2d_fwd_bias_relu_perlayer_quantization_example.inc +++ b/example/40_conv2d_fwd_quantization/run_conv2d_fwd_bias_perlayer_quantization_example.inc @@ -155,7 +155,7 @@ bool run_grouped_conv_fwd(bool do_verification, return (pass ? 
0 : 1); } -int run_conv2d_fwd_bias_relu_perlayer_quantization_example() +int run_conv2d_fwd_bias_perlayer_quantization_example(const OutElementOp& out_element_op) { bool do_verification = true; bool time_kernel = true; @@ -177,7 +177,6 @@ int run_conv2d_fwd_bias_relu_perlayer_quantization_example() const auto in_element_op = InElementOp{}; const auto wei_element_op = WeiElementOp{}; - const auto out_element_op = OutElementOp{0.5f, ActivationOp{}}; using InLayout = ck::tensor_layout::convolution::GNHWC; using WeiLayout = ck::tensor_layout::convolution::GKYXC; diff --git a/example/40_conv2d_fwd_quantization/run_conv2d_fwd_perchannel_quantization_example.inc b/example/40_conv2d_fwd_quantization/run_conv2d_fwd_perchannel_quantization_example.inc index 2e0623028d..8e75c27746 100644 --- a/example/40_conv2d_fwd_quantization/run_conv2d_fwd_perchannel_quantization_example.inc +++ b/example/40_conv2d_fwd_quantization/run_conv2d_fwd_perchannel_quantization_example.inc @@ -157,7 +157,7 @@ bool run_grouped_conv_fwd(bool do_verification, return (pass ? 
0 : 1); } -int run_conv2d_fwd_perchannel_quantization_example() +int run_conv2d_fwd_perchannel_quantization_example(const OutElementOp& out_element_op) { bool do_verification = true; bool time_kernel = true; @@ -179,7 +179,6 @@ int run_conv2d_fwd_perchannel_quantization_example() const auto in_element_op = InElementOp{}; const auto wei_element_op = WeiElementOp{}; - const auto out_element_op = OutElementOp{ActivationOp{}}; using InLayout = ck::tensor_layout::convolution::GNHWC; using WeiLayout = ck::tensor_layout::convolution::GKYXC; diff --git a/example/40_conv2d_fwd_quantization/run_conv2d_fwd_perlayer_quantization_example.inc b/example/40_conv2d_fwd_quantization/run_conv2d_fwd_perlayer_quantization_example.inc index aeccb30cf2..926c033c58 100644 --- a/example/40_conv2d_fwd_quantization/run_conv2d_fwd_perlayer_quantization_example.inc +++ b/example/40_conv2d_fwd_quantization/run_conv2d_fwd_perlayer_quantization_example.inc @@ -139,7 +139,7 @@ bool run_grouped_conv_fwd(bool do_verification, return (pass ? 
0 : 1); } -int run_conv2d_fwd_perlayer_quantization_example() +int run_conv2d_fwd_perlayer_quantization_example(const OutElementOp& out_element_op) { bool do_verification = true; bool time_kernel = false; @@ -161,7 +161,6 @@ int run_conv2d_fwd_perlayer_quantization_example() const auto in_element_op = InElementOp{}; const auto wei_element_op = WeiElementOp{}; - const auto out_element_op = OutElementOp{0.5f, ActivationOp{}}; using InLayout = ck::tensor_layout::convolution::GNHWC; using WeiLayout = ck::tensor_layout::convolution::GKYXC; diff --git a/include/ck/tensor_operation/gpu/element/quantization_operation.hpp b/include/ck/tensor_operation/gpu/element/quantization_operation.hpp index 7ea09a2220..fefa6c793f 100644 --- a/include/ck/tensor_operation/gpu/element/quantization_operation.hpp +++ b/include/ck/tensor_operation/gpu/element/quantization_operation.hpp @@ -7,10 +7,30 @@ namespace ck { namespace tensor_operation { namespace element_wise { +// Y = Sy * Qy +// W = Sw * Qw +// X = Sx * Qx +// B = Sb * Qb = Sw * Sx * Qb +// Where X, W, Y are float32, Qx, Qw, Qy are int8 +// Sx, Sw, Sy are scale of x, w, y (float32), which is calculated from quantization range +// Qb is int32, scale of B is Sw * Sx for convenient + +// Y = W @ X, where @ is convolution or matrix multiplication +// Sy * Qy = Sw * Qw @ Sx * Qx +// Qy = [(Sw*Sx)/Sy] * Qw @ Qx + // For Activation function which is piecewise linear function, such as relu, leaky relu ...etc +// Activation(Sy * Qy) = Sy * Activation(Qy) template struct Activation_Mul_Clamp { + // Convolution + Activation (piecewise linear function) + // If an activation is piecewise linear function, then Activation(Sy * Qy) = Sy * Activation(Qy) + // Z = Activation(Y) = Activation(W @ X) + // Sz * Qz = Activation(Sy * Qy) + // Qz = Sy / Sz * Activation(Qy) = (Sw * Sx / Sz) * Activation(Qw @ Qx) + + // requantScale_ = Sw * Sx / Sz Activation_Mul_Clamp(float requantScale, Activation activationOp) : requantScale_(requantScale), 
activationOp_(activationOp) { @@ -45,8 +65,39 @@ struct Activation_Mul_Clamp Activation activationOp_; }; +// For Activation function which is non piecewise linear function, such as TanH, Sigmoid ...etc +// If an activation is not piecewise linear function +// then Activation(Sy * Qy) != Sy * Activation(Qy) +template +struct Mul_Activation_Mul_Clamp +{ + // Convolution + Activation (non piecewise linear function) + // Z = Activation(Y) = Activation(W @ X) + // Sz * Qz = Activation(Sy * Qy) + // Qz = S1 * Activation[Sacc * (Qw @ Qx)] + // Where S1 = 1 / Sz, Sacc = Sw * Sx + Mul_Activation_Mul_Clamp(float scale_z_inv, float scaleAcc, Activation activationOp) + : scale_z_inv_(scale_z_inv), scaleAcc_(scaleAcc), activationOp_(activationOp) + { + } + + __host__ __device__ constexpr void operator()(int8_t& y, const int32_t& x) const + { + float y_fp32 = ck::type_convert(x); + y_fp32 = scaleAcc_ * y_fp32; + activationOp_(y_fp32, y_fp32); + y_fp32 = math::clamp(scale_z_inv_ * y_fp32, -128.f, 127.f); + y = ck::type_convert(y_fp32); + } + + float scale_z_inv_; + float scaleAcc_; + Activation activationOp_; +}; + // Conv Perchannel quantization + Activation function which is piecewise linear function, such as // relu, leaky relu ...etc +// Activation(Sy * Qy) = Sy * Activation(Qy) template struct Activation_Mul2_Clamp { @@ -76,9 +127,20 @@ struct Activation_Mul2_Clamp }; // For Activation function which is piecewise linear function, such as relu, leaky relu ...etc +// Activation(Sy * Qy) = Sy * Activation(Qy) template struct Add_Activation_Mul_Clamp { + // Convolution + bias + // Let Bias = B = Sw * Sx * Qb + // Where Qb is int32 + // Y = W @ X + B + // Sy * Qy = Sw * Qw @ Sx * Qx + Sw * Sx * Qb + // Qy = [(Sw*Sx)/Sy] * (Qw @ Qx + Qb) + + // For activation, Z = Activaiton(Y) + // Sz * Qz = Activation(Sy * Qy) + // Qz = Sy / Sz * Activation(Qy) = [(Sw*Sx)/Sz] * Activation(Qw @ Qx + Qb) Add_Activation_Mul_Clamp(float requantScale, Activation activationOp) : 
requantScale_(requantScale), activationOp_(activationOp) { @@ -139,11 +201,18 @@ struct Add_Activation_Mul2_Clamp }; // For Activation function which is non piecewise linear function, such as TanH, Sigmoid ...etc +// If an activation is not piecewise linear function +// then Activation(Sy * Qy) != Sy * Activation(Qy) template struct Add_Mul_Activation_Mul_Clamp { - Add_Mul_Activation_Mul_Clamp(float requantScale1, float requantScale2, Activation activationOp) - : requantScale1_(requantScale1), requantScale2_(requantScale2), activationOp_(activationOp) + // Convolution + Activation (non piecewise linear function) + // Z = Activation(Y) = Activation(W @ X + B) + // Sz * Qz = Activation(Sy * Qy) + // Qz = S1 * Activation[Sacc * (Qw @ Qx + Qb)] + // Where S1 = 1 / Sz, Sacc = Sw * Sx + Add_Mul_Activation_Mul_Clamp(float scale_z_inv, float scaleAcc, Activation activationOp) + : scale_z_inv_(scale_z_inv), scaleAcc_(scaleAcc), activationOp_(activationOp) { } @@ -151,14 +220,64 @@ struct Add_Mul_Activation_Mul_Clamp operator()(int8_t& y, const int32_t& x, const int32_t& bias) const { float y_fp32 = ck::type_convert(x + bias); - y_fp32 = requantScale1_ * y_fp32; + y_fp32 = scaleAcc_ * y_fp32; activationOp_(y_fp32, y_fp32); - y_fp32 = math::clamp(requantScale2_ * y_fp32, -128.f, 127.f); + y_fp32 = math::clamp(scale_z_inv_ * y_fp32, -128.f, 127.f); y = ck::type_convert(y_fp32); } - float requantScale1_; - float requantScale2_; + __host__ __device__ constexpr void + operator()(int32_t& y, const int32_t& x, const int32_t& bias) const + { + // CAUSION - We might type_convert to int8 in threadwise copy + // eg. 
GridwiseGemmDlMultipleD_km_kn_mn + float y_fp32 = ck::type_convert(x + bias); + y_fp32 = scaleAcc_ * y_fp32; + activationOp_(y_fp32, y_fp32); + y_fp32 = math::clamp(scale_z_inv_ * y_fp32, -128.f, 127.f); + y = ck::type_convert(y_fp32); + } + + float scale_z_inv_; + float scaleAcc_; + Activation activationOp_; +}; + +// Conv Perchannel quantization + Activation function which is non piecewise linear function, +// such as TanH, Sigmoid ...etc +// If an activation is not piecewise linear function +// then Activation(Sy *Qy) != Sy * Activation(Qy) +template +struct Add_Mul2_Activation_Mul_Clamp +{ + Add_Mul2_Activation_Mul_Clamp(float scale_z_inv, Activation activationOp) + : scale_z_inv_(scale_z_inv), activationOp_(activationOp) + { + } + + __host__ __device__ constexpr void + operator()(int8_t& y, const int32_t& x, const int32_t& bias, const float& scaleAcc) const + { + float y_fp32 = ck::type_convert(x + bias); + y_fp32 = scaleAcc * y_fp32; + activationOp_(y_fp32, y_fp32); + y_fp32 = math::clamp(scale_z_inv_ * y_fp32, -128.f, 127.f); + y = ck::type_convert(y_fp32); + } + + __host__ __device__ constexpr void + operator()(int32_t& y, const int32_t& x, const int32_t& bias, const float& scaleAcc) const + { + // CAUSION - We might type_convert to int8 in threadwise copy + // eg. 
GridwiseGemmDlMultipleD_km_kn_mn + float y_fp32 = ck::type_convert(x + bias); + y_fp32 = scaleAcc * y_fp32; + activationOp_(y_fp32, y_fp32); + y_fp32 = math::clamp(scale_z_inv_ * y_fp32, -128.f, 127.f); + y = ck::type_convert(y_fp32); + } + + float scale_z_inv_; Activation activationOp_; }; diff --git a/include/ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp b/include/ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp index 6b4df3b60e..f1f3042ad1 100644 --- a/include/ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp +++ b/include/ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp @@ -320,6 +320,19 @@ struct Sigmoid int32_t divider_ = 1; }; +struct TanH +{ + template + __host__ __device__ void operator()(T& y, const T& x) const + { + static_assert(is_same::value || is_same::value || + is_same::value, + "Data type is not supported by this operation!"); + + y = ck::math::tanh(x); + }; +}; + } // namespace element_wise } // namespace tensor_operation } // namespace ck diff --git a/include/ck/utility/math_v2.hpp b/include/ck/utility/math_v2.hpp index 4febace0b8..a3732b2fe0 100644 --- a/include/ck/utility/math_v2.hpp +++ b/include/ck/utility/math_v2.hpp @@ -92,6 +92,15 @@ static inline __host__ float sqrt(float x) { return std::sqrt(x); }; static inline __host__ double sqrt(double x) { return std::sqrt(x); }; +static inline __host__ half_t tanh(half_t x) +{ + return static_cast(std::tanh(static_cast(x))); +}; + +static inline __host__ float tanh(float x) { return std::tanh(x); }; + +static inline __host__ double tanh(double x) { return std::tanh(x); }; + // math functions for the HIP kernel, some are implemented by calling hip builtin functions static inline __device__ float abs(float x) { return ::abs(x); }; @@ -172,5 +181,14 @@ static inline __device__ float sqrt(float x) { return __builtin_amdgcn_sqrtf(x); static inline __device__ double sqrt(double x) { return __builtin_amdgcn_sqrt(x); }; +static inline 
__device__ half_t tanh(half_t x) +{ + return static_cast(::tanhf(static_cast(x))); +}; + +static inline __device__ float tanh(float x) { return ::tanhf(x); }; + +static inline __device__ double tanh(double x) { return ::tanh(x); }; + } // namespace math } // namespace ck diff --git a/library/include/ck/library/tensor_operation_instance/device_operation_instance_factory.hpp b/library/include/ck/library/tensor_operation_instance/device_operation_instance_factory.hpp index 104b21a3ec..0bde4919af 100644 --- a/library/include/ck/library/tensor_operation_instance/device_operation_instance_factory.hpp +++ b/library/include/ck/library/tensor_operation_instance/device_operation_instance_factory.hpp @@ -85,6 +85,7 @@ using GK_GK_Tuple = ck::Tuple; // pointwise functor using PassThrough = ck::tensor_operation::element_wise::PassThrough; using Relu = ck::tensor_operation::element_wise::Relu; +using TanH = ck::tensor_operation::element_wise::TanH; using Scale = ck::tensor_operation::element_wise::Scale; using Bilinear = ck::tensor_operation::element_wise::Bilinear; using AddAddFastGelu = ck::tensor_operation::element_wise::AddAddFastGelu; @@ -102,6 +103,10 @@ template using Add_Activation_Mul_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul_Clamp; +template +using Add_Mul_Activation_Mul_Clamp = + ck::tensor_operation::element_wise::Add_Mul_Activation_Mul_Clamp; + template using Activation_Mul2_Clamp = ck::tensor_operation::element_wise::Activation_Mul2_Clamp; @@ -109,6 +114,10 @@ template using Add_Activation_Mul2_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul2_Clamp; +template +using Add_Mul2_Activation_Mul_Clamp = + ck::tensor_operation::element_wise::Add_Mul2_Activation_Mul_Clamp; + template struct DeviceOperationInstanceFactory; diff --git a/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perchannel_quantization.hpp 
b/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perchannel_quantization.hpp index 57c971e52e..793dc8d04a 100644 --- a/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perchannel_quantization.hpp +++ b/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perchannel_quantization.hpp @@ -49,6 +49,22 @@ void add_device_conv2d_dl_bias_relu_perchannel_quantization_int8_instances( Add_Activation_Mul2_Clamp>>>& instances); +void add_device_conv2d_dl_bias_tanh_perchannel_quantization_int8_instances( + std::vector< + std::unique_ptr>>>& + instances); + void add_device_conv2d_xdl_bias_perchannel_quantization_int8_instances( std::vector< std::unique_ptr>>>& instances); +void add_device_conv2d_xdl_bias_tanh_perchannel_quantization_int8_instances( + std::vector< + std::unique_ptr>>>& + instances); + +// piecewise activation function template +struct DeviceOperationInstanceFactory>> +{ + using DeviceOp = DeviceGroupedConvFwdMultipleD>; + + static auto GetInstances() + { + std::vector> op_ptrs; + + if constexpr(NumDimSpatial == 2 && is_same_v && + is_same_v && is_same_v && + is_same_v) + { + if constexpr(is_same_v && is_same_v && + is_same_v && is_same_v) + { + if constexpr(is_same_v) + { + add_device_conv2d_dl_bias_tanh_perchannel_quantization_int8_instances(op_ptrs); + add_device_conv2d_xdl_bias_tanh_perchannel_quantization_int8_instances(op_ptrs); + } + } + } + + return op_ptrs; + } +}; + } // namespace instance } // namespace device } // namespace tensor_operation diff --git a/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perlayer_quantization.hpp b/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perlayer_quantization.hpp index 9f8ac9b7b1..c570f76750 100644 --- 
a/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perlayer_quantization.hpp +++ b/library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perlayer_quantization.hpp @@ -49,6 +49,21 @@ void add_device_conv2d_dl_bias_relu_perlayer_quantization_int8_instances( Add_Activation_Mul_Clamp>>>& instances); +void add_device_conv2d_dl_bias_tanh_perlayer_quantization_int8_instances( + std::vector>>>& + instances); + void add_device_conv2d_xdl_bias_perlayer_quantization_int8_instances( std::vector< std::unique_ptr>>>& instances); +void add_device_conv2d_xdl_bias_tanh_perlayer_quantization_int8_instances( + std::vector>>>& + instances); + +// piecewise activation function template +struct DeviceOperationInstanceFactory>> +{ + using DeviceOp = DeviceGroupedConvFwdMultipleD>; + + static auto GetInstances() + { + std::vector> op_ptrs; + + if constexpr(NumDimSpatial == 2 && is_same_v && + is_same_v && is_same_v && + is_same_v) + { + if constexpr(is_same_v && is_same_v && + is_same_v && is_same_v) + { + if constexpr(is_same_v) + { + add_device_conv2d_dl_bias_tanh_perlayer_quantization_int8_instances(op_ptrs); + add_device_conv2d_xdl_bias_tanh_perlayer_quantization_int8_instances(op_ptrs); + } + } + } + + return op_ptrs; + } +}; + } // namespace instance } // namespace device } // namespace tensor_operation diff --git a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/conv2d_quantization_common.hpp b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/conv2d_quantization_common.hpp index 7729e42638..b231f8c956 100644 --- a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/conv2d_quantization_common.hpp +++ b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/conv2d_quantization_common.hpp @@ -25,6 +25,7 @@ using GNHWK = ck::tensor_layout::convolution::GNHWK; using GK = ck::tensor_layout::convolution::G_K; using PassThrough = 
ck::tensor_operation::element_wise::PassThrough; using Relu = ck::tensor_operation::element_wise::Relu; +using TanH = ck::tensor_operation::element_wise::TanH; using GK_Tuple = ck::Tuple; using GK_GK_Tuple = ck::Tuple; @@ -32,17 +33,25 @@ using I32_Tuple = ck::Tuple; using F32_Tuple = ck::Tuple; using I32_F32_Tuple = ck::Tuple; +// perlayer using Mul_Clamp = ck::tensor_operation::element_wise::Activation_Mul_Clamp; using Relu_Mul_Clamp = ck::tensor_operation::element_wise::Activation_Mul_Clamp; +// bias + perlayer using Add_Mul_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul_Clamp; using Add_Relu_Mul_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul_Clamp; +using Add_Mul_TanH_Mul_Clamp = + ck::tensor_operation::element_wise::Add_Mul_Activation_Mul_Clamp; +// perchannel using Mul2_Clamp = ck::tensor_operation::element_wise::Activation_Mul2_Clamp; using Relu_Mul2_Clamp = ck::tensor_operation::element_wise::Activation_Mul2_Clamp; +// bias + perchannel using Add_Mul2_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul2_Clamp; using Add_Relu_Mul2_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul2_Clamp; +using Add_Mul2_TanH_Mul_Clamp = + ck::tensor_operation::element_wise::Add_Mul2_Activation_Mul_Clamp; static constexpr ck::index_t NDimSpatial = 2; static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding; diff --git a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perchannel_quantization_int8_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perchannel_quantization_int8_instance.cpp index ba2451101e..ae5c1d7c32 100644 --- a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perchannel_quantization_int8_instance.cpp +++ b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perchannel_quantization_int8_instance.cpp @@ 
-76,6 +76,42 @@ void add_device_conv2d_dl_bias_relu_perchannel_quantization_int8_instances( ConvFwd1x1S1P0, 4>{}); } + +void add_device_conv2d_dl_bias_tanh_perchannel_quantization_int8_instances( + std::vector>>& instances) +{ + // dl + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); +} + } // namespace instance } // namespace device } // namespace tensor_operation diff --git a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perlayer_quantization_int8_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perlayer_quantization_int8_instance.cpp index ea1c953bb2..192d5c9a55 100644 --- a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perlayer_quantization_int8_instance.cpp +++ b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perlayer_quantization_int8_instance.cpp @@ -76,6 +76,43 @@ void add_device_conv2d_dl_bias_relu_perlayer_quantization_int8_instances( ConvFwd1x1S1P0, 4>{}); } + +void add_device_conv2d_dl_bias_tanh_perlayer_quantization_int8_instances( + std::vector>>& instances) +{ + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); + + add_device_operation_instances(instances, + device_grouped_conv2d_dl_int8_instances{}); +} + } // namespace instance } // namespace device } // namespace tensor_operation diff --git a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perchannel_quantization_int8_instance.cpp 
b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perchannel_quantization_int8_instance.cpp index 25e2cda9cb..b6e8ee1590 100644 --- a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perchannel_quantization_int8_instance.cpp +++ b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perchannel_quantization_int8_instance.cpp @@ -74,6 +74,41 @@ void add_device_conv2d_xdl_bias_relu_perchannel_quantization_int8_instances( ConvFwd1x1S1P0, 8>{}); } + +void add_device_conv2d_xdl_bias_tanh_perchannel_quantization_int8_instances( + std::vector>>& instances) +{ + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); +} + } // namespace instance } // namespace device } // namespace tensor_operation diff --git a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp index d598d3d38e..70f92cec3a 100644 --- a/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp +++ b/library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp @@ -76,6 +76,43 @@ void add_device_conv2d_xdl_bias_relu_perlayer_quantization_int8_instances( ConvFwd1x1S1P0, 8>{}); } + +void add_device_conv2d_xdl_bias_tanh_perlayer_quantization_int8_instances( + std::vector>>& instances) +{ + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); + + add_device_operation_instances(instances, + 
device_grouped_conv2d_xdl_int8_instances{}); + + add_device_operation_instances(instances, + device_grouped_conv2d_xdl_int8_instances{}); +} + } // namespace instance } // namespace device } // namespace tensor_operation From dbd8f94bef7882bbba2dcabd888d5754a628772f Mon Sep 17 00:00:00 2001 From: Rostyslav Geyyer <46627076+geyyer@users.noreply.github.com> Date: Wed, 29 Mar 2023 15:05:32 -0500 Subject: [PATCH 28/31] Add a denorm test fix (#603) * Add type_convert implementations for bf16 * Add the fix for conv_fwd * Add the fix for conv_bwd_data * Add the fix for conv_bwd_weight * Format * Format * Another format * Add a macro to use workaround on MI200 only * Format --------- Co-authored-by: Rosty Geyyer Co-authored-by: zjing14 --- .../grouped_conv_fwd_xdl_fp16.cpp | 2 +- .../device_grouped_conv_fwd_multiple_d.hpp | 2 +- ...ouped_conv_fwd_multiple_d_xdl_cshuffle.hpp | 2 +- .../gridwise_gemm_multiple_d_xdl_cshuffle.hpp | 26 +++++--- .../gpu/grid/gridwise_gemm_pipeline_v1.hpp | 2 +- .../grid/gridwise_gemm_xdlops_bwd_weight.hpp | 60 ++++++++++--------- .../gpu/grid/gridwise_gemm_xdlops_v2r3.hpp | 44 ++++++++------ .../threadwise_tensor_slice_transfer_v3r1.hpp | 2 +- include/ck/utility/data_type.hpp | 56 ++++++++++++++++- 9 files changed, 138 insertions(+), 58 deletions(-) diff --git a/example/30_grouped_conv_fwd_multiple_d/grouped_conv_fwd_xdl_fp16.cpp b/example/30_grouped_conv_fwd_multiple_d/grouped_conv_fwd_xdl_fp16.cpp index 6de1daa3d4..498eda2442 100644 --- a/example/30_grouped_conv_fwd_multiple_d/grouped_conv_fwd_xdl_fp16.cpp +++ b/example/30_grouped_conv_fwd_multiple_d/grouped_conv_fwd_xdl_fp16.cpp @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. +// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved. 
#include "common.hpp" diff --git a/include/ck/tensor_operation/gpu/device/device_grouped_conv_fwd_multiple_d.hpp b/include/ck/tensor_operation/gpu/device/device_grouped_conv_fwd_multiple_d.hpp index 1e2f81915d..1cc30fd9e6 100644 --- a/include/ck/tensor_operation/gpu/device/device_grouped_conv_fwd_multiple_d.hpp +++ b/include/ck/tensor_operation/gpu/device/device_grouped_conv_fwd_multiple_d.hpp @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. +// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved. #pragma once diff --git a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp index 8de81285d9..7bab2d0408 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. +// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved. #pragma once diff --git a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_d_xdl_cshuffle.hpp b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_d_xdl_cshuffle.hpp index da0b0cea24..d49c96f86c 100644 --- a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_d_xdl_cshuffle.hpp +++ b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_d_xdl_cshuffle.hpp @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. +// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved. 
#pragma once @@ -92,6 +92,17 @@ struct GridwiseGemmMultipleD_xdl_cshuffle using GridwiseGemmPipe = remove_cvref_t())>; + // denorm test fix, required to work around fp16 mfma issue + // we convert fp16->fp32->bf16 and execute bf16 mfma instruction + // when mfma if fixed, remove this section and update + // ABDataTypeAdjusted -> ABDataType throughout this file +#if defined(__gfx90a__) + using ABDataTypeAdjusted = + conditional_t, ck::bhalf_t, ABDataType>; +#else + using ABDataTypeAdjusted = ABDataType; +#endif + __host__ __device__ static constexpr auto GetABlockDescriptor_AK0PerBlock_MPerBlock_AK1() { // A matrix in LDS memory, dst of blockwise copy @@ -397,7 +408,7 @@ struct GridwiseGemmMultipleD_xdl_cshuffle ABlockTransferThreadClusterLengths_AK0_M_AK1, ABlockTransferThreadClusterArrangeOrder, ABDataType, - ABDataType, + ABDataTypeAdjusted, decltype(a_grid_desc_ak0_m_ak1), decltype(a_block_desc_ak0_m_ak1), ABlockTransferSrcAccessOrder, @@ -428,7 +439,7 @@ struct GridwiseGemmMultipleD_xdl_cshuffle BBlockTransferThreadClusterLengths_BK0_N_BK1, BBlockTransferThreadClusterArrangeOrder, ABDataType, - ABDataType, + ABDataTypeAdjusted, decltype(b_grid_desc_bk0_n_bk1), decltype(b_block_desc_bk0_n_bk1), BBlockTransferSrcAccessOrder, @@ -458,11 +469,11 @@ struct GridwiseGemmMultipleD_xdl_cshuffle // sanity check constexpr index_t KPack = math::max(math::lcm(AK1, BK1), - MfmaSelector::selected_mfma.k_per_blk); + MfmaSelector::selected_mfma.k_per_blk); auto blockwise_gemm = BlockwiseGemmXdlops_k0mk1_k0nk1_m0n0m1n1m2m3m4n2_Selector< BlockSize, - ABDataType, + ABDataTypeAdjusted, AccDataType, decltype(a_block_desc_ak0_m_ak1), decltype(b_block_desc_bk0_n_bk1), @@ -480,10 +491,11 @@ struct GridwiseGemmMultipleD_xdl_cshuffle a_block_desc_ak0_m_ak1.GetElementSpaceSize(), max_lds_align); auto a_block_buf = make_dynamic_buffer( - static_cast(p_shared), a_block_desc_ak0_m_ak1.GetElementSpaceSize()); + static_cast(p_shared), + a_block_desc_ak0_m_ak1.GetElementSpaceSize()); auto 
b_block_buf = make_dynamic_buffer( - static_cast(p_shared) + a_block_space_size_aligned, + static_cast(p_shared) + a_block_space_size_aligned, b_block_desc_bk0_n_bk1.GetElementSpaceSize()); constexpr auto a_block_slice_copy_step = make_multi_index(KPerBlock / AK1, 0, 0); diff --git a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_pipeline_v1.hpp b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_pipeline_v1.hpp index e9097552c2..d1209636de 100644 --- a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_pipeline_v1.hpp +++ b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_pipeline_v1.hpp @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. +// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved. #pragma once diff --git a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_xdlops_bwd_weight.hpp b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_xdlops_bwd_weight.hpp index 126887cbac..2b66898b1e 100644 --- a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_xdlops_bwd_weight.hpp +++ b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_xdlops_bwd_weight.hpp @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. +// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved. 
#pragma once @@ -166,15 +166,12 @@ __global__ void const CBlockClusterAdaptor c_block_cluster_adaptor) { #if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx908__) || defined(__gfx90a__)) - constexpr index_t shared_block_size = - GridwiseGemm::GetSharedMemoryNumberOfByte() / sizeof(FloatAB); - - __shared__ FloatAB p_shared_block[shared_block_size]; + __shared__ char p_shared[GridwiseGemm::GetSharedMemoryNumberOfByte()]; GridwiseGemm::template Run(p_a_grid, p_b_grid, p_c_grid, - p_shared_block, + p_shared, a_b_k0_m_k1_grid_desc, b_b_k0_n_k1_grid_desc, c_grid_desc_mblock_mperblock_nblock_nperblock, @@ -183,16 +180,16 @@ __global__ void c_element_op, c_block_cluster_adaptor); #else - ignore = p_a_grid; - ignore = p_b_grid; - ignore = p_c_grid; - ignore = a_b_k0_m_k1_grid_desc; - ignore = b_b_k0_n_k1_grid_desc; - ignore = c_grid_desc_mblock_mperblock_nblock_nperblock; - ignore = a_element_op; - ignore = b_element_op; - ignore = c_element_op; - ignore = c_block_cluster_adaptor; + ignore = p_a_grid; + ignore = p_b_grid; + ignore = p_c_grid; + ignore = a_b_k0_m_k1_grid_desc; + ignore = b_b_k0_n_k1_grid_desc; + ignore = c_grid_desc_mblock_mperblock_nblock_nperblock; + ignore = a_element_op; + ignore = b_element_op; + ignore = c_element_op; + ignore = c_block_cluster_adaptor; #endif // end of if (defined(__gfx908__) || defined(__gfx90a__)) } @@ -264,6 +261,16 @@ struct GridwiseGemm_bk0mk1_bk0nk1_mn_xdlops_bwd_weight using GridwiseGemmPipe = remove_cvref_t())>; + // denorm test fix, required to work around fp16 mfma issue + // we convert fp16->fp32->bf16 and execute bf16 mfma instruction + // when mfma if fixed, remove this section and update + // FloatABAdjusted -> FloatAB throughout this file +#if defined(__gfx90a__) + using FloatABAdjusted = conditional_t, ck::bhalf_t, FloatAB>; +#else + using FloatABAdjusted = FloatAB; +#endif + // M0/M1/M1Padding static constexpr auto M1PerBlock = Number{}; static constexpr auto M0PerBlock = Number{}; @@ -605,7 +612,7 @@ struct 
GridwiseGemm_bk0mk1_bk0nk1_mn_xdlops_bwd_weight __device__ static void Run(const FloatAB* __restrict__ p_a_grid, const FloatAB* __restrict__ p_b_grid, FloatC* __restrict__ p_c_grid, - FloatAB* __restrict__ p_shared_block, + void* __restrict__ p_shared, const AGridDesc_B_K0_M_K1& a_b_k0_m_k1_grid_desc, const BGridDesc_B_K0_N_K1& b_b_k0_n_k1_grid_desc, const CGridDesc_MBlock_MPerBlock_NBlock_NPerBlock& @@ -666,7 +673,7 @@ struct GridwiseGemm_bk0mk1_bk0nk1_mn_xdlops_bwd_weight ABlockTransferThreadClusterLengths_K0_M_K1, ABlockTransferThreadClusterArrangeOrder, FloatAB, - FloatAB, + FloatABAdjusted, decltype(a_b_k0_m_k1_grid_desc), decltype(a_b_k0_m_k1_block_desc), ABlockTransferSrcAccessOrder, @@ -696,7 +703,7 @@ struct GridwiseGemm_bk0mk1_bk0nk1_mn_xdlops_bwd_weight BBlockTransferThreadClusterLengths_K0_N_K1, BBlockTransferThreadClusterArrangeOrder, FloatAB, - FloatAB, + FloatABAdjusted, decltype(b_b_k0_n_k1_grid_desc), decltype(b_b_k0_n_k1_block_desc), BBlockTransferSrcAccessOrder, @@ -725,11 +732,11 @@ struct GridwiseGemm_bk0mk1_bk0nk1_mn_xdlops_bwd_weight // sanity check constexpr index_t KPack = - math::max(K1, MfmaSelector::selected_mfma.k_per_blk); + math::max(K1, MfmaSelector::selected_mfma.k_per_blk); auto blockwise_gemm = BlockwiseGemmXdlops_k0mk1_k0nk1_m0n0m1n1m2m3m4n2_v1( - p_a_block, a_k0_m_k1_block_desc.GetElementSpaceSize()); + static_cast(p_shared), a_k0_m_k1_block_desc.GetElementSpaceSize()); + auto b_block_buf = make_dynamic_buffer( - p_b_block, b_k0_n_k1_block_desc.GetElementSpaceSize()); + static_cast(p_shared) + a_block_space_size, + b_k0_n_k1_block_desc.GetElementSpaceSize()); // gridwise GEMM pipeline const index_t K0BlockMainLoop = __builtin_amdgcn_readfirstlane(K0 / K0PerBlock); @@ -798,8 +804,6 @@ struct GridwiseGemm_bk0mk1_bk0nk1_mn_xdlops_bwd_weight constexpr auto c_block_desc_mblock_mperblock_nblock_nperblock = GetCBlockDescriptor_MBlock_MPerBlock_NBlock_NPerBlock(); - void* p_shared = static_cast(p_shared_block); - auto c_block_buf = 
make_dynamic_buffer( static_cast(p_shared), c_block_desc_mblock_mperblock_nblock_nperblock.GetElementSpaceSize()); diff --git a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_xdlops_v2r3.hpp b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_xdlops_v2r3.hpp index d1149c0c2e..02b0081349 100644 --- a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_xdlops_v2r3.hpp +++ b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_xdlops_v2r3.hpp @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. +// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved. #pragma once @@ -58,16 +58,16 @@ __global__ void c_element_op, block_2_ctile_map); #else - ignore = p_a_grid; - ignore = p_b_grid; - ignore = p_c_grid; - ignore = a_grid_desc_k0_m_k1; - ignore = b_grid_desc_k0_n_k1; - ignore = c_grid_desc_m0_n0_m1_n1_m2_m3_m4_n2; - ignore = a_element_op; - ignore = b_element_op; - ignore = c_element_op; - ignore = block_2_ctile_map; + ignore = p_a_grid; + ignore = p_b_grid; + ignore = p_c_grid; + ignore = a_grid_desc_k0_m_k1; + ignore = b_grid_desc_k0_n_k1; + ignore = c_grid_desc_m0_n0_m1_n1_m2_m3_m4_n2; + ignore = a_element_op; + ignore = b_element_op; + ignore = c_element_op; + ignore = block_2_ctile_map; #endif // end of if (defined(__gfx908__) || defined(__gfx90a__)) } @@ -131,6 +131,16 @@ struct GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v2r3 using GridwiseGemmPipe = remove_cvref_t())>; + // denorm test fix, required to work around fp16 mfma issue + // we convert fp16->fp32->bf16 and execute bf16 mfma instruction + // when mfma if fixed, remove this section and update + // FloatABAdjusted -> FloatAB throughout this file +#if defined(__gfx90a__) + using FloatABAdjusted = conditional_t, ck::bhalf_t, FloatAB>; +#else + using FloatABAdjusted = FloatAB; +#endif + __host__ __device__ static constexpr auto GetABlockDescriptor_K0PerBlock_MPerBlock_K1() { constexpr auto max_lds_align = K1; @@ -281,7 
+291,7 @@ struct GridwiseGemm_k0mk1_k0nk1_mn_xdlops_v2r3 using BlockwiseGemm = BlockwiseGemmXdlops_k0mk1_k0nk1_m0n0m1n1m2m3m4n2_v1( - static_cast(p_shared), a_block_desc_k0_m_k1.GetElementSpaceSize()); + static_cast(p_shared), a_block_desc_k0_m_k1.GetElementSpaceSize()); auto b_block_buf = make_dynamic_buffer( - static_cast(p_shared) + a_block_space_size_aligned, + static_cast(p_shared) + a_block_space_size_aligned, b_block_desc_k0_n_k1.GetElementSpaceSize()); constexpr auto a_block_slice_copy_step = make_multi_index(K0PerBlock, 0, 0); diff --git a/include/ck/tensor_operation/gpu/thread/threadwise_tensor_slice_transfer_v3r1.hpp b/include/ck/tensor_operation/gpu/thread/threadwise_tensor_slice_transfer_v3r1.hpp index bb28c194f4..cba06f8e87 100644 --- a/include/ck/tensor_operation/gpu/thread/threadwise_tensor_slice_transfer_v3r1.hpp +++ b/include/ck/tensor_operation/gpu/thread/threadwise_tensor_slice_transfer_v3r1.hpp @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. +// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved. #pragma once diff --git a/include/ck/utility/data_type.hpp b/include/ck/utility/data_type.hpp index 5190036144..079b0cb860 100644 --- a/include/ck/utility/data_type.hpp +++ b/include/ck/utility/data_type.hpp @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. +// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved. 
#pragma once @@ -1008,6 +1008,60 @@ inline __host__ __device__ constexpr bhalf_t type_convert(float return uint16_t(u.int32 >> 16); } +// convert bfp16 to fp16 via fp32 +template <> +inline __host__ __device__ constexpr half_t type_convert(bhalf_t x) +{ + float x_fp32 = type_convert(x); + + return static_cast(x_fp32); +} + +// convert fp16 to bfp16 via fp32 +template <> +inline __host__ __device__ constexpr bhalf_t type_convert(half_t x) +{ + float x_fp32 = static_cast(x); + + return type_convert(x_fp32); +} + +// convert bfp16 to int32 via fp32 +template <> +inline __host__ __device__ constexpr int32_t type_convert(bhalf_t x) +{ + float x_fp32 = type_convert(x); + + return static_cast(x_fp32); +} + +// convert int32 to bfp16 via fp32 +template <> +inline __host__ __device__ constexpr bhalf_t type_convert(int32_t x) +{ + float x_fp32 = static_cast(x); + + return type_convert(x_fp32); +} + +// convert bfp16 to int8 via fp32 +template <> +inline __host__ __device__ constexpr int8_t type_convert(bhalf_t x) +{ + float x_fp32 = type_convert(x); + + return static_cast(x_fp32); +} + +// convert int8 to bfp16 via fp32 +template <> +inline __host__ __device__ constexpr bhalf_t type_convert(int8_t x) +{ + float x_fp32 = static_cast(x); + + return type_convert(x_fp32); +} + template struct NumericLimits { From bb5530af91352dca062b791313d9b77700335ae9 Mon Sep 17 00:00:00 2001 From: carlushuang Date: Thu, 30 Mar 2023 08:03:07 +0800 Subject: [PATCH 29/31] simplify karg in device/grid of split-k op (#644) * simplify karg in device/grid split-k op * fix mk_kn_mn instances * add more instances * use name from tensor layout --- .../impl/device_gemm_xdl_splitk_c_shuffle.hpp | 464 ++--------------- .../gpu/grid/gridwise_gemm_xdlops_v2r4r2.hpp | 486 +++++++++++++----- ...l_splitk_f16_f16_f16_mk_kn_mn_instance.cpp | 27 +- 3 files changed, 438 insertions(+), 539 deletions(-) diff --git a/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl_splitk_c_shuffle.hpp 
b/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl_splitk_c_shuffle.hpp index 0d2aeaeb7e..1f08cec67e 100644 --- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl_splitk_c_shuffle.hpp +++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl_splitk_c_shuffle.hpp @@ -73,157 +73,18 @@ struct DeviceGemmXdlSplitKCShuffle : public DeviceGemmSplitK{}; static constexpr auto I3 = Number<3>{}; - static constexpr auto K1Number = Number{}; - - static auto - MakeAGridDescriptor_KBatch_K0_M_K1(index_t M, index_t K, index_t StrideA, int KBatch, int KPad) - { - assert(KPad % (K1 * KBatch) == 0); - - const index_t K0 = KPad / (K1 * KBatch); - - const auto a_grid_desc_m_k = [&]() { - if constexpr(is_same::value) - { - return make_naive_tensor_descriptor(make_tuple(M, K), make_tuple(StrideA, I1)); - } - else if constexpr(is_same::value) - { - return make_naive_tensor_descriptor(make_tuple(M, K), make_tuple(I1, StrideA)); - } - }(); - - const auto a_grid_desc_m_kpad = transform_tensor_descriptor( - a_grid_desc_m_k, - make_tuple(make_pass_through_transform(M), make_right_pad_transform(K, KPad - K)), - make_tuple(Sequence<0>{}, Sequence<1>{}), - make_tuple(Sequence<0>{}, Sequence<1>{})); - - if constexpr(GemmSpec == GemmSpecialization::MNPadding) - { - const auto PadM = (MPerBlock - M % MPerBlock) % MPerBlock; - return transform_tensor_descriptor( - a_grid_desc_m_kpad, - make_tuple(make_unmerge_transform(make_tuple(KBatch, K0, K1Number)), - make_right_pad_transform(M, PadM)), - make_tuple(Sequence<1>{}, Sequence<0>{}), - make_tuple(Sequence<0, 1, 3>{}, Sequence<2>{})); - } - else - { - return transform_tensor_descriptor( - a_grid_desc_m_kpad, - make_tuple(make_unmerge_transform(make_tuple(KBatch, K0, K1Number)), - make_pass_through_transform(M)), - make_tuple(Sequence<1>{}, Sequence<0>{}), - make_tuple(Sequence<0, 1, 3>{}, Sequence<2>{})); - } - } - - static auto - MakeBGridDescriptor_KBatch_K0_N_K1(index_t K, index_t N, index_t StrideB, int KBatch, int 
KPad) - { - assert(KPad % (K1 * KBatch) == 0); - - const index_t K0 = KPad / (K1 * KBatch); - - const auto b_grid_desc_k_n = [&]() { - if constexpr(is_same::value) - { - return make_naive_tensor_descriptor(make_tuple(K, N), make_tuple(StrideB, I1)); - } - else if constexpr(is_same::value) - { - return make_naive_tensor_descriptor(make_tuple(K, N), make_tuple(I1, StrideB)); - } - }(); - - const auto b_grid_desc_kpad_n = transform_tensor_descriptor( - b_grid_desc_k_n, - make_tuple(make_right_pad_transform(K, KPad - K), make_pass_through_transform(N)), - make_tuple(Sequence<0>{}, Sequence<1>{}), - make_tuple(Sequence<0>{}, Sequence<1>{})); - - if constexpr(GemmSpec == GemmSpecialization::MNPadding) - { - const auto PadN = (NPerBlock - N % NPerBlock) % NPerBlock; - return transform_tensor_descriptor( - b_grid_desc_kpad_n, - make_tuple(make_unmerge_transform(make_tuple(KBatch, K0, K1Number)), - make_right_pad_transform(N, PadN)), - make_tuple(Sequence<0>{}, Sequence<1>{}), - make_tuple(Sequence<0, 1, 3>{}, Sequence<2>{})); - } - else - { - return transform_tensor_descriptor( - b_grid_desc_kpad_n, - make_tuple(make_unmerge_transform(make_tuple(KBatch, K0, K1Number)), - make_pass_through_transform(N)), - make_tuple(Sequence<0>{}, Sequence<1>{}), - make_tuple(Sequence<0, 1, 3>{}, Sequence<2>{})); - } - } - - static auto MakeCGridDescriptor_M_N(index_t M, index_t N, index_t StrideC) - { - const auto c_grid_desc_m_n = [&]() { - if constexpr(is_same::value) - { - return make_naive_tensor_descriptor(make_tuple(M, N), make_tuple(StrideC, I1)); - } - else if constexpr(is_same::value) - { - return make_naive_tensor_descriptor(make_tuple(M, N), make_tuple(I1, StrideC)); - } - }(); - - if constexpr(GemmSpec == GemmSpecialization::MNPadding) - { - const auto PadM = (MPerBlock - M % MPerBlock) % MPerBlock; - const auto PadN = (NPerBlock - N % NPerBlock) % NPerBlock; - - return transform_tensor_descriptor( - c_grid_desc_m_n, - make_tuple(make_right_pad_transform(M, PadM), 
make_right_pad_transform(N, PadN)), - make_tuple(Sequence<0>{}, Sequence<1>{}), - make_tuple(Sequence<0>{}, Sequence<1>{})); - } - else - { - - return transform_tensor_descriptor( - c_grid_desc_m_n, - make_tuple(make_pass_through_transform(M), make_pass_through_transform(N)), - make_tuple(Sequence<0>{}, Sequence<1>{}), - make_tuple(Sequence<0>{}, Sequence<1>{})); - } - } - - static auto GetKPad(index_t K, index_t KBatch) - { - const index_t K0 = math::integer_divide_ceil(K, K1 * K0PerBlock * KBatch) * K0PerBlock; - const index_t KPad = KBatch * K0 * K1; - return KPad; - } - - using AGridDesc_K0_M_K1 = decltype(MakeAGridDescriptor_KBatch_K0_M_K1(1, 1, 1, 1, 1)); - using BGridDesc_K0_N_K1 = decltype(MakeBGridDescriptor_KBatch_K0_N_K1(1, 1, 1, 1, 1)); - using CGridDesc_M_N = decltype(MakeCGridDescriptor_M_N(1, 1, 1)); - - // GridwiseGemm using GridwiseGemm = GridwiseGemm_bk0mk1_bk0nk1_mn_xdlops_v2r4r2< BlockSize, ADataType, // TODO: distinguish A/B datatype AccDataType, CDataType, - InMemoryDataOperationEnum::Set, - AGridDesc_K0_M_K1, - BGridDesc_K0_N_K1, - CGridDesc_M_N, + ALayout, + BLayout, + CLayout, AElementwiseOperation, BElementwiseOperation, CElementwiseOperation, + GemmSpec, MPerBlock, NPerBlock, K0PerBlock, @@ -253,236 +114,64 @@ struct DeviceGemmXdlSplitKCShuffle : public DeviceGemmSplitK; - // GridwiseGemm - using GridwiseGemmAtomicAdd = GridwiseGemm_bk0mk1_bk0nk1_mn_xdlops_v2r4r2< - BlockSize, - ADataType, // TODO: distinguish A/B datatype - AccDataType, - CDataType, - InMemoryDataOperationEnum::AtomicAdd, - AGridDesc_K0_M_K1, - BGridDesc_K0_N_K1, - CGridDesc_M_N, - AElementwiseOperation, - BElementwiseOperation, - CElementwiseOperation, - MPerBlock, - NPerBlock, - K0PerBlock, - MPerXDL, - NPerXDL, - K1, - MXdlPerWave, - NXdlPerWave, - ABlockTransferThreadClusterLengths_K0_M_K1, - ABlockTransferThreadClusterArrangeOrder, - ABlockTransferSrcAccessOrder, - ABlockTransferSrcVectorDim, - ABlockTransferSrcScalarPerVector, - ABlockTransferDstScalarPerVector_K1, 
- false, // AThreadTransferSrcResetCoordinateAfterRun, - ABlockLdsAddExtraM, - BBlockTransferThreadClusterLengths_K0_N_K1, - BBlockTransferThreadClusterArrangeOrder, - BBlockTransferSrcAccessOrder, - BBlockTransferSrcVectorDim, - BBlockTransferSrcScalarPerVector, - BBlockTransferDstScalarPerVector_K1, - false, // BThreadTransferSrcResetCoordinateAfterRun, - BBlockLdsAddExtraN, - CShuffleMRepeatPerShuffle, - CShuffleNRepeatPerShuffle, - CBlockTransferScalarPerVector_NWaveNPerXDL, - CBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock>; - - using CGridDesc_MBlock_MPerBlock_NBlock_NPerBlock = - decltype(GridwiseGemm::MakeCGridDesc_MBlock_MPerBlock_NBlock_NPerBlock(CGridDesc_M_N{})); - - using Block2CTileMap = typename GridwiseGemm::CBlockClusterAdaptor; - - // Argument - struct Argument : public BaseArgument - { - Argument(const ADataType* p_a_grid, - const BDataType* p_b_grid, - CDataType* p_c_grid, - index_t M, - index_t N, - index_t K, - index_t StrideA, - index_t StrideB, - index_t StrideC, - index_t M01, - index_t N01, - AElementwiseOperation a_element_op, - BElementwiseOperation b_element_op, - CElementwiseOperation c_element_op, - index_t k_batch) - : p_a_grid_{p_a_grid}, - p_b_grid_{p_b_grid}, - p_c_grid_{p_c_grid}, - a_grid_desc_kbatch_k0_m_k1_{}, - b_grid_desc_kbatch_k0_n_k1_{}, - c_grid_desc_m_n_{}, - c_grid_desc_mblock_mperblock_nblock_nperblock_{}, - block_2_ctile_map_{}, - M01_{M01}, - N01_{N01}, - a_element_op_{a_element_op}, - b_element_op_{b_element_op}, - c_element_op_{c_element_op}, - k_batch_{k_batch} - { - int KPad = DeviceGemmXdlSplitKCShuffle::GetKPad(K, k_batch_); - - a_grid_desc_kbatch_k0_m_k1_ = - DeviceGemmXdlSplitKCShuffle::MakeAGridDescriptor_KBatch_K0_M_K1( - M, K, StrideA, k_batch_, KPad); - b_grid_desc_kbatch_k0_n_k1_ = - DeviceGemmXdlSplitKCShuffle::MakeBGridDescriptor_KBatch_K0_N_K1( - K, N, StrideB, k_batch_, KPad); - c_grid_desc_m_n_ = DeviceGemmXdlSplitKCShuffle::MakeCGridDescriptor_M_N(M, N, StrideC); - - 
block_2_ctile_map_ = - GridwiseGemm::MakeCBlockClusterAdaptor(c_grid_desc_m_n_, M01, N01, k_batch_); - - if(GridwiseGemm::CheckValidity(a_grid_desc_kbatch_k0_m_k1_, - b_grid_desc_kbatch_k0_n_k1_, - c_grid_desc_m_n_, - block_2_ctile_map_)) - { - c_grid_desc_mblock_mperblock_nblock_nperblock_ = - GridwiseGemm::MakeCGridDesc_MBlock_MPerBlock_NBlock_NPerBlock(c_grid_desc_m_n_); - } - } - - // private: - const ADataType* p_a_grid_; - const BDataType* p_b_grid_; - CDataType* p_c_grid_; - AGridDesc_K0_M_K1 a_grid_desc_kbatch_k0_m_k1_; - BGridDesc_K0_N_K1 b_grid_desc_kbatch_k0_n_k1_; - CGridDesc_M_N c_grid_desc_m_n_; - CGridDesc_MBlock_MPerBlock_NBlock_NPerBlock c_grid_desc_mblock_mperblock_nblock_nperblock_; - Block2CTileMap block_2_ctile_map_; - index_t M01_; - index_t N01_; - AElementwiseOperation a_element_op_; - BElementwiseOperation b_element_op_; - CElementwiseOperation c_element_op_; - index_t k_batch_; - }; + using Argument = typename GridwiseGemm::Argument; // Invoker struct Invoker : public BaseInvoker { - using Argument = DeviceGemmXdlSplitKCShuffle::Argument; - void Print(const Argument& arg) - { - std::cout << "arg.a_grid_desc_kbatch_k0_m_k1_{" - << arg.a_grid_desc_kbatch_k0_m_k1_.GetLength(I0) << ", " - << arg.a_grid_desc_kbatch_k0_m_k1_.GetLength(I1) << ", " - << arg.a_grid_desc_kbatch_k0_m_k1_.GetLength(I2) << ", " - << arg.a_grid_desc_kbatch_k0_m_k1_.GetLength(I3) << "}" << std::endl; + void Print(const Argument& karg) { karg.Print(); } - std::cout << "arg.b_grid_desc_kbatch_k0_n_k1_{" - << arg.b_grid_desc_kbatch_k0_n_k1_.GetLength(I0) << ", " - << arg.b_grid_desc_kbatch_k0_n_k1_.GetLength(I1) << ", " - << arg.b_grid_desc_kbatch_k0_n_k1_.GetLength(I2) << ", " - << arg.b_grid_desc_kbatch_k0_n_k1_.GetLength(I3) << "}" << std::endl; - - std::cout << "arg.c_grid_desc_m_n_{ " << arg.c_grid_desc_m_n_.GetLength(I0) << ", " - << arg.c_grid_desc_m_n_.GetLength(I1) << "}" << std::endl; - } - - float Run(const Argument& arg, const StreamConfig& stream_config = 
StreamConfig{}) + float Run(const Argument& karg, const StreamConfig& stream_config = StreamConfig{}) { if(stream_config.log_level_ > 0) { - Print(arg); + Print(karg); } - const auto kbatch = arg.a_grid_desc_kbatch_k0_m_k1_.GetLength(I0); + const auto kbatch = karg.k_batch; - if(!GridwiseGemm::CheckValidity(arg.a_grid_desc_kbatch_k0_m_k1_, - arg.b_grid_desc_kbatch_k0_n_k1_, - arg.c_grid_desc_m_n_, - arg.block_2_ctile_map_)) + if(!GridwiseGemm::CheckValidity(karg)) { throw std::runtime_error( - "wrong! GridwiseGemm_bk0mk1_bk0nk1_mn_xdlops_v2r4r2 has invalid setting"); + "wrong! GridwiseGemm_bk0mk1_bk0nk1_mn_xdlops_v2r4r2 has invalid " + "setting"); } - const index_t grid_size = - arg.block_2_ctile_map_.CalculateGridSize(arg.c_grid_desc_m_n_); - - const auto K0 = arg.a_grid_desc_kbatch_k0_m_k1_.GetLength(I1); + index_t gdx, gdy, gdz; + std::tie(gdx, gdy, gdz) = GridwiseGemm::CalculateGridSize(karg); + const auto K0 = karg.K0; const bool has_main_k0_block_loop = GridwiseGemm::CalculateHasMainK0BlockLoop(K0); float ave_time = 0; const auto Run = [&](const auto& kernel) { - hipGetErrorString(hipMemset( - arg.p_c_grid_, - 0, - arg.c_grid_desc_mblock_mperblock_nblock_nperblock_.GetElementSpaceSize() * - sizeof(CDataType))); + if(kbatch > 1) + hipGetErrorString( + hipMemset(karg.p_c_grid, 0, karg.M * karg.N * sizeof(CDataType))); - ave_time = - launch_and_time_kernel(stream_config, - kernel, - dim3(grid_size), - dim3(BlockSize), - 0, - arg.p_a_grid_, - arg.p_b_grid_, - arg.p_c_grid_, - arg.a_grid_desc_kbatch_k0_m_k1_, - arg.b_grid_desc_kbatch_k0_n_k1_, - arg.c_grid_desc_mblock_mperblock_nblock_nperblock_, - arg.a_element_op_, - arg.b_element_op_, - arg.c_element_op_, - arg.block_2_ctile_map_); + ave_time = launch_and_time_kernel( + stream_config, kernel, dim3(gdx, gdy, gdz), dim3(BlockSize), 0, karg); }; if(has_main_k0_block_loop) { if(kbatch == 1) { - const auto kernel = kernel_gemm_xdlops_v2r4r2< - GridwiseGemm, - ADataType, // TODO: distiguish A/B datatype - CDataType, 
- remove_reference_t, - remove_reference_t, - remove_reference_t, - AElementwiseOperation, - BElementwiseOperation, - CElementwiseOperation, - remove_reference_t, - true>; + const auto kernel = + kernel_gemm_xdlops_v2r4r2_simplified; Run(kernel); } else { - const auto kernel = kernel_gemm_xdlops_v2r4r2< - GridwiseGemmAtomicAdd, - ADataType, // TODO: distiguish A/B datatype - CDataType, - remove_reference_t, - remove_reference_t, - remove_reference_t, - AElementwiseOperation, - BElementwiseOperation, - CElementwiseOperation, - remove_reference_t, - true>; + const auto kernel = + kernel_gemm_xdlops_v2r4r2_simplified; Run(kernel); } @@ -491,37 +180,19 @@ struct DeviceGemmXdlSplitKCShuffle : public DeviceGemmSplitK, - remove_reference_t, - remove_reference_t, - AElementwiseOperation, - BElementwiseOperation, - CElementwiseOperation, - remove_reference_t, - false>; + const auto kernel = + kernel_gemm_xdlops_v2r4r2_simplified; Run(kernel); } else { - const auto kernel = kernel_gemm_xdlops_v2r4r2< - GridwiseGemmAtomicAdd, - ADataType, // TODO: distiguish A/B datatype - CDataType, - remove_reference_t, - remove_reference_t, - remove_reference_t, - AElementwiseOperation, - BElementwiseOperation, - CElementwiseOperation, - remove_reference_t, - false>; + const auto kernel = + kernel_gemm_xdlops_v2r4r2_simplified; Run(kernel); } @@ -544,12 +215,9 @@ struct DeviceGemmXdlSplitKCShuffle : public DeviceGemmSplitK(static_cast(p_a), @@ -615,11 +282,10 @@ struct DeviceGemmXdlSplitKCShuffle : public DeviceGemmSplitK"; - // clang-format on - - return str.str(); - } + std::string GetTypeString() const override { return GridwiseGemm::GetTypeString(); } }; } // namespace device diff --git a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_xdlops_v2r4r2.hpp b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_xdlops_v2r4r2.hpp index 190194f1eb..727f180e96 100644 --- a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_xdlops_v2r4r2.hpp +++ 
b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_xdlops_v2r4r2.hpp @@ -18,60 +18,23 @@ namespace ck { template + bool HasMainKBlockLoop, + InMemoryDataOperationEnum CGlobalMemoryDataOperation> __global__ void #if CK_USE_LAUNCH_BOUNDS __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU) #endif - kernel_gemm_xdlops_v2r4r2(const FloatAB* __restrict__ p_a_grid, - const FloatAB* __restrict__ p_b_grid, - FloatC* __restrict__ p_c_grid, - const AGridDesc_B_K0_M_K1 a_b_k0_m_k1_grid_desc, - const BGridDesc_B_K0_N_K1 b_b_k0_n_k1_grid_desc, - const CGridDesc_MBlock_MPerBlock_NBlock_NPerBlock - c_grid_desc_mblock_mperblock_nblock_nperblock, - const AElementwiseOperation a_element_op, - const BElementwiseOperation b_element_op, - const CElementwiseOperation c_element_op, - const CBlockClusterAdaptor c_block_cluster_adaptor) + kernel_gemm_xdlops_v2r4r2_simplified(typename GridwiseGemm::Argument karg) { #if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx908__) || defined(__gfx90a__)) - constexpr index_t shared_block_size = - GridwiseGemm::GetSharedMemoryNumberOfByte() / sizeof(FloatAB); + constexpr index_t shared_size = GridwiseGemm::GetSharedMemoryNumberOfByte(); - __shared__ FloatAB p_shared_block[shared_block_size]; + __shared__ uint8_t p_shared[shared_size]; - GridwiseGemm::template Run(p_a_grid, - p_b_grid, - p_c_grid, - static_cast(p_shared_block), - a_b_k0_m_k1_grid_desc, - b_b_k0_n_k1_grid_desc, - c_grid_desc_mblock_mperblock_nblock_nperblock, - a_element_op, - b_element_op, - c_element_op, - c_block_cluster_adaptor); + GridwiseGemm::template Run( + karg, static_cast(p_shared)); #else - ignore = p_a_grid; - ignore = p_b_grid; - ignore = p_c_grid; - ignore = a_b_k0_m_k1_grid_desc; - ignore = b_b_k0_n_k1_grid_desc; - ignore = c_grid_desc_mblock_mperblock_nblock_nperblock; - ignore = a_element_op; - ignore = b_element_op; - ignore = c_element_op; - ignore = c_block_cluster_adaptor; + ignore = karg; #endif // end of if (defined(__gfx908__) || 
defined(__gfx90a__)) } @@ -79,13 +42,13 @@ template {}; // K1 should be Number<...> - static constexpr auto K1 = Number{}; + static constexpr auto K1 = Number{}; + static constexpr auto M01 = 1; + static constexpr auto N01 = 1; using ThisThreadBlock = ThisThreadBlock; + struct Argument : public ck::tensor_operation::device::BaseArgument + { + const FloatAB* p_a_grid; + const FloatAB* p_b_grid; + FloatC* p_c_grid; + index_t M; + index_t N; + index_t K; + index_t StrideA; + index_t StrideB; + index_t StrideC; + index_t MPadded; + index_t NPadded; + index_t KPadded; + index_t K0; + index_t k_batch; + + Argument(const FloatAB* p_a_grid_, + const FloatAB* p_b_grid_, + FloatC* p_c_grid_, + index_t M_, + index_t N_, + index_t K_, + index_t StrideA_, + index_t StrideB_, + index_t StrideC_, + index_t MPadded_, + index_t NPadded_, + index_t KPadded_, + index_t K0_, + index_t k_batch_) + : p_a_grid(p_a_grid_), + p_b_grid(p_b_grid_), + p_c_grid(p_c_grid_), + M(M_), + N(N_), + K(K_), + StrideA(StrideA_), + StrideB(StrideB_), + StrideC(StrideC_), + MPadded(MPadded_), + NPadded(NPadded_), + KPadded(KPadded_), + K0(K0_), + k_batch(k_batch_) + { + } + + void Print() const + { + std::cout << "arg {" + << "M:" << M << ", " + << "N:" << N << ", " + << "K:" << K << ", " + << "SA:" << StrideA << ", " + << "SB:" << StrideB << ", " + << "SC:" << StrideC << ", " + << "MP:" << MPadded << ", " + << "NP:" << NPadded << ", " + << "KP:" << KPadded << ", " + << "K0:" << K0 << ", " + << "KB:" << k_batch << "}" << std::endl; + } + }; + + __host__ __device__ static auto CalculateGridSize(const Argument& karg) + { + return std::make_tuple(math::integer_divide_ceil(karg.N, NPerBlock), + math::integer_divide_ceil(karg.M, MPerBlock), + karg.k_batch); + } + + // prefer this to be called on host + __host__ __device__ static auto CalculateMPadded(index_t M) + { + return (M + MPerBlock - 1) / MPerBlock * MPerBlock; + } + + __host__ __device__ static auto CalculateNPadded(index_t N) + { + return (N + 
NPerBlock - 1) / NPerBlock * NPerBlock; + } + + __host__ __device__ static auto CalculateK0(index_t K, index_t K_Batch = 1) + { + // k_batch * k0 * k0_per_block * k1 + auto K_t = K_Batch * K0PerBlock * K1; + return (K + K_t - 1) / K_t * K0PerBlock; + } + + __host__ __device__ static auto CalculateKPadded(index_t K, index_t K_Batch = 1) + { + auto K0 = CalculateK0(K, K_Batch); + return K_Batch * K0 * K1; + } + + __host__ __device__ static auto MakeAGridDescriptor_KBatch_K0_M_K1(index_t M, + index_t MPad, + index_t K, + index_t StrideA, + index_t KBatch, + index_t K0, + index_t KPad) + { + const auto a_grid_desc_m_k = [&]() { + if constexpr(is_same::value) + { + return make_naive_tensor_descriptor(make_tuple(M, K), make_tuple(StrideA, I1)); + } + else if constexpr(is_same::value) + { + return make_naive_tensor_descriptor(make_tuple(M, K), make_tuple(I1, StrideA)); + } + }(); + + const auto a_grid_desc_m_kpad = transform_tensor_descriptor( + a_grid_desc_m_k, + make_tuple(make_pass_through_transform(M), make_right_pad_transform(K, KPad - K)), + make_tuple(Sequence<0>{}, Sequence<1>{}), + make_tuple(Sequence<0>{}, Sequence<1>{})); + + if constexpr(GemmSpec == tensor_operation::device::GemmSpecialization::MPadding || + GemmSpec == tensor_operation::device::GemmSpecialization::MNPadding || + GemmSpec == tensor_operation::device::GemmSpecialization::MKPadding || + GemmSpec == tensor_operation::device::GemmSpecialization::MNKPadding) + { + // const auto PadM = (MPerBlock - M % MPerBlock) % MPerBlock; + return transform_tensor_descriptor( + a_grid_desc_m_kpad, + make_tuple(make_unmerge_transform(make_tuple(KBatch, K0, K1)), + make_right_pad_transform(M, MPad - M)), + make_tuple(Sequence<1>{}, Sequence<0>{}), + make_tuple(Sequence<0, 1, 3>{}, Sequence<2>{})); + } + else + { + return transform_tensor_descriptor( + a_grid_desc_m_kpad, + make_tuple(make_unmerge_transform(make_tuple(KBatch, K0, K1)), + make_pass_through_transform(M)), + make_tuple(Sequence<1>{}, Sequence<0>{}), + 
make_tuple(Sequence<0, 1, 3>{}, Sequence<2>{})); + } + } + + __host__ __device__ static auto MakeBGridDescriptor_KBatch_K0_N_K1(index_t K, + index_t NPad, + index_t N, + index_t StrideB, + index_t KBatch, + index_t K0, + index_t KPad) + { + const auto b_grid_desc_k_n = [&]() { + if constexpr(is_same::value) + { + return make_naive_tensor_descriptor(make_tuple(K, N), make_tuple(StrideB, I1)); + } + else if constexpr(is_same::value) + { + return make_naive_tensor_descriptor(make_tuple(K, N), make_tuple(I1, StrideB)); + } + }(); + + const auto b_grid_desc_kpad_n = transform_tensor_descriptor( + b_grid_desc_k_n, + make_tuple(make_right_pad_transform(K, KPad - K), make_pass_through_transform(N)), + make_tuple(Sequence<0>{}, Sequence<1>{}), + make_tuple(Sequence<0>{}, Sequence<1>{})); + + if constexpr(GemmSpec == tensor_operation::device::GemmSpecialization::NPadding || + GemmSpec == tensor_operation::device::GemmSpecialization::MNPadding || + GemmSpec == tensor_operation::device::GemmSpecialization::NKPadding || + GemmSpec == tensor_operation::device::GemmSpecialization::MNKPadding) + { + // const auto PadN = (NPerBlock - N % NPerBlock) % NPerBlock; + return transform_tensor_descriptor( + b_grid_desc_kpad_n, + make_tuple(make_unmerge_transform(make_tuple(KBatch, K0, K1)), + make_right_pad_transform(N, NPad - N)), + make_tuple(Sequence<0>{}, Sequence<1>{}), + make_tuple(Sequence<0, 1, 3>{}, Sequence<2>{})); + } + else + { + return transform_tensor_descriptor( + b_grid_desc_kpad_n, + make_tuple(make_unmerge_transform(make_tuple(KBatch, K0, K1)), + make_pass_through_transform(N)), + make_tuple(Sequence<0>{}, Sequence<1>{}), + make_tuple(Sequence<0, 1, 3>{}, Sequence<2>{})); + } + } + + __host__ __device__ static auto + MakeCGridDescriptor_M_N(index_t M, index_t N, index_t MPad, index_t NPad, index_t StrideC) + { + const auto c_grid_desc_m_n = [&]() { + if constexpr(is_same::value) + { + return make_naive_tensor_descriptor(make_tuple(M, N), make_tuple(StrideC, I1)); + } + 
else if constexpr(is_same::value) + { + return make_naive_tensor_descriptor(make_tuple(M, N), make_tuple(I1, StrideC)); + } + }(); + + if constexpr(GemmSpec == tensor_operation::device::GemmSpecialization::MNPadding) + { + return transform_tensor_descriptor(c_grid_desc_m_n, + make_tuple(make_right_pad_transform(M, MPad - M), + make_right_pad_transform(N, NPad - N)), + make_tuple(Sequence<0>{}, Sequence<1>{}), + make_tuple(Sequence<0>{}, Sequence<1>{})); + } + else + { + return transform_tensor_descriptor( + c_grid_desc_m_n, + make_tuple(make_pass_through_transform(M), make_pass_through_transform(N)), + make_tuple(Sequence<0>{}, Sequence<1>{}), + make_tuple(Sequence<0>{}, Sequence<1>{})); + } + } + __host__ __device__ static constexpr index_t GetSharedMemoryNumberOfByte() { constexpr auto max_lds_align = K1; @@ -178,45 +369,68 @@ struct GridwiseGemm_bk0mk1_bk0nk1_mn_xdlops_v2r4r2 c_block_size * sizeof(FloatC)); } - // block_id to matrix tile idx (m0, n0) mapping are controlled by {M01, N01} - template - __host__ __device__ static constexpr bool - CheckValidity(const AGridDesc_B_K0_M_K1& a_b_k0_m_k1_grid_desc, - const BGridDesc_B_K0_N_K1& b_b_k0_n_k1_grid_desc, - const CMNGridDesc& c_m_n_grid_desc, - const Block2CTileMap& block_2_ctile_map) + __host__ __device__ static constexpr bool CheckValidity(const Argument& karg) { - static_assert(is_known_at_compile_time>::value, - "wrong! 
K1 need to be known at compile-time"); - - static_assert((MPerBlock % (MPerXDL * MRepeat) == 0) && - (NPerBlock % (NRepeat * NPerXDL)) == 0, - "Invalid tuning param!"); - - const auto M = a_b_k0_m_k1_grid_desc.GetLength(I2); - const auto N = b_b_k0_n_k1_grid_desc.GetLength(I2); - const auto K0 = a_b_k0_m_k1_grid_desc.GetLength(I1); - const auto KBatch = a_b_k0_m_k1_grid_desc.GetLength(I0); - - if(!(M == c_m_n_grid_desc.GetLength(I0) && N == c_m_n_grid_desc.GetLength(I1) && - K0 == b_b_k0_n_k1_grid_desc.GetLength(I1) && - K1 == a_b_k0_m_k1_grid_desc.GetLength(I3) && - K1 == b_b_k0_n_k1_grid_desc.GetLength(I3) && - KBatch == b_b_k0_n_k1_grid_desc.GetLength(I0))) - return false; - - if(!(M % MPerBlock == 0 && N % NPerBlock == 0 && K0 % K0PerBlock == 0)) - return false; - - if(!block_2_ctile_map.CheckValidity(c_m_n_grid_desc)) + if constexpr(!(GemmSpec == tensor_operation::device::GemmSpecialization::MPadding || + GemmSpec == tensor_operation::device::GemmSpecialization::MNPadding || + GemmSpec == tensor_operation::device::GemmSpecialization::MKPadding || + GemmSpec == tensor_operation::device::GemmSpecialization::MNKPadding)) { - return false; + if(!(karg.M % MPerBlock == 0)) + return false; + } + if constexpr(!(GemmSpec == tensor_operation::device::GemmSpecialization::NPadding || + GemmSpec == tensor_operation::device::GemmSpecialization::MNPadding || + GemmSpec == tensor_operation::device::GemmSpecialization::NKPadding || + GemmSpec == tensor_operation::device::GemmSpecialization::MNKPadding)) + { + if(!(karg.N % NPerBlock == 0)) + return false; + } + + if constexpr(is_same::value) + { + if(karg.K % ABlockTransferSrcScalarPerVector != 0) + return false; + } + else + { + if(karg.M % ABlockTransferSrcScalarPerVector != 0) + return false; + } + + if constexpr(is_same::value) + { + if(karg.N % BBlockTransferSrcScalarPerVector != 0) + return false; + } + else + { + if(karg.K % BBlockTransferSrcScalarPerVector != 0) + return false; + } + + if constexpr(is_same::value) + { 
+ if(karg.N % CBlockTransferScalarPerVector_NWaveNPerXDL != 0) + return false; + } + else + { + if(karg.M % CBlockTransferScalarPerVector_NWaveNPerXDL != 0) + return false; } - // TODO: also check validity of all components (blockwise-copy, threadwise-copy, etc) return true; } + __host__ __device__ static auto GetKPad(index_t K, index_t KBatch) + { + const index_t K0 = math::integer_divide_ceil(K, K1 * K0PerBlock * KBatch) * K0PerBlock; + const index_t KPad = KBatch * K0 * K1; + return KPad; + } + __host__ __device__ static constexpr bool CalculateHasMainK0BlockLoop(index_t K0) { const bool has_main_k0_block_loop = K0 > K0PerBlock; @@ -224,8 +438,9 @@ struct GridwiseGemm_bk0mk1_bk0nk1_mn_xdlops_v2r4r2 return has_main_k0_block_loop; } + template __host__ __device__ static constexpr auto - MakeCGridDesc_MBlock_MPerBlock_NBlock_NPerBlock(const CMNGridDesc& c_m_n_grid_desc) + MakeCGridDesc_MBlock_MPerBlock_NBlock_NPerBlock(const CGridDesc& c_m_n_grid_desc) { const auto M = c_m_n_grid_desc.GetLength(I0); const auto N = c_m_n_grid_desc.GetLength(I1); @@ -242,10 +457,11 @@ struct GridwiseGemm_bk0mk1_bk0nk1_mn_xdlops_v2r4r2 } // return block_id to C matrix tile idx (m0, n0) mapping + template __host__ __device__ static constexpr auto MakeCBlockClusterAdaptor( - const CMNGridDesc& c_m_n_grid_desc, index_t /* M01 */, index_t /* N01 */, index_t KBatch) + const CGridDesc& c_m_n_grid_desc, index_t /* M01 */, index_t /* N01 */, index_t KBatch) { - return BlockToCTileMap_KSplit_M00_N0_M01Adapt( + return BlockToCTileMap_KSplit_M00_N0_M01Adapt( c_m_n_grid_desc, 8, KBatch); } @@ -262,24 +478,25 @@ struct GridwiseGemm_bk0mk1_bk0nk1_mn_xdlops_v2r4r2 Number{})); } - using CGridDesc_MBlock_MPerBlock_NBlock_NPerBlock = - decltype(MakeCGridDesc_MBlock_MPerBlock_NBlock_NPerBlock(CMNGridDesc{})); - using CBlockClusterAdaptor = decltype(MakeCBlockClusterAdaptor(CMNGridDesc{}, 1, 1, 1)); - - template - __device__ static void Run(const FloatAB* __restrict__ p_a_grid, - const FloatAB* 
__restrict__ p_b_grid, - FloatC* __restrict__ p_c_grid, - void* __restrict__ p_shared_block, - const AGridDesc_B_K0_M_K1& a_b_k0_m_k1_grid_desc, - const BGridDesc_B_K0_N_K1& b_b_k0_n_k1_grid_desc, - const CGridDesc_MBlock_MPerBlock_NBlock_NPerBlock& - c_grid_desc_mblock_mperblock_nblock_nperblock, - const AElementwiseOperation& a_element_op, - const BElementwiseOperation& b_element_op, - const CElementwiseOperation& c_element_op, - const CBlockClusterAdaptor& c_block_cluster_adaptor) + template + __device__ static void Run(const Argument& karg, void* __restrict__ p_shared_block) { + const FloatAB* p_a_grid = karg.p_a_grid; + const FloatAB* p_b_grid = karg.p_b_grid; + FloatC* p_c_grid = karg.p_c_grid; + const auto a_b_k0_m_k1_grid_desc = MakeAGridDescriptor_KBatch_K0_M_K1( + karg.M, karg.MPadded, karg.K, karg.StrideA, karg.k_batch, karg.K0, karg.KPadded); + const auto b_b_k0_n_k1_grid_desc = MakeBGridDescriptor_KBatch_K0_N_K1( + karg.K, karg.NPadded, karg.N, karg.StrideB, karg.k_batch, karg.K0, karg.KPadded); + const auto c_grid_desc_m_n = + MakeCGridDescriptor_M_N(karg.M, karg.N, karg.MPadded, karg.NPadded, karg.StrideC); + + const auto c_grid_desc_mblock_mperblock_nblock_nperblock = + MakeCGridDesc_MBlock_MPerBlock_NBlock_NPerBlock(c_grid_desc_m_n); + const AElementwiseOperation a_element_op = AElementwiseOperation{}; + const BElementwiseOperation b_element_op = BElementwiseOperation{}; + const CElementwiseOperation c_element_op = CElementwiseOperation{}; + const auto a_grid_buf = make_dynamic_buffer( p_a_grid, a_b_k0_m_k1_grid_desc.GetElementSpaceSize()); const auto b_grid_buf = make_dynamic_buffer( @@ -289,26 +506,16 @@ struct GridwiseGemm_bk0mk1_bk0nk1_mn_xdlops_v2r4r2 const auto K0 = a_b_k0_m_k1_grid_desc.GetLength(I1); - // divide block work by [M, N] - const auto block_work_idx = - c_block_cluster_adaptor.CalculateBottomIndex(make_multi_index(get_block_1d_id())); - - if(!c_block_cluster_adaptor.ValidCTileIndex( - make_tuple(block_work_idx[I1], 
block_work_idx[I2]), - make_tuple(c_grid_desc_mblock_mperblock_nblock_nperblock.GetLength(I0), - c_grid_desc_mblock_mperblock_nblock_nperblock.GetLength(I2)))) - { - return; - } - - const index_t k_batch_id = block_work_idx[I0]; + const index_t block_m_id = __builtin_amdgcn_readfirstlane(blockIdx.y); + const index_t block_n_id = __builtin_amdgcn_readfirstlane(blockIdx.x); + const index_t k_batch_id = __builtin_amdgcn_readfirstlane(blockIdx.z); // HACK: this force m/n_block_data_idx_on_grid into SGPR const index_t m_block_data_idx_on_grid = - __builtin_amdgcn_readfirstlane(block_work_idx[I1] * MPerBlock); + __builtin_amdgcn_readfirstlane(block_m_id * MPerBlock); const index_t n_block_data_idx_on_grid = - __builtin_amdgcn_readfirstlane(block_work_idx[I2] * NPerBlock); + __builtin_amdgcn_readfirstlane(block_n_id * NPerBlock); // lds max alignment constexpr auto max_lds_align = K1; @@ -444,7 +651,6 @@ struct GridwiseGemm_bk0mk1_bk0nk1_mn_xdlops_v2r4r2 // c_mtx[MPerBlock, NPerBlock] is distributed among threads, and saved in // register // sanity check - auto blockwise_gemm = BlockwiseGemmXdlops_k0mk1_k0nk1_m0n0m1n1m2m3m4n2_v1 + struct LStr + { + static std::string Get() { return ""; } + }; + + template <> + struct LStr + { + static std::string Get() { return "R"; } + }; + + template <> + struct LStr + { + static std::string Get() { return "C"; } + }; + + static std::string GetTypeString() + { + auto str = std::stringstream(); + + // clang-format off + str << "GemmXdlSplitKCShuffle_" + << getGemmSpecializationString(GemmSpec) << "_" + << std::string(ALayout::name)[0] + << std::string(BLayout::name)[0] + << std::string(CLayout::name)[0] + << "_" + << "B" << BlockSize << "_" + << "Vec" << ABlockTransferSrcScalarPerVector << "x" + << BBlockTransferSrcScalarPerVector << "x" + << CBlockTransferScalarPerVector_NWaveNPerXDL << "_" + << MPerBlock << "x" + << NPerBlock << "x" + << K0PerBlock << "x" + << K1 ; + // clang-format on + + return str.str(); + } }; } // namespace ck 
diff --git a/library/src/tensor_operation_instance/gpu/gemm_splitk/device_gemm_xdl_splitk_f16_f16_f16_mk_kn_mn_instance.cpp b/library/src/tensor_operation_instance/gpu/gemm_splitk/device_gemm_xdl_splitk_f16_f16_f16_mk_kn_mn_instance.cpp index 9b5ff40484..c4680db831 100644 --- a/library/src/tensor_operation_instance/gpu/gemm_splitk/device_gemm_xdl_splitk_f16_f16_f16_mk_kn_mn_instance.cpp +++ b/library/src/tensor_operation_instance/gpu/gemm_splitk/device_gemm_xdl_splitk_f16_f16_f16_mk_kn_mn_instance.cpp @@ -26,7 +26,8 @@ using S = ck::Sequence; using PassThrough = ck::tensor_operation::element_wise::PassThrough; -static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default; +// static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default; +static constexpr auto GemmMNPadding = ck::tensor_operation::device::GemmSpecialization::MNPadding; // Compilation parameters for a[m, k] * b[k, n] = c[m, n] using device_gemm_xdl_splitk_f16_f16_f16_mk_kn_mn_instances = std::tuple< @@ -35,14 +36,22 @@ using device_gemm_xdl_splitk_f16_f16_f16_mk_kn_mn_instances = std::tuple< //#########################| Type| Type| Type| Type| | | | Elementwise| Elementwise| Elementwise|Specialization| Size| Block| Block| Block| | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MXdlPerWave_MWaveMPerXdl| ScalarPerVector| //#########################| | | | | | | | Operation| Operation| Operation| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NXdlPerWave_NWaveNPerXdl| _NWaveNPerXdl| //#########################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | - 
DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmDefault, 256, 256, 128, 4, 8, 32, 32, 4, 2, S<1, 4, 64, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 64, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 2, 8, true, 1, 1, S<1, 32, 1, 8>, 8>, - DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmDefault, 256, 128, 256, 4, 8, 32, 32, 2, 4, S<1, 4, 64, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 64, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 4, 8, true, 1, 1, S<1, 32, 1, 8>, 8>, - DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmDefault, 128, 128, 128, 4, 8, 32, 32, 4, 2, S<1, 4, 32, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 32, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 4, 8, true, 1, 1, S<1, 16, 1, 8>, 8>, - DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmDefault, 256, 128, 128, 4, 8, 32, 32, 2, 2, S<1, 4, 64, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 64, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 2, 8, true, 1, 1, S<1, 32, 1, 8>, 8>, - DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmDefault, 128, 128, 64, 4, 8, 32, 32, 2, 2, S<1, 4, 32, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 32, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 2, 8, true, 1, 1, S<1, 32, 1, 4>, 8>, - DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmDefault, 128, 64, 128, 4, 8, 32, 32, 2, 2, S<1, 4, 32, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 32, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 4, 8, true, 1, 1, S<1, 16, 1, 8>, 8>, - DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmDefault, 256, 128, 64, 4, 8, 32, 32, 2, 1, S<1, 4, 64, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 
3, 8, 8, true, S<1, 4, 64, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 1, 8, true, 1, 1, S<1, 16, 1, 4>, 8>, - DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmDefault, 256, 64, 128, 4, 8, 32, 32, 1, 2, S<1, 4, 64, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 64, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 2, 8, true, 1, 1, S<1, 32, 1, 8>, 8> + DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmMNPadding, 256, 256, 128, 4, 8, 32, 32, 4, 2, S<1, 4, 64, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 64, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 2, 8, true, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmMNPadding, 256, 128, 256, 4, 8, 32, 32, 2, 4, S<1, 4, 64, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 64, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 4, 8, true, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmMNPadding, 128, 128, 128, 4, 8, 32, 32, 4, 2, S<1, 4, 32, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 32, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 4, 8, true, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmMNPadding, 256, 64, 192, 4, 8, 32, 32, 1, 3, S<1, 4, 64, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 48, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 2, 8, true, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmMNPadding, 256, 192, 64, 4, 8, 32, 32, 3, 1, S<1, 4, 64, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 32, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 2, 8, true, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, 
PassThrough, PassThrough, GemmMNPadding, 256, 128, 128, 4, 8, 32, 32, 2, 2, S<1, 4, 64, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 64, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 2, 8, true, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmMNPadding, 128, 128, 64, 4, 8, 32, 32, 2, 2, S<1, 4, 32, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 32, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 2, 8, true, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmMNPadding, 128, 64, 128, 4, 8, 32, 32, 2, 2, S<1, 4, 32, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 32, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 4, 8, true, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmMNPadding, 256, 128, 64, 4, 8, 32, 32, 2, 1, S<1, 4, 64, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 64, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 1, 8, true, 1, 1, S<1, 16, 1, 4>, 8>, + DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmMNPadding, 256, 64, 128, 4, 8, 32, 32, 1, 2, S<1, 4, 64, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 64, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 2, 8, true, 1, 1, S<1, 32, 1, 8>, 8>, + DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmMNPadding, 128, 32, 192, 4, 8, 32, 32, 1, 3, S<1, 4, 32, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 24, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 8, 8, true, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmMNPadding, 128, 192, 32, 4, 8, 32, 32, 3, 1, S<1, 4, 32, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 32, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 
1, 8, true, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmMNPadding, 128, 32, 64, 4, 8, 32, 32, 1, 1, S<1, 4, 32, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 32, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 2, 8, true, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmMNPadding, 128, 64, 32, 4, 8, 32, 32, 1, 1, S<1, 4, 32, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 32, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 1, 8, true, 1, 1, S<1, 32, 1, 4>, 8>, + DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmMNPadding, 128, 32, 128, 4, 8, 32, 32, 1, 2, S<1, 4, 32, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 32, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 4, 8, true, 1, 1, S<1, 16, 1, 8>, 8>, + DeviceGemmXdlSplitKCShuffle< F16, F16, F16, F32, Row, Row, Row, PassThrough, PassThrough, PassThrough, GemmMNPadding, 128, 128, 32, 4, 8, 32, 32, 2, 1, S<1, 4, 32, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, true, S<1, 4, 32, 1>, S<0, 1, 3, 2>, S<0, 1, 3, 2>, 2, 1, 8, true, 1, 1, S<1, 32, 1, 4>, 8> // clang-format on >; From 091570f594e6e6b7109e290ce878f4f9b8ad1e9f Mon Sep 17 00:00:00 2001 From: Haocong WANG Date: Thu, 30 Mar 2023 08:03:55 +0800 Subject: [PATCH 30/31] fix 3rd dword of buffer source descriptor (#659) --- include/ck/ck.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/ck/ck.hpp b/include/ck/ck.hpp index 5009dec5e9..9853f19a47 100644 --- a/include/ck/ck.hpp +++ b/include/ck/ck.hpp @@ -36,7 +36,7 @@ #elif defined(__gfx1030__) // for GPU code #define CK_BUFFER_RESOURCE_3RD_DWORD 0x31014000 #elif defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) // for GPU code -#define CK_BUFFER_RESOURCE_3RD_DWORD 0x10020000 +#define CK_BUFFER_RESOURCE_3RD_DWORD 0x31004000 #endif // FMA instruction 
From fde6d2742bc7fe56bb86864ff960f8ea26082dfc Mon Sep 17 00:00:00 2001 From: zjing14 Date: Thu, 30 Mar 2023 13:30:43 -0500 Subject: [PATCH 31/31] add fp64 instances (#658) Co-authored-by: root --- client_example/04_contraction/CMakeLists.txt | 14 +- ...near.cpp => contraction_bilinear_fp32.cpp} | 0 .../contraction_bilinear_fp64.cpp | 281 ++++++++++++++++++ ...n_scale.cpp => contraction_scale_fp32.cpp} | 0 .../04_contraction/contraction_scale_fp64.cpp | 270 +++++++++++++++++ .../device_operation_instance_factory.hpp | 1 + .../gpu/contraction_bilinear.hpp | 66 ++++ .../gpu/contraction_scale.hpp | 66 ++++ .../gpu/contraction_bilinear/CMakeLists.txt | 6 + ..._shuffle_f64_f64_f64_f64_kknn_instance.cpp | 76 +++++ ..._shuffle_f64_f64_f64_f64_knnn_instance.cpp | 76 +++++ ..._shuffle_f64_f64_f64_f64_mknn_instance.cpp | 76 +++++ ..._shuffle_f64_f64_f64_f64_mnnn_instance.cpp | 76 +++++ .../gpu/contraction_scale/CMakeLists.txt | 6 + ...xdl_c_shuffle_f64_f64_f64_kkn_instance.cpp | 75 +++++ ...xdl_c_shuffle_f64_f64_f64_knn_instance.cpp | 75 +++++ ...xdl_c_shuffle_f64_f64_f64_mkn_instance.cpp | 75 +++++ ...xdl_c_shuffle_f64_f64_f64_mnn_instance.cpp | 75 +++++ 18 files changed, 1310 insertions(+), 4 deletions(-) rename client_example/04_contraction/{contraction_bilinear.cpp => contraction_bilinear_fp32.cpp} (100%) create mode 100644 client_example/04_contraction/contraction_bilinear_fp64.cpp rename client_example/04_contraction/{contraction_scale.cpp => contraction_scale_fp32.cpp} (100%) create mode 100644 client_example/04_contraction/contraction_scale_fp64.cpp create mode 100644 library/src/tensor_operation_instance/gpu/contraction_bilinear/device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_kknn_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/contraction_bilinear/device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_knnn_instance.cpp create mode 100644 
library/src/tensor_operation_instance/gpu/contraction_bilinear/device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mknn_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/contraction_bilinear/device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mnnn_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/contraction_scale/device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_kkn_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/contraction_scale/device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_knn_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/contraction_scale/device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mkn_instance.cpp create mode 100644 library/src/tensor_operation_instance/gpu/contraction_scale/device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mnn_instance.cpp diff --git a/client_example/04_contraction/CMakeLists.txt b/client_example/04_contraction/CMakeLists.txt index 971d5d9f1c..7ffedfeef3 100644 --- a/client_example/04_contraction/CMakeLists.txt +++ b/client_example/04_contraction/CMakeLists.txt @@ -1,8 +1,14 @@ -add_executable(client_contraction_scale contraction_scale.cpp) -target_link_libraries(client_contraction_scale PRIVATE composable_kernel::device_operations) +add_executable(client_contraction_scale_fp32 contraction_scale_fp32.cpp) +target_link_libraries(client_contraction_scale_fp32 PRIVATE composable_kernel::device_operations) -add_executable(client_contraction_bilinear contraction_bilinear.cpp) -target_link_libraries(client_contraction_bilinear PRIVATE composable_kernel::device_operations) +add_executable(client_contraction_bilinear_fp32 contraction_bilinear_fp32.cpp) +target_link_libraries(client_contraction_bilinear_fp32 PRIVATE composable_kernel::device_operations) + +add_executable(client_contraction_scale_fp64 contraction_scale_fp64.cpp) 
+target_link_libraries(client_contraction_scale_fp64 PRIVATE composable_kernel::device_operations) + +add_executable(client_contraction_bilinear_fp64 contraction_bilinear_fp64.cpp) +target_link_libraries(client_contraction_bilinear_fp64 PRIVATE composable_kernel::device_operations) add_executable(contraction_g1m2n3k1_add_xdl_fp16 contraction_g1m2n3k1_add_xdl_fp16.cpp) target_link_libraries(contraction_g1m2n3k1_add_xdl_fp16 PRIVATE composable_kernel::device_operations) diff --git a/client_example/04_contraction/contraction_bilinear.cpp b/client_example/04_contraction/contraction_bilinear_fp32.cpp similarity index 100% rename from client_example/04_contraction/contraction_bilinear.cpp rename to client_example/04_contraction/contraction_bilinear_fp32.cpp diff --git a/client_example/04_contraction/contraction_bilinear_fp64.cpp b/client_example/04_contraction/contraction_bilinear_fp64.cpp new file mode 100644 index 0000000000..9238e4cd80 --- /dev/null +++ b/client_example/04_contraction/contraction_bilinear_fp64.cpp @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include +#include +#include +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/device_contraction_multiple_d.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +#include "ck/library/tensor_operation_instance/gpu/contraction_bilinear.hpp" +#include "ck/library/utility/numeric.hpp" + +using F64 = double; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using Bilinear = ck::tensor_operation::element_wise::Bilinear; + +using AElementOp = PassThrough; +using BElementOp = PassThrough; +using CDEElementOp = Bilinear; + +using ADataType = F64; +using BDataType = F64; +using AccDataType = F64; +using CShuffleDataType = F64; +using DDataType = F64; +using DsDataType = ck::Tuple; +using EDataType = F64; + +static constexpr ck::index_t NumDimM = 2; +static constexpr ck::index_t NumDimN = 2; +static constexpr ck::index_t NumDimK = 2; + +struct SimpleDeviceMem +{ + SimpleDeviceMem() = delete; + + SimpleDeviceMem(std::size_t mem_size) : p_mem_{} + { + (void)hipMalloc(static_cast(&p_mem_), mem_size); + } + + void* GetDeviceBuffer() { return p_mem_; } + + ~SimpleDeviceMem() { (void)hipFree(p_mem_); } + + void* p_mem_; +}; + +int main(int argc, char* argv[]) +{ +// kknn +#if 1 + // A[M0, M1, K0, K1] + std::vector a_ms_ks_lengths{30, 128, 32, 64}; + std::vector a_ms_ks_strides{524288, 4096, 128, 1}; + // B[N0, N1, K0, K1] + std::vector b_ns_ks_lengths{32, 64, 32, 64}; + std::vector b_ns_ks_strides{524288, 4096, 128, 1}; + // D[M0, M1, N0, N1] + std::vector d_ms_ns_lengths{30, 128, 32, 64}; + std::vector d_ms_ns_strides{524288, 4096, 128, 1}; + // E[M0, M1, N0, N1] + std::vector e_ms_ns_lengths{30, 128, 32, 64}; + std::vector e_ms_ns_strides{524288, 4096, 128, 1}; +// knnn +#elif 0 + // A[M0, M1, K0, K1] + std::vector a_ms_ks_lengths{30, 128, 32, 64}; + std::vector a_ms_ks_strides{524288, 4096, 128, 1}; + // B[N0, N1, K0, K1] + std::vector 
b_ns_ks_lengths{32, 64, 32, 64}; + std::vector b_ns_ks_strides{64, 1, 131072, 2048}; + // D[M0, M1, N0, N1] + std::vector d_ms_ns_lengths{30, 128, 32, 64}; + std::vector d_ms_ns_strides{524288, 4096, 128, 1}; + // E[M0, M1, N0, N1] + std::vector e_ms_ns_lengths{30, 128, 32, 64}; + std::vector e_ms_ns_strides{524288, 4096, 128, 1}; +// mknn +#elif 0 + // A[M0, M1, K0, K1] + std::vector a_ms_ks_lengths{30, 128, 32, 64}; + std::vector a_ms_ks_strides{128, 1, 245760, 3840}; + // B[N0, N1, K0, K1] + std::vector b_ns_ks_lengths{32, 64, 32, 64}; + std::vector b_ns_ks_strides{524288, 4096, 128, 1}; + // D[M0, M1, N0, N1] + std::vector d_ms_ns_lengths{30, 128, 32, 64}; + std::vector d_ms_ns_strides{524288, 4096, 128, 1}; + // E[M0, M1, N0, N1] + std::vector e_ms_ns_lengths{30, 128, 32, 64}; + std::vector e_ms_ns_strides{524288, 4096, 128, 1}; +// mnnn +#elif 0 + // A[M0, M1, K0, K1] + std::vector a_ms_ks_lengths{30, 128, 32, 64}; + std::vector a_ms_ks_strides{128, 1, 245760, 3840}; + // B[N0, N1, K0, K1] + std::vector b_ns_ks_lengths{32, 64, 32, 64}; + std::vector b_ns_ks_strides{64, 1, 131072, 2048}; + // D[M0, M1, N0, N1] + std::vector d_ms_ns_lengths{30, 128, 32, 64}; + std::vector d_ms_ns_strides{524288, 4096, 128, 1}; + // E[M0, M1, N0, N1] + std::vector e_ms_ns_lengths{30, 128, 32, 64}; + std::vector e_ms_ns_strides{524288, 4096, 128, 1}; +#endif + + float alpha = 1.f; + float beta = 1.f; + + if(argc == 1) + { + // use default case + } + else if(argc == 25) + { + const ck::index_t M0 = std::stoi(argv[1]); + const ck::index_t M1 = std::stoi(argv[2]); + + const ck::index_t N0 = std::stoi(argv[3]); + const ck::index_t N1 = std::stoi(argv[4]); + + const ck::index_t K0 = std::stoi(argv[5]); + const ck::index_t K1 = std::stoi(argv[6]); + + a_ms_ks_lengths = {M0, M1, K0, K1}; + a_ms_ks_strides = { + std::stoi(argv[7]), std::stoi(argv[8]), std::stoi(argv[9]), std::stoi(argv[10])}; + + b_ns_ks_lengths = {N0, N1, K0, K1}; + b_ns_ks_strides = { + std::stoi(argv[11]), 
std::stoi(argv[12]), std::stoi(argv[13]), std::stoi(argv[14])}; + + d_ms_ns_lengths = {M0, M1, N0, N1}; + d_ms_ns_strides = { + std::stoi(argv[15]), std::stoi(argv[16]), std::stoi(argv[17]), std::stoi(argv[18])}; + + e_ms_ns_lengths = {M0, M1, N0, N1}; + e_ms_ns_strides = { + std::stoi(argv[19]), std::stoi(argv[20]), std::stoi(argv[21]), std::stoi(argv[22])}; + + alpha = std::stof(argv[23]); + beta = std::stof(argv[24]); + } + else + { + printf("arg1 to 6: M0, M1, N0, N1, K0, K1\n"); + printf("arg7 to 10: Stride_A_M0, Stride_A_M1, Stride_A_K0, Stride_A_K1\n"); + printf("arg11 to 14: Stride_B_N0, Stride_B_N1, Stride_B_K0, Stride_B_K1\n"); + printf("arg15 to 18: Stride_D_M0, Stride_D_M1, Stride_D_N0, Stride_D_N1\n"); + printf("arg19 to 22: Stride_E_M0, Stride_E_M1, Stride_E_N0, Stride_E_N1\n"); + printf("arg23 to 24: alpha, beta\n"); + exit(0); + } + + auto f_tensor_space_size = [](auto lengths, auto strides) { + std::size_t space_size = 1; + for(std::size_t i = 0; i < lengths.size(); ++i) + { + space_size += (lengths[i] - 1) * strides[i]; + } + return space_size; + }; + + SimpleDeviceMem a_device_buf(sizeof(ADataType) * + f_tensor_space_size(a_ms_ks_lengths, a_ms_ks_strides)); + SimpleDeviceMem b_device_buf(sizeof(BDataType) * + f_tensor_space_size(b_ns_ks_lengths, b_ns_ks_strides)); + SimpleDeviceMem d_device_buf(sizeof(DDataType) * + f_tensor_space_size(d_ms_ns_lengths, d_ms_ns_strides)); + SimpleDeviceMem e_device_buf(sizeof(EDataType) * + f_tensor_space_size(e_ms_ns_lengths, e_ms_ns_strides)); + + using DeviceOp = ck::tensor_operation::device::DeviceContractionMultipleD< + NumDimM, + NumDimN, + NumDimK, + ADataType, + BDataType, + ck::Tuple, + EDataType, + ck::tensor_operation::element_wise::PassThrough, + ck::tensor_operation::element_wise::PassThrough, + ck::tensor_operation::element_wise::Bilinear>; + + // get device op instances + const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory< + DeviceOp>::GetInstances(); + + 
std::cout << "found " << op_ptrs.size() << " instances" << std::endl; + + const auto a_element_op = AElementOp{}; + const auto b_element_op = BElementOp{}; + const auto cde_element_op = CDEElementOp{alpha, beta}; + + std::string best_op_name; + bool found = false; + int best_op_id = -1; + float best_ave_time = 0; + float best_tflops = 0; + float best_gb_per_sec = 0; + + // profile device operation instances + std::cout << "Run all instances and do timing" << std::endl; + + for(int i = 0; i < op_ptrs.size(); ++i) + { + auto& op_ptr = op_ptrs[i]; + + auto argument_ptr = + op_ptr->MakeArgumentPointer(a_device_buf.GetDeviceBuffer(), + b_device_buf.GetDeviceBuffer(), + std::array{d_device_buf.GetDeviceBuffer()}, + e_device_buf.GetDeviceBuffer(), + a_ms_ks_lengths, + a_ms_ks_strides, + b_ns_ks_lengths, + b_ns_ks_strides, + std::array, 1>{d_ms_ns_lengths}, + std::array, 1>{d_ms_ns_strides}, + e_ms_ns_lengths, + e_ms_ns_strides, + a_element_op, + b_element_op, + cde_element_op); + + auto invoker_ptr = op_ptr->MakeInvokerPointer(); + + std::string op_name = op_ptr->GetTypeString(); + + if(op_ptr->IsSupportedArgument(argument_ptr.get())) + { + float ave_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true}); + + ck::index_t M = ck::accumulate_n( + e_ms_ns_lengths.begin(), NumDimM, 1, std::multiplies<>{}); + + ck::index_t N = ck::accumulate_n( + e_ms_ns_lengths.begin() + NumDimM, NumDimN, 1, std::multiplies<>{}); + + ck::index_t K = ck::accumulate_n( + a_ms_ks_lengths.begin() + NumDimM, NumDimK, 1, std::multiplies<>{}); + + std::size_t flop = std::size_t(2) * M * N * K; + std::size_t num_btype = sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + + sizeof(DDataType) * M * N + sizeof(EDataType) * M * N; + + float tflops = static_cast(flop) / 1.E9 / ave_time; + + float gb_per_sec = num_btype / 1.E6 / ave_time; + + std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops << " TFlops, " + << gb_per_sec << " GB/s, " << op_name << std::endl; + + 
if(tflops > best_tflops) + { + found = true; + best_op_id = i; + best_op_name = op_name; + best_tflops = tflops; + best_ave_time = ave_time; + best_gb_per_sec = gb_per_sec; + } + } + else + { + std::cout << op_name << " does not support this problem" << std::endl; + } + } + + std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, " + << best_gb_per_sec << " GB/s, " << best_op_name << std::endl; + + return 0; +} diff --git a/client_example/04_contraction/contraction_scale.cpp b/client_example/04_contraction/contraction_scale_fp32.cpp similarity index 100% rename from client_example/04_contraction/contraction_scale.cpp rename to client_example/04_contraction/contraction_scale_fp32.cpp diff --git a/client_example/04_contraction/contraction_scale_fp64.cpp b/client_example/04_contraction/contraction_scale_fp64.cpp new file mode 100644 index 0000000000..3c36aa21eb --- /dev/null +++ b/client_example/04_contraction/contraction_scale_fp64.cpp @@ -0,0 +1,270 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. 
+ +#include +#include +#include +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/device_contraction_multiple_d.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +#include "ck/library/tensor_operation_instance/gpu/contraction_scale.hpp" +#include "ck/library/utility/numeric.hpp" + +using F64 = double; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using Scale = ck::tensor_operation::element_wise::Scale; + +using AElementOp = PassThrough; +using BElementOp = PassThrough; +using CDEElementOp = Scale; + +using ADataType = F64; +using BDataType = F64; +using AccDataType = F64; +using CShuffleDataType = F64; +using DsDataType = ck::Tuple<>; +using EDataType = F64; + +static constexpr ck::index_t NumDimM = 2; +static constexpr ck::index_t NumDimN = 2; +static constexpr ck::index_t NumDimK = 2; + +struct SimpleDeviceMem +{ + SimpleDeviceMem() = delete; + + SimpleDeviceMem(std::size_t mem_size) : p_mem_{} + { + (void)hipMalloc(static_cast(&p_mem_), mem_size); + } + + void* GetDeviceBuffer() { return p_mem_; } + + ~SimpleDeviceMem() { (void)hipFree(p_mem_); } + + void* p_mem_; +}; + +int main(int argc, char* argv[]) +{ +// kkn +#if 1 + // A[M0, M1, K0, K1] + std::vector a_ms_ks_lengths{30, 128, 32, 64}; + std::vector a_ms_ks_strides{524288, 4096, 128, 1}; + // B[N0, N1, K0, K1] + std::vector b_ns_ks_lengths{32, 64, 32, 64}; + std::vector b_ns_ks_strides{524288, 4096, 128, 1}; + // D[M0, M1, N0, N1] + std::vector d_ms_ns_lengths{30, 128, 32, 64}; + std::vector d_ms_ns_strides{524288, 4096, 128, 1}; + // E[M0, M1, N0, N1] + std::vector e_ms_ns_lengths{30, 128, 32, 64}; + std::vector e_ms_ns_strides{524288, 4096, 128, 1}; +// knn +#elif 0 + // A[M0, M1, K0, K1] + std::vector a_ms_ks_lengths{30, 128, 32, 64}; + std::vector a_ms_ks_strides{524288, 4096, 128, 1}; + // B[N0, N1, K0, K1] + std::vector b_ns_ks_lengths{32, 64, 32, 64}; + 
std::vector b_ns_ks_strides{64, 1, 131072, 2048}; + // D[M0, M1, N0, N1] + std::vector d_ms_ns_lengths{30, 128, 32, 64}; + std::vector d_ms_ns_strides{524288, 4096, 128, 1}; + // E[M0, M1, N0, N1] + std::vector e_ms_ns_lengths{30, 128, 32, 64}; + std::vector e_ms_ns_strides{524288, 4096, 128, 1}; +// mkn +#elif 0 + // A[M0, M1, K0, K1] + std::vector a_ms_ks_lengths{30, 128, 32, 64}; + std::vector a_ms_ks_strides{128, 1, 245760, 3840}; + // B[N0, N1, K0, K1] + std::vector b_ns_ks_lengths{32, 64, 32, 64}; + std::vector b_ns_ks_strides{524288, 4096, 128, 1}; + // D[M0, M1, N0, N1] + std::vector d_ms_ns_lengths{30, 128, 32, 64}; + std::vector d_ms_ns_strides{524288, 4096, 128, 1}; + // E[M0, M1, N0, N1] + std::vector e_ms_ns_lengths{30, 128, 32, 64}; + std::vector e_ms_ns_strides{524288, 4096, 128, 1}; +// mnn +#elif 0 + // A[M0, M1, K0, K1] + std::vector a_ms_ks_lengths{30, 128, 32, 64}; + std::vector a_ms_ks_strides{128, 1, 245760, 3840}; + // B[N0, N1, K0, K1] + std::vector b_ns_ks_lengths{32, 64, 32, 64}; + std::vector b_ns_ks_strides{64, 1, 131072, 2048}; + // D[M0, M1, N0, N1] + std::vector d_ms_ns_lengths{30, 128, 32, 64}; + std::vector d_ms_ns_strides{524288, 4096, 128, 1}; + // E[M0, M1, N0, N1] + std::vector e_ms_ns_lengths{30, 128, 32, 64}; + std::vector e_ms_ns_strides{524288, 4096, 128, 1}; +#endif + + float scale = 1.f; + + if(argc == 1) + { + // use default case + } + else if(argc == 20) + { + const ck::index_t M0 = std::stoi(argv[1]); + const ck::index_t M1 = std::stoi(argv[2]); + + const ck::index_t N0 = std::stoi(argv[3]); + const ck::index_t N1 = std::stoi(argv[4]); + + const ck::index_t K0 = std::stoi(argv[5]); + const ck::index_t K1 = std::stoi(argv[6]); + + a_ms_ks_lengths = {M0, M1, K0, K1}; + a_ms_ks_strides = { + std::stoi(argv[7]), std::stoi(argv[8]), std::stoi(argv[9]), std::stoi(argv[10])}; + + b_ns_ks_lengths = {N0, N1, K0, K1}; + b_ns_ks_strides = { + std::stoi(argv[11]), std::stoi(argv[12]), std::stoi(argv[13]), std::stoi(argv[14])}; + + 
e_ms_ns_lengths = {M0, M1, N0, N1}; + e_ms_ns_strides = { + std::stoi(argv[15]), std::stoi(argv[16]), std::stoi(argv[17]), std::stoi(argv[18])}; + + scale = std::stof(argv[19]); + } + else + { + printf("arg1 to 6: M0, M1, N0, N1, K0, K1\n"); + printf("arg7 to 10: Stride_A_M0, Stride_A_M1, Stride_A_K0, Stride_A_K1\n"); + printf("arg11 to 14: Stride_B_N0, Stride_B_N1, Stride_B_K0, Stride_B_K1\n"); + printf("arg15 to 18: Stride_E_M0, Stride_E_M1, Stride_E_N0, Stride_E_N1\n"); + printf("arg19: scale\n"); + exit(0); + } + + auto f_tensor_space_size = [](auto lengths, auto strides) { + std::size_t space_size = 1; + for(std::size_t i = 0; i < lengths.size(); ++i) + { + space_size += (lengths[i] - 1) * strides[i]; + } + return space_size; + }; + + SimpleDeviceMem a_device_buf(sizeof(ADataType) * + f_tensor_space_size(a_ms_ks_lengths, a_ms_ks_strides)); + SimpleDeviceMem b_device_buf(sizeof(BDataType) * + f_tensor_space_size(b_ns_ks_lengths, b_ns_ks_strides)); + SimpleDeviceMem e_device_buf(sizeof(EDataType) * + f_tensor_space_size(e_ms_ns_lengths, e_ms_ns_strides)); + + using DeviceOp = ck::tensor_operation::device::DeviceContractionMultipleD< + NumDimM, + NumDimN, + NumDimK, + ADataType, + BDataType, + ck::Tuple<>, + EDataType, + ck::tensor_operation::element_wise::PassThrough, + ck::tensor_operation::element_wise::PassThrough, + ck::tensor_operation::element_wise::Scale>; + + // get device op instances + const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory< + DeviceOp>::GetInstances(); + + std::cout << "found " << op_ptrs.size() << " instances" << std::endl; + + const auto a_element_op = AElementOp{}; + const auto b_element_op = BElementOp{}; + const auto cde_element_op = CDEElementOp{scale}; + + std::string best_op_name; + bool found = false; + int best_op_id = -1; + float best_ave_time = 0; + float best_tflops = 0; + float best_gb_per_sec = 0; + + // profile device operation instances + std::cout << "Run all instances and do 
timing" << std::endl; + + for(int i = 0; i < op_ptrs.size(); ++i) + { + auto& op_ptr = op_ptrs[i]; + + auto argument_ptr = op_ptr->MakeArgumentPointer(a_device_buf.GetDeviceBuffer(), + b_device_buf.GetDeviceBuffer(), + std::array{}, + e_device_buf.GetDeviceBuffer(), + a_ms_ks_lengths, + a_ms_ks_strides, + b_ns_ks_lengths, + b_ns_ks_strides, + std::array, 0>{}, + std::array, 0>{}, + e_ms_ns_lengths, + e_ms_ns_strides, + a_element_op, + b_element_op, + cde_element_op); + + auto invoker_ptr = op_ptr->MakeInvokerPointer(); + + std::string op_name = op_ptr->GetTypeString(); + + if(op_ptr->IsSupportedArgument(argument_ptr.get())) + { + float ave_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, true}); + + ck::index_t M = ck::accumulate_n( + e_ms_ns_lengths.begin(), NumDimM, 1, std::multiplies<>{}); + + ck::index_t N = ck::accumulate_n( + e_ms_ns_lengths.begin() + NumDimM, NumDimN, 1, std::multiplies<>{}); + + ck::index_t K = ck::accumulate_n( + a_ms_ks_lengths.begin() + NumDimM, NumDimK, 1, std::multiplies<>{}); + + std::size_t flop = std::size_t(2) * M * N * K; + std::size_t num_btype = + sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(EDataType) * M * N; + + float tflops = static_cast(flop) / 1.E9 / ave_time; + + float gb_per_sec = num_btype / 1.E6 / ave_time; + + std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops << " TFlops, " + << gb_per_sec << " GB/s, " << op_name << std::endl; + + if(tflops > best_tflops) + { + found = true; + best_op_id = i; + best_op_name = op_name; + best_tflops = tflops; + best_ave_time = ave_time; + best_gb_per_sec = gb_per_sec; + } + } + else + { + std::cout << op_name << " does not support this problem" << std::endl; + } + } + + std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, " + << best_gb_per_sec << " GB/s, " << best_op_name << std::endl; + + return 0; +} diff --git 
a/library/include/ck/library/tensor_operation_instance/device_operation_instance_factory.hpp b/library/include/ck/library/tensor_operation_instance/device_operation_instance_factory.hpp index 0bde4919af..f176cb91e0 100644 --- a/library/include/ck/library/tensor_operation_instance/device_operation_instance_factory.hpp +++ b/library/include/ck/library/tensor_operation_instance/device_operation_instance_factory.hpp @@ -26,6 +26,7 @@ using Empty_Tuple = ck::Tuple<>; using F16_Tuple = ck::Tuple; using F16_F16_Tuple = ck::Tuple; +using F64_Tuple = ck::Tuple; using F32_Tuple = ck::Tuple; using I32_Tuple = ck::Tuple; using I32_F32_Tuple = ck::Tuple; diff --git a/library/include/ck/library/tensor_operation_instance/gpu/contraction_bilinear.hpp b/library/include/ck/library/tensor_operation_instance/gpu/contraction_bilinear.hpp index a0cea7e390..c116d999da 100644 --- a/library/include/ck/library/tensor_operation_instance/gpu/contraction_bilinear.hpp +++ b/library/include/ck/library/tensor_operation_instance/gpu/contraction_bilinear.hpp @@ -19,6 +19,7 @@ namespace tensor_operation { namespace device { namespace instance { +// float void add_device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f32_f32_f32_f32_kknn_instance( std::vector>>& instances); +// double +void add_device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_kknn_instance( + std::vector>>& instances); + +void add_device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_knnn_instance( + std::vector>>& instances); + +void add_device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mknn_instance( + std::vector>>& instances); + +void add_device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mnnn_instance( + std::vector>>& instances); + // Contraction + Bilinear template && is_same_v && + is_same_v && is_same_v) + { + if constexpr(NumDimM == 2 && NumDimN == 2 && NumDimK == 2) + { + add_device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_kknn_instance( + 
op_ptrs); + add_device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_knnn_instance( + op_ptrs); + add_device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mnnn_instance( + op_ptrs); + add_device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mknn_instance( + op_ptrs); + } + } + return op_ptrs; } }; diff --git a/library/include/ck/library/tensor_operation_instance/gpu/contraction_scale.hpp b/library/include/ck/library/tensor_operation_instance/gpu/contraction_scale.hpp index e921ecd47a..e3f07606c2 100644 --- a/library/include/ck/library/tensor_operation_instance/gpu/contraction_scale.hpp +++ b/library/include/ck/library/tensor_operation_instance/gpu/contraction_scale.hpp @@ -19,6 +19,7 @@ namespace tensor_operation { namespace device { namespace instance { +// float void add_device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f32_f32_f32_kkn_instance( std::vector>>& instances); +// double +void add_device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_kkn_instance( + std::vector>>& instances); + +void add_device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_knn_instance( + std::vector>>& instances); + +void add_device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mkn_instance( + std::vector>>& instances); + +void add_device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mnn_instance( + std::vector>>& instances); + // Contraction + Scale template && is_same_v && + is_same_v) + { + if constexpr(NumDimM == 2 && NumDimN == 2 && NumDimK == 2) + { + add_device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_kkn_instance( + op_ptrs); + add_device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_knn_instance( + op_ptrs); + add_device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mkn_instance( + op_ptrs); + add_device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mnn_instance( + op_ptrs); + } + } + return op_ptrs; } }; diff --git 
a/library/src/tensor_operation_instance/gpu/contraction_bilinear/CMakeLists.txt b/library/src/tensor_operation_instance/gpu/contraction_bilinear/CMakeLists.txt index ffd6a6a7be..d2a0a3d0fb 100644 --- a/library/src/tensor_operation_instance/gpu/contraction_bilinear/CMakeLists.txt +++ b/library/src/tensor_operation_instance/gpu/contraction_bilinear/CMakeLists.txt @@ -1,7 +1,13 @@ add_instance_library(device_contraction_bilinear_instance + #float device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f32_f32_f32_f32_kknn_instance.cpp device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f32_f32_f32_f32_knnn_instance.cpp device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f32_f32_f32_f32_mknn_instance.cpp device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f32_f32_f32_f32_mnnn_instance.cpp + #double + device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_kknn_instance.cpp + device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_knnn_instance.cpp + device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mknn_instance.cpp + device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mnnn_instance.cpp ) diff --git a/library/src/tensor_operation_instance/gpu/contraction_bilinear/device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_kknn_instance.cpp b/library/src/tensor_operation_instance/gpu/contraction_bilinear/device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_kknn_instance.cpp new file mode 100644 index 0000000000..093b2f0e98 --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/contraction_bilinear/device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_kknn_instance.cpp @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. + +// This (ifndef) is a hack to use customized behavior for buffer load rather than using default +// setting Don't use this hack unless absolutely necessary! 
+// FIXME: make the behavior of buffer load a configurable (template) parameter of each device op +#define CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK 1 + +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_contraction_multiple_d_xdl_cshuffle.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +using F64 = double; +using F64_Tuple = ck::Tuple; + +template +using S = ck::Sequence; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using Bilinear = ck::tensor_operation::element_wise::Bilinear; + +static constexpr auto GemmMNKPadding = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +// A[m0, m1, k0, k1] * B[n0, n1, k0, k1] + D[m0, m1, n0, n1] = E[m0, m1, n0, n1] +// k/k/n/n are the fast changing dimension for A/B/D/E +using device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_kknn_instance = std::tuple< + // clang-format off + //#####################################| NumDimM| NumDimN| NumDimK| AData| BData| AccData| CShuffle| DsData| EData| A| B| CDE| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| + //#####################################| | | | Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| 
SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| + //#####################################| | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| + //#####################################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 128, 128, 16, 2, 2, 16, 16, 4, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 128, 128, 64, 16, 2, 2, 16, 16, 4, 4, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 8>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 128, 64, 128, 16, 2, 2, 16, 16, 4, 4, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 8, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 64, 64, 64, 16, 2, 2, 16, 16, 4, 4, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 8, 1, 8>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, 
GemmMNKPadding, 1, 256, 128, 64, 16, 2, 2, 16, 16, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 64, 128, 16, 2, 2, 16, 16, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 128, 128, 32, 16, 2, 2, 16, 16, 4, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 8>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 128, 32, 128, 16, 2, 2, 16, 16, 2, 4, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 8, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 64, 64, 32, 16, 2, 2, 16, 16, 4, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 8, 1, 8>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 64, 32, 64, 16, 2, 2, 16, 16, 2, 4, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 8, 1, 8>, 1> + // clang-format on + >; + +void add_device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_kknn_instance( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, + device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_kknn_instance{}); +} + +} // 
namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/contraction_bilinear/device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_knnn_instance.cpp b/library/src/tensor_operation_instance/gpu/contraction_bilinear/device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_knnn_instance.cpp new file mode 100644 index 0000000000..0f683e5c28 --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/contraction_bilinear/device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_knnn_instance.cpp @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. + +// This (ifndef) is a hack to use customized behavior for buffer load rather than using default +// setting Don't use this hack unless absolutely necessary! +// FIXME: make the behavior of buffer load a configurable (template) parameter of each device op +#define CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK 1 + +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_contraction_multiple_d_xdl_cshuffle.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +using F64 = double; +using F64_Tuple = ck::Tuple; + +template +using S = ck::Sequence; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using Bilinear = ck::tensor_operation::element_wise::Bilinear; + +static constexpr auto GemmMNKPadding = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +// A[m0, m1, k0, k1] * B[n0, n1, k0, k1] + D[m0, m1, n0, n1] = E[m0, m1, n0, n1] +// 
k/n/n/n are the fast changing dimension for A/B/D/E +using device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_knnn_instance = std::tuple< + // clang-format off + //#####################################| NumDimM| NumDimN| NumDimK| AData| BData| AccData| CShuffle| DsData| EData| A| B| CDE| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| + //#####################################| | | | Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| + //#####################################| | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| + //#####################################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 128, 128, 16, 2, 1, 16, 16, 4, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, 
GemmMNKPadding, 1, 256, 128, 128, 16, 2, 2, 16, 16, 4, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 128, 128, 64, 16, 2, 1, 16, 16, 4, 4, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<8, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 16, 1, 8>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 128, 128, 64, 16, 2, 2, 16, 16, 4, 4, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 16, 1, 8>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 128, 64, 128, 16, 2, 1, 16, 16, 4, 4, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 8, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 128, 64, 128, 16, 2, 2, 16, 16, 4, 4, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 8, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 128, 64, 16, 2, 1, 16, 16, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 128, 64, 16, 2, 2, 16, 16, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 
16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 64, 128, 16, 2, 1, 16, 16, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 64, 128, 16, 2, 2, 16, 16, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1> + // clang-format on + >; + +void add_device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_knnn_instance( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, + device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_knnn_instance{}); +} + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/contraction_bilinear/device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mknn_instance.cpp b/library/src/tensor_operation_instance/gpu/contraction_bilinear/device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mknn_instance.cpp new file mode 100644 index 0000000000..e384993aed --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/contraction_bilinear/device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mknn_instance.cpp @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. + +// This (ifndef) is a hack to use customized behavior for buffer load rather than using default +// setting Don't use this hack unless absolutely necessary! 
+// FIXME: make the behavior of buffer load a configurable (template) parameter of each device op +#define CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK 1 + +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_contraction_multiple_d_xdl_cshuffle.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +using F64 = double; +using F64_Tuple = ck::Tuple; + +template +using S = ck::Sequence; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using Bilinear = ck::tensor_operation::element_wise::Bilinear; + +static constexpr auto GemmMNKPadding = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +// A[m0, m1, k0, k1] * B[n0, n1, k0, k1] + D[m0, m1, n0, n1] = E[m0, m1, n0, n1] +// m/k/n/n are the fast changing dimension for A/B/D/E +using device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mknn_instance = std::tuple< + // clang-format off + //#####################################| NumDimM| NumDimN| NumDimK| AData| BData| AccData| CShuffle| DsData| EData| A| B| CDE| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| + //#####################################| | | | Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| 
SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| + //#####################################| | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| + //#####################################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 128, 128, 16, 1, 2, 16, 16, 4, 4, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 128, 128, 16, 2, 2, 16, 16, 4, 4, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 128, 128, 64, 16, 1, 2, 16, 16, 4, 4, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 8>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 128, 128, 64, 16, 2, 2, 16, 16, 4, 4, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 8>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, 
GemmMNKPadding, 1, 128, 64, 128, 16, 1, 2, 16, 16, 4, 4, S<8, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 8, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 128, 64, 128, 16, 2, 2, 16, 16, 4, 4, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 8, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 128, 64, 16, 1, 2, 16, 16, 4, 2, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 128, 64, 16, 2, 2, 16, 16, 4, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 64, 128, 16, 1, 2, 16, 16, 2, 4, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 64, 128, 16, 2, 2, 16, 16, 2, 4, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1> + // clang-format on + >; + +void add_device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mknn_instance( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, + device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mknn_instance{}); +} + 
+} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/contraction_bilinear/device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mnnn_instance.cpp b/library/src/tensor_operation_instance/gpu/contraction_bilinear/device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mnnn_instance.cpp new file mode 100644 index 0000000000..92e39c173f --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/contraction_bilinear/device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mnnn_instance.cpp @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. + +// This (ifndef) is a hack to use customized behavior for buffer load rather than using default +// setting Don't use this hack unless absolutely necessary! +// FIXME: make the behavior of buffer load a configurable (template) parameter of each device op +#define CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK 1 + +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_contraction_multiple_d_xdl_cshuffle.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +using F64 = double; +using F64_Tuple = ck::Tuple; + +template +using S = ck::Sequence; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using Bilinear = ck::tensor_operation::element_wise::Bilinear; + +static constexpr auto GemmMNKPadding = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +// A[m0, m1, k0, k1] * B[n0, n1, k0, k1] + D[m0, m1, n0, n1] = E[m0, m1, n0, 
n1] +// m/n/n/n are the fast changing dimension for A/B/D/E +using device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mnnn_instance = std::tuple< + // clang-format off + //#####################################| NumDimM| NumDimN| NumDimK| AData| BData| AccData| CShuffle| DsData| EData| A| B| CDE| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| + //#####################################| | | | Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| + //#####################################| | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| + //#####################################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 128, 128, 16, 1, 1, 16, 16, 4, 4, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, 
GemmMNKPadding, 1, 256, 128, 128, 16, 2, 2, 16, 16, 4, 4, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 128, 128, 64, 16, 1, 1, 16, 16, 4, 4, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<4, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 16, 1, 8>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 128, 128, 64, 16, 2, 2, 16, 16, 4, 4, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 16, 1, 8>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 128, 64, 128, 16, 1, 1, 16, 16, 4, 4, S<8, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 8, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 128, 64, 128, 16, 2, 2, 16, 16, 4, 4, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 8, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 128, 64, 16, 1, 1, 16, 16, 4, 2, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 128, 64, 16, 2, 2, 16, 16, 4, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 
16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 64, 128, 16, 1, 1, 16, 16, 2, 4, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, F64_Tuple, F64, PassThrough, PassThrough, Bilinear, GemmMNKPadding, 1, 256, 64, 128, 16, 2, 2, 16, 16, 2, 4, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1> + // clang-format on + >; + +void add_device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mnnn_instance( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, + device_contraction_bilinear_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_f64_mnnn_instance{}); +} + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/contraction_scale/CMakeLists.txt b/library/src/tensor_operation_instance/gpu/contraction_scale/CMakeLists.txt index 7ad6605486..31f6a0fcdc 100644 --- a/library/src/tensor_operation_instance/gpu/contraction_scale/CMakeLists.txt +++ b/library/src/tensor_operation_instance/gpu/contraction_scale/CMakeLists.txt @@ -1,7 +1,13 @@ add_instance_library(device_contraction_scale_instance + #float device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f32_f32_f32_kkn_instance.cpp device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f32_f32_f32_knn_instance.cpp device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f32_f32_f32_mkn_instance.cpp device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f32_f32_f32_mnn_instance.cpp + #double + device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_kkn_instance.cpp + device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_knn_instance.cpp + 
device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mkn_instance.cpp + device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mnn_instance.cpp ) diff --git a/library/src/tensor_operation_instance/gpu/contraction_scale/device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_kkn_instance.cpp b/library/src/tensor_operation_instance/gpu/contraction_scale/device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_kkn_instance.cpp new file mode 100644 index 0000000000..0aa927155a --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/contraction_scale/device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_kkn_instance.cpp @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. + +// This (ifndef) is a hack to use customized behavior for buffer load rather than using default +// setting Don't use this hack unless absolutely necessary! +// FIXME: make the behavior of buffer load a configurable (template) parameter of each device op +#define CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK 1 + +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_contraction_multiple_d_xdl_cshuffle.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +using F64 = double; +using Empty_Tuple = ck::Tuple<>; + +template +using S = ck::Sequence; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using Scale = ck::tensor_operation::element_wise::Scale; + +static constexpr auto GemmMNKPadding = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +// A[m0, m1, k0, k1] * B[n0, n1, k0, k1] + D[m0, m1, n0, 
n1] = E[m0, m1, n0, n1] +// k/k/n/n are the fast changing dimension for A/B/D/E +using device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_kkn_instance = std::tuple< + // clang-format off + //#####################################| NumDimM| NumDimN| NumDimK| AData| BData| AccData| CShuffle| DsData| EData| A| B| CDE| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| + //#####################################| | | | Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| + //#####################################| | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| + //#####################################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 128, 128, 16, 2, 2, 16, 16, 4, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, 
Scale, GemmMNKPadding, 1, 128, 128, 64, 16, 2, 2, 16, 16, 4, 4, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 8>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 128, 64, 128, 16, 2, 2, 16, 16, 4, 4, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 8, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 64, 64, 64, 16, 2, 2, 16, 16, 4, 4, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 8, 1, 8>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 128, 64, 16, 2, 2, 16, 16, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 64, 128, 16, 2, 2, 16, 16, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 128, 128, 32, 16, 2, 2, 16, 16, 4, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 8>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 128, 32, 128, 16, 2, 2, 16, 16, 2, 4, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 8, 
1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 64, 64, 32, 16, 2, 2, 16, 16, 4, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 8, 1, 8>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 64, 32, 64, 16, 2, 2, 16, 16, 2, 4, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 8, 1, 8>, 1> + // clang-format on + >; + +void add_device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_kkn_instance( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_kkn_instance{}); +} + +} // namespace instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/contraction_scale/device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_knn_instance.cpp b/library/src/tensor_operation_instance/gpu/contraction_scale/device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_knn_instance.cpp new file mode 100644 index 0000000000..b84ea274c5 --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/contraction_scale/device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_knn_instance.cpp @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. + +// This (ifndef) is a hack to use customized behavior for buffer load rather than using default +// setting Don't use this hack unless absolutely necessary! 
+// FIXME: make the behavior of buffer load a configurable (template) parameter of each device op +#define CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK 1 + +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_contraction_multiple_d_xdl_cshuffle.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +using F64 = double; +using Empty_Tuple = ck::Tuple<>; + +template +using S = ck::Sequence; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using Scale = ck::tensor_operation::element_wise::Scale; + +static constexpr auto GemmMNKPadding = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +// A[m0, m1, k0, k1] * B[n0, n1, k0, k1] + D[m0, m1, n0, n1] = E[m0, m1, n0, n1] +// k/n/n/n are the fast changing dimension for A/B/D/E +using device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_knn_instance = std::tuple< + // clang-format off + //#####################################| NumDimM| NumDimN| NumDimK| AData| BData| AccData| CShuffle| DsData| EData| A| B| CDE| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| + //#####################################| | | | Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| 
SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector| + //#####################################| | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl| + //#####################################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 128, 128, 16, 2, 1, 16, 16, 4, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 128, 128, 16, 2, 2, 16, 16, 4, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 128, 128, 64, 16, 2, 1, 16, 16, 4, 4, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<8, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 16, 1, 8>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 128, 128, 64, 16, 2, 2, 16, 16, 4, 4, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 16, 1, 8>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, 
GemmMNKPadding, 1, 128, 64, 128, 16, 2, 1, 16, 16, 4, 4, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 8, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 128, 64, 128, 16, 2, 2, 16, 16, 4, 4, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 8, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 128, 64, 16, 2, 1, 16, 16, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 128, 64, 16, 2, 2, 16, 16, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 64, 128, 16, 2, 1, 16, 16, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 16, 1, 16>, 1>, + DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 64, 128, 16, 2, 2, 16, 16, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1> + // clang-format on + >; + +void add_device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_knn_instance( + std::vector>>& instances) +{ + add_device_operation_instances( + instances, device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_knn_instance{}); +} + +} // namespace 
instance +} // namespace device +} // namespace tensor_operation +} // namespace ck diff --git a/library/src/tensor_operation_instance/gpu/contraction_scale/device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mkn_instance.cpp b/library/src/tensor_operation_instance/gpu/contraction_scale/device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mkn_instance.cpp new file mode 100644 index 0000000000..578469997a --- /dev/null +++ b/library/src/tensor_operation_instance/gpu/contraction_scale/device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mkn_instance.cpp @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved. + +// This (ifndef) is a hack to use customized behavior for buffer load rather than using default +// setting Don't use this hack unless absolutely necessary! +// FIXME: make the behavior of buffer load a configurable (template) parameter of each device op +#define CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK 1 + +#include + +#include "ck/ck.hpp" +#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" +#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp" +#include "ck/tensor_operation/gpu/device/impl/device_contraction_multiple_d_xdl_cshuffle.hpp" +#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp" + +#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp" + +namespace ck { +namespace tensor_operation { +namespace device { +namespace instance { + +using F64 = double; +using Empty_Tuple = ck::Tuple<>; + +template +using S = ck::Sequence; + +using PassThrough = ck::tensor_operation::element_wise::PassThrough; +using Scale = ck::tensor_operation::element_wise::Scale; + +static constexpr auto GemmMNKPadding = ck::tensor_operation::device::GemmSpecialization::MNKPadding; + +// A[m0, m1, k0, k1] * B[n0, n1, k0, k1] + D[m0, m1, n0, n1] = E[m0, m1, n0, n1] +// m/k/n/n are the fast changing dimension for 
A/B/D/E
+using device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mkn_instance = std::tuple<
+    // clang-format off
+    //#####################################| NumDimM| NumDimN| NumDimK| AData| BData| AccData| CShuffle| DsData| EData| A| B| CDE| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
+    //#####################################| | | | Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector|
+    //#####################################| | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl|
+    //#####################################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 128, 128, 16, 1, 2, 16, 16, 4, 4, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 128, 128, 16, 2, 2, 16, 16, 4, 4, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 128, 128, 64, 16, 1, 2, 16, 16, 4, 4, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 8>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 128, 128, 64, 16, 2, 2, 16, 16, 4, 4, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 8>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 128, 64, 128, 16, 1, 2, 16, 16, 4, 4, S<8, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 8, 1, 16>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 128, 64, 128, 16, 2, 2, 16, 16, 4, 4, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 8, 1, 16>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 128, 64, 16, 1, 2, 16, 16, 4, 2, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 128, 64, 16, 2, 2, 16, 16, 4, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 64, 128, 16, 1, 2, 16, 16, 2, 4, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 64, 128, 16, 2, 2, 16, 16, 2, 4, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>
+    // clang-format on
+    >;
+
+void add_device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mkn_instance(
+    std::vector<std::unique_ptr<DeviceContractionMultipleD<2,
+                                                           2,
+                                                           2,
+                                                           F64,
+                                                           F64,
+                                                           Empty_Tuple,
+                                                           F64,
+                                                           PassThrough,
+                                                           PassThrough,
+                                                           Scale>>>& instances)
+{
+    add_device_operation_instances(
+        instances, device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mkn_instance{});
+}
+
+} // namespace instance
+} // namespace device
+} // namespace tensor_operation
+} // namespace ck
diff --git a/library/src/tensor_operation_instance/gpu/contraction_scale/device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mnn_instance.cpp b/library/src/tensor_operation_instance/gpu/contraction_scale/device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mnn_instance.cpp
new file mode 100644
index 0000000000..8e5a19313e
--- /dev/null
+++ b/library/src/tensor_operation_instance/gpu/contraction_scale/device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mnn_instance.cpp
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
+
+// This (ifndef) is a hack to use customized behavior for buffer load rather than using default
+// setting Don't use this hack unless absolutely necessary!
+// FIXME: make the behavior of buffer load a configurable (template) parameter of each device op
+#define CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK 1
+
+#include <cstdlib>
+
+#include "ck/ck.hpp"
+#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
+#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
+#include "ck/tensor_operation/gpu/device/impl/device_contraction_multiple_d_xdl_cshuffle.hpp"
+#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
+
+#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp"
+
+namespace ck {
+namespace tensor_operation {
+namespace device {
+namespace instance {
+
+using F64 = double;
+using Empty_Tuple = ck::Tuple<>;
+
+template <index_t... Is>
+using S = ck::Sequence<Is...>;
+
+using PassThrough = ck::tensor_operation::element_wise::PassThrough;
+using Scale = ck::tensor_operation::element_wise::Scale;
+
+static constexpr auto GemmMNKPadding = ck::tensor_operation::device::GemmSpecialization::MNKPadding;
+
+// A[m0, m1, k0, k1] * B[n0, n1, k0, k1] + D[m0, m1, n0, n1] = E[m0, m1, n0, n1]
+// m/n/n/n are the fast changing dimension for A/B/D/E
+using device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mnn_instance = std::tuple<
+    // clang-format off
+    //#####################################| NumDimM| NumDimN| NumDimK| AData| BData| AccData| CShuffle| DsData| EData| A| B| CDE| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
+    //#####################################| | | | Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector|
+    //#####################################| | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl|
+    //#####################################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 128, 128, 16, 1, 1, 16, 16, 4, 4, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 16, 1, 16>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 128, 128, 16, 2, 2, 16, 16, 4, 4, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 128, 128, 64, 16, 1, 1, 16, 16, 4, 4, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<4, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 16, 1, 8>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 128, 128, 64, 16, 2, 2, 16, 16, 4, 4, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 16, 1, 8>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 128, 64, 128, 16, 1, 1, 16, 16, 4, 4, S<8, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 8, 1, 16>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 128, 64, 128, 16, 2, 2, 16, 16, 4, 4, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 8, 1, 16>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 128, 64, 16, 1, 1, 16, 16, 4, 2, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 16, 1, 16>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 128, 64, 16, 2, 2, 16, 16, 4, 2, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 64, 128, 16, 1, 1, 16, 16, 2, 4, S<16,16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 0, 1, 1, S<1, 16, 1, 16>, 1>,
+    DeviceContractionMultipleD_Xdl_CShuffle< 2, 2, 2, F64, F64, F64, F64, Empty_Tuple, F64, PassThrough, PassThrough, Scale, GemmMNKPadding, 1, 256, 64, 128, 16, 2, 2, 16, 16, 2, 4, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, S<4, 64, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 1, 1, 1, 1, 1, S<1, 16, 1, 16>, 1>
+    // clang-format on
+    >;
+
+void add_device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mnn_instance(
+    std::vector<std::unique_ptr<DeviceContractionMultipleD<2,
+                                                           2,
+                                                           2,
+                                                           F64,
+                                                           F64,
+                                                           Empty_Tuple,
+                                                           F64,
+                                                           PassThrough,
+                                                           PassThrough,
+                                                           Scale>>>& instances)
+{
+    add_device_operation_instances(
+        instances, device_contraction_scale_m2_n2_k2_xdl_c_shuffle_f64_f64_f64_mnn_instance{});
+}
+
+} // namespace instance
+} // namespace device
+} // namespace tensor_operation
+} // namespace ck