refactor build, clean up

Chao Liu
2019-02-14 15:10:16 -06:00
parent 28354a0fa3
commit e80fbbdd71
30 changed files with 486 additions and 572 deletions

View File

@@ -1,2 +1,2 @@
add_executable(conv EXCLUDE_FROM_ALL conv.cu)
target_link_libraries(conv convolution)
add_executable(conv conv.cu)
target_link_libraries(conv tensor device)

View File

@@ -2,13 +2,12 @@
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include "nvToolsExt.h"
#include "tensor.hpp"
#include "ConstantTensorDescriptor.cuh"
#include "conv_common.cuh"
#include "device_direct_convolution_1.cuh"
#include "device_direct_convolution_2.cuh"
#include "device_implicit_gemm_convolution_1_nchw_kcsr.cuh"
#include "device_implicit_gemm_convolution_1_nchw_kcsr_nkhw.cuh"
#include "device_implicit_gemm_convolution_1_nchw_srck_nkhw.cuh"
#include "device_implicit_gemm_convolution_1_chwn_csrk_khwn.cuh"
#include "device_implicit_gemm_convolution_1_chwn_csrk_khwn_padded.cuh"
@@ -590,7 +589,7 @@ int main()
#elif 0
device_direct_convolution_2
#elif 0
device_implicit_gemm_convolution_1_nchw_kcsr
device_implicit_gemm_convolution_1_nchw_kcsr_nkhw
#elif 0
device_implicit_gemm_convolution_1_nchw_srck_nkhw
#elif 0
@@ -602,7 +601,7 @@ int main()
#endif
(in_nchw_desc, in_nchw, wei_kcsr_desc, wei_kcsr, out_nkhw_desc, out_nkhw_device, nrepeat);
#elif 1
#elif 0
device_implicit_gemm_convolution_1_chwn_csrk_khwn_padded(in_nchw_desc,
in_nchw,
wei_kcsr_desc,
@@ -614,7 +613,7 @@ int main()
nrepeat);
#endif
#if 0
#if 1
if(S == 3 && R == 3)
{
host_winograd_3x3_convolution(in_nchw, wei_kcsr, out_nkhw_host, lower_pads, upper_pads);

View File

@@ -1,6 +1,7 @@
#pragma once
#include "gridwise_direct_convolution_1.cuh"
#include <unistd.h>
#include "device.hpp"
#include "gridwise_direct_convolution_1.cuh"
template <class T, class InDesc, class WeiDesc, class OutDesc>
void device_direct_convolution_1(InDesc,
@@ -32,6 +33,7 @@ void device_direct_convolution_1(InDesc,
constexpr auto out_desc = OutDesc{};
#if 1
// 3x3, 34x34
constexpr unsigned OutTileSizeH = 2;
constexpr unsigned OutTileSizeW = 2;
constexpr unsigned NPerBlock = 2;
@@ -45,20 +47,6 @@ void device_direct_convolution_1(InDesc,
constexpr unsigned CPerThread = 2;
constexpr unsigned BlockSize = 128;
#elif 1
constexpr unsigned OutTileSizeH = 2;
constexpr unsigned OutTileSizeW = 2;
constexpr unsigned NPerBlock = 2;
constexpr unsigned KPerBlock = 16;
constexpr unsigned CPerBlock = 2;
constexpr unsigned YPerBlock = 2;
constexpr unsigned XPerBlock = 27;
constexpr unsigned NPerThread = 2;
constexpr unsigned KPerThread = 4;
constexpr unsigned CPerThread = 2;
constexpr unsigned BlockSize = 216;
#endif
constexpr unsigned GridSize = (out_desc.GetLength(I0) / NPerBlock) *
@@ -73,45 +61,36 @@ void device_direct_convolution_1(InDesc,
for(unsigned i = 0; i < nrepeat; ++i)
{
cudaEvent_t start, stop;
float elapsedTime;
const void* f = reinterpret_cast<const void*>(gridwise_direct_convolution_1<T,
InDesc,
WeiDesc,
OutDesc,
OutTileSizeH,
OutTileSizeW,
NPerBlock,
KPerBlock,
CPerBlock,
YPerBlock,
XPerBlock,
NPerThread,
KPerThread,
CPerThread,
BlockSize,
GridSize>);
cudaEventCreate(&start);
cudaEventRecord(start, 0);
T* in_dev_ptr = static_cast<T*>(in_device_buf.GetDeviceBuffer());
T* wei_dev_ptr = static_cast<T*>(wei_device_buf.GetDeviceBuffer());
T* out_dev_ptr = static_cast<T*>(out_device_buf.GetDeviceBuffer());
gridwise_direct_convolution_1<T,
InDesc,
WeiDesc,
OutDesc,
OutTileSizeH,
OutTileSizeW,
NPerBlock,
KPerBlock,
CPerBlock,
YPerBlock,
XPerBlock,
NPerThread,
KPerThread,
CPerThread,
BlockSize,
GridSize>
<<<grid_dim, block_dim>>>(InDesc{},
static_cast<T*>(in_device_buf.GetDeviceBuffer()),
WeiDesc{},
static_cast<T*>(wei_device_buf.GetDeviceBuffer()),
OutDesc{},
static_cast<T*>(out_device_buf.GetDeviceBuffer()));
void* args[] = {&in_dev_ptr, &wei_dev_ptr, &out_dev_ptr};
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float time = 0;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Elapsed time : %f ms\n", elapsedTime);
launch_kernel(f, grid_dim, block_dim, args, time);
usleep(10000);
printf("Elapsed time : %f ms\n", time);
usleep(std::min(time * 1000, float(10000)));
}
checkCudaErrors(cudaGetLastError());
out_device_buf.FromDevice(out.mData.data());
}
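
Throughout this commit the per-call cudaEvent bookkeeping and `<<<grid_dim, block_dim>>>` launches are replaced by a launch_kernel helper from the new device.hpp, which is not itself shown in the diff. A minimal sketch of what such a helper could look like, assuming it wraps cudaLaunchKernel and reports the kernel's elapsed time through its last parameter (hypothetical, not the repository's actual header):

#include <cuda_runtime.h>

// Hypothetical sketch of the launch_kernel helper assumed to live in device.hpp:
// launches a type-erased kernel pointer via cudaLaunchKernel and returns the
// measured kernel time in milliseconds through `time_ms`.
inline void launch_kernel(
    const void* kernel, dim3 grid, dim3 block, void* args[], float& time_ms)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    cudaLaunchKernel(kernel, grid, block, args, 0, 0); // no shared mem, default stream
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    cudaEventElapsedTime(&time_ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}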

View File

@@ -1,6 +1,7 @@
#pragma once
#include "gridwise_direct_convolution_2.cuh"
#include <unistd.h>
#include "device.hpp"
#include "gridwise_direct_convolution_2.cuh"
template <class T, class InDesc, class WeiDesc, class OutDesc>
void device_direct_convolution_2(InDesc,
@@ -32,6 +33,7 @@ void device_direct_convolution_2(InDesc,
constexpr auto out_desc = OutDesc{};
#if 1
// 3x3, 34x34, 128 thread
constexpr unsigned OutTileSizeH = 2;
constexpr unsigned OutTileSizeW = 2;
constexpr unsigned NPerBlock = 2;
@@ -46,20 +48,7 @@ void device_direct_convolution_2(InDesc,
constexpr unsigned BlockSize = 128;
#elif 0
constexpr unsigned OutTileSizeH = 2;
constexpr unsigned OutTileSizeW = 2;
constexpr unsigned NPerBlock = 2;
constexpr unsigned KPerBlock = 32;
constexpr unsigned CPerBlock = 4;
constexpr unsigned YPerBlock = 1;
constexpr unsigned XPerBlock = 27;
constexpr unsigned NPerThread = 2;
constexpr unsigned KPerThread = 4;
constexpr unsigned CPerThread = 2;
constexpr unsigned BlockSize = 216;
#elif 0
// 3x3, 34x34, 256 thread
constexpr unsigned OutTileSizeH = 2;
constexpr unsigned OutTileSizeW = 2;
constexpr unsigned NPerBlock = 2;
@@ -87,45 +76,36 @@ void device_direct_convolution_2(InDesc,
for(unsigned i = 0; i < nrepeat; ++i)
{
cudaEvent_t start, stop;
float elapsedTime;
const void* f = reinterpret_cast<const void*>(gridwise_direct_convolution_2<T,
InDesc,
WeiDesc,
OutDesc,
OutTileSizeH,
OutTileSizeW,
NPerBlock,
KPerBlock,
CPerBlock,
YPerBlock,
XPerBlock,
NPerThread,
KPerThread,
CPerThread,
BlockSize,
GridSize>);
cudaEventCreate(&start);
cudaEventRecord(start, 0);
T* in_dev_ptr = static_cast<T*>(in_device_buf.GetDeviceBuffer());
T* wei_dev_ptr = static_cast<T*>(wei_device_buf.GetDeviceBuffer());
T* out_dev_ptr = static_cast<T*>(out_device_buf.GetDeviceBuffer());
gridwise_direct_convolution_2<T,
InDesc,
WeiDesc,
OutDesc,
OutTileSizeH,
OutTileSizeW,
NPerBlock,
KPerBlock,
CPerBlock,
YPerBlock,
XPerBlock,
NPerThread,
KPerThread,
CPerThread,
BlockSize,
GridSize>
<<<grid_dim, block_dim>>>(InDesc{},
static_cast<T*>(in_device_buf.GetDeviceBuffer()),
WeiDesc{},
static_cast<T*>(wei_device_buf.GetDeviceBuffer()),
OutDesc{},
static_cast<T*>(out_device_buf.GetDeviceBuffer()));
void* args[] = {&in_dev_ptr, &wei_dev_ptr, &out_dev_ptr};
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float time = 0;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Elapsed time : %f ms\n", elapsedTime);
launch_kernel(f, grid_dim, block_dim, args, time);
usleep(10000);
printf("Elapsed time : %f ms\n", time);
usleep(std::min(time * 1000, float(10000)));
}
checkCudaErrors(cudaGetLastError());
out_device_buf.FromDevice(out.mData.data());
}

View File

@@ -1,6 +1,7 @@
#pragma once
#include "gridwise_implicit_gemm_convolution_1_chwn_csrk_khwn.cuh"
#include <unistd.h>
#include "device.hpp"
#include "gridwise_implicit_gemm_convolution_1_chwn_csrk_khwn.cuh"
template <class T, class InDesc, class WeiDesc, class OutDesc>
void device_implicit_gemm_convolution_1_chwn_csrk_khwn(InDesc,
@@ -73,21 +74,7 @@ void device_implicit_gemm_convolution_1_chwn_csrk_khwn(InDesc,
wei_csrk_device_buf.ToDevice(wei_csrk.mData.data());
out_khwn_device_buf.ToDevice(out_khwn.mData.data());
#if 0
constexpr unsigned NPerBlock = 1;
constexpr unsigned KPerBlock = 1;
constexpr unsigned CPerBlock = 1;
constexpr unsigned HoPerBlock = 2;
constexpr unsigned WoPerBlock = 4;
constexpr unsigned NPerThread = 1;
constexpr unsigned KPerThread = 1;
constexpr unsigned CPerThread = 1;
constexpr unsigned HoPerThread = 1;
constexpr unsigned WoPerThread = 1;
constexpr unsigned BlockSize = 8;
#elif 0
#if 1
// for 3x3, 34x34 | 3x3 58x58, NKC = 64, 64, 256
constexpr unsigned NPerBlock = 16;
constexpr unsigned KPerBlock = 64;
@@ -214,50 +201,42 @@ void device_implicit_gemm_convolution_1_chwn_csrk_khwn(InDesc,
for(unsigned i = 0; i < nrepeat; ++i)
{
cudaEvent_t start, stop;
float elapsedTime;
const void* f = reinterpret_cast<const void*>(
gridwise_implicit_gemm_convolution_1_chwn_csrk_khwn<GridSize,
BlockSize,
T,
decltype(in_chwn_desc),
decltype(wei_csrk_desc),
decltype(out_khwn_desc),
NPerBlock,
KPerBlock,
CPerBlock,
HoPerBlock,
WoPerBlock,
NPerThread,
KPerThread,
CPerThread,
HoPerThread,
WoPerThread,
WeiBlockCopyThreadPerDim0,
WeiBlockCopyThreadPerDim1,
InBlockCopyDataPerRead,
WeiBlockCopyDataPerRead>);
cudaEventCreate(&start);
cudaEventRecord(start, 0);
T* in_dev_ptr = static_cast<T*>(in_chwn_device_buf.GetDeviceBuffer());
T* wei_dev_ptr = static_cast<T*>(wei_csrk_device_buf.GetDeviceBuffer());
T* out_dev_ptr = static_cast<T*>(out_khwn_device_buf.GetDeviceBuffer());
gridwise_implicit_gemm_convolution_1_chwn_csrk_khwn<GridSize,
BlockSize,
T,
decltype(in_chwn_desc),
decltype(wei_csrk_desc),
decltype(out_khwn_desc),
NPerBlock,
KPerBlock,
CPerBlock,
HoPerBlock,
WoPerBlock,
NPerThread,
KPerThread,
CPerThread,
HoPerThread,
WoPerThread,
WeiBlockCopyThreadPerDim0,
WeiBlockCopyThreadPerDim1,
InBlockCopyDataPerRead,
WeiBlockCopyDataPerRead>
<<<grid_dim, block_dim>>>(in_chwn_desc,
static_cast<T*>(in_chwn_device_buf.GetDeviceBuffer()),
wei_csrk_desc,
static_cast<T*>(wei_csrk_device_buf.GetDeviceBuffer()),
out_khwn_desc,
static_cast<T*>(out_khwn_device_buf.GetDeviceBuffer()));
void* args[] = {&in_dev_ptr, &wei_dev_ptr, &out_dev_ptr};
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float time = 0;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Elapsed time : %f ms\n", elapsedTime);
launch_kernel(f, grid_dim, block_dim, args, time);
usleep(std::min(elapsedTime * 1000, float(10000)));
printf("Elapsed time : %f ms\n", time);
usleep(std::min(time * 1000, float(10000)));
}
checkCudaErrors(cudaGetLastError());
out_khwn_device_buf.FromDevice(out_khwn.mData.data());
// reorder output

View File

@@ -1,8 +1,8 @@
#pragma once
#include <unistd.h>
#include "device.hpp"
#include "gridwise_implicit_gemm_convolution_1_chwn_csrk_khwn_padded.cuh"
#include "gridwise_implicit_gemm_convolution_1_chwn_csrk_khwn_padded_lds_pipeline.cuh"
#include <unistd.h>
#include <algorithm>
template <class T, class InDesc, class WeiDesc, class OutDesc, class LowerPads, class UpperPads>
void device_implicit_gemm_convolution_1_chwn_csrk_khwn_padded(InDesc,
@@ -172,7 +172,7 @@ void device_implicit_gemm_convolution_1_chwn_csrk_khwn_padded(InDesc,
constexpr unsigned WoPerThread = 1;
constexpr unsigned BlockSize = 128;
#elif 0
#elif 1
// 3x3 56x56, NKC = 16,256,128, with padding
// 3x3 28x28, NKC = 16,512,256, with padding
// 3x3 20x84, NKC = 16,256,256, with padding
@@ -222,7 +222,7 @@ void device_implicit_gemm_convolution_1_chwn_csrk_khwn_padded(InDesc,
constexpr unsigned WoPerThread = 1;
constexpr unsigned BlockSize = 128;
#elif 1
#elif 0
// for 1x1, 28x28
constexpr unsigned NPerBlock = 16;
constexpr unsigned KPerBlock = 128;
@@ -253,16 +253,11 @@ void device_implicit_gemm_convolution_1_chwn_csrk_khwn_padded(InDesc,
for(unsigned i = 0; i < nrepeat; ++i)
{
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
#if 1
const void* f = reinterpret_cast<const void*>(
#if 0
gridwise_implicit_gemm_convolution_1_chwn_csrk_khwn_padded
#elif 1
gridwise_implicit_gemm_convolution_1_chwn_csrk_khwn_padded_lds_pipeline
gridwise_implicit_gemm_convolution_1_chwn_csrk_khwn_padded_lds_pipeline
#endif
<GridSize,
BlockSize,
@@ -283,22 +278,22 @@ void device_implicit_gemm_convolution_1_chwn_csrk_khwn_padded(InDesc,
HoPerThread,
WoPerThread,
WeiBlockCopyThreadPerDim0,
WeiBlockCopyThreadPerDim1>
<<<grid_dim, block_dim>>>(static_cast<T*>(in_chwn_device_buf.GetDeviceBuffer()),
static_cast<T*>(wei_csrk_device_buf.GetDeviceBuffer()),
static_cast<T*>(out_khwn_device_buf.GetDeviceBuffer()));
WeiBlockCopyThreadPerDim1>);
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
T* in_dev_ptr = static_cast<T*>(in_chwn_device_buf.GetDeviceBuffer());
T* wei_dev_ptr = static_cast<T*>(wei_csrk_device_buf.GetDeviceBuffer());
T* out_dev_ptr = static_cast<T*>(out_khwn_device_buf.GetDeviceBuffer());
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Elapsed time : %f ms\n", elapsedTime);
void* args[] = {&in_dev_ptr, &wei_dev_ptr, &out_dev_ptr};
usleep(std::min(elapsedTime * 1000, float(10000)));
float time = 0;
launch_kernel(f, grid_dim, block_dim, args, time);
printf("Elapsed time : %f ms\n", time);
usleep(std::min(time * 1000, float(10000)));
}
checkCudaErrors(cudaGetLastError());
out_khwn_device_buf.FromDevice(out_khwn.mData.data());
// reorder output

View File

@@ -1,126 +0,0 @@
#pragma once
#include "gridwise_implicit_gemm_convolution_1_nchw_kcsr.cuh"
#include <unistd.h>
template <class T, class InDesc, class WeiDesc, class OutDesc>
void device_implicit_gemm_convolution_1_nchw_kcsr(InDesc,
const Tensor<T>& in,
WeiDesc,
const Tensor<T>& wei,
OutDesc,
Tensor<T>& out,
unsigned nrepeat)
{
std::size_t data_sz = sizeof(T);
DeviceMem in_device_buf(data_sz * in.mDesc.GetElementSpace());
DeviceMem wei_device_buf(data_sz * wei.mDesc.GetElementSpace());
DeviceMem out_device_buf(data_sz * out.mDesc.GetElementSpace());
int num_thread = std::thread::hardware_concurrency();
in_device_buf.ToDevice(in.mData.data());
wei_device_buf.ToDevice(wei.mData.data());
out_device_buf.ToDevice(out.mData.data());
constexpr auto I0 = Number<0>{};
constexpr auto I1 = Number<1>{};
constexpr auto I2 = Number<2>{};
constexpr auto I3 = Number<3>{};
constexpr auto in_desc = InDesc{};
constexpr auto wei_desc = WeiDesc{};
constexpr auto out_desc = OutDesc{};
#if 0
constexpr unsigned NPerBlock = 1;
constexpr unsigned KPerBlock = 1;
constexpr unsigned CPerBlock = 1;
constexpr unsigned HoPerBlock = 2;
constexpr unsigned WoPerBlock = 32;
constexpr unsigned KPerThread = 1;
constexpr unsigned CPerThread = 1;
constexpr unsigned HoPerThread = 2;
constexpr unsigned WoPerThread = 2;
constexpr unsigned BlockSize = 16;
#elif 1
constexpr unsigned NPerBlock = 1;
constexpr unsigned KPerBlock = 64;
constexpr unsigned CPerBlock = 2;
constexpr unsigned HoPerBlock = 4;
constexpr unsigned WoPerBlock = 32;
constexpr unsigned KPerThread = 16;
constexpr unsigned CPerThread = 1;
constexpr unsigned HoPerThread = 2;
constexpr unsigned WoPerThread = 2;
constexpr unsigned BlockSize = 128;
#elif 0
constexpr unsigned NPerBlock = 1;
constexpr unsigned KPerBlock = 64;
constexpr unsigned CPerBlock = 4;
constexpr unsigned HoPerBlock = 4;
constexpr unsigned WoPerBlock = 32;
constexpr unsigned KPerThread = 8;
constexpr unsigned CPerThread = 2;
constexpr unsigned HoPerThread = 2;
constexpr unsigned WoPerThread = 4;
constexpr unsigned BlockSize = 128;
#endif
constexpr unsigned GridSize =
(out_desc.GetLength(I0) / NPerBlock) * (out_desc.GetLength(I1) / KPerBlock) *
(out_desc.GetLength(I2) / HoPerBlock) * (out_desc.GetLength(I3) / WoPerBlock);
dim3 block_dim(BlockSize);
dim3 grid_dim(GridSize);
printf("%s: BlockSize %u, GridSize %u \n", __func__, BlockSize, GridSize);
for(unsigned i = 0; i < nrepeat; ++i)
{
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
gridwise_implicit_gemm_convolution_1_nchw_kcsr<GridSize,
BlockSize,
T,
InDesc,
WeiDesc,
OutDesc,
NPerBlock,
KPerBlock,
CPerBlock,
HoPerBlock,
WoPerBlock,
KPerThread,
CPerThread,
HoPerThread,
WoPerThread>
<<<grid_dim, block_dim>>>(InDesc{},
static_cast<T*>(in_device_buf.GetDeviceBuffer()),
WeiDesc{},
static_cast<T*>(wei_device_buf.GetDeviceBuffer()),
OutDesc{},
static_cast<T*>(out_device_buf.GetDeviceBuffer()));
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Elapsed time : %f ms\n", elapsedTime);
usleep(10000);
}
checkCudaErrors(cudaGetLastError());
out_device_buf.FromDevice(out.mData.data());
}

View File

@@ -0,0 +1,94 @@
#pragma once
#include <unistd.h>
#include "device.hpp"
#include "gridwise_implicit_gemm_convolution_1_nchw_kcsr_nkhw.cuh"
template <class T, class InDesc, class WeiDesc, class OutDesc>
void device_implicit_gemm_convolution_1_nchw_kcsr_nkhw(InDesc,
const Tensor<T>& in,
WeiDesc,
const Tensor<T>& wei,
OutDesc,
Tensor<T>& out,
unsigned nrepeat)
{
std::size_t data_sz = sizeof(T);
DeviceMem in_device_buf(data_sz * in.mDesc.GetElementSpace());
DeviceMem wei_device_buf(data_sz * wei.mDesc.GetElementSpace());
DeviceMem out_device_buf(data_sz * out.mDesc.GetElementSpace());
int num_thread = std::thread::hardware_concurrency();
in_device_buf.ToDevice(in.mData.data());
wei_device_buf.ToDevice(wei.mData.data());
out_device_buf.ToDevice(out.mData.data());
constexpr auto I0 = Number<0>{};
constexpr auto I1 = Number<1>{};
constexpr auto I2 = Number<2>{};
constexpr auto I3 = Number<3>{};
constexpr auto in_desc = InDesc{};
constexpr auto wei_desc = WeiDesc{};
constexpr auto out_desc = OutDesc{};
#if 1
// 3x3, 34x34
constexpr unsigned NPerBlock = 1;
constexpr unsigned KPerBlock = 64;
constexpr unsigned CPerBlock = 2;
constexpr unsigned HoPerBlock = 4;
constexpr unsigned WoPerBlock = 32;
constexpr unsigned KPerThread = 16;
constexpr unsigned CPerThread = 1;
constexpr unsigned HoPerThread = 2;
constexpr unsigned WoPerThread = 2;
constexpr unsigned BlockSize = 128;
#endif
constexpr unsigned GridSize =
(out_desc.GetLength(I0) / NPerBlock) * (out_desc.GetLength(I1) / KPerBlock) *
(out_desc.GetLength(I2) / HoPerBlock) * (out_desc.GetLength(I3) / WoPerBlock);
dim3 block_dim(BlockSize);
dim3 grid_dim(GridSize);
printf("%s: BlockSize %u, GridSize %u \n", __func__, BlockSize, GridSize);
for(unsigned i = 0; i < nrepeat; ++i)
{
const void* f = reinterpret_cast<const void*>(
gridwise_implicit_gemm_convolution_1_nchw_kcsr_nkhw<GridSize,
BlockSize,
T,
InDesc,
WeiDesc,
OutDesc,
NPerBlock,
KPerBlock,
CPerBlock,
HoPerBlock,
WoPerBlock,
KPerThread,
CPerThread,
HoPerThread,
WoPerThread>);
T* in_dev_ptr = static_cast<T*>(in_device_buf.GetDeviceBuffer());
T* wei_dev_ptr = static_cast<T*>(wei_device_buf.GetDeviceBuffer());
T* out_dev_ptr = static_cast<T*>(out_device_buf.GetDeviceBuffer());
void* args[] = {&in_dev_ptr, &wei_dev_ptr, &out_dev_ptr};
float time = 0;
launch_kernel(f, grid_dim, block_dim, args, time);
printf("Elapsed time : %f ms\n", time);
usleep(std::min(time * 1000, float(10000)));
}
out_device_buf.FromDevice(out.mData.data());
}
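
The new file above, like every refactored call site in this commit, stores the GetDeviceBuffer() results in locals and packs their addresses into a void* args[] array, because cudaLaunchKernel expects a pointer to each kernel argument in declaration order. A self-contained illustration of that convention (example code, not from the repository):

#include <cuda_runtime.h>

__global__ void scale(float* data, float factor)
{
    data[threadIdx.x] *= factor;
}

int main()
{
    float* d_data = nullptr;
    cudaMalloc(&d_data, 32 * sizeof(float));
    cudaMemset(d_data, 0, 32 * sizeof(float));

    float factor = 2.0f;

    // cudaLaunchKernel reads each kernel argument through these addresses,
    // which is why the diff takes &in_dev_ptr, &wei_dev_ptr, &out_dev_ptr
    // rather than passing the device pointers directly.
    void* args[] = {&d_data, &factor};

    cudaLaunchKernel(reinterpret_cast<const void*>(scale), dim3(1), dim3(32), args, 0, 0);
    cudaDeviceSynchronize();

    cudaFree(d_data);
    return 0;
}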

View File

@@ -1,6 +1,7 @@
#pragma once
#include "gridwise_implicit_gemm_convolution_1_nchw_srck_nkhw.cuh"
#include <unistd.h>
#include "device.hpp"
#include "gridwise_implicit_gemm_convolution_1_nchw_srck_nkhw.cuh"
template <class T, class InDesc, class WeiDesc, class OutDesc>
void device_implicit_gemm_convolution_1_nchw_srck_nkhw(InDesc,
@@ -52,20 +53,7 @@ void device_implicit_gemm_convolution_1_nchw_srck_nkhw(InDesc,
wei_srck_device_buf.ToDevice(wei_srck.mData.data());
out_nkhw_device_buf.ToDevice(out_nkhw.mData.data());
#if 0
constexpr unsigned NPerBlock = 1;
constexpr unsigned KPerBlock = 1;
constexpr unsigned CPerBlock = 1;
constexpr unsigned HoPerBlock = 2;
constexpr unsigned WoPerBlock = 32;
constexpr unsigned KPerThread = 1;
constexpr unsigned CPerThread = 1;
constexpr unsigned HoPerThread = 2;
constexpr unsigned WoPerThread = 2;
constexpr unsigned BlockSize = 16;
#elif 0
#if 1
// for 3x3, 34x34
constexpr unsigned NPerBlock = 1;
constexpr unsigned KPerBlock = 64;
@@ -123,45 +111,37 @@ void device_implicit_gemm_convolution_1_nchw_srck_nkhw(InDesc,
for(unsigned i = 0; i < nrepeat; ++i)
{
cudaEvent_t start, stop;
float elapsedTime;
const void* f = reinterpret_cast<const void*>(
gridwise_implicit_gemm_convolution_1_nchw_srck_nkhw<GridSize,
BlockSize,
T,
decltype(in_nchw_desc),
decltype(wei_srck_desc),
decltype(out_nkhw_desc),
NPerBlock,
KPerBlock,
CPerBlock,
HoPerBlock,
WoPerBlock,
NPerThread,
KPerThread,
CPerThread,
HoPerThread,
WoPerThread>);
cudaEventCreate(&start);
cudaEventRecord(start, 0);
T* in_dev_ptr = static_cast<T*>(in_nchw_device_buf.GetDeviceBuffer());
T* wei_dev_ptr = static_cast<T*>(wei_srck_device_buf.GetDeviceBuffer());
T* out_dev_ptr = static_cast<T*>(out_nkhw_device_buf.GetDeviceBuffer());
gridwise_implicit_gemm_convolution_1_nchw_srck_nkhw<GridSize,
BlockSize,
T,
decltype(in_nchw_desc),
decltype(wei_srck_desc),
decltype(out_nkhw_desc),
NPerBlock,
KPerBlock,
CPerBlock,
HoPerBlock,
WoPerBlock,
NPerThread,
KPerThread,
CPerThread,
HoPerThread,
WoPerThread>
<<<grid_dim, block_dim>>>(in_nchw_desc,
static_cast<T*>(in_nchw_device_buf.GetDeviceBuffer()),
wei_srck_desc,
static_cast<T*>(wei_srck_device_buf.GetDeviceBuffer()),
out_nkhw_desc,
static_cast<T*>(out_nkhw_device_buf.GetDeviceBuffer()));
void* args[] = {&in_dev_ptr, &wei_dev_ptr, &out_dev_ptr};
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float time = 0;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Elapsed time : %f ms\n", elapsedTime);
launch_kernel(f, grid_dim, block_dim, args, time);
usleep(10000);
printf("Elapsed time : %f ms\n", time);
usleep(std::min(time * 1000, float(10000)));
}
checkCudaErrors(cudaGetLastError());
out_nkhw_device_buf.FromDevice(out_nkhw.mData.data());
}

View File

@@ -1,7 +1,8 @@
#pragma once
#include <unistd.h>
#include "device.hpp"
#include "gridwise_implicit_gemm_convolution_2_cnhw_csrk_knhw.cuh"
#include "gridwise_implicit_gemm_convolution_2_cnhw_csrk_knhw_lds_double_buffer.cuh"
#include <unistd.h>
template <class T, class InDesc, class WeiDesc, class OutDesc>
void device_implicit_gemm_convolution_2_cnhw_csrk_knhw(InDesc,
@@ -69,6 +70,7 @@ void device_implicit_gemm_convolution_2_cnhw_csrk_knhw(InDesc,
#if 0
// 3x3, 34x34
// need to use register double buffer for GEMM
constexpr unsigned BPerBlock = 128;
constexpr unsigned KPerBlock = 64;
constexpr unsigned CPerBlock = 4;
@@ -211,60 +213,53 @@ void device_implicit_gemm_convolution_2_cnhw_csrk_knhw(InDesc,
for(unsigned i = 0; i < nrepeat; ++i)
{
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
const void* f = reinterpret_cast<const void*>(
#if 0
gridwise_implicit_gemm_convolution_2_cnhw_csrk_knhw
gridwise_implicit_gemm_convolution_2_cnhw_csrk_knhw
#else
gridwise_implicit_gemm_convolution_2_cnhw_csrk_knhw_lds_double_buffer
gridwise_implicit_gemm_convolution_2_cnhw_csrk_knhw_lds_double_buffer
#endif
<GridSize,
BlockSize,
T,
decltype(in_cnhw_desc),
decltype(wei_csrk_desc),
decltype(out_knhw_desc),
BPerBlock,
KPerBlock,
CPerBlock,
BPerThread,
KPerThread,
GemmThreadPerColumnPerCluster,
GemmThreadPerRowPerCluster,
GemmMPerThreadSubC,
GemmNPerThreadSubC,
GemmMLevel0Cluster,
GemmNLevel0Cluster,
GemmMLevel1Cluster,
GemmNLevel1Cluster,
GemmKPerThreadLoop,
InBlockCopyThreadPerDim0,
InBlockCopyThreadPerDim1,
WeiBlockCopyThreadPerDim0,
WeiBlockCopyThreadPerDim1,
InBlockCopyDataPerRead,
WeiBlockCopyDataPerRead>
<<<grid_dim, block_dim>>>(in_cnhw_desc,
static_cast<T*>(in_cnhw_device_buf.GetDeviceBuffer()),
wei_csrk_desc,
static_cast<T*>(wei_csrk_device_buf.GetDeviceBuffer()),
out_knhw_desc,
static_cast<T*>(out_knhw_device_buf.GetDeviceBuffer()));
<GridSize,
BlockSize,
T,
decltype(in_cnhw_desc),
decltype(wei_csrk_desc),
decltype(out_knhw_desc),
BPerBlock,
KPerBlock,
CPerBlock,
BPerThread,
KPerThread,
GemmThreadPerColumnPerCluster,
GemmThreadPerRowPerCluster,
GemmMPerThreadSubC,
GemmNPerThreadSubC,
GemmMLevel0Cluster,
GemmNLevel0Cluster,
GemmMLevel1Cluster,
GemmNLevel1Cluster,
GemmKPerThreadLoop,
InBlockCopyThreadPerDim0,
InBlockCopyThreadPerDim1,
WeiBlockCopyThreadPerDim0,
WeiBlockCopyThreadPerDim1,
InBlockCopyDataPerRead,
WeiBlockCopyDataPerRead>);
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
T* in_dev_ptr = static_cast<T*>(in_cnhw_device_buf.GetDeviceBuffer());
T* wei_dev_ptr = static_cast<T*>(wei_csrk_device_buf.GetDeviceBuffer());
T* out_dev_ptr = static_cast<T*>(out_knhw_device_buf.GetDeviceBuffer());
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Elapsed time : %f ms\n", elapsedTime);
void* args[] = {&in_dev_ptr, &wei_dev_ptr, &out_dev_ptr};
usleep(std::min(elapsedTime * 1000, float(10000)));
float time;
launch_kernel(f, grid_dim, block_dim, args, time);
printf("Elapsed time : %f ms\n", time);
usleep(std::min(time * 1000, float(10000)));
}
checkCudaErrors(cudaGetLastError());
out_knhw_device_buf.FromDevice(out_knhw.mData.data());
// convert out_knhw to out_nkhw

View File

@@ -1,7 +1,8 @@
#pragma once
#include <unistd.h>
#include "device.hpp"
#include "gridwise_implicit_gemm_convolution_2_cnhw_srck_knhw.cuh"
#include "gridwise_implicit_gemm_convolution_2_cnhw_srck_knhw_lds_pipeline.cuh"
#include <unistd.h>
template <class T, class InDesc, class WeiDesc, class OutDesc>
void device_implicit_gemm_convolution_2_cnhw_srck_knhw(InDesc,
@@ -100,7 +101,7 @@ void device_implicit_gemm_convolution_2_cnhw_srck_knhw(InDesc,
constexpr unsigned InBlockCopyThreadPerDim1 = 16;
constexpr unsigned BlockSize = 128;
#elif 1
#elif 0
// 1x1, 28x28
constexpr unsigned BPerBlock = 64;
constexpr unsigned KPerBlock = 64;
@@ -140,50 +141,43 @@ void device_implicit_gemm_convolution_2_cnhw_srck_knhw(InDesc,
for(unsigned i = 0; i < nrepeat; ++i)
{
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
#if 0
gridwise_implicit_gemm_convolution_2_cnhw_srck_knhw
const void* f = reinterpret_cast<const void*>(
#if 1
gridwise_implicit_gemm_convolution_2_cnhw_srck_knhw
#else
gridwise_implicit_gemm_convolution_2_cnhw_srck_knhw_lds_pipeline
gridwise_implicit_gemm_convolution_2_cnhw_srck_knhw_lds_pipeline
#endif
<GridSize,
BlockSize,
T,
decltype(in_cnhw_desc),
decltype(wei_srck_desc),
decltype(out_knhw_desc),
BPerBlock,
KPerBlock,
CPerBlock,
BPerThread,
KPerThread,
CPerThread,
GemmThreadPerColumnPerCluster,
GemmThreadPerRowPerCluster,
InBlockCopyThreadPerDim0,
InBlockCopyThreadPerDim1>
<<<grid_dim, block_dim>>>(in_cnhw_desc,
static_cast<T*>(in_cnhw_device_buf.GetDeviceBuffer()),
wei_srck_desc,
static_cast<T*>(wei_srck_device_buf.GetDeviceBuffer()),
out_knhw_desc,
static_cast<T*>(out_knhw_device_buf.GetDeviceBuffer()));
<GridSize,
BlockSize,
T,
decltype(in_cnhw_desc),
decltype(wei_srck_desc),
decltype(out_knhw_desc),
BPerBlock,
KPerBlock,
CPerBlock,
BPerThread,
KPerThread,
CPerThread,
GemmThreadPerColumnPerCluster,
GemmThreadPerRowPerCluster,
InBlockCopyThreadPerDim0,
InBlockCopyThreadPerDim1>);
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
T* in_dev_ptr = static_cast<T*>(in_cnhw_device_buf.GetDeviceBuffer());
T* wei_dev_ptr = static_cast<T*>(wei_srck_device_buf.GetDeviceBuffer());
T* out_dev_ptr = static_cast<T*>(out_knhw_device_buf.GetDeviceBuffer());
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Elapsed time : %f ms\n", elapsedTime);
void* args[] = {&in_dev_ptr, &wei_dev_ptr, &out_dev_ptr};
usleep(std::min(elapsedTime * 1000, float(10000)));
float time = 0;
launch_kernel(f, grid_dim, block_dim, args, time);
printf("Elapsed time : %f ms\n", time);
usleep(std::min(time * 1000, float(10000)));
}
checkCudaErrors(cudaGetLastError());
out_knhw_device_buf.FromDevice(out_knhw.mData.data());
// convert out_knhw to out_nkhw