tweaked params for direct conv; added a dummy winograd

This commit is contained in:
Chao Liu
2018-11-24 03:55:36 -06:00
parent dbffe05a98
commit 8732ea04fb
8 changed files with 529 additions and 23 deletions

View File

@@ -7,6 +7,7 @@
#include "constant_tensor_descriptor.cuh"
#include "device_direct_convolution_1.cuh"
#include "device_direct_convolution_2.cuh"
#include "device_winograd_convolution.cuh"
struct GeneratorConstant
{
@@ -395,7 +396,7 @@ int main()
Tensor<float> out_host(make_TensorDescriptor(out_desc));
Tensor<float> out_device(make_TensorDescriptor(out_desc));
#if 1
#if 0
std::size_t num_thread = std::thread::hardware_concurrency();
in.GenerateTensorValue(GeneratorTensor_2{-5, 5}, num_thread);
wei.GenerateTensorValue(GeneratorTensor_2{-5, 5}, num_thread);
@@ -403,16 +404,20 @@ int main()
for(int i = 0; i < 20; ++i)
{
device_direct_convolution_1(in_desc, in, wei_desc, wei, out_desc, out_device);
#if 1
device_direct_convolution_2(in_desc, in, wei_desc, wei, out_desc, out_device);
#else
device_winograd_convolution(in_desc, in, wei_desc, wei, out_desc, out_device);
#endif
}
#if 0
host_direct_convolution(in, wei, out_host);
#else
host_winograd_3x3_convolution(in, wei, out_host);
#endif
check_error(out_host, out_device);
#elif 0
host_winograd_3x3_convolution(in, wei, out_host);
check_error(out_host, out_device);
#endif
#if 0
LogRange(std::cout << "in : ", in.mData, ",") << std::endl;
@@ -420,4 +425,4 @@ int main()
LogRange(std::cout << "out_host : ", out_host.mData, ",") << std::endl;
LogRange(std::cout << "out_device: ", out_device.mData, ",") << std::endl;
#endif
}
}

View File

@@ -28,7 +28,7 @@ void device_direct_convolution_2(
constexpr unsigned OutTileSizeW = 2;
constexpr unsigned NPerBlock = 2;
constexpr unsigned KPerBlock = 32;
constexpr unsigned CPerBlock = 2;
constexpr unsigned CPerBlock = 4;
constexpr unsigned YPerBlock = 1;
constexpr unsigned XPerBlock = 16;

View File

#pragma once
#include "gridwise_winograd_convolution.cuh"

// Host-side launcher for the (dummy) Winograd convolution kernel.
//
// InDesc / WeiDesc / OutDesc are compile-time tensor descriptors whose four
// dimensions are queried via Index<0..3> (N, K, H, W for the output).
// Copies `in`, `wei`, and `out` to freshly allocated device buffers, launches
// gridwise_winograd_convolution once, times the launch with CUDA events, and
// copies the device result back into `out`.
//
// NOTE(review): GridSize assumes the output dims divide evenly by the
// per-block tile sizes below — confirm before using odd shapes.
template <class T, class InDesc, class WeiDesc, class OutDesc>
void device_winograd_convolution(
    InDesc, const Tensor<T>& in, WeiDesc, const Tensor<T>& wei, OutDesc, Tensor<T>& out)
{
    std::size_t data_sz = sizeof(T);

    DeviceMem in_device_buf(data_sz * in.mDesc.GetElementSpace());
    DeviceMem wei_device_buf(data_sz * wei.mDesc.GetElementSpace());
    DeviceMem out_device_buf(data_sz * out.mDesc.GetElementSpace());

    in_device_buf.ToDevice(in.mData.data());
    wei_device_buf.ToDevice(wei.mData.data());
    // `out` is also uploaded so the device buffer starts from defined values.
    out_device_buf.ToDevice(out.mData.data());

    constexpr auto I0 = Index<0>{};
    constexpr auto I1 = Index<1>{};
    constexpr auto I2 = Index<2>{};
    constexpr auto I3 = Index<3>{};

    constexpr auto out_desc = OutDesc{};

    // Tuning parameters; constexpr so the kernel template can unroll loops
    // and size its register/shared-memory usage at compile time.
    constexpr unsigned OutTileSizeH = 2;
    constexpr unsigned OutTileSizeW = 2;
    constexpr unsigned NPerBlock    = 2;
    constexpr unsigned KPerBlock    = 16;
    constexpr unsigned CPerBlock    = 4;
    constexpr unsigned YPerBlock    = 1;
    constexpr unsigned XPerBlock    = 16;

    constexpr unsigned NPerThread = 2;
    constexpr unsigned KPerThread = 2;
    constexpr unsigned CPerThread = 2;

    constexpr unsigned BlockSize = 128;

    // One block per (N-tile, K-tile, H-tile, W-tile) of the output.
    constexpr unsigned GridSize = (out_desc.GetLength(I0) / NPerBlock) *
                                  (out_desc.GetLength(I1) / KPerBlock) *
                                  (out_desc.GetLength(I2) / (OutTileSizeH * YPerBlock)) *
                                  (out_desc.GetLength(I3) / (OutTileSizeW * XPerBlock));

    dim3 block_dim(BlockSize);
    dim3 grid_dim(GridSize);

    printf("%s: BlockSize %u, GridSize %u \n", __func__, BlockSize, GridSize);

    // Create both events up front so event creation is not part of the timed
    // region, and check every runtime call with the in-scope macro.
    cudaEvent_t start, stop;
    float elapsedTime;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));

    checkCudaErrors(cudaEventRecord(start, 0));

    gridwise_winograd_convolution<T,
                                  InDesc,
                                  WeiDesc,
                                  OutDesc,
                                  OutTileSizeH,
                                  OutTileSizeW,
                                  NPerBlock,
                                  KPerBlock,
                                  CPerBlock,
                                  YPerBlock,
                                  XPerBlock,
                                  NPerThread,
                                  KPerThread,
                                  CPerThread,
                                  BlockSize,
                                  GridSize>
        <<<grid_dim, block_dim>>>(InDesc{},
                                  static_cast<T*>(in_device_buf.GetDeviceBuffer()),
                                  WeiDesc{},
                                  static_cast<T*>(wei_device_buf.GetDeviceBuffer()),
                                  OutDesc{},
                                  static_cast<T*>(out_device_buf.GetDeviceBuffer()));

    // Catch launch-configuration errors immediately after the launch.
    checkCudaErrors(cudaGetLastError());

    checkCudaErrors(cudaEventRecord(stop, 0));
    checkCudaErrors(cudaEventSynchronize(stop));
    checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
    printf("Elapsed time : %f ms\n", elapsedTime);

    // Events are per-call resources; destroy them to avoid leaking handles.
    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));

    out_device_buf.FromDevice(out.mData.data());
}