Resolution of issue #153: Add compiler warning on comparing int and size_t (#212)

* Turning compare warnings on

* Cleaning part I

* Cleaning part II

* Explicit static_cast to ck::type_convert

* Resolving large tensor size issue.

* format

* revert change to tensor descriptor; promote ElementSpaceSize to 64-bit

* use integer value for GEMM test

* Review remarks

* Review remarks + issues with (un)signed arithmetic

* Format fix

* Format

* Clang-format.

* fix 2 GB limit issue

Co-authored-by: Chao Liu <chao.liu2@amd.com>
Co-authored-by: Adam Osewski <aosewski@amd.com>

[ROCm/composable_kernel commit: f03a1738d9]
This commit is contained in:
myamlak
2022-05-09 22:06:49 +02:00
committed by GitHub
parent 0bde7aaef1
commit 060938c9a5
30 changed files with 261 additions and 165 deletions

View File

@@ -45,7 +45,7 @@ static bool check_out(const Tensor<T>& ref, const Tensor<T>& result)
{
float max_diff = 1e-6;
for(int i = 0; i < ref.mData.size(); ++i)
for(std::size_t i = 0; i < ref.mData.size(); ++i)
{
float diff = std::abs(double(ref.mData[i]) - double(result.mData[i]));
if(max_diff < diff)

View File

@@ -104,7 +104,7 @@ bool TestGroupedGemm(DeviceGroupedGemmPtr_& groupedGemmPtr)
b_tensors_device.reserve(group_count);
c_tensors_device.reserve(group_count);
for(int i = 0; i < gemm_shapes.size(); i++)
for(std::size_t i = 0; i < gemm_shapes.size(); i++)
{
a_tensors.emplace_back(Tensor<ADataType>(f_host_tensor_descriptor(
gemm_shapes[i].M, gemm_shapes[i].K, gemm_shapes[i].StrideA, ALayout{})));
@@ -119,7 +119,7 @@ bool TestGroupedGemm(DeviceGroupedGemmPtr_& groupedGemmPtr)
b_tensors[i].GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
}
for(int i = 0; i < gemm_shapes.size(); i++)
for(std::size_t i = 0; i < gemm_shapes.size(); i++)
{
a_tensors_device.emplace_back(
std::make_unique<DeviceMem>(sizeof(ADataType) * a_tensors[i].mDesc.GetElementSize()));
@@ -147,7 +147,7 @@ bool TestGroupedGemm(DeviceGroupedGemmPtr_& groupedGemmPtr)
invoker_ptr->Run(argument_ptr.get());
for(int i = 0; i < gemm_shapes.size(); i++)
for(std::size_t i = 0; i < gemm_shapes.size(); i++)
{
c_tensors_device[i]->FromDevice(c_device_tensors[i].mData.data());

View File

@@ -460,7 +460,7 @@ class SimpleAppArgs
int processArgs(int argc, char* argv[])
{
unsigned int ch;
int ch;
while(1)
{

View File

@@ -9,7 +9,7 @@ namespace reduce_util {
template <typename T>
void to_f32_vector(const Tensor<T>& src, Tensor<float>& dst)
{
for(int i = 0; i < src.mData.size(); ++i)
for(std::size_t i = 0; i < src.mData.size(); ++i)
dst.mData[i] = type_convert<float>(src.mData[i]);
}

View File

@@ -463,7 +463,7 @@ class SimpleAppArgs
int processArgs(int argc, char* argv[])
{
unsigned int ch;
int ch;
while(1)
{