mirror of
https://github.com/NVIDIA/nvbench.git
synced 2026-03-14 20:27:24 +00:00
commit4b309e6ad8Author: Allison Piper <alliepiper16@gmail.com> Date: Sat Apr 6 13:19:14 2024 +0000 Minor cleanups commit476ed2ceaeAuthor: Allison Piper <alliepiper16@gmail.com> Date: Sat Apr 6 12:53:37 2024 +0000 WAR compiler ice in nlohmann json. Only seeing this on GCC 9 + CTK 11.1. Seems to be having trouble with the `[[no_unique_address]]` optimization. commita9bf1d3e42Author: Allison Piper <alliepiper16@gmail.com> Date: Sat Apr 6 00:24:47 2024 +0000 Bump nlohmann json. commit80980fe373Author: Allison Piper <alliepiper16@gmail.com> Date: Sat Apr 6 00:22:07 2024 +0000 Fix llvm filesystem support commitf6099e6311Author: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 23:18:44 2024 +0000 Drop MSVC 2017 testing. commit5ae50a8ef5Author: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 23:02:32 2024 +0000 Add mroe missing headers. commitb2a9ae04d9Author: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 22:37:56 2024 +0000 Remove old CUDA+MSVC builds and make windows build-only. commit5b18c26a28Author: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 22:37:07 2024 +0000 Fix header for std::min/max. Why do I always think it's utility instead of algorithm.... commit6a409efa2dAuthor: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 22:18:18 2024 +0000 Temporarily disable CUPTI on all windows builds. commitf432f88866Author: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 21:42:52 2024 +0000 Fix warnings on MSVC. commit829787649bAuthor: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 21:03:16 2024 +0000 More flailing about in powershell. commit21742e6beaAuthor: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 20:36:08 2024 +0000 Cleanup filesystem header handling. commitde3d202635Author: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 20:09:00 2024 +0000 Windows CI debugging. 
commita4151667ffAuthor: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 19:45:40 2024 +0000 Quotation mark madness commitdd04f3befeAuthor: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 19:27:27 2024 +0000 Temporarily disable NVML on windows CI until new containers are ready. commitf3952848c4Author: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 19:25:22 2024 +0000 WAR issues on gcc-7. commit198986875eAuthor: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 19:25:04 2024 +0000 More matrix/devcontainer updates. commitb9712f8696Author: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 18:30:35 2024 +0000 Fix windows build scripts. commit943f268280Author: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 18:18:33 2024 +0000 Fix warnings with clang host compiler. commit7063e1d60aAuthor: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 18:14:28 2024 +0000 More devcontainer hijinks. commit06532fde81Author: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 17:51:25 2024 +0000 More matrix updates. commit78a265ea55Author: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 17:34:00 2024 +0000 Support CLI CMake options for windows ci scripts. commit670895c867Author: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 17:31:59 2024 +0000 Add missing devcontainers. commitb121823e74Author: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 17:22:54 2024 +0000 Build for `all-major` architectures in presets. We can get away with this because we require CMake 3.23.1. This was added in 3.23. commitfccfd44685Author: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 17:22:08 2024 +0000 Update matrix file. commite7d43ba90eAuthor: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 16:23:48 2024 +0000 Consolidate build/test jobs. commitc4044056ecAuthor: Allison Piper <alliepiper16@gmail.com> Date: Fri Apr 5 16:04:11 2024 +0000 Add missing build script.
130 lines
4.0 KiB
Plaintext
130 lines
4.0 KiB
Plaintext
/*
 * Copyright 2023 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 with the LLVM exception
 * (the "License"); you may not use this file except in compliance with
 * the License.
 *
 * You may obtain a copy of the License at
 *
 *     http://llvm.org/foundation/relicensing/LICENSE.txt
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
|
|
|
|
#include <nvbench/detail/statistics.cuh>

#include <nvbench/types.cuh>

#include "test_asserts.cuh"

#include <algorithm>
#include <cmath>
#include <functional>
#include <vector>
|
|
|
|
// Shorthand for the statistics helpers under test.
namespace statistics = nvbench::detail::statistics;
|
|
void test_mean()
|
|
{
|
|
{
|
|
std::vector<nvbench::float64_t> data{1.0, 2.0, 3.0, 4.0, 5.0};
|
|
const nvbench::float64_t actual = statistics::compute_mean(std::begin(data), std::end(data));
|
|
const nvbench::float64_t expected = 3.0;
|
|
ASSERT(std::abs(actual - expected) < 0.001);
|
|
}
|
|
|
|
{
|
|
std::vector<nvbench::float64_t> data;
|
|
const bool finite = std::isfinite(statistics::compute_mean(std::begin(data), std::end(data)));
|
|
ASSERT(!finite);
|
|
}
|
|
}
|
|
|
|
void test_std()
|
|
{
|
|
std::vector<nvbench::float64_t> data{1.0, 2.0, 3.0, 4.0, 5.0};
|
|
const nvbench::float64_t mean = 3.0;
|
|
const nvbench::float64_t actual = statistics::standard_deviation(std::begin(data), std::end(data), mean);
|
|
const nvbench::float64_t expected = 1.581;
|
|
ASSERT(std::abs(actual - expected) < 0.001);
|
|
}
|
|
|
|
void test_lin_regression()
|
|
{
|
|
{
|
|
std::vector<nvbench::float64_t> ys{1.0, 2.0, 3.0, 4.0, 5.0};
|
|
auto [slope, intercept] = statistics::compute_linear_regression(std::begin(ys), std::end(ys));
|
|
ASSERT(slope == 1.0);
|
|
ASSERT(intercept == 1.0);
|
|
}
|
|
{
|
|
std::vector<nvbench::float64_t> ys{42.0, 42.0, 42.0};
|
|
auto [slope, intercept] = statistics::compute_linear_regression(std::begin(ys), std::end(ys));
|
|
ASSERT(slope == 0.0);
|
|
ASSERT(intercept == 42.0);
|
|
}
|
|
{
|
|
std::vector<nvbench::float64_t> ys{8.0, 4.0, 0.0};
|
|
auto [slope, intercept] = statistics::compute_linear_regression(std::begin(ys), std::end(ys));
|
|
ASSERT(slope == -4.0);
|
|
ASSERT(intercept == 8.0);
|
|
}
|
|
}
|
|
|
|
void test_r2()
|
|
{
|
|
{
|
|
std::vector<nvbench::float64_t> ys{1.0, 2.0, 3.0, 4.0, 5.0};
|
|
auto [slope, intercept] = statistics::compute_linear_regression(std::begin(ys), std::end(ys));
|
|
const nvbench::float64_t actual = statistics::compute_r2(std::begin(ys), std::end(ys), slope, intercept);
|
|
const nvbench::float64_t expected = 1.0;
|
|
ASSERT(std::abs(actual - expected) < 0.001);
|
|
}
|
|
{
|
|
std::vector<nvbench::float64_t> signal{1.0, 2.0, 3.0, 4.0, 5.0};
|
|
std::vector<nvbench::float64_t> noise{-1.0, 1.0, -1.0, 1.0, -1.0};
|
|
std::vector<nvbench::float64_t> ys(signal.size());
|
|
|
|
std::transform(std::begin(signal),
|
|
std::end(signal),
|
|
std::begin(noise),
|
|
std::begin(ys),
|
|
std::plus<nvbench::float64_t>());
|
|
|
|
auto [slope, intercept] = statistics::compute_linear_regression(std::begin(ys), std::end(ys));
|
|
const nvbench::float64_t expected = 0.675;
|
|
const nvbench::float64_t actual = statistics::compute_r2(std::begin(ys), std::end(ys), slope, intercept);
|
|
ASSERT(std::abs(actual - expected) < 0.001);
|
|
}
|
|
}
|
|
|
|
void test_slope_conversion()
|
|
{
|
|
{
|
|
const nvbench::float64_t actual = statistics::slope2deg(0.0);
|
|
const nvbench::float64_t expected = 0.0;
|
|
ASSERT(std::abs(actual - expected) < 0.001);
|
|
}
|
|
{
|
|
const nvbench::float64_t actual = statistics::slope2deg(1.0);
|
|
const nvbench::float64_t expected = 45.0;
|
|
ASSERT(std::abs(actual - expected) < 0.001);
|
|
}
|
|
{
|
|
const nvbench::float64_t actual = statistics::slope2deg(5.0);
|
|
const nvbench::float64_t expected = 78.69;
|
|
ASSERT(std::abs(actual - expected) < 0.001);
|
|
}
|
|
}
|
|
|
|
int main()
|
|
{
|
|
test_mean();
|
|
test_std();
|
|
test_lin_regression();
|
|
test_r2();
|
|
test_slope_conversion();
|
|
}
|