Merge remote-tracking branch 'origin/main' into fea/axes_iteration_space

This commit is contained in:
Allison Piper
2025-05-01 10:42:35 -04:00
234 changed files with 10357 additions and 12278 deletions

View File

@@ -4,23 +4,38 @@ set(test_srcs
benchmark.cu
create.cu
cuda_timer.cu
cuda_stream.cu
cpu_timer.cu
criterion_manager.cu
criterion_params.cu
custom_main_custom_args.cu
custom_main_custom_exceptions.cu
custom_main_global_state_raii.cu
enum_type_list.cu
entropy_criterion.cu
float64_axis.cu
int64_axis.cu
named_values.cu
option_parser.cu
range.cu
reset_error.cu
ring_buffer.cu
runner.cu
state.cu
statistics.cu
state_generator.cu
stdrel_criterion.cu
string_axis.cu
type_axis.cu
type_list.cu
)
# Metatarget for all examples:
# Custom arguments:
# CTest commands+args can't be modified after creation, so we need to rely on substitution.
set(NVBench_TEST_ARGS_nvbench.test.custom_main_custom_args "--quiet" "--my-custom-arg" "--run-once" "-d" "0")
set(NVBench_TEST_ARGS_nvbench.test.custom_main_custom_exceptions "--quiet" "--run-once" "-d" "0")
# Metatarget for all tests:
add_custom_target(nvbench.test.all)
add_dependencies(nvbench.all nvbench.test.all)
@@ -32,10 +47,14 @@ foreach(test_src IN LISTS test_srcs)
target_link_libraries(${test_name} PRIVATE nvbench::nvbench fmt)
set_target_properties(${test_name} PROPERTIES COMPILE_FEATURES cuda_std_17)
nvbench_config_target(${test_name})
add_test(NAME ${test_name} COMMAND "$<TARGET_FILE:${test_name}>")
add_test(NAME ${test_name} COMMAND "$<TARGET_FILE:${test_name}>" ${NVBench_TEST_ARGS_${test_name}})
add_dependencies(nvbench.test.all ${test_name})
endforeach()
set_tests_properties(nvbench.test.custom_main_custom_exceptions PROPERTIES
PASS_REGULAR_EXPRESSION "Custom error detected: Expected exception thrown."
)
add_subdirectory(cmake)
add_subdirectory(device)

View File

@@ -17,22 +17,19 @@
*/
#include <nvbench/axes_metadata.cuh>
#include <nvbench/type_list.cuh>
#include <nvbench/type_strings.cuh>
#include <nvbench/types.cuh>
#include "test_asserts.cuh"
#include <fmt/format.h>
#include <algorithm>
#include <string_view>
using int_list = nvbench::type_list<nvbench::int8_t,
nvbench::int16_t,
nvbench::int32_t,
nvbench::int64_t>;
#include "test_asserts.cuh"
using int_list =
nvbench::type_list<nvbench::int8_t, nvbench::int16_t, nvbench::int32_t, nvbench::int64_t>;
using float_list = nvbench::type_list<nvbench::float32_t, nvbench::float64_t>;
@@ -110,7 +107,6 @@ void test_default_type_axes_names()
ASSERT(axes.get_type_axis(4).get_name() == "T4");
ASSERT(axes.get_type_axis(4).get_axis_index() == 4);
}
}
void test_type_axes()
@@ -129,17 +125,16 @@ void test_type_axes()
fmt::memory_buffer buffer;
for (const auto &axis : axes.get_axes())
{
fmt::format_to(buffer, "Axis: {}\n", axis->get_name());
fmt::format_to(std::back_inserter(buffer), "Axis: {}\n", axis->get_name());
const auto num_values = axis->get_size();
for (std::size_t i = 0; i < num_values; ++i)
{
auto input_string = axis->get_input_string(i);
auto description = axis->get_description(i);
fmt::format_to(buffer,
fmt::format_to(std::back_inserter(buffer),
" - {}{}\n",
input_string,
description.empty() ? ""
: fmt::format(" ({})", description));
description.empty() ? "" : fmt::format(" ({})", description));
}
}
@@ -157,9 +152,8 @@ Axis: Other
)expected";
const std::string test = fmt::to_string(buffer);
const auto diff =
std::mismatch(ref.cbegin(), ref.cend(), test.cbegin(), test.cend());
const auto idx = diff.second - test.cbegin();
const auto diff = std::mismatch(ref.cbegin(), ref.cend(), test.cbegin(), test.cend());
const auto idx = static_cast<std::size_t>(diff.second - test.cbegin());
ASSERT_MSG(test == ref,
"Differs at character {}.\n"
"Expected:\n\"{}\"\n\n"
@@ -189,9 +183,7 @@ void test_float64_axes()
void test_int64_axes()
{
nvbench::axes_metadata axes;
axes.add_int64_axis("I64 Axis",
{10, 11, 12, 13, 14},
nvbench::int64_axis_flags::none);
axes.add_int64_axis("I64 Axis", {10, 11, 12, 13, 14}, nvbench::int64_axis_flags::none);
ASSERT(axes.get_axes().size() == 1);
const auto &axis = axes.get_int64_axis("I64 Axis");
ASSERT(axis.get_size() == 5);
@@ -205,9 +197,7 @@ void test_int64_axes()
void test_int64_power_of_two_axes()
{
nvbench::axes_metadata axes;
axes.add_int64_axis("I64 POT Axis",
{1, 2, 3, 4, 5},
nvbench::int64_axis_flags::power_of_two);
axes.add_int64_axis("I64 POT Axis", {1, 2, 3, 4, 5}, nvbench::int64_axis_flags::power_of_two);
ASSERT(axes.get_axes().size() == 1);
const auto &axis = axes.get_int64_axis("I64 POT Axis");
ASSERT(axis.get_size() == 5);

View File

@@ -17,7 +17,6 @@
*/
#include <nvbench/benchmark.cuh>
#include <nvbench/callable.cuh>
#include <nvbench/named_values.cuh>
#include <nvbench/state.cuh>
@@ -25,8 +24,6 @@
#include <nvbench/type_strings.cuh>
#include <nvbench/types.cuh>
#include "test_asserts.cuh"
#include <fmt/format.h>
#include <algorithm>
@@ -34,6 +31,8 @@
#include <variant>
#include <vector>
#include "test_asserts.cuh"
template <typename T>
std::vector<T> sort(std::vector<T> &&vec)
{
@@ -44,13 +43,13 @@ std::vector<T> sort(std::vector<T> &&vec)
void no_op_generator(nvbench::state &state)
{
fmt::memory_buffer params;
fmt::format_to(params, "Params:");
fmt::format_to(std::back_inserter(params), "Params:");
const auto &axis_values = state.get_axis_values();
for (const auto &name : sort(axis_values.get_names()))
{
std::visit(
[&params, &name](const auto &value) {
fmt::format_to(params, " {}: {}", name, value);
fmt::format_to(std::back_inserter(params), " {}: {}", name, value);
},
axis_values.get_value(name));
}
@@ -61,34 +60,26 @@ void no_op_generator(nvbench::state &state)
NVBENCH_DEFINE_CALLABLE(no_op_generator, no_op_callable);
template <typename Integer, typename Float, typename Other>
void template_no_op_generator(nvbench::state &state,
nvbench::type_list<Integer, Float, Other>)
void template_no_op_generator(nvbench::state &state, nvbench::type_list<Integer, Float, Other>)
{
ASSERT(nvbench::type_strings<Integer>::input_string() ==
state.get_string("Integer"));
ASSERT(nvbench::type_strings<Float>::input_string() ==
state.get_string("Float"));
ASSERT(nvbench::type_strings<Other>::input_string() ==
state.get_string("Other"));
ASSERT(nvbench::type_strings<Integer>::input_string() == state.get_string("Integer"));
ASSERT(nvbench::type_strings<Float>::input_string() == state.get_string("Float"));
ASSERT(nvbench::type_strings<Other>::input_string() == state.get_string("Other"));
// Enum params using non-templated version:
no_op_generator(state);
}
NVBENCH_DEFINE_CALLABLE_TEMPLATE(template_no_op_generator,
template_no_op_callable);
NVBENCH_DEFINE_CALLABLE_TEMPLATE(template_no_op_generator, template_no_op_callable);
using int_list = nvbench::type_list<nvbench::int8_t,
nvbench::int16_t,
nvbench::int32_t,
nvbench::int64_t>;
using int_list =
nvbench::type_list<nvbench::int8_t, nvbench::int16_t, nvbench::int32_t, nvbench::int64_t>;
using float_list = nvbench::type_list<nvbench::float32_t, nvbench::float64_t>;
using misc_list = nvbench::type_list<bool, void>;
using lots_of_types_bench =
nvbench::benchmark<template_no_op_callable,
nvbench::type_list<int_list, float_list, misc_list>>;
nvbench::benchmark<template_no_op_callable, nvbench::type_list<int_list, float_list, misc_list>>;
using no_types_bench = nvbench::benchmark<no_op_callable>;
@@ -101,17 +92,16 @@ void test_type_axes()
const auto &axes = bench.get_axes().get_axes();
for (const auto &axis : axes)
{
fmt::format_to(buffer, "Axis: {}\n", axis->get_name());
fmt::format_to(std::back_inserter(buffer), "Axis: {}\n", axis->get_name());
const auto num_values = axis->get_size();
for (std::size_t i = 0; i < num_values; ++i)
{
auto input_string = axis->get_input_string(i);
auto description = axis->get_description(i);
fmt::format_to(buffer,
fmt::format_to(std::back_inserter(buffer),
" - {}{}\n",
input_string,
description.empty() ? ""
: fmt::format(" ({})", description));
description.empty() ? "" : fmt::format(" ({})", description));
}
}
@@ -148,7 +138,7 @@ void test_type_configs()
using Integer = nvbench::tl::get<0, Conf>;
using Float = nvbench::tl::get<1, Conf>;
using Other = nvbench::tl::get<2, Conf>;
fmt::format_to(buffer,
fmt::format_to(std::back_inserter(buffer),
"type_configs[{:2d}] = <{:>3}, {:>3}, {:>4}>\n",
idx++,
nvbench::type_strings<Integer>::input_string(),
@@ -292,11 +282,11 @@ void test_get_config_count()
bench.set_type_axes_names({"Integer", "Float", "Other"});
bench.get_axes().get_type_axis(0).set_active_inputs({"I16", "I32"}); // 2, 2
bench.get_axes().get_type_axis(1).set_active_inputs({"F32", "F64"}); // 2, 4
bench.get_axes().get_type_axis(2).set_active_inputs({"bool"}); // 1, 4
bench.add_float64_axis("foo", {0.4, 2.3, 4.3}); // 3, 12
bench.add_int64_axis("bar", {4, 6, 15}); // 3, 36
bench.add_string_axis("baz", {"str", "ing"}); // 2, 72
bench.add_string_axis("fez", {"single"}); // 1, 72
bench.get_axes().get_type_axis(2).set_active_inputs({"bool"}); // 1, 4
bench.add_float64_axis("foo", {0.4, 2.3, 4.3}); // 3, 12
bench.add_int64_axis("bar", {4, 6, 15}); // 3, 36
bench.add_string_axis("baz", {"str", "ing"}); // 2, 72
bench.add_string_axis("fez", {"single"}); // 1, 72
auto const num_devices = bench.get_devices().size();
ASSERT_MSG(bench.get_config_count() == 72 * num_devices,

View File

@@ -1,9 +1,3 @@
if ("MSVC" STREQUAL "${CMAKE_CXX_COMPILER_ID}")
# There's a bug that prevents build-and-test from working on MSVC.
# See NVIDIA/nvbench#43.
return()
endif()
# Need to escape the semicolons in CUDA_ARCHITECTURES or the tests break:
nvbench_escaped_cuda_arches(arches)
@@ -12,6 +6,7 @@ set(cmake_opts
-D "CMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}"
-D "CMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
-D "CMAKE_CUDA_COMPILER=${CMAKE_CUDA_COMPILER}"
-D "CMAKE_CUDA_FLAGS=${CMAKE_CUDA_FLAGS}"
-D "CMAKE_CUDA_ARCHITECTURES=${arches}"
)

View File

@@ -18,11 +18,11 @@
#include <nvbench/cpu_timer.cuh>
#include "test_asserts.cuh"
#include <chrono>
#include <thread>
#include "test_asserts.cuh"
void test_basic()
{
using namespace std::literals::chrono_literals;

View File

@@ -16,17 +16,14 @@
* limitations under the License.
*/
#include <nvbench/create.cuh>
#include <nvbench/benchmark.cuh>
#include <nvbench/callable.cuh>
#include <nvbench/create.cuh>
#include <nvbench/state.cuh>
#include <nvbench/type_list.cuh>
#include <nvbench/type_strings.cuh>
#include <nvbench/types.cuh>
#include "test_asserts.cuh"
#include <fmt/format.h>
#include <algorithm>
@@ -34,6 +31,8 @@
#include <variant>
#include <vector>
#include "test_asserts.cuh"
template <typename T>
std::vector<T> sort(std::vector<T> &&vec)
{
@@ -44,13 +43,13 @@ std::vector<T> sort(std::vector<T> &&vec)
void no_op_generator(nvbench::state &state)
{
fmt::memory_buffer params;
fmt::format_to(params, "Params:");
fmt::format_to(std::back_inserter(params), "Params:");
const auto &axis_values = state.get_axis_values();
for (const auto &name : sort(axis_values.get_names()))
{
std::visit(
[&params, &name](const auto &value) {
fmt::format_to(params, " {}: {}", name, value);
fmt::format_to(std::back_inserter(params), " {}: {}", name, value);
},
axis_values.get_value(name));
}
@@ -72,15 +71,11 @@ using misc_types = nvbench::type_list<bool, void>;
using type_axes = nvbench::type_list<float_types, int_types, misc_types>;
template <typename FloatT, typename IntT, typename MiscT>
void template_no_op_generator(nvbench::state &state,
nvbench::type_list<FloatT, IntT, MiscT>)
void template_no_op_generator(nvbench::state &state, nvbench::type_list<FloatT, IntT, MiscT>)
{
ASSERT(nvbench::type_strings<FloatT>::input_string() ==
state.get_string("FloatT"));
ASSERT(nvbench::type_strings<IntT>::input_string() ==
state.get_string("IntT"));
ASSERT(nvbench::type_strings<IntT>::input_string() ==
state.get_string("IntT"));
ASSERT(nvbench::type_strings<FloatT>::input_string() == state.get_string("FloatT"));
ASSERT(nvbench::type_strings<IntT>::input_string() == state.get_string("IntT"));
ASSERT(nvbench::type_strings<IntT>::input_string() == state.get_string("IntT"));
// Enum params using non-templated version:
no_op_generator(state);
@@ -109,15 +104,14 @@ std::string run_and_get_state_string(nvbench::benchmark_base &bench,
for (const auto &state : states)
{
ASSERT(state.is_skipped());
fmt::format_to(buffer, "{}\n", state.get_skip_reason());
fmt::format_to(std::back_inserter(buffer), "{}\n", state.get_skip_reason());
}
return fmt::to_string(buffer);
}
void validate_default_name()
{
auto bench =
nvbench::benchmark_manager::get().get_benchmark("no_op_generator").clone();
auto bench = nvbench::benchmark_manager::get().get_benchmark("no_op_generator").clone();
const std::string ref = "Params:\n";
@@ -127,8 +121,7 @@ void validate_default_name()
void validate_custom_name()
{
auto bench =
nvbench::benchmark_manager::get().get_benchmark("Custom Name").clone();
auto bench = nvbench::benchmark_manager::get().get_benchmark("Custom Name").clone();
const std::string ref = "Params:\n";
@@ -138,8 +131,7 @@ void validate_custom_name()
void validate_no_types()
{
auto bench =
nvbench::benchmark_manager::get().get_benchmark("No Types").clone();
auto bench = nvbench::benchmark_manager::get().get_benchmark("No Types").clone();
const std::string ref = R"expected(Params: Float: 11 Int: 1 String: One
Params: Float: 11 Int: 2 String: One
@@ -176,8 +168,7 @@ Params: Float: 13 Int: 3 String: Three
void validate_only_types()
{
auto bench =
nvbench::benchmark_manager::get().get_benchmark("Oops, All Types!").clone();
auto bench = nvbench::benchmark_manager::get().get_benchmark("Oops, All Types!").clone();
const std::string ref = R"expected(Params: FloatT: F32 IntT: I32 MiscT: bool
Params: FloatT: F32 IntT: I32 MiscT: void
@@ -195,8 +186,7 @@ Params: FloatT: F64 IntT: I64 MiscT: void
void validate_all_axes()
{
auto bench =
nvbench::benchmark_manager::get().get_benchmark("All The Axes").clone();
auto bench = nvbench::benchmark_manager::get().get_benchmark("All The Axes").clone();
const std::string ref =
R"expected(Params: Float: 11 FloatT: F32 Int: 1 IntT: I32 MiscT: bool String: One

View File

@@ -0,0 +1,82 @@
/*
* Copyright 2023 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 with the LLVM exception
* (the "License"); you may not use this file except in compliance with
* the License.
*
* You may obtain a copy of the License at
*
* http://llvm.org/foundation/relicensing/LICENSE.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvbench/criterion_manager.cuh>
#include <nvbench/types.cuh>
#include "test_asserts.cuh"
// The two built-in stopping criteria must be registered under their canonical names.
void test_standard_criteria_exist()
{
  ASSERT(nvbench::criterion_manager::get().get_criterion("stdrel").get_name() == "stdrel");
  ASSERT(nvbench::criterion_manager::get().get_criterion("entropy").get_name() == "entropy");
}
// Minimal criterion used only to exercise registration: ignores all measurements
// and reports "finished" immediately.
class custom_criterion : public nvbench::stopping_criterion_base
{
public:
  custom_criterion()
      : nvbench::stopping_criterion_base("custom", nvbench::criterion_params{})
  {}

protected:
  // No per-run state to reset.
  virtual void do_initialize() override {}
  // Measurements are discarded.
  virtual void do_add_measurement(nvbench::float64_t /* measurement */) override {}
  // Always converged.
  virtual bool do_is_finished() override { return true; }
};
// Registration contract: looking up an unregistered name throws; add() returns a
// reference to the stored instance; registering the same name twice throws.
void test_no_duplicates_are_allowed()
{
  nvbench::criterion_manager &manager = nvbench::criterion_manager::get();

  // "custom" is not registered yet, so the lookup must fail:
  bool exception_triggered = false;
  try
  {
    [[maybe_unused]] nvbench::stopping_criterion_base &_ = manager.get_criterion("custom");
  }
  catch (...)
  {
    exception_triggered = true;
  }
  ASSERT(exception_triggered);

  // NOTE(review): std::unique_ptr/std::make_unique are used but <memory> is not
  // included directly — presumably pulled in transitively; confirm.
  std::unique_ptr<custom_criterion> custom_ptr = std::make_unique<custom_criterion>();
  custom_criterion *custom_raw = custom_ptr.get();

  // add() must hand back a reference to the exact instance that was transferred:
  ASSERT(&manager.add(std::move(custom_ptr)) == custom_raw);

  // The registered instance is now retrievable by name:
  nvbench::stopping_criterion_base &custom =
    nvbench::criterion_manager::get().get_criterion("custom");
  ASSERT(custom_raw == &custom);

  // A second registration under the same name must be rejected:
  exception_triggered = false;
  try
  {
    manager.add(std::make_unique<custom_criterion>());
  }
  catch (...)
  {
    exception_triggered = true;
  }
  ASSERT(exception_triggered);
}

int main()
{
  test_standard_criteria_exist();
  test_no_duplicates_are_allowed();
}

View File

@@ -0,0 +1,62 @@
/*
* Copyright 2023 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 with the LLVM exception
* (the "License"); you may not use this file except in compliance with
* the License.
*
* You may obtain a copy of the License at
*
* http://llvm.org/foundation/relicensing/LICENSE.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvbench/criterion_manager.cuh>
#include <nvbench/types.cuh>
#include "test_asserts.cuh"
// Default-constructed criterion_params carry the legacy "compat" convergence
// settings (max-noise / min-time).
void test_compat_parameters()
{
  nvbench::criterion_params params;
  ASSERT(params.has_value("max-noise"));
  ASSERT(params.has_value("min-time"));
  ASSERT(params.get_float64("max-noise") == nvbench::detail::compat_max_noise());
  ASSERT(params.get_float64("min-time") == nvbench::detail::compat_min_time());
}

// The compat defaults can be replaced via set_float64().
void test_compat_overwrite()
{
  nvbench::criterion_params params;
  params.set_float64("max-noise", 40000.0);
  params.set_float64("min-time", 42000.0);
  ASSERT(params.get_float64("max-noise") == 40000.0);
  ASSERT(params.get_float64("min-time") == 42000.0);
}

// User-defined keys start absent; repeated sets overwrite the stored value.
void test_overwrite()
{
  nvbench::criterion_params params;
  ASSERT(!params.has_value("custom"));
  params.set_float64("custom", 42.0);
  ASSERT(params.get_float64("custom") == 42.0);
  params.set_float64("custom", 4.2);
  ASSERT(params.get_float64("custom") == 4.2);
}

int main()
{
  test_compat_parameters();
  test_compat_overwrite();
  test_overwrite();
}

77
testing/cuda_stream.cu Normal file
View File

@@ -0,0 +1,77 @@
/*
* Copyright 2023 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 with the LLVM exception
* (the "License"); you may not use this file except in compliance with
* the License.
*
* You may obtain a copy of the License at
*
* http://llvm.org/foundation/relicensing/LICENSE.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvbench/config.cuh>
#include <nvbench/cuda_stream.cuh>
#include <nvbench/device_manager.cuh>
#include <nvbench/types.cuh>
#include <fmt/format.h>
#include "test_asserts.cuh"
namespace
{
#ifdef NVBENCH_HAS_CUPTI
/**
* @brief Queries and returns the device id that the given \p cuda_stream is associated with
*
* @param cuda_stream The stream to get the device id for
* @return The device id that \p cuda_stream is associated with
*/
int get_device_of_stream(cudaStream_t cuda_stream)
{
  CUcontext ctx;
  // Ask the driver which context the stream belongs to (runtime stream converts to CUstream).
  NVBENCH_DRIVER_API_CALL(cuStreamGetCtx(CUstream{cuda_stream}, &ctx));
  // Make that context current so cuCtxGetDevice reports *its* device:
  NVBENCH_DRIVER_API_CALL(cuCtxPushCurrent(ctx));
  CUdevice device_id{};
  NVBENCH_DRIVER_API_CALL(cuCtxGetDevice(&device_id));
  // Restore the caller's context so this query has no side effects:
  NVBENCH_DRIVER_API_CALL(cuCtxPopCurrent(&ctx));
  return static_cast<int>(device_id);
}
#endif
} // namespace
// Verifies that nvbench::cuda_stream binds to the intended device, both when a
// device is passed explicitly and when the stream is created on the active device.
void test_basic()
{
// The check relies on the CUPTI/driver helper above; without CUPTI there is nothing to test.
#ifdef NVBENCH_HAS_CUPTI
  // Get devices
  auto devices = nvbench::device_manager::get().get_devices();

  // Iterate over devices
  for (auto const &device_info : devices)
  {
    // Create stream on the device before it becomes the active device
    nvbench::cuda_stream device_stream(device_info);

    // Verify cuda stream is associated with the correct cuda device
    ASSERT(get_device_of_stream(device_stream.get_stream()) == device_info.get_id());

    // Set the device as active device
    device_info.set_active();

    // Create the stream (implicitly) on the device that is currently active
    nvbench::cuda_stream current_device_stream{};

    // Verify the cuda stream was in fact associated with the currently active device
    ASSERT(get_device_of_stream(current_device_stream.get_stream()) == device_info.get_id());
  }
#endif
}

int main() { test_basic(); }

View File

@@ -16,19 +16,16 @@
* limitations under the License.
*/
#include <nvbench/cuda_timer.cuh>
#include <nvbench/cuda_stream.cuh>
#include <nvbench/cuda_timer.cuh>
#include <nvbench/test_kernels.cuh>
#include <nvbench/types.cuh>
#include "test_asserts.cuh"
#include <fmt/format.h>
void test_basic(cudaStream_t time_stream,
cudaStream_t exec_stream,
bool expected)
#include "test_asserts.cuh"
void test_basic(cudaStream_t time_stream, cudaStream_t exec_stream, bool expected)
{
nvbench::cuda_timer timer;

View File

@@ -0,0 +1,132 @@
/*
* Copyright 2024 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 with the LLVM exception
* (the "License"); you may not use this file except in compliance with
* the License.
*
* You may obtain a copy of the License at
*
* http://llvm.org/foundation/relicensing/LICENSE.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvbench/cuda_call.cuh>
#include <nvbench/nvbench.cuh>
/******************************************************************************
* Install custom parser.
* See <nvbench/main.cuh> for more details.
******************************************************************************/
//
// Step 1: Define a custom argument handler that accepts a vector of strings.
// - This handler should modify the vector in place to remove any custom
// arguments it handles. NVbench will then parse the remaining arguments.
// - The handler should also update any application state needed to handle
// the custom arguments.
//
// User code to handle a specific argument:
void handle_my_custom_arg();
// NVBench hook for modifying the command line arguments before parsing:
void custom_arg_handler(std::vector<std::string> &args)
{
  // Locate "--my-custom-arg"; when absent there is nothing to do.
  const auto pos = std::find(args.cbegin(), args.cend(), "--my-custom-arg");
  if (pos == args.cend())
  {
    return;
  }
  // Record that the flag was seen, then consume it so NVBench never parses it.
  handle_my_custom_arg();
  args.erase(pos);
}
//
// Step 2: Install the custom argument handler.
// - This is done by defining a macro that invokes the custom argument handler.
//
// Install the custom argument handler:
// Either define this before any NVBench headers are included, or undefine and redefine:
#undef NVBENCH_MAIN_CUSTOM_ARGS_HANDLER
#define NVBENCH_MAIN_CUSTOM_ARGS_HANDLER(args) custom_arg_handler(args)
// Step 3: Define `main`
//
// After installing the custom argument handler, define the main function using:
//
// ```
// NVBENCH_MAIN
// ```
//
// Here, this is done at the end of this file.
/******************************************************************************
* Unit test verification:
******************************************************************************/
// Track whether the args are found / handled.
bool h_custom_arg_found = false;
bool h_handled_on_device = false;
__device__ bool d_custom_arg_found = false;
__device__ bool d_handled_on_device = false;
// Copy host values to device:
void copy_host_state_to_device()
{
  NVBENCH_CUDA_CALL(cudaMemcpyToSymbol(d_custom_arg_found, &h_custom_arg_found, sizeof(bool)));
  NVBENCH_CUDA_CALL(cudaMemcpyToSymbol(d_handled_on_device, &h_handled_on_device, sizeof(bool)));
}

// Copy device values to host:
void copy_device_state_to_host()
{
  NVBENCH_CUDA_CALL(cudaMemcpyFromSymbol(&h_custom_arg_found, d_custom_arg_found, sizeof(bool)));
  NVBENCH_CUDA_CALL(cudaMemcpyFromSymbol(&h_handled_on_device, d_handled_on_device, sizeof(bool)));
}

// Invoked by custom_arg_handler when "--my-custom-arg" appears on the command line.
void handle_my_custom_arg()
{
  h_custom_arg_found = true;
  // Mirror the flag into device memory so the benchmark kernel can observe it.
  copy_host_state_to_device();
}

// Finalization check (installed via NVBENCH_MAIN_FINALIZE_CUSTOM_PRE): throws if the
// custom argument was never parsed, or was parsed but never observed on the device.
void verify()
{
  copy_device_state_to_host();
  if (!h_custom_arg_found)
  {
    throw std::runtime_error("Custom argument not detected.");
  }
  if (!h_handled_on_device)
  {
    throw std::runtime_error("Custom argument not handled on device.");
  }
}
// Install a verification check to ensure the custom argument was handled.
// Use the `PRE` finalize hook to ensure we check device state before resetting the context.
#undef NVBENCH_MAIN_FINALIZE_CUSTOM_PRE
#define NVBENCH_MAIN_FINALIZE_CUSTOM_PRE() verify()
// Simple kernel/benchmark to make sure that the handler can successfully modify CUDA state:
__global__ void kernel()
{
  // Reads the flag copied over by copy_host_state_to_device() and records that the
  // device actually saw it.
  if (d_custom_arg_found)
  {
    d_handled_on_device = true;
  }
}

void bench(nvbench::state &state)
{
  // NOTE(review): the kernel is launched on the default stream rather than a stream
  // from `launch` — presumably fine for this smoke test; confirm.
  state.exec([](nvbench::launch &) { kernel<<<1, 1>>>(); });
}
NVBENCH_BENCH(bench);
// Define the customized main function:
NVBENCH_MAIN

View File

@@ -0,0 +1,64 @@
/*
* Copyright 2022 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 with the LLVM exception
* (the "License"); you may not use this file except in compliance with
* the License.
*
* You may obtain a copy of the License at
*
* http://llvm.org/foundation/relicensing/LICENSE.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvbench/nvbench.cuh>
#include <stdexcept>
/******************************************************************************
* Install exception handler around the NVBench main body. This is used
* to print helpful information when a user exception is thrown before exiting.
*
* Note that this will **NOT** be used when a benchmark throws an exception.
* That will fail the benchmark and note the exception, and continue
* execution.
*
* This is used to catch exceptions in user extensions of NVBench, things like
* customized initialization, command line parsing, finalization, etc. See
* <nvbench/main.cuh> for more details.
******************************************************************************/
// Fixed-message exception thrown by the throwing argument parser below. The exact
// message is matched by the CTest PASS_REGULAR_EXPRESSION for this test.
struct user_exception : public std::runtime_error
{
  user_exception()
      : std::runtime_error("Expected exception thrown.")
  {}
};

// User code to handle user exception:
void handle_my_exception(user_exception &e)
{
  // Report the failure on stderr, then terminate with a non-zero status.
  std::cerr << "Custom error detected: " << e.what() << '\n' << std::flush;
  std::exit(1);
}

// Install the exception handler around the NVBench main body.
// NVBench will have sensible defaults for common exceptions following this if no terminating catch
// block is defined.
// Either define this before any NVBench headers are included, or undefine and redefine.
#undef NVBENCH_MAIN_CATCH_EXCEPTIONS_CUSTOM
#define NVBENCH_MAIN_CATCH_EXCEPTIONS_CUSTOM \
  catch (user_exception & e) { handle_my_exception(e); }

// For testing purposes, install an argument parser that always throws:
void really_robust_argument_parser(std::vector<std::string> &) { throw user_exception(); }
#undef NVBENCH_MAIN_CUSTOM_ARGS_HANDLER
#define NVBENCH_MAIN_CUSTOM_ARGS_HANDLER(args) really_robust_argument_parser(args);
// Define the customized main function:
NVBENCH_MAIN

View File

@@ -0,0 +1,121 @@
/*
* Copyright 2024 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 with the LLVM exception
* (the "License"); you may not use this file except in compliance with
* the License.
*
* You may obtain a copy of the License at
*
* http://llvm.org/foundation/relicensing/LICENSE.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvbench/nvbench.cuh>
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <cstring>
/******************************************************************************
* Test having global state that is initialized and finalized via RAII.
*****************************************************************************/
// RAII fixture used to verify NVBench's custom init/finalize hooks: allocates a small
// known buffer (host or device) on construction, verifies it is still intact on
// destruction, then invalidates and frees it. An "inner" instance may additionally
// verify an "outer" instance's buffer to check destruction ordering.
struct raii
{
  const char m_ref_data[6]; // reference pattern the buffer must always match
  char *m_data;             // owned buffer (device or host, per m_cuda)
  bool m_cuda;              // true => m_data is device memory
  const char *m_outer_data; // optional: another instance's buffer to verify
  bool m_outer_cuda;        // whether m_outer_data is device memory

  // Allocates the buffer and fills it with the reference pattern.
  explicit raii(bool cuda, char *outer_data = nullptr, bool outer_cuda = false)
      : m_ref_data{'a', 'b', 'c', '1', '2', '3'}
      , m_data(nullptr)
      , m_cuda(cuda)
      , m_outer_data(outer_data)
      , m_outer_cuda(outer_cuda)
  {
    if (m_cuda)
    {
      printf("(%p) RAII test: allocating device memory\n", this);
      NVBENCH_CUDA_CALL(cudaMalloc(&m_data, 6));
      NVBENCH_CUDA_CALL(cudaMemcpy(m_data, m_ref_data, 6, cudaMemcpyHostToDevice));
    }
    else
    {
      printf("(%p) RAII test: allocating host memory\n", this);
      m_data = new char[6];
      std::copy(m_ref_data, m_ref_data + 6, m_data);
    }
  }

  // Verifies the data survived, then zeroes it (so a later stale read would be
  // detected) and releases it.
  ~raii()
  {
    this->verify();
    if (m_cuda)
    {
      printf("(%p) RAII test: invalidating device memory\n", this);
      NVBENCH_CUDA_CALL(cudaMemset(m_data, 0, 6));
      printf("(%p) RAII test: freeing device memory\n", this);
      NVBENCH_CUDA_CALL(cudaFree(m_data));
    }
    else
    {
      printf("(%p) RAII test: invalidating host memory\n", this);
      std::fill(m_data, m_data + 6, '\0');
      printf("(%p) RAII test: freeing host memory\n", this);
      delete[] m_data;
    }
  }

  // Checks this instance's buffer, and the outer instance's buffer when one was given.
  void verify() noexcept
  {
    printf("(%p) RAII test: verifying instance state\n", this);
    this->verify(m_cuda, m_data);
    if (m_outer_data)
    {
      printf("(%p) RAII test: verifying outer state\n", this);
      this->verify(m_outer_cuda, m_outer_data);
    }
  }

  // Compares a (host or device) buffer against the reference pattern; exits with a
  // non-zero status on mismatch so the test fails loudly even during teardown.
  // NOTE(review): strncmp is used but <cstring> is not included directly — presumably
  // pulled in transitively; confirm.
  void verify(bool cuda, const char *data) noexcept
  {
    if (cuda)
    {
      char test_data[6];
      NVBENCH_CUDA_CALL(cudaMemcpy(test_data, data, 6, cudaMemcpyDeviceToHost));
      if (strncmp(test_data, m_ref_data, 6) != 0)
      {
        printf("(%p) RAII test failed: device data mismatch\n", this);
        std::exit(1);
      }
    }
    else
    {
      if (strncmp(data, m_ref_data, 6) != 0)
      {
        printf("(%p) RAII test failed: host data mismatch\n", this);
        std::exit(1);
      }
    }
  }
};
// These will be destroyed in the opposite order in which they are created:
#undef NVBENCH_MAIN_INITIALIZE_CUSTOM_PRE
#define NVBENCH_MAIN_INITIALIZE_CUSTOM_PRE(argc, argv) raii raii_outer(false);
#undef NVBENCH_MAIN_INITIALIZE_CUSTOM_POST
#define NVBENCH_MAIN_INITIALIZE_CUSTOM_POST(argc, argv) \
[[maybe_unused]] raii raii_inner(true, raii_outer.m_data, raii_outer.m_cuda);
NVBENCH_MAIN

View File

@@ -29,12 +29,10 @@
void noisy_bench(nvbench::state &state)
{
// time, convert ms -> s
const auto mean = static_cast<nvbench::float32_t>(state.get_float64("Mean")) /
1000.f;
const auto mean = static_cast<nvbench::float32_t>(state.get_float64("Mean")) / 1000.f;
// rel stdev
const auto noise_pct =
static_cast<nvbench::float32_t>(state.get_float64("Noise"));
const auto noise = noise_pct / 100.f;
const auto noise_pct = static_cast<nvbench::float32_t>(state.get_float64("Noise"));
const auto noise = noise_pct / 100.f;
// abs stdev
const auto stdev = noise * mean;
@@ -53,8 +51,7 @@ void noisy_bench(nvbench::state &state)
try
{
return static_cast<nvbench::float32_t>(
state.get_summary("nv/cold/time/gpu/stdev/relative")
.get_float64("value"));
state.get_summary("nv/cold/time/gpu/stdev/relative").get_float64("value"));
}
catch (std::invalid_argument &)
{

View File

@@ -0,0 +1,91 @@
/*
* Copyright 2023 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 with the LLVM exception
* (the "License"); you may not use this file except in compliance with
* the License.
*
* You may obtain a copy of the License at
*
* http://llvm.org/foundation/relicensing/LICENSE.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvbench/detail/entropy_criterion.cuh>
#include <nvbench/stopping_criterion.cuh>
#include <nvbench/types.cuh>
#include <numeric>
#include <random>
#include <vector>
#include "test_asserts.cuh"
// A constant signal has zero spread, so the entropy criterion must report
// completion as soon as enough identical samples have been recorded.
void test_const()
{
  nvbench::criterion_params params;
  nvbench::detail::entropy_criterion criterion;
  criterion.initialize(params);

  // nvbench needs at least 5 samples to compute the standard deviation,
  // so feed one more than that.
  int remaining = 6;
  while (remaining-- > 0)
  {
    criterion.add_measurement(42.0);
  }

  ASSERT(criterion.is_finished());
}
// Drive the criterion with a signal whose entropy first rises and then only
// decays: the measurements ramp 0, 1, 2, 3, 4, 5 and afterwards stay constant
// at 5.0. After the 5th sample no unexpected value ever appears, so the
// entropy trend (and hence the fitted slope) is negative from that point on:
//
//   0.0, 1.0, 1.5, 2.0, 2.3, 2.5   <- entropy while the ramp is running
//   2.5, 2.4, 2.2, ... , 0.7, 0.7  <- monotone decay over the constant tail
void produce_entropy_arch(nvbench::detail::entropy_criterion &criterion)
{
  for (int step = 0; step < 50; ++step)
  {
    const nvbench::float64_t sample = step > 5 ? 5.0 : static_cast<nvbench::float64_t>(step);
    criterion.add_measurement(sample);
  }
}
void test_entropy_arch()
{
nvbench::detail::entropy_criterion criterion;
// The R2 should be around 0.5
// The angle should be around -1.83
nvbench::criterion_params params;
params.set_float64("min-r2", 0.3);
params.set_float64("max-angle", -1.0);
criterion.initialize(params);
produce_entropy_arch(criterion);
ASSERT(criterion.is_finished());
params.set_float64("min-r2", 0.7);
criterion.initialize(params);
produce_entropy_arch(criterion);
ASSERT(!criterion.is_finished());
params.set_float64("min-r2", 0.3);
params.set_float64("max-angle", -2.0);
criterion.initialize(params);
produce_entropy_arch(criterion);
ASSERT(!criterion.is_finished());
}
// Entry point: run every entropy-criterion test case.
int main()
{
  test_const();
  test_entropy_arch();
  return 0;
}

View File

@@ -18,12 +18,17 @@
#include <nvbench/enum_type_list.cuh>
#include "test_asserts.cuh"
#include <fmt/format.h>
#include <type_traits>
#include "test_asserts.cuh"
// If using gcc version 7, disable some tests to WAR a compiler bug. See NVIDIA/nvbench#39.
#if defined(__GNUC__) && __GNUC__ == 7
#define USING_GCC_7
#endif
enum class scoped_enum
{
val_1,
@@ -97,8 +102,7 @@ NVBENCH_DECLARE_ENUM_TYPE_STRINGS(
void test_int()
{
ASSERT((std::is_same_v<nvbench::enum_type_list<>, nvbench::type_list<>>));
ASSERT((std::is_same_v<nvbench::enum_type_list<0>,
nvbench::type_list<nvbench::enum_type<0>>>));
ASSERT((std::is_same_v<nvbench::enum_type_list<0>, nvbench::type_list<nvbench::enum_type<0>>>));
ASSERT((std::is_same_v<nvbench::enum_type_list<0, 1, 2, 3, 4>,
nvbench::type_list<nvbench::enum_type<0>,
nvbench::enum_type<1>,
@@ -109,39 +113,36 @@ void test_int()
void test_scoped_enum()
{
ASSERT((
std::is_same_v<nvbench::enum_type_list<scoped_enum::val_1>,
nvbench::type_list<nvbench::enum_type<scoped_enum::val_1>>>));
ASSERT((
std::is_same_v<nvbench::enum_type_list<scoped_enum::val_1,
scoped_enum::val_2,
scoped_enum::val_3>,
nvbench::type_list<nvbench::enum_type<scoped_enum::val_1>,
nvbench::enum_type<scoped_enum::val_2>,
nvbench::enum_type<scoped_enum::val_3>>>));
#ifndef USING_GCC_7
ASSERT((std::is_same_v<nvbench::enum_type_list<scoped_enum::val_1>,
nvbench::type_list<nvbench::enum_type<scoped_enum::val_1>>>));
#endif
ASSERT((std::is_same_v<
nvbench::enum_type_list<scoped_enum::val_1, scoped_enum::val_2, scoped_enum::val_3>,
nvbench::type_list<nvbench::enum_type<scoped_enum::val_1>,
nvbench::enum_type<scoped_enum::val_2>,
nvbench::enum_type<scoped_enum::val_3>>>));
}
void test_unscoped_enum()
{
ASSERT(
(std::is_same_v<nvbench::enum_type_list<unscoped_val_1>,
nvbench::type_list<nvbench::enum_type<unscoped_val_1>>>));
ASSERT(
(std::is_same_v<
nvbench::enum_type_list<unscoped_val_1, unscoped_val_2, unscoped_val_3>,
nvbench::type_list<nvbench::enum_type<unscoped_val_1>,
nvbench::enum_type<unscoped_val_2>,
nvbench::enum_type<unscoped_val_3>>>));
#ifndef USING_GCC_7
ASSERT((std::is_same_v<nvbench::enum_type_list<unscoped_val_1>,
nvbench::type_list<nvbench::enum_type<unscoped_val_1>>>));
ASSERT((std::is_same_v<nvbench::enum_type_list<unscoped_val_1, unscoped_val_2, unscoped_val_3>,
nvbench::type_list<nvbench::enum_type<unscoped_val_1>,
nvbench::enum_type<unscoped_val_2>,
nvbench::enum_type<unscoped_val_3>>>));
#endif
}
void test_scoped_enum_type_strings()
{
using values = nvbench::enum_type_list<scoped_enum::val_1,
scoped_enum::val_2,
scoped_enum::val_3>;
using val_1 = nvbench::tl::get<0, values>;
using val_2 = nvbench::tl::get<1, values>;
using val_3 = nvbench::tl::get<2, values>;
using values =
nvbench::enum_type_list<scoped_enum::val_1, scoped_enum::val_2, scoped_enum::val_3>;
using val_1 = nvbench::tl::get<0, values>;
using val_2 = nvbench::tl::get<1, values>;
using val_3 = nvbench::tl::get<2, values>;
ASSERT((nvbench::type_strings<val_1>::input_string() == "1"));
ASSERT((nvbench::type_strings<val_1>::description() == "scoped_enum::val_1"));
ASSERT((nvbench::type_strings<val_2>::input_string() == "2"));

View File

@@ -34,8 +34,7 @@ void test_empty()
const auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
const auto *clone =
dynamic_cast<const nvbench::float64_axis *>(clone_base.get());
const auto *clone = dynamic_cast<const nvbench::float64_axis *>(clone_base.get());
ASSERT(clone != nullptr);
ASSERT(clone->get_name() == "Empty");
@@ -62,8 +61,7 @@ void test_basic()
const auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
const auto *clone =
dynamic_cast<const nvbench::float64_axis *>(clone_base.get());
const auto *clone = dynamic_cast<const nvbench::float64_axis *>(clone_base.get());
ASSERT(clone != nullptr);
ASSERT(clone->get_name() == "Basic");

View File

@@ -18,10 +18,10 @@
#include <nvbench/int64_axis.cuh>
#include "test_asserts.cuh"
#include <fmt/format.h>
#include "test_asserts.cuh"
void test_empty()
{
nvbench::int64_axis axis("Empty");
@@ -36,8 +36,7 @@ void test_empty()
const auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
const auto *clone =
dynamic_cast<const nvbench::int64_axis *>(clone_base.get());
const auto *clone = dynamic_cast<const nvbench::int64_axis *>(clone_base.get());
ASSERT(clone != nullptr);
ASSERT(clone->get_name() == "Empty");
@@ -66,8 +65,7 @@ void test_basic()
const auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
const auto *clone =
dynamic_cast<const nvbench::int64_axis *>(clone_base.get());
const auto *clone = dynamic_cast<const nvbench::int64_axis *>(clone_base.get());
ASSERT(clone != nullptr);
ASSERT(clone->get_name() == "BasicAxis");
@@ -87,8 +85,7 @@ void test_basic()
void test_power_of_two()
{
nvbench::int64_axis axis{"POTAxis"};
axis.set_inputs({0, 1, 2, 3, 7, 6, 5, 4},
nvbench::int64_axis_flags::power_of_two);
axis.set_inputs({0, 1, 2, 3, 7, 6, 5, 4}, nvbench::int64_axis_flags::power_of_two);
const std::vector<nvbench::int64_t> ref_inputs{0, 1, 2, 3, 7, 6, 5, 4};
const std::vector<nvbench::int64_t> ref_values{1, 2, 4, 8, 128, 64, 32, 16};
@@ -102,14 +99,12 @@ void test_power_of_two()
for (size_t i = 0; i < 8; ++i)
{
ASSERT(axis.get_input_string(i) == fmt::to_string(ref_inputs[i]));
ASSERT(axis.get_description(i) ==
fmt::format("2^{} = {}", ref_inputs[i], ref_values[i]));
ASSERT(axis.get_description(i) == fmt::format("2^{} = {}", ref_inputs[i], ref_values[i]));
}
const auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
const auto *clone =
dynamic_cast<const nvbench::int64_axis *>(clone_base.get());
const auto *clone = dynamic_cast<const nvbench::int64_axis *>(clone_base.get());
ASSERT(clone != nullptr);
ASSERT(clone->get_name() == "POTAxis");
@@ -122,8 +117,7 @@ void test_power_of_two()
for (size_t i = 0; i < 8; ++i)
{
ASSERT(clone->get_input_string(i) == fmt::to_string(ref_inputs[i]));
ASSERT(clone->get_description(i) ==
fmt::format("2^{} = {}", ref_inputs[i], ref_values[i]));
ASSERT(clone->get_description(i) == fmt::format("2^{} = {}", ref_inputs[i], ref_values[i]));
}
}
@@ -250,8 +244,7 @@ void test_update_none_to_pow2()
void test_update_pow2_to_none()
{
nvbench::int64_axis axis{"TestAxis"};
axis.set_inputs({0, 1, 2, 3, 7, 6, 5, 4},
nvbench::int64_axis_flags::power_of_two);
axis.set_inputs({0, 1, 2, 3, 7, 6, 5, 4}, nvbench::int64_axis_flags::power_of_two);
const std::vector<nvbench::int64_t> ref_inputs{0, 1, 2, 3, 7, 6, 5, 4};
const std::vector<nvbench::int64_t> ref_values{1, 2, 4, 8, 128, 64, 32, 16};
@@ -304,8 +297,7 @@ void test_update_pow2_to_none()
for (size_t i = 0; i < 8; ++i)
{
ASSERT(axis.get_input_string(i) == fmt::to_string(ref_inputs[i]));
ASSERT(axis.get_description(i) ==
fmt::format("2^{} = {}", ref_inputs[i], ref_values[i]));
ASSERT(axis.get_description(i) == fmt::format("2^{} = {}", ref_inputs[i], ref_values[i]));
}
}
@@ -313,8 +305,7 @@ void test_update_pow2_to_pow2()
{
nvbench::int64_axis axis{"TestAxis"};
axis.set_inputs({0, 1, 2, 3, 7, 6, 5, 4},
nvbench::int64_axis_flags::power_of_two);
axis.set_inputs({0, 1, 2, 3, 7, 6, 5, 4}, nvbench::int64_axis_flags::power_of_two);
const std::vector<nvbench::int64_t> ref_inputs{0, 1, 2, 3, 7, 6, 5, 4};
const std::vector<nvbench::int64_t> ref_values{1, 2, 4, 8, 128, 64, 32, 16};
@@ -369,8 +360,7 @@ void test_update_pow2_to_pow2()
for (size_t i = 0; i < 8; ++i)
{
ASSERT(axis.get_input_string(i) == fmt::to_string(ref_inputs[i]));
ASSERT(axis.get_description(i) ==
fmt::format("2^{} = {}", ref_inputs[i], ref_values[i]));
ASSERT(axis.get_description(i) == fmt::format("2^{} = {}", ref_inputs[i], ref_values[i]));
}
}

View File

@@ -18,10 +18,10 @@
#include <nvbench/named_values.cuh>
#include "test_asserts.cuh"
#include <algorithm>
#include "test_asserts.cuh"
void test_empty()
{
nvbench::named_values vals;

View File

@@ -16,16 +16,14 @@
* limitations under the License.
*/
#include <nvbench/option_parser.cuh>
#include <nvbench/create.cuh>
#include <nvbench/option_parser.cuh>
#include <nvbench/type_list.cuh>
#include "test_asserts.cuh"
#include <fmt/format.h>
#include <iostream>
#include "test_asserts.cuh"
//==============================================================================
// Declare a couple benchmarks for testing:
@@ -52,15 +50,14 @@ NVBENCH_BENCH_TYPES(TestBench, NVBENCH_TYPE_AXES(Ts, Us))
namespace
{
[[nodiscard]] std::string
states_to_string(const std::vector<nvbench::state> &states)
[[nodiscard]] std::string states_to_string(const std::vector<nvbench::state> &states)
{
fmt::memory_buffer buffer;
std::string table_format = "| {:^5} | {:^10} | {:^4} | {:^4} | {:^4} "
"| {:^4} | {:^6} | {:^8} |\n";
fmt::format_to(buffer, "\n");
fmt::format_to(buffer,
fmt::format_to(std::back_inserter(buffer), "\n");
fmt::format_to(std::back_inserter(buffer),
table_format,
"State",
"TypeConfig",
@@ -74,7 +71,7 @@ states_to_string(const std::vector<nvbench::state> &states)
std::size_t config = 0;
for (const auto &state : states)
{
fmt::format_to(buffer,
fmt::format_to(std::back_inserter(buffer),
table_format,
config++,
state.get_type_config_index(),
@@ -90,7 +87,7 @@ states_to_string(const std::vector<nvbench::state> &states)
// Expects the parser to have a single TestBench benchmark. Runs the benchmark
// and returns the resulting states.
[[nodiscard]] const auto& parser_to_states(nvbench::option_parser &parser)
[[nodiscard]] const auto &parser_to_states(nvbench::option_parser &parser)
{
const auto &benches = parser.get_benchmarks();
ASSERT(benches.size() == 1);
@@ -270,8 +267,7 @@ void test_int64_axis_single()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " Ints [ ] = [ 2 : 2 : 1 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " Ints [ ] = [ 2 : 2 : 1 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -311,8 +307,7 @@ void test_int64_axis_multi()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " Ints [ ] = [ 2 , 7 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " Ints [ ] = [ 2 , 7 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -326,8 +321,7 @@ void test_int64_axis_multi()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " Ints [ ] = [ 2 : 7 : 5 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " Ints [ ] = [ 2 : 7 : 5 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -372,8 +366,7 @@ void test_int64_axis_pow2_single()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " PO2s [ pow2 ] = [ 7 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " PO2s [ pow2 ] = [ 7 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -387,8 +380,7 @@ void test_int64_axis_pow2_single()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " PO2s [ pow2 ] = [ 7 : 7 : 1 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " PO2s [ pow2 ] = [ 7 : 7 : 1 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -428,8 +420,7 @@ void test_int64_axis_pow2_multi()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " PO2s [ pow2 ] = [ 2 , 7 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " PO2s [ pow2 ] = [ 2 , 7 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -443,8 +434,7 @@ void test_int64_axis_pow2_multi()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " PO2s [ pow2 ] = [ 2 : 7 : 5 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " PO2s [ pow2 ] = [ 2 : 7 : 5 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -489,8 +479,7 @@ void test_int64_axis_none_to_pow2_single()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " Ints [ pow2 ] = [ 7 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " Ints [ pow2 ] = [ 7 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -504,8 +493,7 @@ void test_int64_axis_none_to_pow2_single()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " Ints [ pow2 ] = [ 7 : 7 : 1 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " Ints [ pow2 ] = [ 7 : 7 : 1 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -545,8 +533,7 @@ void test_int64_axis_none_to_pow2_multi()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " Ints [ pow2 ] = [ 2 , 7 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " Ints [ pow2 ] = [ 2 , 7 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -560,8 +547,7 @@ void test_int64_axis_none_to_pow2_multi()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " Ints [ pow2 ] = [ 2 : 7 : 5 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " Ints [ pow2 ] = [ 2 : 7 : 5 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -620,8 +606,7 @@ void test_int64_axis_pow2_to_none_single()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " PO2s [ ] = [ 2 : 2 : 1 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " PO2s [ ] = [ 2 : 2 : 1 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -661,8 +646,7 @@ void test_int64_axis_pow2_to_none_multi()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " PO2s [ ] = [ 2 , 7 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " PO2s [ ] = [ 2 , 7 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -676,8 +660,7 @@ void test_int64_axis_pow2_to_none_multi()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " PO2s [ ] = [ 2 : 7 : 5 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " PO2s [ ] = [ 2 : 7 : 5 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -722,8 +705,7 @@ void test_float64_axis_single()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " Floats [ ] = [ 3.5 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " Floats [ ] = [ 3.5 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -737,10 +719,7 @@ void test_float64_axis_single()
{
nvbench::option_parser parser;
parser.parse({"--benchmark",
"TestBench",
"--axis",
" Floats [ ] = [ 3.5 : 3.6 : 1 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " Floats [ ] = [ 3.5 : 3.6 : 1 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -780,8 +759,7 @@ void test_float64_axis_multi()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " Floats [ ] = [ 3.5 , 4.1 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " Floats [ ] = [ 3.5 , 4.1 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -795,18 +773,14 @@ void test_float64_axis_multi()
{
nvbench::option_parser parser;
parser.parse({"--benchmark",
"TestBench",
"--axis",
" Floats [ ] = [ 3.5 : 4.2 : 0.6 ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " Floats [ ] = [ 3.5 : 4.2 : 0.6 ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", "Floats=[3.5:4.2:0.6]"});
parser.parse({"--benchmark", "TestBench", "--axis", "Floats=[3.5:4.2:0.6]"});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -830,8 +804,7 @@ void test_string_axis_single()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " Strings [ ] = fo br "});
parser.parse({"--benchmark", "TestBench", "--axis", " Strings [ ] = fo br "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -845,8 +818,7 @@ void test_string_axis_single()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " Strings [ ] = [ fo br ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " Strings [ ] = [ fo br ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -886,8 +858,7 @@ void test_string_axis_multi()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " Strings [ ] = [ fo br , baz ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " Strings [ ] = [ fo br , baz ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -954,8 +925,7 @@ void test_type_axis_multi()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "TestBench", "--axis", " T [ ] = [ U8, void ] "});
parser.parse({"--benchmark", "TestBench", "--axis", " T [ ] = [ U8, void ] "});
const auto test = parser_to_state_string(parser);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
@@ -1180,9 +1150,8 @@ void test_axis_before_benchmark()
void test_min_samples()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "DummyBench", "--min-samples", "12345"});
const auto& states = parser_to_states(parser);
parser.parse({"--benchmark", "DummyBench", "--min-samples", "12345"});
const auto &states = parser_to_states(parser);
ASSERT(states.size() == 1);
ASSERT(states[0].get_min_samples() == 12345);
@@ -1191,9 +1160,8 @@ void test_min_samples()
void test_min_time()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "DummyBench", "--min-time", "12345e2"});
const auto& states = parser_to_states(parser);
parser.parse({"--benchmark", "DummyBench", "--min-time", "12345e2"});
const auto &states = parser_to_states(parser);
ASSERT(states.size() == 1);
ASSERT(std::abs(states[0].get_min_time() - 12345e2) < 1.);
@@ -1202,9 +1170,8 @@ void test_min_time()
void test_max_noise()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "DummyBench", "--max-noise", "50.3"});
const auto& states = parser_to_states(parser);
parser.parse({"--benchmark", "DummyBench", "--max-noise", "50.3"});
const auto &states = parser_to_states(parser);
ASSERT(states.size() == 1);
ASSERT(std::abs(states[0].get_max_noise() - 0.503) < 1.e-4);
@@ -1213,9 +1180,8 @@ void test_max_noise()
void test_skip_time()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "DummyBench", "--skip-time", "12345e2"});
const auto& states = parser_to_states(parser);
parser.parse({"--benchmark", "DummyBench", "--skip-time", "12345e2"});
const auto &states = parser_to_states(parser);
ASSERT(states.size() == 1);
ASSERT(std::abs(states[0].get_skip_time() - 12345e2) < 1.);
@@ -1224,14 +1190,273 @@ void test_skip_time()
void test_timeout()
{
nvbench::option_parser parser;
parser.parse(
{"--benchmark", "DummyBench", "--timeout", "12345e2"});
const auto& states = parser_to_states(parser);
parser.parse({"--benchmark", "DummyBench", "--timeout", "12345e2"});
const auto &states = parser_to_states(parser);
ASSERT(states.size() == 1);
ASSERT(std::abs(states[0].get_timeout() - 12345e2) < 1.);
}
void test_stopping_criterion()
{
{ // Per benchmark criterion
nvbench::option_parser parser;
parser.parse({
"--benchmark",
"DummyBench",
"--stopping-criterion",
"entropy",
"--max-angle",
"0.42",
"--min-r2",
"0.6",
});
const auto &states = parser_to_states(parser);
ASSERT(states.size() == 1);
ASSERT(states[0].get_stopping_criterion() == "entropy");
const nvbench::criterion_params &criterion_params = states[0].get_criterion_params();
ASSERT(criterion_params.has_value("max-angle"));
ASSERT(criterion_params.has_value("min-r2"));
ASSERT(criterion_params.get_float64("max-angle") == 0.42);
ASSERT(criterion_params.get_float64("min-r2") == 0.6);
}
{ // Global criterion
nvbench::option_parser parser;
parser.parse({
"--stopping-criterion",
"entropy",
"--max-angle",
"0.42",
"--min-r2",
"0.6",
"--benchmark",
"DummyBench",
});
const auto &states = parser_to_states(parser);
ASSERT(states.size() == 1);
ASSERT(states[0].get_stopping_criterion() == "entropy");
const nvbench::criterion_params &criterion_params = states[0].get_criterion_params();
ASSERT(criterion_params.has_value("max-angle"));
ASSERT(criterion_params.has_value("min-r2"));
ASSERT(criterion_params.get_float64("max-angle") == 0.42);
ASSERT(criterion_params.get_float64("min-r2") == 0.6);
}
{ // Global criterion, per-benchmark params
nvbench::option_parser parser;
parser.parse({
"--stopping-criterion",
"entropy",
"--benchmark",
"DummyBench",
"--max-angle",
"0.42",
"--min-r2",
"0.6",
});
const auto &states = parser_to_states(parser);
ASSERT(states.size() == 1);
ASSERT(states[0].get_stopping_criterion() == "entropy");
const nvbench::criterion_params &criterion_params = states[0].get_criterion_params();
ASSERT(criterion_params.has_value("max-angle"));
ASSERT(criterion_params.has_value("min-r2"));
ASSERT(criterion_params.get_float64("max-angle") == 0.42);
ASSERT(criterion_params.get_float64("min-r2") == 0.6);
}
{ // Global params to default criterion should work:
nvbench::option_parser parser;
parser.parse({
"--max-noise",
"0.5",
"--min-time",
"0.1",
"--benchmark",
"DummyBench",
"--stopping-criterion",
"entropy",
"--max-angle",
"0.42",
"--min-r2",
"0.6",
});
const auto &states = parser_to_states(parser);
ASSERT(states.size() == 1);
ASSERT(states[0].get_stopping_criterion() == "entropy");
const nvbench::criterion_params &criterion_params = states[0].get_criterion_params();
ASSERT(criterion_params.has_value("max-angle"));
ASSERT(criterion_params.has_value("min-r2"));
ASSERT(criterion_params.get_float64("max-angle") == 0.42);
ASSERT(criterion_params.get_float64("min-r2") == 0.6);
}
{ // Unknown stopping criterion should throw
bool exception_thrown = false;
try
{
nvbench::option_parser parser;
parser.parse({
"--benchmark",
"DummyBench",
"--stopping-criterion",
"I_do_not_exist",
});
}
catch (const std::runtime_error &)
{
exception_thrown = true;
}
ASSERT(exception_thrown);
}
{ // Global criterion to non-default params without global --stopping-criterion should throw
bool exception_thrown = false;
try
{
nvbench::option_parser parser;
parser.parse({
"--max-angle",
"0.42",
"--min-r2",
"0.6",
"--benchmark",
"DummyBench",
"--stopping-criterion",
"entropy",
});
}
catch (const std::runtime_error &)
{
exception_thrown = true;
}
ASSERT(exception_thrown);
}
{ // Invalid global param throws exception:
bool exception_thrown = false;
try
{
nvbench::option_parser parser;
parser.parse({
"--max-angle",
"0.42",
"--benchmark",
"DummyBench",
"--stopping-criterion",
"entropy",
"--min-r2",
"0.6",
"--max-angle",
"0.42",
"--benchmark",
"TestBench",
"--stopping-criterion",
"stdrel",
});
}
catch (const std::runtime_error & /*ex*/)
{
exception_thrown = true;
}
ASSERT(exception_thrown);
}
{ // Invalid per-bench param throws exception:
bool exception_thrown = false;
try
{
nvbench::option_parser parser;
parser.parse({
"--benchmark",
"DummyBench",
"--stopping-criterion",
"entropy",
"--min-r2",
"0.6",
"--max-angle",
"0.42",
"--benchmark",
"TestBench",
"--stopping-criterion",
"stdrel",
"--max-angle",
"0.42",
});
}
catch (const std::runtime_error & /*ex*/)
{
exception_thrown = true;
}
ASSERT(exception_thrown);
}
{ // global param-before-criterion throws exception:
bool exception_thrown = false;
try
{
nvbench::option_parser parser;
parser.parse({
"--min-r2", //
"0.6",
"--stopping-criterion",
"entropy",
"--benchmark",
"DummyBench",
});
}
catch (const std::runtime_error & /*ex*/)
{
exception_thrown = true;
}
ASSERT(exception_thrown);
}
{ // per-benchmark param-before-criterion throws exception:
bool exception_thrown = false;
try
{
nvbench::option_parser parser;
parser.parse({
"--benchmark", //
"DummyBench",
"--min-r2",
"0.6",
"--stopping-criterion",
"entropy",
});
}
catch (const std::runtime_error & /*ex*/)
{
exception_thrown = true;
}
ASSERT(exception_thrown);
}
{ // Invalid param type throws exception:
bool exception_thrown = false;
try
{
nvbench::option_parser parser;
parser.parse({
"--benchmark", //
"DummyBench",
"--stopping-criterion",
"entropy",
"--min-r2",
"\"foo\"",
});
}
catch (const std::runtime_error &)
{
exception_thrown = true;
}
ASSERT(exception_thrown);
}
}
int main()
try
{
@@ -1268,10 +1493,12 @@ try
test_skip_time();
test_timeout();
test_stopping_criterion();
return 0;
}
catch (std::exception &err)
{
fmt::print(stderr, "{}", err.what());
fmt::print(stderr, "Unexpected exception:\n{}\n", err.what());
return 1;
}

View File

@@ -22,12 +22,9 @@
void test_basic()
{
ASSERT((nvbench::range(0, 6) ==
std::vector<nvbench::int64_t>{0, 1, 2, 3, 4, 5, 6}));
ASSERT((nvbench::range(0, 6, 1) ==
std::vector<nvbench::int64_t>{0, 1, 2, 3, 4, 5, 6}));
ASSERT(
(nvbench::range(0, 6, 2) == std::vector<nvbench::int64_t>{0, 2, 4, 6}));
ASSERT((nvbench::range(0, 6) == std::vector<nvbench::int64_t>{0, 1, 2, 3, 4, 5, 6}));
ASSERT((nvbench::range(0, 6, 1) == std::vector<nvbench::int64_t>{0, 1, 2, 3, 4, 5, 6}));
ASSERT((nvbench::range(0, 6, 2) == std::vector<nvbench::int64_t>{0, 2, 4, 6}));
ASSERT((nvbench::range(0, 6, 3) == std::vector<nvbench::int64_t>{0, 3, 6}));
ASSERT((nvbench::range(0, 6, 4) == std::vector<nvbench::int64_t>{0, 4}));
ASSERT((nvbench::range(0, 6, 5) == std::vector<nvbench::int64_t>{0, 5}));
@@ -37,26 +34,19 @@ void test_basic()
void test_result_type()
{
// All ints should turn into int64 by default:
ASSERT((std::is_same_v<decltype(nvbench::range(0ll, 1ll)),
std::vector<nvbench::int64_t>>));
ASSERT((std::is_same_v<decltype(nvbench::range(0, 1)),
std::vector<nvbench::int64_t>>));
ASSERT((std::is_same_v<decltype(nvbench::range(0u, 1u)),
std::vector<nvbench::int64_t>>));
ASSERT((std::is_same_v<decltype(nvbench::range(0ll, 1ll)), std::vector<nvbench::int64_t>>));
ASSERT((std::is_same_v<decltype(nvbench::range(0, 1)), std::vector<nvbench::int64_t>>));
ASSERT((std::is_same_v<decltype(nvbench::range(0u, 1u)), std::vector<nvbench::int64_t>>));
// All floats should turn into float64 by default:
ASSERT((std::is_same_v<decltype(nvbench::range(0., 1.)),
std::vector<nvbench::float64_t>>));
ASSERT((std::is_same_v<decltype(nvbench::range(0.f, 1.f)),
std::vector<nvbench::float64_t>>));
ASSERT((std::is_same_v<decltype(nvbench::range(0., 1.)), std::vector<nvbench::float64_t>>));
ASSERT((std::is_same_v<decltype(nvbench::range(0.f, 1.f)), std::vector<nvbench::float64_t>>));
// Other types may be explicitly specified:
ASSERT((std::is_same_v<decltype(nvbench::range<nvbench::float32_t,
nvbench::float32_t>(0.f, 1.f)),
ASSERT((std::is_same_v<decltype(nvbench::range<nvbench::float32_t, nvbench::float32_t>(0.f, 1.f)),
std::vector<nvbench::float32_t>>));
ASSERT((std::is_same_v<
decltype(nvbench::range<nvbench::int32_t, nvbench::int32_t>(0, 1)),
std::vector<nvbench::int32_t>>));
ASSERT((std::is_same_v<decltype(nvbench::range<nvbench::int32_t, nvbench::int32_t>(0, 1)),
std::vector<nvbench::int32_t>>));
}
void test_fp_tolerance()
@@ -68,10 +58,8 @@ void test_fp_tolerance()
const nvbench::float32_t stride = 1e-4f;
for (std::size_t size = 1; size < 1024; ++size)
{
const nvbench::float32_t end =
start + stride * static_cast<nvbench::float32_t>(size - 1);
ASSERT_MSG(nvbench::range(start, end, stride).size() == size,
"size={}", size);
const nvbench::float32_t end = start + stride * static_cast<nvbench::float32_t>(size - 1);
ASSERT_MSG(nvbench::range(start, end, stride).size() == size, "size={}", size);
}
}

29
testing/reset_error.cu Normal file
View File

@@ -0,0 +1,29 @@
#include <nvbench/cuda_call.cuh>
#include "test_asserts.cuh"
namespace
{
// Trivial kernel used only to provoke a CUDA error: main() launches it with
// null pointers, so the write through `b` faults.
__global__ void multiply5(const int32_t *__restrict__ a, int32_t *__restrict__ b)
{
  // One element per thread; no bounds check needed for this test.
  const auto id = blockIdx.x * blockDim.x + threadIdx.x;
  b[id] = 5 * a[id];
}
} // namespace
int main()
{
  // Launch the kernel with null pointers: this provokes an asynchronous CUDA
  // error that surfaces on the next synchronization.
  multiply5<<<256, 256>>>(nullptr, nullptr);
  try
  {
    // The synchronize call reports the failed launch; NVBENCH_CUDA_CALL is
    // expected to translate the non-success status into a std::runtime_error.
    NVBENCH_CUDA_CALL(cudaStreamSynchronize(0));
    ASSERT(false); // unreachable: the call above must throw
  }
  catch (const std::runtime_error &)
  {
    // After NVBENCH_CUDA_CALL has handled the failure, the pending error
    // state should already be cleared -- presumably NVBENCH_CUDA_CALL calls
    // cudaGetLastError() to reset it; confirm against cuda_call.cuh.
    ASSERT(cudaGetLastError() == cudaError_t::cudaSuccess);
  }
  return 0;
}

View File

@@ -18,16 +18,15 @@
#include <nvbench/detail/ring_buffer.cuh>
#include "test_asserts.cuh"
#include <algorithm>
#include <vector>
#include "test_asserts.cuh"
template <typename T>
bool equal(const nvbench::detail::ring_buffer<T> &buffer,
const std::vector<T> &reference)
bool equal(const nvbench::detail::ring_buffer<T> &buffer, const std::vector<T> &reference)
{
return std::equal(buffer.cbegin(), buffer.cend(), reference.cbegin());
return std::equal(buffer.begin(), buffer.end(), reference.begin());
}
int main()
@@ -62,12 +61,12 @@ try
ASSERT(avg.size() == 3);
ASSERT(avg.capacity() == 3);
ASSERT_MSG(avg.back() == 5, " (got {})", avg.back());
ASSERT(equal(avg, {5, 2, -15}));
ASSERT(equal(avg, {2, -15, 5}));
avg.push_back(0);
ASSERT(avg.size() == 3);
ASSERT(avg.capacity() == 3);
ASSERT(equal(avg, {5, 0, -15}));
ASSERT(equal(avg, {-15, 5, 0}));
ASSERT_MSG(avg.back() == 0, " (got {})", avg.back());
avg.push_back(128);

View File

@@ -16,23 +16,22 @@
* limitations under the License.
*/
#include <nvbench/runner.cuh>
#include <nvbench/benchmark.cuh>
#include <nvbench/callable.cuh>
#include <nvbench/runner.cuh>
#include <nvbench/state.cuh>
#include <nvbench/type_list.cuh>
#include <nvbench/type_strings.cuh>
#include <nvbench/types.cuh>
#include "test_asserts.cuh"
#include <fmt/format.h>
#include <algorithm>
#include <variant>
#include <vector>
#include "test_asserts.cuh"
template <typename T>
std::vector<T> sort(std::vector<T> &&vec)
{
@@ -43,13 +42,13 @@ std::vector<T> sort(std::vector<T> &&vec)
void no_op_generator(nvbench::state &state)
{
fmt::memory_buffer params;
fmt::format_to(params, "Params:");
fmt::format_to(std::back_inserter(params), "Params:");
const auto &axis_values = state.get_axis_values();
for (const auto &name : sort(axis_values.get_names()))
{
std::visit(
[&params, &name](const auto &value) {
fmt::format_to(params, " {}: {}", name, value);
fmt::format_to(std::back_inserter(params), " {}: {}", name, value);
},
axis_values.get_value(name));
}
@@ -65,21 +64,16 @@ using misc_types = nvbench::type_list<bool, void>;
using type_axes = nvbench::type_list<float_types, int_types, misc_types>;
template <typename FloatT, typename IntT, typename MiscT>
void template_no_op_generator(nvbench::state &state,
nvbench::type_list<FloatT, IntT, MiscT>)
void template_no_op_generator(nvbench::state &state, nvbench::type_list<FloatT, IntT, MiscT>)
{
ASSERT(nvbench::type_strings<FloatT>::input_string() ==
state.get_string("FloatT"));
ASSERT(nvbench::type_strings<IntT>::input_string() ==
state.get_string("IntT"));
ASSERT(nvbench::type_strings<IntT>::input_string() ==
state.get_string("IntT"));
ASSERT(nvbench::type_strings<FloatT>::input_string() == state.get_string("FloatT"));
ASSERT(nvbench::type_strings<IntT>::input_string() == state.get_string("IntT"));
ASSERT(nvbench::type_strings<IntT>::input_string() == state.get_string("IntT"));
// Enum params using non-templated version:
no_op_generator(state);
}
NVBENCH_DEFINE_CALLABLE_TEMPLATE(template_no_op_generator,
template_no_op_callable);
NVBENCH_DEFINE_CALLABLE_TEMPLATE(template_no_op_generator, template_no_op_callable);
void test_empty()
{
@@ -124,7 +118,7 @@ void test_non_types()
for (const auto &state : bench.get_states())
{
ASSERT(state.is_skipped() == true);
fmt::format_to(buffer, "{}\n", state.get_skip_reason());
fmt::format_to(std::back_inserter(buffer), "{}\n", state.get_skip_reason());
}
const std::string ref = R"expected(Params: Float: 11 Int: 1 String: One
@@ -184,7 +178,7 @@ void test_types()
for (const auto &state : bench.get_states())
{
ASSERT(state.is_skipped() == true);
fmt::format_to(buffer, "{}\n", state.get_skip_reason());
fmt::format_to(std::back_inserter(buffer), "{}\n", state.get_skip_reason());
}
const std::string ref = R"expected(Params: FloatT: F32 IntT: I32 MiscT: bool
@@ -228,7 +222,7 @@ void test_both()
for (const auto &state : bench.get_states())
{
ASSERT(state.is_skipped() == true);
fmt::format_to(buffer, "{}\n", state.get_skip_reason());
fmt::format_to(std::back_inserter(buffer), "{}\n", state.get_skip_reason());
}
const std::string ref =

View File

@@ -16,10 +16,9 @@
* limitations under the License.
*/
#include <nvbench/state.cuh>
#include <nvbench/benchmark.cuh>
#include <nvbench/callable.cuh>
#include <nvbench/state.cuh>
#include <nvbench/summary.cuh>
#include <nvbench/types.cuh>
@@ -43,8 +42,7 @@ struct state_tester : public nvbench::state
void set_param(std::string name, T &&value)
{
this->state::m_axis_values.set_value(std::move(name),
nvbench::named_values::value_type{
std::forward<T>(value)});
nvbench::named_values::value_type{std::forward<T>(value)});
}
};
} // namespace nvbench::detail
@@ -57,9 +55,13 @@ void test_streams()
state_tester state{bench};
// Confirm that the stream hasn't been initialized yet
ASSERT(!state.get_cuda_stream_optional().has_value());
// Test non-owning stream
cudaStream_t default_stream = 0;
state.set_cuda_stream(nvbench::cuda_stream{default_stream, false});
ASSERT(state.get_cuda_stream_optional() == default_stream);
ASSERT(state.get_cuda_stream() == default_stream);
// Test owning stream

View File

@@ -16,17 +16,16 @@
* limitations under the License.
*/
#include <nvbench/detail/state_generator.cuh>
#include <nvbench/axes_metadata.cuh>
#include <nvbench/axis_base.cuh>
#include <nvbench/benchmark.cuh>
#include <nvbench/callable.cuh>
#include "test_asserts.cuh"
#include <nvbench/detail/state_generator.cuh>
#include <fmt/format.h>
#include "test_asserts.cuh"
// Mock up a benchmark for testing:
void dummy_generator(nvbench::state &) {}
NVBENCH_DEFINE_CALLABLE(dummy_generator, dummy_callable);
@@ -37,7 +36,7 @@ using ints = nvbench::type_list<nvbench::int32_t, nvbench::int64_t>;
using misc = nvbench::type_list<void, bool>;
using type_axes = nvbench::type_list<floats, ints, misc>;
template <typename F, typename I, typename M>
void template_generator(nvbench::state &, nvbench::type_list<F, I, M>){};
void template_generator(nvbench::state &, nvbench::type_list<F, I, M>) {};
NVBENCH_DEFINE_CALLABLE_TEMPLATE(template_generator, template_callable);
using template_bench = nvbench::benchmark<template_callable, type_axes>;
@@ -112,17 +111,17 @@ void test_basic()
for (sg.init(); sg.iter_valid(); sg.next())
{
line.clear();
fmt::format_to(line, "| {:^2}", line_num++);
fmt::format_to(std::back_inserter(line), "| {:^2}", line_num++);
for (auto &axis_index : sg.get_current_indices())
{
ASSERT(axis_index.type == nvbench::axis_type::string);
fmt::format_to(line,
fmt::format_to(std::back_inserter(line),
" | {}: {}/{}",
axis_index.name,
axis_index.index,
axis_index.size);
}
fmt::format_to(buffer, "{} |\n", fmt::to_string(line));
fmt::format_to(std::back_inserter(buffer), "{} |\n", fmt::to_string(line));
}
const std::string ref =
@@ -174,23 +173,19 @@ void test_create()
bench.set_devices(std::vector<int>{});
bench.add_float64_axis("Radians", {3.14, 6.28});
bench.add_int64_axis("VecSize", {2, 3, 4}, nvbench::int64_axis_flags::none);
bench.add_int64_axis("NumInputs",
{10, 15, 20},
nvbench::int64_axis_flags::power_of_two);
bench.add_int64_axis("NumInputs", {10, 15, 20}, nvbench::int64_axis_flags::power_of_two);
bench.add_string_axis("Strategy", {"Recursive", "Iterative"});
const std::vector<nvbench::state> states =
nvbench::detail::state_generator::create(bench);
const std::vector<nvbench::state> states = nvbench::detail::state_generator::create(bench);
// 2 (Radians) * 3 (VecSize) * 3 (NumInputs) * 2 (Strategy) = 36
ASSERT(states.size() == 36);
fmt::memory_buffer buffer;
const std::string table_format =
"| {:^5} | {:^10} | {:^7} | {:^7} | {:^9} | {:^9} |\n";
const std::string table_format = "| {:^5} | {:^10} | {:^7} | {:^7} | {:^9} | {:^9} |\n";
fmt::format_to(buffer, "\n");
fmt::format_to(buffer,
fmt::format_to(std::back_inserter(buffer), "\n");
fmt::format_to(std::back_inserter(buffer),
table_format,
"State",
"TypeConfig",
@@ -202,7 +197,7 @@ void test_create()
std::size_t config = 0;
for (const auto &state : states)
{
fmt::format_to(buffer,
fmt::format_to(std::back_inserter(buffer),
table_format,
config++,
state.get_type_config_index(),
@@ -264,13 +259,10 @@ void test_create_with_types()
bench.set_type_axes_names({"Floats", "Ints", "Misc"});
bench.add_float64_axis("Radians", {3.14, 6.28});
bench.add_int64_axis("VecSize", {2, 3, 4}, nvbench::int64_axis_flags::none);
bench.add_int64_axis("NumInputs",
{10, 15, 20},
nvbench::int64_axis_flags::power_of_two);
bench.add_int64_axis("NumInputs", {10, 15, 20}, nvbench::int64_axis_flags::power_of_two);
bench.add_string_axis("Strategy", {"Recursive", "Iterative"});
const std::vector<nvbench::state> states =
nvbench::detail::state_generator::create(bench);
const std::vector<nvbench::state> states = nvbench::detail::state_generator::create(bench);
// - 2 (Floats) * 2 (Ints) * 2 (Misc) = 8 total type_configs
// - 2 (Radians) * 3 (VecSize) * 3 (NumInputs) * 2 (Strategy) = 36 non_type
@@ -281,8 +273,8 @@ void test_create_with_types()
std::string table_format = "| {:^5} | {:^10} | {:^6} | {:^4} | {:^4} | {:^7} "
"| {:^7} | {:^9} | {:^9} |\n";
fmt::format_to(buffer, "\n");
fmt::format_to(buffer,
fmt::format_to(std::back_inserter(buffer), "\n");
fmt::format_to(std::back_inserter(buffer),
table_format,
"State",
"TypeConfig",
@@ -297,7 +289,7 @@ void test_create_with_types()
std::size_t config = 0;
for (const auto &state : states)
{
fmt::format_to(buffer,
fmt::format_to(std::back_inserter(buffer),
table_format,
config++,
state.get_type_config_index(),
@@ -614,24 +606,21 @@ void test_create_with_masked_types()
bench.set_type_axes_names({"Floats", "Ints", "Misc"});
bench.add_float64_axis("Radians", {3.14, 6.28});
bench.add_int64_axis("VecSize", {2, 3, 4}, nvbench::int64_axis_flags::none);
bench.add_int64_axis("NumInputs",
{10, 15, 20},
nvbench::int64_axis_flags::power_of_two);
bench.add_int64_axis("NumInputs", {10, 15, 20}, nvbench::int64_axis_flags::power_of_two);
bench.add_string_axis("Strategy", {"Recursive", "Iterative"});
// Mask out some types:
bench.get_axes().get_type_axis("Floats").set_active_inputs({"F32"});
bench.get_axes().get_type_axis("Ints").set_active_inputs({"I64"});
const std::vector<nvbench::state> states =
nvbench::detail::state_generator::create(bench);
const std::vector<nvbench::state> states = nvbench::detail::state_generator::create(bench);
fmt::memory_buffer buffer;
std::string table_format = "| {:^5} | {:^10} | {:^6} | {:^4} | {:^4} | {:^7} "
"| {:^7} | {:^9} | {:^9} |\n";
fmt::format_to(buffer, "\n");
fmt::format_to(buffer,
fmt::format_to(std::back_inserter(buffer), "\n");
fmt::format_to(std::back_inserter(buffer),
table_format,
"State",
"TypeConfig",
@@ -646,7 +635,7 @@ void test_create_with_masked_types()
std::size_t config = 0;
for (const auto &state : states)
{
fmt::format_to(buffer,
fmt::format_to(std::back_inserter(buffer),
table_format,
config++,
state.get_type_config_index(),
@@ -751,8 +740,7 @@ void test_devices()
bench.add_string_axis("S", {"foo", "bar"});
bench.add_int64_axis("I", {2, 4});
const std::vector<nvbench::state> states =
nvbench::detail::state_generator::create(bench);
const std::vector<nvbench::state> states = nvbench::detail::state_generator::create(bench);
// 3 devices * 4 axis configs = 12 total states
ASSERT(states.size() == 12);
@@ -760,13 +748,13 @@ void test_devices()
fmt::memory_buffer buffer;
const std::string table_format = "| {:^5} | {:^6} | {:^5} | {:^3} |\n";
fmt::format_to(buffer, "\n");
fmt::format_to(buffer, table_format, "State", "Device", "S", "I");
fmt::format_to(std::back_inserter(buffer), "\n");
fmt::format_to(std::back_inserter(buffer), table_format, "State", "Device", "S", "I");
std::size_t config = 0;
for (const auto &state : states)
{
fmt::format_to(buffer,
fmt::format_to(std::back_inserter(buffer),
table_format,
config++,
state.get_device()->get_id(),
@@ -814,8 +802,7 @@ void test_termination_criteria()
bench.set_skip_time(skip_time);
bench.set_timeout(timeout);
const std::vector<nvbench::state> states =
nvbench::detail::state_generator::create(bench);
const std::vector<nvbench::state> states = nvbench::detail::state_generator::create(bench);
ASSERT(states.size() == 1);
ASSERT(min_samples == states[0].get_min_samples());

132
testing/statistics.cu Normal file
View File

@@ -0,0 +1,132 @@
/*
* Copyright 2023 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 with the LLVM exception
* (the "License"); you may not use this file except in compliance with
* the License.
*
* You may obtain a copy of the License at
*
* http://llvm.org/foundation/relicensing/LICENSE.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvbench/detail/statistics.cuh>
#include <nvbench/types.cuh>
#include <algorithm>
#include <vector>
#include "test_asserts.cuh"
namespace statistics = nvbench::detail::statistics;
void test_mean()
{
{
std::vector<nvbench::float64_t> data{1.0, 2.0, 3.0, 4.0, 5.0};
const nvbench::float64_t actual = statistics::compute_mean(std::begin(data), std::end(data));
const nvbench::float64_t expected = 3.0;
ASSERT(std::abs(actual - expected) < 0.001);
}
{
std::vector<nvbench::float64_t> data;
const bool finite = std::isfinite(statistics::compute_mean(std::begin(data), std::end(data)));
ASSERT(!finite);
}
}
void test_std()
{
std::vector<nvbench::float64_t> data{1.0, 2.0, 3.0, 4.0, 5.0};
const nvbench::float64_t mean = 3.0;
const nvbench::float64_t actual =
statistics::standard_deviation(std::begin(data), std::end(data), mean);
const nvbench::float64_t expected = 1.581;
ASSERT(std::abs(actual - expected) < 0.001);
}
// compute_linear_regression fits y = slope * i + intercept over the implicit
// x-values 0, 1, 2, ... Three exact cases: rising, constant, and falling.
void test_lin_regression()
{
  {
    // y = i + 1
    std::vector<nvbench::float64_t> ys{1.0, 2.0, 3.0, 4.0, 5.0};
    const auto [slope, intercept] = statistics::compute_linear_regression(ys.begin(), ys.end());
    ASSERT(slope == 1.0);
    ASSERT(intercept == 1.0);
  }
  {
    // Constant series: zero slope, intercept at the constant.
    std::vector<nvbench::float64_t> ys{42.0, 42.0, 42.0};
    const auto [slope, intercept] = statistics::compute_linear_regression(ys.begin(), ys.end());
    ASSERT(slope == 0.0);
    ASSERT(intercept == 42.0);
  }
  {
    // y = 8 - 4i
    std::vector<nvbench::float64_t> ys{8.0, 4.0, 0.0};
    const auto [slope, intercept] = statistics::compute_linear_regression(ys.begin(), ys.end());
    ASSERT(slope == -4.0);
    ASSERT(intercept == 8.0);
  }
}
void test_r2()
{
{
std::vector<nvbench::float64_t> ys{1.0, 2.0, 3.0, 4.0, 5.0};
auto [slope, intercept] = statistics::compute_linear_regression(std::begin(ys), std::end(ys));
const nvbench::float64_t actual =
statistics::compute_r2(std::begin(ys), std::end(ys), slope, intercept);
const nvbench::float64_t expected = 1.0;
ASSERT(std::abs(actual - expected) < 0.001);
}
{
std::vector<nvbench::float64_t> signal{1.0, 2.0, 3.0, 4.0, 5.0};
std::vector<nvbench::float64_t> noise{-1.0, 1.0, -1.0, 1.0, -1.0};
std::vector<nvbench::float64_t> ys(signal.size());
std::transform(std::begin(signal),
std::end(signal),
std::begin(noise),
std::begin(ys),
std::plus<nvbench::float64_t>());
auto [slope, intercept] = statistics::compute_linear_regression(std::begin(ys), std::end(ys));
const nvbench::float64_t expected = 0.675;
const nvbench::float64_t actual =
statistics::compute_r2(std::begin(ys), std::end(ys), slope, intercept);
ASSERT(std::abs(actual - expected) < 0.001);
}
}
void test_slope_conversion()
{
{
const nvbench::float64_t actual = statistics::slope2deg(0.0);
const nvbench::float64_t expected = 0.0;
ASSERT(std::abs(actual - expected) < 0.001);
}
{
const nvbench::float64_t actual = statistics::slope2deg(1.0);
const nvbench::float64_t expected = 45.0;
ASSERT(std::abs(actual - expected) < 0.001);
}
{
const nvbench::float64_t actual = statistics::slope2deg(5.0);
const nvbench::float64_t expected = 78.69;
ASSERT(std::abs(actual - expected) < 0.001);
}
}
// Entry point: run each statistics unit test in sequence. ASSERT throws
// std::runtime_error on failure, so any failing check terminates the
// program with a non-zero status.
int main()
{
test_mean();
test_std();
test_lin_regression();
test_r2();
test_slope_conversion();
}

View File

@@ -0,0 +1,85 @@
/*
* Copyright 2023 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 with the LLVM exception
* (the "License"); you may not use this file except in compliance with
* the License.
*
* You may obtain a copy of the License at
*
* http://llvm.org/foundation/relicensing/LICENSE.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvbench/detail/stdrel_criterion.cuh>
#include <nvbench/stopping_criterion.cuh>
#include <nvbench/types.cuh>
#include <algorithm>
#include <numeric>
#include <random>
#include <vector>
#include "test_asserts.cuh"
// With default parameters, a perfectly constant measurement stream must
// satisfy the relative-stddev stopping criterion once enough samples exist.
void test_const()
{
  nvbench::criterion_params params;
  nvbench::detail::stdrel_criterion criterion;
  criterion.initialize(params);
  // The criterion requires at least 5 samples before it can compute a
  // standard deviation, so feed exactly that many identical values.
  for (int sample = 0; sample < 5; ++sample)
  {
    criterion.add_measurement(42.0);
  }
  ASSERT(criterion.is_finished());
}
// Draw `size` samples from a normal distribution with the given mean and
// *relative* standard deviation (absolute std dev = mean * rel_std_dev).
// A static seed is bumped on every call so successive calls yield distinct
// sequences while the program as a whole stays deterministic.
std::vector<double> generate(double mean, double rel_std_dev, int size)
{
  static std::mt19937::result_type next_seed = 0;
  std::mt19937 engine(next_seed++);
  std::normal_distribution<double> dist(mean, mean * rel_std_dev);
  std::vector<double> samples(static_cast<std::size_t>(size));
  for (auto &sample : samples)
  {
    sample = dist(engine);
  }
  return samples;
}
void test_stdrel()
{
const nvbench::int64_t size = 10;
const nvbench::float64_t mean = 42.0;
const nvbench::float64_t max_noise = 0.1;
nvbench::criterion_params params;
params.set_float64("max-noise", max_noise);
nvbench::detail::stdrel_criterion criterion;
criterion.initialize(params);
for (nvbench::float64_t measurement : generate(mean, max_noise / 2, size))
{
criterion.add_measurement(measurement);
}
ASSERT(criterion.is_finished());
params.set_float64("max-noise", max_noise);
criterion.initialize(params);
for (nvbench::float64_t measurement : generate(mean, max_noise * 2, size))
{
criterion.add_measurement(measurement);
}
ASSERT(!criterion.is_finished());
}
// Entry point: run the stdrel_criterion unit tests. ASSERT throws
// std::runtime_error on failure, terminating the program with a non-zero
// status.
int main()
{
test_const();
test_stdrel();
}

View File

@@ -32,8 +32,7 @@ void test_empty()
const auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
const auto *clone =
dynamic_cast<const nvbench::string_axis *>(clone_base.get());
const auto *clone = dynamic_cast<const nvbench::string_axis *>(clone_base.get());
ASSERT(clone != nullptr);
ASSERT(clone->get_name() == "Empty");
@@ -61,8 +60,7 @@ void test_basic()
const auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
const auto *clone =
dynamic_cast<const nvbench::string_axis *>(clone_base.get());
const auto *clone = dynamic_cast<const nvbench::string_axis *>(clone_base.get());
ASSERT(clone != nullptr);
ASSERT(clone->get_name() == "Basic");

View File

@@ -23,55 +23,54 @@
#include <cstdio>
#include <stdexcept>
#define ASSERT(cond) \
do \
{ \
if (cond) \
{} \
else \
{ \
fmt::print("{}:{}: Assertion failed ({}).\n", __FILE__, __LINE__, #cond); \
std::fflush(stdout); \
throw std::runtime_error("Unit test failure."); \
} \
#define ASSERT(cond) \
do \
{ \
if (cond) \
{ \
} \
else \
{ \
fmt::print("{}:{}: Assertion failed ({}).\n", __FILE__, __LINE__, #cond); \
std::fflush(stdout); \
throw std::runtime_error("Unit test failure."); \
} \
} while (false)
#define ASSERT_MSG(cond, fmtstr, ...) \
do \
{ \
if (cond) \
{} \
else \
{ \
fmt::print("{}:{}: Test assertion failed ({}) {}\n", \
__FILE__, \
__LINE__, \
#cond, \
fmt::format(fmtstr, __VA_ARGS__)); \
std::fflush(stdout); \
throw std::runtime_error("Unit test failure."); \
} \
#define ASSERT_MSG(cond, fmtstr, ...) \
do \
{ \
if (cond) \
{ \
} \
else \
{ \
fmt::print("{}:{}: Test assertion failed ({}) {}\n", \
__FILE__, \
__LINE__, \
#cond, \
fmt::format(fmtstr, __VA_ARGS__)); \
std::fflush(stdout); \
throw std::runtime_error("Unit test failure."); \
} \
} while (false)
#define ASSERT_THROWS_ANY(expr) \
do \
{ \
bool threw = false; \
try \
{ \
expr; \
} \
catch (...) \
{ \
threw = true; \
} \
if (!threw) \
{ \
fmt::print("{}:{}: Expression expected exception: '{}'.", \
__FILE__, \
__LINE__, \
#expr); \
std::fflush(stdout); \
throw std::runtime_error("Unit test failure."); \
} \
#define ASSERT_THROWS_ANY(expr) \
do \
{ \
bool threw = false; \
try \
{ \
expr; \
} \
catch (...) \
{ \
threw = true; \
} \
if (!threw) \
{ \
fmt::print("{}:{}: Expression expected exception: '{}'.", __FILE__, __LINE__, #expr); \
std::fflush(stdout); \
throw std::runtime_error("Unit test failure."); \
} \
} while (false)

View File

@@ -17,13 +17,12 @@
*/
#include <nvbench/type_axis.cuh>
#include <nvbench/types.cuh>
#include "test_asserts.cuh"
#include <fmt/format.h>
#include "test_asserts.cuh"
void test_empty()
{
nvbench::type_axis axis("Basic", 0);
@@ -39,8 +38,7 @@ void test_empty()
const auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
const auto *clone =
dynamic_cast<const nvbench::type_axis *>(clone_base.get());
const auto *clone = dynamic_cast<const nvbench::type_axis *>(clone_base.get());
ASSERT(clone != nullptr);
ASSERT(clone->get_name() == "Basic");
@@ -63,8 +61,7 @@ void test_single()
auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
auto *clone =
dynamic_cast<nvbench::type_axis *>(clone_base.get());
auto *clone = dynamic_cast<nvbench::type_axis *>(clone_base.get());
ASSERT(clone != nullptr);
ASSERT(clone->get_name() == "Single");
@@ -102,8 +99,7 @@ void test_single()
void test_several()
{
nvbench::type_axis axis("Several", 0);
axis.set_inputs<
nvbench::type_list<nvbench::int32_t, nvbench::float64_t, bool>>();
axis.set_inputs<nvbench::type_list<nvbench::int32_t, nvbench::float64_t, bool>>();
ASSERT(axis.get_name() == "Several");
ASSERT(axis.get_size() == 3);
@@ -122,8 +118,7 @@ void test_several()
auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
auto *clone =
dynamic_cast<nvbench::type_axis *>(clone_base.get());
auto *clone = dynamic_cast<nvbench::type_axis *>(clone_base.get());
ASSERT(clone != nullptr);
ASSERT(clone->get_name() == "Several");
@@ -177,9 +172,8 @@ void test_several()
void test_get_type_index()
{
nvbench::type_axis axis("GetIndexTest", 0);
axis.set_inputs<
nvbench::
type_list<nvbench::int8_t, nvbench::uint16_t, nvbench::float32_t, bool>>();
axis
.set_inputs<nvbench::type_list<nvbench::int8_t, nvbench::uint16_t, nvbench::float32_t, bool>>();
ASSERT(axis.get_type_index("I8") == 0);
ASSERT(axis.get_type_index("U16") == 1);
@@ -188,8 +182,7 @@ void test_get_type_index()
const auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
const auto *clone =
dynamic_cast<const nvbench::type_axis *>(clone_base.get());
const auto *clone = dynamic_cast<const nvbench::type_axis *>(clone_base.get());
ASSERT(clone != nullptr);
ASSERT(clone->get_type_index("I8") == 0);

View File

@@ -17,11 +17,8 @@
*/
#include <nvbench/type_list.cuh>
#include <nvbench/type_strings.cuh>
#include "test_asserts.cuh"
#include <fmt/format.h>
#include <fmt/ranges.h>
@@ -30,6 +27,8 @@
#include <type_traits>
#include <vector>
#include "test_asserts.cuh"
// Unique, numbered types for testing type_list functionality.
using T0 = std::integral_constant<std::size_t, 0>;
using T1 = std::integral_constant<std::size_t, 1>;
@@ -80,14 +79,13 @@ struct test_concat
struct empty_tests
{
static_assert(
std::is_same_v<nvbench::tl::concat<TLEmpty, TLEmpty>, TLEmpty>);
static_assert(std::is_same_v<nvbench::tl::concat<TLEmpty, TLEmpty>, TLEmpty>);
static_assert(std::is_same_v<nvbench::tl::concat<TLEmpty, TL012>, TL012>);
static_assert(std::is_same_v<nvbench::tl::concat<TL012, TLEmpty>, TL012>);
};
static_assert(std::is_same_v<nvbench::tl::concat<TL012, TL765>,
nvbench::type_list<T0, T1, T2, T7, T6, T5>>);
static_assert(
std::is_same_v<nvbench::tl::concat<TL012, TL765>, nvbench::type_list<T0, T1, T2, T7, T6, T5>>);
};
struct test_prepend_each
@@ -97,8 +95,7 @@ struct test_prepend_each
using T23 = nvbench::type_list<T2, T3>;
using TLs = nvbench::type_list<T01, T23>;
using Expected = nvbench::type_list<nvbench::type_list<T, T0, T1>,
nvbench::type_list<T, T2, T3>>;
using Expected = nvbench::type_list<nvbench::type_list<T, T0, T1>, nvbench::type_list<T, T2, T3>>;
static_assert(std::is_same_v<nvbench::tl::prepend_each<T, TLs>, Expected>);
};
@@ -110,16 +107,12 @@ struct test_empty_cartesian_product
struct test_single_cartesian_product
{
using prod_1 =
nvbench::tl::cartesian_product<nvbench::type_list<nvbench::type_list<T0>>>;
static_assert(
std::is_same_v<prod_1, nvbench::type_list<nvbench::type_list<T0>>>);
using prod_1 = nvbench::tl::cartesian_product<nvbench::type_list<nvbench::type_list<T0>>>;
static_assert(std::is_same_v<prod_1, nvbench::type_list<nvbench::type_list<T0>>>);
using prod_2 = nvbench::tl::cartesian_product<
nvbench::type_list<nvbench::type_list<T0, T1>>>;
static_assert(std::is_same_v<prod_2,
nvbench::type_list<nvbench::type_list<T0>,
nvbench::type_list<T1>>>);
using prod_2 = nvbench::tl::cartesian_product<nvbench::type_list<nvbench::type_list<T0, T1>>>;
static_assert(
std::is_same_v<prod_2, nvbench::type_list<nvbench::type_list<T0>, nvbench::type_list<T1>>>);
};
struct test_cartesian_product