Add initial nvbench::benchmark.

It's basically just a container for the various axis classes at this
point.
This commit is contained in:
Allison Vacanti
2020-12-24 17:33:03 -05:00
parent 691ed2c18d
commit 40f92b4705
5 changed files with 307 additions and 8 deletions

111
nvbench/benchmark.cuh Normal file
View File

@@ -0,0 +1,111 @@
#pragma once
#include <nvbench/float64_axis.cuh>
#include <nvbench/int64_axis.cuh>
#include <nvbench/string_axis.cuh>
#include <nvbench/type_axis.cuh>
#include <memory>
#include <stdexcept>
#include <vector>
namespace nvbench
{
template <typename Kernel, typename TypeAxes>
struct benchmark
{
using kernel_type = Kernel;
using type_axes = TypeAxes;
using type_configs = nvbench::tl::cartesian_product<type_axes>;
static constexpr std::size_t num_type_configs =
nvbench::tl::size<type_configs>{};
void set_name(std::string name) { m_name = std::move(name); }
const std::string &get_name() const { return m_name; }
// Convenience API for a single type_axis.
benchmark &set_type_axes_name(std::string name)
{
return this->set_type_axes_names({std::move(name)});
}
benchmark &set_type_axes_names(std::vector<std::string> names);
benchmark &add_float64_axis(std::string name,
std::vector<nvbench::float64_t> data)
{
auto axis = std::make_unique<nvbench::float64_axis>(std::move(name));
axis->set_inputs(std::move(data));
m_float64_axes.push_back(std::move(axis));
return *this;
}
benchmark &add_int64_axis(
std::string name,
std::vector<nvbench::int64_t> data,
nvbench::int64_axis_flags flags = nvbench::int64_axis_flags::none)
{
auto axis = std::make_unique<nvbench::int64_axis>(std::move(name), flags);
axis->set_inputs(std::move(data));
m_int64_axes.push_back(std::move(axis));
return *this;
}
benchmark &add_int64_power_of_two_axis(std::string name,
std::vector<nvbench::int64_t> data)
{
return this->add_int64_axis(std::move(name),
std::move(data),
nvbench::int64_axis_flags::power_of_two);
}
benchmark &add_string_axis(std::string name, std::vector<std::string> data)
{
auto axis = std::make_unique<nvbench::string_axis>(std::move(name));
axis->set_inputs(std::move(data));
m_string_axes.push_back(std::move(axis));
return *this;
}
[[nodiscard]] const auto &get_float64_axes() const { return m_float64_axes; }
[[nodiscard]] const auto &get_int64_axes() const { return m_int64_axes; }
[[nodiscard]] const auto &get_string_axes() const { return m_string_axes; }
[[nodiscard]] const auto &get_type_axes() const { return m_type_axes; }
private:
std::string m_name;
std::vector<std::unique_ptr<nvbench::float64_axis>> m_float64_axes;
std::vector<std::unique_ptr<nvbench::int64_axis>> m_int64_axes;
std::vector<std::unique_ptr<nvbench::string_axis>> m_string_axes;
std::vector<std::unique_ptr<nvbench::type_axis>> m_type_axes;
};
/// Assign one name per type axis, creating a nvbench::type_axis for each
/// entry of `type_axes`, in order.
/// @throws std::runtime_error when `names.size()` does not match the number
///         of type axes.
template <typename Kernel, typename TypeAxes>
benchmark<Kernel, TypeAxes> &
benchmark<Kernel, TypeAxes>::set_type_axes_names(std::vector<std::string> names)
{
  if (names.size() != nvbench::tl::size<type_axes>::value)
  { // TODO Find a way to get a better error message w/o bringing fmt
    // into this header.
    throw std::runtime_error("set_type_axes_names(): len(names) != "
                             "len(type_axes)");
  }

  // Reset any axes created by an earlier call so repeated calls replace the
  // names instead of appending duplicate axes.
  m_type_axes.clear();
  m_type_axes.reserve(names.size());

  auto names_iter = names.begin(); // contents will be moved from
  nvbench::tl::foreach<type_axes>([&axes = m_type_axes, &names_iter](
                                    [[maybe_unused]] auto wrapped_type) {
    // Unwrap the type_list carried by this type axis.
    // Note:
    // The word "type" appears 6 times in the next line.
    // Every. Single. Token.
    // Take a moment to just appreciate this beautiful language:
    typedef typename decltype(wrapped_type)::type type_list;
    auto axis = std::make_unique<nvbench::type_axis>(std::move(*names_iter++));
    axis->set_inputs<type_list>();
    axes.push_back(std::move(axis));
  });
  return *this;
}
} // namespace nvbench

View File

@@ -11,16 +11,16 @@ namespace nvbench
int64_axis::~int64_axis() = default;
void int64_axis::set_inputs(const std::vector<int64_t> &inputs)
void int64_axis::set_inputs(std::vector<int64_t> inputs)
{
m_inputs = inputs;
m_inputs = std::move(inputs);
if (!this->is_power_of_two())
{
m_values = inputs;
m_values = m_inputs;
}
else
{
m_values.resize(inputs.size());
m_values.resize(m_inputs.size());
auto conv = [](int64_t in) -> int64_t {
if (in < 0 || in >= 64)
@@ -35,13 +35,15 @@ void int64_axis::set_inputs(const std::vector<int64_t> &inputs)
return 1ll << in;
};
std::transform(inputs.cbegin(), inputs.cend(), m_values.begin(), conv);
std::transform(m_inputs.cbegin(), m_inputs.cend(), m_values.begin(), conv);
}
}
std::string int64_axis::do_get_input_string(std::size_t i) const
{
return fmt::to_string(m_inputs[i]);
}
std::string int64_axis::do_get_description(std::size_t i) const
{
return this->is_power_of_two()

View File

@@ -40,16 +40,16 @@ struct int64_axis final : public axis_base
return static_cast<bool>(m_flags & int64_axis_flags::power_of_two);
}
void set_inputs(const std::vector<int64_t> &inputs);
void set_inputs(std::vector<int64_t> inputs);
[[nodiscard]] const std::vector<int64_t> &get_inputs() const
{
return m_inputs;
};
[[nodiscard]] const std::vector<int64_t> &get_values() const
[[nodiscard]] int64_t get_value(std::size_t i) const
{
return m_values;
return m_values[i];
};
private:

View File

@@ -1,4 +1,5 @@
set(test_srcs
benchmark.cu
int64_axis.cu
float64_axis.cu
params.cu

185
testing/benchmark.cu Normal file
View File

@@ -0,0 +1,185 @@
#include <nvbench/benchmark.cuh>
#include <nvbench/type_list.cuh>
#include <nvbench/type_strings.cuh>
#include <nvbench/types.cuh>
#include "test_asserts.cuh"
#include <fmt/format.h>
struct dummy_kernel;
using int_list = nvbench::type_list<nvbench::int8_t,
nvbench::int16_t,
nvbench::int32_t,
nvbench::int64_t>;
using float_list = nvbench::type_list<nvbench::float32_t, nvbench::float64_t>;
using misc_list = nvbench::type_list<bool, void>;
using lots_of_types_bench =
nvbench::benchmark<dummy_kernel,
nvbench::type_list<int_list, float_list, misc_list>>;
using no_types = nvbench::type_list<>;
using no_types_bench = nvbench::benchmark<dummy_kernel, no_types>;
void test_type_axes()
{
lots_of_types_bench bench;
bench.set_type_axes_names({"Integer", "Float", "Other"});
fmt::memory_buffer buffer;
const auto &axes = bench.get_type_axes();
for (const auto &axis : axes)
{
fmt::format_to(buffer, "Axis: {}\n", axis->get_name());
const auto num_values = axis->get_size();
for (std::size_t i = 0; i < num_values; ++i)
{
auto input_string = axis->get_input_string(i);
auto description = axis->get_description(i);
fmt::format_to(buffer,
" - {}{}\n",
input_string,
description.empty() ? ""
: fmt::format(" ({})", description));
}
}
const std::string ref =
R"expected(Axis: Integer
- I8 (int8_t)
- I16 (int16_t)
- I32 (int32_t)
- I64 (int64_t)
Axis: Float
- F32 (float)
- F64 (double)
Axis: Other
- bool
- void
)expected";
const std::string test = fmt::to_string(buffer);
ASSERT_MSG(test == ref,
fmt::format("Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test));
}
void test_type_configs()
{
lots_of_types_bench bench;
bench.set_type_axes_names({"Integer", "Float", "Other"});
ASSERT(bench.num_type_configs == 16);
std::size_t idx = 0;
fmt::memory_buffer buffer;
nvbench::tl::foreach<lots_of_types_bench::type_configs>(
[&idx, &buffer]([[maybe_unused]] auto type_wrapper) {
using Conf = typename decltype(type_wrapper)::type;
using Integer = nvbench::tl::get<0, Conf>;
using Float = nvbench::tl::get<1, Conf>;
using Other = nvbench::tl::get<2, Conf>;
fmt::format_to(buffer,
"type_configs[{:2d}] = <{:>3}, {:>3}, {:>4}>\n",
idx++,
nvbench::type_strings<Integer>::input_string(),
nvbench::type_strings<Float>::input_string(),
nvbench::type_strings<Other>::input_string());
});
const std::string ref = R"type_config_ref(type_configs[ 0] = < I8, F32, bool>
type_configs[ 1] = < I8, F32, void>
type_configs[ 2] = < I8, F64, bool>
type_configs[ 3] = < I8, F64, void>
type_configs[ 4] = <I16, F32, bool>
type_configs[ 5] = <I16, F32, void>
type_configs[ 6] = <I16, F64, bool>
type_configs[ 7] = <I16, F64, void>
type_configs[ 8] = <I32, F32, bool>
type_configs[ 9] = <I32, F32, void>
type_configs[10] = <I32, F64, bool>
type_configs[11] = <I32, F64, void>
type_configs[12] = <I64, F32, bool>
type_configs[13] = <I64, F32, void>
type_configs[14] = <I64, F64, bool>
type_configs[15] = <I64, F64, void>
)type_config_ref";
const std::string test = fmt::to_string(buffer);
ASSERT_MSG(test == ref,
fmt::format("Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test));
}
void test_float64_axes()
{
no_types_bench bench;
bench.add_float64_axis("F64 Axis", {0., .1, .25, .5, 1.});
ASSERT(bench.get_float64_axes().size() == 1);
ASSERT(bench.get_float64_axes()[0] != nullptr);
const auto &axis = *bench.get_float64_axes()[0];
ASSERT(axis.get_size() == 5);
ASSERT(axis.get_value(0) == 0.);
ASSERT(axis.get_value(1) == .1);
ASSERT(axis.get_value(2) == .25);
ASSERT(axis.get_value(3) == .5);
ASSERT(axis.get_value(4) == 1.);
}
void test_int64_axes()
{
no_types_bench bench;
bench.add_int64_axis("I64 Axis", {10, 11, 12, 13, 14});
ASSERT(bench.get_int64_axes().size() == 1);
ASSERT(bench.get_int64_axes()[0] != nullptr);
const auto &axis = *bench.get_int64_axes()[0];
ASSERT(axis.get_size() == 5);
ASSERT(axis.get_value(0) == 10);
ASSERT(axis.get_value(1) == 11);
ASSERT(axis.get_value(2) == 12);
ASSERT(axis.get_value(3) == 13);
ASSERT(axis.get_value(4) == 14);
}
void test_int64_power_of_two_axes()
{
no_types_bench bench;
bench.add_int64_power_of_two_axis("I64 POT Axis", {1, 2, 3, 4, 5});
ASSERT(bench.get_int64_axes().size() == 1);
ASSERT(bench.get_int64_axes()[0] != nullptr);
const auto &axis = *bench.get_int64_axes()[0];
ASSERT(axis.get_size() == 5);
ASSERT(axis.get_value(0) == 2);
ASSERT(axis.get_value(1) == 4);
ASSERT(axis.get_value(2) == 8);
ASSERT(axis.get_value(3) == 16);
ASSERT(axis.get_value(4) == 32);
}
/// Checks that add_string_axis stores inputs verbatim as values.
void test_string_axes()
{
  no_types_bench bench;
  bench.add_string_axis("Strings", {"string a", "string b", "string c"});

  const auto &axes = bench.get_string_axes();
  ASSERT(axes.size() == 1);
  ASSERT(axes[0] != nullptr);

  const auto &axis = *axes[0];
  const std::vector<std::string> expected{"string a", "string b", "string c"};
  ASSERT(axis.get_size() == expected.size());
  for (std::size_t i = 0; i < expected.size(); ++i)
  {
    ASSERT(axis.get_value(i) == expected[i]);
  }
}
// Test driver: each test is independent and asserts internally via the
// ASSERT/ASSERT_MSG macros, so reaching the end means all checks passed.
int main()
{
test_type_axes();
test_type_configs();
test_float64_axes();
test_int64_axes();
test_int64_power_of_two_axes();
test_string_axes();
}