HEBench
hebench_eltwiseadd_l.cpp

// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#include <bitset>
#include <cassert>
#include <cstring>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <utility>

#include "hebench/modules/timer/include/timer.h"

#include "hebench/api_bridge/api.h"
#include "hebench/modules/general/include/hebench_math_utils.h"
#include "include/hebench_engine.h"

#include "../include/hebench_eltwiseadd_l.h"

namespace hebench {
namespace TestHarness {
namespace EltwiseAdd {
namespace Latency {

//----------------------------
// class BenchmarkDescriptor
//----------------------------

bool BenchmarkDescriptor::m_b_registered = // register the benchmark with the factory
    hebench::TestHarness::BenchmarkFactory::registerSupportedBenchmark(std::make_shared<BenchmarkDescriptor>());
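
// The static-member initialization above is what wires this benchmark into the Test
// Harness: registerSupportedBenchmark() stores the shared BenchmarkDescriptor so the
// factory can later match it against requested benchmarks and instantiate it (see
// matchBenchmarkDescriptor() and createBenchmark() below). m_b_registered itself only
// records that the registration call ran, which the asserts in this file check.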

bool BenchmarkDescriptor::matchBenchmarkDescriptor(const hebench::APIBridge::BenchmarkDescriptor &bench_desc,
                                                   const std::vector<hebench::APIBridge::WorkloadParam> &w_params) const
{
    assert(m_b_registered);

    // return true if benchmark is supported

    bool retval =
        BenchmarkDescriptorCategory::matchBenchmarkDescriptor(bench_desc, w_params)
        && (bench_desc.category == hebench::APIBridge::Category::Latency);

    return retval;
}

void BenchmarkDescriptor::completeWorkloadDescription(WorkloadDescriptionOutput &output,
                                                      const Engine &engine,
                                                      const BenchmarkDescription::Backend &backend_desc,
                                                      const BenchmarkDescription::Configuration &config) const
{
    // finish describing workload
    assert(OpParameterCount == 2);
    assert(DefaultBatchSize == 1);

    BenchmarkDescriptorCategory::completeWorkloadDescription(output, engine, backend_desc, config);

    assert(OpParameterCount == output.operation_params_count);

    // finish benchmark header description

    std::stringstream ss;
    std::uint64_t batch_sizes[OpParameterCount];
    std::uint64_t vector_size = fetchVectorSize(config.w_params);

    std::uint64_t result_batch_size = 1;
    for (std::size_t param_i = 0; param_i < OpParameterCount; ++param_i)
    {
        batch_sizes[param_i] = DefaultBatchSize;
        result_batch_size *= batch_sizes[param_i];
    } // end for
    // complete header with workload specifics
    ss << ", , C = V0 + V1" << std::endl
       << ", , , Elements, Batch size" << std::endl;
    for (std::size_t i = 0; i < OpParameterCount; ++i)
    {
        ss << ", , V" << i << ", " << vector_size << ", " << batch_sizes[i] << std::endl;
    } // end for
    ss << ", , C, " << vector_size << ", " << result_batch_size << std::endl;

    output.workload_header = ss.str();
}
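
// For reference, with the latency defaults above (batch size 1 per operand) and an
// illustrative vector size of 1000, the workload_header built by this function reads:
//
//   , , C = V0 + V1
//   , , , Elements, Batch size
//   , , V0, 1000, 1
//   , , V1, 1000, 1
//   , , C, 1000, 1
//
// The value 1000 is only an example; the real vector size comes from the workload
// parameters via fetchVectorSize(config.w_params).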

hebench::TestHarness::PartialBenchmark *BenchmarkDescriptor::createBenchmark(std::shared_ptr<Engine> p_engine,
                                                                             const DescriptionToken &description_token)
{
    assert(m_b_registered);
    Benchmark *retval = nullptr;

    try
    {
        retval = new Benchmark(p_engine, description_token);
    }
    catch (...)
    {
        if (retval)
            delete retval;
        throw;
    }

    return retval;
}

void BenchmarkDescriptor::destroyBenchmark(hebench::TestHarness::PartialBenchmark *p_bench)
{
    assert(m_b_registered);
    if (p_bench)
        delete p_bench;
}

//-----------------
// class Benchmark
//-----------------

Benchmark::Benchmark(std::shared_ptr<Engine> p_engine,
                     const IBenchmarkDescriptor::DescriptionToken &description_token) :
    BenchmarkLatency(p_engine, description_token)
{
}

void Benchmark::init()
{
    hebench::Common::EventTimer timer;
    hebench::Common::TimingReportEvent::Ptr p_timing_event;
    std::uint64_t vector_size;
    std::uint64_t batch_sizes[BenchmarkDescriptor::OpParameterCount];
    std::stringstream ss;

    vector_size = BenchmarkDescriptor::fetchVectorSize(this->getBenchmarkConfiguration().w_params);
    for (std::size_t param_i = 0; param_i < BenchmarkDescriptor::OpParameterCount; ++param_i)
        batch_sizes[param_i] = BenchmarkDescriptor::DefaultBatchSize;

    std::cout << IOS_MSG_INFO << hebench::Logging::GlobalLogger::log("Preparing workload.") << std::endl;

    timer.start();
    if (this->getBenchmarkConfiguration().dataset_filename.empty())
    {
        // generates random vectors for input and generates (computes) ground truth
        std::cout << IOS_MSG_INFO << hebench::Logging::GlobalLogger::log("Generating data...") << std::endl;
        m_data = DataLoader::create(vector_size,
                                    batch_sizes[0], batch_sizes[1],
                                    this->getBackendDescription().descriptor.data_type);
    } // end if
    else
    {
        std::stringstream ss;
        ss << "Loading data from external dataset: " << std::endl
           << "\"" << this->getBenchmarkConfiguration().dataset_filename << "\"";
        std::cout << IOS_MSG_INFO << hebench::Logging::GlobalLogger::log(ss.str()) << std::endl;
        // load vectors for input and ground truth from file
        m_data = DataLoader::create(vector_size,
                                    batch_sizes[0], batch_sizes[1],
                                    this->getBackendDescription().descriptor.data_type,
                                    this->getBenchmarkConfiguration().dataset_filename);
    } // end else
    p_timing_event = timer.stop<std::milli>();

    ss = std::stringstream();
    ss << "Total data loaded: " << m_data->getTotalDataLoaded() << " bytes";
    std::cout << IOS_MSG_DONE << hebench::Logging::GlobalLogger::log(ss.str()) << std::endl;
    ss = std::stringstream();
    ss << "Elapsed wall time: " << p_timing_event->elapsedWallTime<std::milli>() << " ms";
    std::cout << IOS_MSG_INFO << hebench::Logging::GlobalLogger::log(ss.str()) << std::endl;
    ss = std::stringstream();
    ss << "Elapsed CPU time: " << p_timing_event->elapsedCPUTime<std::milli>() << " ms";
    std::cout << IOS_MSG_INFO << hebench::Logging::GlobalLogger::log(ss.str()) << std::endl;
}
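
// Note on DataLoader::create() used above: per its declaration, it takes the vector
// size, the batch size for each of the two operands, and the data type. Without the
// trailing filename argument it synthesizes random input vectors and computes the
// expected sums as ground truth; with the filename it loads both inputs and ground
// truth from the external dataset instead.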

bool Benchmark::validateResult(IDataLoader::Ptr dataset,
                               const std::uint64_t *param_data_pack_indices,
                               const std::vector<hebench::APIBridge::NativeDataBuffer *> &outputs,
                               hebench::APIBridge::DataType data_type) const
{
    assert(dataset->getParameterCount() == BenchmarkDescriptorCategory::OpParameterCount
           && dataset->getResultCount() == BenchmarkDescriptorCategory::OpResultCount);

    return BenchmarkLatency::validateResult(dataset, param_data_pack_indices, outputs, data_type);
}

} // namespace Latency
} // namespace EltwiseAdd
} // namespace TestHarness
} // namespace hebench
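
// Rough lifecycle sketch, inferred from the overrides in this file and their base-class
// documentation: the Test Harness asks the registered descriptor whether it matches a
// requested benchmark (matchBenchmarkDescriptor), completes the report header for the
// match (completeWorkloadDescription), instantiates the benchmark (createBenchmark,
// followed by Benchmark::init(), which generates or loads the dataset), validates each
// operation result against the ground truth (validateResult), and finally releases the
// object through destroyBenchmark().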