HEBench
hebench_eltwisemult_l.cpp

// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#include <bitset>
#include <cassert>
#include <cstring>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <utility>

#include "hebench/modules/timer/include/timer.h"

#include "hebench/api_bridge/api.h"
#include "hebench/modules/general/include/hebench_math_utils.h"
#include "include/hebench_engine.h"

#include "../include/hebench_eltwisemult_l.h"

namespace hebench {
namespace TestHarness {
namespace EltwiseMult {
namespace Latency {

//----------------------------
// class BenchmarkDescription
//----------------------------

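// Self-registration: initializing m_b_registered below calls
// BenchmarkFactory::registerSupportedBenchmark() during static initialization,
// so this workload descriptor is known to the factory before main() runs.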
bool BenchmarkDescriptor::m_b_registered = // register the benchmark with the factory
    hebench::TestHarness::BenchmarkFactory::registerSupportedBenchmark(std::make_shared<BenchmarkDescriptor>());

bool BenchmarkDescriptor::matchBenchmarkDescriptor(const hebench::APIBridge::BenchmarkDescriptor &bench_desc,
                                                   const std::vector<hebench::APIBridge::WorkloadParam> &w_params) const
{
    assert(m_b_registered);

    // return true if benchmark is supported

    bool retval =
        BenchmarkDescriptorCategory::matchBenchmarkDescriptor(bench_desc, w_params)
        && (bench_desc.category == hebench::APIBridge::Category::Latency);

    return retval;
}

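// Fills in the workload-specific portion of the benchmark description, in
// particular the workload_header text that is appended to the report header.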
void BenchmarkDescriptor::completeWorkloadDescription(WorkloadDescriptionOutput &output,
                                                      const Engine &engine,
                                                      const BenchmarkDescription::Backend &backend_desc,
                                                      const BenchmarkDescription::Configuration &config) const
{
    // finish describing workload
    assert(OpParameterCount == 2);
    assert(DefaultBatchSize == 1);

    BenchmarkDescriptorCategory::completeWorkloadDescription(output, engine, backend_desc, config);

    assert(OpParameterCount == output.operation_params_count);

    // finish benchmark header description

    std::stringstream ss;
    std::uint64_t batch_sizes[OpParameterCount];
    std::uint64_t vector_size = fetchVectorSize(config.w_params);

    ss = std::stringstream();

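    // In the latency category every operand uses a single sample
    // (DefaultBatchSize == 1), so the result batch size computed below is the
    // product of the per-operand batch sizes, i.e. also 1.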
    std::uint64_t result_batch_size = 1;
    for (std::size_t param_i = 0; param_i < OpParameterCount; ++param_i)
    {
        batch_sizes[param_i] = DefaultBatchSize;
        result_batch_size *= batch_sizes[param_i];
    } // end for
    // complete header with workload specifics
    ss << ", , C[i] = V0[i] * V1[i]" << std::endl
       << ", , , Elements, Batch size" << std::endl;
    for (std::size_t i = 0; i < OpParameterCount; ++i)
    {
        ss << ", , V" << i << ", " << vector_size << ", " << batch_sizes[i] << std::endl;
    } // end for
    ss << ", , C, " << vector_size << ", " << result_batch_size << std::endl;

    output.workload_header = ss.str();
}

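// createBenchmark()/destroyBenchmark() form a matched pair: the test harness
// always allocates and frees Benchmark objects through this descriptor, so the
// same module that news the object also deletes it.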
hebench::TestHarness::PartialBenchmark *BenchmarkDescriptor::createBenchmark(std::shared_ptr<Engine> p_engine,
                                                                             const DescriptionToken &description_token)
{
    assert(m_b_registered);
    Benchmark *retval = nullptr;

    try
    {
        retval = new Benchmark(p_engine, description_token);
    }
    catch (...)
    {
        if (retval)
            delete retval;
        throw;
    }

    return retval;
}

void BenchmarkDescriptor::destroyBenchmark(hebench::TestHarness::PartialBenchmark *p_bench)
{
    assert(m_b_registered);
    if (p_bench)
        delete p_bench;
}

//-----------------
// class Benchmark
//-----------------

Benchmark::Benchmark(std::shared_ptr<Engine> p_engine,
                     const IBenchmarkDescriptor::DescriptionToken &description_token) :
    BenchmarkLatency(p_engine, description_token)
{
}

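// init() prepares the benchmark inputs: it either generates random input
// vectors together with their ground-truth product, or, when a dataset file is
// configured, loads inputs and ground truth from that file, reporting the data
// volume and load time either way.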
void Benchmark::init()
{
    hebench::Common::EventTimer timer;
    hebench::Common::TimingReportEvent::Ptr p_timing_event;
    std::uint64_t vector_size;
    std::uint64_t batch_sizes[BenchmarkDescriptor::OpParameterCount];
    std::stringstream ss;

    vector_size = BenchmarkDescriptor::fetchVectorSize(this->getBenchmarkConfiguration().w_params);
    for (std::size_t param_i = 0; param_i < BenchmarkDescriptor::OpParameterCount; ++param_i)
        batch_sizes[param_i] = BenchmarkDescriptor::DefaultBatchSize;

    std::cout << IOS_MSG_INFO << hebench::Logging::GlobalLogger::log("Preparing workload.") << std::endl;

    timer.start();
    if (this->getBenchmarkConfiguration().dataset_filename.empty())
    {
        // generates random vectors for input and generates (computes) ground truth
        std::cout << IOS_MSG_INFO << hebench::Logging::GlobalLogger::log("Generating data...") << std::endl;
        m_data = DataLoader::create(vector_size,
                                    batch_sizes[0], batch_sizes[1],
                                    this->getBackendDescription().descriptor.data_type);
    } // end if
    else
    {
        std::stringstream ss;
        ss << "Loading data from external dataset: " << std::endl
           << "\"" << this->getBenchmarkConfiguration().dataset_filename << "\"";
        std::cout << IOS_MSG_INFO << hebench::Logging::GlobalLogger::log(ss.str()) << std::endl;
        // load vectors for input and ground truth from file
        m_data = DataLoader::create(vector_size,
                                    batch_sizes[0], batch_sizes[1],
                                    this->getBackendDescription().descriptor.data_type,
                                    this->getBenchmarkConfiguration().dataset_filename);
    } // end else
    p_timing_event = timer.stop<std::milli>();

    ss = std::stringstream();
    ss << "Total data loaded: " << m_data->getTotalDataLoaded() << " bytes";
    std::cout << IOS_MSG_DONE << hebench::Logging::GlobalLogger::log(ss.str()) << std::endl;
    ss = std::stringstream();
    ss << "Elapsed wall time: " << p_timing_event->elapsedWallTime<std::milli>() << " ms";
    std::cout << IOS_MSG_INFO << hebench::Logging::GlobalLogger::log(ss.str()) << std::endl;
    ss = std::stringstream();
    ss << "Elapsed CPU time: " << p_timing_event->elapsedCPUTime<std::milli>() << " ms";
    std::cout << IOS_MSG_INFO << hebench::Logging::GlobalLogger::log(ss.str()) << std::endl;
}

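// validateResult() only adds sanity checks on the operand and result counts;
// the element-wise comparison against the precomputed ground truth is
// delegated to the BenchmarkLatency base implementation.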
bool Benchmark::validateResult(IDataLoader::Ptr dataset,
                               const std::uint64_t *param_data_pack_indices,
                               const std::vector<hebench::APIBridge::NativeDataBuffer *> &outputs,
                               hebench::APIBridge::DataType data_type) const
{
    assert(dataset->getParameterCount() == BenchmarkDescriptorCategory::OpParameterCount
           && dataset->getResultCount() == BenchmarkDescriptorCategory::OpResultCount);

    return BenchmarkLatency::validateResult(dataset, param_data_pack_indices, outputs, data_type);
}

} // namespace Latency
} // namespace EltwiseMult
} // namespace TestHarness
} // namespace hebench