This C++ API example demonstrates the programming flow for reordering memory between CPU and GPU engines: data prepared on the CPU is reordered to GPU memory, passed through a ReLU primitive on the GPU, and reordered back to the CPU, where the result is validated.
#include <iostream>
#include <stdexcept>
#include <vector>
#include "example_utils.hpp"
#include "example_utils.hpp"
using namespace std;
// Fill a memory object with a pattern that contains some negative values.
void fill(memory &mem, const memory::dims &adims) {
    std::vector<float> array(product(adims));
    for (size_t e = 0; e < array.size(); ++e) {
        // Every seventh element is negative so the ReLU has work to do.
        array[e] = e % 7 ? 1.0f : -1.0f;
    }
    write_to_dnnl_memory(array.data(), mem);
}
// Count negative values in a memory object (expected to be zero after ReLU).
int find_negative(memory &mem, const memory::dims &adims) {
    int negs = 0;
    size_t nelems = product(adims);
    std::vector<float> array(nelems);
    read_from_dnnl_memory(array.data(), mem);
    for (size_t e = 0; e < nelems; ++e)
        negs += array[e] < 0.0f;
    return negs;
}
void cross_engine_reorder_tutorial() {
    // One engine of each kind; index 0 selects the first device.
    auto cpu_engine = engine(engine::kind::cpu, 0);
    auto gpu_engine = engine(engine::kind::gpu, 0);
    // In-order stream on the GPU engine.
    auto stream_gpu = stream(gpu_engine, stream::flags::in_order);

    // Tensor dimensions (an example 4D NCHW shape) and one f32 memory object per engine.
    const auto tz = memory::dims {2, 16, 1, 1};
    auto m_cpu
            = memory({{tz}, memory::data_type::f32, memory::format_tag::nchw},
                    cpu_engine);
    auto m_gpu
            = memory({{tz}, memory::data_type::f32, memory::format_tag::nchw},
                    gpu_engine);
    fill(m_cpu, tz);

    // Reorder CPU memory -> GPU memory.
    auto r1 = reorder(m_cpu, m_gpu);
    // ReLU primitive created for the GPU engine; it operates in place on m_gpu.
    auto relu_d = eltwise_forward::desc(prop_kind::forward,
            algorithm::eltwise_relu, m_gpu.get_desc(), 0.0f);
    auto relu_pd = eltwise_forward::primitive_desc(relu_d, gpu_engine);
    auto relu = eltwise_forward(relu_pd);
    // Reorder GPU memory -> CPU memory.
    auto r2 = reorder(m_gpu, m_cpu);

    // Submit all primitives to the GPU stream, then wait for completion.
    r1.execute(stream_gpu, m_cpu, m_gpu);
    relu.execute(stream_gpu, {{DNNL_ARG_SRC, m_gpu}, {DNNL_ARG_DST, m_gpu}});
    r2.execute(stream_gpu, m_gpu, m_cpu);
    stream_gpu.wait();
    if (find_negative(m_cpu, tz) != 0)
        throw std::logic_error(
                "Unexpected output, found a negative value after the ReLU "
                "execution.");
}
int main(int argc, char **argv) {
    // handle_example_errors() (from example_utils.hpp) reports unsupported
    // engine configurations instead of failing with an unhandled exception.
    return handle_example_errors(
            {engine::kind::cpu, engine::kind::gpu}, cross_engine_reorder_tutorial);
}
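The tutorial throws if a GPU engine cannot be created, and handle_example_errors() turns that into a friendly "unsupported" message. As a complement, here is a minimal standalone sketch of an explicit availability check using dnnl::engine::get_count(); this guard is not part of the example above, and the messages are illustrative only.

#include <cstdio>
#include "oneapi/dnnl/dnnl.hpp"

int main() {
    // Ask the library how many engines of each kind it can create.
    const size_t n_cpu = dnnl::engine::get_count(dnnl::engine::kind::cpu);
    const size_t n_gpu = dnnl::engine::get_count(dnnl::engine::kind::gpu);
    std::printf("CPU engines: %zu, GPU engines: %zu\n", n_cpu, n_gpu);
    if (n_gpu == 0) {
        std::printf("No GPU engine available; the cross-engine reorder "
                    "example would be skipped on this build/device.\n");
    }
    return 0;
}

Note that get_count(engine::kind::gpu) returns 0 on oneDNN builds without a GPU runtime, so the check is also a quick way to verify the library configuration.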