SMAUG
Simulating Machine Learning Applications on gem5-Aladdin
backend.cpp
1 #include "smaug/core/backend.h"
2 #include "smaug/operators/batch_norm_op.h"
3 #include "smaug/operators/concat_op.h"
4 #include "smaug/operators/control_flow_ops.h"
5 #include "smaug/operators/convolution_op.h"
6 #include "smaug/operators/data_op.h"
7 #include "smaug/operators/depthwise_convolution_op.h"
8 #include "smaug/operators/eltwise_add_op.h"
9 #include "smaug/operators/eltwise_mul_op.h"
10 #include "smaug/operators/elu_op.h"
11 #include "smaug/operators/greater_op.h"
12 #include "smaug/operators/inner_product_op.h"
13 #include "smaug/operators/less_op.h"
14 #include "smaug/operators/padding_op.h"
15 #include "smaug/operators/pooling_op.h"
16 #include "smaug/operators/relu_op.h"
17 #include "smaug/operators/reorder_op.h"
18 #include "smaug/operators/repeat_op.h"
19 #include "smaug/operators/reshape_op.h"
20 #include "smaug/operators/sigmoid_op.h"
21 #include "smaug/operators/smv/smv_batch_norm_op.h"
22 #include "smaug/operators/smv/smv_convolution_op.h"
23 #include "smaug/operators/smv/smv_eltwise_add_op.h"
24 #include "smaug/operators/smv/smv_eltwise_mul_op.h"
25 #include "smaug/operators/smv/smv_elu_op.h"
26 #include "smaug/operators/smv/smv_greater_op.h"
27 #include "smaug/operators/smv/smv_inner_product_op.h"
28 #include "smaug/operators/smv/smv_less_op.h"
29 #include "smaug/operators/smv/smv_pooling_op.h"
30 #include "smaug/operators/smv/smv_relu_op.h"
31 #include "smaug/operators/smv/smv_sigmoid_op.h"
32 #include "smaug/operators/smv/smv_softmax_op.h"
33 #include "smaug/operators/smv/smv_tanh_op.h"
34 #include "smaug/operators/softmax_op.h"
35 #include "smaug/operators/split_op.h"
36 #include "smaug/operators/tanh_op.h"
37 
38 namespace smaug {
39 
// Expands to the out-of-line definition of Backend::create<OpType>(), the
// factory method for a backend-templated operator. The method heap-allocates
// an OpType<Backend> bound to the given name and workspace and returns the
// raw pointer (the project's operator-creation convention; ownership is
// managed by the caller/workspace — TODO(review): confirm against Workspace).
#define DEF_CREATE_OP(OpType, Backend)                                         \
    OpType<Backend>* Backend::create##OpType(const std::string& name,          \
                                             Workspace* workspace) {           \
        return new OpType<Backend>(name, workspace);                           \
    }
45 
// Expands to the out-of-line definition of SmvBackend::create<OpType>() for
// an operator with a dedicated SMV implementation. Unlike DEF_CREATE_OP, the
// constructed type is the non-templated Smv<OpType> class rather than the
// generic OpType<Backend> template.
#define DEF_CREATE_SMV_OP(OpType)                                              \
    Smv##OpType* SmvBackend::create##OpType(const std::string& name,           \
                                            Workspace* workspace) {            \
        return new Smv##OpType(name, workspace);                               \
    }
51 
52 const std::string ReferenceBackend::Name = "Reference";
53 const std::string SmvBackend::Name = "SMV";
54 
55 DEF_CREATE_OP(ConvolutionOp, ReferenceBackend)
56 DEF_CREATE_OP(DataOp, ReferenceBackend)
58 DEF_CREATE_OP(MaxPoolingOp, ReferenceBackend)
59 DEF_CREATE_OP(AvgPoolingOp, ReferenceBackend)
60 DEF_CREATE_OP(InnerProductOp, ReferenceBackend)
61 DEF_CREATE_OP(SoftmaxOp, ReferenceBackend)
62 DEF_CREATE_OP(ReorderOp, ReferenceBackend)
63 DEF_CREATE_OP(ConcatOp, ReferenceBackend)
64 DEF_CREATE_OP(SplitOp, ReferenceBackend)
65 DEF_CREATE_OP(ReshapeOp, ReferenceBackend)
66 DEF_CREATE_OP(RepeatOp, ReferenceBackend)
67 DEF_CREATE_OP(FlattenOp, ReferenceBackend)
68 DEF_CREATE_OP(BatchNormOp, ReferenceBackend)
69 DEF_CREATE_OP(EltwiseAddOp, ReferenceBackend)
70 DEF_CREATE_OP(EltwiseMulOp, ReferenceBackend)
71 DEF_CREATE_OP(LessOp, ReferenceBackend)
72 DEF_CREATE_OP(LessEqualOp, ReferenceBackend)
73 DEF_CREATE_OP(GreaterOp, ReferenceBackend)
74 DEF_CREATE_OP(GreaterEqualOp, ReferenceBackend)
75 DEF_CREATE_OP(SwitchOp, ReferenceBackend)
76 DEF_CREATE_OP(MergeOp, ReferenceBackend)
77 DEF_CREATE_OP(ReluOp, ReferenceBackend)
78 DEF_CREATE_OP(SigmoidOp, ReferenceBackend)
79 DEF_CREATE_OP(EluOp, ReferenceBackend)
80 DEF_CREATE_OP(SeluOp, ReferenceBackend)
81 DEF_CREATE_OP(TanhOp, ReferenceBackend)
82 DEF_CREATE_OP(HardTanhOp, ReferenceBackend)
83 DEF_CREATE_OP(PaddingOp, ReferenceBackend)
84 
85 DEF_CREATE_SMV_OP(ConvolutionOp)
86 DEF_CREATE_SMV_OP(InnerProductOp)
87 DEF_CREATE_SMV_OP(MaxPoolingOp)
88 DEF_CREATE_SMV_OP(AvgPoolingOp)
89 DEF_CREATE_SMV_OP(BatchNormOp)
90 DEF_CREATE_SMV_OP(ReluOp)
91 DEF_CREATE_SMV_OP(EluOp)
92 DEF_CREATE_SMV_OP(SeluOp)
93 DEF_CREATE_SMV_OP(TanhOp)
94 DEF_CREATE_SMV_OP(HardTanhOp)
95 DEF_CREATE_SMV_OP(SigmoidOp)
96 DEF_CREATE_SMV_OP(SoftmaxOp)
97 DEF_CREATE_SMV_OP(EltwiseAddOp)
98 DEF_CREATE_SMV_OP(EltwiseMulOp)
99 DEF_CREATE_SMV_OP(LessOp)
100 DEF_CREATE_SMV_OP(LessEqualOp)
101 DEF_CREATE_SMV_OP(GreaterOp)
102 DEF_CREATE_SMV_OP(GreaterEqualOp)
103 DEF_CREATE_OP(DataOp, SmvBackend)
104 DEF_CREATE_OP(DepthwiseConvolutionOp, SmvBackend)
105 DEF_CREATE_OP(ReorderOp, SmvBackend)
106 DEF_CREATE_OP(ConcatOp, SmvBackend)
107 DEF_CREATE_OP(SplitOp, SmvBackend)
108 DEF_CREATE_OP(ReshapeOp, SmvBackend)
109 DEF_CREATE_OP(RepeatOp, SmvBackend)
110 DEF_CREATE_OP(FlattenOp, SmvBackend)
111 DEF_CREATE_OP(SwitchOp, SmvBackend)
112 DEF_CREATE_OP(MergeOp, SmvBackend)
113 DEF_CREATE_OP(PaddingOp, SmvBackend)
114 
// Accelerator id codes for the Reference backend. Each functional block gets
// a distinct id, which keeps the traces simple to tell apart.
namespace ref {
const unsigned kConvolutionHw = 0x0001;
const unsigned kInnerProductHw = 0x0002;
const unsigned kEltwiseOpHw = 0x0003;
const unsigned kBatchNormHw = 0x0004;
const unsigned kPoolingHw = 0x0005;
}  // namespace ref
123 
namespace smv {
// Scratchpad size for the SMV accelerator. Defined here but assigned at
// runtime (non-const, zero-initialized) — presumably configured from the
// model parameters; confirm against the initialization code.
int kSpadSize;
// Every Aladdin-modeled hardware block deliberately shares a single
// accelerator id. With one id there is only ONE simulated datapath, so all
// the blocks can share the scratchpads without any infrastructure changes.
// Because each _hw-level function is still traced separately, Aladdin exits
// after simulating each block and control returns to the CPU at the right
// points. Distinct ids would instead create distinct datapaths that could
// not share data directly.
const unsigned kConvolutionHw = 0x0003;
const unsigned kInnerProductHw = 0x0003;
const unsigned kEltwiseOpHw = 0x0003;
const unsigned kBatchNormHw = 0x0003;
const unsigned kPoolingHw = 0x0003;
// The systolic array is modeled in gem5 itself rather than Aladdin, so it
// requires its own accelerator id.
const unsigned kSystolicArrayHw = 0x0004;
// Backing pointers for the shared scratchpads (zero-initialized; allocated
// elsewhere).
float* spad0;
float* spad1;
float* spad2;
}  // namespace smv
146 
147 } // namespace smaug
smaug::RepeatOp
Replicates a Tensor's data among all dimensions.
Definition: backend.h:44
smaug::ConcatOp
Concatenates N Tensors along a specified axis.
Definition: backend.h:41
smaug::EltwiseAddOp
Adds two Tensors elementwise.
Definition: backend.h:47
smaug::SmvBackend
SmvBackend implements a set of models of optimized DL kernels that were taped out on a machine learning SoC.
Definition: backend.h:178
smaug::LessEqualOp
Implements an elementwise less-than-or-equal-to operator.
Definition: backend.h:50
smaug::SigmoidOp
Implements the sigmoid operator, defined as 1/(1 + exp(-input)).
Definition: backend.h:56
smaug::SoftmaxOp
Implements the softmax operator.
Definition: backend.h:39
smaug::SeluOp
Implements the scaled exponential linear unit function.
Definition: backend.h:58
smaug::HardTanhOp
Implements the hard tanh operator, which bounds the min and max value of the tanh operator.
Definition: backend.h:60
smaug::InnerProductOp
Implements the inner product operator.
Definition: backend.h:38
smaug::TanhOp
Implements the tanh operator.
Definition: backend.h:59
smaug::SplitOp
Implements the split operator, which divides a Tensor into N output Tensors along a specified dimensi...
Definition: backend.h:42
smaug::SwitchOp
Conditionally forwards an input to one of two outputs.
Definition: backend.h:53
smaug::LessOp
Implements an elementwise less-than operator.
Definition: backend.h:49
smaug::DepthwiseConvolutionOp
Implements the depthwise convolution operator.
Definition: backend.h:35
smaug::ReferenceBackend
ReferenceBackend provides reference implementations of all operators supported by SMAUG.
Definition: backend.h:83
smaug::BatchNormOp
Implements the batch normalization layer.
Definition: backend.h:46
smaug::GreaterOp
Implements an elementwise greater than operator.
Definition: backend.h:51
smaug::MaxPoolingOp
Implements the max-pooling operator.
Definition: backend.h:36
smaug::EluOp
Implements the exponential linear unit function.
Definition: backend.h:57
smaug::ReluOp
Implements the rectified linear unit operator: max(slope * x, 0).
Definition: backend.h:55
smaug
The smaug namespace is the parent namespace of all C++ code in SMAUG.
Definition: backend.cpp:38
smaug::AvgPoolingOp
Implements the arithmetic-average-pooling operator.
Definition: backend.h:37
smaug::MergeOp
Forwards the first live input to its output.
Definition: backend.h:54
smaug::DataOp
Exposes a Tensor as its only output.
Definition: backend.h:34
smaug::EltwiseMulOp
Multiplies two Tensors elementwise.
Definition: backend.h:48
smaug::PaddingOp
Pad a given tensor in any number of dimensions with arbitrary size.
Definition: backend.h:61
smaug::ConvolutionOp
The base class for all 4D spatial convolution operators.
Definition: backend.h:33
smaug::GreaterEqualOp
Implements an elementwise greater than or equal to operator.
Definition: backend.h:52
smaug::FlattenOp
Flattens each batch of a Tensor.
Definition: backend.h:45
smaug::ReorderOp
Implements a Tensor reordering operation to convert between different DataLayouts.
Definition: backend.h:40
smaug::ReshapeOp
Changes the Tensor's shape while retaining the number of elements.
Definition: backend.h:43