// SMAUG
// Simulating Machine Learning Applications on gem5-Aladdin
// node.proto
syntax = "proto3";

package smaug;

import "smaug/core/tensor.proto";
import "smaug/core/types.proto";

// Parameters for convolution operators.
message ConvParams {
  // Padding scheme; PaddingType is declared in smaug/core/types.proto.
  PaddingType padding = 1;
  // Convolution strides, one entry per dimension.
  // NOTE(review): dimension ordering is not specified here — confirm against
  // the C++ operator implementations.
  repeated int32 stride = 2;
}

// Parameters for pooling operators.
message PoolParams {
  // Pooling strides, one entry per dimension.
  repeated int32 stride = 1;
  // Pooling window size, one entry per dimension.
  repeated int32 pool_size = 2;
}

// Parameters for explicit padding operators.
message PaddingParams {
  // Amount of padding to apply.
  // NOTE(review): per-side vs. per-dimension semantics are not visible here —
  // confirm against the padding operator implementation.
  repeated int32 padding_size = 1;
}

// Parameters for concatenation operators.
message ConcatParams {
  // Axis along which inputs are concatenated.
  int32 concat_axis = 1;
}

// Parameters for split operators.
message SplitParams {
  // Axis along which the input is split.
  int32 split_axis = 1;
}

// Parameters for the leaky-ReLU activation.
message LreluParams {
  // Slope applied to negative inputs.
  float slope = 1;
}

// Parameters for the ELU/SELU activations.
message EluParams {
  // ELU alpha coefficient.
  float alpha = 1;
  // Scaling coefficient; named lambda_param presumably because "lambda" is a
  // reserved keyword in Python codegen — TODO confirm it is SELU's lambda.
  float lambda_param = 2;
}

// Parameters for the hard-tanh activation.
message HardTanhParams {
  // Lower clipping bound.
  float min = 1;
  // Upper clipping bound.
  float max = 2;
}

// Describes an activation function and its (optional) parameters.
message ActivationParams {
  // Which activation to apply; OpType is declared in smaug/core/types.proto.
  OpType activation = 1;
  // Extra parameters, set only for activations that require them.
  oneof value {
    LreluParams lrelu_params = 2;
    EluParams elu_params = 3;
    HardTanhParams hard_tanh_params = 4;
  }
}

// Operator-specific parameters for a node.
message Params {
  // At most one operator-parameter payload may be set.
  oneof value {
    ConvParams conv_params = 1;
    PoolParams pool_params = 2;
    ConcatParams concat_params = 4;
    SplitParams split_params = 5;
    PaddingParams padding_params = 6;
  }
  // Activation parameters live outside the oneof (at field number 3) so they
  // can be set together with any of the payloads above.
  ActivationParams act_params = 3;
}

// A single node (operator instance) in a SMAUG network graph.
message NodeProto {
  // Unique node name in the form of "scopes:name". Scopes are optional.
  string name = 1;
  // The operator type; OpType is declared in smaug/core/types.proto.
  OpType op = 2;
  // Names of this node's parent nodes.
  repeated string parents = 3;
  // Must have the same length as `parents`. For each parent node parents[i],
  // src_tensors_indices[i] is the index of the parent's output tensor that
  // becomes an input tensor of this node. If a parent feeds multiple output
  // tensors into this node, that parent is listed multiple times in
  // `parents`, each occurrence with a different src_tensors_indices entry.
  // NOTE(review): field number 4 is skipped; if it was deleted, it should be
  // reserved to prevent accidental reuse.
  repeated int32 src_tensors_indices = 5;
  // In theory this node-to-node connection information could live in
  // TensorProto, but the current design is easier for the C++ side, which
  // constructs operators first and then creates tensors.
  repeated TensorProto input_tensors = 6;
  repeated TensorProto output_tensors = 7;
  // Operator-specific and activation parameters.
  Params params = 8;
}