5 import "smaug/core/tensor.proto";
6 import "smaug/core/types.proto";

message ConvParams {
  PaddingType padding = 1;
  repeated int32 stride = 2;
}

message PoolParams {
  repeated int32 stride = 1;
  repeated int32 pool_size = 2;
}

message PaddingParams {
  repeated int32 padding_size = 1;
}

message ConcatParams {
  int32 concat_axis = 1;
}

message EluParams {
  // ...
  float lambda_param = 2;
}

message HardTanhParams {
  // ...
}

message ActivationParams {
  OpType activation = 1;
  LreluParams lrelu_params = 2;
  EluParams elu_params = 3;
  HardTanhParams hard_tanh_params = 4;
}

message Params {
  ConvParams conv_params = 1;
  PoolParams pool_params = 2;
  ConcatParams concat_params = 4;
  SplitParams split_params = 5;
  PaddingParams padding_params = 6;
  ActivationParams act_params = 3;
}
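
// For illustration only, in protobuf text format (the enum value names below
// are placeholders, not taken from types.proto): a convolution followed by a
// ReLU activation might fill in the Params message above as
//   conv_params { padding: SAME_PADDING stride: 1 stride: 1 }
//   act_params { activation: RELU }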

message NodeProto {
  // Unique node name in the form of "scopes:name". Scopes are optional.
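  // For illustration only (the names are made up, not from any real model):
  // an unscoped node might be named "conv0", a scoped one "block1:conv0".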
  // ...
  // Parents of this node.
  repeated string parents = 3;
  // src_tensors_indices must have the same size as `parents`. For each parent
  // node parents[i], src_tensors_indices[i] is the index of the parent's
  // output tensor that becomes an input tensor of this node. Note that if a
  // parent has multiple output tensors connected to this node, that parent
  // must be listed multiple times in `parents`, each time with a different
  // src_tensors_indices entry.
  repeated int32 src_tensors_indices = 5;
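  // For illustration only (the node name is made up): if this node consumes
  // both output tensors of a parent named "conv0", then in text format:
  //   parents: "conv0" parents: "conv0"
  //   src_tensors_indices: 0 src_tensors_indices: 1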
  // In theory, we could specify the above node-to-node connection information
  // in TensorProto, but the current design seems easier for the C++ side,
  // which constructs the operators first and then creates the tensors.
  repeated TensorProto input_tensors = 6;
  repeated TensorProto output_tensors = 7;
}