1 | /******************************************************************************* |
2 | * Copyright 2016-2018 Intel Corporation |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | * you may not use this file except in compliance with the License. |
6 | * You may obtain a copy of the License at |
7 | * |
8 | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | * |
10 | * Unless required by applicable law or agreed to in writing, software |
11 | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | * See the License for the specific language governing permissions and |
14 | * limitations under the License. |
15 | *******************************************************************************/ |
16 | |
17 | #include <assert.h> |
18 | #include "mkldnn.h" |
19 | |
20 | #include "c_types_map.hpp" |
21 | #include "type_helpers.hpp" |
22 | #include "utils.hpp" |
23 | |
24 | using namespace mkldnn::impl; |
25 | using namespace mkldnn::impl::utils; |
26 | using namespace mkldnn::impl::status; |
27 | using namespace mkldnn::impl::prop_kind; |
28 | using namespace mkldnn::impl::alg_kind; |
29 | using namespace mkldnn::impl::types; |
30 | |
31 | namespace mkldnn { |
32 | namespace impl { |
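/* Common initializer shared by all the convolution descriptor variants below
 * (forward / backward data / backward weights, dilated or not). It validates
 * the arguments, fills a convolution_desc_t, and stores the result in
 * conv_desc on success. */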
33 | status_t conv_desc_init(convolution_desc_t *conv_desc, |
34 | prop_kind_t prop_kind, alg_kind_t alg_kind, |
35 | const memory_desc_t *src_desc, const memory_desc_t *weights_desc, |
36 | const memory_desc_t *bias_desc, const memory_desc_t *dst_desc, |
37 | const dims_t strides, const dims_t dilates, |
38 | const dims_t padding_l, const dims_t padding_r, |
39 | padding_kind_t padding_kind) { |
40 | bool args_ok = true |
41 | && !any_null(conv_desc, src_desc, weights_desc, dst_desc, strides, |
42 | padding_l) |
43 | && one_of(alg_kind, convolution_auto, convolution_direct, convolution_winograd) |
44 | && one_of(padding_kind, padding_kind::padding_zero); |
45 | if (!args_ok) return invalid_arguments; |
46 | |
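    // A null padding_r means symmetric padding: reuse padding_l for the
    // right/bottom sides.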
47 | if (padding_r == nullptr) padding_r = padding_l; |
48 | |
49 | auto cd = convolution_desc_t(); |
50 | cd.primitive_kind = primitive_kind::convolution; |
51 | cd.prop_kind = prop_kind; |
52 | cd.alg_kind = alg_kind; |
53 | |
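    // Zero-initialize every memory descriptor slot; only the slots relevant
    // to the requested propagation kind are overwritten below.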
54 | cd.diff_src_desc = cd.src_desc = zero_md(); |
55 | cd.diff_dst_desc = cd.dst_desc = zero_md(); |
56 | cd.diff_weights_desc = cd.weights_desc = zero_md(); |
57 | cd.diff_bias_desc = cd.bias_desc = zero_md(); |
58 | |
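    // Bias is optional (a null descriptor, or one with undefined format,
    // disables it). Grouped convolution is detected by the weights carrying
    // one extra leading "groups" dimension compared to the source.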
59 | const bool is_fwd = one_of(prop_kind, forward_training, forward_inference); |
    const bool with_bias =
        bias_desc && bias_desc->format != memory_format::undef;
62 | const bool with_groups = weights_desc->ndims == src_desc->ndims + 1; |
63 | |
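    // Route each user descriptor to either its plain slot or its diff_ slot:
    // src is a gradient only for backward_data, weights/bias only for
    // backward_weights, and dst is a gradient for any backward pass.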
64 | (prop_kind == backward_data ? cd.diff_src_desc : cd.src_desc) = *src_desc; |
65 | (is_fwd ? cd.dst_desc : cd.diff_dst_desc) = *dst_desc; |
66 | (prop_kind == backward_weights ? cd.diff_weights_desc : cd.weights_desc) = |
67 | *weights_desc; |
68 | if (with_bias) |
69 | (prop_kind == backward_weights ? cd.diff_bias_desc : cd.bias_desc) = |
70 | *bias_desc; |
71 | |
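    // Spatial parameters: the first two dims are minibatch and channels, so
    // there are ndims - 2 spatial dimensions. A null dilates array means a
    // dense (non-dilated) convolution, stored as all-zero dilations.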
72 | int sp_dims = src_desc->ndims - 2; |
73 | utils::array_copy(cd.strides, strides, sp_dims); |
74 | utils::array_copy(cd.padding[0], padding_l, sp_dims); |
75 | utils::array_copy(cd.padding[1], padding_r, sp_dims); |
76 | if (dilates) |
77 | utils::array_copy(cd.dilates, dilates, sp_dims); |
78 | else |
79 | utils::array_set(cd.dilates, 0, sp_dims); |
80 | |
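    // The accumulator data type is inferred from the src/weights/dst data
    // types and the propagation kind (f32 tensors accumulate in f32; int8
    // setups typically accumulate in s32).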
81 | cd.padding_kind = padding_kind; |
82 | cd.accum_data_type = types::default_accum_data_type(src_desc->data_type, |
83 | weights_desc->data_type, dst_desc->data_type, prop_kind); |
84 | |
85 | const int g = with_groups ? weights_desc->dims[0] : 1; |
86 | const int bias_dim = prop_kind == backward_data |
87 | ? src_desc->dims[1] |
88 | : dst_desc->dims[1]; |
89 | |
90 | bool consistency = true |
91 | && memory_desc_wrapper(weights_desc).nelems() |
92 | && src_desc->ndims == dst_desc->ndims |
93 | && utils::one_of(src_desc->ndims, 3, 4, 5) |
94 | && utils::one_of(weights_desc->ndims, src_desc->ndims, |
95 | src_desc->ndims + 1) |
96 | && (with_bias ? bias_desc->ndims == 1 : true) |
97 | && (with_bias ? bias_desc->dims[0] == bias_dim : true) |
98 | && src_desc->dims[0] == dst_desc->dims[0] |
99 | && src_desc->dims[1] == g * weights_desc->dims[with_groups + 1] |
100 | && dst_desc->dims[1] == g * weights_desc->dims[with_groups + 0]; |
101 | for (int i = 2; i < src_desc->ndims; ++i) |
102 | { |
103 | int src = src_desc->dims[i]; |
104 | int ker = weights_desc->dims[with_groups + i]; |
105 | int dil = cd.dilates[i - 2]; |
106 | int pad_l = padding_l[i - 2]; |
107 | int pad_r = padding_r[i - 2]; |
108 | int str = strides[i - 2]; |
109 | int dst = dst_desc->dims[i]; |
110 | int ker_range = 1 + (ker - 1) * (dil + 1); |
111 | |
112 | if (str < 1) return invalid_arguments; |
113 | consistency = consistency |
114 | && dil >= 0 |
115 | && pad_l >= 0 |
116 | && pad_r + str > 0 |
117 | && (src - ker_range + pad_l + pad_r) / str + 1 == dst; |
118 | } |
119 | if (!consistency) return invalid_arguments; |
120 | |
121 | *conv_desc = cd; |
122 | return success; |
123 | } |
124 | } |
125 | } |
126 | |
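/* The public C API entry points below are thin wrappers around
 * conv_desc_init. A minimal forward usage sketch (assuming 4D f32 nchw/oihw
 * tensors; the dimension arrays and *_dims names are illustrative only):
 *
 *     mkldnn_memory_desc_t src_md, wei_md, bia_md, dst_md;
 *     mkldnn_memory_desc_init(&src_md, 4, src_dims, mkldnn_f32, mkldnn_nchw);
 *     mkldnn_memory_desc_init(&wei_md, 4, wei_dims, mkldnn_f32, mkldnn_oihw);
 *     mkldnn_memory_desc_init(&bia_md, 1, bia_dims, mkldnn_f32, mkldnn_x);
 *     mkldnn_memory_desc_init(&dst_md, 4, dst_dims, mkldnn_f32, mkldnn_nchw);
 *
 *     mkldnn_dims_t strides = {1, 1}, pad = {0, 0};
 *     mkldnn_convolution_desc_t conv_d;
 *     mkldnn_convolution_forward_desc_init(&conv_d, mkldnn_forward_training,
 *             mkldnn_convolution_direct, &src_md, &wei_md, &bia_md, &dst_md,
 *             strides, pad, pad, mkldnn_padding_zero);
 */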
127 | status_t mkldnn_convolution_forward_desc_init(convolution_desc_t *conv_desc, |
128 | prop_kind_t prop_kind, alg_kind_t alg_kind, |
129 | const memory_desc_t *src_desc, const memory_desc_t *weights_desc, |
130 | const memory_desc_t *bias_desc, const memory_desc_t *dst_desc, |
131 | const dims_t strides, const dims_t padding_l, const dims_t padding_r, |
132 | padding_kind_t padding_kind) { |
133 | if (!one_of(prop_kind, forward_training, forward_inference)) |
134 | return invalid_arguments; |
    return mkldnn::impl::conv_desc_init(conv_desc, prop_kind, alg_kind,
            src_desc, weights_desc, bias_desc, dst_desc, strides, nullptr,
            padding_l, padding_r, padding_kind);
138 | } |
139 | |
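// Same as the forward initializer above, but with an explicit dilates array.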
140 | status_t mkldnn_dilated_convolution_forward_desc_init( |
141 | convolution_desc_t *conv_desc, prop_kind_t prop_kind, |
142 | alg_kind_t alg_kind, const memory_desc_t *src_desc, |
143 | const memory_desc_t *weights_desc, const memory_desc_t *bias_desc, |
144 | const memory_desc_t *dst_desc, const dims_t strides, |
145 | const dims_t dilates, const dims_t padding_l, |
146 | const dims_t padding_r, padding_kind_t padding_kind) { |
147 | if (!one_of(prop_kind, forward_training, forward_inference)) |
148 | return invalid_arguments; |
    return mkldnn::impl::conv_desc_init(conv_desc, prop_kind, alg_kind,
            src_desc, weights_desc, bias_desc, dst_desc, strides, dilates,
            padding_l, padding_r, padding_kind);
152 | } |
153 | |
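// Backward-data variant: diff_src_desc fills the src slot and diff_dst_desc
// the dst slot; bias and dilation are not used, hence the nullptr arguments.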
154 | status_t mkldnn_convolution_backward_data_desc_init( |
155 | convolution_desc_t *conv_desc, alg_kind_t alg_kind, |
156 | const memory_desc_t *diff_src_desc, const memory_desc_t *weights_desc, |
157 | const memory_desc_t *diff_dst_desc, const dims_t strides, |
158 | const dims_t padding_l, const dims_t padding_r, |
159 | padding_kind_t padding_kind) { |
    return mkldnn::impl::conv_desc_init(conv_desc, backward_data, alg_kind,
            diff_src_desc, weights_desc, nullptr, diff_dst_desc, strides,
            nullptr, padding_l, padding_r, padding_kind);
163 | } |
164 | |
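// Dilated backward-data variant: as above, but forwarding the dilates array.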
165 | status_t mkldnn_dilated_convolution_backward_data_desc_init( |
166 | convolution_desc_t *conv_desc, alg_kind_t alg_kind, |
167 | const memory_desc_t *diff_src_desc, const memory_desc_t *weights_desc, |
168 | const memory_desc_t *diff_dst_desc, const dims_t strides, |
169 | const dims_t dilates, const dims_t padding_l, const dims_t padding_r, |
170 | padding_kind_t padding_kind) { |
    return mkldnn::impl::conv_desc_init(conv_desc, backward_data, alg_kind,
            diff_src_desc, weights_desc, nullptr, diff_dst_desc, strides,
            dilates, padding_l, padding_r, padding_kind);
174 | } |
175 | |
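// Backward-weights variant: the weights and bias slots receive the diff_
// descriptors; dilation is not used, hence the nullptr dilates argument.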
176 | status_t mkldnn_convolution_backward_weights_desc_init( |
177 | convolution_desc_t *conv_desc, alg_kind_t alg_kind, |
178 | const memory_desc_t *src_desc, const memory_desc_t *diff_weights_desc, |
179 | const memory_desc_t *diff_bias_desc, |
180 | const memory_desc_t *diff_dst_desc, const dims_t strides, |
181 | const dims_t padding_l, const dims_t padding_r, |
182 | padding_kind_t padding_kind) { |
    return mkldnn::impl::conv_desc_init(conv_desc, backward_weights, alg_kind,
            src_desc, diff_weights_desc, diff_bias_desc, diff_dst_desc,
            strides, nullptr, padding_l, padding_r, padding_kind);
186 | } |
187 | |
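// Dilated backward-weights variant: as above, but forwarding the dilates
// array.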
188 | status_t mkldnn_dilated_convolution_backward_weights_desc_init( |
189 | convolution_desc_t *conv_desc, alg_kind_t alg_kind, |
190 | const memory_desc_t *src_desc, const memory_desc_t *diff_weights_desc, |
191 | const memory_desc_t *diff_bias_desc, |
192 | const memory_desc_t *diff_dst_desc, const dims_t strides, |
193 | const dims_t dilates, const dims_t padding_l, const dims_t padding_r, |
194 | padding_kind_t padding_kind) { |
    return mkldnn::impl::conv_desc_init(conv_desc, backward_weights, alg_kind,
            src_desc, diff_weights_desc, diff_bias_desc, diff_dst_desc,
            strides, dilates, padding_l, padding_r, padding_kind);
198 | } |
199 | |
200 | // vim: et ts=4 sw=4 cindent cino^=l0,\:0,N-s |
201 | |