@@ -18,6 +18,7 @@ limitations under the License.
 #include "tensorflow/cc/ops/standard_ops.h"
 
 #include "tensorflow/cc/framework/grad_op_registry.h"
+#include "tensorflow/cc/framework/gradients.h"
 
 namespace tensorflow {
 namespace ops {
@@ -118,6 +119,94 @@ Status BiasAddGradHelper(const Scope& scope, const Operation& op,
 }
 REGISTER_GRADIENT_OP("BiasAdd", BiasAddGradHelper);
 
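+// Gradient for Conv2D: dx_1 (w.r.t. the input) comes from Conv2DBackpropInput
+// and dx_2 (w.r.t. the filter) from Conv2DBackpropFilter, both reusing the
+// strides, padding, data_format and use_cudnn_on_gpu attrs of the forward op.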
+Status Conv2DGrad(const Scope& scope, const Operation& op,
+                  const std::vector<Output>& grad_inputs,
+                  std::vector<Output>* grad_outputs) {
+  string data_format;
+  string padding;
+  std::vector<int32> strides;
+  bool use_cudnn_on_gpu;
+  auto attrs = op.output(0).node()->attrs();
+  GetNodeAttr(attrs, "data_format", &data_format);
+  GetNodeAttr(attrs, "padding", &padding);
+  GetNodeAttr(attrs, "strides", &strides);
+  GetNodeAttr(attrs, "use_cudnn_on_gpu", &use_cudnn_on_gpu);
+  Conv2DBackpropInput::Attrs input_attrs;
+  input_attrs.DataFormat(data_format);
+  input_attrs.UseCudnnOnGpu(use_cudnn_on_gpu);
+  auto dx_1 = Conv2DBackpropInput(scope, Shape(scope, op.input(0)),
+                                  op.input(1), grad_inputs[0],
+                                  strides, padding, input_attrs);
+  grad_outputs->push_back(dx_1);
+  Conv2DBackpropFilter::Attrs filter_attrs;
+  filter_attrs.DataFormat(data_format);
+  filter_attrs.UseCudnnOnGpu(use_cudnn_on_gpu);
+  auto dx_2 = Conv2DBackpropFilter(scope, op.input(0),
+                                   Shape(scope, op.input(1)), grad_inputs[0],
+                                   strides, padding, filter_attrs);
+  grad_outputs->push_back(dx_2);
+  return scope.status();
+}
+REGISTER_GRADIENT_OP("Conv2D", Conv2DGrad);
+
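+// Gradient for MaxPool: internal::MaxPoolGrad routes each incoming gradient
+// value back to the input element that produced the corresponding maximum.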
+Status MaxPoolGradHelper(const Scope& scope, const Operation& op,
+                         const std::vector<Output>& grad_inputs,
+                         std::vector<Output>* grad_outputs) {
+  string data_format;
+  string padding;
+  std::vector<int32> strides;
+  std::vector<int32> ksize;
+  auto attrs = op.output(0).node()->attrs();
+  GetNodeAttr(attrs, "data_format", &data_format);
+  GetNodeAttr(attrs, "ksize", &ksize);
+  GetNodeAttr(attrs, "padding", &padding);
+  GetNodeAttr(attrs, "strides", &strides);
+  internal::MaxPoolGrad::Attrs grad_attrs;
+  grad_attrs.DataFormat(data_format);
+  auto dx = internal::MaxPoolGrad(scope, op.input(0),
+                                  op.output(0),
+                                  grad_inputs[0],
+                                  ksize, strides,
+                                  padding, grad_attrs);
+  grad_outputs->push_back(dx);
+  return scope.status();
+}
+REGISTER_GRADIENT_OP("MaxPool", MaxPoolGradHelper);
+
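+// Gradient for MaxPoolV2: unlike MaxPool, ksize and strides are tensor inputs
+// (op.input(1) and op.input(2)) rather than attrs, so they are forwarded to
+// MaxPoolGradV2 and receive NoGradient() entries below.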
+Status MaxPoolGradV2Helper(const Scope& scope, const Operation& op,
+                           const std::vector<Output>& grad_inputs,
+                           std::vector<Output>* grad_outputs) {
+  string data_format;
+  string padding;
+  auto attrs = op.output(0).node()->attrs();
+  GetNodeAttr(attrs, "data_format", &data_format);
+  GetNodeAttr(attrs, "padding", &padding);
+  MaxPoolGradV2::Attrs grad_attrs;
+  grad_attrs.DataFormat(data_format);
+  auto dx = MaxPoolGradV2(scope, op.input(0),
+                          op.output(0),
+                          grad_inputs[0],
+                          op.input(1),
+                          op.input(2),
+                          padding,
+                          grad_attrs);
+  grad_outputs->push_back(dx);
+  grad_outputs->push_back(NoGradient());
+  grad_outputs->push_back(NoGradient());
+  return scope.status();
+}
+REGISTER_GRADIENT_OP("MaxPoolV2", MaxPoolGradV2Helper);
+
+
+
 }  // anonymous namespace
 }  // namespace ops
 }  // namespace tensorflow
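
For context (not part of the commit itself): gradient functions registered via
REGISTER_GRADIENT_OP are looked up by the symbolic-gradient builder declared in
tensorflow/cc/framework/gradients.h, which this change now includes. Below is a
minimal sketch of exercising the new Conv2D gradient through that API; the
shapes, constants, and session usage are illustrative assumptions, not part of
the change:

    #include "tensorflow/cc/client/client_session.h"
    #include "tensorflow/cc/framework/gradients.h"
    #include "tensorflow/cc/ops/standard_ops.h"

    using namespace tensorflow;
    using namespace tensorflow::ops;

    int main() {
      Scope scope = Scope::NewRootScope();
      // A 1x4x4x1 NHWC input and a 2x2 single-channel filter, both ones.
      auto x = Const(scope, 1.0f, {1, 4, 4, 1});
      auto w = Const(scope, 1.0f, {2, 2, 1, 1});
      auto y = Conv2D(scope, x, w, {1, 1, 1, 1}, "SAME");
      // Build dy/dx and dy/dw; this consults the gradient-op registry and
      // invokes the Conv2DGrad function added above.
      std::vector<Output> grads;
      TF_CHECK_OK(AddSymbolicGradients(scope, {y}, {x, w}, &grads));
      // Evaluate both gradients.
      ClientSession session(scope);
      std::vector<Tensor> outputs;
      TF_CHECK_OK(session.Run({grads[0], grads[1]}, &outputs));
      return 0;
    }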