Commit 9a5b6d2

Rob Hess authored and jeffdonahue committed
Fix all rebase errors. All tests are passing.
1 parent 8bff9ce · commit 9a5b6d2

38 files changed · +254 −300 lines

include/caffe/blob.hpp

Lines changed: 0 additions & 1 deletion
@@ -4,7 +4,6 @@
 #include "caffe/common.hpp"
 #include "caffe/proto/caffe.pb.h"
 #include "caffe/syncedmem.hpp"
-#include "caffe/util/math_functions.hpp"
 
 namespace caffe {
 
include/caffe/common_layers.hpp

Lines changed: 17 additions & 33 deletions
@@ -31,6 +31,11 @@ class ArgMaxLayer : public Layer<Dtype> {
       : Layer<Dtype>(param) {}
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
+  virtual Dtype Forward(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+  virtual void Backward(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down,
+      vector<Blob<Dtype>*>* bottom) { NOT_IMPLEMENTED; }
 
   virtual inline LayerParameter_LayerType type() const {
     return LayerParameter_LayerType_ARGMAX;
@@ -39,12 +44,6 @@ class ArgMaxLayer : public Layer<Dtype> {
   virtual inline int ExactNumTopBlobs() const { return 1; }
 
  protected:
-  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
-  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
-      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
-    NOT_IMPLEMENTED;
-  }
   bool out_max_val_;
   size_t top_k_;
 };
@@ -60,6 +59,10 @@ class ConcatLayer : public Layer<Dtype> {
       : Layer<Dtype>(param) {}
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
+  virtual Dtype Forward(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+  virtual void Backward(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
 
   virtual inline LayerParameter_LayerType type() const {
     return LayerParameter_LayerType_CONCAT;
@@ -68,15 +71,6 @@ class ConcatLayer : public Layer<Dtype> {
   virtual inline int ExactNumTopBlobs() const { return 1; }
 
  protected:
-  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
-  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
-  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
-      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
-  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
-      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
-
   Blob<Dtype> col_bob_;
   int count_;
   int num_;
@@ -95,6 +89,10 @@ class FlattenLayer : public Layer<Dtype> {
       : Layer<Dtype>(param) {}
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
+  virtual Dtype Forward(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+  virtual void Backward(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
 
   virtual inline LayerParameter_LayerType type() const {
     return LayerParameter_LayerType_FLATTEN;
@@ -103,15 +101,6 @@ class FlattenLayer : public Layer<Dtype> {
   virtual inline int ExactNumTopBlobs() const { return 1; }
 
  protected:
-  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
-  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
-  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
-      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
-  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
-      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
-
   int count_;
 };
 
@@ -188,6 +177,10 @@ class SplitLayer : public Layer<Dtype> {
       : Layer<Dtype>(param) {}
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
+  virtual Dtype Forward(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+  virtual void Backward(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
 
   virtual inline LayerParameter_LayerType type() const {
     return LayerParameter_LayerType_SPLIT;
@@ -196,15 +189,6 @@ class SplitLayer : public Layer<Dtype> {
   virtual inline int MinTopBlobs() const { return 1; }
 
  protected:
-  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
-  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
-  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
-      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
-  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
-      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
-
   int count_;
 };
 
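Across this header the change is the same: the per-device Forward_cpu/Forward_gpu and Backward_cpu/Backward_gpu pairs are deleted and each layer declares a single public Forward/Backward, with device selection pushed down into the math wrappers. As a minimal sketch of why one body can now serve both modes, here is an illustrative FlattenLayer::Forward written against the GetDevice<Dtype>() wrapper, assuming Blob exposes the device-agnostic const_data()/mutable_data() accessors this commit uses in blob.cpp; the body is a sketch, not code from this diff:

```cpp
// Illustrative sketch only. Flatten is a pure copy (same data, new shape),
// so a mode-aware device wrapper lets one Forward() cover CPU and GPU,
// which is what makes the deleted _cpu/_gpu pairs unnecessary.
template <typename Dtype>
Dtype FlattenLayer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  // count_ is cached in SetUp(); copy bottom data into the reshaped top blob.
  GetDevice<Dtype>()->copy(count_, bottom[0]->const_data(),
      (*top)[0]->mutable_data());
  return Dtype(0);  // a reshape contributes no loss
}
```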

include/caffe/data_layers.hpp

Lines changed: 9 additions & 1 deletion
@@ -28,13 +28,21 @@ class HDF5OutputLayer : public Layer<Dtype> {
   explicit HDF5OutputLayer(const LayerParameter& param);
   virtual ~HDF5OutputLayer();
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
+      vector<Blob<Dtype>*>* top) {}
   virtual Dtype Forward(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
   virtual void Backward(const vector<Blob<Dtype>*>& top,
       const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
     return;
   }
+
+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_HDF5_OUTPUT;
+  }
+  // TODO: no limit on the number of blobs
+  virtual inline int ExactNumBottomBlobs() const { return 2; }
+  virtual inline int ExactNumTopBlobs() const { return 0; }
+
   inline std::string file_name() const { return file_name_; }
 
  protected:
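The new type() and blob-count overrides give the net-construction machinery enough information to validate a model definition up front. A sketch of the kind of check these hints enable; the CheckBlobCounts helper below is an assumed illustration, not part of this diff:

```cpp
// Hypothetical enforcement sketch: compare a layer's declared blob counts
// against what the net actually wired up. For HDF5OutputLayer this would
// reject anything other than exactly 2 bottom blobs (data and label) and
// 0 top blobs.
template <typename Dtype>
void Layer<Dtype>::CheckBlobCounts(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  if (ExactNumBottomBlobs() >= 0) {
    CHECK_EQ(ExactNumBottomBlobs(), bottom.size())
        << "wrong number of bottom blobs for this layer type";
  }
  if (ExactNumTopBlobs() >= 0) {
    CHECK_EQ(ExactNumTopBlobs(), top.size())
        << "wrong number of top blobs for this layer type";
  }
}
```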

include/caffe/filler.hpp

Lines changed: 4 additions & 7 deletions
@@ -53,9 +53,8 @@ class UniformFiller : public Filler<Dtype> {
   virtual void Fill(Blob<Dtype>* blob) {
     CHECK(blob->count());
     GetDevice<Dtype>(Caffe::CPU)->rng_uniform(blob->count(),
-        Dtype(this->filler_param_.min()),
-        Dtype(this->filler_param_.max()),
-        blob->mutable_cpu_data());
+        Dtype(this->filler_param_.min()), Dtype(this->filler_param_.max()),
+        blob->mutable_cpu_data());
     CHECK_EQ(this->filler_param_.sparse(), -1)
         << "Sparsity not supported by this Filler.";
   }
@@ -69,10 +68,8 @@ class GaussianFiller : public Filler<Dtype> {
   virtual void Fill(Blob<Dtype>* blob) {
     Dtype* data = blob->mutable_cpu_data();
     CHECK(blob->count());
-    GetDevice<Dtype>(Caffe::CPU)->rng_gaussian(
-        blob->count(),
-        Dtype(this->filler_param_.mean()),
-        Dtype(this->filler_param_.std()),
+    GetDevice<Dtype>(Caffe::CPU)->rng_gaussian(blob->count(),
+        Dtype(this->filler_param_.mean()), Dtype(this->filler_param_.std()),
         blob->mutable_cpu_data());
     int sparse = this->filler_param_.sparse();
     CHECK_GE(sparse, -1);
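Both fillers now issue a single call into the CPU device wrapper. For context, a brief usage sketch; GetFiller is the factory declared in this same header, and the parameter values are illustrative:

```cpp
#include "caffe/blob.hpp"
#include "caffe/filler.hpp"

namespace caffe {

template <typename Dtype>
void InitWeightsExample(Blob<Dtype>* weights) {
  // Construct a Gaussian filler from a FillerParameter and apply it,
  // as a layer's SetUp typically does. Values are illustrative.
  FillerParameter filler_param;
  filler_param.set_type("gaussian");
  filler_param.set_mean(0.0);
  filler_param.set_std(0.01);
  shared_ptr<Filler<Dtype> > filler(GetFiller<Dtype>(filler_param));
  filler->Fill(weights);  // draws weights->count() values from N(0, 0.01^2)
}

}  // namespace caffe
```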

include/caffe/loss_layers.hpp

Lines changed: 22 additions & 32 deletions
@@ -31,6 +31,11 @@ class AccuracyLayer : public Layer<Dtype> {
       : Layer<Dtype>(param) {}
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
+  virtual Dtype Forward(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+  virtual void Backward(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down,
+      vector<Blob<Dtype>*>* bottom) { NOT_IMPLEMENTED; }
 
   virtual inline LayerParameter_LayerType type() const {
     return LayerParameter_LayerType_ACCURACY;
@@ -40,13 +45,6 @@ class AccuracyLayer : public Layer<Dtype> {
   virtual inline int ExactNumTopBlobs() const { return 1; }
 
  protected:
-  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
-  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
-      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
-    NOT_IMPLEMENTED;
-  }
-
   int top_k_;
 };
 
@@ -112,15 +110,14 @@ class HingeLossLayer : public LossLayer<Dtype> {
   explicit HingeLossLayer(const LayerParameter& param)
       : LossLayer<Dtype>(param) {}
 
+  virtual Dtype Forward(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+  virtual void Backward(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+
   virtual inline LayerParameter_LayerType type() const {
     return LayerParameter_LayerType_HINGE_LOSS;
   }
-
- protected:
-  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
-  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
-      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
 };
 
 /* InfogainLossLayer
@@ -154,16 +151,14 @@ class MultinomialLogisticLossLayer : public LossLayer<Dtype> {
       : LossLayer<Dtype>(param) {}
   virtual void FurtherSetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
+  virtual Dtype Forward(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+  virtual void Backward(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
 
   virtual inline LayerParameter_LayerType type() const {
     return LayerParameter_LayerType_MULTINOMIAL_LOGISTIC_LOSS;
   }
-
- protected:
-  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
-  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
-      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
 };
 
 /* SigmoidCrossEntropyLossLayer
@@ -177,21 +172,16 @@ class SigmoidCrossEntropyLossLayer : public LossLayer<Dtype> {
         sigmoid_output_(new Blob<Dtype>()) {}
   virtual void FurtherSetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
+  virtual Dtype Forward(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+  virtual void Backward(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
 
   virtual inline LayerParameter_LayerType type() const {
     return LayerParameter_LayerType_SIGMOID_CROSS_ENTROPY_LOSS;
   }
 
  protected:
-  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
-  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
-  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
-      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
-  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
-      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
-
   shared_ptr<SigmoidLayer<Dtype> > sigmoid_layer_;
   // sigmoid_output stores the output of the sigmoid layer.
   shared_ptr<Blob<Dtype> > sigmoid_output_;
@@ -218,10 +208,10 @@ class SoftmaxWithLossLayer : public Layer<Dtype> {
       : Layer<Dtype>(param), softmax_layer_(new SoftmaxLayer<Dtype>(param)) {}
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
-  virtual Dtype Forward(const vector<Blob<Dtype>*>& bottom,
-     vector<Blob<Dtype>*>* top);
-  virtual void Backward(const vector<Blob<Dtype>*>& top,
-     const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
+  virtual Dtype Forward(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+  virtual void Backward(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);
 
   virtual inline LayerParameter_LayerType type() const {
     return LayerParameter_LayerType_SOFTMAX_LOSS;
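With Forward and Backward now public on these loss layers, callers such as the gradient checker below can invoke them directly. As a reminder of what HingeLossLayer::Forward computes, here is a plain-CPU paraphrase of the standard (L1) hinge loss; it is a sketch of the formulation, not the code this diff moves:

```cpp
#include <algorithm>  // std::max

#include "caffe/blob.hpp"

namespace caffe {

// E = (1/num) * sum_n sum_k max(0, 1 - delta(k == y_n) * s_nk), where
// delta is +1 for the true class and -1 otherwise.
template <typename Dtype>
Dtype HingeLossSketch(const Blob<Dtype>& scores, const Blob<Dtype>& labels) {
  const Dtype* s = scores.cpu_data();
  const Dtype* y = labels.cpu_data();
  const int num = scores.num();          // batch size
  const int dim = scores.count() / num;  // classes per example
  Dtype loss = 0;
  for (int n = 0; n < num; ++n) {
    const int label = static_cast<int>(y[n]);
    for (int k = 0; k < dim; ++k) {
      const Dtype sign = (k == label) ? Dtype(1) : Dtype(-1);
      loss += std::max(Dtype(0), Dtype(1) - sign * s[n * dim + k]);
    }
  }
  return loss / num;
}

}  // namespace caffe
```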

include/caffe/syncedmem.hpp

Lines changed: 0 additions & 1 deletion
@@ -4,7 +4,6 @@
 #include <cstdlib>
 
 #include "caffe/common.hpp"
-#include "caffe/util/math_functions.hpp"
 
 namespace caffe {
 
include/caffe/test/test_gradient_check_util.hpp

Lines changed: 4 additions & 2 deletions
@@ -227,15 +227,17 @@ Dtype GradientChecker<Dtype>::GetObjAndGradient(vector<Blob<Dtype>*>* top,
         loss += top_blob_data[j] * top_blob_data[j];
       }
       // set the diff: simply the data.
-      caffe_copy(top_blob->count(), top_blob_data, top_blob_diff);
+      GetDevice<Dtype>(Caffe::CPU)->copy(top_blob->count(), top_blob_data,
+          top_blob_diff);
     }
     loss /= 2.;
   } else {
     // the loss will be the top_data_id-th element in the top_id-th blob.
     for (int i = 0; i < top->size(); ++i) {
       Blob<Dtype>* top_blob = (*top)[i];
       Dtype* top_blob_diff = top_blob->mutable_cpu_diff();
-      caffe_set(top_blob->count(), Dtype(0), top_blob_diff);
+      GetDevice<Dtype>(Caffe::CPU)->set(top_blob->count(), Dtype(0),
+          top_blob_diff);
     }
     loss = (*top)[top_id]->cpu_data()[top_data_id];
     (*top)[top_id]->mutable_cpu_diff()[top_data_id] = 1.;
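Both replaced calls seed the top diff for the checker's synthetic objective, now through the CPU device wrapper instead of the raw caffe_copy/caffe_set helpers. Why copying data into diff is the right seed, as a worked equation (a summary of the surrounding code, not text from the commit):

```latex
% When no single top element is selected, the checker's objective is the
% half sum of squares of all top values y_j, so
L = \tfrac{1}{2} \sum_i y_i^{2}
\quad\Longrightarrow\quad
\frac{\partial L}{\partial y_j} = y_j
% i.e. the seeded diff is an exact copy of the data, and the accumulated
% sum of squares is halved afterwards (loss /= 2.).
```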

src/caffe/blob.cpp

Lines changed: 15 additions & 29 deletions
@@ -167,20 +167,20 @@ void Blob<Dtype>::Update() {
   switch (data_->head()) {
   case SyncedMemory::HEAD_AT_CPU:
     // perform computation on CPU
-    GetDevice<Dtype>(Caffe::CPU)->axpy(
-        count_,
-        Dtype(-1),
+    GetDevice<Dtype>(Caffe::CPU)->axpy(count_, Dtype(-1),
         static_cast<const Dtype*>(diff_->cpu_data()),
         static_cast<Dtype*>(data_->mutable_cpu_data()));
     break;
   case SyncedMemory::HEAD_AT_GPU:
   case SyncedMemory::SYNCED:
+#ifndef CPU_ONLY
     // perform computation on GPU
-    GetDevice<Dtype>(Caffe::GPU)->axpy(
-        count_,
-        Dtype(-1),
-        reinterpret_cast<const Dtype*>(diff_->gpu_data()),
-        reinterpret_cast<Dtype*>(data_->mutable_gpu_data()));
+    GetDevice<Dtype>(Caffe::GPU)->axpy(count_, Dtype(-1),
+        static_cast<const Dtype*>(diff_->gpu_data()),
+        static_cast<Dtype*>(data_->mutable_gpu_data()));
+#else
+    NO_GPU;
+#endif
     break;
   default:
     LOG(FATAL) << "Syncedmem not initialized.";
@@ -267,27 +267,13 @@ void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
       LOG(FATAL) << "Trying to copy blobs of different sizes.";
     }
   }
-  switch (Caffe::mode()) {
-  case Caffe::GPU:
-    if (copy_diff) {
-      caffe_copy(count_, source.gpu_diff(),
-          static_cast<Dtype*>(diff_->mutable_gpu_data()));
-    } else {
-      caffe_copy(count_, source.gpu_data(),
-          static_cast<Dtype*>(data_->mutable_gpu_data()));
-    }
-    break;
-  case Caffe::CPU:
-    if (copy_diff) {
-      caffe_copy(count_, source.cpu_diff(),
-          static_cast<Dtype*>(diff_->mutable_cpu_data()));
-    } else {
-      caffe_copy(count_, source.cpu_data(),
-          static_cast<Dtype*>(data_->mutable_cpu_data()));
-    }
-    break;
-  default:
-    LOG(FATAL) << "Unknown caffe mode.";
+
+  if (copy_diff) {
+    GetDevice<Dtype>()->copy(count_, source.const_diff(),
+        static_cast<Dtype*>(diff_->mutable_data()));
+  } else {
+    GetDevice<Dtype>()->copy(count_, source.const_data(),
+        static_cast<Dtype*>(data_->mutable_data()));
   }
 }
 

src/caffe/devices/gpu.cpp

Lines changed: 1 addition & 0 deletions
@@ -125,6 +125,7 @@ void GPUDevice<Dtype>::axpby(const int N, const Dtype alpha,
 }
 
 template<typename Dtype>
+/* NOLINT_NEXT_LINE(build/include_what_you_use) */
 void GPUDevice<Dtype>::copy(const int N, const Dtype *X, Dtype *Y) {
   CUDA_CHECK(cudaMemcpy(Y, X, sizeof(Dtype) * N, cudaMemcpyDefault));
 }
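The single cudaMemcpy with cudaMemcpyDefault handles every host/device pointer combination because the transfer direction is inferred from the addresses; this relies on unified virtual addressing (64-bit platforms, CUDA 4.0 and later). A small usage sketch with hypothetical buffers:

```cpp
// Sketch: one call shape covers host->device and device->host because
// cudaMemcpyDefault infers the direction from the pointer addresses.
void CopyRoundTripExample() {
  float host_src[16] = {0};
  float host_dst[16] = {0};
  float* dev_buf = NULL;
  CUDA_CHECK(cudaMalloc(&dev_buf, sizeof(host_src)));
  CUDA_CHECK(cudaMemcpy(dev_buf, host_src, sizeof(host_src),
      cudaMemcpyDefault));  // host -> device
  CUDA_CHECK(cudaMemcpy(host_dst, dev_buf, sizeof(host_dst),
      cudaMemcpyDefault));  // device -> host
  CUDA_CHECK(cudaFree(dev_buf));
}
```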

src/caffe/devices/gpu.cu

Lines changed: 2 additions & 0 deletions
@@ -428,8 +428,10 @@ __global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
     int c = index / (width * height);
     // compute the start and end of the output
     int w_col_start = (w < ksize) ? 0 : (w - ksize) / stride + 1;
+    /* NOLINT_NEXT_LINE(build/include_what_you_use) */
     int w_col_end = min(w / stride + 1, width_col);
     int h_col_start = (h < ksize) ? 0 : (h - ksize) / stride + 1;
+    /* NOLINT_NEXT_LINE(build/include_what_you_use) */
     int h_col_end = min(h / stride + 1, height_col);
     /*
     for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
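For readers tracing the kernel, the bounds bracketed by those NOLINT lines come from the patch-overlap condition; this derivation is one reading of the code, not text from the commit:

```latex
% A patch whose left edge is at column w_col covers input columns
%   w_col * stride <= w <= w_col * stride + ksize - 1.
% Solving for the patch indices that touch input column w:
\left\lceil \frac{w - ksize + 1}{stride} \right\rceil
  \;\le\; w_{col} \;\le\;
\left\lfloor \frac{w}{stride} \right\rfloor
% With integer floor division this is exactly
%   w_col_start = (w < ksize) ? 0 : (w - ksize) / stride + 1
%   w_col_end   = min(w / stride + 1, width_col)   (exclusive end)
% and the h bounds follow by the same argument.
```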
