@@ -78,10 +78,10 @@ def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
         W_bound = numpy.sqrt(6. / (fan_in + fan_out))
         self.W = theano.shared(
             numpy.asarray(
-                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
-                dtype=theano.config.floatX
+                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
+                dtype=theano.config.floatX
             ),
-            borrow=True
+            borrow=True
         )
 
         # the bias is a 1D tensor -- one bias per output feature map
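The bound above is the Glorot-style uniform initialization limit, W_bound = sqrt(6 / (fan_in + fan_out)); fan_in and fan_out are computed a few lines before this hunk and are not part of the diff. A minimal numpy-only sketch of the same sampling, assuming the tutorial's fan definitions and its default first-layer arguments (nkerns[0] = 20, 5x5 filters, 2x2 pooling):

    import numpy

    # Hypothetical stand-ins for the layer0 arguments used later in this diff.
    filter_shape = (20, 1, 5, 5)    # (n_filters, n_input_maps, filter_h, filter_w)
    poolsize = (2, 2)

    # fan_in / fan_out as defined earlier in the tutorial file (assumed here):
    # inputs feeding each unit, and outputs "seen" through the pooling region.
    fan_in = numpy.prod(filter_shape[1:])                # 25
    fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:])
               // numpy.prod(poolsize))                  # 125

    W_bound = numpy.sqrt(6. / (fan_in + fan_out))        # 0.2
    rng = numpy.random.RandomState(1234)
    W = rng.uniform(low=-W_bound, high=W_bound, size=filter_shape)
    print(W.shape)                                       # (20, 1, 5, 5)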
@@ -90,17 +90,17 @@ def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
 
         # convolve input feature maps with filters
         conv_out = conv.conv2d(
-            input=input,
-            filters=self.W,
-            filter_shape=filter_shape,
-            image_shape=image_shape
+            input=input,
+            filters=self.W,
+            filter_shape=filter_shape,
+            image_shape=image_shape
         )
 
         # downsample each feature map individually, using maxpooling
         pooled_out = downsample.max_pool_2d(
-            input=conv_out,
-            ds=poolsize,
-            ignore_border=True
+            input=conv_out,
+            ds=poolsize,
+            ignore_border=True
         )
 
         # add the bias term. Since the bias is a vector (1D array), we first
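For context on the two calls above: conv.conv2d defaults to 'valid' border mode, so each feature map shrinks by filter_size - 1, and max_pool_2d with ignore_border=True then floors the result by the pool factor. A small pure-Python sketch of that arithmetic (the helper name is illustrative only); it also accounts for the 12x12 and 4x4 shapes quoted in the layer comments further down:

    def conv_pool_out(size, filter_size, pool):
        """Spatial size after a 'valid' convolution followed by
        non-overlapping max pooling with ignore_border=True."""
        return (size - filter_size + 1) // pool

    print(conv_pool_out(28, 5, 2))   # 12  (layer0: 28x28 -> 24x24 -> 12x12)
    print(conv_pool_out(12, 5, 2))   # 4   (layer1: 12x12 ->  8x8  ->  4x4)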
@@ -141,9 +141,9 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     test_set_x, test_set_y = datasets[2]
 
     # compute number of minibatches for training, validation and testing
-    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
-    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
-    n_test_batches = test_set_x.get_value(borrow=True).shape[0]
+    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
+    n_test_batches = test_set_x.get_value(borrow=True).shape[0]
     n_train_batches /= batch_size
     n_valid_batches /= batch_size
     n_test_batches /= batch_size
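A caveat on the unchanged lines just above: n_train_batches /= batch_size relies on Python 2 integer division (the tutorial targets Python 2); under Python 3 the same statements would yield floats and break the minibatch indexing later in the script. A version-independent sketch of the same computation, using the tutorial's defaults (50,000 MNIST training examples, batch_size=500) as assumed values:

    n_train_examples = 50000    # stand-in for train_set_x.get_value(borrow=True).shape[0]
    batch_size = 500            # evaluate_lenet5 default
    n_train_batches = n_train_examples // batch_size    # 100, floored explicitly
    print(n_train_batches)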
@@ -171,10 +171,10 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     # 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)
     layer0 = LeNetConvPoolLayer(
         rng,
-        input=layer0_input,
-        image_shape=(batch_size, 1, 28, 28),
-        filter_shape=(nkerns[0], 1, 5, 5),
-        poolsize=(2, 2)
+        input=layer0_input,
+        image_shape=(batch_size, 1, 28, 28),
+        filter_shape=(nkerns[0], 1, 5, 5),
+        poolsize=(2, 2)
     )
 
     # Construct the second convolutional pooling layer
@@ -183,10 +183,10 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     # 4D output tensor is thus of shape (batch_size, nkerns[1], 4, 4)
     layer1 = LeNetConvPoolLayer(
         rng,
-        input=layer0.output,
-        image_shape=(batch_size, nkerns[0], 12, 12),
-        filter_shape=(nkerns[1], nkerns[0], 5, 5),
-        poolsize=(2, 2)
+        input=layer0.output,
+        image_shape=(batch_size, nkerns[0], 12, 12),
+        filter_shape=(nkerns[1], nkerns[0], 5, 5),
+        poolsize=(2, 2)
     )
 
     # the HiddenLayer being fully-connected, it operates on 2D matrices of
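As the trailing comment notes, HiddenLayer operates on 2D matrices of shape (batch_size, num_pixels), while layer1.output is 4D with shape (batch_size, nkerns[1], 4, 4). In the tutorial the reshape is done with layer2_input = layer1.output.flatten(2) a couple of lines below this hunk (not part of the diff). A numpy sketch of the equivalent operation, assuming the default nkerns = [20, 50] and batch_size = 500:

    import numpy

    batch_size, nkerns = 500, [20, 50]               # tutorial defaults (assumption)
    layer1_out = numpy.zeros((batch_size, nkerns[1], 4, 4))

    # same effect as layer1.output.flatten(2) on the symbolic 4D tensor:
    layer2_input = layer1_out.reshape(batch_size, -1)
    print(layer2_input.shape)                        # (500, 800), i.e. nkerns[1] * 4 * 4 columns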
@@ -197,14 +197,14 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     # construct a fully-connected sigmoidal layer
     layer2 = HiddenLayer(
         rng,
-        input=layer2_input,
-        n_in=nkerns[1] * 4 * 4,
-        n_out=500,
-        activation=T.tanh
+        input=layer2_input,
+        n_in=nkerns[1] * 4 * 4,
+        n_out=500,
+        activation=T.tanh
     )
 
     # classify the values of the fully-connected sigmoidal layer
-    layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
+    layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
 
     # the cost we minimize during training is the NLL of the model
     cost = layer3.negative_log_likelihood(y)
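The cost above is the mean negative log-likelihood provided by the LogisticRegression class from the earlier logistic regression tutorial. As a reminder of what that expression computes, here is a standalone sketch, assuming the class exposes the per-class probabilities as p_y_given_x (as it does in that tutorial):

    import theano.tensor as T

    def negative_log_likelihood(p_y_given_x, y):
        """Mean over the minibatch of -log P(y_i | x_i): for each row i of
        p_y_given_x, pick the probability assigned to the correct class y[i]."""
        return -T.mean(T.log(p_y_given_x)[T.arange(y.shape[0]), y])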
@@ -213,7 +213,7 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     test_model = theano.function(
         [index],
         layer3.errors(y),
-        givens={
+        givens={
             x: test_set_x[index * batch_size: (index + 1) * batch_size],
             y: test_set_y[index * batch_size: (index + 1) * batch_size]
         }
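The givens dictionary substitutes the index-th minibatch slice of the shared test set for the symbolic x and y, so the whole dataset can live in one shared variable (on the GPU when one is available) and each call only touches one slice. Further down, the tutorial averages this compiled function over all minibatches, roughly as follows (a usage sketch reusing names from this script):

    test_losses = [test_model(i) for i in range(n_test_batches)]
    test_score = numpy.mean(test_losses)
    print('test error: %f %%' % (test_score * 100.))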
@@ -222,7 +222,7 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     validate_model = theano.function(
         [index],
         layer3.errors(y),
-        givens={
+        givens={
             x: valid_set_x[index * batch_size: (index + 1) * batch_size],
             y: valid_set_y[index * batch_size: (index + 1) * batch_size]
         }
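Between this hunk and the next one the script builds the updates list that train_model consumes below: it collects every layer's parameters, takes the gradient of the cost with respect to each, and forms plain SGD steps. That part is untouched by this commit; for context it looks roughly like this (a sketch following the tutorial's structure):

    # create a list of all model parameters to be fit by gradient descent
    params = layer3.params + layer2.params + layer1.params + layer0.params

    # list of symbolic gradients of the cost w.r.t. each parameter
    grads = T.grad(cost, params)

    # plain SGD update rule: param_i <- param_i - learning_rate * grad_i
    updates = [
        (param_i, param_i - learning_rate * grad_i)
        for param_i, grad_i in zip(params, grads)
    ]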
@@ -247,8 +247,8 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     train_model = theano.function(
         [index],
         cost,
-        updates=updates,
-        givens={
+        updates=updates,
+        givens={
             x: train_set_x[index * batch_size: (index + 1) * batch_size],
             y: train_set_y[index * batch_size: (index + 1) * batch_size]
         }
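For completeness, train_model is driven by the tutorial's early-stopping loop further down the file, which is also unchanged by this commit. Its core, with the patience bookkeeping omitted, is roughly (a usage sketch reusing names from this script):

    for epoch in range(n_epochs):
        for minibatch_index in range(n_train_batches):
            cost_ij = train_model(minibatch_index)            # one SGD step
            iteration = epoch * n_train_batches + minibatch_index
            if (iteration + 1) % validation_frequency == 0:
                validation_losses = [validate_model(i)
                                     for i in range(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)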