11"""
22"""
3+ from __future__ import print_function , division
34import os
45import sys
56import timeit
@@ -61,9 +62,12 @@ def __init__(self, numpy_rng, theano_rng=None, n_ins=784,
             theano_rng = MRG_RandomStreams(numpy_rng.randint(2 ** 30))
 
         # allocate symbolic variables for the data
-        self.x = T.matrix('x')  # the data is presented as rasterized images
-        self.y = T.ivector('y')  # the labels are presented as 1D vector
-                                 # of [int] labels
+
+        # the data is presented as rasterized images
+        self.x = T.matrix('x')
+
+        # the labels are presented as 1D vector of [int] labels
+        self.y = T.ivector('y')
         # end-snippet-1
         # The DBN is an MLP, for which all weights of intermediate
         # layers are shared with a different RBM. We will first
@@ -156,8 +160,6 @@ def pretraining_functions(self, train_set_x, batch_size, k):
         index = T.lscalar('index')  # index to a minibatch
         learning_rate = T.scalar('lr')  # learning rate to use
 
-        # number of batches
-        n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
         # beginning of a batch, given `index`
         batch_begin = index * batch_size
         # ending of a batch given `index`
@@ -211,9 +213,9 @@ def build_finetune_functions(self, datasets, batch_size, learning_rate):
 
         # compute number of minibatches for training, validation and testing
         n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
-        n_valid_batches /= batch_size
+        n_valid_batches //= batch_size
         n_test_batches = test_set_x.get_value(borrow=True).shape[0]
-        n_test_batches /= batch_size
+        n_test_batches //= batch_size
 
         index = T.lscalar('index')  # index to a [mini]batch
 
@@ -307,11 +309,11 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,
     test_set_x, test_set_y = datasets[2]
 
     # compute number of minibatches for training, validation and testing
-    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
+    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
 
     # numpy random generator
     numpy_rng = numpy.random.RandomState(123)
-    print '... building the model'
+    print('... building the model')
     # construct the Deep Belief Network
     dbn = DBN(numpy_rng=numpy_rng, n_ins=28 * 28,
               hidden_layers_sizes=[1000, 1000, 1000],
@@ -321,14 +323,14 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,
     #########################
     # PRETRAINING THE MODEL #
     #########################
-    print '... getting the pretraining functions'
+    print('... getting the pretraining functions')
     pretraining_fns = dbn.pretraining_functions(train_set_x=train_set_x,
                                                 batch_size=batch_size,
                                                 k=k)
 
-    print '... pre-training the model'
+    print('... pre-training the model')
     start_time = timeit.default_timer()
-    ## Pre-train layer-wise
+    # Pre-train layer-wise
     for i in range(dbn.n_layers):
         # go through pretraining epochs
         for epoch in range(pretraining_epochs):
@@ -337,38 +339,40 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,
             for batch_index in range(n_train_batches):
                 c.append(pretraining_fns[i](index=batch_index,
                                             lr=pretrain_lr))
-            print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
-            print numpy.mean(c)
+            print('Pre-training layer %i, epoch %d, cost ' % (i, epoch), end=' ')
+            print(numpy.mean(c))
 
     end_time = timeit.default_timer()
     # end-snippet-2
-    print >> sys.stderr, ('The pretraining code for file ' +
-                          os.path.split(__file__)[1] +
-                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
+    print('The pretraining code for file ' + os.path.split(__file__)[1] +
+          ' ran for %.2fm' % ((end_time - start_time) / 60.), file=sys.stderr)
     ########################
     # FINETUNING THE MODEL #
     ########################
 
     # get the training, validation and testing function for the model
-    print '... getting the finetuning functions'
+    print('... getting the finetuning functions')
     train_fn, validate_model, test_model = dbn.build_finetune_functions(
         datasets=datasets,
         batch_size=batch_size,
         learning_rate=finetune_lr
     )
 
-    print '... finetuning the model'
+    print('... finetuning the model')
     # early-stopping parameters
-    patience = 4 * n_train_batches  # look as this many examples regardless
-    patience_increase = 2.    # wait this much longer when a new best is
-                              # found
-    improvement_threshold = 0.995  # a relative improvement of this much is
-                                   # considered significant
363+
364+ # look as this many examples regardless
365+ patience = 4 * n_train_batches
+
+    # wait this much longer when a new best is found
+    patience_increase = 2.
+
+    # a relative improvement of this much is considered significant
+    improvement_threshold = 0.995
+
+    # go through this many minibatches before checking the network on
+    # the validation set; in this case we check every epoch
     validation_frequency = min(n_train_batches, patience / 2)
-                                  # go through this many
-                                  # minibatches before checking the network
-                                  # on the validation set; in this case we
-                                  # check every epoch
 
     best_validation_loss = numpy.inf
     test_score = 0.
@@ -381,31 +385,27 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,
         epoch = epoch + 1
         for minibatch_index in range(n_train_batches):
 
-            minibatch_avg_cost = train_fn(minibatch_index)
+            train_fn(minibatch_index)
             iter = (epoch - 1) * n_train_batches + minibatch_index
 
             if (iter + 1) % validation_frequency == 0:
 
                 validation_losses = validate_model()
                 this_validation_loss = numpy.mean(validation_losses)
-                print(
-                    'epoch %i, minibatch %i/%i, validation error %f %%'
-                    % (
-                        epoch,
-                        minibatch_index + 1,
-                        n_train_batches,
-                        this_validation_loss * 100.
+                print('epoch %i, minibatch %i/%i, validation error %f %%' % (
+                    epoch,
+                    minibatch_index + 1,
+                    n_train_batches,
+                    this_validation_loss * 100.
                     )
                 )
 
                 # if we got the best validation score until now
                 if this_validation_loss < best_validation_loss:
 
-                    #improve patience if loss improvement is good enough
-                    if (
-                        this_validation_loss < best_validation_loss *
-                        improvement_threshold
-                    ):
+                    # improve patience if loss improvement is good enough
+                    if (this_validation_loss < best_validation_loss *
+                        improvement_threshold):
                         patience = max(patience, iter * patience_increase)
 
                     # save best validation score and iteration number
@@ -418,24 +418,19 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,
                     print((' epoch %i, minibatch %i/%i, test error of '
                            'best model %f %%') %
                           (epoch, minibatch_index + 1, n_train_batches,
-                          test_score * 100.))
+                           test_score * 100.))
 
             if patience <= iter:
                 done_looping = True
                 break
 
     end_time = timeit.default_timer()
-    print(
-        (
-            'Optimization complete with best validation score of %f %%, '
-            'obtained at iteration %i, '
-            'with test performance %f %%'
-        ) % (best_validation_loss * 100., best_iter + 1, test_score * 100.)
-    )
-    print >> sys.stderr, ('The fine tuning code for file ' +
-                          os.path.split(__file__)[1] +
-                          ' ran for %.2fm' % ((end_time - start_time)
-                                              / 60.))
+    print(('Optimization complete with best validation score of %f %%, '
+           'obtained at iteration %i, '
+           'with test performance %f %%'
+           ) % (best_validation_loss * 100., best_iter + 1, test_score * 100.))
+    print('The fine tuning code for file ' + os.path.split(__file__)[1] +
+          ' ran for %.2fm' % ((end_time - start_time) / 60.), file=sys.stderr)
 
 
 if __name__ == '__main__':
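
For reference, below the patch: a minimal standalone sketch of the three Python 2/3 compatibility idioms this commit applies (the print function, floor division for batch counts, and printing to stderr). It is not part of the commit; the sizes are made-up illustrations, not values from the tutorial.

from __future__ import print_function, division

import sys

# Hypothetical sizes, for illustration only.
n_examples = 50000
batch_size = 10

# With `division` imported, `/` is true division even on Python 2, so
# 50000 / 10 would give a float. Batch counts feed range(), so they
# must stay integers; hence the commit's switch to floor division `//`.
n_train_batches = n_examples // batch_size

# The print function replaces the Python 2 print statement: `end=' '`
# stands in for the old trailing comma, and `file=sys.stderr` replaces
# `print >> sys.stderr, ...`.
print('number of minibatches', end=' ')
print(n_train_batches)
print('timing info goes to stderr', file=sys.stderr)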