diff --git a/code/DBN.py b/code/DBN.py
index b0cc0569..b54ac5bc 100644
--- a/code/DBN.py
+++ b/code/DBN.py
@@ -2,7 +2,7 @@
 """
 import os
 import sys
-import time
+import timeit

 import numpy

@@ -327,7 +327,7 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,
                                                 k=k)

     print '... pre-training the model'
-    start_time = time.clock()
+    start_time = timeit.default_timer()
     ## Pre-train layer-wise
     for i in xrange(dbn.n_layers):
         # go through pretraining epochs
@@ -340,7 +340,7 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,
             print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
             print numpy.mean(c)

-    end_time = time.clock()
+    end_time = timeit.default_timer()
     # end-snippet-2
     print >> sys.stderr, ('The pretraining code for file ' +
                           os.path.split(__file__)[1] +
@@ -372,7 +372,7 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,

     best_validation_loss = numpy.inf
     test_score = 0.
-    start_time = time.clock()
+    start_time = timeit.default_timer()

     done_looping = False
     epoch = 0
@@ -424,7 +424,7 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,
             done_looping = True
             break

-    end_time = time.clock()
+    end_time = timeit.default_timer()
     print(
         (
             'Optimization complete with best validation score of %f %%, '
diff --git a/code/SdA.py b/code/SdA.py
index fafa73b5..82660e99 100644
--- a/code/SdA.py
+++ b/code/SdA.py
@@ -31,7 +31,7 @@
 """
 import os
 import sys
-import time
+import timeit

 import numpy

@@ -379,7 +379,7 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,
                                                 batch_size=batch_size)

     print '... pre-training the model'
-    start_time = time.clock()
+    start_time = timeit.default_timer()
     ## Pre-train layer-wise
     corruption_levels = [.1, .2, .3]
     for i in xrange(sda.n_layers):
@@ -394,7 +394,7 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,
             print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
             print numpy.mean(c)

-    end_time = time.clock()
+    end_time = timeit.default_timer()

     print >> sys.stderr, ('The pretraining code for file ' +
                           os.path.split(__file__)[1] +
@@ -427,7 +427,7 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,

     best_validation_loss = numpy.inf
     test_score = 0.
-    start_time = time.clock()
+    start_time = timeit.default_timer()

     done_looping = False
     epoch = 0
@@ -471,7 +471,7 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,
                 done_looping = True
                 break

-    end_time = time.clock()
+    end_time = timeit.default_timer()
     print(
         (
             'Optimization complete with best validation score of %f %%, '
diff --git a/code/cA.py b/code/cA.py
index c7ccd2b0..e26a1ddf 100644
--- a/code/cA.py
+++ b/code/cA.py
@@ -30,7 +30,7 @@
 """
 import os
 import sys
-import time
+import timeit

 import numpy

@@ -276,7 +276,7 @@ def test_cA(learning_rate=0.01, training_epochs=20,
         }
     )

-    start_time = time.clock()
+    start_time = timeit.default_timer()

     ############
     # TRAINING #
@@ -293,7 +293,7 @@ def test_cA(learning_rate=0.01, training_epochs=20,
         print 'Training epoch %d, reconstruction cost ' % epoch, numpy.mean(
             c_array[0]), ' jacobian norm ', numpy.mean(numpy.sqrt(c_array[1]))

-    end_time = time.clock()
+    end_time = timeit.default_timer()

     training_time = (end_time - start_time)
diff --git a/code/convolutional_mlp.py b/code/convolutional_mlp.py
index 0d88240d..b5278583 100644
--- a/code/convolutional_mlp.py
+++ b/code/convolutional_mlp.py
@@ -23,7 +23,7 @@
 """
 import os
 import sys
-import time
+import timeit

 import numpy

@@ -274,7 +274,7 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     best_validation_loss = numpy.inf
     best_iter = 0
     test_score = 0.
-    start_time = time.clock()
+    start_time = timeit.default_timer()

     epoch = 0
     done_looping = False
@@ -326,7 +326,7 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
             done_looping = True
             break

-    end_time = time.clock()
+    end_time = timeit.default_timer()
     print('Optimization complete.')
     print('Best validation score of %f %% obtained at iteration %i, '
           'with test performance %f %%' %
diff --git a/code/dA.py b/code/dA.py
index 19457aac..8ea94e33 100644
--- a/code/dA.py
+++ b/code/dA.py
@@ -32,7 +32,7 @@

 import os
 import sys
-import time
+import timeit

 import numpy

@@ -321,7 +321,7 @@ def test_dA(learning_rate=0.1, training_epochs=15,
         }
     )

-    start_time = time.clock()
+    start_time = timeit.default_timer()

     ############
     # TRAINING #
@@ -336,7 +336,7 @@ def test_dA(learning_rate=0.1, training_epochs=15,

         print 'Training epoch %d, cost ' % epoch, numpy.mean(c)

-    end_time = time.clock()
+    end_time = timeit.default_timer()

     training_time = (end_time - start_time)

@@ -379,7 +379,7 @@ def test_dA(learning_rate=0.1, training_epochs=15,
         }
     )

-    start_time = time.clock()
+    start_time = timeit.default_timer()

     ############
     # TRAINING #
@@ -394,7 +394,7 @@ def test_dA(learning_rate=0.1, training_epochs=15,

         print 'Training epoch %d, cost ' % epoch, numpy.mean(c)

-    end_time = time.clock()
+    end_time = timeit.default_timer()

     training_time = (end_time - start_time)
diff --git a/code/logistic_cg.py b/code/logistic_cg.py
index 05f562a1..4b4f2172 100644
--- a/code/logistic_cg.py
+++ b/code/logistic_cg.py
@@ -38,7 +38,7 @@

 import os
 import sys
-import time
+import timeit

 import numpy

@@ -275,7 +275,7 @@ def callback(theta_value):
     # using scipy conjugate gradient optimizer
     import scipy.optimize
     print ("Optimizing using scipy.optimize.fmin_cg...")
-    start_time = time.clock()
+    start_time = timeit.default_timer()
     best_w_b = scipy.optimize.fmin_cg(
         f=train_fn,
         x0=numpy.zeros((n_in + 1) * n_out, dtype=x.dtype),
@@ -284,7 +284,7 @@ def callback(theta_value):
         disp=0,
         maxiter=n_epochs
     )
-    end_time = time.clock()
+    end_time = timeit.default_timer()
     print(
         (
             'Optimization complete with best validation score of %f %%, with '
diff --git a/code/logistic_sgd.py b/code/logistic_sgd.py
index 599f5658..d197b960 100644
--- a/code/logistic_sgd.py
+++ b/code/logistic_sgd.py
@@ -38,7 +38,7 @@
 import gzip
 import os
 import sys
-import time
+import timeit

 import numpy

@@ -360,7 +360,7 @@ def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,

     best_validation_loss = numpy.inf
     test_score = 0.
-    start_time = time.clock()
+    start_time = timeit.default_timer()

     done_looping = False
     epoch = 0
@@ -419,7 +419,7 @@ def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
             done_looping = True
             break

-    end_time = time.clock()
+    end_time = timeit.default_timer()
     print(
         (
             'Optimization complete with best validation score of %f %%,'
diff --git a/code/lstm.py b/code/lstm.py
index cc4ab748..b64970fb 100644
--- a/code/lstm.py
+++ b/code/lstm.py
@@ -543,7 +543,7 @@ def train_lstm(
     uidx = 0  # the number of update done
     estop = False  # early stop

-    start_time = time.clock()
+    start_time = time.time()
     try:
         for eidx in xrange(max_epochs):
             n_samples = 0
@@ -622,7 +622,7 @@ def train_lstm(
     except KeyboardInterrupt:
         print "Training interupted"

-    end_time = time.clock()
+    end_time = time.time()
     if best_p is not None:
         zipp(best_p, tparams)
     else:
diff --git a/code/mlp.py b/code/mlp.py
index e4b95ea8..77d8002a 100644
--- a/code/mlp.py
+++ b/code/mlp.py
@@ -23,7 +23,7 @@

 import os
 import sys
-import time
+import timeit

 import numpy

@@ -336,7 +336,7 @@ def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
     best_validation_loss = numpy.inf
     best_iter = 0
     test_score = 0.
-    start_time = time.clock()
+    start_time = timeit.default_timer()

     epoch = 0
     done_looping = False
@@ -391,7 +391,7 @@ def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
             done_looping = True
             break

-    end_time = time.clock()
+    end_time = timeit.default_timer()
     print(('Optimization complete. Best validation score of %f %% '
            'obtained at iteration %i, with test performance %f %%') %
           (best_validation_loss * 100., best_iter + 1, test_score * 100.))
diff --git a/code/rbm.py b/code/rbm.py
index 2c821fc9..1ba4c86d 100644
--- a/code/rbm.py
+++ b/code/rbm.py
@@ -4,7 +4,7 @@ contain hidden variables. Restricted Boltzmann Machines further restrict
 BMs to those without visible-visible and hidden-hidden connections.
 """
-import time
+import timeit

 try:
     import PIL.Image as Image
@@ -428,7 +428,7 @@ def test_rbm(learning_rate=0.1, training_epochs=15,
     )

     plotting_time = 0.
-    start_time = time.clock()
+    start_time = timeit.default_timer()

     # go through training epochs
     for epoch in xrange(training_epochs):
@@ -441,7 +441,7 @@ def test_rbm(learning_rate=0.1, training_epochs=15,
         print 'Training epoch %d, cost is ' % epoch, numpy.mean(mean_cost)

         # Plot filters after each training epoch
-        plotting_start = time.clock()
+        plotting_start = timeit.default_timer()
         # Construct image from the weight matrix
         image = Image.fromarray(
             tile_raster_images(
@@ -452,10 +452,10 @@ def test_rbm(learning_rate=0.1, training_epochs=15,
             )
         )
         image.save('filters_at_epoch_%i.png' % epoch)
-        plotting_stop = time.clock()
+        plotting_stop = timeit.default_timer()
         plotting_time += (plotting_stop - plotting_start)

-    end_time = time.clock()
+    end_time = timeit.default_timer()

     pretraining_time = (end_time - start_time) - plotting_time
diff --git a/code/rnnslu.py b/code/rnnslu.py
index 65363688..fad14db5 100644
--- a/code/rnnslu.py
+++ b/code/rnnslu.py
@@ -8,7 +8,7 @@
 import stat
 import subprocess
 import sys
-import time
+import timeit

 import numpy

@@ -318,13 +318,13 @@ def main(param=None):
         shuffle([train_lex, train_ne, train_y], param['seed'])

         param['ce'] = e
-        tic = time.time()
+        tic = timeit.default_timer()
         for i, (x, y) in enumerate(zip(train_lex, train_y)):
             rnn.train(x, y, param['win'], param['clr'])
             print '[learning] epoch %i >> %2.2f%%' % (
                 e, (i + 1) * 100. / nsentences),
-            print 'completed in %.2f (sec) <<\r' % (time.time() - tic),
+            print 'completed in %.2f (sec) <<\r' % (timeit.default_timer() - tic),
             sys.stdout.flush()

         # evaluation // back into the real world : idx -> words
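
Note on the motivation: time.clock() measures CPU time on Unix but wall-clock time on Windows, was deprecated in Python 3.3, and is removed in Python 3.8. timeit.default_timer() always reads a wall clock (time.perf_counter() on Python 3; on Python 2, time.time() on non-Windows platforms and time.clock() on Windows), so the reported timings become comparable across platforms. That also explains lstm.py and the old rnnslu.py line: on non-Windows Python 2, time.time() and timeit.default_timer() are the same function. Below is a minimal sketch of the timing pattern every hunk above converges on; run_training is a hypothetical stand-in for a script's training loop, not a function from this repository:

    import timeit

    def run_training():
        # Hypothetical stand-in for one of the tutorial training loops.
        return sum(x * x for x in range(10 ** 6))

    start_time = timeit.default_timer()
    run_training()
    end_time = timeit.default_timer()
    # default_timer() returns wall-clock seconds on every platform, so the
    # difference below is elapsed real time; with time.clock() the same code
    # reported CPU time on Unix and wall time on Windows.
    print('ran for %.2f seconds' % (end_time - start_time))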