Skip to content

Commit 61c1dda

Browse files
committed
Merge pull request lisa-lab#19 from lisa-lab/fix_iter_index
[WIP] Fix off-by-one error in numbering of epochs in most tutorials
2 parents 42cc08b + 2dd943a commit 61c1dda

File tree

7 files changed

+18
-16
lines changed

7 files changed

+18
-16
lines changed

code/DBN.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -359,7 +359,7 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,
359359
for minibatch_index in xrange(n_train_batches):
360360

361361
minibatch_avg_cost = train_fn(minibatch_index)
362-
iter = epoch * n_train_batches + minibatch_index
362+
iter = (epoch - 1) * n_train_batches + minibatch_index
363363

364364
if (iter + 1) % validation_frequency == 0:
365365

code/SdA.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -395,9 +395,10 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,
395395
epoch = 0
396396

397397
while (epoch < training_epochs) and (not done_looping):
398+
epoch = epoch + 1
398399
for minibatch_index in xrange(n_train_batches):
399400
minibatch_avg_cost = train_fn(minibatch_index)
400-
iter = epoch * n_train_batches + minibatch_index
401+
iter = (epoch - 1) * n_train_batches + minibatch_index
401402

402403
if (iter + 1) % validation_frequency == 0:
403404
validation_losses = validate_model()
@@ -429,7 +430,6 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,
429430
if patience <= iter:
430431
done_looping = True
431432
break
432-
epoch = epoch + 1
433433

434434
end_time = time.clock()
435435
print(('Optimization complete with best validation score of %f %%,'

code/convolutional_mlp.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -246,7 +246,7 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
246246
epoch = epoch + 1
247247
for minibatch_index in xrange(n_train_batches):
248248

249-
iter = epoch * n_train_batches + minibatch_index
249+
iter = (epoch - 1) * n_train_batches + minibatch_index
250250

251251
if iter % 100 == 0:
252252
print 'training @ iter = ', iter
@@ -296,7 +296,7 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
296296
' ran for %.2fm' % ((end_time - start_time) / 60.))
297297

298298
if __name__ == '__main__':
299-
evaluate_lenet5()
299+
evaluate_lenet5(n_epochs=1, batch_size=1)
300300

301301

302302
def experiment(state, channel):

code/logistic_sgd.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -323,7 +323,7 @@ def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
323323

324324
minibatch_avg_cost = train_model(minibatch_index)
325325
# iteration number
326-
iter = epoch * n_train_batches + minibatch_index
326+
iter = (epoch - 1) * n_train_batches + minibatch_index
327327

328328
if (iter + 1) % validation_frequency == 0:
329329
# compute zero-one loss on validation set

code/mlp.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -309,7 +309,7 @@ def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
309309

310310
minibatch_avg_cost = train_model(minibatch_index)
311311
# iteration number
312-
iter = epoch * n_train_batches + minibatch_index
312+
iter = (epoch - 1) * n_train_batches + minibatch_index
313313

314314
if (iter + 1) % validation_frequency == 0:
315315
# compute zero-one loss on validation set

doc/gettingstarted.txt

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -576,17 +576,19 @@ of a strategy based on a geometrically increasing amount of patience.
576576
done_looping = False
577577
epoch = 0
578578
while (epoch < n_epochs) and (not done_looping):
579+
# Report "1" for first epoch, "n_epochs" for last epoch
579580
epoch = epoch + 1
580581
for minibatch_index in xrange(n_train_batches):
581582

582583
d_loss_wrt_params = ... # compute gradient
583584
params -= learning_rate * d_loss_wrt_params # gradient descent
584585

585-
# iteration number
586-
iter = epoch * n_train_batches + minibatch_index
586+
# iteration number. We want it to start at 0.
587+
iter = (epoch - 1) * n_train_batches + minibatch_index
587588
# note that if we do `iter % validation_frequency` it will be
588-
# true for iter = 0 which we do not want
589-
if iter and iter % validation_frequency == 0:
589+
# true for iter = 0 which we do not want. We want it true for
590+
# iter = validation_frequency - 1.
591+
if (iter + 1) % validation_frequency == 0:
590592

591593
this_validation_loss = ... # compute zero-one loss on validation set
592594

doc/logreg.txt

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -396,12 +396,12 @@ The output one should expect is of the form :
396396
.. code-block:: bash
397397

398398
...
399+
epoch 72, minibatch 83/83, validation error 7.510417 %
400+
epoch 72, minibatch 83/83, test error of best model 7.510417 %
399401
epoch 73, minibatch 83/83, validation error 7.500000 %
400-
epoch 73, minibatch 83/83, test error of best model 7.489583 %
401-
epoch 74, minibatch 83/83, validation error 7.479167 %
402-
epoch 74, minibatch 83/83, test error of best model 7.489583 %
403-
Optimization complete with best validation score of 7.479167 %,with test performance 7.489583 %
404-
The code run for 75 epochs, with 1.936983 epochs/sec
402+
epoch 73, minibatch 83/83, test error of best model 7.489583 %
403+
Optimization complete with best validation score of 7.500000 %,with test performance 7.489583 %
404+
The code run for 74 epochs, with 1.936983 epochs/sec
405405

406406

407407
On an Intel(R) Core(TM)2 Duo CPU E8400 @ 3.00 Ghz the code runs with

0 commit comments

Comments (0)