
Commit f817e2e

Merge commit (2 parents: 8213e51 + 4b07e77)

File tree: 3 files changed (+7, -7 lines)

docs/mkdocs.yml

Lines changed: 1 addition & 0 deletions
@@ -30,6 +30,7 @@ pages:
 - Advanced Activations Layers: layers/advanced_activations.md
 - Normalization Layers: layers/normalization.md
 - Embedding Layers: layers/embeddings.md
+- Noise layers: layers/noise.md
 - Containers: layers/containers.md
 - Preprocessing:
 - Sequence Preprocessing: preprocessing/sequence.md

docs/sources/layers/noise.md

Lines changed: 0 additions & 1 deletion
@@ -36,4 +36,3 @@ The Gaussian noise is only used at training time.
 
 - __p__: float, drop probability as with Dropout.
 
----
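
For context on the layers documented by docs/sources/layers/noise.md, here is a minimal usage sketch of GaussianNoise(sigma) and GaussianDropout(p) in a Sequential model. It assumes the Keras 0.x-era API this commit targets (including the old Dense(input_dim, output_dim) signature); the layer sizes and noise parameters are illustrative and not taken from the commit.

from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.layers.noise import GaussianNoise, GaussianDropout

model = Sequential()
model.add(Dense(784, 256))        # Keras 0.x-style Dense(input_dim, output_dim); sizes are assumptions
model.add(Activation('relu'))
model.add(GaussianNoise(0.05))    # additive zero-centred noise, only applied at training time
model.add(Dense(256, 128))
model.add(Activation('relu'))
model.add(GaussianDropout(0.5))   # multiplicative noise; p is the drop probability, as with Dropout
model.add(Dense(128, 10))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer='rmsprop')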

examples/mnist_irnn.py

Lines changed: 6 additions & 6 deletions
@@ -12,7 +12,7 @@
 from keras.utils import np_utils
 
 '''
-This is a reproduction of the IRNN experiment
+This is a reproduction of the IRNN experiment
 with pixel-by-pixel sequential MNIST in
 "A Simple Way to Initialize Recurrent Networks of Rectified Linear Units "
 by Quoc V. Le, Navdeep Jaitly, Geoffrey E. Hinton
@@ -23,8 +23,8 @@
 Optimizer is replaced with RMSprop which yields more stable and steady
 improvement.
 
-0.80 train/test accuracy and 0.55 train/test loss after 70 epochs
-(it's still underfitting at that point, though).
+Reaches 0.93 train/test accuracy after 900 epochs (which roughly corresponds
+to 1687500 steps in the original paper.)
 '''
 
 batch_size = 32
@@ -64,7 +64,7 @@
 rmsprop = RMSprop(lr=learning_rate)
 model.compile(loss='categorical_crossentropy', optimizer=rmsprop)
 
-model.fit(X_train, Y_train, batch_size=16, nb_epoch=nb_epochs,
+model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epochs,
           show_accuracy=True, verbose=1, validation_data=(X_test, Y_test))
 
 scores = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
@@ -79,9 +79,9 @@
 rmsprop = RMSprop(lr=learning_rate)
 model.compile(loss='categorical_crossentropy', optimizer=rmsprop)
 
-model.fit(X_train, Y_train, batch_size=16, nb_epoch=nb_epochs,
+model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epochs,
           show_accuracy=True, verbose=1, validation_data=(X_test, Y_test))
 
 scores = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
 print('LSTM test score:', scores[0])
-print('LSTM test accuracy:', scores[1])
+print('LSTM test accuracy:', scores[1])
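
The substantive change in examples/mnist_irnn.py is that both model.fit calls now use the batch_size variable declared near the top of the script (batch_size = 32) instead of a hard-coded 16, so training actually runs with the stated hyperparameter. Below is a minimal sketch of the resulting train/evaluate pattern, using only the fit/evaluate signatures visible in this diff; the model construction and MNIST data loading from the example are elided, and the nb_epochs value is an illustrative assumption.

batch_size = 32       # declared once and reused everywhere (the point of this change)
nb_epochs = 200       # illustrative value, not taken from this diff

# ... build and compile `model`, and load (X_train, Y_train), (X_test, Y_test)
# exactly as in examples/mnist_irnn.py (elided here) ...

model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epochs,
          show_accuracy=True, verbose=1, validation_data=(X_test, Y_test))

# With show_accuracy=True, evaluate() returns [loss, accuracy].
scores = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
print('Test score:', scores[0])
print('Test accuracy:', scores[1])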
