Skip to content

Commit b2a349c

Browse files
committed
Merge pull request lisa-lab#13 from nouiz/master
Try to make travis pass.
2 parents 07a4214 + cf8ba60 commit b2a349c

File tree

3 files changed

+39
-14
lines changed

3 files changed

+39
-14
lines changed

.travis.yml

Lines changed: 32 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -1,23 +1,47 @@
11
# After changing this file, check it on:
22
# http://lint.travis-ci.org/
33

4-
language: python
5-
python:
6-
- "2.5"
4+
#We can't get scipy installed with the python language
5+
#So we will use the system python from the c language.
6+
language: c
7+
#language: python
8+
#python:
9+
# - "2.5"
710
# - "2.7"
811
# - "3.2"
912
# command to install dependencies
1013
before_install:
1114
#zlib1g-dev is needed to allow PIL to uncompress the dataset.
12-
- sudo apt-get install -qq libatlas3gf-base libatlas-dev zlib1g-dev zip unzip zlibc libzip-dev libjpeg8 libjpeg62-dev libfreetype6 libfreetype6-dev
15+
- sudo apt-get install -qq libatlas3gf-base libatlas-dev zlib1g-dev zip unzip zlibc libzip-dev libjpeg8 libjpeg62-dev libfreetype6 libfreetype6-dev python-numpy python-scipy python-pip python-nose python-yaml pyflakes python-imaging
1316

1417
install:
15-
- "pip install -q numpy --use-mirrors"
18+
# - "pip install -q numpy --use-mirrors"
1619
# Use Pillow instead of PIL as it is better packaged
17-
- "pip install -q Pillow --use-mirrors"
20+
# - "pip install -q Pillow --use-mirrors"
1821
#If we don't install numpy before SciPy 0.10.1, the SciPy installations fails.
1922
# - "pip install -q scipy --use-mirrors"
20-
- "pip install --no-deps git+git://github.com/Theano/Theano.git"
23+
- "sudo pip install --no-deps git+git://github.com/Theano/Theano.git"
24+
- "sudo pip install hg+http://hg.assembla.com/pylearn"
25+
26+
env:
27+
- PART="test.py:test_logistic_sgd test.py:test_logistic_cg test.py:test_mlp"
28+
- PART="test.py:test_convolutional_mlp test.py:test_dA"
29+
- PART="test.py:test_SdA"
30+
- PART="test.py:test_dbn"
31+
- PART="test.py:test_rbm"
32+
- PART="-e test.py"
33+
34+
#569.882s #10 code.test.test_rbm OK
35+
#298.992s #9 code.test.test_dbn OK
36+
#268.901s #8 code.test.test_SdA OK
37+
#67.292s #7 code.test.test_dA OK
38+
#27.485s #5 code.test.test_mlp OK
39+
#26.204s #6 code.test.test_convolutional_mlp OK
40+
#14.676s #4 code.test.test_logistic_cg OK
41+
#10.66s #3 code.test.test_logistic_sgd OK
42+
#5.795s #1 code.mcrbm.test_hmc.test_hmc OK
43+
#0.0s #2 code.mcrbm.test_mcrbm.test_reproduce_ranzato_hinton_2010 FAILED TEST
44+
2145
script:
2246
- cd data
2347
- ./download.sh
@@ -27,5 +51,5 @@ script:
2751
- ls
2852
- export THEANO_FLAGS=warn.ignore_bug_before=all,on_opt_error=raise,on_shape_error=raise
2953
- python --version
30-
- nosetests test.py
54+
- nosetests $PART
3155

code/mcrbm/__init__.py

Whitespace-only changes.

code/mlp.py

Lines changed: 7 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -62,7 +62,7 @@ def __init__(self, rng, input, n_in, n_out, W=None, b=None,
6262
6363
:type activation: theano.Op or function
6464
:param activation: Non linearity to be applied in the hidden
65-
layer
65+
layer
6666
"""
6767
self.input = input
6868

@@ -174,7 +174,7 @@ def __init__(self, rng, input, n_in, n_hidden, n_out):
174174

175175

176176
def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
177-
dataset='../data/mnist.pkl.gz', batch_size=20):
177+
dataset='../data/mnist.pkl.gz', batch_size=20, n_hidden=500):
178178
"""
179179
Demonstrate stochastic gradient descent optimization for a multilayer
180180
perceptron
@@ -219,15 +219,16 @@ def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
219219
print '... building the model'
220220

221221
# allocate symbolic variables for the data
222-
index = T.lscalar() # index to a [mini]batch
222+
index = T.lscalar() # index to a [mini]batch
223223
x = T.matrix('x') # the data is presented as rasterized images
224224
y = T.ivector('y') # the labels are presented as 1D vector of
225225
# [int] labels
226226

227227
rng = numpy.random.RandomState(1234)
228228

229229
# construct the MLP class
230-
classifier = MLP(rng=rng, input=x, n_in=28 * 28, n_hidden=500, n_out=10)
230+
classifier = MLP(rng=rng, input=x, n_in=28 * 28,
231+
n_hidden=n_hidden, n_out=10)
231232

232233
# the cost we minimize during training is the negative log likelihood of
233234
# the model plus the regularization terms (L1 and L2); cost is expressed
@@ -259,10 +260,10 @@ def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
259260

260261
# specify how to update the parameters of the model as a dictionary
261262
updates = {}
262-
# given two list the zip A = [ a1,a2,a3,a4] and B = [b1,b2,b3,b4] of
263+
# given two list the zip A = [a1, a2, a3, a4] and B = [b1, b2, b3, b4] of
263264
# same length, zip generates a list C of same size, where each element
264265
# is a pair formed from the two lists :
265-
# C = [ (a1,b1), (a2,b2), (a3,b3) , (a4,b4) ]
266+
# C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]
266267
for param, gparam in zip(classifier.params, gparams):
267268
updates[param] = param - learning_rate * gparam
268269

0 commit comments

Comments
 (0)