Skip to content

Commit b650b0b

Browse files
Joan Figuerola Hurtado
authored and committed
Trying smaller images
1 parent 38de683 commit b650b0b

4 files changed

Lines changed: 769 additions & 27 deletions

File tree

code/convolutional_mlp_test.py

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -116,7 +116,7 @@ def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
116116

117117
def evaluate_lenet5(learning_rate=0.1, n_epochs=100,
118118
dataset='mnist.pkl.gz',
119-
nkerns=[(96 / 2) , (256 / 2)], batch_size=100):
119+
nkerns=[(96 / 4) , (256 / 4)], batch_size=500):
120120

121121
""" Demonstrates lenet on MNIST dataset
122122
@@ -176,31 +176,32 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=100,
176176
# (28, 28) is the size of MNIST images.
177177
#layer0_input = x.reshape((batch_size, 1, 28, 28))
178178
#layer0_input = x.reshape((batch_size, 1, 256, 256))
179-
layer0_input = x.reshape((batch_size, 3, 256, 256))
179+
#layer0_input = x.reshape((batch_size, 3, 256, 256))
180+
layer0_input = x.reshape((batch_size, 3, 128, 128))
180181

181182
# Construct the first convolutional pooling layer:
182-
# filtering reduces the image size to (256-11+1 , 256-11+1) = (246, 246)
183-
# maxpooling reduces this further to (246/4, 246/4) = (61, 61)
184-
# 4D output tensor is thus of shape (batch_size, nkerns[0], 61, 61)
183+
# filtering reduces the image size to (128-11+1 , 128-11+1) = (118, 118)
184+
# maxpooling reduces this further to (118/4, 118/4) = (29, 29)
185+
# 4D output tensor is thus of shape (batch_size, nkerns[0], 29, 29)
185186

186187
layer0 = LeNetConvPoolLayer(
187188
rng,
188189
input=layer0_input,
189-
image_shape=(batch_size, 3, 256, 256),
190+
image_shape=(batch_size, 3, 128, 128),
190191
filter_shape=(nkerns[0], 3, 11, 11),
191192
poolsize=(4, 4)
192193
)
193194

194195
# Construct the second convolutional pooling layer
195-
# filtering reduces the image size to (61-5+1, 61-5+1) = (57, 57)
196-
# maxpooling reduces this further to (57/4, 57/4) = (14, 14)
197-
# 4D output tensor is thus of shape (batch_size, nkerns[1], 14, 14)
196+
# filtering reduces the image size to (29-5+1, 29-5+1) = (25, 25)
197+
# maxpooling reduces this further to (25/2, 25/2) = (12, 12)
198+
# 4D output tensor is thus of shape (batch_size, nkerns[1], 12, 12)
198199
layer1 = LeNetConvPoolLayer(
199200
rng,
200201
input=layer0.output,
201-
image_shape=(batch_size, nkerns[0], 61, 61),
202+
image_shape=(batch_size, nkerns[0], 29, 29),
202203
filter_shape=(nkerns[1], nkerns[0], 5, 5),
203-
poolsize=(4, 4)
204+
poolsize=(2, 2)
204205
)
205206

206207
# Construct the third convolutional pooling layer
@@ -227,15 +228,15 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=100,
227228
layer3 = HiddenLayer(
228229
rng,
229230
input=layer2_input,
230-
n_in=nkerns[1] * 14 * 14,
231-
n_out=100,
231+
n_in=nkerns[1] * 12 * 12,
232+
n_out=500,
232233
activation=T.tanh
233234
)
234235

235236
# classify the values of the fully-connected sigmoidal layer
236237
#layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
237238
#layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=2)
238-
layer4 = LogisticRegression(input=layer3.output, n_in=100, n_out=2)
239+
layer4 = LogisticRegression(input=layer3.output, n_in=500, n_out=2)
239240

240241
# the cost we minimize during training is the NLL of the model
241242
#cost = layer3.negative_log_likelihood(y)

0 commit comments

Comments
 (0)