 from theano.tensor.signal import downsample
 from theano.tensor.nnet import conv

-from logistic_sgd import LogisticRegression, load_data
+from logistic_sgd_test import LogisticRegression, load_data
 from mlp import HiddenLayer


@@ -136,9 +136,8 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     rng = numpy.random.RandomState(23455)

     datasets = load_data(dataset)
-    print datasets

-    """ train_set_x, train_set_y = datasets[0]
+    train_set_x, train_set_y = datasets[0]
     valid_set_x, valid_set_y = datasets[1]
     test_set_x, test_set_y = datasets[2]

@@ -166,7 +165,8 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
166165 # Reshape matrix of rasterized images of shape (batch_size, 28 * 28)
167166 # to a 4D tensor, compatible with our LeNetConvPoolLayer
168167 # (28, 28) is the size of MNIST images.
169- layer0_input = x.reshape((batch_size, 1, 28, 28))
168+ #layer0_input = x.reshape((batch_size, 1, 28, 28))
169+ layer0_input = x .reshape ((batch_size , 1 , 256 , 256 ))
170170
171171 # Construct the first convolutional pooling layer:
172172 # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)
@@ -175,10 +175,17 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     layer0 = LeNetConvPoolLayer(
         rng,
         input=layer0_input,
-        image_shape=(batch_size, 1, 28, 28),
+        image_shape=(batch_size, 1, 256, 256),
         filter_shape=(nkerns[0], 1, 5, 5),
         poolsize=(2, 2)
     )
+    # layer0 = LeNetConvPoolLayer(
+    #     rng,
+    #     input=layer0_input,
+    #     image_shape=(batch_size, 1, 28, 28),
+    #     filter_shape=(nkerns[0], 1, 5, 5),
+    #     poolsize=(2, 2)
+    # )

     # Construct the second convolutional pooling layer
     # filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)
@@ -187,10 +194,25 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     layer1 = LeNetConvPoolLayer(
         rng,
         input=layer0.output,
-        image_shape=(batch_size, nkerns[0], 12, 12),
+        image_shape=(batch_size, nkerns[0], 126, 126),
         filter_shape=(nkerns[1], nkerns[0], 5, 5),
         poolsize=(2, 2)
     )
+    # layer1 = LeNetConvPoolLayer(
+    #     rng,
+    #     input=layer0.output,
+    #     image_shape=(batch_size, nkerns[0], 12, 12),
+    #     filter_shape=(nkerns[1], nkerns[0], 5, 5),
+    #     poolsize=(2, 2)
+    # )
+
+    # layer1 = LeNetConvPoolLayer(
+    #     rng,
+    #     input=layer0.output,
+    #     image_shape=(batch_size, nkerns[0], 126, 126),
+    #     filter_shape=(nkerns[1], nkerns[0], 5, 5),
+    #     poolsize=(2, 2)
+    # )

     # the HiddenLayer being fully-connected, it operates on 2D matrices of
     # shape (batch_size, num_pixels) (i.e matrix of rasterized images).
@@ -202,13 +224,22 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     layer2 = HiddenLayer(
         rng,
         input=layer2_input,
-        n_in=nkerns[1] * 4 * 4,
+        n_in=nkerns[1] * 61 * 61,
         n_out=500,
         activation=T.tanh
     )

+    # layer2 = HiddenLayer(
+    #     rng,
+    #     input=layer2_input,
+    #     n_in=nkerns[1] * 4 * 4,
+    #     n_out=500,
+    #     activation=T.tanh
+    # )
+
     # classify the values of the fully-connected sigmoidal layer
-    layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
+    #layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
+    layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=2)

     # the cost we minimize during training is the NLL of the model
     cost = layer3.negative_log_likelihood(y)
@@ -338,7 +369,6 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     print >> sys.stderr, ('The code for file ' +
                           os.path.split(__file__)[1] +
                           ' ran for %.2fm' % ((end_time - start_time) / 60.))
-    """
 if __name__ == '__main__':
     evaluate_lenet5()

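A quick sanity check of the new feature-map sizes used in this commit (a minimal sketch, not part of the diff; it assumes 'valid' convolution with 5x5 filters followed by non-overlapping (2, 2) max-pooling, as the tutorial's own comments describe for the 28x28 case):

def conv_pool_side(side, filter_size=5, pool_size=2):
    # 'valid' convolution shrinks each side by (filter_size - 1),
    # then (pool_size, pool_size) max-pooling divides it by pool_size
    return (side - filter_size + 1) // pool_size

side0 = conv_pool_side(256)    # layer0 output: (256 - 5 + 1) / 2 = 126
side1 = conv_pool_side(side0)  # layer1 output: (126 - 5 + 1) / 2 = 61
print(side0, side1)            # 126 61, hence n_in = nkerns[1] * 61 * 61 for layer2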