@@ -184,25 +184,25 @@ one of Figure 1. The input consists of 3 feature maps (an RGB color image) of s
184184 rng = numpy.random.RandomState(23455)
185185
186186 # instantiate 4D tensor for input
187- input = T.tensor4(name = 'input')
187+     input = T.tensor4(name='input')
188188
189189 # initialize shared variable for weights.
190190 w_shp = (2, 3, 9, 9)
191- w_bound = numpy.sqrt(3 * 9 * 9)
191+ w_bound = numpy.sqrt(3 * 9 * 9)
192192 W = theano.shared( numpy.asarray(
193193 rng.uniform(
194194 low=-1.0 / w_bound,
195195 high=1.0 / w_bound,
196196 size=w_shp),
197- dtype=input.dtype),name ='W')
197+                 dtype=input.dtype), name='W')
198198
199199 # initialize shared variable for bias (1D tensor) with random values
200200 # IMPORTANT: biases are usually initialized to zero. However in this
201201 # particular application, we simply apply the convolutional layer to
202202 # an image without learning the parameters. We therefore initialize
203203 # them to random values to "simulate" learning.
204204 b_shp = (2,)
205- b = theano.shared( numpy.asarray(
205+ b = theano.shared(numpy.asarray(
206206 rng.uniform(low=-.5, high=.5, size=b_shp),
207207                 dtype=input.dtype), name='b')
208208
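A quick shape check (this snippet is not part of the diff; it only restates the arithmetic of a 'valid' convolution, which shrinks each spatial dimension by filter_size - 1):

.. code-block:: python

    # illustrative shape arithmetic for the conv2d compiled into f below
    img_shape = (1, 3, 639, 516)   # (batch, channels, height, width)
    w_shape = (2, 3, 9, 9)         # (n_filters, channels, height, width)

    out_h = img_shape[2] - w_shape[2] + 1   # 639 - 9 + 1 = 631
    out_w = img_shape[3] - w_shape[3] + 1   # 516 - 9 + 1 = 508
    # the filtered minibatch therefore has shape (1, 2, 631, 508)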
@@ -246,19 +246,19 @@ Let's have a little bit of fun with this...
246246
247247 # open random image of dimensions 639x516
248248 img = Image.open(open('images/3wolfmoon.jpg'))
249- img = numpy.asarray(img, dtype='float64')/ 256.
249+ img = numpy.asarray(img, dtype='float64') / 256.
250250
251- # put image in 4D tensor of shape (1,3, height,width)
252- img_ = img.swapaxes(0,2).swapaxes(1,2).reshape(1,3, 639,516)
251+ # put image in 4D tensor of shape (1, 3, height, width)
252+ img_ = img.swapaxes(0, 2).swapaxes(1, 2).reshape(1, 3, 639, 516)
253253 filtered_img = f(img_)
254254
255255 # plot original image and first and second components of output
256- pylab.subplot(1,3, 1); pylab.axis('off'); pylab.imshow(img)
256+ pylab.subplot(1, 3, 1); pylab.axis('off'); pylab.imshow(img)
257257     pylab.gray()
258258 # recall that the convOp output (filtered image) is actually a "minibatch",
259259 # of size 1 here, so we take index 0 in the first dimension:
260- pylab.subplot(1,3, 2); pylab.axis('off'); pylab.imshow(filtered_img[0,0,:, :])
261- pylab.subplot(1,3, 3); pylab.axis('off'); pylab.imshow(filtered_img[0,1,:, :])
260+ pylab.subplot(1, 3, 2); pylab.axis('off'); pylab.imshow(filtered_img[0, 0, :, :])
261+ pylab.subplot(1, 3, 3); pylab.axis('off'); pylab.imshow(filtered_img[0, 1, :, :])
262262 pylab.show()
263263
264264
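As an aside (not part of the diff), the two swapaxes calls are just one way of moving the channel axis to the front; an equivalent and arguably clearer formulation is a single transpose:

.. code-block:: python

    # (height, width, 3) -> (3, height, width) -> (1, 3, height, width)
    img_ = img.transpose(2, 0, 1).reshape(1, 3, 639, 516)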
@@ -309,44 +309,44 @@ An example is worth a thousand words:
309309 from theano.tensor.signal import downsample
310310
311311 input = T.dtensor4('input')
312- maxpool_shape = (2,2)
312+ maxpool_shape = (2, 2)
313313 pool_out = downsample.max_pool_2d(input, maxpool_shape, ignore_border=True)
314314     f = theano.function([input], pool_out)
315315
316- invals = numpy.random.RandomState(1).rand(3,2,5, 5)
316+ invals = numpy.random.RandomState(1).rand(3, 2, 5, 5)
317317 print 'With ignore_border set to True:'
318- print 'invals[0,0,:, :] =\n', invals[0,0,:, :]
319- print 'output[0,0,:, :] =\n', f(invals)[0,0,:, :]
318+ print 'invals[0, 0, :, :] =\n', invals[0, 0, :, :]
319+ print 'output[0, 0, :, :] =\n', f(invals)[0, 0, :, :]
320320
321321 pool_out = downsample.max_pool_2d(input, maxpool_shape, ignore_border=False)
322322     f = theano.function([input], pool_out)
323323 print 'With ignore_border set to False:'
324- print 'invals[1,0,:, :] =\n ', invals[1,0,:, :]
325- print 'output[1,0,:, :] =\n ', f(invals)[1,0,:, :]
324+ print 'invals[1, 0, :, :] =\n ', invals[1, 0, :, :]
325+ print 'output[1, 0, :, :] =\n ', f(invals)[1, 0, :, :]
326326
327327This should generate the following output:
328328
329329.. code-block:: bash
330330
331331 With ignore_border set to True:
332- invals[0,0,:, :] =
332+ invals[0, 0, :, :] =
333333 [[ 4.17022005e-01 7.20324493e-01 1.14374817e-04 3.02332573e-01 1.46755891e-01]
334334 [ 9.23385948e-02 1.86260211e-01 3.45560727e-01 3.96767474e-01 5.38816734e-01]
335335 [ 4.19194514e-01 6.85219500e-01 2.04452250e-01 8.78117436e-01 2.73875932e-02]
336336 [ 6.70467510e-01 4.17304802e-01 5.58689828e-01 1.40386939e-01 1.98101489e-01]
337337 [ 8.00744569e-01 9.68261576e-01 3.13424178e-01 6.92322616e-01 8.76389152e-01]]
338- output[0,0,:, :] =
338+ output[0, 0, :, :] =
339339 [[ 0.72032449 0.39676747]
340340 [ 0.6852195 0.87811744]]
341341
342342 With ignore_border set to False:
343- invals[1,0,:, :] =
343+ invals[1, 0, :, :] =
344344 [[ 0.01936696 0.67883553 0.21162812 0.26554666 0.49157316]
345345 [ 0.05336255 0.57411761 0.14672857 0.58930554 0.69975836]
346346 [ 0.10233443 0.41405599 0.69440016 0.41417927 0.04995346]
347347 [ 0.53589641 0.66379465 0.51488911 0.94459476 0.58655504]
348348 [ 0.90340192 0.1374747 0.13927635 0.80739129 0.39767684]]
349- output[1,0,:, :] =
349+ output[1, 0, :, :] =
350350 [[ 0.67883553 0.58930554 0.69975836]
351351 [ 0.66379465 0.94459476 0.58655504]
352352 [ 0.90340192 0.80739129 0.39767684]]
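For readers who want to verify the ignore_border=True case by hand, the same pooled values can be reproduced in plain numpy (an illustrative sketch for a non-overlapping 2x2 pool, not part of the diff): crop the 5x5 maps to 4x4, split each spatial axis into (blocks, 2), and take the per-block maximum:

.. code-block:: python

    # numpy equivalent of max_pool_2d(invals, (2, 2), ignore_border=True)
    a = invals[:, :, :4, :4]    # drop the incomplete border row and column
    pooled = a.reshape(3, 2, 2, 2, 2, 2).max(axis=5).max(axis=3)
    # pooled[0, 0] matches output[0, 0, :, :] above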
@@ -389,7 +389,7 @@ layer.
389389
390390 class LeNetConvPoolLayer(object):
391391
392- def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2,2)):
392+ def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
393393 """
394394 Allocate a LeNetConvPoolLayer with shared variable internal parameters.
395395
@@ -410,21 +410,21 @@ layer.
410410 :type poolsize: tuple or list of length 2
411411 :param poolsize: the downsampling (pooling) factor (#rows,#cols)
412412 """
413- assert image_shape[1]== filter_shape[1]
413+ assert image_shape[1] == filter_shape[1]
414414 self.input = input
415415
416416 # initialize weight values: the fan-in of each hidden neuron is
417417 # restricted by the size of the receptive fields.
418418 fan_in = numpy.prod(filter_shape[1:])
419- W_values = numpy.asarray( rng.uniform( \
420- low = -numpy.sqrt(3./fan_in), \
421- high = numpy.sqrt(3./fan_in), \
422- size = filter_shape), dtype = theano.config.floatX)
423- self.W = theano.shared(value = W_values, name = 'W')
419+         W_values = numpy.asarray(rng.uniform(
420+               low=-numpy.sqrt(3. / fan_in),
421+               high=numpy.sqrt(3. / fan_in),
422+               size=filter_shape), dtype=theano.config.floatX)
423+         self.W = theano.shared(value=W_values, name='W')
424424
425425 # the bias is a 1D tensor -- one bias per output feature map
426- b_values = numpy.zeros((filter_shape[0],), dtype= theano.config.floatX)
427- self.b = theano.shared(value= b_values, name = 'b')
426+ b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
427+         self.b = theano.shared(value=b_values, name='b')
428428
429429 # convolve input feature maps with filters
430430 conv_out = conv.conv2d(input, self.W,
@@ -434,7 +434,7 @@ layer.
434434 pooled_out = downsample.max_pool_2d(conv_out, poolsize, ignore_border=True)
435435
436436 # add the bias term. Since the bias is a vector (1D array), we first
437- # reshape it to a tensor of shape (1,n_filters,1, 1). Each bias will thus
437+ # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will thus
438438 # be broadcasted across mini-batches and feature map width & height
439439 self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
440440
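The dimshuffle('x', 0, 'x', 'x') call above is what makes the per-filter bias broadcastable over a 4D minibatch; in numpy terms it behaves like a reshape, as this small sketch (not part of the class) illustrates:

.. code-block:: python

    # numpy analogue of b.dimshuffle('x', 0, 'x', 'x') for a (n_filters,) bias
    b_val = numpy.zeros((2,))
    b4 = b_val.reshape(1, 2, 1, 1)   # shape (1, n_filters, 1, 1)
    # b4 + pooled_out would broadcast the bias over batch, height and width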
@@ -454,8 +454,8 @@ instantiate the network as follows.
454454 learning_rate = 0.1
455455 rng = numpy.random.RandomState(23455)
456456
457- ishape = (28,28) # this is the size of MNIST images
458- batch_size = 20 # sized of the minibatch
457+ ishape = (28, 28) # this is the size of MNIST images
458+     batch_size = 20    # size of the minibatch
459459
460460 # allocate symbolic variables for the data
461461     x = T.matrix('x')  # rasterized images
@@ -474,26 +474,26 @@ instantiate the network as follows.
474474 # maxpooling reduces this further to (24/2,24/2) = (12,12)
475475 # 4D output tensor is thus of shape (20,20,12,12)
476476 layer0 = LeNetConvPoolLayer(rng, input=layer0_input,
477- image_shape=(batch_size,1, 28,28),
478- filter_shape=(20,1,5, 5), poolsize=(2,2))
477+ image_shape=(batch_size, 1, 28, 28),
478+ filter_shape=(20, 1, 5, 5), poolsize=(2, 2))
479479
480480 # Construct the second convolutional pooling layer
481- # filtering reduces the image size to (12-5+1,12-5+ 1)=(8,8)
482- # maxpooling reduces this further to (8/2,8/2) = (4,4)
481+     # filtering reduces the image size to (12 - 5 + 1, 12 - 5 + 1) = (8, 8)
482+     # maxpooling reduces this further to (8/2, 8/2) = (4, 4)
483483 # 4D output tensor is thus of shape (20,50,4,4)
484484 layer1 = LeNetConvPoolLayer(rng, input=layer0.output,
485- image_shape=(batch_size,20,12,12),
486- filter_shape=(50,20,5, 5), poolsize=(2,2))
485+ image_shape=(batch_size, 20, 12, 12),
486+ filter_shape=(50, 20, 5, 5), poolsize=(2, 2))
487487
488488 # the SigmoidalLayer being fully-connected, it operates on 2D matrices of
489489     # shape (batch_size, num_pixels) (i.e. matrix of rasterized images).
490- # This will generate a matrix of shape (20,32*4* 4) = (20,512)
490+     # This will generate a matrix of shape (20, 50 * 4 * 4) = (20, 800)
491491 layer2_input = layer1.output.flatten(2)
492492
493493 # construct a fully-connected sigmoidal layer
494494 layer2 = HiddenLayer(rng, input=layer2_input,
495- n_in=50*4* 4, n_out=500,
496- activation = T.tanh )
495+ n_in=50 * 4 * 4, n_out=500,
496+                          activation=T.tanh)
497497
498498 # classify the values of the fully-connected sigmoidal layer
499499 layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
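All the shape comments above follow one recurrence: each conv-pool stage maps a spatial size s to (s - filter_size + 1) / pool_size. A hypothetical helper (for illustration only, not part of the tutorial) makes them easy to verify:

.. code-block:: python

    # spatial size after one 'valid' convolution and non-overlapping pooling
    def conv_pool_size(s, filter_size, pool_size):
        return (s - filter_size + 1) // pool_size

    conv_pool_size(28, 5, 2)   # layer0: (28 - 5 + 1) / 2 = 12
    conv_pool_size(12, 5, 2)   # layer1: (12 - 5 + 1) / 2 = 4, so layer2 sees 50 * 4 * 4 = 800 inputs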
@@ -503,10 +503,10 @@ instantiate the network as follows.
503503 cost = layer3.negative_log_likelihood(y)
504504
505505 # create a function to compute the mistakes that are made by the model
506- test_model = theano.function([x,y], layer3.errors(y))
506+ test_model = theano.function([x, y], layer3.errors(y))
507507
508508 # create a list of all model parameters to be fit by gradient descent
509- params = layer3.params+ layer2.params+ layer1.params + layer0.params
509+ params = layer3.params + layer2.params + layer1.params + layer0.params
510510
511511 # create a list of gradients for all model parameters
512512 grads = T.grad(cost, params)
@@ -520,8 +520,8 @@ instantiate the network as follows.
520520 updates[param_i] = param_i - learning_rate * grad_i
521521     train_model = theano.function([index], cost, updates=updates,
522522 givens={
523- x:train_set_x[index* batch_size:(index+1)* batch_size],
524- y:train_set_y[index* batch_size:(index+1)* batch_size]})
523+ x: train_set_x[index * batch_size: (index + 1) * batch_size],
524+ y: train_set_y[index * batch_size: (index + 1) * batch_size]})
525525
526526
527527
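The full tutorial goes on to call train_model in the usual minibatch loop; a minimal sketch (assuming n_epochs and n_train_batches are defined elsewhere, as they are in the complete script) looks like:

.. code-block:: python

    # minimal training-loop sketch -- n_epochs and n_train_batches are assumed
    for epoch in xrange(n_epochs):
        for minibatch_index in xrange(n_train_batches):
            # one SGD step; the givens above select rows
            # [index * batch_size, (index + 1) * batch_size) of the training set
            cost_ij = train_model(minibatch_index)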