
Commit 2165101

pep8
1 parent e2184b2 commit 2165101

1 file changed: doc/mlp.txt

20 additions & 20 deletions
@@ -106,7 +106,7 @@ layer on top.
 .. code-block:: python

     class HiddenLayer(object):
-        def __init__(self, rng, input, n_in, n_out, activation = T.tanh):
+        def __init__(self, rng, input, n_in, n_out, activation=T.tanh):
             """
             Typical hidden layer of a MLP: units are fully-connected and have
             sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
@@ -162,17 +162,17 @@ both upward (activations flowing from inputs to outputs) and backward
         # should use 4 times larger initial weights for sigmoid
         # compared to tanh
         # We have no info for other function, so we use the same as tanh.
-        W_values = numpy.asarray( rng.uniform(
-                low = - numpy.sqrt(6./(n_in+n_out)),
-                high = numpy.sqrt(6./(n_in+n_out)),
-                size = (n_in, n_out)), dtype = theano.config.floatX)
+        W_values = numpy.asarray(rng.uniform(
+                low=-numpy.sqrt(6. / (n_in + n_out)),
+                high=numpy.sqrt(6. / (n_in + n_out)),
+                size=(n_in, n_out)), dtype=theano.config.floatX)
         if activation == theano.tensor.nnet.sigmoid:
             W_values *= 4

-        self.W = theano.shared(value = W_values, name ='W')
+        self.W = theano.shared(value=W_values, name='W')

-        b_values = numpy.zeros((n_out,), dtype= theano.config.floatX)
-        self.b = theano.shared(value= b_values, name ='b')
+        b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
+        self.b = theano.shared(value=b_values, name='b')


 Note that we used a given non-linear function as the activation function of the hidden layer. By default this is ``tanh``, but in many cases we might want
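
The interval used above is the uniform range :math:`\pm\sqrt{6 / (n_{in} + n_{out})}` suggested for tanh units, scaled by 4 for sigmoid units. As a minimal standalone sketch of the same recipe in plain numpy (the helper name ``init_weights`` and the layer sizes are made up for illustration):

.. code-block:: python

    import numpy

    def init_weights(rng, n_in, n_out, sigmoid=False):
        # sample uniformly from [-sqrt(6/(n_in+n_out)), sqrt(6/(n_in+n_out))],
        # the range used for tanh units in the diff above
        bound = numpy.sqrt(6. / (n_in + n_out))
        W = numpy.asarray(rng.uniform(low=-bound, high=bound,
                                      size=(n_in, n_out)),
                          dtype='float32')
        if sigmoid:
            # sigmoid units get 4 times larger initial weights
            W *= 4
        return W

    rng = numpy.random.RandomState(1234)
    W = init_weights(rng, n_in=784, n_out=500)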
@@ -239,9 +239,9 @@ the ``MLP`` class :
         # The logistic regression layer gets as input the hidden units
         # of the hidden layer
         self.logRegressionLayer = LogisticRegression(
-            input = self.hiddenLayer.output,
-            n_in = n_hidden,
-            n_out = n_out)
+            input=self.hiddenLayer.output,
+            n_in=n_hidden,
+            n_out=n_out)


 In this tutorial we will also use L1 and L2 regularization (see
@@ -257,8 +257,8 @@ norm of the weights :math:`W^{(1)}, W^{(2)}`.

         # square of L2 norm ; one regularization option is to enforce
         # square of L2 norm to be small
-        self.L2_sqr = (self.hiddenLayer.W**2).sum() \
-                    + (self.logRegressionLayer.W**2).sum()
+        self.L2_sqr = (self.hiddenLayer.W ** 2).sum() \
+                    + (self.logRegressionLayer.W ** 2).sum()

         # negative log likelihood of the MLP is given by the negative
         # log likelihood of the output of the model, computed in the
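
These norms enter the training criterion as penalty terms. A self-contained numpy illustration of the two quantities, using random stand-ins for the two weight matrices (the shapes here are arbitrary):

.. code-block:: python

    import numpy

    rng = numpy.random.RandomState(0)
    W1 = rng.randn(784, 500)  # stand-in for hiddenLayer.W
    W2 = rng.randn(500, 10)   # stand-in for logRegressionLayer.W

    # squared L2 norm over all weights, as computed in the diff above
    L2_sqr = (W1 ** 2).sum() + (W2 ** 2).sum()
    # L1 norm, the other regularization option the tutorial mentions
    L1 = abs(W1).sum() + abs(W2).sum()

Both terms are then added, with small coefficients, to the negative log likelihood to form the cost that training minimizes.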
@@ -312,22 +312,22 @@ at each step.

         # specify how to update the parameters of the model as a dictionary
         updates = {}
-        # given two list the zip A = [ a1,a2,a3,a4] and B = [b1,b2,b3,b4] of
+        # given two list the zip A = [a1, a2, a3, a4] and B = [b1, b2, b3, b4] of
         # same length, zip generates a list C of same size, where each element
         # is a pair formed from the two lists :
-        # C = [ (a1,b1), (a2,b2), (a3,b3) , (a4,b4) ]
+        # C = [(a1, b1), (a2, b2), (a3, b3) , (a4, b4)]
         for param, gparam in zip(classifier.params, gparams):
-            updates[param] = param - learning_rate*gparam
+            updates[param] = param - learning_rate * gparam


         # compiling a Theano function `train_model` that returns the cost, but
         # in the same time updates the parameter of the model based on the rules
         # defined in `updates`
-        train_model =theano.function( inputs = [index], outputs = cost,
-                updates = updates,
+        train_model = theano.function(inputs=[index], outputs=cost,
+                updates=updates,
                 givens={
-                    x:train_set_x[index*batch_size:(index+1)*batch_size],
-                    y:train_set_y[index*batch_size:(index+1)*batch_size]})
+                    x: train_set_x[index * batch_size:(index + 1) * batch_size],
+                    y: train_set_y[index * batch_size:(index + 1) * batch_size]})

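The loop builds one gradient-descent update per parameter: each step moves the parameter against its gradient, scaled by the learning rate, while the ``givens`` slices select minibatch ``index`` from the training set. A minimal numpy sketch of both pieces (all shapes and values here are made up for illustration):

.. code-block:: python

    import numpy

    learning_rate = 0.01
    params = [numpy.ones((3, 2)), numpy.zeros(2)]            # stand-ins for W, b
    gparams = [numpy.full((3, 2), 0.5), numpy.full(2, 0.5)]  # their gradients

    # same rule as in the diff: param <- param - learning_rate * gparam
    updated = [param - learning_rate * gparam
               for param, gparam in zip(params, gparams)]

    # minibatch slicing, done symbolically in the `givens` clause above
    batch_size, index = 20, 3
    train_set_x = numpy.arange(100 * 5).reshape(100, 5)
    minibatch = train_set_x[index * batch_size:(index + 1) * batch_size]

In the compiled function the same slicing happens symbolically, so calling ``train_model(3)`` would take one gradient step on the fourth minibatch and return the cost on it.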