Commit cee690d

Fix a bunch of flake8/pep8 errors

1 parent 70e0f7d

14 files changed, 101 additions and 119 deletions


code/DBN.py

Lines changed: 3 additions & 5 deletions

@@ -1,7 +1,5 @@
 """
 """
-import cPickle
-import gzip
 import os
 import sys
 import time
@@ -372,7 +370,6 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,
                                   # on the validation set; in this case we
                                   # check every epoch

-    best_params = None
     best_validation_loss = numpy.inf
     test_score = 0.
     start_time = time.clock()
@@ -430,9 +427,10 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,
     end_time = time.clock()
     print(
         (
-            'Optimization complete with best validation score of %f %%,'
+            'Optimization complete with best validation score of %f %%, '
+            'obtained at iteration %i, '
             'with test performance %f %%'
-        ) % (best_validation_loss * 100., test_score * 100.)
+        ) % (best_validation_loss * 100., best_iter + 1, test_score * 100.)
     )
     print >> sys.stderr, ('The fine tuning code for file ' +
                           os.path.split(__file__)[1] +
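
Note on the message fix above: Python joins adjacent string literals at compile time, so the original fragments ran together without a space before "with test performance". A minimal sketch of the corrected pattern, with made-up values for illustration:

    # Adjacent string literals are concatenated into one format string.
    msg = ('Optimization complete with best validation score of %f %%, '
           'obtained at iteration %i, '
           'with test performance %f %%')
    print(msg % (1.57, 42, 1.82))
    # Optimization complete with best validation score of 1.570000 %,
    # obtained at iteration 42, with test performance 1.820000 %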

code/SdA.py

Lines changed: 3 additions & 7 deletions

@@ -29,8 +29,6 @@
     Systems 19, 2007

 """
-import cPickle
-import gzip
 import os
 import sys
 import time
@@ -202,8 +200,6 @@ def pretraining_functions(self, train_set_x, batch_size):
         index = T.lscalar('index')  # index to a minibatch
         corruption_level = T.scalar('corruption')  # % of corruption to use
         learning_rate = T.scalar('lr')  # learning rate to use
-        # number of batches
-        n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
         # begining of a batch, given `index`
         batch_begin = index * batch_size
         # ending of a batch given `index`
@@ -429,7 +425,6 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,
                                   # on the validation set; in this case we
                                   # check every epoch

-    best_params = None
     best_validation_loss = numpy.inf
     test_score = 0.
     start_time = time.clock()
@@ -479,10 +474,11 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,
     end_time = time.clock()
     print(
         (
-            'Optimization complete with best validation score of %f %%,'
+            'Optimization complete with best validation score of %f %%, '
+            'on iteration %i, '
             'with test performance %f %%'
         )
-        % (best_validation_loss * 100., test_score * 100.)
+        % (best_validation_loss * 100., best_iter + 1, test_score * 100.)
     )
     print >> sys.stderr, ('The training code for file ' +
                           os.path.split(__file__)[1] +
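
The deletions in this file are pyflakes-driven dead-code removal: an unused import triggers F401, and a local that is assigned but never read triggers F841. A hedged sketch of the two patterns, with the codes flake8 would report shown as comments (the toy function is illustrative, not from the tutorial):

    import gzip  # F401 'gzip' imported but unused

    def train():
        best_params = None  # F841 'best_params' assigned but never used
        return 0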

code/cA.py

Lines changed: 8 additions & 9 deletions

@@ -12,7 +12,8 @@
     squared Frobenius norm of the Jacobian of the hidden mapping h with
     respect to the visible units yields the contractive auto-encoder:

-    - \sum_{k=1}^d[ x_k \log z_k + (1-x_k) \log( 1-z_k)] + \| \frac{\partial h(x)}{\partial x} \|^2
+    - \sum_{k=1}^d[ x_k \log z_k + (1-x_k) \log( 1-z_k)]
+    + \| \frac{\partial h(x)}{\partial x} \|^2

 References :
    - S. Rifai, P. Vincent, X. Muller, X. Glorot, Y. Bengio: Contractive
@@ -27,8 +28,6 @@
     Systems 19, 2007

 """
-import cPickle
-import gzip
 import os
 import sys
 import time
@@ -79,11 +78,11 @@ class cA(object):

     def __init__(self, numpy_rng, input=None, n_visible=784, n_hidden=100,
                  n_batchsize=1, W=None, bhid=None, bvis=None):
-        """Initialize the cA class by specifying the number of visible units (the
-        dimension d of the input ), the number of hidden units ( the dimension
-        d' of the latent or hidden space ) and the contraction level. The
-        constructor also receives symbolic variables for the input, weights and
-        bias.
+        """Initialize the cA class by specifying the number of visible units
+        (the dimension d of the input), the number of hidden units (the
+        dimension d' of the latent or hidden space) and the contraction level.
+        The constructor also receives symbolic variables for the input, weights
+        and bias.

         :type numpy_rng: numpy.random.RandomState
         :param numpy_rng: number random generator used to generate weights
@@ -161,7 +160,7 @@ def __init__(self, numpy_rng, input=None, n_visible=784, n_hidden=100,
         self.W_prime = self.W.T

         # if no input is given, generate a variable representing the input
-        if input == None:
+        if input is None:
             # we use a matrix because we expect a minibatch of several
             # examples, each example being a row
             self.x = T.dmatrix(name='input')
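
The `input == None` to `input is None` change is flake8's E711. Identity comparison matters here because `==` can be overloaded: for a NumPy array (the kind of array-like object such a constructor may receive), equality with None is not a plain boolean test. A minimal sketch, assuming only NumPy (behavior shown is for recent NumPy versions):

    import numpy

    x = numpy.zeros(3)
    print(x == None)   # elementwise: array([False, False, False])
    print(x is None)   # identity: a single False, which is what we want
    # 'if x == None:' would raise: truth value of an array is ambiguous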

code/convolutional_mlp.py

Lines changed: 13 additions & 16 deletions

@@ -21,8 +21,6 @@
     http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf

 """
-import cPickle
-import gzip
 import os
 import sys
 import time
@@ -53,14 +51,14 @@ def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):

         :type filter_shape: tuple or list of length 4
         :param filter_shape: (number of filters, num input feature maps,
-                              filter height,filter width)
+                              filter height, filter width)

         :type image_shape: tuple or list of length 4
         :param image_shape: (batch size, num input feature maps,
                              image height, image width)

         :type poolsize: tuple or list of length 2
-        :param poolsize: the downsampling (pooling) factor (#rows,#cols)
+        :param poolsize: the downsampling (pooling) factor (#rows, #cols)
         """

         assert image_shape[1] == filter_shape[1]
@@ -104,7 +102,7 @@ def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
         )

         # add the bias term. Since the bias is a vector (1D array), we first
-        # reshape it to a tensor of shape (1,n_filters,1,1). Each bias will
+        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
         # thus be broadcasted across mini-batches and feature map
         # width & height
         self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
@@ -155,21 +153,21 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     x = T.matrix('x')   # the data is presented as rasterized images
     y = T.ivector('y')  # the labels are presented as 1D vector of
                         # [int] labels
-    ishape = (28, 28)  # this is the size of MNIST images

     ######################
     # BUILD ACTUAL MODEL #
     ######################
     print '... building the model'

-    # Reshape matrix of rasterized images of shape (batch_size,28*28)
+    # Reshape matrix of rasterized images of shape (batch_size, 28 * 28)
     # to a 4D tensor, compatible with our LeNetConvPoolLayer
+    # (28, 28) is the size of MNIST images.
     layer0_input = x.reshape((batch_size, 1, 28, 28))

     # Construct the first convolutional pooling layer:
-    # filtering reduces the image size to (28-5+1,28-5+1)=(24,24)
-    # maxpooling reduces this further to (24/2,24/2) = (12,12)
-    # 4D output tensor is thus of shape (batch_size,nkerns[0],12,12)
+    # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)
+    # maxpooling reduces this further to (24/2, 24/2) = (12, 12)
+    # 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)
     layer0 = LeNetConvPoolLayer(
         rng,
         input=layer0_input,
@@ -179,9 +177,9 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     )

     # Construct the second convolutional pooling layer
-    # filtering reduces the image size to (12-5+1,12-5+1)=(8,8)
-    # maxpooling reduces this further to (8/2,8/2) = (4,4)
-    # 4D output tensor is thus of shape (nkerns[0],nkerns[1],4,4)
+    # filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)
+    # maxpooling reduces this further to (8/2, 8/2) = (4, 4)
+    # 4D output tensor is thus of shape (nkerns[0], nkerns[1], 4, 4)
     layer1 = LeNetConvPoolLayer(
         rng,
         input=layer0.output,
@@ -240,7 +238,7 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     # SGD Since this model has many parameters, it would be tedious to
     # manually create an update rule for each model parameter. We thus
     # create the updates list by automatically looping over all
-    # (params[i],grads[i]) pairs.
+    # (params[i], grads[i]) pairs.
     updates = [
         (param_i, param_i - learning_rate * grad_i)
         for param_i, grad_i in zip(params, grads)
@@ -273,7 +271,6 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
                                   # on the validation set; in this case we
                                   # check every epoch

-    best_params = None
     best_validation_loss = numpy.inf
     best_iter = 0
     test_score = 0.
@@ -331,7 +328,7 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,

     end_time = time.clock()
     print('Optimization complete.')
-    print('Best validation score of %f %% obtained at iteration %i,'
+    print('Best validation score of %f %% obtained at iteration %i, '
           'with test performance %f %%' %
           (best_validation_loss * 100., best_iter + 1, test_score * 100.))
     print >> sys.stderr, ('The code for file ' +
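
The reformatted comments track the layer shapes by hand: a 'valid' convolution shrinks each side to image - filter + 1, and non-overlapping max-pooling then divides by the pool size. A small self-contained sketch of that arithmetic (the helper name is made up for illustration):

    def conv_pool_side(image, filt, pool):
        # 'valid' convolution, then non-overlapping max-pooling
        return (image - filt + 1) // pool

    assert conv_pool_side(28, 5, 2) == 12  # layer0: (24, 24) -> (12, 12)
    assert conv_pool_side(12, 5, 2) == 4   # layer1: (8, 8) -> (4, 4)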

code/dA.py

Lines changed: 1 addition & 3 deletions

@@ -30,8 +30,6 @@

 """

-import cPickle
-import gzip
 import os
 import sys
 import time
@@ -185,7 +183,7 @@ def __init__(
         self.W_prime = self.W.T
         self.theano_rng = theano_rng
         # if no input is given, generate a variable representing the input
-        if input == None:
+        if input is None:
             # we use a matrix because we expect a minibatch of several
             # examples, each example being a row
             self.x = T.dmatrix(name='input')

code/hmc/hmc.py

Lines changed: 46 additions & 44 deletions

@@ -8,8 +8,8 @@
 from theano import tensor as TT
 import theano

-sharedX = lambda X, name: \
-    shared(numpy.asarray(X, dtype=theano.config.floatX), name=name)
+sharedX = (lambda X, name:
+           shared(numpy.asarray(X, dtype=theano.config.floatX), name=name))


 def kinetic_energy(vel):
@@ -145,13 +145,14 @@ def leapfrog(pos, vel, step):

     # perform leapfrog updates: the scan op is used to repeatedly compute
     # vel(t + (m-1/2)*stepsize) and pos(t + m*stepsize) for m in [2,n_steps].
-    (all_pos, all_vel), scan_updates = theano.scan(leapfrog,
-            outputs_info=[
-                dict(initial=pos_full_step),
-                dict(initial=vel_half_step),
-            ],
-            non_sequences=[stepsize],
-            n_steps=n_steps - 1)
+    (all_pos, all_vel), scan_updates = theano.scan(
+        leapfrog,
+        outputs_info=[
+            dict(initial=pos_full_step),
+            dict(initial=vel_half_step),
+        ],
+        non_sequences=[stepsize],
+        n_steps=n_steps - 1)
     final_pos = all_pos[-1]
     final_vel = all_vel[-1]
     # NOTE: Scan always returns an updates dictionary, in case the
@@ -171,6 +172,7 @@ def leapfrog(pos, vel, step):
     # return new proposal state
     return final_pos, final_vel

+
 # start-snippet-1
 def hmc_move(s_rng, positions, energy_fn, stepsize, n_steps):
     """
@@ -224,10 +226,11 @@ def hmc_move(s_rng, positions, energy_fn, stepsize, n_steps):
     # end-snippet-4
     return accept, final_pos

+
 # start-snippet-5
 def hmc_updates(positions, stepsize, avg_acceptance_rate, final_pos, accept,
-        target_acceptance_rate, stepsize_inc, stepsize_dec,
-        stepsize_min, stepsize_max, avg_acceptance_slowness):
+                target_acceptance_rate, stepsize_inc, stepsize_dec,
+                stepsize_min, stepsize_max, avg_acceptance_slowness):
     """This function is executed after `n_steps` of HMC sampling
     (`hmc_move` function). It creates the updates dictionary used by
     the `simulate` function. It takes care of updating: the position
@@ -293,14 +296,15 @@ def hmc_updates(positions, stepsize, avg_acceptance_rate, final_pos, accept,
     # perform exponential moving average
     mean_dtype = theano.scalar.upcast(accept.dtype, avg_acceptance_rate.dtype)
     new_acceptance_rate = TT.add(
-            avg_acceptance_slowness * avg_acceptance_rate,
-            (1.0 - avg_acceptance_slowness) * accept.mean(dtype=mean_dtype))
+        avg_acceptance_slowness * avg_acceptance_rate,
+        (1.0 - avg_acceptance_slowness) * accept.mean(dtype=mean_dtype))
     # end-snippet-6 start-snippet-8
     return [(positions, new_positions),
             (stepsize, new_stepsize),
             (avg_acceptance_rate, new_acceptance_rate)]
     # end-snippet-8

+
 class HMC_sampler(object):
     """
     Convenience wrapper for performing Hybrid Monte Carlo (HMC). It creates the
@@ -322,11 +326,11 @@ def __init__(self, **kwargs):

     @classmethod
     def new_from_shared_positions(
-        cls,
-        shared_positions,
+            cls,
+            shared_positions,
             energy_fn,
-        initial_stepsize=0.01,
-        target_acceptance_rate=.9,
+            initial_stepsize=0.01,
+            target_acceptance_rate=.9,
             n_steps=20,
             stepsize_dec=0.98,
             stepsize_min=0.001,
@@ -350,8 +354,6 @@ def new_from_shared_positions(
         sampling to work.

         """
-        batchsize = shared_positions.shape[0]
-
         # allocate shared variables
         stepsize = sharedX(initial_stepsize, 'hmc_stepsize')
         avg_acceptance_rate = sharedX(target_acceptance_rate,
@@ -360,40 +362,40 @@ def new_from_shared_positions(

         # define graph for an `n_steps` HMC simulation
         accept, final_pos = hmc_move(
-                s_rng,
-                shared_positions,
-                energy_fn,
-                stepsize,
-                n_steps)
+            s_rng,
+            shared_positions,
+            energy_fn,
+            stepsize,
+            n_steps)

         # define the dictionary of updates, to apply on every `simulate` call
         simulate_updates = hmc_updates(
-                shared_positions,
-                stepsize,
-                avg_acceptance_rate,
-                final_pos=final_pos,
-                accept=accept,
-                stepsize_min=stepsize_min,
-                stepsize_max=stepsize_max,
-                stepsize_inc=stepsize_inc,
-                stepsize_dec=stepsize_dec,
-                target_acceptance_rate=target_acceptance_rate,
-                avg_acceptance_slowness=avg_acceptance_slowness)
+            shared_positions,
+            stepsize,
+            avg_acceptance_rate,
+            final_pos=final_pos,
+            accept=accept,
+            stepsize_min=stepsize_min,
+            stepsize_max=stepsize_max,
+            stepsize_inc=stepsize_inc,
+            stepsize_dec=stepsize_dec,
+            target_acceptance_rate=target_acceptance_rate,
+            avg_acceptance_slowness=avg_acceptance_slowness)

         # compile theano function
         simulate = function([], [], updates=simulate_updates)

         # create HMC_sampler object with the following attributes ...
         return cls(
-                positions=shared_positions,
-                stepsize=stepsize,
-                stepsize_min=stepsize_min,
-                stepsize_max=stepsize_max,
-                avg_acceptance_rate=avg_acceptance_rate,
-                target_acceptance_rate=target_acceptance_rate,
-                s_rng=s_rng,
-                _updates=simulate_updates,
-                simulate=simulate)
+            positions=shared_positions,
+            stepsize=stepsize,
+            stepsize_min=stepsize_min,
+            stepsize_max=stepsize_max,
+            avg_acceptance_rate=avg_acceptance_rate,
+            target_acceptance_rate=target_acceptance_rate,
+            s_rng=s_rng,
+            _updates=simulate_updates,
+            simulate=simulate)

     def draw(self, **kwargs):
         """
