
Commit 0440226

Use list of pairs for updates, not dict, to remove warning
1 parent 400c637 commit 0440226

16 files changed: +55 −51 lines
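
The change is the same throughout: the `updates` argument of `theano.function` is built as a list of `(shared_variable, new_value)` pairs instead of a plain dict, whose unspecified iteration order is what triggers the warning mentioned in the commit message. A minimal sketch of the two forms, using illustrative variable names that are not part of this commit:

import numpy
import theano
import theano.tensor as T

# Toy model with a single shared parameter, just to show the updates format.
x = T.vector('x')
w = theano.shared(numpy.zeros(3, dtype=theano.config.floatX), name='w')
cost = T.sum((T.dot(x, w) - 1) ** 2)
g_w = T.grad(cost, w)
learning_rate = 0.1

# Old style: a plain dict {shared_variable: new_value}. It works, but its
# iteration order is not deterministic, which is what Theano warns about.
# updates = {w: w - learning_rate * g_w}

# New style used in this commit: an ordered list of (shared_variable, new_value) pairs.
updates = [(w, w - learning_rate * g_w)]

train = theano.function(inputs=[x], outputs=cost, updates=updates)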

code/DBN.py

Lines changed: 2 additions & 2 deletions
@@ -220,9 +220,9 @@ def build_finetune_functions(self, datasets, batch_size, learning_rate):
         gparams = T.grad(self.finetune_cost, self.params)

         # compute list of fine-tuning updates
-        updates = {}
+        updates = []
         for param, gparam in zip(self.params, gparams):
-            updates[param] = param - gparam * learning_rate
+            updates.append((param, param - gparam * learning_rate))

         train_fn = theano.function(inputs=[index],
                                    outputs=self.finetune_cost,

code/SdA.py

Lines changed: 2 additions & 2 deletions
@@ -252,9 +252,9 @@ def build_finetune_functions(self, datasets, batch_size, learning_rate):
         gparams = T.grad(self.finetune_cost, self.params)

         # compute list of fine-tuning updates
-        updates = {}
+        updates = []
         for param, gparam in zip(self.params, gparams):
-            updates[param] = param - gparam * learning_rate
+            updates.append((param, param - gparam * learning_rate))

         train_fn = theano.function(inputs=[index],
                                    outputs=self.finetune_cost,

code/cA.py

Lines changed: 2 additions & 2 deletions
@@ -213,9 +213,9 @@ def get_cost_updates(self, contraction_level, learning_rate):
         # to its parameters
         gparams = T.grad(cost, self.params)
         # generate the list of updates
-        updates = {}
+        updates = []
         for param, gparam in zip(self.params, gparams):
-            updates[param] = param - learning_rate * gparam
+            updates.append((param, param - learning_rate * gparam))

         return (cost, updates)

code/convolutional_mlp.py

Lines changed: 3 additions & 3 deletions
@@ -206,11 +206,11 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     # train_model is a function that updates the model parameters by
     # SGD Since this model has many parameters, it would be tedious to
     # manually create an update rule for each model parameter. We thus
-    # create the updates dictionary by automatically looping over all
+    # create the updates list by automatically looping over all
     # (params[i],grads[i]) pairs.
-    updates = {}
+    updates = []
     for param_i, grad_i in zip(params, grads):
-        updates[param_i] = param_i - learning_rate * grad_i
+        updates.append((param_i, param_i - learning_rate * grad_i))

     train_model = theano.function([index], cost, updates=updates,
             givens={

code/dA.py

Lines changed: 2 additions & 2 deletions
@@ -229,9 +229,9 @@ def get_cost_updates(self, corruption_level, learning_rate):
         # to its parameters
         gparams = T.grad(cost, self.params)
         # generate the list of updates
-        updates = {}
+        updates = []
         for param, gparam in zip(self.params, gparams):
-            updates[param] = param - learning_rate * gparam
+            updates.append((param, param - learning_rate * gparam))

         return (cost, updates)

code/logistic_sgd.py

Lines changed: 4 additions & 3 deletions
@@ -279,9 +279,10 @@ def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
     g_W = T.grad(cost=cost, wrt=classifier.W)
     g_b = T.grad(cost=cost, wrt=classifier.b)

-    # specify how to update the parameters of the model as a dictionary
-    updates = {classifier.W: classifier.W - learning_rate * g_W,
-               classifier.b: classifier.b - learning_rate * g_b}
+    # specify how to update the parameters of the model as a list of
+    # (variable, update expression) pairs.
+    updates = [(classifier.W, classifier.W - learning_rate * g_W),
+               (classifier.b, classifier.b - learning_rate * g_b)]

     # compiling a Theano function `train_model` that returns the cost, but in
     # the same time updates the parameter of the model based on the rules
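
For what it's worth, a `collections.OrderedDict` would also silence the ordering warning while keeping dict-style access, since `theano.function` accepts it for `updates` as well; the list-of-pairs form used above is simply the lighter fix. A hedged sketch with illustrative names, not taken from the commit:

from collections import OrderedDict

import numpy
import theano
import theano.tensor as T

x = T.vector('x')
W = theano.shared(numpy.zeros(3, dtype=theano.config.floatX), name='W')
b = theano.shared(numpy.asarray(0.0, dtype=theano.config.floatX), name='b')
cost = T.sum((T.dot(x, W) + b - 1) ** 2)
g_W, g_b = T.grad(cost, [W, b])

# An OrderedDict has a deterministic iteration order, so the parameter
# updates are applied in a reproducible order, just like a list of pairs.
updates = OrderedDict([(W, W - 0.13 * g_W),
                       (b, b - 0.13 * g_b)])

train_model = theano.function(inputs=[x], outputs=cost, updates=updates)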

code/mlp.py

Lines changed: 4 additions & 3 deletions
@@ -258,14 +258,15 @@ def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
         gparam = T.grad(cost, param)
         gparams.append(gparam)

-    # specify how to update the parameters of the model as a dictionary
-    updates = {}
+    # specify how to update the parameters of the model as a list of
+    # (variable, update expression) pairs
+    updates = []
     # given two list the zip A = [a1, a2, a3, a4] and B = [b1, b2, b3, b4] of
     # same length, zip generates a list C of same size, where each element
     # is a pair formed from the two lists :
     # C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]
     for param, gparam in zip(classifier.params, gparams):
-        updates[param] = param - learning_rate * gparam
+        updates.append((param, param - learning_rate * gparam))

     # compiling a Theano function `train_model` that returns the cost, but
     # in the same time updates the parameter of the model based on the rules

doc/DBN.txt

Lines changed: 7 additions & 7 deletions
@@ -307,7 +307,7 @@ Theano variable to it that has a default value.
             # get the cost and the updates list
             # using CD-k here (persisent=None) for training each RBM.
             # TODO: change cost function to reconstruction error
-            cost,updates = rbm.cd(learning_rate, persistent=None, k)
+            cost, updates = rbm.cd(learning_rate, persistent=None, k)

             # compile the Theano function; check if k is also a Theano
             # variable, if so added to the inputs of the function
@@ -316,10 +316,10 @@ Theano variable to it that has a default value.
             else:
                 inputs = [index, theano.Param(learning_rate, default=0.1)]
             fn = theano.function(inputs=inputs,
-                    outputs=cost,
-                    updates=updates,
-                    givens={self.x: train_set_x[batch_begin:
-                                                batch_end]})
+                                 outputs=cost,
+                                 updates=updates,
+                                 givens={self.x: train_set_x[batch_begin:
+                                                             batch_end]})
             # append `fn` to the list of functions
             pretrain_fns.append(fn)

@@ -369,9 +369,9 @@ and a ``test_model`` function).
        gparams = T.grad(self.finetune_cost, self.params)

        # compute list of fine-tuning updates
-        updates = {}
+        updates = []
        for param, gparam in zip(self.params, gparams):
-            updates[param] = param - gparam*learning_rate
+            updates.append((param, param - gparam * learning_rate))

        train_fn = theano.function(inputs=[index],
                                   outputs= self.finetune_cost,

doc/LICENSE.txt

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@
 LICENSE
 =======

-Copyright (c) 2008--2009, Theano Development Team
+Copyright (c) 2008--2013, Theano Development Team
 All rights reserved.

 Redistribution and use in source and binary forms, with or without

doc/SdA.txt

Lines changed: 4 additions & 4 deletions
@@ -249,7 +249,7 @@ during training we associate a Theano variable to them.
         pretrain_fns = []
         for dA in self.dA_layers:
             # get the cost and the updates list
-            cost,updates = dA.get_cost_updates(corruption_level, learning_rate)
+            cost, updates = dA.get_cost_updates(corruption_level, learning_rate)
             # compile the theano function
             fn = theano.function(inputs=[index,
                              theano.Param(corruption_level, default=0.2),
@@ -271,7 +271,7 @@ in mind when working with Theano.

 In the same fashion we build a method for constructing function required
 during finetuning ( a ``train_model``, a ``validate_model`` and a
-``test_model`` funcion).
+``test_model`` function).

 .. code-block:: python

@@ -309,9 +309,9 @@ during finetuning ( a ``train_model``, a ``validate_model`` and a
         gparams = T.grad(self.finetune_cost, self.params)

         # compute list of fine-tuning updates
-        updates = {}
+        updates = []
         for param, gparam in zip(self.params, gparams):
-            updates[param] = param - gparam * learning_rate
+            updates.append((param, param - gparam * learning_rate))

         train_fn = theano.function(inputs=[index],
                                    outputs=self.finetune_cost,
