@@ -307,7 +307,7 @@ Theano variable to it that has a default value.
307307 # get the cost and the updates list
308308 # using CD-k here (persistent=None) for training each RBM.
309309 # TODO: change cost function to reconstruction error
310- cost,updates = rbm.cd(learning_rate, persistent=None, k)
310+ cost, updates = rbm.cd(learning_rate, persistent=None, k=k)
311311
312312 # compile the Theano function; check if k is also a Theano
313313 # variable; if so, add it to the inputs of the function
@@ -316,10 +316,10 @@ Theano variable to it that has a default value.
316316 else:
317317 inputs = [index, theano.Param(learning_rate, default=0.1)]
318318 fn = theano.function(inputs=inputs,
319- outputs=cost,
320- updates=updates,
321- givens={self.x: train_set_x[batch_begin:
322- batch_end]})
319+ outputs=cost,
320+ updates=updates,
321+ givens={self.x: train_set_x[batch_begin:
322+ batch_end]})
323323 # append `fn` to the list of functions
324324 pretrain_fns.append(fn)
325325
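The first hunk relies on `theano.Param` to give the symbolic learning rate a default value, so the compiled pre-training functions can be called without passing it explicitly. A minimal self-contained sketch of that behaviour, written against the Theano 0.x API the tutorial targets (`theano.Param` was later superseded by `theano.In`), not the tutorial's own code:

    import theano
    import theano.tensor as T

    x = T.scalar('x')
    lr = T.scalar('lr')

    # `default=0.1` lets callers omit the learning rate entirely
    step = theano.function(inputs=[x, theano.Param(lr, default=0.1)],
                           outputs=x - lr * x)

    print(step(1.0))       # default lr = 0.1  -> 0.9
    print(step(1.0, 0.5))  # explicit lr = 0.5 -> 0.5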
@@ -369,9 +369,9 @@ and a ``test_model`` function).
369369 gparams = T.grad(self.finetune_cost, self.params)
370370
371371 # compute list of fine-tuning updates
372- updates = {}
372+ updates = []
373373 for param, gparam in zip(self.params, gparams):
374- updates[ param] = param - gparam* learning_rate
374+ updates.append((param, param - gparam * learning_rate))
375375
376376 train_fn = theano.function(inputs=[index],
377377 outputs= self.finetune_cost,
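The second hunk switches `updates` from a plain dict to a list of `(parameter, new_value)` pairs, which is the form `theano.function` accepts without ambiguity about update order. A minimal self-contained sketch of that gradient-descent update pattern; the toy cost and shared variable below are illustrative only, not the DBN's fine-tuning cost:

    import numpy
    import theano
    import theano.tensor as T

    learning_rate = 0.1
    x = T.vector('x')
    w = theano.shared(numpy.ones(3, dtype=theano.config.floatX), name='w')
    cost = T.sum((w - x) ** 2)

    # list of (parameter, update expression) pairs, as in the hunk above
    gparams = T.grad(cost, [w])
    updates = []
    for param, gparam in zip([w], gparams):
        updates.append((param, param - gparam * learning_rate))

    train_fn = theano.function(inputs=[x], outputs=cost, updates=updates)
    print(train_fn(numpy.zeros(3, dtype=theano.config.floatX)))  # cost before the step
    print(w.get_value())  # w has moved toward x by one gradient step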