@@ -414,24 +414,24 @@ algorithm in Theano can be done as follows :
 
 .. code-block:: python
 
-    # Minibatch Stochastic Gradient Descent
-
-    # assume loss is a symbolic description of the loss function given
-    # the symbolic variables params (shared variable), x_batch, y_batch;
+    # Minibatch Stochastic Gradient Descent
+
+    # assume loss is a symbolic description of the loss function given
+    # the symbolic variables params (shared variable), x_batch, y_batch;
 
-    # compute gradient of loss with respect to params
-    d_loss_wrt_params = T.grad(loss, params)
+    # compute gradient of loss with respect to params
+    d_loss_wrt_params = T.grad(loss, params)
 
-    # compile the MSGD step into a theano function
-    updates = { params: params - learning_rate * d_loss_wrt_params}
-    MSGD = theano.function([x_batch,y_batch], loss, updates = updates)
-
-    for (x_batch, y_batch) in train_batches:
-        # here x_batch and y_batch are elements of train_batches and
-        # therefore numpy arrays; function MSGD also updates the params
-        print('Current loss is ', MSGD(x_batch, y_batch))
-        if <stopping condition is met> :
-            return params
+    # compile the MSGD step into a theano function
+    updates = { params: params - learning_rate * d_loss_wrt_params}
+    MSGD = theano.function([x_batch,y_batch], loss, updates = updates)
+
+    for (x_batch, y_batch) in train_batches:
+        # here x_batch and y_batch are elements of train_batches and
+        # therefore numpy arrays; function MSGD also updates the params
+        print('Current loss is ', MSGD(x_batch, y_batch))
+        if stopping_condition_is_met :
+            return params
 
 
 .. index:: Regularization
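The snippet in the diff above assumes that ``loss``, ``params``, ``x_batch``, ``y_batch``, and ``train_batches`` are already defined elsewhere in the tutorial. A minimal self-contained sketch of the same MSGD pattern, assuming a toy linear-regression loss, synthetic minibatches, and a fixed epoch count in place of the stopping condition (all illustrative assumptions, not part of the tutorial), could look like this:

.. code-block:: python

    import numpy as np
    import theano
    import theano.tensor as T

    # symbolic inputs for one minibatch
    x_batch = T.matrix('x')
    y_batch = T.vector('y')

    # model parameters as a single shared variable (weights of a linear model)
    params = theano.shared(np.zeros(3, dtype=theano.config.floatX), name='params')

    # assumed toy loss: mean squared error of the linear prediction x . params
    loss = T.mean((T.dot(x_batch, params) - y_batch) ** 2)

    learning_rate = 0.1
    d_loss_wrt_params = T.grad(loss, params)

    # compile the MSGD step; each call also updates params in place
    MSGD = theano.function(
        [x_batch, y_batch], loss,
        updates=[(params, params - learning_rate * d_loss_wrt_params)])

    # synthetic data split into minibatches of 50 examples (illustrative only)
    rng = np.random.RandomState(0)
    X = rng.randn(200, 3).astype(theano.config.floatX)
    y = X.dot(np.array([1., -2., 0.5])).astype(theano.config.floatX)
    train_batches = [(X[i:i + 50], y[i:i + 50]) for i in range(0, 200, 50)]

    # a fixed number of epochs stands in for the stopping condition
    for epoch in range(20):
        for (xb, yb) in train_batches:
            current_loss = MSGD(xb, yb)
        print('Current loss is ', current_loss)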