@@ -195,10 +195,10 @@ respectively.
195195 rval2: dictionary
196196 Dictionary of updates for the Scan Op
197197 """
198- # from pos(t) and vel(t- eps/2), compute vel(t+ eps/ 2)
198+ # from pos(t) and vel(t - eps/2), compute vel(t + eps / 2)
199199 dE_dpos = TT.grad(energy_fn(pos).sum(), pos)
200200 new_vel = vel - step * dE_dpos
201- # from vel(t+ eps/ 2) compute pos(t+ eps)
201+ # from vel(t + eps / 2) compute pos(t + eps)
202202 new_pos = pos + step * new_vel
203203
204204 return [new_pos, new_vel],{}
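
For reference, the half-step bookkeeping these comments describe can be sketched outside Theano. A minimal NumPy-style version of one leapfrog update (illustrative only; `grad_energy` is a hypothetical callable returning dE/dpos):

    def leapfrog_step(pos, vel, step, grad_energy):
        # on entry, vel is assumed to lag pos by half a step
        # from pos(t) and vel(t - eps/2), compute vel(t + eps/2)
        new_vel = vel - step * grad_energy(pos)
        # from vel(t + eps/2), compute pos(t + eps)
        new_pos = pos + step * new_vel
        return new_pos, new_vel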
@@ -238,10 +238,10 @@ and full-step of :math:`s`, and then scan over the `leapfrog` method
238238 def leapfrog(pos, vel, step):
239239 """ ... """
240240
241- # compute velocity at time-step: t + stepsize/ 2
241+ # compute velocity at time-step: t + stepsize / 2
242242 initial_energy = energy_fn(initial_pos)
243243 dE_dpos = TT.grad(initial_energy.sum(), initial_pos)
244- vel_half_step = initial_vel - 0.5* stepsize* dE_dpos
244+ vel_half_step = initial_vel - 0.5 * stepsize * dE_dpos
245245
246246 # compute position at time-step: t + stepsize
247247 pos_full_step = initial_pos + stepsize * vel_half_step
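
In equation form, the two statements above perform the initial half-step of the velocity and the first full step of the position, :math:`v(t + \epsilon/2) = v(t) - \tfrac{\epsilon}{2}\,\nabla E(q(t))` and :math:`q(t + \epsilon) = q(t) + \epsilon\, v(t + \epsilon/2)`; the scan over `leapfrog` then alternates full velocity and position steps.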
@@ -346,8 +346,8 @@ We then accept/reject the proposed state based on the Metropolis algorithm.
346346
347347 # accept/reject the proposed move based on the joint distribution
348348 accept = metropolis_hastings_accept(
349- energy_prev = hamiltonian(positions, initial_vel, energy_fn),
350- energy_next = hamiltonian(final_pos, final_vel, energy_fn),
349+ energy_prev= hamiltonian(positions, initial_vel, energy_fn),
350+ energy_next= hamiltonian(final_pos, final_vel, energy_fn),
351351 s_rng=s_rng)
352352
353353 where `metropolis\_hastings\_accept` and `hamiltonian` are helper functions,

@@ -387,7 +387,7 @@ defined as follows.
387387
388388 def kinetic_energy(vel):
389389 """ ... """
390- return 0.5 * (vel** 2).sum(axis=1)
390+ return 0.5 * (vel ** 2).sum(axis=1)
391391
392392 `hmc\_move` finally returns the tuple `(accept, final\_pos)`. `accept` is a
393393 symbolic boolean variable indicating whether or not the new state `final_pos`
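
The two helpers referenced above follow the standard Hamiltonian / Metropolis pattern: the Hamiltonian is the potential energy plus the kinetic energy shown above, and a move is accepted with probability min(1, exp(E_prev - E_next)). A hedged NumPy sketch of what such helpers look like (function and argument names follow the text; the bodies are illustrative, with a plain NumPy `rng` standing in for the Theano random stream):

    import numpy as np

    def hamiltonian(pos, vel, energy_fn):
        # potential energy (one value per sample) plus kinetic energy
        return energy_fn(pos) + 0.5 * (vel ** 2).sum(axis=1)

    def metropolis_hastings_accept(energy_prev, energy_next, rng):
        # accept with probability min(1, exp(E_prev - E_next))
        ediff = energy_prev - energy_next
        return np.exp(ediff) >= rng.uniform(size=energy_prev.shape)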
@@ -415,7 +415,7 @@ state.
415415
416416 ## POSITION UPDATES ##
417417 # broadcast `accept` scalar to tensor with the same dimensions as final_pos.
418- accept_matrix = accept.dimshuffle(0, *(('x',)* (final_pos.ndim- 1)))
418+ accept_matrix = accept.dimshuffle(0, *(('x',) * (final_pos.ndim - 1)))
419419 # if accept is True, update to `final_pos` else stay put
420420 new_positions = TT.switch(accept_matrix, final_pos, positions)
421421
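
The `dimshuffle` call reshapes the per-chain `accept` vector to shape `(batchsize, 1, ..., 1)` so that it broadcasts against `final_pos` inside `TT.switch`. The same pattern in NumPy terms, with hypothetical shapes chosen only for illustration:

    import numpy as np

    accept = np.array([True, False, True])   # one decision per chain
    final_pos = np.zeros((3, 5))              # hypothetical (batchsize, dim)
    positions = np.ones((3, 5))
    # reshape accept to (3, 1) so it broadcasts over the remaining axes
    accept_matrix = accept.reshape(accept.shape[0], *([1] * (final_pos.ndim - 1)))
    new_positions = np.where(accept_matrix, final_pos, positions)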
@@ -506,11 +506,11 @@ elements are:
506506 @classmethod
507507 def new_from_shared_positions(cls, shared_positions, energy_fn,
508508 initial_stepsize=0.01, target_acceptance_rate=.9, n_steps=20,
509- stepsize_dec = 0.98,
510- stepsize_min = 0.001,
511- stepsize_max = 0.25,
512- stepsize_inc = 1.02,
513- avg_acceptance_slowness = 0.9, # used in geometric avg. 1.0 would be not moving at all
509+ stepsize_dec= 0.98,
510+ stepsize_min= 0.001,
511+ stepsize_max= 0.25,
512+ stepsize_inc= 1.02,
513+ avg_acceptance_slowness= 0.9, # used in geometric avg. 1.0 would be not moving at all
514514 seed=12345):
515515 """
516516 :param shared_positions: theano ndarray shared var with many particle [initial] positions
@@ -616,7 +616,7 @@ compare the empirical mean and covariance matrix to their true values.
616616
617617 # Define energy function for a multi-variate Gaussian
618618 def gaussian_energy(x):
619- return 0.5 * (TT.dot((x- mu),cov_inv)*(x- mu)).sum(axis=1)
619+ return 0.5 * (TT.dot((x - mu), cov_inv) * (x - mu)).sum(axis=1)
620620
621621 # Declared shared random variable for positions
622622 position = shared(rng.randn(batchsize, dim).astype(theano.config.floatX))
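
`gaussian_energy` is the negative log-density of a multivariate Gaussian up to an additive constant, :math:`E(x) = \tfrac{1}{2}(x - \mu)^{\top} \Sigma^{-1} (x - \mu)`, evaluated row-wise over the batch. An equivalent NumPy form for checking (the helper name is ours, for illustration only):

    import numpy as np

    def gaussian_energy_np(x, mu, cov_inv):
        # 0.5 * (x - mu)^T cov_inv (x - mu) for each row of x
        d = x - mu
        return 0.5 * np.einsum('ij,jk,ik->i', d, cov_inv, d)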
@@ -626,11 +626,11 @@ compare the empirical mean and covariance matrix to their true values.
626626 initial_stepsize=1e-3, stepsize_max=0.5)
627627
628628 # Start with a burn-in process
629- garbage = [sampler.draw() for r in xrange(burnin)] #burn-in
629+ garbage = [sampler.draw() for r in xrange(burnin)] #burn-in
630630 # Draw `n_samples`: result is a 3D tensor of dim [n_samples, batchsize, dim]
631631 _samples = np.asarray([sampler.draw() for r in xrange(n_samples)])
632632 # Flatten to [n_samples * batchsize, dim]
633- samples = _samples.T.reshape(dim,-1).T
633+ samples = _samples.T.reshape(dim, -1).T
634634
635635 print '****** TARGET VALUES ******'
636636 print 'target mean:', mu
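
The `.T.reshape(dim, -1).T` idiom above flattens the `(n_samples, batchsize, dim)` array into `(n_samples * batchsize, dim)` rows, so each row remains a full `dim`-dimensional sample. A small shape check with hypothetical sizes:

    import numpy as np

    n_samples, batchsize, dim = 7, 3, 2   # hypothetical sizes
    _samples = np.zeros((n_samples, batchsize, dim))
    samples = _samples.T.reshape(dim, -1).T
    assert samples.shape == (n_samples * batchsize, dim)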