@@ -50,6 +50,13 @@ def __init__(self, input=None, n_visible=784, n_hidden=500, \
         self.n_hidden = n_hidden


+        if numpy_rng is None:
+            # create a number generator
+            numpy_rng = numpy.random.RandomState(1234)
+
+        if theano_rng is None:
+            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
+
         if W is None:
             # W is initialized with `initial_W` which is uniformly sampled
             # from -4*sqrt(6./(n_visible+n_hidden)) and 4*sqrt(6./(n_hidden+n_visible))
@@ -73,13 +80,6 @@ def __init__(self, input=None, n_visible=784, n_hidden=500, \
             vbias = theano.shared(value=numpy.zeros(n_visible,
                                   dtype=theano.config.floatX), name='vbias')

-        if numpy_rng is None:
-            # create a number generator
-            numpy_rng = numpy.random.RandomState(1234)
-
-        if theano_rng is None:
-            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
-

         # initialize input layer for standalone RBM or layer0 of DBN
         self.input = input
@@ -93,10 +93,6 @@ def __init__(self, input=None, n_visible=784, n_hidden=500, \
         # **** WARNING: It is not a good idea to put things in this list
         # other than shared variables created in this function.
         self.params = [self.W, self.hbias, self.vbias]
-        # cast batch_size to floatX, because its type is int64,
-        # and otherwise the gradients are upcasted to float64,
-        # even when floatX == float32
-        self.batch_size = T.cast(self.input.shape[0], dtype=theano.config.floatX)


     def free_energy(self, v_sample):
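The point of the reordering in the first two hunks: when `W is None`, the constructor samples `initial_W` with `numpy_rng`, so the RNG must exist before the weight block runs, not after it. A minimal standalone sketch of that ordering dependency, assuming the default sizes from the signature and the sampling bounds quoted in the comment (the `bound` helper is ours, for illustration, not a name from the file):

```python
import numpy
import theano

n_visible, n_hidden = 784, 500  # defaults from the constructor signature
numpy_rng = None
W = None

# The RNG has to be created *before* W is sampled -- this is why the
# commit moves the `numpy_rng is None` check above the `W is None` block.
if numpy_rng is None:
    numpy_rng = numpy.random.RandomState(1234)

if W is None:
    # bounds as in the comment: +/- 4*sqrt(6./(n_visible+n_hidden))
    bound = 4 * numpy.sqrt(6. / (n_hidden + n_visible))
    initial_W = numpy.asarray(
        numpy_rng.uniform(low=-bound, high=bound,
                          size=(n_visible, n_hidden)),
        dtype=theano.config.floatX)
    W = theano.shared(value=initial_W, name='W')
```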
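The deleted `batch_size` lines in the last hunk carried their own rationale: `input.shape[0]` has dtype `int64`, and mixing it into a `float32` graph promotes the result, and hence the gradients, to `float64` even when `floatX == float32`. A small sketch of the pattern those lines used, assuming `floatX == float32` (the `cost` expression here is illustrative, not from the file):

```python
import theano
import theano.tensor as T

x = T.matrix('x')  # floatX, i.e. float32 under the assumption above

# x.shape[0] is int64; dividing a float32 cost by it directly would
# upcast the result and its gradients to float64.  Casting to floatX
# first keeps the whole graph in float32, as the removed lines did:
batch_size = T.cast(x.shape[0], dtype=theano.config.floatX)

cost = T.sum(x ** 2) / batch_size  # stays floatX
grad = T.grad(cost, x)             # gradient stays floatX as well
```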