@@ -20,25 +20,24 @@ def test_mlp():
2020
2121
2222def test_convolutional_mlp ():
23- convolutional_mlp .evaluate_lenet5 (n_epochs = 5 ,nkerns = [5 ,5 ])
23+ convolutional_mlp .evaluate_lenet5 (n_epochs = 5 , nkerns = [5 , 5 ])
2424
2525
2626def test_dA ():
27- dA .test_dA (training_epochs = 3 , output_folder = 'tmp_dA_plots' )
27+ dA .test_dA (training_epochs = 3 , output_folder = 'tmp_dA_plots' )
2828
2929
3030def test_SdA ():
31- SdA .test_SdA (pretraining_epochs = 2 , training_epochs = 3 , batch_size = 300 )
31+ SdA .test_SdA (pretraining_epochs = 2 , training_epochs = 3 , batch_size = 300 )
3232
3333
3434def test_dbn ():
35- DBN .test_DBN (pretraining_epochs = 1 , training_epochs = 2 , batch_size = 300 )
36-
35+ DBN .test_DBN (pretraining_epochs = 1 , training_epochs = 2 , batch_size = 300 )
3736
3837
3938def test_rbm ():
40- rbm .test_rbm (training_epochs = 1 , batch_size = 300 , n_chains = 1 , n_samples = 1 ,
41- output_folder = 'tmp_rbm_plots' )
39+ rbm .test_rbm (training_epochs = 1 , batch_size = 300 , n_chains = 1 , n_samples = 1 ,
40+ output_folder = 'tmp_rbm_plots' )
4241
4342
4443def speed ():
@@ -47,15 +46,16 @@ def speed():
4746 I want it to be compatible with python2.4 so using try: finaly: is not an option.
4847 """
4948
50- algo = ['logistic_sgd' ,'logistic_cg' ,'mlp' ,'convolutional_mlp' ,'dA' ,'SdA' ,'DBN' ,'rbm' ]
51- to_exec = [True ]* len (algo )
49+ algo = ['logistic_sgd' , 'logistic_cg' , 'mlp' , 'convolutional_mlp' ,
50+ 'dA' , 'SdA' , 'DBN' , 'rbm' ]
51+ to_exec = [True ] * len (algo )
5252# to_exec=[False]*len(algo)
5353# to_exec[-1]=True
54- do_float64 = True
55- do_float32 = True
56- do_gpu = True
54+ do_float64 = True
55+ do_float32 = True
56+ do_gpu = True
5757
58- algo_executed = [s for idx ,s in enumerate (algo ) if to_exec [idx ]]
58+ algo_executed = [s for idx , s in enumerate (algo ) if to_exec [idx ]]
5959 #Timming expected are from the buildbot that have
6060 # an i7-920 @ 2.67GHz with hyperthread enabled for the cpu
6161 # and an GeForce GTX 285 for the GPU.
@@ -80,112 +80,137 @@ def speed():
8080# sda dbn rbm
8181#expected/get [0.82492841, 0.75984178, 0.65092691, 1.04930573, 0.93125138
8282# 1.35324519 1.7356905 1.12937868]
83- expected_times_gpu = numpy .asarray ([3.07663488 , 7.55523491 , 18.99226785 , 9.1 , 24.13007045 ,
83+ expected_times_gpu = numpy .asarray ([3.07663488 , 7.55523491 , 18.99226785 ,
84+ 9.1 , 24.13007045 ,
8485 18.3 , 53.4 , 285.4 ])
85- expected_times_64 = [s for idx ,s in enumerate (expected_times_64 ) if to_exec [idx ]]
86- expected_times_32 = [s for idx ,s in enumerate (expected_times_32 ) if to_exec [idx ]]
87- expected_times_gpu = [s for idx ,s in enumerate (expected_times_gpu ) if to_exec [idx ]]
88-
89- def time_test (m ,l ,idx ,f ,** kwargs ):
86+ expected_times_64 = [s for idx , s in enumerate (expected_times_64 )
87+ if to_exec [idx ]]
88+ expected_times_32 = [s for idx , s in enumerate (expected_times_32 )
89+ if to_exec [idx ]]
90+ expected_times_gpu = [s for idx , s in enumerate (expected_times_gpu )
91+ if to_exec [idx ]]
92+
93+ def time_test (m , l , idx , f , ** kwargs ):
9094 if not to_exec [idx ]:
9195 return
9296 print algo [idx ]
93- ts = m .call_time
97+ ts = m .call_time
9498 try :
9599 f (** kwargs )
96100 except Exception , e :
97101 print >> sys .stderr , 'test' , algo [idx ], 'FAILED' , e
98102 l .append (numpy .nan )
99103 return
100- te = m .call_time
101- l .append (te - ts )
104+ te = m .call_time
105+ l .append (te - ts )
102106
103107 def do_tests ():
104- m = theano .compile .mode .get_default_mode ()
105- l = []
106- time_test (m ,l ,0 , logistic_sgd .sgd_optimization_mnist ,n_epochs = 30 )
107- time_test (m ,l ,1 , logistic_cg .cg_optimization_mnist ,n_epochs = 30 )
108- time_test (m ,l ,2 , mlp .test_mlp , n_epochs = 5 )
109- time_test (m ,l ,3 , convolutional_mlp .evaluate_lenet5 , n_epochs = 5 ,nkerns = [5 ,5 ])
110- time_test (m ,l ,4 , dA .test_dA , training_epochs = 2 , output_folder = 'tmp_dA_plots' )
111- time_test (m ,l ,5 , SdA .test_SdA , pretraining_epochs = 1 , training_epochs = 2 , batch_size = 300 )
112- time_test (m ,l ,6 , DBN .test_DBN , pretraining_epochs = 1 , training_epochs = 2 , batch_size = 300 )
113- time_test (m ,l ,7 , rbm .test_rbm , training_epochs = 1 , batch_size = 300 , n_chains = 1 , n_samples = 1 , output_folder = 'tmp_rbm_plots' )
108+ m = theano .compile .mode .get_default_mode ()
109+ l = []
110+ time_test (m , l , 0 , logistic_sgd .sgd_optimization_mnist , n_epochs = 30 )
111+ time_test (m , l , 1 , logistic_cg .cg_optimization_mnist , n_epochs = 30 )
112+ time_test (m , l , 2 , mlp .test_mlp , n_epochs = 5 )
113+ time_test (m , l , 3 , convolutional_mlp .evaluate_lenet5 , n_epochs = 5 ,
114+ nkerns = [5 , 5 ])
115+ time_test (m , l , 4 , dA .test_dA , training_epochs = 2 ,
116+ output_folder = 'tmp_dA_plots' )
117+ time_test (m , l , 5 , SdA .test_SdA , pretraining_epochs = 1 ,
118+ training_epochs = 2 , batch_size = 300 )
119+ time_test (m , l , 6 , DBN .test_DBN , pretraining_epochs = 1 ,
120+ training_epochs = 2 , batch_size = 300 )
121+ time_test (m , l , 7 , rbm .test_rbm , training_epochs = 1 , batch_size = 300 ,
122+ n_chains = 1 , n_samples = 1 , output_folder = 'tmp_rbm_plots' )
114123 return numpy .asarray (l )
115124
116-
117125 #test in float64 in FAST_RUN mode on the cpu
126+ import theano
118127 if do_float64 :
119- theano .config .floatX = 'float64'
120- theano .config .mode = 'FAST_RUN'
121- float64_times = do_tests ()
128+ theano .config .floatX = 'float64'
129+ theano .config .mode = 'FAST_RUN'
130+ float64_times = do_tests ()
122131 print >> sys .stderr , algo_executed
123- print >> sys .stderr , 'float64 times' ,float64_times
124- print >> sys .stderr , 'float64 expected' ,expected_times_64
125- print >> sys .stderr , 'float64 % expected/get' ,expected_times_64 / float64_times
132+ print >> sys .stderr , 'float64 times' , float64_times
133+ print >> sys .stderr , 'float64 expected' , expected_times_64
134+ print >> sys .stderr , 'float64 % expected/get' , (
135+ expected_times_64 / float64_times )
126136
127137 #test in float32 in FAST_RUN mode on the cpu
128- theano .config .floatX = 'float32'
138+ theano .config .floatX = 'float32'
129139 if do_float32 :
130- float32_times = do_tests ()
140+ float32_times = do_tests ()
131141 print >> sys .stderr , algo_executed
132- print >> sys .stderr , 'float32 times' ,float32_times
133- print >> sys .stderr , 'float32 expected' ,expected_times_32
134- print >> sys .stderr , 'float32 % expected/get' ,expected_times_32 / float32_times
142+ print >> sys .stderr , 'float32 times' , float32_times
143+ print >> sys .stderr , 'float32 expected' , expected_times_32
144+ print >> sys .stderr , 'float32 % expected/get' , (
145+ expected_times_32 / float32_times )
135146
136147 if do_float64 :
137- print >> sys .stderr , 'float64/float32' ,float64_times / float32_times
148+ print >> sys .stderr , 'float64/float32' , (
149+ float64_times / float32_times )
138150 print >> sys .stderr
139151 print >> sys .stderr , 'Duplicate the timing to have everything in one place'
140152 print >> sys .stderr , algo_executed
141- print >> sys .stderr , 'float64 times' ,float64_times
142- print >> sys .stderr , 'float64 expected' ,expected_times_64
143- print >> sys .stderr , 'float64 % expected/get' ,expected_times_64 / float64_times
144- print >> sys .stderr , 'float32 times' ,float32_times
145- print >> sys .stderr , 'float32 expected' ,expected_times_32
146- print >> sys .stderr , 'float32 % expected/get' ,expected_times_32 / float32_times
147-
148- print >> sys .stderr , 'float64/float32' ,float64_times / float32_times
149- print >> sys .stderr , 'expected float64/float32' ,expected_times_64 / float32_times
153+ print >> sys .stderr , 'float64 times' , float64_times
154+ print >> sys .stderr , 'float64 expected' , expected_times_64
155+ print >> sys .stderr , 'float64 % expected/get' , (
156+ expected_times_64 / float64_times )
157+ print >> sys .stderr , 'float32 times' , float32_times
158+ print >> sys .stderr , 'float32 expected' , expected_times_32
159+ print >> sys .stderr , 'float32 % expected/get' , (
160+ expected_times_32 / float32_times )
161+
162+ print >> sys .stderr , 'float64/float32' , (
163+ float64_times / float32_times )
164+ print >> sys .stderr , 'expected float64/float32' , (
165+ expected_times_64 / float32_times )
150166
151167 #test in float32 in FAST_RUN mode on the gpu
152168 import theano .sandbox .cuda
153169 if do_gpu :
154170 theano .sandbox .cuda .use ('gpu' )
155- gpu_times = do_tests ()
171+ gpu_times = do_tests ()
156172 print >> sys .stderr , algo_executed
157- print >> sys .stderr , 'gpu times' ,gpu_times
158- print >> sys .stderr , 'gpu expected' ,expected_times_gpu
159- print >> sys .stderr , 'gpu % expected/get' ,expected_times_gpu / gpu_times
173+ print >> sys .stderr , 'gpu times' , gpu_times
174+ print >> sys .stderr , 'gpu expected' , expected_times_gpu
175+ print >> sys .stderr , 'gpu % expected/get' , (
176+ expected_times_gpu / gpu_times )
177+
160178 if do_float64 :
161- print >> sys .stderr , 'float64/gpu' ,float64_times / gpu_times
179+ print >> sys .stderr , 'float64/gpu' , float64_times / gpu_times
162180
163181 if (do_float64 + do_float32 + do_gpu ) > 1 :
164182 print >> sys .stderr
165183 print >> sys .stderr , 'Duplicate the timing to have everything in one place'
166184 print >> sys .stderr , algo_executed
167185 if do_float64 :
168- print >> sys .stderr , 'float64 times' ,float64_times
169- print >> sys .stderr , 'float64 expected' ,expected_times_64
170- print >> sys .stderr , 'float64 % expected/get' ,expected_times_64 / float64_times
186+ print >> sys .stderr , 'float64 times' , float64_times
187+ print >> sys .stderr , 'float64 expected' , expected_times_64
188+ print >> sys .stderr , 'float64 % expected/get' , (
189+ expected_times_64 / float64_times )
171190 if do_float32 :
172- print >> sys .stderr , 'float32 times' ,float32_times
173- print >> sys .stderr , 'float32 expected' ,expected_times_32
174- print >> sys .stderr , 'float32 % expected/get' ,expected_times_32 / float32_times
191+ print >> sys .stderr , 'float32 times' , float32_times
192+ print >> sys .stderr , 'float32 expected' , expected_times_32
193+ print >> sys .stderr , 'float32 % expected/get' , (
194+ expected_times_32 / float32_times )
175195 if do_gpu :
176- print >> sys .stderr , 'gpu times' ,gpu_times
177- print >> sys .stderr , 'gpu expected' ,expected_times_gpu
178- print >> sys .stderr , 'gpu % expected/get' ,expected_times_gpu / gpu_times
196+ print >> sys .stderr , 'gpu times' , gpu_times
197+ print >> sys .stderr , 'gpu expected' , expected_times_gpu
198+ print >> sys .stderr , 'gpu % expected/get' , (
199+ expected_times_gpu / gpu_times )
179200
180201 if do_float64 and do_float32 :
181- print >> sys .stderr , 'float64/float32' ,float64_times / float32_times
182- print >> sys .stderr , 'expected float64/float32' ,expected_times_64 / float32_times
202+ print >> sys .stderr , 'float64/float32' , (
203+ float64_times / float32_times )
204+ print >> sys .stderr , 'expected float64/float32' , (
205+ expected_times_64 / float32_times )
183206 if do_float64 and do_gpu :
184- print >> sys .stderr , 'float64/gpu' ,float64_times / gpu_times
185- print >> sys .stderr , 'expected float64/gpu' ,expected_times_64 / gpu_times
207+ print >> sys .stderr , 'float64/gpu' , float64_times / gpu_times
208+ print >> sys .stderr , 'expected float64/gpu' , (
209+ expected_times_64 / gpu_times )
186210 if do_float32 and do_gpu :
187- print >> sys .stderr , 'float32/gpu' ,float32_times / gpu_times
188- print >> sys .stderr , 'expected float32/gpu' ,expected_times_32 / gpu_times
211+ print >> sys .stderr , 'float32/gpu' , float32_times / gpu_times
212+ print >> sys .stderr , 'expected float32/gpu' , (
213+ expected_times_32 / gpu_times )
189214
190215 def compare (x , y ):
191216 ratio = x / y
0 commit comments