@@ -242,7 +242,6 @@ def _grab_root(seqlen,one_sample,prev_sample):
242242 sequences = [seqlens , roots .dimshuffle (1 ,0 ,2 )],
243243 outputs_info = [tensor .alloc (0. , options ['dim_proj' ])],
244244 name = 'grab_root_%s' % prefix )
245- #roots = roots.dimshuffle('x', 0, 1)
246245 else :
247246 roots = roots [seqlens ] # there should be only one, so it's fine.
248247
@@ -309,8 +308,6 @@ def build_model(tparams, options):
309308 emb = tparams ['Wemb' ][x .flatten ()].reshape ([n_timesteps , n_samples , options ['dim_proj' ]])
310309 proj = get_layer (options ['encoder' ])[1 ](tparams , emb , options , prefix = options ['encoder' ], mask = mask )
311310 if options ['encoder' ] == 'lstm' :
312- #proj = proj[-1]
313- #proj = proj.mean(axis=0)
314311 proj = (proj * mask [:,:,None ]).sum (axis = 0 )
315312 proj = proj / mask .sum (axis = 0 )[:,None ]
316313 if options ['use_dropout' ]:
@@ -344,17 +341,11 @@ def pred_probs(f_pred_prob, prepare_data, data, iterator, verbose=False):
344341
345342def pred_error (f_pred , prepare_data , data , iterator , verbose = False ):
346343 valid_err = 0
347- #valid_class_counts = numpy.zeros(3)
348- #valid_class_corrects = numpy.zeros(3)
349344 for _ , valid_index in iterator :
350345 x , mask , y = prepare_data ([data [0 ][t ] for t in valid_index ], numpy .array (data [1 ])[valid_index ], maxlen = None )
351346 preds = f_pred (x ,mask )
352347 targets = numpy .array (data [1 ])[valid_index ]
353- #for cc in xrange(3):
354- # valid_class_counts[cc] += numpy.sum(targets == cc)
355- # valid_class_corrects[cc] += (preds[numpy.where(targets == cc)] == targets[numpy.where(targets == cc)]).sum()
356348 valid_err += (preds == targets ).sum ()
357- #valid_err = 1. - (valid_class_corrects.astype('float32') / valid_class_counts.astype('float32')).mean()
358349 valid_err = 1. - numpy .float32 (valid_err ) / len (data [0 ])
359350
360351 return valid_err
@@ -464,8 +455,6 @@ def train(dim_proj=100,
464455 if numpy .mod (uidx , saveFreq ) == 0 :
465456 print 'Saving...' ,
466457
467- #import ipdb; ipdb.set_trace()
468-
469458 if best_p != None :
470459 params = best_p
471460 else :
@@ -476,13 +465,7 @@ def train(dim_proj=100,
476465
477466 if numpy .mod (uidx , validFreq ) == 0 :
478467 use_noise .set_value (0. )
479- train_err = 0
480- #for _, tindex in kf:
481- # x, mask = prepare_data(train[0][train_index])
482- # train_err += (f_pred(x, mask) == train[1][tindex]).sum()
483- #train_err = 1. - numpy.float32(train_err) / train[0].shape[0]
484-
485- #train_err = pred_error(f_pred, prepare_data, train, kf)
468+ train_err = pred_error (f_pred , prepare_data , train , kf )
486469 valid_err = pred_error (f_pred , prepare_data , valid , kf_valid )
487470 test_err = pred_error (f_pred , prepare_data , test , kf_test )
488471
@@ -500,8 +483,6 @@ def train(dim_proj=100,
500483
501484 print 'Train ' , train_err , 'Valid ' , valid_err , 'Test ' , test_err
502485
503- #print 'Epoch ', eidx, 'Update ', uidx, 'Train ', train_err, 'Valid ', valid_err, 'Test ', test_err
504-
505486 print 'Seen %d samples' % n_samples
506487
507488 if estop :
@@ -513,8 +494,7 @@ def train(dim_proj=100,
513494 zipp (best_p , tparams )
514495
515496 use_noise .set_value (0. )
516- train_err = 0
517- #train_err = pred_error(f_pred, prepare_data, train, kf)
497+ train_err = pred_error (f_pred , prepare_data , train , kf )
518498 valid_err = pred_error (f_pred , prepare_data , valid , kf_valid )
519499 test_err = pred_error (f_pred , prepare_data , test , kf_test )
520500
@@ -557,7 +537,6 @@ def main(job_id, params):
557537 'dim-proj' : [128 ],
558538 'n-words' : [10000 ],
559539 'optimizer' : ['adadelta' ],
560- #'activ': ['lambda x: tensor.maximum(0.,x)'],
561540 'activ' : ['lambda x: tensor.tanh(x)' ],
562541 'decay-c' : [0. ],
563542 'use-dropout' : [1 ],