
Commit 171baab

try use eval instead of function
1 parent: 3f705f4

File tree

3 files changed: +12, -21 lines
  code/DBN.py
  code/convolutional_mlp_kaggle.py
  code/logistic_sgd_kaggle.py

3 files changed

+12
-21
lines changed

code/DBN.py

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@
 import theano.tensor as T
 from theano.tensor.shared_randomstreams import RandomStreams
 
-from logistic_sgd import LogisticRegression, load_data
+from logistic_sgd_kaggle import LogisticRegression, load_data
 from mlp import HiddenLayer
 from rbm import RBM

code/convolutional_mlp_kaggle.py

Lines changed: 3 additions & 7 deletions
@@ -34,7 +34,7 @@
 from theano.tensor.signal import downsample
 from theano.tensor.nnet import conv
 
-from logistic_sgd import LogisticRegression, load_data
+from logistic_sgd_kaggle import LogisticRegression, load_data
 from mlp import HiddenLayer
 
@@ -196,10 +196,6 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
                 x: test_set_x[index * batch_size: (index + 1) * batch_size],
                 y: test_set_y[index * batch_size: (index + 1) * batch_size]})
 
-    predict_model = theano.function([index], layer3.predict(),
-            givens={
-                x: predict_set_x[index * batch_size: (index + 1) * batch_size]})
-
     validate_model = theano.function([index], layer3.errors(y),
             givens={
                 x: valid_set_x[index * batch_size: (index + 1) * batch_size],
@@ -286,8 +282,8 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
                     test_losses = [test_model(i) for i in xrange(n_test_batches)]
                     test_score = numpy.mean(test_losses)
 
-                    predict_res_array = [predict_model(i) for i in xrange(n_predict_batches)]
-                    print predict_res_array;
+                    predict_res_array = [layer3.y_pred.eval({input:predict_set_x[i * batch_size: (i + 1) * batch_size]})
+                                         for i in xrange(n_predict_batches)]
                     f = open("predict_res","w+");
                     for y_pred_item_array in predict_res_array:
                         for y_pred_item in y_pred_item_array:
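
The change above is the one the commit title refers to: the pre-compiled predict_model theano.function is dropped and the symbolic prediction layer3.y_pred is evaluated directly with Variable.eval(). The sketch below contrasts the two routes on a toy softmax graph; every name in it (x, W, b, y_pred, batch) is illustrative and not taken from this repository. Note that eval substitutes concrete arrays for symbolic inputs, so a slice of a shared dataset such as predict_set_x would normally be pulled out with get_value() first.

# Sketch only: compare a compiled prediction function with Variable.eval().
# All names here (x, W, b, y_pred, batch) are illustrative, not from the repo.
import numpy
import theano
import theano.tensor as T

floatX = theano.config.floatX

x = T.matrix('x')                                            # symbolic minibatch of inputs
W = theano.shared(numpy.zeros((784, 10), dtype=floatX), name='W')   # toy softmax parameters
b = theano.shared(numpy.zeros(10, dtype=floatX), name='b')
p_y_given_x = T.nnet.softmax(T.dot(x, W) + b)
y_pred = T.argmax(p_y_given_x, axis=1)                       # symbolic predicted labels

batch = numpy.random.rand(20, 784).astype(floatX)            # a concrete numpy minibatch

# Route 1: compile once, call many times (what the removed predict_model did).
predict_fn = theano.function([x], y_pred)
labels_a = predict_fn(batch)

# Route 2: evaluate the symbolic variable directly (what the new code tries).
# The dict maps symbolic input variables to concrete values; eval compiles a
# function behind the scenes on first use and caches it for later calls.
labels_b = y_pred.eval({x: batch})

assert (labels_a == labels_b).all()

# If the data lives in a theano shared variable (like predict_set_x in the
# diff), fetch a concrete slice with get_value() before handing it to eval:
# y_pred.eval({x: predict_set_x.get_value(borrow=True)[i * 500:(i + 1) * 500]})

Compiling once with theano.function remains the cheaper choice inside a loop; eval is mainly a convenience for one-off evaluation, since it builds (and then caches) a function per distinct set of substituted inputs.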

code/logistic_sgd_kaggle.py

Lines changed: 8 additions & 13 deletions
@@ -121,11 +121,9 @@ def negative_log_likelihood(self, y):
         # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
         # the mean (across minibatch examples) of the elements in v,
         # i.e., the mean log-likelihood across the minibatch.
-        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
 
-    def predict(self):
-        return T.mul(self.y_pred,1)
-
+        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y]) + 0.0001*T.sum(self.W **2)
+
     def errors(self, y):
         """Return a float representing the number of errors in the minibatch
         over the total number of examples of the minibatch ; zero one
@@ -194,7 +192,7 @@ def load_data(dataset):
     test_set_size = 1000;
     predict_set_size = 28000;
 
-    debug = "false";
+    debug = "true";
     if debug == "true":
         train_set_size = 3600;
         valid_set_size = 500;
@@ -285,7 +283,7 @@ def shared_dataset(data_xy, borrow=True):
 
 def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
                            dataset='../data/mnist.pkl.gz',
-                           batch_size=600):
+                           batch_size=500):
     """
     Demonstrate stochastic gradient descent optimization of a log-linear
     model
@@ -344,11 +342,6 @@ def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
                 x: test_set_x[index * batch_size: (index + 1) * batch_size],
                 y: test_set_y[index * batch_size: (index + 1) * batch_size]})
 
-    release_output = theano.function(inputs=[index],
-            outputs=classifier.release_output(),
-            givens={
-                x: predict_set_x[index:]})
-
     validate_model = theano.function(inputs=[index],
             outputs=classifier.errors(y),
             givens={
@@ -428,9 +421,11 @@ def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
                     test_losses = [test_model(i)
                                    for i in xrange(n_test_batches)]
                     test_score = numpy.mean(test_losses)
+
+                    y_pred_show = classifier.y_pred.eval({input:predict_set_x})
 
-                    y_pred_show = [release_output(i)
-                                   for i in xrange(n_predict_batches)]
+                    y_pred_show = [classifier.y_pred.eval({input:predict_set_x[index * batch_size: (index + 1) * batch_size]})
+                                   for index in xrange(n_predict_batches)]
 
                     print y_pred_show;
                     f = open("predict_res","w+");
