Skip to content

Commit 9286d19

Browse files
committed
Revert "try use eval instead of function"
This reverts commit 171baab.
1 parent 171baab commit 9286d19

File tree

3 files changed

+21
-12
lines changed

3 files changed

+21
-12
lines changed

code/DBN.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
import theano.tensor as T
1313
from theano.tensor.shared_randomstreams import RandomStreams
1414

15-
from logistic_sgd_kaggle import LogisticRegression, load_data
15+
from logistic_sgd import LogisticRegression, load_data
1616
from mlp import HiddenLayer
1717
from rbm import RBM
1818

code/convolutional_mlp_kaggle.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@
3434
from theano.tensor.signal import downsample
3535
from theano.tensor.nnet import conv
3636

37-
from logistic_sgd_kaggle import LogisticRegression, load_data
37+
from logistic_sgd import LogisticRegression, load_data
3838
from mlp import HiddenLayer
3939

4040

@@ -196,6 +196,10 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
196196
x: test_set_x[index * batch_size: (index + 1) * batch_size],
197197
y: test_set_y[index * batch_size: (index + 1) * batch_size]})
198198

199+
predict_model = theano.function([index], layer3.predict(),
200+
givens={
201+
x: predict_set_x[index * batch_size: (index + 1) * batch_size]})
202+
199203
validate_model = theano.function([index], layer3.errors(y),
200204
givens={
201205
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
@@ -282,8 +286,8 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
282286
test_losses = [test_model(i) for i in xrange(n_test_batches)]
283287
test_score = numpy.mean(test_losses)
284288

285-
predict_res_array = [layer3.y_pred.eval({input:predict_set_x[i * batch_size: (i + 1) * batch_size]})
286-
289+
predict_res_array = [predict_model(i) for i in xrange(n_predict_batches)]
290+
print predict_res_array;
287291
f = open("predict_res","w+");
288292
for y_pred_item_array in predict_res_array:
289293
for y_pred_item in y_pred_item_array:

code/logistic_sgd_kaggle.py

Lines changed: 13 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -121,9 +121,11 @@ def negative_log_likelihood(self, y):
121121
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
122122
# the mean (across minibatch examples) of the elements in v,
123123
# i.e., the mean log-likelihood across the minibatch.
124+
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
124125

125-
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y]) + 0.0001*T.sum(self.W **2)
126-
126+
def predict(self):
127+
return T.mul(self.y_pred,1)
128+
127129
def errors(self, y):
128130
"""Return a float representing the number of errors in the minibatch
129131
over the total number of examples of the minibatch ; zero one
@@ -192,7 +194,7 @@ def load_data(dataset):
192194
test_set_size = 1000;
193195
predict_set_size = 28000;
194196

195-
debug = "true";
197+
debug = "false";
196198
if debug == "true":
197199
train_set_size = 3600;
198200
valid_set_size = 500;
@@ -283,7 +285,7 @@ def shared_dataset(data_xy, borrow=True):
283285

284286
def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
285287
dataset='../data/mnist.pkl.gz',
286-
batch_size=500):
288+
batch_size=600):
287289
"""
288290
Demonstrate stochastic gradient descent optimization of a log-linear
289291
model
@@ -342,6 +344,11 @@ def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
342344
x: test_set_x[index * batch_size: (index + 1) * batch_size],
343345
y: test_set_y[index * batch_size: (index + 1) * batch_size]})
344346

347+
release_output = theano.function(inputs=[index],
348+
outputs=classifier.release_output(),
349+
givens={
350+
x: predict_set_x[index:]})
351+
345352
validate_model = theano.function(inputs=[index],
346353
outputs=classifier.errors(y),
347354
givens={
@@ -421,11 +428,9 @@ def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
421428
test_losses = [test_model(i)
422429
for i in xrange(n_test_batches)]
423430
test_score = numpy.mean(test_losses)
424-
425-
y_pred_show = classifier.y_pred.eval({input:predict_set_x})
426431

427-
y_pred_show = [classifier.y_pred.eval({input:predict_set_x[index * batch_size: (index + 1) * batch_size]})
428-
for index in xrange(n_predict_batches)]
432+
y_pred_show = [release_output(i)
433+
for i in xrange(n_predict_batches)]
429434

430435
print y_pred_show;
431436
f = open("predict_res","w+");

0 commit comments

Comments (0)