|
# Train a small interconversion network: map the 30-dimensional encoding
# produced by the first autoencoder to the encoding produced by the second,
# through a 60-unit hidden layer.  (Network, encoded_training_data, ae_1,
# ae_2, test, SIZE and sigmoid_vec are defined earlier in the file, outside
# this chunk.)
net = Network([30, 60, 30])
# Presumably SGD(training_data, epochs, mini_batch_size, eta, lmbda) --
# TODO confirm the parameter order against the Network definition.
net.SGD(encoded_training_data, 6, 10, 0.01, 0.05)

print """\nBaseline for comparison: decompress with the first autoencoder"""
print """and compress with the second autoencoder"""
# Encode the test images with each autoencoder's first layer; weights[0] /
# biases[0] appear to be the encoding half of each autoencoder (inferred
# from usage here -- verify against where ae_1/ae_2 are trained).
encoded_test_1 = [sigmoid_vec(np.dot(ae_1.weights[0], x)+ae_1.biases[0])
                  for x in test]
encoded_test_2 = [sigmoid_vec(np.dot(ae_2.weights[0], x)+ae_2.biases[0])
                  for x in test]
# NOTE: Python 2 zip() returns a list, which matters because test_data is
# iterated three times below (baseline error, mean activation, net error).
test_data = zip(encoded_test_1, encoded_test_2)
# Baseline network: ae_1's second (decoding) layer, 30 -> 784, followed by
# ae_2's first (encoding) layer, 784 -> 30 -- i.e. decompress with the
# first autoencoder, then recompress with the second.
net_baseline = Network([30, 784, 30])
net_baseline.biases[0] = ae_1.biases[1]
net_baseline.weights[0] = ae_1.weights[1]
net_baseline.biases[1] = ae_2.biases[0]
net_baseline.weights[1] = ae_2.weights[0]
# Total l1 distance between the baseline's output and ae_2's encoding,
# summed over the test set.
error_baseline = sum(np.linalg.norm(net_baseline.feedforward(x)-y, 1)
                     for (x, y) in test_data)
# NOTE(review): the message says "per training image" but the average is
# computed over test_data -- confirm whether the wording is intentional.
print "Baseline average l1 error per training image: %s" % (error_baseline / SIZE,)

print "\nComparing theories with a simple interconversion"
# Mean desired activation gives a scale against which to judge the l1 error.
print "Mean desired output activation: %s" % (
    sum(y.mean() for _, y in test_data) / SIZE,)
# Same error measure for the trained interconversion network.
error = sum(np.linalg.norm(net.feedforward(x)-y, 1) for (x, y) in test_data)
print "Average l1 error per training image: %s" % (error / SIZE,)
0 commit comments