|
2 | 2 | "cells": [ |
3 | 3 | { |
4 | 4 | "cell_type": "code", |
5 | | - "execution_count": 63, |
| 5 | + "execution_count": 187, |
6 | 6 | "metadata": { |
7 | 7 | "collapsed": false |
8 | 8 | }, |
9 | | - "outputs": [ |
10 | | - { |
11 | | - "name": "stdout", |
12 | | - "output_type": "stream", |
13 | | - "text": [ |
14 | | - "[0, 1, 0, 1, 1] [0]\n" |
15 | | - ] |
16 | | - } |
17 | | - ], |
| 9 | + "outputs": [], |
18 | 10 | "source": [ |
19 | 11 | "import numpy as np\n", |
20 | 12 | "import random\n", |
|
25 | 17 | "\n", |
26 | 18 | " # 5 features\n", |
27 | 19 | " features = []\n", |
28 | | - " features.append([[0, 0, 0, 0, 0], [0]])\n", |
29 | | - " features.append([[0, 0, 0, 0, 1], [0]])\n", |
30 | | - " features.append([[0, 0, 0, 1, 1], [0]])\n", |
31 | | - " features.append([[0, 0, 1, 1, 1], [0]])\n", |
32 | | - " features.append([[0, 1, 1, 1, 1], [0]])\n", |
33 | | - " features.append([[1, 1, 1, 1, 0], [0]])\n", |
34 | | - " features.append([[1, 1, 1, 0, 0], [0]])\n", |
35 | | - " features.append([[1, 1, 0, 0, 0], [0]])\n", |
36 | | - " features.append([[1, 0, 0, 0, 0], [0]])\n", |
37 | | - " features.append([[1, 0, 0, 1, 0], [0]])\n", |
38 | | - " features.append([[1, 0, 1, 1, 0], [0]])\n", |
39 | | - " features.append([[1, 1, 0, 1, 0], [0]])\n", |
40 | | - " features.append([[0, 1, 0, 1, 1], [0]])\n", |
41 | | - " features.append([[0, 0, 1, 0, 1], [0]])\n", |
| 20 | + " features.append([[0, 0, 0, 0, 0], [0,1]])\n", |
| 21 | + " features.append([[0, 0, 0, 0, 1], [0,1]])\n", |
| 22 | + " features.append([[0, 0, 0, 1, 1], [0,1]])\n", |
| 23 | + " features.append([[0, 0, 1, 1, 1], [0,1]])\n", |
| 24 | + " features.append([[0, 1, 1, 1, 1], [0,1]])\n", |
| 25 | + " features.append([[1, 1, 1, 1, 0], [0,1]])\n", |
| 26 | + " features.append([[1, 1, 1, 0, 0], [0,1]])\n", |
| 27 | + " features.append([[1, 1, 0, 0, 0], [0,1]])\n", |
| 28 | + "# features.append([[1, 0, 0, 0, 0], [0,1]])\n", |
| 29 | + "# features.append([[1, 0, 0, 1, 0], [0,1]])\n", |
| 30 | + "# features.append([[1, 0, 1, 1, 0], [0,1]])\n", |
| 31 | + "# features.append([[1, 1, 0, 1, 0], [0,1]])\n", |
| 32 | + "# features.append([[0, 1, 0, 1, 1], [0,1]])\n", |
| 33 | + "# features.append([[0, 0, 1, 0, 1], [0,1]])\n", |
| 34 | + "\n", |
42 | 35 | " # output class 1 when positions [0] and [4] are both 1\n", |
43 | | - " features.append([[1, 0, 0, 0, 1], [1]])\n", |
44 | | - " features.append([[1, 1, 0, 0, 1], [1]])\n", |
45 | | - " features.append([[1, 1, 1, 0, 1], [1]])\n", |
46 | | - " features.append([[1, 1, 1, 1, 1], [1]])\n", |
47 | | - " features.append([[1, 0, 0, 1, 1], [1]])\n", |
48 | | - " features.append([[1, 0, 1, 1, 1], [1]])\n", |
49 | | - " features.append([[1, 1, 0, 1, 1], [1]])\n", |
50 | | - " features.append([[1, 0, 1, 0, 1], [1]])\n", |
| 36 | + " features.append([[1, 0, 0, 0, 1], [1,0]])\n", |
| 37 | + " features.append([[1, 1, 0, 0, 1], [1,0]])\n", |
| 38 | + " features.append([[1, 1, 1, 0, 1], [1,0]])\n", |
| 39 | + " features.append([[1, 1, 1, 1, 1], [1,0]])\n", |
| 40 | + " features.append([[1, 0, 0, 1, 1], [1,0]])\n", |
| 41 | + "# features.append([[1, 0, 1, 1, 1], [1,0]])\n", |
| 42 | + "# features.append([[1, 1, 0, 1, 1], [1,0]])\n", |
| 43 | + "# features.append([[1, 0, 1, 0, 1], [1,0]])\n", |
51 | 44 | "\n", |
52 | 45 | " random.shuffle(features)\n", |
53 | 46 | " features = np.array(features)\n", |
|
62 | 55 | " return train_x,train_y,test_x,test_y\n", |
63 | 56 | "\n", |
64 | 57 | "if __name__ == '__main__':\n", |
65 | | - " train_x,train_y,test_x,test_y = create_feature_sets_and_labels()\n", |
66 | | - "\n", |
67 | | - "print(train_x[0], train_y[0]" |
| 58 | + " train_x,train_y,test_x,test_y = create_feature_sets_and_labels()\n" |
68 | 59 | ] |
69 | 60 | }, |
70 | 61 | { |
71 | 62 | "cell_type": "code", |
72 | | - "execution_count": 79, |
| 63 | + "execution_count": null, |
73 | 64 | "metadata": { |
74 | 65 | "collapsed": false |
75 | 66 | }, |
76 | | - "outputs": [ |
77 | | - { |
78 | | - "name": "stdout", |
79 | | - "output_type": "stream", |
80 | | - "text": [ |
81 | | - "Epoch 3 completed out of 10 cost: 0.0\n", |
82 | | - "Epoch 5 completed out of 10 cost: 0.0\n", |
83 | | - "Epoch 7 completed out of 10 cost: 0.0\n", |
84 | | - "Epoch 9 completed out of 10 cost: 0.0\n", |
85 | | - "Accuracy: 1.0\n" |
86 | | - ] |
87 | | - } |
88 | | - ], |
| 67 | + "outputs": [], |
89 | 68 | "source": [ |
90 | 69 | "import tensorflow as tf\n", |
91 | 70 | "import numpy as np\n", |
|
95 | 74 | "n_nodes_hl1 = 20\n", |
96 | 75 | "n_nodes_hl2 = 20\n", |
97 | 76 | "\n", |
98 | | - "n_classes = 1\n", |
99 | | - "hm_epochs = 10\n", |
| 77 | + "n_classes = 2\n", |
| 78 | + "hm_epochs = 50\n", |
| 79 | + "batch_size = 4\n", |
100 | 80 | "\n", |
101 | 81 | "x = tf.placeholder('float')\n", |
102 | 82 | "y = tf.placeholder('float')\n", |
|
150 | 130 | " i+=batch_size\n", |
151 | 131 | " last_cost = c\n", |
152 | 132 | "\n", |
153 | | - " if (epoch% 2) == 0 and epoch > 1:\n", |
154 | | - " print('Epoch', epoch+1, 'completed out of',hm_epochs,'cost:', last_cost)\n", |
| 133 | + " if (epoch% 10) == 0 and epoch > 1:\n", |
| 134 | + " print('Epoch', epoch, 'completed out of',hm_epochs,'cost:', last_cost)\n", |
155 | 135 | "\n", |
156 | 136 | " correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\n", |
157 | 137 | " accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\n", |
158 | | - "\n", |
159 | 138 | " print('Accuracy:',accuracy.eval({x:test_x, y:test_y}))\n", |
160 | 139 | "\n", |
161 | | - "train_neural_network(x)" |
| 140 | + " output = prediction.eval(feed_dict = {x: [[1, 0, 0, 0, 0]]})\n", |
| 141 | + " print(output)\n", |
| 142 | + " print(tf.sigmoid(output[0][0]).eval(), tf.sigmoid(output[0][1]).eval())\n", |
| 143 | + " \n", |
| 144 | + "train_neural_network(x)\n" |
162 | 145 | ] |
163 | 146 | }, |
164 | 147 | { |
|
0 commit comments