
Commit 64e09a3

ugik committed: Tensorflow 2-layer perceptron toy example

1 parent d88dac1, commit 64e09a3

File tree: 2 files changed (+73, -56 lines)

.ipynb_checkpoints/Tensorflow ANN-checkpoint.ipynb

Lines changed: 36 additions & 21 deletions

@@ -2,20 +2,18 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 187,
+   "execution_count": 193,
    "metadata": {
     "collapsed": false
    },
    "outputs": [],
    "source": [
     "import numpy as np\n",
     "import random\n",
-    "from collections import Counter\n",
-    "\n",
     "\n",
     "def create_feature_sets_and_labels(test_size = 0.2):\n",
     "\n",
-    "    # 5 features\n",
+    "    # known patterns (5 features) output of [1] of positions [0,4]==1\n",
     "    features = []\n",
     "    features.append([[0, 0, 0, 0, 0], [0,1]])\n",
     "    features.append([[0, 0, 0, 0, 1], [0,1]])\n",
@@ -25,37 +23,38 @@
     "    features.append([[1, 1, 1, 1, 0], [0,1]])\n",
     "    features.append([[1, 1, 1, 0, 0], [0,1]])\n",
     "    features.append([[1, 1, 0, 0, 0], [0,1]])\n",
-    "# features.append([[1, 0, 0, 0, 0], [0,1]])\n",
-    "# features.append([[1, 0, 0, 1, 0], [0,1]])\n",
-    "# features.append([[1, 0, 1, 1, 0], [0,1]])\n",
-    "# features.append([[1, 1, 0, 1, 0], [0,1]])\n",
-    "# features.append([[0, 1, 0, 1, 1], [0,1]])\n",
-    "# features.append([[0, 0, 1, 0, 1], [0,1]])\n",
     "\n",
-    "    # output of [1] of positions [0,4]==1\n",
     "    features.append([[1, 0, 0, 0, 1], [1,0]])\n",
     "    features.append([[1, 1, 0, 0, 1], [1,0]])\n",
     "    features.append([[1, 1, 1, 0, 1], [1,0]])\n",
     "    features.append([[1, 1, 1, 1, 1], [1,0]])\n",
     "    features.append([[1, 0, 0, 1, 1], [1,0]])\n",
-    "# features.append([[1, 0, 1, 1, 1], [1,0]])\n",
-    "# features.append([[1, 1, 0, 1, 1], [1,0]])\n",
-    "# features.append([[1, 0, 1, 0, 1], [1,0]])\n",
     "\n",
+    "# unknown patterns\n",
+    "# features.append([[1, 0, 0, 0, 0], [0,1]])\n",
+    "# features.append([[1, 0, 0, 1, 0], [0,1]])\n",
+    "# features.append([[1, 0, 1, 1, 0], [0,1]])\n",
+    "# features.append([[1, 1, 0, 1, 0], [0,1]])\n",
+    "# features.append([[0, 1, 0, 1, 1], [0,1]])\n",
+    "# features.append([[0, 0, 1, 0, 1], [0,1]])\n",
+    "# features.append([[1, 0, 1, 1, 1], [1,0]])\n",
+    "# features.append([[1, 1, 0, 1, 1], [1,0]])\n",
+    "# features.append([[1, 0, 1, 0, 1], [1,0]])\n",
+    "\n",
+    "    # shuffle out features and turn into np.array\n",
     "    random.shuffle(features)\n",
     "    features = np.array(features)\n",
     "\n",
+    "    # split a portion of the features into tests\n",
     "    testing_size = int(test_size*len(features))\n",
     "\n",
+    "    # create train and test lists\n",
     "    train_x = list(features[:,0][:-testing_size])\n",
     "    train_y = list(features[:,1][:-testing_size])\n",
     "    test_x = list(features[:,0][-testing_size:])\n",
     "    test_y = list(features[:,1][-testing_size:])\n",
     "\n",
-    "    return train_x,train_y,test_x,test_y\n",
-    "\n",
-    "if __name__ == '__main__':\n",
-    "    train_x,train_y,test_x,test_y = create_feature_sets_and_labels()\n"
    ]
   },
   {
+    "    return train_x,train_y,test_x,test_y\n"
@@ -71,16 +70,20 @@
     "\n",
     "train_x,train_y,test_x,test_y = create_feature_sets_and_labels()\n",
     "\n",
+    "# hidden layers and their nodes\n",
     "n_nodes_hl1 = 20\n",
     "n_nodes_hl2 = 20\n",
     "\n",
+    "# classes in our output\n",
     "n_classes = 2\n",
+    "# iterations and batch-size to build out model\n",
     "hm_epochs = 50\n",
     "batch_size = 4\n",
     "\n",
     "x = tf.placeholder('float')\n",
     "y = tf.placeholder('float')\n",
     "\n",
+    "# random weights and bias for our layers\n",
     "hidden_1_layer = {'f_fum':n_nodes_hl1,\n",
     "                  'weight':tf.Variable(tf.random_normal([len(train_x[0]), n_nodes_hl1])),\n",
     "                  'bias':tf.Variable(tf.random_normal([n_nodes_hl1]))}\n",
@@ -94,6 +97,7 @@
     "                'bias':tf.Variable(tf.random_normal([n_classes])),}\n",
     "    \n",
     "\n",
+    "# our predictive model's definition\n",
     "def neural_network_model(data):\n",
     "\n",
     "    l1 = tf.add(tf.matmul(data,hidden_1_layer['weight']), hidden_1_layer['bias'])\n",
@@ -106,19 +110,26 @@
     "\n",
     "    return output\n",
     "\n",
+    "# training our model\n",
     "def train_neural_network(x):\n",
+    "    # use the model definition\n",
     "    prediction = neural_network_model(x)\n",
     "\n",
+    "    # formula for cost (error)\n",
     "    cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(prediction,y) )\n",
-    "    #optimizer = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.999).minimize(cost)\n",
+    "    # optimize for cost using GradientDescent\n",
     "    optimizer = tf.train.GradientDescentOptimizer(1).minimize(cost)\n",
     "\n",
+    "    # Tensorflow session\n",
     "    with tf.Session() as sess:\n",
+    "        # initialize our variables\n",
     "        sess.run(tf.global_variables_initializer())\n",
-    "        \n",
+    "\n",
+    "        # loop through specified number of iterations\n",
     "        for epoch in range(hm_epochs):\n",
     "            epoch_loss = 0\n",
     "            i=0\n",
+    "            # handle batch sized chunks of training data\n",
     "            while i < len(train_x):\n",
     "                start = i\n",
     "                end = i+batch_size\n",
@@ -130,15 +141,19 @@
     "                i+=batch_size\n",
     "                last_cost = c\n",
     "\n",
-    "            if (epoch% 10) == 0 and epoch > 1:\n",
+    "            # print cost updates along the way\n",
+    "            if (epoch% (hm_epochs/5)) == 0:\n",
     "                print('Epoch', epoch, 'completed out of',hm_epochs,'cost:', last_cost)\n",
     "\n",
+    "        # print accuracy of our model against known test data (a subset of the shuffled known patterns)\n",
     "        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\n",
     "        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\n",
     "        print('Accuracy:',accuracy.eval({x:test_x, y:test_y}))\n",
     "\n",
+    "        # print a prediction using our model\n",
     "        output = prediction.eval(feed_dict = {x: [[1, 0, 0, 0, 0]]})\n",
     "        print(output)\n",
+    "        # normalize the prediction values\n",
     "        print(tf.sigmoid(output[0][0]).eval(), tf.sigmoid(output[0][1]).eval())\n",
     "    \n",
     "train_neural_network(x)\n"

Tensorflow ANN.ipynb

Lines changed: 37 additions & 35 deletions

@@ -2,20 +2,18 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 187,
+   "execution_count": 193,
    "metadata": {
     "collapsed": false
    },
    "outputs": [],
    "source": [
     "import numpy as np\n",
     "import random\n",
-    "from collections import Counter\n",
-    "\n",
     "\n",
     "def create_feature_sets_and_labels(test_size = 0.2):\n",
     "\n",
-    "    # 5 features\n",
+    "    # known patterns (5 features) output of [1] of positions [0,4]==1\n",
     "    features = []\n",
     "    features.append([[0, 0, 0, 0, 0], [0,1]])\n",
     "    features.append([[0, 0, 0, 0, 1], [0,1]])\n",
@@ -25,37 +23,38 @@
     "    features.append([[1, 1, 1, 1, 0], [0,1]])\n",
     "    features.append([[1, 1, 1, 0, 0], [0,1]])\n",
     "    features.append([[1, 1, 0, 0, 0], [0,1]])\n",
-    "# features.append([[1, 0, 0, 0, 0], [0,1]])\n",
-    "# features.append([[1, 0, 0, 1, 0], [0,1]])\n",
-    "# features.append([[1, 0, 1, 1, 0], [0,1]])\n",
-    "# features.append([[1, 1, 0, 1, 0], [0,1]])\n",
-    "# features.append([[0, 1, 0, 1, 1], [0,1]])\n",
-    "# features.append([[0, 0, 1, 0, 1], [0,1]])\n",
     "\n",
-    "    # output of [1] of positions [0,4]==1\n",
     "    features.append([[1, 0, 0, 0, 1], [1,0]])\n",
     "    features.append([[1, 1, 0, 0, 1], [1,0]])\n",
     "    features.append([[1, 1, 1, 0, 1], [1,0]])\n",
     "    features.append([[1, 1, 1, 1, 1], [1,0]])\n",
     "    features.append([[1, 0, 0, 1, 1], [1,0]])\n",
-    "# features.append([[1, 0, 1, 1, 1], [1,0]])\n",
-    "# features.append([[1, 1, 0, 1, 1], [1,0]])\n",
-    "# features.append([[1, 0, 1, 0, 1], [1,0]])\n",
     "\n",
+    "# unknown patterns\n",
+    "# features.append([[1, 0, 0, 0, 0], [0,1]])\n",
+    "# features.append([[1, 0, 0, 1, 0], [0,1]])\n",
+    "# features.append([[1, 0, 1, 1, 0], [0,1]])\n",
+    "# features.append([[1, 1, 0, 1, 0], [0,1]])\n",
+    "# features.append([[0, 1, 0, 1, 1], [0,1]])\n",
+    "# features.append([[0, 0, 1, 0, 1], [0,1]])\n",
+    "# features.append([[1, 0, 1, 1, 1], [1,0]])\n",
+    "# features.append([[1, 1, 0, 1, 1], [1,0]])\n",
+    "# features.append([[1, 0, 1, 0, 1], [1,0]])\n",
+    "\n",
+    "    # shuffle out features and turn into np.array\n",
     "    random.shuffle(features)\n",
     "    features = np.array(features)\n",
     "\n",
+    "    # split a portion of the features into tests\n",
     "    testing_size = int(test_size*len(features))\n",
     "\n",
+    "    # create train and test lists\n",
     "    train_x = list(features[:,0][:-testing_size])\n",
     "    train_y = list(features[:,1][:-testing_size])\n",
     "    test_x = list(features[:,0][-testing_size:])\n",
     "    test_y = list(features[:,1][-testing_size:])\n",
     "\n",
-    "    return train_x,train_y,test_x,test_y\n",
-    "\n",
-    "if __name__ == '__main__':\n",
-    "    train_x,train_y,test_x,test_y = create_feature_sets_and_labels()\n"
+    "    return train_x,train_y,test_x,test_y\n"
    ]
   },
   {
@@ -64,36 +63,27 @@
    "metadata": {
     "collapsed": false
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Epoch 10 completed out of 50 cost: 0.3681\n",
-      "Epoch 20 completed out of 50 cost: 0.0166706\n",
-      "Epoch 30 completed out of 50 cost: 0.00777522\n",
-      "Epoch 40 completed out of 50 cost: 0.00493255\n",
-      "Accuracy: 1.0\n",
-      "[[-0.26135445  4.12279558]]\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import tensorflow as tf\n",
     "import numpy as np\n",
     "\n",
     "train_x,train_y,test_x,test_y = create_feature_sets_and_labels()\n",
     "\n",
+    "# hidden layers and their nodes\n",
     "n_nodes_hl1 = 20\n",
     "n_nodes_hl2 = 20\n",
     "\n",
+    "# classes in our output\n",
     "n_classes = 2\n",
+    "# iterations and batch-size to build out model\n",
    "hm_epochs = 50\n",
     "batch_size = 4\n",
     "\n",
     "x = tf.placeholder('float')\n",
     "y = tf.placeholder('float')\n",
     "\n",
+    "# random weights and bias for our layers\n",
     "hidden_1_layer = {'f_fum':n_nodes_hl1,\n",
     "                  'weight':tf.Variable(tf.random_normal([len(train_x[0]), n_nodes_hl1])),\n",
     "                  'bias':tf.Variable(tf.random_normal([n_nodes_hl1]))}\n",
@@ -107,6 +97,7 @@
     "                'bias':tf.Variable(tf.random_normal([n_classes])),}\n",
     "    \n",
     "\n",
+    "# our predictive model's definition\n",
     "def neural_network_model(data):\n",
     "\n",
     "    l1 = tf.add(tf.matmul(data,hidden_1_layer['weight']), hidden_1_layer['bias'])\n",
@@ -119,19 +110,26 @@
     "\n",
     "    return output\n",
     "\n",
+    "# training our model\n",
     "def train_neural_network(x):\n",
+    "    # use the model definition\n",
     "    prediction = neural_network_model(x)\n",
     "\n",
+    "    # formula for cost (error)\n",
     "    cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(prediction,y) )\n",
-    "    #optimizer = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.999).minimize(cost)\n",
+    "    # optimize for cost using GradientDescent\n",
     "    optimizer = tf.train.GradientDescentOptimizer(1).minimize(cost)\n",
     "\n",
+    "    # Tensorflow session\n",
     "    with tf.Session() as sess:\n",
+    "        # initialize our variables\n",
     "        sess.run(tf.global_variables_initializer())\n",
-    "        \n",
+    "\n",
+    "        # loop through specified number of iterations\n",
     "        for epoch in range(hm_epochs):\n",
     "            epoch_loss = 0\n",
     "            i=0\n",
+    "            # handle batch sized chunks of training data\n",
     "            while i < len(train_x):\n",
     "                start = i\n",
     "                end = i+batch_size\n",
@@ -143,15 +141,19 @@
     "                i+=batch_size\n",
     "                last_cost = c\n",
     "\n",
-    "            if (epoch% 10) == 0 and epoch > 1:\n",
+    "            # print cost updates along the way\n",
+    "            if (epoch% (hm_epochs/5)) == 0:\n",
     "                print('Epoch', epoch, 'completed out of',hm_epochs,'cost:', last_cost)\n",
     "\n",
+    "        # print accuracy of our model against known test data (a subset of the shuffled known patterns)\n",
     "        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\n",
     "        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\n",
     "        print('Accuracy:',accuracy.eval({x:test_x, y:test_y}))\n",
     "\n",
+    "        # print a prediction using our model\n",
     "        output = prediction.eval(feed_dict = {x: [[1, 0, 0, 0, 0]]})\n",
     "        print(output)\n",
+    "        # normalize the prediction values\n",
     "        print(tf.sigmoid(output[0][0]).eval(), tf.sigmoid(output[0][1]).eval())\n",
     "    \n",
     "train_neural_network(x)\n"
