|  | 
| 2 | 2 |  "cells": [ | 
| 3 | 3 |   { | 
| 4 | 4 |    "cell_type": "code", | 
| 5 |  | -   "execution_count": 187, | 
|  | 5 | +   "execution_count": 193, | 
| 6 | 6 |    "metadata": { | 
| 7 | 7 |     "collapsed": false | 
| 8 | 8 |    }, | 
| 9 | 9 |    "outputs": [], | 
| 10 | 10 |    "source": [ | 
| 11 | 11 |     "import numpy as np\n", | 
| 12 | 12 |     "import random\n", | 
| 13 |  | -    "from collections import Counter\n", | 
| 14 |  | -    "\n", | 
| 15 | 13 |     "\n", | 
| 16 | 14 |     "def create_feature_sets_and_labels(test_size = 0.2):\n", | 
| 17 | 15 |     "\n", | 
| 18 |  | -    "    # 5 features\n", | 
|  | 16 | +    "    # known patterns (5 features): label [1,0] when positions [0] and [4] are both 1, else [0,1]\n", | 
| 19 | 17 |     "    features = []\n", | 
| 20 | 18 |     "    features.append([[0, 0, 0, 0, 0], [0,1]])\n", | 
| 21 | 19 |     "    features.append([[0, 0, 0, 0, 1], [0,1]])\n", | 
|  | 
| 25 | 23 |     "    features.append([[1, 1, 1, 1, 0], [0,1]])\n", | 
| 26 | 24 |     "    features.append([[1, 1, 1, 0, 0], [0,1]])\n", | 
| 27 | 25 |     "    features.append([[1, 1, 0, 0, 0], [0,1]])\n", | 
| 28 |  | -    "#    features.append([[1, 0, 0, 0, 0], [0,1]])\n", | 
| 29 |  | -    "#    features.append([[1, 0, 0, 1, 0], [0,1]])\n", | 
| 30 |  | -    "#    features.append([[1, 0, 1, 1, 0], [0,1]])\n", | 
| 31 |  | -    "#    features.append([[1, 1, 0, 1, 0], [0,1]])\n", | 
| 32 |  | -    "#    features.append([[0, 1, 0, 1, 1], [0,1]])\n", | 
| 33 |  | -    "#    features.append([[0, 0, 1, 0, 1], [0,1]])\n", | 
| 34 | 26 |     "\n", | 
| 35 |  | -    "    # output of [1] of positions [0,4]==1\n", | 
| 36 | 27 |     "    features.append([[1, 0, 0, 0, 1], [1,0]])\n", | 
| 37 | 28 |     "    features.append([[1, 1, 0, 0, 1], [1,0]])\n", | 
| 38 | 29 |     "    features.append([[1, 1, 1, 0, 1], [1,0]])\n", | 
| 39 | 30 |     "    features.append([[1, 1, 1, 1, 1], [1,0]])\n", | 
| 40 | 31 |     "    features.append([[1, 0, 0, 1, 1], [1,0]])\n", | 
| 41 |  | -    "#   features.append([[1, 0, 1, 1, 1], [1,0]])\n", | 
| 42 |  | -    "#   features.append([[1, 1, 0, 1, 1], [1,0]])\n", | 
| 43 |  | -    "#   features.append([[1, 0, 1, 0, 1], [1,0]])\n", | 
| 44 | 32 |     "\n", | 
|  | 33 | +    "# unknown patterns\n", | 
|  | 34 | +    "#    features.append([[1, 0, 0, 0, 0], [0,1]])\n", | 
|  | 35 | +    "#    features.append([[1, 0, 0, 1, 0], [0,1]])\n", | 
|  | 36 | +    "#    features.append([[1, 0, 1, 1, 0], [0,1]])\n", | 
|  | 37 | +    "#    features.append([[1, 1, 0, 1, 0], [0,1]])\n", | 
|  | 38 | +    "#    features.append([[0, 1, 0, 1, 1], [0,1]])\n", | 
|  | 39 | +    "#    features.append([[0, 0, 1, 0, 1], [0,1]])\n", | 
|  | 40 | +    "#    features.append([[1, 0, 1, 1, 1], [1,0]])\n", | 
|  | 41 | +    "#    features.append([[1, 1, 0, 1, 1], [1,0]])\n", | 
|  | 42 | +    "#    features.append([[1, 0, 1, 0, 1], [1,0]])\n", | 
|  | 43 | +    "\n", | 
|  | 44 | +    "    # shuffle our features and turn them into an np.array\n", | 
| 45 | 45 |     "    random.shuffle(features)\n", | 
| 46 | 46 |     "    features = np.array(features)\n", | 
| 47 | 47 |     "\n", | 
|  | 48 | +    "    # hold out a portion of the features for testing\n", | 
| 48 | 49 |     "    testing_size = int(test_size*len(features))\n", | 
| 49 | 50 |     "\n", | 
|  | 51 | +    "    # create train and test lists\n", | 
| 50 | 52 |     "    train_x = list(features[:,0][:-testing_size])\n", | 
| 51 | 53 |     "    train_y = list(features[:,1][:-testing_size])\n", | 
| 52 | 54 |     "    test_x = list(features[:,0][-testing_size:])\n", | 
| 53 | 55 |     "    test_y = list(features[:,1][-testing_size:])\n", | 
| 54 | 56 |     "\n", | 
| 55 |  | -    "    return train_x,train_y,test_x,test_y\n", | 
| 56 |  | -    "\n", | 
| 57 |  | -    "if __name__ == '__main__':\n", | 
| 58 |  | -    "    train_x,train_y,test_x,test_y = create_feature_sets_and_labels()\n" | 
|  | 57 | +    "    return train_x,train_y,test_x,test_y\n" | 
| 59 | 58 |    ] | 
| 60 | 59 |   }, | 
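To make the split concrete, here is a minimal usage sketch of `create_feature_sets_and_labels` as defined above; the expected sizes assume the ten uncommented patterns and the default `test_size = 0.2`, so `int(0.2 * 10) == 2` shuffled examples are held out:

```python
# usage sketch: call the helper above and inspect the resulting split
train_x, train_y, test_x, test_y = create_feature_sets_and_labels()

print(len(train_x), len(test_x))  # expected: 8 2 (10 patterns, 20% held out)
print(train_x[0], train_y[0])     # one shuffled 5-bit pattern and its one-hot label
```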
| 61 | 60 |   { | 
|  | 
| 64 | 63 |    "metadata": { | 
| 65 | 64 |     "collapsed": false | 
| 66 | 65 |    }, | 
| 67 |  | -   "outputs": [ | 
| 68 |  | -    { | 
| 69 |  | -     "name": "stdout", | 
| 70 |  | -     "output_type": "stream", | 
| 71 |  | -     "text": [ | 
| 72 |  | -      "Epoch 10 completed out of 50 cost: 0.3681\n", | 
| 73 |  | -      "Epoch 20 completed out of 50 cost: 0.0166706\n", | 
| 74 |  | -      "Epoch 30 completed out of 50 cost: 0.00777522\n", | 
| 75 |  | -      "Epoch 40 completed out of 50 cost: 0.00493255\n", | 
| 76 |  | -      "Accuracy: 1.0\n", | 
| 77 |  | -      "[[-0.26135445  4.12279558]]\n" | 
| 78 |  | -     ] | 
| 79 |  | -    } | 
| 80 |  | -   ], | 
|  | 66 | +   "outputs": [], | 
| 81 | 67 |    "source": [ | 
| 82 | 68 |     "import tensorflow as tf\n", | 
| 83 | 69 |     "import numpy as np\n", | 
| 84 | 70 |     "\n", | 
| 85 | 71 |     "train_x,train_y,test_x,test_y = create_feature_sets_and_labels()\n", | 
| 86 | 72 |     "\n", | 
|  | 73 | +    "# hidden layers and their nodes\n", | 
| 87 | 74 |     "n_nodes_hl1 = 20\n", | 
| 88 | 75 |     "n_nodes_hl2 = 20\n", | 
| 89 | 76 |     "\n", | 
|  | 77 | +    "# classes in our output\n", | 
| 90 | 78 |     "n_classes = 2\n", | 
|  | 79 | +    "# iterations and batch size to build our model\n", | 
| 91 | 80 |     "hm_epochs = 50\n", | 
| 92 | 81 |     "batch_size = 4\n", | 
| 93 | 82 |     "\n", | 
| 94 | 83 |     "x = tf.placeholder('float')\n", | 
| 95 | 84 |     "y = tf.placeholder('float')\n", | 
| 96 | 85 |     "\n", | 
|  | 86 | +    "# random weights and bias for our layers\n", | 
| 97 | 87 |     "hidden_1_layer = {'f_fum':n_nodes_hl1,\n", | 
| 98 | 88 |     "                  'weight':tf.Variable(tf.random_normal([len(train_x[0]), n_nodes_hl1])),\n", | 
| 99 | 89 |     "                  'bias':tf.Variable(tf.random_normal([n_nodes_hl1]))}\n", | 
|  | 
| 107 | 97 |     "                'bias':tf.Variable(tf.random_normal([n_classes])),}\n", | 
| 108 | 98 |     "                       \n", | 
| 109 | 99 |     "\n", | 
|  | 100 | +    "# our predictive model's definition\n", | 
| 110 | 101 |     "def neural_network_model(data):\n", | 
| 111 | 102 |     "\n", | 
| 112 | 103 |     "    l1 = tf.add(tf.matmul(data,hidden_1_layer['weight']), hidden_1_layer['bias'])\n", | 
|  | 
| 119 | 110 |     "\n", | 
| 120 | 111 |     "    return output\n", | 
| 121 | 112 |     "\n", | 
|  | 113 | +    "# training our model\n", | 
| 122 | 114 |     "def train_neural_network(x):\n", | 
|  | 115 | +    "    # use the model definition\n", | 
| 123 | 116 |     "    prediction = neural_network_model(x)\n", | 
| 124 | 117 |     "\n", | 
|  | 118 | +    "    # formula for cost (error)\n", | 
| 125 | 119 |     "    cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y) )\n", | 
| 126 |  | -    "    #optimizer = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.999).minimize(cost)\n", | 
|  | 120 | +    "    # minimize cost using gradient descent (learning rate of 1)\n", | 
| 127 | 121 |     "    optimizer = tf.train.GradientDescentOptimizer(1).minimize(cost)\n", | 
| 128 | 122 |     "\n", | 
|  | 123 | +    "    # TensorFlow session\n", | 
| 129 | 124 |     "    with tf.Session() as sess:\n", | 
|  | 125 | +    "        # initialize our variables\n", | 
| 130 | 126 |     "        sess.run(tf.global_variables_initializer())\n", | 
| 131 |  | -    "  \n", | 
|  | 127 | +    "\n", | 
|  | 128 | +    "        # loop through specified number of iterations\n", | 
| 132 | 129 |     "        for epoch in range(hm_epochs):\n", | 
| 133 | 130 |     "            epoch_loss = 0\n", | 
| 134 | 131 |     "            i=0\n", | 
|  | 132 | +    "            # handle batch-sized chunks of training data\n", | 
| 135 | 133 |     "            while i < len(train_x):\n", | 
| 136 | 134 |     "                start = i\n", | 
| 137 | 135 |     "                end = i+batch_size\n", | 
|  | 
| 143 | 141 |     "                i+=batch_size\n", | 
| 144 | 142 |     "                last_cost = c\n", | 
| 145 | 143 |     "\n", | 
| 146 |  | -    "            if (epoch% 10) == 0 and epoch > 1:\n", | 
|  | 144 | +    "            # print cost updates along the way\n", | 
|  | 145 | +    "            if (epoch % (hm_epochs/5)) == 0:\n", | 
| 147 | 146 |     "                print('Epoch', epoch, 'completed out of',hm_epochs,'cost:', last_cost)\n", | 
| 148 | 147 |     "\n", | 
|  | 148 | +    "        # print accuracy of our model against known test data (a subset of the shuffled known patterns)\n", | 
| 149 | 149 |     "        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\n", | 
| 150 | 150 |     "        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\n", | 
| 151 | 151 |     "        print('Accuracy:',accuracy.eval({x:test_x, y:test_y}))\n", | 
| 152 | 152 |     "\n", | 
|  | 153 | +    "        # print a prediction using our model\n", | 
| 153 | 154 |     "        output = prediction.eval(feed_dict = {x: [[1, 0, 0, 0, 0]]})\n", | 
| 154 | 155 |     "        print(output)\n", | 
|  | 156 | +    "        # squash the raw output values into (0, 1) with a sigmoid\n", | 
| 155 | 157 |     "        print(tf.sigmoid(output[0][0]).eval(), tf.sigmoid(output[0][1]).eval())\n", | 
| 156 | 158 |     "        \n", | 
| 157 | 159 |     "train_neural_network(x)\n" | 
|  | 
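One note on reading the final prediction above: `output` holds raw logits, and since training uses a softmax cross-entropy cost, softmax (rather than two independent sigmoids) is the normalization that turns them into class probabilities summing to 1. A minimal sketch, assuming the session is still open and `prediction` and `x` are defined as in the cell above:

```python
# sketch: convert the raw logits to class probabilities with softmax
output = prediction.eval(feed_dict={x: [[1, 0, 0, 0, 0]]})
probs = tf.nn.softmax(output).eval()  # e.g. [[p_match, p_no_match]], each row sums to 1
print(probs)
```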