  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 29,
+   "execution_count": 36,
    "metadata": {
     "collapsed": false
    },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "'1.0.1'"
-      ]
-     },
-     "execution_count": 29,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "import tensorflow as tf\n",
-    "tf.__version__"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "hdf5 is not supported on this machine (please install/reinstall h5py for optimal experience)\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import numpy as np\n",
     "import tflearn\n",
     "import random\n",
     "\n",
-    "def create_feature_sets_and_labels(test_size = 0.2):\n",
+    "def create_feature_sets_and_labels():\n",
     "\n",
     "    # known patterns (5 features) output of [1] of positions [0,4]==1\n",
     "    features = []\n",
 | 
     "    features.append([[1, 1, 1, 1, 1], [1,0]])\n",
     "    features.append([[1, 0, 0, 1, 1], [1,0]])\n",
     "\n",
-    "    # shuffle out features and turn into np.array\n",
+    "    # shuffle our features and turn into np.array\n",
     "    random.shuffle(features)\n",
     "    features = np.array(features)\n",
     "\n",
-    "    # split a portion of the features into tests\n",
-    "    testing_size = int(test_size*len(features))\n",
-    "\n",
     "    # create train and test lists\n",
-    "    train_x = list(features[:,0][:-testing_size])\n",
-    "    train_y = list(features[:,1][:-testing_size])\n",
-    "    test_x = list(features[:,0][-testing_size:])\n",
-    "    test_y = list(features[:,1][-testing_size:])\n",
+    "    train_x = list(features[:,0])\n",
+    "    train_y = list(features[:,1])\n",
     "\n",
-    "    return train_x, train_y, test_x, test_y"
+    "    return train_x, train_y"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 37,
    "metadata": {
     "collapsed": false
    },
    "outputs": [],
    "source": [
-    "train_x, train_y, test_x, test_y = create_feature_sets_and_labels()"
+    "train_x, train_y = create_feature_sets_and_labels()"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 32,
+   "execution_count": 38,
    "metadata": {
     "collapsed": false
    },
 | 
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Training Step: 999  | total loss: \u001b[1m\u001b[32m0.00214\u001b[0m\u001b[0m | time: 0.003s\n",
-      "| Adam | epoch: 500 | loss: 0.00214 - acc: 1.0000 -- iter: 16/18\n",
-      "Training Step: 1000  | total loss: \u001b[1m\u001b[32m0.00209\u001b[0m\u001b[0m | time: 0.006s\n",
-      "| Adam | epoch: 500 | loss: 0.00209 - acc: 1.0000 -- iter: 18/18\n",
-      "--\n",
-      "INFO:tensorflow:/home/gk/gensim/notebooks/ANN.model is not in all_model_checkpoint_paths. Manually adding it.\n"
+      "Training Step: 999  | total loss: \u001b[1m\u001b[32m0.00434\u001b[0m\u001b[0m | time: 0.003s\n",
+      "| Adam | epoch: 500 | loss: 0.00434 - acc: 0.9999 -- iter: 16/22\n",
+      "Training Step: 1000  | total loss: \u001b[1m\u001b[32m0.00429\u001b[0m\u001b[0m | time: 0.006s\n",
+      "| Adam | epoch: 500 | loss: 0.00429 - acc: 0.9999 -- iter: 22/22\n",
+      "--\n"
      ]
     }
    ],
 | 
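The cells that actually build and train the network fall outside the hunks shown above. A minimal sketch of how a TFLearn model for this task could be assembled follows; the layer sizes, variable names, and the final predict call are illustrative assumptions rather than the notebook's elided cells, and only the 5-feature input, one-hot [1,0]/[0,1] labels, Adam optimizer, and 500-epoch run are taken from the diff itself:

import tflearn

# train_x / train_y come from create_feature_sets_and_labels() in the first cell above:
# 5 binary features per pattern, one-hot [1,0] / [0,1] labels.
train_x, train_y = create_feature_sets_and_labels()

# Hypothetical network: 5 inputs -> one hidden layer -> 2-way softmax.
net = tflearn.input_data(shape=[None, 5])
net = tflearn.fully_connected(net, 32)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')

model = tflearn.DNN(net)
# 500 epochs over all patterns matches the "| Adam | epoch: 500 ... iter: 22/22" log above.
model.fit(train_x, train_y, n_epoch=500, show_metric=True)

# Sanity check on one pattern with positions 0 and 4 set.
print(model.predict([[1, 0, 0, 0, 1]]))

Since the diff removes the train/test split from create_feature_sets_and_labels(), all 22 patterns are used for fitting, which is why the training log now reports iter: 22/22 instead of 18/18.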