Skip to content

Commit 8255501

Browse files
committed
clean tensorflow udacity 2
1 parent 5b1dc39 commit 8255501

9 files changed

+5876
-829
lines changed

TensorFlow/Udacity/.ipynb_checkpoints/2_fullyconnected-checkpoint.ipynb

Lines changed: 333 additions & 224 deletions
Large diffs are not rendered by default.

TensorFlow/Udacity/.ipynb_checkpoints/2_ref_fullyconnected-checkpoint.ipynb

Lines changed: 721 additions & 0 deletions
Large diffs are not rendered by default.

TensorFlow/Udacity/.ipynb_checkpoints/3_ref_regularization-checkpoint.ipynb

Lines changed: 1839 additions & 0 deletions
Large diffs are not rendered by default.

TensorFlow/Udacity/.ipynb_checkpoints/3_regularization-checkpoint.ipynb

Lines changed: 1304 additions & 0 deletions
Large diffs are not rendered by default.

TensorFlow/Udacity/1_notmnist.ipynb

Lines changed: 51 additions & 51 deletions
Large diffs are not rendered by default.

TensorFlow/Udacity/2_fullyconnected.ipynb

Lines changed: 333 additions & 224 deletions
Large diffs are not rendered by default.

TensorFlow/Udacity/2_ref_fullyconnected.ipynb

Lines changed: 7 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -384,15 +384,13 @@
384384
" # arrays.\n",
385385
" _, l, predictions = session.run([optimizer, loss, train_prediction])\n",
386386
" if (step % 100 == 0):\n",
387-
" print('Loss at step %d: %f' % (step, l))\n",
388-
" print('Training accuracy: %.1f%%' % accuracy(\n",
389-
" predictions, train_labels[:train_subset, :]))\n",
390-
" # Calling .eval() on valid_prediction is basically like calling run(), but\n",
391-
" # just to get that one numpy array. Note that it recomputes all its graph\n",
392-
" # dependencies.\n",
393-
" print('Validation accuracy: %.1f%%' % accuracy(\n",
394-
" valid_prediction.eval(), valid_labels))\n",
395-
" print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))"
387+
" print('Loss at step %d: %f' % (step, l))\n",
388+
" print('Training accuracy: %.1f%%' % accuracy(predictions, train_labels[:train_subset, :]))\n",
389+
" # Calling .eval() on valid_prediction is basically like calling run(), but\n",
390+
" # just to get that one numpy array. Note that it recomputes all its graph\n",
391+
" # dependencies.\n",
392+
" print('Validation accuracy: %.1f%%' % accuracy(valid_prediction.eval(), valid_labels))\n",
393+
" print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))"
396394
]
397395
},
398396
{

TensorFlow/Udacity/3_ref_regularization.ipynb

Lines changed: 1 addition & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -629,44 +629,7 @@
629629
},
630630
"outputs": [],
631631
"source": [
632-
"batch_size = 128\n",
633-
"num_hidden_nodes = 1024\n",
634-
"\n",
635-
"graph = tf.Graph()\n",
636-
"with graph.as_default():\n",
637-
"\n",
638-
" # Input data. For the training data, we use a placeholder that will be fed\n",
639-
" # at run time with a training minibatch.\n",
640-
" tf_train_dataset = tf.placeholder(tf.float32,\n",
641-
" shape=(batch_size, image_size * image_size))\n",
642-
" tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
643-
" tf_valid_dataset = tf.constant(valid_dataset)\n",
644-
" tf_test_dataset = tf.constant(test_dataset)\n",
645-
" beta_regul = tf.placeholder(tf.float32)\n",
646-
" \n",
647-
" # Variables.\n",
648-
" weights1 = tf.Variable(\n",
649-
" tf.truncated_normal([image_size * image_size, num_hidden_nodes]))\n",
650-
" biases1 = tf.Variable(tf.zeros([num_hidden_nodes]))\n",
651-
" weights2 = tf.Variable(\n",
652-
" tf.truncated_normal([num_hidden_nodes, num_labels]))\n",
653-
" biases2 = tf.Variable(tf.zeros([num_labels]))\n",
654-
" \n",
655-
" # Training computation.\n",
656-
" lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n",
657-
" logits = tf.matmul(lay1_train, weights2) + biases2\n",
658-
" loss = tf.reduce_mean(\n",
659-
" tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n",
660-
" \n",
661-
" # Optimizer.\n",
662-
" optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n",
663-
" \n",
664-
" # Predictions for the training, validation, and test data.\n",
665-
" train_prediction = tf.nn.softmax(logits)\n",
666-
" lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n",
667-
" valid_prediction = tf.nn.softmax(tf.matmul(lay1_valid, weights2) + biases2)\n",
668-
" lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n",
669-
" test_prediction = tf.nn.softmax(tf.matmul(lay1_test, weights2) + biases2)"
632+
"-"
670633
]
671634
},
672635
{

0 commit comments

Comments (0)