
Commit e2f5f92

first commit
0 parents  commit e2f5f92

17 files changed: +1332 -0 lines changed

.gitignore

Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
MNIST_data
*.pyc
*.pyo

1-basics.py

Lines changed: 137 additions & 0 deletions
@@ -0,0 +1,137 @@
"""Summary of tensorflow basics.

Parag K. Mital, Jan 2016."""
# %% Import tensorflow and pyplot
import tensorflow as tf
import matplotlib.pyplot as plt

# %% tf.Graph represents a collection of tf.Operations
# You can create operations by writing out equations.
# By default, there is a graph: tf.get_default_graph()
# and any new operations are added to this graph.
# The result of a tf.Operation is a tf.Tensor, which holds
# the values.

# %% First a tf.Tensor
n_values = 32
x = tf.linspace(-3.0, 3.0, n_values)

# %% Construct a tf.Session to execute the graph.
sess = tf.Session()
result = sess.run(x)

# %% Alternatively pass a session to the eval fn:
x.eval(session=sess)
# x.eval() does not work, as it requires a session!

# %% We can set up an interactive session if we don't
# want to keep passing the session around:
sess.close()
sess = tf.InteractiveSession()

# %% Now this will work!
x.eval()

# %% Now a tf.Operation
# We'll use our values from [-3, 3] to create a Gaussian Distribution
sigma = 1.0
mean = 0.0
z = (tf.exp(tf.neg(tf.pow(x - mean, 2.0) /
                   (2.0 * tf.pow(sigma, 2.0)))) *
     (1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))
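# A quick NumPy cross-check of the same Gaussian formula (an illustrative
# sketch only; numpy is assumed to be available and is not otherwise used
# in this file):
import numpy as np
x_np = np.linspace(-3.0, 3.0, n_values)
z_np = (np.exp(-(x_np - mean) ** 2 / (2.0 * sigma ** 2)) *
        (1.0 / (sigma * np.sqrt(2.0 * 3.1415))))
print(np.allclose(z_np, z.eval(), atol=1e-5))  # expected: True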
# %% By default, new operations are added to the default Graph
assert z.graph is tf.get_default_graph()

# %% Execute the graph and plot the result
plt.plot(z.eval())

# %% We can find out the shape of a tensor like so:
print(z.get_shape())

# %% Or in a more friendly format
print(z.get_shape().as_list())

# %% Sometimes we may not know the shape of a tensor
# until it is computed in the graph. In that case
# we should use the tf.shape fn, which will return a
# Tensor which can be eval'ed, rather than a discrete
# value of tf.Dimension
print(tf.shape(z).eval())
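# A small illustrative sketch of that difference (the placeholder `p` is
# hypothetical and only introduced here): its static shape has an unknown
# first dimension, while tf.shape returns the concrete shape once a value
# is fed.
p = tf.placeholder(tf.float32, shape=[None, 2])
print(p.get_shape().as_list())  # [None, 2], first dim unknown statically
print(tf.shape(p).eval(feed_dict={p: [[1.0, 2.0], [3.0, 4.0]]}))  # [2 2]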
# %% We can combine tensors like so:
print(tf.pack([tf.shape(z), tf.shape(z), [3], [4]]).eval())

# %% Let's multiply the two to get a 2d gaussian
z_2d = tf.matmul(tf.reshape(z, [n_values, 1]), tf.reshape(z, [1, n_values]))

# %% Execute the graph and visualize the 2d gaussian
plt.imshow(z_2d.eval())

# %% For fun let's create a gabor patch:
x = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, n_values)), [n_values, 1])
y = tf.reshape(tf.ones_like(x), [1, n_values])
z = tf.mul(tf.matmul(x, y), z_2d)
plt.imshow(z.eval())

# %% We can also list all the operations of a graph:
ops = tf.get_default_graph().get_operations()
print([op.name for op in ops])

# %% Let's try creating a generic function for computing the same thing:
def gabor(n_values=32, sigma=1.0, mean=0.0):
    x = tf.linspace(-3.0, 3.0, n_values)
    z = (tf.exp(tf.neg(tf.pow(x - mean, 2.0) /
                       (2.0 * tf.pow(sigma, 2.0)))) *
         (1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))
    gauss_kernel = tf.matmul(
        tf.reshape(z, [n_values, 1]), tf.reshape(z, [1, n_values]))
    x = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, n_values)), [n_values, 1])
    y = tf.reshape(tf.ones_like(x), [1, n_values])
    gabor_kernel = tf.mul(tf.matmul(x, y), gauss_kernel)
    return gabor_kernel

# %% Confirm this does something:
plt.imshow(gabor().eval())

# %% And another function which can convolve
def convolve(img, W):
    # The W matrix is only 2D
    # But conv2d will need a tensor which is 4d:
    # height x width x n_input x n_output
    if len(W.get_shape()) == 2:
        dims = W.get_shape().as_list() + [1, 1]
        W = tf.reshape(W, dims)

    if len(img.get_shape()) == 2:
        # num x height x width x channels
        dims = [1] + img.get_shape().as_list() + [1]
        img = tf.reshape(img, dims)
    elif len(img.get_shape()) == 3:
        dims = [1] + img.get_shape().as_list()
        img = tf.reshape(img, dims)
        # if the image is 3 channels, then our convolution
        # kernel needs to be repeated for each input channel
        W = tf.concat(2, [W, W, W])

    # Stride is how many values to skip for the dimensions of
    # num, height, width, channels
    convolved = tf.nn.conv2d(img, W,
                             strides=[1, 1, 1, 1], padding='SAME')
    return convolved
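# %% A minimal usage sketch for the single-channel path of convolve()
# (illustrative only; the `gray` placeholder is hypothetical and not used
# again below):
gray = tf.placeholder(tf.float32, shape=[256, 256])
gray_convolved = convolve(gray, gabor())
print(gray_convolved.get_shape().as_list())  # [1, 256, 256, 1]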
# %% Load up an image:
from skimage import data
img = data.astronaut()
plt.imshow(img)
print(img.shape)

# %% Now create a placeholder for our graph which can store any input:
x = tf.placeholder(tf.float32, shape=img.shape)

# %% And a graph which can convolve our image with a gabor
out = convolve(x, gabor())

# %% Now send the image into the graph and compute the result
result = tf.squeeze(out).eval(feed_dict={x: img})
plt.imshow(result)

2-linear_regression.py

Lines changed: 79 additions & 0 deletions
@@ -0,0 +1,79 @@
"""Simple tutorial for using TensorFlow to compute a linear regression.

Parag K. Mital, Jan. 2016"""
# %% imports
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt


# %% Let's create some toy data
plt.ion()
n_observations = 100
fig, ax = plt.subplots(1, 1)
xs = np.linspace(-3, 3, n_observations)
ys = np.sin(xs) + np.random.uniform(-0.5, 0.5, n_observations)
ax.scatter(xs, ys)
fig.show()
plt.draw()

# %% tf.placeholders for the input and output of the network. Placeholders are
# variables which we need to fill in when we are ready to compute the graph.
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# %% We will try to optimize min_(W,b) ||(X*w + b) - y||^2
# The `Variable()` constructor requires an initial value for the variable,
# which can be a `Tensor` of any type and shape. The initial value defines the
# type and shape of the variable. After construction, the type and shape of
# the variable are fixed. The value can be changed using one of the assign
# methods.
W = tf.Variable(tf.random_normal([1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
Y_pred = tf.add(tf.mul(X, W), b)

# %% Loss function will measure the distance between our observations
# and predictions and average over them.
cost = tf.reduce_sum(tf.pow(Y_pred - Y, 2)) / (n_observations - 1)

# %% if we wanted to add regularization, we could add other terms to the cost,
# e.g. ridge regression has a parameter controlling the amount of shrinkage
# over the norm of activations. the larger the shrinkage, the more robust
# to collinearity.
# cost = tf.add(cost, tf.mul(1e-6, tf.global_norm([W])))
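# For reference, a sketch of what a true squared-norm (ridge/L2) penalty
# could look like; tf.nn.l2_loss(W) computes sum(W ** 2) / 2. This line is
# illustrative and left commented out:
# cost = tf.add(cost, tf.mul(1e-6, tf.nn.l2_loss(W)))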
# %% Use gradient descent to optimize W,b
# Performs a single step in the negative gradient
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# %% We create a session to use the graph
n_epochs = 1000
with tf.Session() as sess:
    # Here we tell tensorflow that we want to initialize all
    # the variables in the graph so we can use them
    sess.run(tf.initialize_all_variables())

    # Fit all training data
    prev_training_cost = 0.0
    for epoch_i in range(n_epochs):
        for (x, y) in zip(xs, ys):
            sess.run(optimizer, feed_dict={X: x, Y: y})

        training_cost = sess.run(
            cost, feed_dict={X: xs, Y: ys})
        print(training_cost)

        if epoch_i % 20 == 0:
            ax.plot(xs, Y_pred.eval(
                feed_dict={X: xs}, session=sess),
                'k', alpha=epoch_i / n_epochs)
            fig.show()
            plt.draw()

        # Allow the training to quit if we've reached a minimum
        if np.abs(prev_training_cost - training_cost) < 0.000001:
            break
        prev_training_cost = training_cost
fig.show()
plt.waitforbuttonpress()
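# %% A rough sanity check (an illustrative sketch appended here): an
# ordinary least-squares fit of the same data should land close to the
# W and b learned above.
slope, intercept = np.polyfit(xs, ys, 1)
print('np.polyfit slope=%f, intercept=%f' % (slope, intercept))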

3-polynomial_regression.py

Lines changed: 78 additions & 0 deletions
@@ -0,0 +1,78 @@
"""Simple tutorial for using TensorFlow to compute polynomial regression.

Parag K. Mital, Jan. 2016"""
# %% Imports
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt


# %% Let's create some toy data
plt.ion()
n_observations = 100
fig, ax = plt.subplots(1, 1)
xs = np.linspace(-3, 3, n_observations)
ys = np.sin(xs) + np.random.uniform(-0.5, 0.5, n_observations)
ax.scatter(xs, ys)
fig.show()
plt.draw()

# %% tf.placeholders for the input and output of the network. Placeholders are
# variables which we need to fill in when we are ready to compute the graph.
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# %% Instead of a single factor and a bias, we'll create a polynomial function
# of different polynomial degrees. We will then learn the influence that each
# degree of the input (X^0, X^1, X^2, ...) has on the final output (Y).
Y_pred = tf.Variable(tf.random_normal([1]), name='bias')
for pow_i in range(1, 5):
    W = tf.Variable(tf.random_normal([1]), name='weight_%d' % pow_i)
    Y_pred = tf.add(tf.mul(tf.pow(X, pow_i), W), Y_pred)
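# Written out term by term, the loop above builds
#   Y_pred = bias + W_1*X + W_2*X^2 + W_3*X^3 + W_4*X^4
# i.e. a degree-4 polynomial in X whose five coefficients are the
# variables we fit below.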
# %% Loss function will measure the distance between our observations
# and predictions and average over them.
cost = tf.reduce_sum(tf.pow(Y_pred - Y, 2)) / (n_observations - 1)

# %% if we wanted to add regularization, we could add other terms to the cost,
# e.g. ridge regression has a parameter controlling the amount of shrinkage
# over the norm of activations. the larger the shrinkage, the more robust
# to collinearity.
# cost = tf.add(cost, tf.mul(1e-6, tf.global_norm([W])))

# %% Use gradient descent to optimize W,b
# Performs a single step in the negative gradient
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# %% We create a session to use the graph
n_epochs = 1000
with tf.Session() as sess:
    # Here we tell tensorflow that we want to initialize all
    # the variables in the graph so we can use them
    sess.run(tf.initialize_all_variables())

    # Fit all training data
    prev_training_cost = 0.0
    for epoch_i in range(n_epochs):
        for (x, y) in zip(xs, ys):
            sess.run(optimizer, feed_dict={X: x, Y: y})

        training_cost = sess.run(
            cost, feed_dict={X: xs, Y: ys})
        print(training_cost)

        if epoch_i % 100 == 0:
            ax.plot(xs, Y_pred.eval(
                feed_dict={X: xs}, session=sess),
                'k', alpha=epoch_i / n_epochs)
            fig.show()
            plt.draw()

        # Allow the training to quit if we've reached a minimum
        if np.abs(prev_training_cost - training_cost) < 0.000001:
            break
        prev_training_cost = training_cost
ax.set_ylim([-3, 3])
fig.show()
plt.waitforbuttonpress()
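# %% A rough sanity check (an illustrative sketch appended here): NumPy's
# closed-form degree-4 polynomial fit of the same data should trace a
# similar curve to the one learned above.
coeffs = np.polyfit(xs, ys, 4)
ax.plot(xs, np.polyval(coeffs, xs), 'r')
fig.show()
plt.draw()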

0 commit comments
