"""Summary of TensorFlow basics.

Parag K. Mital, Jan 2016."""
# %% Import tensorflow and pyplot
import tensorflow as tf
import matplotlib.pyplot as plt

# %% tf.Graph represents a collection of tf.Operations
# You can create operations by writing out equations.
# By default, there is a graph: tf.get_default_graph()
# and any new operations are added to this graph.
# The result of a tf.Operation is a tf.Tensor, which holds
# the values.

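# %% A minimal sketch of the same idea: operations can also be
# added to an explicitly created graph rather than the default one:
g = tf.Graph()
with g.as_default():
    a = tf.constant(2.0)
assert a.graph is g
assert a.graph is not tf.get_default_graph()
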
# %% First a tf.Tensor
n_values = 32
x = tf.linspace(-3.0, 3.0, n_values)

# %% Construct a tf.Session to execute the graph.
sess = tf.Session()
result = sess.run(x)

# %% Alternatively pass a session to the eval fn:
x.eval(session=sess)
# x.eval() does not work, as it requires a session!
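
# %% A small sketch: a session can also be installed as the default
# for a block, so that eval() works without the session keyword:
with sess.as_default():
    x.eval()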

# %% We can set up an interactive session if we don't
# want to keep passing the session around:
sess.close()
sess = tf.InteractiveSession()

# %% Now this will work!
x.eval()

# %% Now a tf.Operation
# We'll use our values from [-3, 3] to create a Gaussian distribution
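# The Gaussian PDF computed below:
#   z = exp(-(x - mean)^2 / (2 * sigma^2)) / (sigma * sqrt(2 * pi))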
sigma = 1.0
mean = 0.0
z = (tf.exp(tf.neg(tf.pow(x - mean, 2.0) /
                   (2.0 * tf.pow(sigma, 2.0)))) *
     (1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))

# %% By default, new operations are added to the default Graph
assert z.graph is tf.get_default_graph()

# %% Execute the graph and plot the result
plt.plot(z.eval())

# %% We can find out the shape of a tensor like so:
print(z.get_shape())

# %% Or in a more friendly format
print(z.get_shape().as_list())

# %% Sometimes we may not know the shape of a tensor
# until it is computed in the graph. In that case
# we should use the tf.shape fn, which will return a
# Tensor we can eval, rather than a static tf.TensorShape
# of tf.Dimension values:
print(tf.shape(z).eval())
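
# %% A quick sketch of the difference: a placeholder can have a
# partially unknown static shape, while tf.shape is only known
# once a value is fed in at run time:
p = tf.placeholder(tf.float32, shape=[None, 2])
print(p.get_shape().as_list())  # [None, 2]: static shape
print(tf.shape(p).eval(feed_dict={p: [[1.0, 2.0]]}))  # [1 2] at run time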

# %% We can combine tensors like so:
print(tf.pack([tf.shape(z), tf.shape(z), [3], [4]]).eval())
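# (Note: in TensorFlow 1.0 and later, tf.pack was renamed to
# tf.stack, tf.neg to tf.negative, and tf.mul to tf.multiply.)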

# %% Let's multiply z by itself, a column vector times a row
# vector (an outer product), to get a 2D Gaussian:
z_2d = tf.matmul(tf.reshape(z, [n_values, 1]), tf.reshape(z, [1, n_values]))

# %% Execute the graph and plot the 2D Gaussian as an image
plt.imshow(z_2d.eval())

# %% For fun let's create a Gabor patch by modulating the
# 2D Gaussian with a sinusoid:
x = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, n_values)), [n_values, 1])
y = tf.reshape(tf.ones_like(x), [1, n_values])
z = tf.mul(tf.matmul(x, y), z_2d)
plt.imshow(z.eval())

# %% We can also list all the operations of a graph:
ops = tf.get_default_graph().get_operations()
print([op.name for op in ops])
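
# %% A small sketch: a tensor can also be looked up by name from
# the graph, e.g. the first output of the first operation:
print(tf.get_default_graph().get_tensor_by_name(ops[0].name + ':0'))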

# %% Let's try creating a generic function for computing the same thing:
def gabor(n_values=32, sigma=1.0, mean=0.0):
    x = tf.linspace(-3.0, 3.0, n_values)
    z = (tf.exp(tf.neg(tf.pow(x - mean, 2.0) /
                       (2.0 * tf.pow(sigma, 2.0)))) *
         (1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))
    gauss_kernel = tf.matmul(
        tf.reshape(z, [n_values, 1]), tf.reshape(z, [1, n_values]))
    x = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, n_values)), [n_values, 1])
    y = tf.reshape(tf.ones_like(x), [1, n_values])
    gabor_kernel = tf.mul(tf.matmul(x, y), gauss_kernel)
    return gabor_kernel

# %% Confirm this does something:
plt.imshow(gabor().eval())
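
# %% A quick sketch with different parameters (values picked
# arbitrarily for illustration):
plt.imshow(gabor(n_values=64, sigma=0.5).eval())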

# %% And another function which can convolve
def convolve(img, W):
    # The W matrix is only 2D,
    # but conv2d requires a 4D filter tensor:
    # height x width x n_input_channels x n_output_channels
    if len(W.get_shape()) == 2:
        dims = W.get_shape().as_list() + [1, 1]
        W = tf.reshape(W, dims)

    if len(img.get_shape()) == 2:
        # conv2d also expects a 4D input:
        # num x height x width x channels
        dims = [1] + img.get_shape().as_list() + [1]
        img = tf.reshape(img, dims)
    elif len(img.get_shape()) == 3:
        dims = [1] + img.get_shape().as_list()
        img = tf.reshape(img, dims)
        # if the image has 3 channels, then our convolution
        # kernel needs to be repeated for each input channel
        W = tf.concat(2, [W, W, W])

    # Stride is how many values to skip along each of the
    # num, height, width, and channels dimensions
    convolved = tf.nn.conv2d(img, W,
                             strides=[1, 1, 1, 1], padding='SAME')
    return convolved

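# %% A quick sketch: convolve the 2D Gaussian from above with the
# Gabor kernel (both are 32 x 32, single channel):
plt.imshow(tf.squeeze(convolve(z_2d, gabor())).eval())
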
# %% Load up an image:
from skimage import data
img = data.astronaut()
plt.imshow(img)
print(img.shape)

# %% Now create a placeholder for our graph: a symbolic input
# that we will feed with image data when we run the graph:
x = tf.placeholder(tf.float32, shape=img.shape)
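
# %% A sketch: shapes may also be left partially unknown. This
# hypothetical placeholder accepts any height and width, as long
# as the input has 3 channels (it is not used further below):
x_any = tf.placeholder(tf.float32, shape=[None, None, 3])
print(x_any.get_shape().as_list())  # [None, None, 3]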

# %% And a graph which can convolve our image with a Gabor kernel
out = convolve(x, gabor())

# %% Now send the image into the graph and compute the result
result = tf.squeeze(out).eval(feed_dict={x: img})
plt.imshow(result)
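
# %% Note: when running this file as a plain script rather than
# cell-by-cell in an interactive environment, matplotlib figures
# are only displayed after an explicit call to:
plt.show()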