
Commit a27d844

Author: Vijay Vasudevan (committed)
Merge commit for internal changes
2 parents: a45c991 + 9b70316

File tree

25 files changed (+279, -230 lines)


tensorflow/core/kernels/serialize_sparse_op.cc

Lines changed: 1 addition & 0 deletions
@@ -16,6 +16,7 @@ limitations under the License.
 #define EIGEN_USE_THREADS
 
 #include <algorithm>
+#include <numeric>
 #include <unordered_map>
 #include <utility>
 #include <vector>

tensorflow/core/kernels/sparse_concat_op.cc

Lines changed: 1 addition & 0 deletions
@@ -16,6 +16,7 @@ limitations under the License.
 #define EIGEN_USE_THREADS
 
 #include <algorithm>
+#include <numeric>
 #include <unordered_map>
 #include <utility>
 #include <vector>

tensorflow/core/kernels/sparse_reorder_op.cc

Lines changed: 1 addition & 0 deletions
@@ -16,6 +16,7 @@ limitations under the License.
 #define EIGEN_USE_THREADS
 
 #include <algorithm>
+#include <numeric>
 #include <unordered_map>
 #include <utility>
 #include <numeric>

tensorflow/core/kernels/sparse_to_dense_op.cc

Lines changed: 1 addition & 0 deletions
@@ -20,6 +20,7 @@ limitations under the License.
 
 #define EIGEN_USE_THREADS
 
+#include <numeric>
 #include <sstream>
 #include <string>
 #include <unordered_map>

tensorflow/core/ops/ops.pbtxt

Lines changed: 4 additions & 4 deletions
@@ -1712,7 +1712,7 @@ op {
     }
   }
   summary: "Computes a 2-D convolution given 4-D `input` and `filter` tensors."
-  description: "Given an input tensor of shape `[batch, in_height, in_width, in_channels]`\nand a filter / kernel tensor of shape\n`[filter_height, filter_width, in_channels, out_channels]`, this op\nperforms the following:\n\n1. Flattens the filter to a 2-D matrix with shape\n `[filter_height * filter_width * in_channels, output_channels]`.\n2. Extracts image patches from the the input tensor to form a *virtual*\n tensor of shape `[batch, out_height, out_width,\n filter_height * filter_width * in_channels]`.\n3. For each patch, right-multiplies the filter matrix and the image patch\n vector.\n\nIn detail,\n\n output[b, i, j, k] =\n sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *\n filter[di, dj, q, k]\n\nMust have `strides[0] = strides[3] = 1`. For the most common case of the same\nhorizontal and vertices strides, `strides = [1, stride, stride, 1]`."
+  description: "Given an input tensor of shape `[batch, in_height, in_width, in_channels]`\nand a filter / kernel tensor of shape\n`[filter_height, filter_width, in_channels, out_channels]`, this op\nperforms the following:\n\n1. Flattens the filter to a 2-D matrix with shape\n `[filter_height * filter_width * in_channels, output_channels]`.\n2. Extracts image patches from the input tensor to form a *virtual*\n tensor of shape `[batch, out_height, out_width,\n filter_height * filter_width * in_channels]`.\n3. For each patch, right-multiplies the filter matrix and the image patch\n vector.\n\nIn detail,\n\n output[b, i, j, k] =\n sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *\n filter[di, dj, q, k]\n\nMust have `strides[0] = strides[3] = 1`. For the most common case of the same\nhorizontal and vertices strides, `strides = [1, stride, stride, 1]`."
 }
 op {
   name: "Conv2DBackpropFilter"
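To make the Conv2D description above (whose "the the" typo this hunk fixes) easier to follow, here is a minimal NumPy sketch of the computation it spells out, assuming NHWC layout, VALID padding, and strides = [1, 1, 1, 1]. The function and variable names are illustrative; this is not the TensorFlow kernel.

import numpy as np

def conv2d_valid(inp, filt):
  """Naive NHWC conv2d, VALID padding, stride 1 (illustration only)."""
  batch, in_h, in_w, in_c = inp.shape
  f_h, f_w, _, out_c = filt.shape
  out_h, out_w = in_h - f_h + 1, in_w - f_w + 1
  out = np.zeros((batch, out_h, out_w, out_c), dtype=inp.dtype)
  # Step 1 of the description: flatten the filter to a 2-D matrix of shape
  # [filter_height * filter_width * in_channels, output_channels].
  filt_mat = filt.reshape(f_h * f_w * in_c, out_c)
  for b in range(batch):
    for i in range(out_h):
      for j in range(out_w):
        # Steps 2-3: flatten each image patch and right-multiply the filter matrix.
        patch = inp[b, i:i + f_h, j:j + f_w, :].reshape(-1)
        out[b, i, j, :] = patch @ filt_mat
  return out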
@@ -4782,7 +4782,7 @@ op {
   attr {
     name: "num_negative_samples"
     type: "int"
-    description: "Number of negative samples per exaple."
+    description: "Number of negative samples per example."
   }
   summary: "Training via negative sampling."
 }
@@ -5029,7 +5029,7 @@ op {
   attr {
     name: "dense_shapes"
     type: "list(shape)"
-    description: "A list of Ndense shapes; the shapes of data in each Feature\ngiven in dense_keys.\nThe number of elements in the Feature corresponding to dense_key[j]\nmust always equal dense_shapes[j].NumEntries().\nIf dense_shapes[j] == (D0, D1, ..., DN) then the the shape of output\nTensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):\nThe dense outputs are just the inputs row-stacked by batch."
+    description: "A list of Ndense shapes; the shapes of data in each Feature\ngiven in dense_keys.\nThe number of elements in the Feature corresponding to dense_key[j]\nmust always equal dense_shapes[j].NumEntries().\nIf dense_shapes[j] == (D0, D1, ..., DN) then the shape of output\nTensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):\nThe dense outputs are just the inputs row-stacked by batch."
     has_minimum: true
   }
   summary: "Transforms a vector of brain.Example protos (as strings) into typed tensors."
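Reading the corrected dense_shapes sentence concretely: if dense_shapes[j] == (3,) and two serialized Examples are parsed, the dense output for key j is the per-example values row-stacked into shape (|serialized|, 3) = (2, 3). A tiny NumPy illustration with made-up feature values:

import numpy as np

# Each Example contributes dense_shapes[j].NumEntries() == 3 values (made up here).
example_0 = np.array([1.0, 2.0, 3.0])
example_1 = np.array([4.0, 5.0, 6.0])
dense_values_j = np.stack([example_0, example_1])  # inputs row-stacked by batch
assert dense_values_j.shape == (2, 3)              # (|serialized|, D0)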
@@ -8852,7 +8852,7 @@ op {
   attr {
     name: "f"
     type: "func"
-    description: "The function we want to compute the gradient for.\n\nThe function \'f\' must be a numerical function which takes N inputs and\nproduces M outputs. Its gradient function \'g\', which is computed by\nthis SymbolicGradient op is a function taking N + M inputs and\nproduces N outputs.\n\nI.e. if we have\n (y1, y2, ..., y_M) = f(x1, x2, ..., x_N),\nthen, g is\n (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N,\n dL/dy1, dL/dy2, ..., dL/dy_M),\n\nwhere L is a scalar-value function of (x1, x2, ..., xN) (e.g., the\nloss function). dL/dx_i is the the partial derivative of L with respect\nto x_i.\n\n(Needs some math expert to say the comment above better.)"
+    description: "The function we want to compute the gradient for.\n\nThe function \'f\' must be a numerical function which takes N inputs and\nproduces M outputs. Its gradient function \'g\', which is computed by\nthis SymbolicGradient op is a function taking N + M inputs and\nproduces N outputs.\n\nI.e. if we have\n (y1, y2, ..., y_M) = f(x1, x2, ..., x_N),\nthen, g is\n (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N,\n dL/dy1, dL/dy2, ..., dL/dy_M),\n\nwhere L is a scalar-value function of (x1, x2, ..., xN) (e.g., the\nloss function). dL/dx_i is the partial derivative of L with respect\nto x_i.\n\n(Needs some math expert to say the comment above better.)"
   }
   summary: "Computes the gradient function for function f via backpropagation."
 }
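The corrected SymbolicGradient description defines a signature contract: f maps N inputs to M outputs, and its gradient function g maps the N inputs plus the M upstream gradients dL/dy back to N gradients dL/dx. A minimal hand-written sketch of that contract for y = x1 * x2 (illustrative only, not the TensorFlow implementation):

def f(x1, x2):            # N = 2 inputs, M = 1 output
  return (x1 * x2,)

def g(x1, x2, dl_dy1):    # takes N + M = 3 inputs ...
  # ... and produces N = 2 outputs: dL/dx1 and dL/dx2.
  return (dl_dy1 * x2, dl_dy1 * x1)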

tensorflow/models/rnn/ptb/reader.py

Lines changed: 1 addition & 2 deletions
@@ -43,8 +43,7 @@ def _build_vocab(filename):
   data = _read_words(filename)
 
   counter = collections.Counter(data)
-  count_pairs = sorted(counter.items(),
-                       key=lambda x : (-x[1], x[0]))
+  count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
 
   words, _ = list(zip(*count_pairs))
   word_to_id = dict(zip(words, range(len(words))))
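The reformatted line keeps the same behavior: words are sorted by descending count, ties are broken alphabetically, and ids are assigned by rank. A quick self-contained check of that ordering (the toy sentence is made up):

import collections

data = ["the", "cat", "sat", "on", "the", "mat"]
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
assert word_to_id["the"] == 0   # highest count comes first
assert word_to_id["cat"] == 1   # ties are ordered alphabetically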

tensorflow/models/rnn/translate/seq2seq_model.py

Lines changed: 4 additions & 3 deletions
@@ -141,9 +141,10 @@ def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
       # If we use output projection, we need to project outputs for decoding.
       if output_projection is not None:
         for b in xrange(len(buckets)):
-          self.outputs[b] = [tf.matmul(output, output_projection[0]) +
-                             output_projection[1]
-                             for output in self.outputs[b]]
+          self.outputs[b] = [
+              tf.matmul(output, output_projection[0]) + output_projection[1]
+              for output in self.outputs[b]
+          ]
     else:
       self.outputs, self.losses = seq2seq.model_with_buckets(
           self.encoder_inputs, self.decoder_inputs, targets,
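The reformatted list comprehension is a per-bucket output projection: each decoder output is multiplied by the projection matrix and shifted by the bias to produce full-vocabulary logits. A rough NumPy analogue, with assumed sizes (hidden size 4, target vocabulary 10):

import numpy as np

output = np.random.randn(1, 4)               # one decoder output: [batch, hidden]
W, b = np.random.randn(4, 10), np.zeros(10)  # output_projection = (W, b)
logits = output @ W + b                      # tf.matmul(output, W) + b
assert logits.shape == (1, 10)               # [batch, target_vocab_size]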

tensorflow/python/client/session.py

Lines changed: 12 additions & 1 deletion
@@ -60,6 +60,17 @@ def _get_feeds_for_indexed_slices(feed, feed_val):
       [feed.values, feed.indices] if feed.dense_shape is None
       else [feed.values, feed.indices, feed.dense_shape], feed_val))
 
+def _get_indexed_slices_value_from_fetches(fetched_vals):
+  return ops.IndexedSlicesValue(fetched_vals[0], fetched_vals[1],
+                                fetched_vals[2]
+                                if len(fetched_vals) == 3 else None)
+
+
+def _get_feeds_for_indexed_slices(feed, feed_val):
+  return list(zip([feed.values, feed.indices] if feed.dense_shape is None else
+                  [feed.values, feed.indices, feed.dense_shape], feed_val))
+
+
 class BaseSession(SessionInterface):
   """A class for interacting with a TensorFlow computation.
@@ -235,7 +246,7 @@ def as_default(self):
        [fetch.values, fetch.indices] if fetch.dense_shape is None
        else [fetch.values, fetch.indices, fetch.dense_shape],
        _get_indexed_slices_value_from_fetches),
-         _get_feeds_for_indexed_slices),
+       _get_feeds_for_indexed_slices),
      # The default catches all types and performs no expansions.
      (object,
       lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),
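A small stand-alone sketch of what the helpers added above do when a feed-dict key is an IndexedSlices: the slices are unpacked into their component tensors and paired with the corresponding feed values. FakeSlices and the tensor-name strings below are stand-ins for illustration, not TensorFlow objects.

import collections

FakeSlices = collections.namedtuple("FakeSlices",
                                    ["values", "indices", "dense_shape"])

def get_feeds_for_indexed_slices(feed, feed_val):
  # Same pairing logic as _get_feeds_for_indexed_slices above.
  return list(zip([feed.values, feed.indices] if feed.dense_shape is None else
                  [feed.values, feed.indices, feed.dense_shape], feed_val))

feed = FakeSlices("values_t", "indices_t", "shape_t")
print(get_feeds_for_indexed_slices(feed, ([1.0], [0], [4])))
# -> [('values_t', [1.0]), ('indices_t', [0]), ('shape_t', [4])]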

tensorflow/python/client/session_test.py

Lines changed: 21 additions & 16 deletions
@@ -254,8 +254,7 @@ def testFetchIndexedSlices(self):
       values = np.array([1.0, 2.0]).astype(np.float32)
       dense_shape = np.array([7, 9, 2]).astype(np.int64)
       ind = ops.IndexedSlices(
-          constant_op.constant(values),
-          constant_op.constant(indices),
+          constant_op.constant(values), constant_op.constant(indices),
           constant_op.constant(dense_shape))
       # Single fetch, use as tuple
       ind_out = s.run(ind)
@@ -290,16 +289,20 @@ def testFeedIndexedSlices(self):
       indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
       dense_shape = np.array([7, 9, 2]).astype(np.int64)
       ind = ops.IndexedSlices(
-          array_ops.placeholder(dtype=np.float32, shape=(2,)),
-          array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
-          array_ops.placeholder(dtype=np.int64, shape=(3,)),)
+          array_ops.placeholder(dtype=np.float32,
+                                shape=(2,)),
+          array_ops.placeholder(dtype=np.int64,
+                                shape=(2, 3)),
+          array_ops.placeholder(dtype=np.int64,
+                                shape=(3,)),)
       ind_values = array_ops.identity(ind.values)
       ind_indices = array_ops.identity(ind.indices)
       ind_dense_shape = array_ops.identity(ind.dense_shape)
       ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
       # Feed with tuple
       values_out, indices_out, dense_shape_out = s.run(
-          [ind_values, ind_indices, ind_dense_shape], {ind: (values, indices, dense_shape)})
+          [ind_values, ind_indices, ind_dense_shape],
+          {ind: (values, indices, dense_shape)})
       self.assertAllEqual(values_out, values)
       self.assertAllEqual(indices_out, indices)
       self.assertAllEqual(dense_shape_out, dense_shape)
@@ -311,7 +314,8 @@ def testFeedIndexedSlices(self):
       self.assertAllEqual(indices_out, indices)
       self.assertAllEqual(dense_shape_out, dense_shape)
       # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
-      ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
+      ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
+                                                          dense_shape)})
       self.assertAllEqual(ind2_out.values, values)
       self.assertAllEqual(ind2_out.indices, indices)
       self.assertAllEqual(ind2_out.dense_shape, dense_shape)
@@ -322,9 +326,7 @@ def testFetchIndexedSlicesWithoutDenseShape(self):
       values = np.array([1.0, 2.0]).astype(np.float32)
       dense_shape = None
       ind = ops.IndexedSlices(
-          constant_op.constant(values),
-          constant_op.constant(indices),
-          None)
+          constant_op.constant(values), constant_op.constant(indices), None)
       # Single fetch, use as tuple
       ind_out = s.run(ind)
       values_out, indices_out, dense_shape_out = ind_out
@@ -358,25 +360,28 @@ def testFeedIndexedSlicesWithoutDenseShape(self):
       indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
       dense_shape = None
       ind = ops.IndexedSlices(
-          array_ops.placeholder(dtype=np.float32, shape=(2,)),
-          array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
+          array_ops.placeholder(dtype=np.float32,
+                                shape=(2,)),
+          array_ops.placeholder(dtype=np.int64,
+                                shape=(2, 3)),
           None)
       ind_values = array_ops.identity(ind.values)
       ind_indices = array_ops.identity(ind.indices)
       ind2 = ops.IndexedSlices(ind_values, ind_indices)
       # Feed with tuple
       values_out, indices_out = s.run(
-          [ind_values, ind_indices], {ind: (values, indices)})
+          [ind_values, ind_indices], {ind: (values, indices)})
       self.assertAllEqual(values_out, values)
       self.assertAllEqual(indices_out, indices)
       # Feed with IndexedSlicesValue
       values_out, indices_out = s.run(
-          [ind_values, ind_indices],
-          {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
+          [ind_values, ind_indices],
+          {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
       self.assertAllEqual(values_out, values)
       self.assertAllEqual(indices_out, indices)
       # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
-      ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
+      ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
+                                                          dense_shape)})
       self.assertAllEqual(ind2_out.values, values)
       self.assertAllEqual(ind2_out.indices, indices)
       self.assertAllEqual(ind2_out.dense_shape, dense_shape)

tensorflow/python/framework/ops.py

Lines changed: 4 additions & 0 deletions
@@ -786,6 +786,10 @@ def __str__(self):
 IndexedSlicesValue = collections.namedtuple("IndexedSlicesValue",
                                             ["values", "indices", "dense_shape"])
 
+IndexedSlicesValue = collections.namedtuple(
+    "IndexedSlicesValue", ["values", "indices", "dense_shape"])
+
+
 class SparseTensor(object):
   """Represents a sparse tensor.
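For reference, IndexedSlicesValue is a plain namedtuple, so the fetched value of an IndexedSlices behaves like a lightweight record. A minimal usage sketch with made-up numbers, mirroring the definition above rather than importing TensorFlow:

import collections

IndexedSlicesValue = collections.namedtuple(
    "IndexedSlicesValue", ["values", "indices", "dense_shape"])

v = IndexedSlicesValue(values=[1.0, 2.0], indices=[3, 5], dense_shape=[7, 9, 2])
assert v.indices == [3, 5] and v.dense_shape == [7, 9, 2]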

0 commit comments
