
Commit c3d1889

Merge pull request tensorflow#40 from jmchen-g/master

update inception slim.

2 parents: d2c7a37 + 1a8c712

12 files changed: +796, -326 lines


inception/inception/slim/BUILD

Lines changed: 9 additions & 0 deletions
@@ -101,3 +101,12 @@ py_library(
         ":variables",
     ],
 )
+
+py_test(
+    name = "collections_test",
+    size = "small",
+    srcs = ["collections_test.py"],
+    deps = [
+        ":slim",
+    ],
+)
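The new py_test target should be runnable with Bazel in the usual way, e.g. bazel test inception/slim:collections_test (the target label is inferred here from the package path and may differ depending on where the WORKSPACE root sits).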

inception/inception/slim/README.md

Lines changed: 126 additions & 142 deletions
Large diffs are not rendered by default.

inception/inception/slim/inception_model.py

Lines changed: 31 additions & 4 deletions
@@ -43,7 +43,6 @@
 from __future__ import division
 from __future__ import print_function
 
-
 import tensorflow as tf
 
 from inception.slim import ops
@@ -98,10 +97,10 @@ def inception_v3(inputs,
         # 73 x 73 x 64
         end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1],
                                          scope='conv3')
-        # 71 x 71 x 80.
+        # 73 x 73 x 80.
         end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3],
                                          scope='conv4')
-        # 69 x 69 x 192.
+        # 71 x 71 x 192.
         end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
                                            stride=2, scope='pool2')
         # 35 x 35 x 192.
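The corrected shape comments follow from the usual VALID-padding arithmetic: a 1x1 convolution preserves spatial size, while a kxk convolution at stride s maps n to floor((n - k) / s) + 1. A quick sanity check (plain Python, not part of the commit):

def valid_out(n, k, s=1):
  # Output size of a VALID-padded conv/pool: floor((n - k) / s) + 1.
  return (n - k) // s + 1

assert valid_out(73, 1) == 73     # conv3, 1x1 -> 73 x 73 x 80
assert valid_out(73, 3) == 71     # conv4, 3x3 -> 71 x 71 x 192
assert valid_out(71, 3, 2) == 35  # pool2, 3x3 stride 2 -> 35 x 35 x 192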
@@ -260,7 +259,10 @@ def inception_v3(inputs,
           aux_logits = ops.fc(aux_logits, num_classes, activation=None,
                               stddev=0.001, restore=restore_logits)
           end_points['aux_logits'] = aux_logits
-        # mixed_8: 17 x 17 x 1280.
+        # mixed_8: 8 x 8 x 1280.
+        # Note that the scope below is not changed, so as not to invalidate
+        # previous checkpoints.
+        # TODO: Fix the scope when appropriate.
         with tf.variable_scope('mixed_17x17x1280a'):
           with tf.variable_scope('branch3x3'):
             branch3x3 = ops.conv2d(net, 192, [1, 1])
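The note exists because variable names embed their enclosing scopes: variables created under tf.variable_scope('mixed_17x17x1280a') get names like 'mixed_17x17x1280a/branch3x3/...', and checkpoints are restored by those names, so renaming the scope to match the corrected 8 x 8 shape would break restoring previously trained checkpoints.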
@@ -327,3 +329,28 @@ def inception_v3(inputs,
         end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
   return logits, end_points
 
+
+def inception_v3_parameters(weight_decay=0.00004, stddev=0.1,
+                            batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
+  """Yields the scope with the default parameters for inception_v3.
+
+  Args:
+    weight_decay: the weight decay for weights variables.
+    stddev: standard deviation of the truncated Gaussian weight distribution.
+    batch_norm_decay: decay for the moving average of batch_norm momentums.
+    batch_norm_epsilon: small float added to variance to avoid dividing by zero.
+
+  Yields:
+    an arg_scope with the parameters needed for inception_v3.
+  """
+  # Set weight_decay for weights in Conv and FC layers.
+  with scopes.arg_scope([ops.conv2d, ops.fc],
+                        weight_decay=weight_decay):
+    # Set stddev, activation and parameters for batch_norm.
+    with scopes.arg_scope([ops.conv2d],
+                          stddev=stddev,
+                          activation=tf.nn.relu,
+                          batch_norm_params={
+                              'decay': batch_norm_decay,
+                              'epsilon': batch_norm_epsilon}) as arg_scope:
+      yield arg_scope
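Since inception_v3_parameters is written as a generator that yields an arg_scope, one way to drive it as a with-block is contextlib.contextmanager; the snippet below is a sketch of that wiring (the wrapper name and the call site are illustrative, not part of the commit):

import contextlib

import tensorflow as tf

from inception.slim import inception_model as inception

# Hypothetical wrapper: turn the yielding function into a context manager.
inception_v3_scope = contextlib.contextmanager(inception.inception_v3_parameters)

inputs = tf.random_uniform((5, 299, 299, 3))
# Every conv2d/fc layer built inside picks up the shared defaults
# (weight decay, truncated-normal stddev, ReLU, batch-norm parameters).
with inception_v3_scope(weight_decay=0.00004, stddev=0.1):
  logits, end_points = inception.inception_v3(inputs, num_classes=1000)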

inception/inception/slim/inception_test.py

Lines changed: 16 additions & 1 deletion
@@ -17,7 +17,6 @@
 from __future__ import division
 from __future__ import print_function
 
-
 import tensorflow as tf
 
 from inception.slim import inception_model as inception
@@ -55,6 +54,22 @@ def testBuildEndPoints(self):
       self.assertListEqual(pre_pool.get_shape().as_list(),
                            [batch_size, 8, 8, 2048])
 
+  def testVariablesSetDevice(self):
+    batch_size = 5
+    height, width = 299, 299
+    num_classes = 1000
+    with self.test_session():
+      inputs = tf.random_uniform((batch_size, height, width, 3))
+      # Force all Variables to reside on the device.
+      with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
+        inception.inception_v3(inputs, num_classes)
+      with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
+        inception.inception_v3(inputs, num_classes)
+      for v in tf.get_collection(tf.GraphKeys.VARIABLES, scope='on_cpu'):
+        self.assertDeviceEqual(v.device, '/cpu:0')
+      for v in tf.get_collection(tf.GraphKeys.VARIABLES, scope='on_gpu'):
+        self.assertDeviceEqual(v.device, '/gpu:0')
+
   def testHalfSizeImages(self):
     batch_size = 5
     height, width = 150, 150
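A historical note on the new test: tf.GraphKeys.VARIABLES was the global variable collection in the TensorFlow releases this commit targets (later renamed GLOBAL_VARIABLES), and assertDeviceEqual compares canonicalized device specifications, so '/cpu:0' matches however the runtime spells the device string.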

inception/inception/slim/losses.py

Lines changed: 65 additions & 1 deletion
@@ -26,7 +26,6 @@
 from __future__ import division
 from __future__ import print_function
 
-
 import tensorflow as tf
 
 # In order to gather all losses in a network, the user should use this
@@ -35,6 +34,71 @@
 LOSSES_COLLECTION = '_losses'
 
 
+def l1_regularizer(weight=1.0, scope=None):
+  """Define an L1 regularizer.
+
+  Args:
+    weight: scale the loss by this factor.
+    scope: Optional scope for op_scope.
+
+  Returns:
+    a regularizer function.
+  """
+  def regularizer(tensor):
+    with tf.op_scope([tensor], scope, 'L1Regularizer'):
+      l1_weight = tf.convert_to_tensor(weight,
+                                       dtype=tensor.dtype.base_dtype,
+                                       name='weight')
+      return tf.mul(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')
+  return regularizer
+
+
+def l2_regularizer(weight=1.0, scope=None):
+  """Define an L2 regularizer.
+
+  Args:
+    weight: scale the loss by this factor.
+    scope: Optional scope for op_scope.
+
+  Returns:
+    a regularizer function.
+  """
+  def regularizer(tensor):
+    with tf.op_scope([tensor], scope, 'L2Regularizer'):
+      l2_weight = tf.convert_to_tensor(weight,
+                                       dtype=tensor.dtype.base_dtype,
+                                       name='weight')
+      return tf.mul(l2_weight, tf.nn.l2_loss(tensor), name='value')
+  return regularizer
+
+
+def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
+  """Define an L1L2 regularizer.
+
+  Args:
+    weight_l1: scale the L1 loss by this factor.
+    weight_l2: scale the L2 loss by this factor.
+    scope: Optional scope for op_scope.
+
+  Returns:
+    a regularizer function.
+  """
+  def regularizer(tensor):
+    with tf.op_scope([tensor], scope, 'L1L2Regularizer'):
+      weight_l1_t = tf.convert_to_tensor(weight_l1,
+                                         dtype=tensor.dtype.base_dtype,
+                                         name='weight_l1')
+      weight_l2_t = tf.convert_to_tensor(weight_l2,
+                                         dtype=tensor.dtype.base_dtype,
+                                         name='weight_l2')
+      reg_l1 = tf.mul(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),
+                      name='value_l1')
+      reg_l2 = tf.mul(weight_l2_t, tf.nn.l2_loss(tensor),
+                      name='value_l2')
+      return tf.add(reg_l1, reg_l2, name='value')
+  return regularizer
+
+
 def l1_loss(tensor, weight=1.0, scope=None):
   """Define a L1Loss, useful for regularize, i.e. lasso.
 
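The three factories share one shape: each returns a closure that maps a tensor to a scalar loss op named '<scope>/value', which is exactly what the tests below assert on. A brief usage sketch against the TF 0.x API this file targets (the weight tensor here is illustrative):

import tensorflow as tf

from inception.slim import losses

# Illustrative weight tensor; any float tensor works.
weights = tf.Variable(tf.truncated_normal([3, 3, 64, 128], stddev=0.1))

# 0.01 * sum(|w|) + 0.05 * (sum(w**2) / 2), as a single scalar op.
reg_fn = losses.l1_l2_regularizer(weight_l1=0.01, weight_l2=0.05)
reg_loss = reg_fn(weights)  # op name: 'L1L2Regularizer/value'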

inception/inception/slim/losses_test.py

Lines changed: 89 additions & 1 deletion
@@ -18,7 +18,6 @@
 from __future__ import print_function
 
 
-
 import tensorflow as tf
 
 from inception.slim import losses
@@ -47,6 +46,95 @@ def testL2Loss(self):
       self.assertAlmostEqual(loss.eval(), num_elem * wd / 2, 5)
 
 
+class RegularizersTest(tf.test.TestCase):
+
+  def testL1Regularizer(self):
+    with self.test_session():
+      shape = [5, 5, 5]
+      num_elem = 5 * 5 * 5
+      tensor = tf.constant(1.0, shape=shape)
+      loss = losses.l1_regularizer()(tensor)
+      self.assertEquals(loss.op.name, 'L1Regularizer/value')
+      self.assertAlmostEqual(loss.eval(), num_elem, 5)
+
+  def testL1RegularizerWithScope(self):
+    with self.test_session():
+      shape = [5, 5, 5]
+      num_elem = 5 * 5 * 5
+      tensor = tf.constant(1.0, shape=shape)
+      loss = losses.l1_regularizer(scope='L1')(tensor)
+      self.assertEquals(loss.op.name, 'L1/value')
+      self.assertAlmostEqual(loss.eval(), num_elem, 5)
+
+  def testL1RegularizerWithWeight(self):
+    with self.test_session():
+      shape = [5, 5, 5]
+      num_elem = 5 * 5 * 5
+      tensor = tf.constant(1.0, shape=shape)
+      weight = 0.01
+      loss = losses.l1_regularizer(weight)(tensor)
+      self.assertEquals(loss.op.name, 'L1Regularizer/value')
+      self.assertAlmostEqual(loss.eval(), num_elem * weight, 5)
+
+  def testL2Regularizer(self):
+    with self.test_session():
+      shape = [5, 5, 5]
+      num_elem = 5 * 5 * 5
+      tensor = tf.constant(1.0, shape=shape)
+      loss = losses.l2_regularizer()(tensor)
+      self.assertEquals(loss.op.name, 'L2Regularizer/value')
+      self.assertAlmostEqual(loss.eval(), num_elem / 2, 5)
+
+  def testL2RegularizerWithScope(self):
+    with self.test_session():
+      shape = [5, 5, 5]
+      num_elem = 5 * 5 * 5
+      tensor = tf.constant(1.0, shape=shape)
+      loss = losses.l2_regularizer(scope='L2')(tensor)
+      self.assertEquals(loss.op.name, 'L2/value')
+      self.assertAlmostEqual(loss.eval(), num_elem / 2, 5)
+
+  def testL2RegularizerWithWeight(self):
+    with self.test_session():
+      shape = [5, 5, 5]
+      num_elem = 5 * 5 * 5
+      tensor = tf.constant(1.0, shape=shape)
+      weight = 0.01
+      loss = losses.l2_regularizer(weight)(tensor)
+      self.assertEquals(loss.op.name, 'L2Regularizer/value')
+      self.assertAlmostEqual(loss.eval(), num_elem * weight / 2, 5)
+
+  def testL1L2Regularizer(self):
+    with self.test_session():
+      shape = [5, 5, 5]
+      num_elem = 5 * 5 * 5
+      tensor = tf.constant(1.0, shape=shape)
+      loss = losses.l1_l2_regularizer()(tensor)
+      self.assertEquals(loss.op.name, 'L1L2Regularizer/value')
+      self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5)
+
+  def testL1L2RegularizerWithScope(self):
+    with self.test_session():
+      shape = [5, 5, 5]
+      num_elem = 5 * 5 * 5
+      tensor = tf.constant(1.0, shape=shape)
+      loss = losses.l1_l2_regularizer(scope='L1L2')(tensor)
+      self.assertEquals(loss.op.name, 'L1L2/value')
+      self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5)
+
+  def testL1L2RegularizerWithWeights(self):
+    with self.test_session():
+      shape = [5, 5, 5]
+      num_elem = 5 * 5 * 5
+      tensor = tf.constant(1.0, shape=shape)
+      weight_l1 = 0.01
+      weight_l2 = 0.05
+      loss = losses.l1_l2_regularizer(weight_l1, weight_l2)(tensor)
+      self.assertEquals(loss.op.name, 'L1L2Regularizer/value')
+      self.assertAlmostEqual(loss.eval(),
+                             num_elem * weight_l1 + num_elem * weight_l2 / 2, 5)
+
+
 class CrossEntropyLossTest(tf.test.TestCase):
 
   def testCrossEntropyLossAllCorrect(self):
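The expected values in these tests come from tf.nn.l2_loss computing sum(t**2) / 2: for a tensor of 125 ones, L1 gives 125, L2 gives 62.5, and the combined default-weight case is 125 + 62.5. A plain-Python check of the weighted L1L2 case:

num_elem = 5 * 5 * 5                # tensor of ones, shape [5, 5, 5]
weight_l1, weight_l2 = 0.01, 0.05
l1 = weight_l1 * num_elem           # sum of |1.0| over all elements
l2 = weight_l2 * num_elem / 2.0     # tf.nn.l2_loss halves the sum of squares
assert l1 + l2 == num_elem * weight_l1 + num_elem * weight_l2 / 2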
