This repository was archived by the owner on Jul 7, 2023. It is now read-only.
Merged
Changes from 1 commit
Commits
24 commits
ec45cd5
Added new initializers
joaogui1 Aug 20, 2019
29821d8
Fixed glorot uniform
joaogui1 Aug 20, 2019
431b268
Fixed order of init arguments
joaogui1 Aug 20, 2019
eaacbb3
Added uniform random and documentation
joaogui1 Aug 21, 2019
ff08e30
Merge https://github.com/tensorflow/tensor2tensor into initializers
joaogui1 Aug 22, 2019
f30f579
Added requested changes to initializers.py
joaogui1 Aug 22, 2019
a833246
Added requested changes to initializers_test.py
joaogui1 Aug 22, 2019
531b52a
Added choice of input axis and output axis
joaogui1 Aug 22, 2019
6b2a12b
Added choice of input axis and output axis
joaogui1 Aug 22, 2019
3a772d3
Merge https://github.com/tensorflow/tensor2tensor into initializers
joaogui1 Aug 22, 2019
30df555
fixed errors and implemented requested changes
joaogui1 Aug 24, 2019
22d57b6
fixed typo
joaogui1 Aug 24, 2019
021a094
tests passing
joaogui1 Aug 24, 2019
7d1a409
Merge https://github.com/tensorflow/tensor2tensor into initializers
joaogui1 Aug 24, 2019
212307a
fixed get fans
joaogui1 Aug 25, 2019
5536aed
Merge https://github.com/tensorflow/tensor2tensor into initializers
joaogui1 Aug 26, 2019
50823b3
Fixed numpy weird behavior
joaogui1 Aug 26, 2019
ca507ec
Fixed typo
joaogui1 Aug 26, 2019
ca967b0
Fixed merge conflict
joaogui1 Aug 28, 2019
427f1cb
Use scipy.special.expit for sigmoid
joaogui1 Aug 28, 2019
51dfe52
Use clip instead of maximum, see colab on pull request
joaogui1 Aug 28, 2019
5af21d2
Added rectifiers
joaogui1 Aug 28, 2019
7fd566b
Added Gaussian rectifier
joaogui1 Aug 28, 2019
22820b0
Added Gaussian rectifier
joaogui1 Aug 28, 2019
Added rectifiers
joaogui1 committed Aug 28, 2019
commit 5af21d248f380e3bfdaa00271e069b6cb3fccb5f
39 changes: 31 additions & 8 deletions tensor2tensor/trax/layers/core.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Trax layers library."""

from __future__ import absolute_import
@@ -33,6 +32,28 @@ def Relu(x, **unused_kwargs):
  return np.clip(x, a_min=0.)


@base.layer()
def ParametricRelu(x, a=1., **unused_kwargs):
  return np.clip(a * x, a_min=0.)


@base.layer()
def LeakyRelu(x, a=0.01, **unused_kwargs):
  return np.where(x >= 0, x, a * x)


@base.layer()
def Elu(x, a=1., **unused_kwargs):
  return np.where(x > 0, x, a * np.expm1(x))


@base.layer()
def Selu(x,
         alpha=1.6732632423543772848170429916717,
         lmbda=1.0507009873554804934193349852946):
  return lmbda * np.where(x > 0, x, alpha * np.expm1(x))


@base.layer()
def Sigmoid(x, **unused_kwargs):
  return backend.expit(x)
@@ -87,7 +108,8 @@ def ToFloat(x, **unused_kwargs):
class Dense(base.Layer):
"""Layer constructor function for a dense (fully-connected) layer."""

def __init__(self, n_units,
def __init__(self,
n_units,
kernel_initializer=init.GlorotUniformInitializer(),
bias_initializer=init.RandomNormalInitializer(1e-6)):
super(Dense, self).__init__()
@@ -111,7 +133,9 @@ def new_parameters(self, input_shape, input_dtype, rng):
class Embedding(base.Layer):
"""Layer constructor function for an embedding layer."""

def __init__(self, d_feature, vocab_size,
def __init__(self,
d_feature,
vocab_size,
kernel_initializer=init.GlorotUniformInitializer()):
super(Embedding, self).__init__()
self._d_feature = d_feature # feature dimensionality
@@ -124,18 +148,17 @@ def call(self, x, params, state, **kwargs):

  def new_parameters(self, input_shape, input_dtype, rng):
    del input_dtype
    return self._kernel_initializer(
        (self._vocab_size, self._d_feature), rng), ()
    return self._kernel_initializer((self._vocab_size, self._d_feature),
                                    rng), ()


# Flatten.
@base.layer()
def Flatten(x, params, n_axes_to_keep=1, **kwargs):
  del params, kwargs
  if n_axes_to_keep >= len(x.shape):
    raise ValueError(
        "n_axes_to_keep[%d] should be less than input's rank[%d]" %
        (n_axes_to_keep, len(x.shape)))
    raise ValueError("n_axes_to_keep[%d] should be less than input's rank[%d]" %
                     (n_axes_to_keep, len(x.shape)))
  return np.reshape(x, (x.shape[:n_axes_to_keep] + (-1,)))


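For readers skimming the diff, the new activations reduce to a few elementwise formulas. The snippet below is a minimal standalone sketch of that math in plain NumPy/SciPy; it does not go through base.layer or the trax backend, scipy.special.expit stands in for backend.expit, and all variable names are illustrative only.

# Standalone sketch of the elementwise math added in this commit,
# written with plain NumPy/SciPy instead of the trax backend.
import numpy as np
from scipy.special import expit  # stand-in for backend.expit

x = np.linspace(-3.0, 3.0, 7)

relu = np.clip(x, a_min=0.0, a_max=None)      # max(x, 0)
leaky_relu = np.where(x >= 0, x, 0.01 * x)    # default leak a=0.01 for x < 0
elu = np.where(x > 0, x, 1.0 * np.expm1(x))   # a * (exp(x) - 1) for x <= 0, with a=1

# SELU constants copied from the diff above.
alpha = 1.6732632423543772848170429916717
lmbda = 1.0507009873554804934193349852946
selu = lmbda * np.where(x > 0, x, alpha * np.expm1(x))

sigmoid = expit(x)  # 1 / (1 + exp(-x))

for name, val in [("relu", relu), ("leaky_relu", leaky_relu),
                  ("elu", elu), ("selu", selu), ("sigmoid", sigmoid)]:
  print(name, np.round(val, 3))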