Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Fixing formatting issues.
  • Loading branch information
pavithrasv committed May 23, 2019
commit 1a70169c2ccb35d9c65759c2fa71a1bf1733b4aa
73 changes: 36 additions & 37 deletions keras/losses.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,21 +17,21 @@ class Loss(object):
"""Loss base class.

To be implemented by subclasses:
* `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`.
* `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`.

Example subclass implementation:
```
```python
class MeanSquaredError(Loss):
def call(self, y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(math_ops.square(y_pred - y_true), axis=-1)
def call(self, y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(math_ops.square(y_pred - y_true), axis=-1)
```

Args:
reduction: (Optional) Type of loss Reduction to apply to loss.
Default value is `SUM_OVER_BATCH_SIZE`.
name: Optional name for the op.
# Arguments
reduction: (Optional) Type of loss Reduction to apply to loss.
Default value is `SUM_OVER_BATCH_SIZE`.
name: Optional name for the op.
"""

def __init__(self,
Expand All @@ -43,10 +43,10 @@ def __init__(self,
def __call__(self, y_true, y_pred, sample_weight=None):
"""Invokes the `Loss` instance.

Args:
y_true: Ground truth values.
y_pred: The predicted values.
sample_weight: Optional `Tensor` whose rank is either 0, or the same rank
# Arguments
y_true: Ground truth values.
y_pred: The predicted values.
sample_weight: Optional `Tensor` whose rank is either 0, or the same rank
as `y_true`, or is broadcastable to `y_true`. `sample_weight` acts as a
coefficient for the loss. If a scalar is provided, then the loss is
simply scaled by the given value. If `sample_weight` is a tensor of size
Expand All @@ -56,12 +56,12 @@ def __call__(self, y_true, y_pred, sample_weight=None):
loss of each measurable element of `y_pred` is scaled by the
corresponding value of `sample_weight`.

Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `y_true`; otherwise, it is scalar.
# Returns
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `y_true`; otherwise, it is scalar.

Raises:
ValueError: If the shape of `sample_weight` is invalid.
# Raises
ValueError: If the shape of `sample_weight` is invalid.
"""
# If we are wrapping a lambda function strip '<>' from the name as it is not
# accepted in scope name.
Expand All @@ -75,10 +75,10 @@ def __call__(self, y_true, y_pred, sample_weight=None):
def from_config(cls, config):
"""Instantiates a `Loss` from its config (output of `get_config()`).

Args:
# Arguments
config: Output of `get_config()`.

Returns:
# Returns
A `Loss` instance.
"""
return cls(**config)
Expand All @@ -90,23 +90,23 @@ def get_config(self):
def call(self, y_true, y_pred):
"""Invokes the `Loss` instance.

Args:
y_true: Ground truth values, with the same shape as 'y_pred'.
y_pred: The predicted values.
# Arguments
y_true: Ground truth values, with the same shape as 'y_pred'.
y_pred: The predicted values.
"""
NotImplementedError('Must be implemented in subclasses.')


class LossFunctionWrapper(Loss):
"""Wraps a loss function in the `Loss` class.

Args:
fn: The loss function to wrap, with signature `fn(y_true, y_pred,
**kwargs)`.
reduction: (Optional) Type of loss reduction to apply to loss.
Default value is `SUM_OVER_BATCH_SIZE`.
name: (Optional) name for the loss.
**kwargs: The keyword arguments that are passed on to `fn`.
# Arguments
fn: The loss function to wrap, with signature `fn(y_true, y_pred,
**kwargs)`.
reduction: (Optional) Type of loss reduction to apply to loss.
Default value is `SUM_OVER_BATCH_SIZE`.
name: (Optional) name for the loss.
**kwargs: The keyword arguments that are passed on to `fn`.
"""

def __init__(self,
Expand All @@ -121,12 +121,12 @@ def __init__(self,
def call(self, y_true, y_pred):
"""Invokes the `LossFunctionWrapper` instance.

Args:
y_true: Ground truth values.
y_pred: The predicted values.
# Arguments
y_true: Ground truth values.
y_pred: The predicted values.

Returns:
Loss values per sample.
# Returns
Loss values per sample.
"""
return self.fn(y_true, y_pred, **self._fn_kwargs)

Expand All @@ -149,7 +149,6 @@ class MeanSquaredError(LossFunctionWrapper):
```python
mse = keras.losses.MeanSquaredError()
loss = mse([0., 0., 1., 1.], [1., 1., 1., 0.])
print('Loss: ', loss.numpy()) # Loss: 0.75
```

Usage with the `compile` API:
Expand Down
48 changes: 24 additions & 24 deletions keras/utils/losses_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,9 @@ class Reduction(object):
Contains the following values:

* `NONE`: Un-reduced weighted losses with the same shape as input. When this
reduction type used with built-in Keras training loops like
`fit`/`evaluate`, the unreduced vector loss is passed to the optimizer but
the reported loss will be a scalar value.
reduction type is used with built-in Keras training loops like
`fit`/`evaluate`, the unreduced vector loss is passed to the optimizer but
the reported loss will be a scalar value.
* `SUM`: Scalar sum of weighted losses.
* `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in losses.
"""
Expand All @@ -43,16 +43,16 @@ def squeeze_or_expand_dimensions(y_pred, y_true, sample_weight):
from the new rank of `y_pred`.
If `sample_weight` is scalar, it is kept scalar.

# Arguments:
y_pred: Predicted values, a `Tensor` of arbitrary dimensions.
y_true: Optional label `Tensor` whose dimensions match `y_pred`.
sample_weight: Optional weight scalar or `Tensor` whose dimensions match
`y_pred`.
# Arguments
y_pred: Predicted values, a `Tensor` of arbitrary dimensions.
y_true: Optional label `Tensor` whose dimensions match `y_pred`.
sample_weight: Optional weight scalar or `Tensor` whose dimensions match
`y_pred`.

# Returns:
Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has
the last dimension squeezed, `sample_weight` could be extended by one
dimension.
# Returns
Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has
the last dimension squeezed, `sample_weight` could be extended by one
dimension.
"""
if y_true is not None:
y_pred_rank = K.ndim(y_pred)
Expand Down Expand Up @@ -101,20 +101,20 @@ def compute_weighted_loss(losses,
name=None):
"""Computes the weighted loss.

# Arguments:
losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as
`losses`, or be broadcastable to `losses`.
reduction: (Optional) Type of Reduction to apply to loss.
Default value is `SUM_OVER_BATCH_SIZE`.
name: Optional name for the op.
# Arguments
losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as
`losses`, or be broadcastable to `losses`.
reduction: (Optional) Type of Reduction to apply to loss.
Default value is `SUM_OVER_BATCH_SIZE`.
name: Optional name for the op.

# Raises:
ValueError: If the shape of `sample_weight` is not compatible with `losses`.
# Raises
ValueError: If the shape of `sample_weight` is not compatible with `losses`.

# Returns:
Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
`NONE`, this has the same shape as `losses`; otherwise, it is scalar.
# Returns
Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
`NONE`, this has the same shape as `losses`; otherwise, it is scalar.
"""
Reduction.validate(reduction)
if sample_weight is None:
Expand Down