98 changes: 94 additions & 4 deletions python/pyspark/ml/feature.py
@@ -27,10 +27,10 @@
from pyspark.mllib.linalg import _convert_to_vector

__all__ = ['Binarizer', 'Bucketizer', 'ElementwiseProduct', 'HashingTF', 'IDF', 'IDFModel',
'NGram', 'Normalizer', 'OneHotEncoder', 'PolynomialExpansion', 'RegexTokenizer',
'StandardScaler', 'StandardScalerModel', 'StringIndexer', 'StringIndexerModel',
'Tokenizer', 'VectorAssembler', 'VectorIndexer', 'Word2Vec', 'Word2VecModel',
'PCA', 'PCAModel', 'RFormula', 'RFormulaModel']
'MinMaxScaler', 'MinMaxScalerModel', 'NGram', 'Normalizer', 'OneHotEncoder',
'PolynomialExpansion', 'RegexTokenizer', 'StandardScaler', 'StandardScalerModel',
'StringIndexer', 'StringIndexerModel', 'Tokenizer', 'VectorAssembler', 'VectorIndexer',
'Word2Vec', 'Word2VecModel', 'PCA', 'PCAModel', 'RFormula', 'RFormulaModel']


@inherit_doc
@@ -1291,6 +1291,96 @@ class RFormulaModel(JavaModel):
"""


@inherit_doc
class MinMaxScaler(JavaEstimator, HasInputCol, HasOutputCol):
"""
(Review comment, Member: mark with .. note:: Experimental)

Rescale each feature individually to a common range [min, max] linearly using column summary
statistics, which is also known as min-max normalization or Rescaling. The rescaled value for
feature E is calculated as,

Rescaled(e_i) = (e_i - E_min) / (E_max - E_min) * (max - min) + min

For the case E_max == E_min, Rescaled(e_i) = 0.5 * (max + min)

Note that since zero values will probably be transformed to non-zero values, output of the
transformer will be DenseVector even for sparse input.

>>> from pyspark.mllib.linalg import Vectors
>>> df = sqlContext.createDataFrame([(Vectors.dense([0.0]),), (Vectors.dense([2.0]),)], ["a"])
>>> mmScaler = MinMaxScaler(inputCol="a", outputCol="scaled")
>>> model = mmScaler.fit(df)
>>> model.transform(df).show()
+-----+------+
| a|scaled|
+-----+------+
|[0.0]| [0.0]|
|[2.0]| [1.0]|
+-----+------+
...
"""

# a placeholder to make it appear in the generated doc
min = Param(Params._dummy(), "min", "Lower bound of the output feature range")
max = Param(Params._dummy(), "max", "Upper bound of the output feature range")

@keyword_only
def __init__(self, min=0.0, max=1.0, inputCol=None, outputCol=None):
"""
__init__(self, min=0.0, max=1.0, inputCol=None, outputCol=None)
"""
super(MinMaxScaler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.MinMaxScaler", self.uid)
self.min = Param(self, "min", "Lower bound of the output feature range")
self.max = Param(self, "max", "Upper bound of the output feature range")
self._setDefault(min=0.0, max=1.0)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)

@keyword_only
def setParams(self, min=0.0, max=1.0, inputCol=None, outputCol=None):
"""
setParams(self, min=0.0, max=1.0, inputCol=None, outputCol=None)
Sets params for this MinMaxScaler.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)

def setMin(self, value):
"""
Sets the value of :py:attr:`min`.
"""
self._paramMap[self.min] = value
return self

def getMin(self):
"""
Gets the value of min or its default value.
"""
return self.getOrDefault(self.min)

def setMax(self, value):
"""
Sets the value of :py:attr:`max`.
"""
self._paramMap[self.max] = value
return self

def getMax(self):
"""
Gets the value of max or its default value.
"""
return self.getOrDefault(self.max)

def _create_model(self, java_model):
return MinMaxScalerModel(java_model)
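
To make the rescaling formula in the docstring concrete, here is a minimal pure-Python sketch of the same computation (illustrative only, not part of this patch; the function name rescale and the sample values are chosen for the example). It reproduces the doctest output above, where column "a" contains 0.0 and 2.0, so E_min = 0.0 and E_max = 2.0.

def rescale(e, e_min, e_max, out_min=0.0, out_max=1.0):
    # Mirrors the docstring formula: (e_i - E_min) / (E_max - E_min) * (max - min) + min
    if e_max == e_min:
        # Degenerate case from the docstring: every value maps to the midpoint of the output range.
        return 0.5 * (out_max + out_min)
    return (e - e_min) / (e_max - e_min) * (out_max - out_min) + out_min

print(rescale(0.0, 0.0, 2.0))  # 0.0, matching the [0.0] row of the doctest output
print(rescale(2.0, 0.0, 2.0))  # 1.0, matching the [1.0] row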


class MinMaxScalerModel(JavaModel):
"""
Model fitted by MinMaxScaler.
(Review comment, Member: Nicer to write :py:class:`MinMaxScaler`)
"""


if __name__ == "__main__":
import doctest
from pyspark.context import SparkContext