-
Notifications
You must be signed in to change notification settings - Fork 29k
[SPARK-21741][ML][PySpark] Python API for DataFrame-based multivariate summarizer #20695
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Closed
Closed
Changes from 4 commits
Commits
Show all changes
10 commits
Select commit
Hold shift + click to select a range
488d45a
init pr
WeichenXu123 7d3cb1b
update doctest
WeichenXu123 001ff46
update doctest
WeichenXu123 b3e9ddd
update version
WeichenXu123 e64f795
address bago comment
WeichenXu123 21edbcd
merge master & resolve conflicts
WeichenXu123 f7cec51
Merge branch 'master' into py_summarizer
WeichenXu123 20968c1
address comments
WeichenXu123 b91dbeb
fix python style
WeichenXu123 9a4a0ca
address comments
WeichenXu123 File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -17,7 +17,9 @@ | |
|
|
||
| from pyspark import since, SparkContext | ||
| from pyspark.ml.common import _java2py, _py2java | ||
| from pyspark.ml.wrapper import _jvm | ||
| from pyspark.ml.wrapper import JavaWrapper, _jvm | ||
| from pyspark.sql.column import Column, _to_seq | ||
| from pyspark.sql.functions import lit | ||
|
|
||
|
|
||
| class ChiSquareTest(object): | ||
|
|
@@ -132,6 +134,172 @@ def corr(dataset, column, method="pearson"): | |
| return _java2py(sc, javaCorrObj.corr(*args)) | ||
|
|
||
|
|
||
class Summarizer(object):
    """
    .. note:: Experimental

    Tools for vectorized statistics on MLlib Vectors.

    The methods in this package provide various statistics for Vectors contained inside
    DataFrames. This class lets users pick the statistics they would like to extract for
    a given column.

    >>> from pyspark.ml.stat import Summarizer
    >>> from pyspark.sql import Row
    >>> from pyspark.ml.linalg import Vectors
    >>> summarizer = Summarizer.metrics("mean", "count")
    >>> df = sc.parallelize([Row(weight=1.0, features=Vectors.dense(1.0, 1.0, 1.0)),
    ...                      Row(weight=0.0, features=Vectors.dense(1.0, 2.0, 3.0))]).toDF()
    >>> df.select(summarizer.summary(df.features, df.weight)).show(truncate=False)
    +-----------------------------------+
    |aggregate_metrics(features, weight)|
    +-----------------------------------+
    |[[1.0,1.0,1.0], 1]                 |
    +-----------------------------------+
    <BLANKLINE>
    >>> df.select(summarizer.summary(df.features)).show(truncate=False)
    +--------------------------------+
    |aggregate_metrics(features, 1.0)|
    +--------------------------------+
    |[[1.0,1.5,2.0], 2]              |
    +--------------------------------+
    <BLANKLINE>
    >>> df.select(Summarizer.mean(df.features, df.weight)).show(truncate=False)
    +--------------+
    |mean(features)|
    +--------------+
    |[1.0,1.0,1.0] |
    +--------------+
    <BLANKLINE>
    >>> df.select(Summarizer.mean(df.features)).show(truncate=False)
    +--------------+
    |mean(features)|
    +--------------+
    |[1.0,1.5,2.0] |
    +--------------+
    <BLANKLINE>

    .. versionadded:: 2.4.0

    """
    def __init__(self, js):
        # Wrapped JVM summary-builder object produced by :py:meth:`metrics`;
        # used by :py:meth:`summary` to build the aggregate column.
        self._js = js

    @staticmethod
    @since("2.4.0")
    def mean(col, weightCol=None):
        """
        return a column of mean summary
        """
        return Summarizer._get_single_metric(col, weightCol, "mean")

    @staticmethod
    @since("2.4.0")
    def variance(col, weightCol=None):
        """
        return a column of variance summary
        """
        return Summarizer._get_single_metric(col, weightCol, "variance")

    @staticmethod
    @since("2.4.0")
    def count(col, weightCol=None):
        """
        return a column of count summary
        """
        return Summarizer._get_single_metric(col, weightCol, "count")

    @staticmethod
    @since("2.4.0")
    def numNonZeros(col, weightCol=None):
        """
        return a column of numNonZeros summary
        """
        return Summarizer._get_single_metric(col, weightCol, "numNonZeros")

    @staticmethod
    @since("2.4.0")
    def max(col, weightCol=None):
        """
        return a column of max summary
        """
        return Summarizer._get_single_metric(col, weightCol, "max")

    @staticmethod
    @since("2.4.0")
    def min(col, weightCol=None):
        """
        return a column of min summary
        """
        return Summarizer._get_single_metric(col, weightCol, "min")

    @staticmethod
    @since("2.4.0")
    def normL1(col, weightCol=None):
        """
        return a column of normL1 summary
        """
        return Summarizer._get_single_metric(col, weightCol, "normL1")

    @staticmethod
    @since("2.4.0")
    def normL2(col, weightCol=None):
        """
        return a column of normL2 summary
        """
        return Summarizer._get_single_metric(col, weightCol, "normL2")

    @staticmethod
    def _check_param(featureCol, weightCol):
        """
        Validate the feature/weight column arguments, defaulting the weight
        column to a literal 1.0 (unweighted) when it is not supplied.
        """
        if weightCol is None:
            weightCol = lit(1.0)
        if not isinstance(featureCol, Column) or not isinstance(weightCol, Column):
            raise TypeError("featureCol and weightCol should be a Column")
        return featureCol, weightCol

    @staticmethod
    def _get_single_metric(col, weightCol, metric):
        """
        Call the JVM-side convenience function (e.g. Summarizer.mean) named by
        ``metric`` and wrap the resulting aggregate expression in a Column.
        """
        col, weightCol = Summarizer._check_param(col, weightCol)
        return Column(JavaWrapper._new_java_obj("org.apache.spark.ml.stat.Summarizer." + metric,
                                                col._jc, weightCol._jc))

    @staticmethod
    @since("2.4.0")
    def metrics(*metrics):
        """
        Given a list of metrics, provides a builder that in turn computes metrics
        from a column.

        See the documentation of :py:class:`Summarizer` for an example.

        The following metrics are accepted (case sensitive):
         - mean: a vector that contains the coefficient-wise mean.
         - variance: a vector that contains the coefficient-wise variance.
         - count: the count of all vectors seen.
         - numNonZeros: a vector with the number of non-zeros for each coefficient.
         - max: the maximum for each coefficient.
         - min: the minimum for each coefficient.
         - normL2: the Euclidean norm for each coefficient.
         - normL1: the L1 norm of each coefficient (sum of the absolute values).

        :param metrics: metrics that can be provided.
        :return: a :py:class:`Summarizer` object

        Note: Currently, the performance of this interface is about 2x~3x slower than
        using the RDD interface.
        """
        sc = SparkContext._active_spark_context
        js = JavaWrapper._new_java_obj("org.apache.spark.ml.stat.Summarizer.metrics",
                                       _to_seq(sc, metrics))
        return Summarizer(js)

    @since("2.4.0")
    def summary(self, featureCol, weightCol=None):
        """
        Returns an aggregate object that contains the summary of the column with the
        requested metrics.

        :param featureCol: a column that contains features Vector object.
        :param weightCol: a column that contains weight value. Default weight is 1.0.
        :return: an aggregate column that contains the statistics. The exact content of
                 this structure is determined during the creation of the builder.
        """
        featureCol, weightCol = Summarizer._check_param(featureCol, weightCol)
        return Column(self._js.summary(featureCol._jc, weightCol._jc))
|
|
||
|
|
||
| if __name__ == "__main__": | ||
| import doctest | ||
| import pyspark.ml.stat | ||
|
|
||
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We might want to move the "summary" method into another class, and have Summarizer only contain static methods. That will help with autocomplete so that it's clear that you're not meant to do
Summarizer.metrics("min").mean(features).
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Sounds reasonable.