-
Notifications
You must be signed in to change notification settings - Fork 29k
[Spark-21854] Added LogisticRegressionTrainingSummary for MultinomialLogisticRegression in Python API #19185
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 4 commits
50cfafe
60579d5
1a73e6c
53ac68e
a4755d7
eb8f6b4
6529fa6
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -529,8 +529,11 @@ def summary(self): | |
| """ | ||
| if self.hasSummary: | ||
| java_blrt_summary = self._call_java("summary") | ||
| # Note: Once multiclass is added, update this to return correct summary | ||
| return BinaryLogisticRegressionTrainingSummary(java_blrt_summary) | ||
| if (self.numClasses == 2): | ||
|
||
| java_blrt_binarysummary = self._call_java("binarySummary") | ||
|
||
| return BinaryLogisticRegressionTrainingSummary(java_blrt_binarysummary) | ||
| else: | ||
| return LogisticRegressionTrainingSummary(java_blrt_summary) | ||
| else: | ||
| raise RuntimeError("No training summary available for this %s" % | ||
| self.__class__.__name__) | ||
|
|
@@ -585,6 +588,14 @@ def probabilityCol(self): | |
| """ | ||
| return self._call_java("probabilityCol") | ||
|
|
||
| @property | ||
| @since("2.3.0") | ||
| def predictionCol(self): | ||
| """ | ||
| Field in "predictions" which gives the prediction of each class. | ||
| """ | ||
| return self._call_java("predictionCol") | ||
|
|
||
| @property | ||
| @since("2.0.0") | ||
| def labelCol(self): | ||
|
|
@@ -603,6 +614,112 @@ def featuresCol(self): | |
| """ | ||
| return self._call_java("featuresCol") | ||
|
|
||
| @property | ||
| @since("2.3.0") | ||
| def labels(self): | ||
| """ | ||
| Returns the sequence of labels in ascending order. This order matches the order used | ||
| in metrics which are specified as arrays over labels, e.g., truePositiveRateByLabel. | ||
|
|
||
| Note: In most cases, it will be values {0.0, 1.0, ..., numClasses-1}. However, if the | ||
| training set is missing a label, then all of the arrays over labels | ||
| (e.g., from truePositiveRateByLabel) will be of length numClasses-1 instead of the | ||
| expected numClasses. | ||
| """ | ||
| return self._call_java("labels") | ||
|
|
||
| @property | ||
| @since("2.3.0") | ||
| def truePositiveRateByLabel(self): | ||
| """ | ||
| Returns true positive rate for each label (category). | ||
| """ | ||
| return self._call_java("truePositiveRateByLabel") | ||
|
|
||
| @property | ||
| @since("2.3.0") | ||
| def falsePositiveRateByLabel(self): | ||
| """ | ||
| Returns false positive rate for each label (category). | ||
| """ | ||
| return self._call_java("falsePositiveRateByLabel") | ||
|
|
||
| @property | ||
| @since("2.3.0") | ||
| def precisionByLabel(self): | ||
| """ | ||
| Returns precision for each label (category). | ||
| """ | ||
| return self._call_java("precisionByLabel") | ||
|
|
||
| @property | ||
| @since("2.3.0") | ||
| def recallByLabel(self): | ||
| """ | ||
| Returns recall for each label (category). | ||
| """ | ||
| return self._call_java("recallByLabel") | ||
|
|
||
| @since("2.3.0") | ||
| def fMeasureByLabel(self, beta=1.0): | ||
| """ | ||
| Returns f-measure for each label (category). | ||
| """ | ||
| return self._call_java("fMeasureByLabel", beta) | ||
|
|
||
| @property | ||
| @since("2.3.0") | ||
| def accuracy(self): | ||
| """ | ||
| Returns accuracy. | ||
| (equals to the total number of correctly classified instances | ||
| out of the total number of instances.) | ||
| """ | ||
| return self._call_java("accuracy") | ||
|
|
||
| @property | ||
| @since("2.3.0") | ||
| def weightedTruePositiveRate(self): | ||
| """ | ||
| Returns weighted true positive rate. | ||
| (equals to precision, recall and f-measure) | ||
| """ | ||
| return self._call_java("weightedTruePositiveRate") | ||
|
|
||
| @property | ||
| @since("2.3.0") | ||
| def weightedFalsePositiveRate(self): | ||
| """ | ||
| Returns weighted false positive rate. | ||
| """ | ||
| return self._call_java("weightedFalsePositiveRate") | ||
|
|
||
| @property | ||
| @since("2.3.0") | ||
| def weightedRecall(self): | ||
| """ | ||
| Returns weighted averaged recall. | ||
| (equals to precision, recall and f-measure) | ||
| """ | ||
| return self._call_java("weightedRecall") | ||
|
|
||
| @property | ||
| @since("2.3.0") | ||
| def weightedPrecision(self): | ||
| """ | ||
| Returns weighted averaged precision. | ||
| """ | ||
| return self._call_java("weightedPrecision") | ||
|
|
||
| @since("2.3.0") | ||
| def weightedFMeasure(self, beta=1.0): | ||
| """ | ||
| Returns weighted averaged f-measure. | ||
| """ | ||
| return self._call_java("weightedFMeasure", beta) | ||
|
|
||
|
|
||
| @inherit_doc | ||
| class LogisticRegressionTrainingSummary(LogisticRegressionSummary): | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -1478,6 +1478,40 @@ def test_logistic_regression_summary(self): | |
| sameSummary = model.evaluate(df) | ||
| self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC) | ||
|
|
||
| def test_multiclass_logistic_regression_summary(self): | ||
| df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)), | ||
| (0.0, 2.0, Vectors.sparse(1, [], [])), | ||
| (2.0, 2.0, Vectors.dense(2.0)), | ||
| (2.0, 2.0, Vectors.dense(1.9))], | ||
| ["label", "weight", "features"]) | ||
| lr = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False) | ||
| model = lr.fit(df) | ||
| self.assertTrue(model.hasSummary) | ||
| s = model.summary | ||
| # test that api is callable and returns expected types | ||
| self.assertTrue(isinstance(s.predictions, DataFrame)) | ||
| self.assertEqual(s.probabilityCol, "probability") | ||
| self.assertEqual(s.labelCol, "label") | ||
| self.assertEqual(s.featuresCol, "features") | ||
| self.assertEqual(s.predictionCol, "prediction") | ||
| objHist = s.objectiveHistory | ||
| self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float)) | ||
| self.assertGreater(s.totalIterations, 0) | ||
| self.assertTrue(isinstance(s.labels, list)) | ||
| self.assertTrue(isinstance(s.truePositiveRateByLabel, list)) | ||
| self.assertTrue(isinstance(s.falsePositiveRateByLabel, list)) | ||
| self.assertTrue(isinstance(s.precisionByLabel, list)) | ||
| self.assertTrue(isinstance(s.recallByLabel, list)) | ||
| self.assertTrue(isinstance(s.fMeasureByLabel(), list)) | ||
| self.assertAlmostEqual(s.accuracy, 0.75, 2) | ||
| self.assertAlmostEqual(s.weightedTruePositiveRate, 0.75, 2) | ||
| self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.25, 2) | ||
| self.assertAlmostEqual(s.weightedRecall, 0.75, 2) | ||
| self.assertAlmostEqual(s.weightedPrecision, 0.583, 2) | ||
| self.assertAlmostEqual(s.weightedFMeasure(), 0.65, 2) | ||
|
||
| # test evaluation (with training dataset) produces a summary with same values | ||
| # one check is enough to verify a summary is returned, Scala version runs full test | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Please add a test for evaluation, like: |
||
|
|
||
| def test_gaussian_mixture_summary(self): | ||
| data = [(Vectors.dense(1.0),), (Vectors.dense(5.0),), (Vectors.dense(10.0),), | ||
| (Vectors.sparse(1, [], []),)] | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
Rename this to `java_lrt_summary`, as it's not always binary logistic regression.