Closed
Changes from 1 commit
Commits (30)
bfade12
Added lots of classes for new ML API:
jkbradley Nov 24, 2014
d35bb5d
fixed compilation issues, but have not added tests yet
jkbradley Nov 24, 2014
52f4fde
removing everything except for simple class hierarchy for classification
jkbradley Dec 1, 2014
d705e87
Added LinearRegression and Regressor back from ml-api branch
jkbradley Dec 4, 2014
601e792
Modified ParamMap to sort parameters in toString. Cleaned up classes…
jkbradley Dec 5, 2014
0617d61
Fixed bug from last commit (sorting paramMap by parameter names in to…
jkbradley Dec 5, 2014
54b7b31
Fixed issue with logreg threshold being set correctly
jkbradley Dec 5, 2014
e433872
Updated docs. Added LabeledPointSuite to spark.ml
jkbradley Dec 5, 2014
57d54ab
* Changed semantics of Predictor.train() to merge the given paramMap …
jkbradley Dec 5, 2014
58802e3
added train() to Predictor subclasses which does not take a ParamMap.
jkbradley Dec 6, 2014
adbe50a
* fixed LinearRegression train() to use embedded paramMap
jkbradley Dec 6, 2014
1680905
Added JavaLabeledPointSuite.java for spark.ml, and added constructor …
jkbradley Dec 6, 2014
8d13233
Added methods:
jkbradley Dec 8, 2014
bc654e1
Added spark.ml LinearRegressionSuite
jkbradley Dec 8, 2014
4e2f711
rat fix
jkbradley Dec 8, 2014
1c61723
* Made ProbabilisticClassificationModel into a subclass of Classifica…
jkbradley Dec 30, 2014
934f97b
Fixed bugs from previous commit.
jkbradley Dec 30, 2014
c3c8da5
small cleanup
jkbradley Dec 30, 2014
0a16da9
Fixed Linear/Logistic RegressionSuites
jkbradley Dec 31, 2014
82f340b
Fixed bug in LogisticRegression (introduced in this PR). Fixed Java …
jkbradley Dec 31, 2014
343e7bd
added blanket mima exclude for ml package
jkbradley Dec 31, 2014
f549e34
Updates based on code review. Major ones are:
jkbradley Jan 15, 2015
216d199
fixed after sql datatypes PR got merged
jkbradley Jan 15, 2015
f542997
Added MIMA excludes for VectorUDT (now public), and added DeveloperAp…
jkbradley Jan 19, 2015
9872424
fixed JavaLinearRegressionSuite.java Java sql api
jkbradley Jan 19, 2015
bcb9549
Fixed issues after rebasing from master (after move from SchemaRDD to…
jkbradley Jan 30, 2015
fc62406
fixed test suites after last commit
jkbradley Jan 30, 2015
8316d5e
fixes after rebasing on master
jkbradley Feb 5, 2015
fec348a
Added JavaDeveloperApiExample.java and fixed other issues: Made devel…
jkbradley Feb 6, 2015
405bfb8
Last edits based on code review. Small cleanups
jkbradley Feb 6, 2015
* fixed LinearRegression train() to use embedded paramMap
* added Predictor.predict(RDD[Vector]) method
* updated Linear/LogisticRegressionSuites
jkbradley committed Feb 5, 2015
commit adbe50a7bfc4af7e6771dcd1d6a5f370a9490eca
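For orientation, here is a rough usage sketch of the two API changes in this commit. The setter, train(), and predict() calls mirror the hunks and test suites below; the `training` RDD is an assumed input, so treat this as an illustration rather than part of the diff:

```scala
import org.apache.spark.ml.LabeledPoint
import org.apache.spark.ml.regression.LinearRegression
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.rdd.RDD

// Assumed input: an RDD of spark.ml LabeledPoints, e.g. built from
// generateLogisticInput as the test suites below do.
val training: RDD[LabeledPoint] = ???

val lr = new LinearRegression()
  .setMaxIter(10)    // stored in the estimator's embedded ParamMap
  .setRegParam(1.0)

// train() merges any ParamMap passed to it on top of the embedded one;
// with no extra params, the embedded maxIter/regParam above are used.
val model = lr.train(training)

// New in this commit: strongly typed bulk prediction over raw feature vectors.
val features: RDD[Vector] = training.map(_.features)
val predictions: RDD[Double] = model.predict(features)
```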
@@ -137,6 +137,7 @@ private[ml] abstract class PredictionModel[M <: PredictionModel[M]]
}

/**
* Strongly typed version of [[transform()]].
* Default implementation using single-instance predict().
*
* Developers should override this for efficiency. E.g., this does not broadcast the model.
@@ -147,6 +148,9 @@ private[ml] abstract class PredictionModel[M <: PredictionModel[M]]
dataset.map(tmpModel.predict)
}

/** Strongly typed version of [[transform()]]. */
def predict(dataset: RDD[Vector]): RDD[Double] = predict(dataset, new ParamMap)

/**
* Predict label for the given features.
*/
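The scaladoc above notes that the default bulk predict() simply maps the single-instance predict() over the RDD and, in particular, does not broadcast the model. As a hedged sketch of the optimization it hints at, here is how a linear model could broadcast its coefficients and score each partition with a plain dot product; the helper name, its parameters, and the free-standing form are illustrative assumptions, not part of the Predictor API in this PR:

```scala
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.rdd.RDD

// Broadcast the coefficients once instead of capturing the whole model in every
// task's closure, then score each partition with a plain dot product.
// Assumes every feature vector has the same dimension as the weight vector.
def broadcastPredict(weights: Vector, intercept: Double, data: RDD[Vector]): RDD[Double] = {
  val bcWeights: Broadcast[Vector] = data.context.broadcast(weights)
  data.mapPartitions { iter =>
    val w = bcWeights.value.toArray          // unpack the broadcast once per partition
    iter.map { features =>
      val x = features.toArray
      var margin = intercept
      var i = 0
      while (i < w.length) { margin += w(i) * x(i); i += 1 }
      margin
    }
  }
}
```

In the real hierarchy, logic like this would live in an override of the two-argument predict(dataset: RDD[Vector], paramMap: ParamMap) shown in this hunk, on a concrete PredictionModel subclass.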
@@ -53,6 +53,7 @@ class LinearRegression extends Regressor[LinearRegression, LinearRegressionModel
* These values override any specified in this Estimator's embedded ParamMap.
*/
override def train(dataset: RDD[LabeledPoint], paramMap: ParamMap): LinearRegressionModel = {
val map = this.paramMap ++ paramMap
val oldDataset = dataset.map { case LabeledPoint(label: Double, features: Vector, weight) =>
org.apache.spark.mllib.regression.LabeledPoint(label, features)
}
@@ -62,10 +63,10 @@
}
val lr = new LinearRegressionWithSGD()
lr.optimizer
- .setRegParam(paramMap(regParam))
- .setNumIterations(paramMap(maxIter))
+ .setRegParam(map(regParam))
+ .setNumIterations(map(maxIter))
val model = lr.run(oldDataset)
- val lrm = new LinearRegressionModel(this, paramMap, model.weights, model.intercept)
+ val lrm = new LinearRegressionModel(this, map, model.weights, model.intercept)
if (handlePersistence) {
oldDataset.unpersist()
}
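The `val map = this.paramMap ++ paramMap` line above implements the override semantics promised in the scaladoc: params supplied at train()/fit() time take precedence over the estimator's embedded ParamMap. A short hedged illustration, using the setter and ParamPair syntax from the test suites further down (`dataset` is an assumed DataFrame/SchemaRDD with "label" and "features" columns):

```scala
import org.apache.spark.ml.regression.LinearRegression

val dataset = ???   // assumed: a DataFrame/SchemaRDD with "label" and "features" columns

val lr = new LinearRegression()
  .setMaxIter(10)    // embedded params, set through the setters
  .setRegParam(1.0)

// Params passed at fit() time win over the embedded ones: this model is
// trained with regParam = 0.1 but still uses the embedded maxIter = 10.
val model = lr.fit(dataset, lr.regParam -> 0.1)
assert(model.fittingParamMap.get(lr.regParam) == Some(0.1))
assert(model.fittingParamMap.get(lr.maxIter) == Some(10))
```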
@@ -19,15 +19,19 @@ package org.apache.spark.ml.classification

import org.scalatest.FunSuite

import org.apache.spark.ml.LabeledPoint
import org.apache.spark.mllib.classification.LogisticRegressionSuite.generateLogisticInput
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.mllib.util.TestingUtils._
import org.apache.spark.sql.{DataFrame, Row, SQLContext}


class LogisticRegressionSuite extends FunSuite with MLlibTestSparkContext {

@transient var sqlContext: SQLContext = _
@transient var dataset: DataFrame = _
private val eps: Double = 1e-5

override def beforeAll(): Unit = {
super.beforeAll()
@@ -38,6 +42,7 @@ class LogisticRegressionSuite extends FunSuite with MLlibTestSparkContext {

test("logistic regression: default params") {
val lr = new LogisticRegression
assert(lr.getLabelCol == "label")
val model = lr.fit(dataset)
model.transform(dataset)
.select("label", "prediction")
@@ -96,4 +101,43 @@ class LogisticRegressionSuite extends FunSuite with MLlibTestSparkContext {
assert(model2.getThreshold === 0.4)
assert(model2.getScoreCol == "theProb")
}

test("logistic regression: Predictor, Classifier methods") {
val sqlContext = this.sqlContext
import sqlContext._
val lr = new LogisticRegression

// fit() vs. train()
val model1 = lr.fit(dataset)
val rdd = dataset.select('label, 'features).map { case Row(label: Double, features: Vector) =>
LabeledPoint(label, features)
}
val features = rdd.map(_.features)
val model2 = lr.train(rdd)
assert(model1.intercept == model2.intercept)
assert(model1.weights.equals(model2.weights))
assert(model1.numClasses == model2.numClasses)
assert(model1.numClasses === 2)

// transform() vs. predict()
val trans = model1.transform(dataset).select('prediction)
val preds = model1.predict(rdd.map(_.features))
trans.zip(preds).collect().foreach { case (Row(pred1: Double), pred2: Double) =>
assert(pred1 == pred2)
}

// Check various types of predictions.
val allPredictions = features.map { f =>
(model1.predictRaw(f), model1.predictProbabilities(f), model1.predict(f))
}.collect()
val threshold = model1.getThreshold
allPredictions.foreach { case (raw: Vector, prob: Vector, pred: Double) =>
val computeProbFromRaw: (Double => Double) = (m) => 1.0 / (1.0 + math.exp(-m))
raw.toArray.map(computeProbFromRaw).zip(prob.toArray).foreach { case (r, p) =>
assert(r ~== p relTol eps)
}
val predFromProb = prob.toArray.zipWithIndex.maxBy(_._1)._2
assert(pred == predFromProb)
}
}
}
@@ -0,0 +1,95 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.ml.regression

import org.scalatest.FunSuite

import org.apache.spark.ml.LabeledPoint
import org.apache.spark.mllib.classification.LogisticRegressionSuite.generateLogisticInput
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.mllib.util.TestingUtils._
import org.apache.spark.sql.{Row, SQLContext, SchemaRDD}

class LinearRegressionSuite extends FunSuite with MLlibTestSparkContext {

@transient var sqlContext: SQLContext = _
@transient var dataset: SchemaRDD = _

override def beforeAll(): Unit = {
super.beforeAll()
sqlContext = new SQLContext(sc)
dataset = sqlContext.createSchemaRDD(
sc.parallelize(generateLogisticInput(1.0, 1.0, nPoints = 100, seed = 42), 2))
}

test("linear regression: default params") {
val sqlContext = this.sqlContext
import sqlContext._
val lr = new LinearRegression
assert(lr.getLabelCol == "label")
val model = lr.fit(dataset)
model.transform(dataset)
.select('label, 'prediction)
.collect()
// Check defaults
assert(model.getFeaturesCol == "features")
assert(model.getPredictionCol == "prediction")
}

test("linear regression with setters") {
// Set params, train, and check as many as we can.
val sqlContext = this.sqlContext
import sqlContext._
val lr = new LinearRegression()
.setMaxIter(10)
.setRegParam(1.0)
val model = lr.fit(dataset)
assert(model.fittingParamMap.get(lr.maxIter) === Some(10))
assert(model.fittingParamMap.get(lr.regParam) === Some(1.0))

// Call fit() with new params, and check as many as we can.
val model2 = lr.fit(dataset, lr.maxIter -> 5, lr.regParam -> 0.1, lr.predictionCol -> "thePred")
assert(model2.fittingParamMap.get(lr.maxIter) === Some(5))
assert(model2.fittingParamMap.get(lr.regParam) === Some(0.1))
assert(model2.getPredictionCol == "thePred")
}

test("linear regression: Predictor, Regressor methods") {
val sqlContext = this.sqlContext
import sqlContext._
val lr = new LinearRegression

// fit() vs. train()
val model1 = lr.fit(dataset)
val rdd = dataset.select('label, 'features).map { case Row(label: Double, features: Vector) =>
LabeledPoint(label, features)
}
val features = rdd.map(_.features)
val model2 = lr.train(rdd)
assert(model1.intercept == model2.intercept)
assert(model1.weights.equals(model2.weights))

// transform() vs. predict()
val trans = model1.transform(dataset).select('prediction)
val preds = model1.predict(rdd.map(_.features))
trans.zip(preds).collect().foreach { case (Row(pred1: Double), pred2: Double) =>
assert(pred1 == pred2)
}
}
}