-
Notifications
You must be signed in to change notification settings - Fork 29k
[SPARK-11445][DOCS]Replaced example code in mllib-ensembles.md using include_example #9407
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 1 commit
d152cb5
a53a20d
870cbb3
a21b0ed
079b1de
24e74e1
a71e99b
29a8067
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
- Loading branch information
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -16,10 +16,13 @@ | |
| */ | ||
|
|
||
| package org.apache.spark.examples.mllib; | ||
|
|
||
| // $example on$ | ||
| import java.util.HashMap; | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. You need to add some blank lines in the imports. See the code style guide. |
||
| import java.util.Map; | ||
|
|
||
| import scala.Tuple2; | ||
|
|
||
| import org.apache.spark.SparkConf; | ||
| import org.apache.spark.api.java.JavaPairRDD; | ||
| import org.apache.spark.api.java.JavaRDD; | ||
|
|
@@ -32,28 +35,29 @@ | |
| import org.apache.spark.mllib.tree.model.GradientBoostedTreesModel; | ||
| import org.apache.spark.mllib.util.MLUtils; | ||
| // $example off$ | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Add a blank line below here. |
||
|
|
||
| public class JavaGradientBoostingClassificationExample { | ||
| public static void main(String[] args) { | ||
| // $example on$ | ||
| SparkConf sparkConf = new SparkConf() | ||
| .setAppName("JavaGradientBoostedTreesClassificationExample"); | ||
| JavaSparkContext sc = new JavaSparkContext(sparkConf); | ||
| JavaSparkContext jsc = new JavaSparkContext(sparkConf); | ||
|
|
||
| // Load and parse the data file. | ||
| String datapath = "data/mllib/sample_libsvm_data.txt"; | ||
| JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(sc.sc(), datapath).toJavaRDD(); | ||
| JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(jsc.sc(), datapath).toJavaRDD(); | ||
| // Split the data into training and test sets (30% held out for testing) | ||
| JavaRDD<LabeledPoint>[] splits = data.randomSplit(new double[]{0.7, 0.3}); | ||
| JavaRDD<LabeledPoint> trainingData = splits[0]; | ||
| JavaRDD<LabeledPoint> testData = splits[1]; | ||
|
|
||
| // Train a GradientBoostedTrees model. | ||
| // The defaultParams for Classification use LogLoss by default. | ||
| // The defaultParams for Classification use LogLoss by default. | ||
| BoostingStrategy boostingStrategy = BoostingStrategy.defaultParams("Classification"); | ||
| boostingStrategy.setNumIterations(3); // Note: Use more iterations in practice. | ||
| boostingStrategy.getTreeStrategy().setNumClasses(2); | ||
| boostingStrategy.getTreeStrategy().setMaxDepth(5); | ||
| // Empty categoricalFeaturesInfo indicates all features are continuous. | ||
| // Empty categoricalFeaturesInfo indicates all features are continuous. | ||
| Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>(); | ||
| boostingStrategy.treeStrategy().setCategoricalFeaturesInfo(categoricalFeaturesInfo); | ||
|
|
||
|
|
@@ -79,9 +83,10 @@ public Boolean call(Tuple2<Double, Double> pl) { | |
| System.out.println("Learned classification GBT model:\n" + model.toDebugString()); | ||
|
|
||
| // Save and load model | ||
| model.save(sc.sc(), "target/tmp/myGradientBoostingClassificationModel"); | ||
| GradientBoostedTreesModel sameModel = GradientBoostedTreesModel.load(sc.sc(), | ||
| model.save(jsc.sc(), "target/tmp/myGradientBoostingClassificationModel"); | ||
| GradientBoostedTreesModel sameModel = GradientBoostedTreesModel.load(jsc.sc(), | ||
| "target/tmp/myGradientBoostingClassificationModel"); | ||
| } | ||
| // $example off$ | ||
| } | ||
|
|
||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -16,10 +16,13 @@ | |
| */ | ||
|
|
||
| package org.apache.spark.examples.mllib; | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Add a blank line here. |
||
|
|
||
| // $example on$ | ||
| import java.util.HashMap; | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. See the comment in the previous code file.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. See the comment in the previous code file. |
||
| import java.util.Map; | ||
|
|
||
| import scala.Tuple2; | ||
|
|
||
| import org.apache.spark.SparkConf; | ||
| import org.apache.spark.api.java.function.Function2; | ||
| import org.apache.spark.api.java.JavaPairRDD; | ||
|
|
@@ -33,26 +36,27 @@ | |
| import org.apache.spark.mllib.tree.model.GradientBoostedTreesModel; | ||
| import org.apache.spark.mllib.util.MLUtils; | ||
| // $example off$ | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Add a blank line here. |
||
|
|
||
| public class JavaGradientBoostingRegressionExample { | ||
| public static void main(String[] args) { | ||
| // $example on$ | ||
| SparkConf sparkConf = new SparkConf() | ||
| .setAppName("JavaGradientBoostedTreesRegressionExample"); | ||
| JavaSparkContext sc = new JavaSparkContext(sparkConf); | ||
| JavaSparkContext jsc = new JavaSparkContext(sparkConf); | ||
| // Load and parse the data file. | ||
| String datapath = "data/mllib/sample_libsvm_data.txt"; | ||
| JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(sc.sc(), datapath).toJavaRDD(); | ||
| JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(jsc.sc(), datapath).toJavaRDD(); | ||
| // Split the data into training and test sets (30% held out for testing) | ||
| JavaRDD<LabeledPoint>[] splits = data.randomSplit(new double[]{0.7, 0.3}); | ||
| JavaRDD<LabeledPoint> trainingData = splits[0]; | ||
| JavaRDD<LabeledPoint> testData = splits[1]; | ||
|
|
||
| // Train a GradientBoostedTrees model. | ||
| // The defaultParams for Regression use SquaredError by default. | ||
| // The defaultParams for Regression use SquaredError by default. | ||
| BoostingStrategy boostingStrategy = BoostingStrategy.defaultParams("Regression"); | ||
| boostingStrategy.setNumIterations(3); // Note: Use more iterations in practice. | ||
| boostingStrategy.getTreeStrategy().setMaxDepth(5); | ||
| // Empty categoricalFeaturesInfo indicates all features are continuous. | ||
| // Empty categoricalFeaturesInfo indicates all features are continuous. | ||
| Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>(); | ||
| boostingStrategy.treeStrategy().setCategoricalFeaturesInfo(categoricalFeaturesInfo); | ||
|
|
||
|
|
@@ -84,8 +88,8 @@ public Double call(Double a, Double b) { | |
| System.out.println("Learned regression GBT model:\n" + model.toDebugString()); | ||
|
|
||
| // Save and load model | ||
| model.save(sc.sc(), "target/tmp/myGradientBoostingRegressionModel"); | ||
| GradientBoostedTreesModel sameModel = GradientBoostedTreesModel.load(sc.sc(), | ||
| model.save(jsc.sc(), "target/tmp/myGradientBoostingRegressionModel"); | ||
| GradientBoostedTreesModel sameModel = GradientBoostedTreesModel.load(jsc.sc(), | ||
| "target/tmp/myGradientBoostingRegressionModel"); | ||
| // $example off$ | ||
| } | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -19,7 +19,9 @@ | |
|
|
||
| // $example on$ | ||
| import java.util.HashMap; | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Add blank lines according to the Spark Scala style guide.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Add blank lines according to the Spark Scala style guide. |
||
|
|
||
| import scala.Tuple2; | ||
|
|
||
| import org.apache.spark.SparkConf; | ||
| import org.apache.spark.api.java.JavaPairRDD; | ||
| import org.apache.spark.api.java.JavaRDD; | ||
|
|
@@ -31,21 +33,22 @@ | |
| import org.apache.spark.mllib.tree.model.RandomForestModel; | ||
| import org.apache.spark.mllib.util.MLUtils; | ||
| // $example off$ | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Add a blank line. |
||
|
|
||
| public class JavaRandomForestClassificationExample { | ||
| public static void main(String[] args) { | ||
| // $example on$ | ||
| SparkConf sparkConf = new SparkConf().setAppName("JavaRandomForestClassificationExample"); | ||
| JavaSparkContext sc = new JavaSparkContext(sparkConf); | ||
| JavaSparkContext jsc = new JavaSparkContext(sparkConf); | ||
| // Load and parse the data file. | ||
| String datapath = "data/mllib/sample_libsvm_data.txt"; | ||
| JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(sc.sc(), datapath).toJavaRDD(); | ||
| JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(jsc.sc(), datapath).toJavaRDD(); | ||
| // Split the data into training and test sets (30% held out for testing) | ||
| JavaRDD<LabeledPoint>[] splits = data.randomSplit(new double[]{0.7, 0.3}); | ||
| JavaRDD<LabeledPoint> trainingData = splits[0]; | ||
| JavaRDD<LabeledPoint> testData = splits[1]; | ||
|
|
||
| // Train a RandomForest model. | ||
| // Empty categoricalFeaturesInfo indicates all features are continuous. | ||
| // Empty categoricalFeaturesInfo indicates all features are continuous. | ||
| Integer numClasses = 2; | ||
| HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>(); | ||
| Integer numTrees = 3; // Use more in practice. | ||
|
|
@@ -78,9 +81,9 @@ public Boolean call(Tuple2<Double, Double> pl) { | |
| System.out.println("Learned classification forest model:\n" + model.toDebugString()); | ||
|
|
||
| // Save and load model | ||
| model.save(sc.sc(), "target/tmp/myRandomForestClassificationModel"); | ||
| RandomForestModel sameModel = RandomForestModel.load(sc.sc(), | ||
| model.save(jsc.sc(), "target/tmp/myRandomForestClassificationModel"); | ||
| RandomForestModel sameModel = RandomForestModel.load(jsc.sc(), | ||
| "target/tmp/myRandomForestClassificationModel"); | ||
| //$example off$ | ||
| // $example off$ | ||
| } | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -20,7 +20,9 @@ | |
| // $example on$ | ||
| import java.util.HashMap; | ||
| import java.util.Map; | ||
|
|
||
| import scala.Tuple2; | ||
|
|
||
| import org.apache.spark.api.java.function.Function2; | ||
| import org.apache.spark.api.java.JavaPairRDD; | ||
| import org.apache.spark.api.java.JavaRDD; | ||
|
|
@@ -33,21 +35,22 @@ | |
| import org.apache.spark.mllib.util.MLUtils; | ||
| import org.apache.spark.SparkConf; | ||
| // $example off$ | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Same issue as in the previous code files. |
||
|
|
||
| public class JavaRandomForestRegressionExample { | ||
| public static void main(String[] args) { | ||
| // $example on$ | ||
| SparkConf sparkConf = new SparkConf().setAppName("JavaRandomForestRegressionExample"); | ||
| JavaSparkContext sc = new JavaSparkContext(sparkConf); | ||
| JavaSparkContext jsc = new JavaSparkContext(sparkConf); | ||
| // Load and parse the data file. | ||
| String datapath = "data/mllib/sample_libsvm_data.txt"; | ||
| JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(sc.sc(), datapath).toJavaRDD(); | ||
| JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(jsc.sc(), datapath).toJavaRDD(); | ||
| // Split the data into training and test sets (30% held out for testing) | ||
| JavaRDD<LabeledPoint>[] splits = data.randomSplit(new double[]{0.7, 0.3}); | ||
| JavaRDD<LabeledPoint> trainingData = splits[0]; | ||
| JavaRDD<LabeledPoint> testData = splits[1]; | ||
|
|
||
| // Set parameters. | ||
| // Empty categoricalFeaturesInfo indicates all features are continuous. | ||
| // Empty categoricalFeaturesInfo indicates all features are continuous. | ||
| Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>(); | ||
| Integer numTrees = 3; // Use more in practice. | ||
| String featureSubsetStrategy = "auto"; // Let the algorithm choose. | ||
|
|
@@ -84,8 +87,8 @@ public Double call(Double a, Double b) { | |
| System.out.println("Learned regression forest model:\n" + model.toDebugString()); | ||
|
|
||
| // Save and load model | ||
| model.save(sc.sc(), "target/tmp/myRandomForestRegressionModel"); | ||
| RandomForestModel sameModel = RandomForestModel.load(sc.sc(), | ||
| model.save(jsc.sc(), "target/tmp/myRandomForestRegressionModel"); | ||
| RandomForestModel sameModel = RandomForestModel.load(jsc.sc(), | ||
| "target/tmp/myRandomForestRegressionModel"); | ||
| // $example off$ | ||
| } | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -24,6 +24,7 @@ import org.apache.spark.mllib.tree.RandomForest | |
| import org.apache.spark.mllib.tree.model.RandomForestModel | ||
| import org.apache.spark.mllib.util.MLUtils | ||
| // $example off$ | ||
|
|
||
| object RandomForestClassificationExample { | ||
| def main(args: Array[String]) { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
|
||
| val conf = new SparkConf().setAppName("RandomForestClassificationExample") | ||
|
|
@@ -36,7 +37,7 @@ object RandomForestClassificationExample { | |
| val (trainingData, testData) = (splits(0), splits(1)) | ||
|
|
||
| // Train a RandomForest model. | ||
| // Empty categoricalFeaturesInfo indicates all features are continuous. | ||
| // Empty categoricalFeaturesInfo indicates all features are continuous. | ||
| val numClasses = 2 | ||
| val categoricalFeaturesInfo = Map[Int, Int]() | ||
| val numTrees = 3 // Use more in practice. | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -24,6 +24,7 @@ import org.apache.spark.mllib.tree.RandomForest | |
| import org.apache.spark.mllib.tree.model.RandomForestModel | ||
| import org.apache.spark.mllib.util.MLUtils | ||
| // $example off$ | ||
|
|
||
| object RandomForestRegressionExample { | ||
| def main(args: Array[String]) { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Ditto (same as the previous comment). |
||
| val conf = new SparkConf().setAppName("RandomForestRegressionExample") | ||
|
|
@@ -36,7 +37,7 @@ object RandomForestRegressionExample { | |
| val (trainingData, testData) = (splits(0), splits(1)) | ||
|
|
||
| // Train a RandomForest model. | ||
| // Empty categoricalFeaturesInfo indicates all features are continuous. | ||
| // Empty categoricalFeaturesInfo indicates all features are continuous. | ||
| val numClasses = 2 | ||
| val categoricalFeaturesInfo = Map[Int, Int]() | ||
| val numTrees = 3 // Use more in practice. | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
Add a blank line below here.