diff --git a/R/README.md b/R/README.md
index 31174c73526f..da9f042b4fde 100644
--- a/R/README.md
+++ b/R/README.md
@@ -17,10 +17,14 @@ export R_HOME=/home/username/R
 
 #### Build Spark
 
-Build Spark with [Maven](https://spark.apache.org/docs/latest/building-spark.html#buildmvn) and include the `-Psparkr` profile to build the R package. For example to use the default Hadoop versions you can run
+Build Spark with [Maven](https://spark.apache.org/docs/latest/building-spark.html#buildmvn) or [SBT](https://spark.apache.org/docs/latest/building-spark.html#building-with-sbt), and include the `-Psparkr` profile to build the R package. For example to use the default Hadoop versions you can run
 
 ```bash
+# Maven
 ./build/mvn -DskipTests -Psparkr package
+
+# SBT
+./build/sbt -Psparkr package
 ```
 
 #### Running sparkR
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 54ac3c19fa83..b872668db765 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -414,6 +414,10 @@ object SparkBuild extends PomBuild {
 
   enable(YARN.settings)(yarn)
 
+  if (profiles.contains("sparkr")) {
+    enable(SparkR.settings)(core)
+  }
+
   /**
    * Adds the ability to run the spark shell directly from SBT without building an assembly
    * jar.
@@ -888,6 +892,25 @@ object PySparkAssembly {
 
 }
 
+object SparkR {
+  import scala.sys.process.Process
+
+  val buildRPackage = taskKey[Unit]("Build the R package")
+  lazy val settings = Seq(
+    buildRPackage := {
+      val command = baseDirectory.value / ".." / "R" / "install-dev.sh"
+      Process(command.toString).!!
+    },
+    (Compile / compile) := (Def.taskDyn {
+      val c = (Compile / compile).value
+      Def.task {
+        (Compile / buildRPackage).value
+        c
+      }
+    }).value
+  )
+}
+
 object Unidoc {
 
   import BuildCommons._
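
Note on exercising the new hook: with the `sparkr` profile active, `SparkR.settings` attaches the `buildRPackage` task to the `core` project and chains it onto `Compile / compile`, so any SBT invocation that compiles `core` also runs `R/install-dev.sh`. Below is a minimal sketch of how one might drive this by hand; the `core/buildRPackage` task path and the `R/lib/SparkR` output directory are assumptions inferred from the settings above and from what `install-dev.sh` conventionally produces, not something this patch documents.

```bash
# Hypothetical invocations; the task path and output location are assumptions.

# Run only the R package build on the core project (the sparkr profile enables SparkR.settings):
./build/sbt -Psparkr "core/buildRPackage"

# Or run a normal build; Compile / compile on core now also invokes R/install-dev.sh:
./build/sbt -Psparkr package

# install-dev.sh conventionally installs the built package under R/lib:
ls R/lib/SparkR
```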