port seed etc
wangmiao1981 committed Feb 1, 2017
commit 34898999bbe813f94ee54a01975ef6077d24f0bd
13 changes: 11 additions & 2 deletions R/pkg/R/mllib.R
@@ -599,6 +599,10 @@ setMethod("summary", signature(object = "IsotonicRegressionModel"),
 #' @param k number of centers.
 #' @param maxIter maximum iteration number.
 #' @param initMode the initialization algorithm chosen to fit the model.
+#' @param seed the random seed for cluster initialization.
+#' @param initSteps the number of steps for the k-means|| initialization mode.
+#'                  This is an advanced setting; the default of 2 is almost always enough. Must be > 0.
+#' @param tol convergence tolerance of iterations.
 #' @param ... additional argument(s) passed to the method.
 #' @return \code{spark.kmeans} returns a fitted k-means model.
 #' @rdname spark.kmeans
@@ -628,11 +632,16 @@ setMethod("summary", signature(object = "IsotonicRegressionModel"),
 #' @note spark.kmeans since 2.0.0
 #' @seealso \link{predict}, \link{read.ml}, \link{write.ml}
 setMethod("spark.kmeans", signature(data = "SparkDataFrame", formula = "formula"),
-          function(data, formula, k = 2, maxIter = 20, initMode = c("k-means||", "random")) {
+          function(data, formula, k = 2, maxIter = 20, initMode = c("k-means||", "random"),
+                   seed = NULL, initSteps = 2, tol = 1E-4) {
             formula <- paste(deparse(formula), collapse = "")
             initMode <- match.arg(initMode)
+            if (!is.null(seed)) {
+              seed <- as.character(as.integer(seed))
+            }
             jobj <- callJStatic("org.apache.spark.ml.r.KMeansWrapper", "fit", data@sdf, formula,
-                                as.integer(k), as.integer(maxIter), initMode)
+                                as.integer(k), as.integer(maxIter), initMode, seed,
+                                as.integer(initSteps), as.numeric(tol))
             new("KMeansModel", jobj = jobj)
           })
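
A hedged usage sketch of the new arguments (not part of the diff; assumes a running SparkR session and a toy two-column DataFrame):

```r
library(SparkR)
sparkR.session()

# Toy data: two numeric features.
df <- createDataFrame(data.frame(x = rnorm(100), y = rnorm(100)))

# Fit k-means using the arguments exposed by this change: seed makes the
# initialization reproducible, initSteps tunes the k-means|| initialization,
# and tol sets the convergence tolerance.
model <- spark.kmeans(data = df, ~ x + y, k = 3, maxIter = 10,
                      initMode = "k-means||", seed = 100,
                      initSteps = 2, tol = 1E-4)
summary(model)
```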

4 changes: 2 additions & 2 deletions R/pkg/inst/tests/testthat/test_mllib.R
@@ -384,9 +384,9 @@ test_that("spark.kmeans", {
   df <- createDataFrame(cols)
 
   model1 <- spark.kmeans(data = df, ~ ., k = 5, maxIter = 10,
-                        initMode = "random", seed = 1, tol = 1E-5)
+                         initMode = "random", seed = 1, tol = 1E-5)
   model2 <- spark.kmeans(data = df, ~ ., k = 5, maxIter = 10,
-                        initMode = "random", seed = 22222, tol = 1E-5)
+                         initMode = "random", seed = 22222, tol = 1E-5)
 
   summary.model1 <- summary(model1)
   summary.model2 <- summary(model2)
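
A hedged extension of the test above (not in this commit): with initMode = "random", refitting with the same seed should reproduce model1, whereas seeds 1 and 22222 may yield different centers.

```r
# Hypothetical follow-up assertion, assuming summary() exposes the cluster
# centers in $coefficients as SparkR model summaries typically do.
model1b <- spark.kmeans(data = df, ~ ., k = 5, maxIter = 10,
                        initMode = "random", seed = 1, tol = 1E-5)
expect_equal(summary(model1b)$coefficients, summary.model1$coefficients)
```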
mllib/src/main/scala/org/apache/spark/ml/r/KMeansWrapper.scala
@@ -70,7 +70,10 @@ private[r] object KMeansWrapper extends MLReadable[KMeansWrapper] {
       formula: String,
       k: Int,
       maxIter: Int,
-      initMode: String): KMeansWrapper = {
+      initMode: String,
+      seed: String,
+      initSteps: Int,
+      tol: Double): KMeansWrapper = {
 
     val rFormula = new RFormula()
       .setFormula(formula)
@@ -89,6 +92,10 @@ private[r] object KMeansWrapper extends MLReadable[KMeansWrapper] {
       .setMaxIter(maxIter)
       .setInitMode(initMode)
       .setFeaturesCol(rFormula.getFeaturesCol)
+      .setInitSteps(initSteps)
+      .setTol(tol)
+
+    if (seed != null && seed.length > 0) kMeans.setSeed(seed.toInt)
 
     val pipeline = new Pipeline()
       .setStages(Array(rFormulaModel, kMeans))
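
For context on the seed plumbing above: spark.kmeans coerces a numeric seed to an integer string before the JVM call, and KMeansWrapper.fit only calls setSeed when that string is non-null and non-empty, so a NULL seed falls back to Spark's default. A minimal R-side sketch of that coercion (plain R, no Spark session needed; coerce_seed is a hypothetical helper mirroring the diff, not part of the API):

```r
# Hypothetical helper that mirrors the seed handling added to spark.kmeans().
coerce_seed <- function(seed) {
  if (!is.null(seed)) {
    # A numeric seed becomes an integer string for the JVM wrapper,
    # which parses it back with seed.toInt on the Scala side.
    seed <- as.character(as.integer(seed))
  }
  seed  # NULL passes through, so setSeed() is never called in Scala
}

coerce_seed(22222)  # "22222"
coerce_seed(1.9)    # "1"  (as.integer truncates toward zero)
coerce_seed(NULL)   # NULL
```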