28 commits
b80bb1f
add pic framework (model, class etc)
wangmiao1981 Jun 13, 2016
75004e8
change a comment
wangmiao1981 Jun 13, 2016
e1d9a33
add missing functions: fit, predict, load, save, etc.
wangmiao1981 Jun 17, 2016
f8343e0
add unit test file
wangmiao1981 Jun 18, 2016
c62a2c0
add test cases part 1
wangmiao1981 Jun 20, 2016
1277f75
add unit test part 2: test fit, parameters etc.
wangmiao1981 Jun 20, 2016
f50873d
fix a type issue
wangmiao1981 Jun 20, 2016
88a9ae0
add more unit tests
wangmiao1981 Jun 21, 2016
0618815
delete unused import and add comments
wangmiao1981 Jun 21, 2016
04fddbd
change version to 2.1.0
wangmiao1981 Oct 25, 2016
b49f4c7
change PIC to a Transformer
wangmiao1981 Nov 3, 2016
d3f86d0
add LabelCol
wangmiao1981 Nov 4, 2016
655bc67
change col implementation
wangmiao1981 Nov 4, 2016
d5975bc
address some of the comments
wangmiao1981 Feb 17, 2017
f012624
add additional test with dataset having more data
wangmiao1981 Feb 21, 2017
bef0594
change input data format
wangmiao1981 Mar 14, 2017
a4bee89
resolve warnings
wangmiao1981 Mar 15, 2017
0f97907
add neighbor and weight cols
wangmiao1981 Mar 16, 2017
015383a
address review comments 1
wangmiao1981 Aug 15, 2017
2d29570
fix style
wangmiao1981 Aug 15, 2017
af549e8
remove unused comments
wangmiao1981 Aug 15, 2017
9b4f3d5
add Since
wangmiao1981 Aug 15, 2017
e35fe54
fix missing >
wangmiao1981 Aug 17, 2017
73485d8
fix doc
wangmiao1981 Aug 17, 2017
bd5ca5d
Merge github.com:apache/spark into pic
wangmiao1981 Sep 12, 2017
3b0f71c
Merge github.com:apache/spark into pic
wangmiao1981 Oct 25, 2017
752b685
address review comments
wangmiao1981 Oct 25, 2017
cfa18af
fix unit test
wangmiao1981 Oct 30, 2017
add additional test with dataset having more data
wangmiao1981 committed Aug 16, 2017
commit f012624a5061c4df0aaad1f232e0ca17f366b824
@@ -19,7 +19,7 @@ package org.apache.spark.ml.clustering

import scala.collection.mutable

import org.apache.spark.SparkFunSuite
import org.apache.spark.{SparkException, SparkFunSuite}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.util.DefaultReadWriteTest
import org.apache.spark.mllib.util.MLlibTestSparkContext
@@ -29,6 +29,7 @@ class PowerIterationClusteringSuite extends SparkFunSuite
with MLlibTestSparkContext with DefaultReadWriteTest {

@transient var data: Dataset[_] = _
@transient var malData: Dataset[_] = _
Review comment (Member): Not used

final val r1 = 1.0
final val n1 = 10
final val r2 = 4.0
@@ -38,6 +39,7 @@ class PowerIterationClusteringSuite extends SparkFunSuite
super.beforeAll()

data = PowerIterationClusteringSuite.generatePICData(spark, r1, r2, n1, n2)
malData = PowerIterationClusteringSuite.generateMalFormatData(spark)
}

test("default parameters") {
@@ -80,10 +82,16 @@ class PowerIterationClusteringSuite extends SparkFunSuite
test("power iteration clustering") {
val n = n1 + n2

val result = new PowerIterationClustering()
val model = new PowerIterationClustering()
.setK(2)
.setMaxIter(40)
.transform(data)
val result = model.transform(data)

val thrownData = intercept[SparkException] {
model.transform(malData)
}

assert(thrownData.getMessage().contains("The number of elements in each row must be 3"))

val predictions = Array.fill(2)(mutable.Set.empty[Long])
result.select("id", "prediction").collect().foreach {
@@ -150,4 +158,16 @@ object PowerIterationClusteringSuite {
.map(v => TestRow(v))
spark.createDataFrame(rdd)
}

def generateMalFormatData(spark: SparkSession): DataFrame = {
val data = for (i <- 1 until 2; j <- 0 until i) yield {
(i.toLong, j.toLong, 0.01, (i + j).toLong)
}
val sc = spark.sparkContext
val rdd = sc.parallelize(data)
.map{case (i: Long, j: Long, sim: Double, k: Long) => Vectors.dense(Array(i, j, sim, k))}
.map(v => TestRow(v))
spark.createDataFrame(rdd)
}

}
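
For readers skimming this diff, here is a minimal usage sketch of the Transformer-style API this PR proposes (setK/setMaxIter/transform, with the "id" and "prediction" output columns selected in the test above). It is an illustration only: the input column name ("features"), the SparkSession setup, and the three-element row layout (source id, neighbor id, similarity) are assumptions inferred from the test helpers and the "number of elements in each row must be 3" check, and this reflects the API as proposed in this PR, not necessarily what was eventually merged into Spark.

import org.apache.spark.ml.clustering.PowerIterationClustering
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.SparkSession

object PICUsageSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("pic-usage-sketch")
      .getOrCreate()
    import spark.implicits._

    // Each row encodes one edge of the affinity graph as (src id, dst id, similarity).
    // A row with a fourth element would trigger the SparkException asserted in the test above.
    val affinities = Seq(
      Vectors.dense(0.0, 1.0, 0.9),
      Vectors.dense(1.0, 2.0, 0.9),
      Vectors.dense(2.0, 3.0, 0.1)
    ).map(Tuple1.apply).toDF("features")  // column name is an assumption

    val pic = new PowerIterationClustering()
      .setK(2)
      .setMaxIter(40)

    // transform() assigns each vertex to a cluster; "id" and "prediction" are the
    // output columns exercised by the test in this diff.
    pic.transform(affinities).select("id", "prediction").show()

    spark.stop()
  }
}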