Closed
Changes from 1 commit (35 commits total):
1d6b718  continuous shuffle read RDD (jose-torres, May 15, 2018)
b5d1008  docs (jose-torres, May 17, 2018)
af40769  Merge remote-tracking branch 'apache/master' into readerRddMaster (jose-torres, May 17, 2018)
46456dc  fix ctor (jose-torres, May 17, 2018)
2ea8a6f  multiple partition test (jose-torres, May 17, 2018)
955ac79  unset task context after test (jose-torres, May 17, 2018)
8cefb72  conf from RDD (jose-torres, May 18, 2018)
f91bfe7  endpoint name (jose-torres, May 18, 2018)
2590292  testing bool (jose-torres, May 18, 2018)
859e6e4  tests (jose-torres, May 18, 2018)
b23b7bb  take instead of poll (jose-torres, May 18, 2018)
97f7e8f  add interface (jose-torres, May 18, 2018)
de21b1c  clarify comment (jose-torres, May 18, 2018)
7dcf51a  multiple (jose-torres, May 18, 2018)
ad0b5aa  writer with 1 reader partition (jose-torres, May 25, 2018)
c9adee5  docs and iface (jose-torres, May 25, 2018)
63d38d8  Merge remote-tracking branch 'apache/master' into writerTask (jose-torres, May 25, 2018)
331f437  increment epoch (jose-torres, May 25, 2018)
f3ce675  undo oop (jose-torres, May 25, 2018)
e0108d7  make rdd loop (jose-torres, May 25, 2018)
f400651  remote write RDD (jose-torres, May 25, 2018)
1aaad8d  rename classes (jose-torres, May 25, 2018)
59890d4  combine suites (jose-torres, May 25, 2018)
af1508c  fully rm old suite (jose-torres, May 25, 2018)
65837ac  reorder tests (jose-torres, May 29, 2018)
a68fae2  return future (jose-torres, May 31, 2018)
98d55e4  finish getting rid of old name (jose-torres, May 31, 2018)
e6b9118  synchronous (jose-torres, May 31, 2018)
629455b  finish rename (jose-torres, May 31, 2018)
cb6d42b  add timeouts (jose-torres, Jun 13, 2018)
59d6ff7  unalign (jose-torres, Jun 13, 2018)
f90388c  add note (jose-torres, Jun 13, 2018)
4bbdeae  parallel (jose-torres, Jun 13, 2018)
e57531d  fix compile (jose-torres, Jun 13, 2018)
cff37c4  fix compile (jose-torres, Jun 13, 2018)
Commit 1aaad8d7660d1d6cd2abbca10d67ef724b4a0dcc ("rename classes")
jose-torres committed May 25, 2018
@@ -34,7 +34,8 @@ case class ContinuousShuffleReadPartition(
   // Initialized only on the executor, and only once even as we call compute() multiple times.
   lazy val (reader: ContinuousShuffleReader, endpoint) = {
     val env = SparkEnv.get.rpcEnv
-    val receiver = new UnsafeRowReceiver(queueSize, numShuffleWriters, epochIntervalMs, env)
+    val receiver = new RPCContinuousShuffleReader(
+      queueSize, numShuffleWriters, epochIntervalMs, env)
     val endpoint = env.setupEndpoint(s"UnsafeRowReceiver-${UUID.randomUUID()}", receiver)

     TaskContext.get().addTaskCompletionListener { ctx =>
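The comment in this hunk captures the idiom the reader setup relies on: the partition object is deserialized on the executor, so a `lazy val` creates the RPC endpoint exactly once per task even if `compute()` touches it repeatedly, and the UUID suffix keeps endpoint names unique across tasks. Here is a minimal standalone sketch of the idiom, assuming a hypothetical in-memory `FakeRpcEnv` in place of Spark's `RpcEnv` (none of these names are from the PR):

```scala
import java.util.UUID
import java.util.concurrent.ConcurrentHashMap

// Hypothetical in-memory stand-in for Spark's RpcEnv; it exists only to make
// this sketch self-contained and is not the PR's API.
object FakeRpcEnv {
  private val endpoints = new ConcurrentHashMap[String, AnyRef]()
  def setupEndpoint(name: String, ep: AnyRef): String = { endpoints.put(name, ep); name }
  def stop(name: String): Unit = endpoints.remove(name)
}

object LazyInitDemo extends App {
  var inits = 0

  case class DemoReadPartition(index: Int) {
    // Initialized at most once per partition object, however often it is read:
    // the property the PR relies on when compute() is called multiple times.
    lazy val endpointName: String = {
      inits += 1
      FakeRpcEnv.setupEndpoint(s"Receiver-${UUID.randomUUID()}", new Object)
    }
  }

  val p = DemoReadPartition(0)
  val first = p.endpointName
  val second = p.endpointName
  assert(first == second && inits == 1) // one endpoint, one registration
  FakeRpcEnv.stop(first) // the PR does this in a task completion listener
}
```

The task completion listener in the real code plays the role of the explicit `stop` call here, tying the endpoint's lifetime to the task's.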
@@ -46,7 +46,7 @@ private[shuffle] case class ReceiverEpochMarker(writerId: Int) extends UnsafeRow
  * TODO: Support multiple source tasks. We need to output a single epoch marker once all
  * source tasks have sent one.
  */
-private[shuffle] class UnsafeRowReceiver(
+private[shuffle] class RPCContinuousShuffleReader(
Member: [comment not captured in this view]

jose-torres (Contributor, Author): Good point. Caught what I think are the rest.
     queueSize: Int,
     numShuffleWriters: Int,
     epochIntervalMs: Long,
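The TODO in this doc comment describes the multi-writer epoch protocol the reader needs: emit a single epoch boundary only after every writer has sent its marker. A hedged sketch of that aggregation rule, using plain blocking queues and hypothetical `Demo*` types rather than the PR's actual RPC messages and threading:

```scala
import java.util.concurrent.LinkedBlockingQueue

sealed trait DemoMessage
case class DemoRow(value: Int) extends DemoMessage
case object DemoEpochMarker extends DemoMessage

// One queue per writer; the epoch closes only after a marker has been taken
// from every writer. This sketches the aggregation rule only, not the PR's
// actual reader, which multiplexes writers through RPC threads.
class DemoEpochReader(numWriters: Int) {
  val queues: Array[LinkedBlockingQueue[DemoMessage]] =
    Array.fill(numWriters)(new LinkedBlockingQueue[DemoMessage]())

  def readEpoch(): Seq[Int] = {
    val rows = Seq.newBuilder[Int]
    var remaining = (0 until numWriters).toSet
    while (remaining.nonEmpty) {
      // Take one message from each writer that hasn't finished the epoch yet.
      for (w <- remaining) {
        queues(w).take() match {
          case DemoRow(v)      => rows += v
          case DemoEpochMarker => remaining -= w
        }
      }
    }
    rows.result()
  }
}

object EpochDemo extends App {
  val r = new DemoEpochReader(numWriters = 2)
  r.queues(0).put(DemoRow(1)); r.queues(0).put(DemoEpochMarker)
  r.queues(1).put(DemoEpochMarker)
  assert(r.readEpoch() == Seq(1)) // closes only after both markers arrive
}
```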
@@ -22,14 +22,14 @@ import org.apache.spark.rpc.RpcEndpointRef
 import org.apache.spark.sql.catalyst.expressions.UnsafeRow

 /**
- * A [[ContinuousShuffleWriter]] sending data to [[UnsafeRowReceiver]] instances.
+ * A [[ContinuousShuffleWriter]] sending data to [[RPCContinuousShuffleReader]] instances.
  *
- * @param writerId The partition ID of this writer.
+ * @param writerId          The partition ID of this writer.
Member: nit: we don't use vertical alignment, as it will introduce unnecessary changes in the future.

  * @param outputPartitioner The partitioner on the reader side of the shuffle.
- * @param endpoints The [[UnsafeRowReceiver]] endpoints to write to. Indexed by partition ID within
- *                  outputPartitioner.
+ * @param endpoints The [[RPCContinuousShuffleReader]] endpoints to write to. Indexed by
+ *                  partition ID within outputPartitioner.
  */
-class UnsafeRowWriter(
+class RPCContinuousShuffleWriter(
     writerId: Int,
Contributor: nit: rename to partitionId?

jose-torres (Contributor, Author): I worry that partitionId is ambiguous with the partition to which the shuffle data is being written.

Contributor: OK, makes sense.

     outputPartitioner: Partitioner,
     endpoints: Array[RpcEndpointRef]) extends ContinuousShuffleWriter {
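Per the doc comment above, the writer's job is to route each row through `outputPartitioner` to the endpoint for the resulting reader partition, and to tell every reader partition when this writer's epoch ends. A hedged sketch of that routing contract, with a generic `send` function standing in for the RPC call (the PR's actual writer works with `UnsafeRow`s and endpoint replies, which this omits):

```scala
import org.apache.spark.Partitioner

// Illustrative sketch of the routing contract only; not the PR's implementation.
// `send(readerPartition, message)` stands in for an RPC call to that endpoint.
class DemoShuffleWriter[T](
    writerId: Int,
    outputPartitioner: Partitioner,
    send: (Int, Any) => Unit) {

  def write(epoch: Iterator[T]): Unit = {
    // Each row goes to the reader partition the partitioner picks for it.
    epoch.foreach { row =>
      send(outputPartitioner.getPartition(row), (writerId, row))
    }
    // Every reader partition gets this writer's epoch marker, including
    // partitions that received no rows, so readers can close the epoch.
    (0 until outputPartitioner.numPartitions).foreach { p =>
      send(p, (writerId, "epoch marker"))
    }
  }
}
```

With `new HashPartitioner(1)`, as in the tests below, every row and marker goes to the single reader partition.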
@@ -70,7 +70,7 @@ class ContinuousShuffleReadSuite extends StreamTest {
     ctx.markTaskCompleted(None)
     val receiver = rdd.partitions(0).asInstanceOf[ContinuousShuffleReadPartition].reader
     eventually(timeout(streamingTimeout)) {
-      assert(receiver.asInstanceOf[UnsafeRowReceiver].stopped.get())
+      assert(receiver.asInstanceOf[RPCContinuousShuffleReader].stopped.get())
     }
   }

@@ -86,7 +86,7 @@ class ContinuousShuffleReadSuite extends StreamTest {
     ctx.markTaskCompleted(None)
     val receiver = rdd.partitions(0).asInstanceOf[ContinuousShuffleReadPartition].reader
     eventually(timeout(streamingTimeout)) {
-      assert(receiver.asInstanceOf[UnsafeRowReceiver].stopped.get())
+      assert(receiver.asInstanceOf[RPCContinuousShuffleReader].stopped.get())
     }
   }
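These assertions poll because the stop happens asynchronously, after the task completion listener fires. A standalone illustration of the same `eventually` pattern, with an `AtomicBoolean` standing in for the reader's `stopped` flag:

```scala
import java.util.concurrent.atomic.AtomicBoolean
import org.scalatest.concurrent.Eventually._
import org.scalatest.time.{Seconds, Span}

object EventuallyDemo extends App {
  val stopped = new AtomicBoolean(false)

  // Simulate a completion listener flipping the flag on another thread.
  new Thread(() => { Thread.sleep(100); stopped.set(true) }).start()

  // Retry the check until it passes or the timeout expires.
  eventually(timeout(Span(5, Seconds))) {
    if (!stopped.get()) throw new RuntimeException("receiver not stopped yet")
  }
  println("stop observed")
}
```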
@@ -22,7 +22,7 @@ import scala.collection.mutable
 import org.apache.spark.{HashPartitioner, Partition, TaskContext, TaskContextImpl}
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, UnsafeProjection, UnsafeRow}
-import org.apache.spark.sql.execution.streaming.continuous.shuffle.{ContinuousShuffleReadPartition, ContinuousShuffleReadRDD, UnsafeRowWriter}
+import org.apache.spark.sql.execution.streaming.continuous.shuffle.{ContinuousShuffleReadPartition, ContinuousShuffleReadRDD, RPCContinuousShuffleWriter}
 import org.apache.spark.sql.streaming.StreamTest
 import org.apache.spark.sql.types.{DataType, IntegerType}

@@ -85,7 +85,8 @@ class ContinuousShuffleSuite extends StreamTest {

   test("one epoch") {
Contributor: nit: I generally put the simplest tests first (likely the reader tests, since they don't depend on the writer) and the more complex, e2e-ish tests later (the writer tests, since they need readers).

jose-torres (Contributor, Author): Reordered.

     val reader = new ContinuousShuffleReadRDD(sparkContext, numPartitions = 1)
-    val writer = new UnsafeRowWriter(0, new HashPartitioner(1), Array(readRDDEndpoint(reader)))
+    val writer = new RPCContinuousShuffleWriter(
+      0, new HashPartitioner(1), Array(readRDDEndpoint(reader)))

     writer.write(Iterator(1, 2, 3))

@@ -94,7 +95,8 @@ class ContinuousShuffleSuite extends StreamTest {

test("multiple epochs") {
val reader = new ContinuousShuffleReadRDD(sparkContext, numPartitions = 1)
val writer = new UnsafeRowWriter(0, new HashPartitioner(1), Array(readRDDEndpoint(reader)))
val writer = new RPCContinuousShuffleWriter(
0, new HashPartitioner(1), Array(readRDDEndpoint(reader)))

writer.write(Iterator(1, 2, 3))
writer.write(Iterator(4, 5, 6))
Expand All @@ -107,7 +109,8 @@ class ContinuousShuffleSuite extends StreamTest {
     val data = new MultipleEpochRDD(1, Array(), Array(1, 2), Array(), Array(), Array(3, 4), Array())

     val reader = new ContinuousShuffleReadRDD(sparkContext, numPartitions = 1)
-    val writer = new UnsafeRowWriter(0, new HashPartitioner(1), Array(readRDDEndpoint(reader)))
+    val writer = new RPCContinuousShuffleWriter(
+      0, new HashPartitioner(1), Array(readRDDEndpoint(reader)))

     writer.write(Iterator())
     writer.write(Iterator(1, 2))

@@ -126,7 +129,8 @@

test("blocks waiting for writer") {
val reader = new ContinuousShuffleReadRDD(sparkContext, numPartitions = 1)
val writer = new UnsafeRowWriter(0, new HashPartitioner(1), Array(readRDDEndpoint(reader)))
val writer = new RPCContinuousShuffleWriter(
0, new HashPartitioner(1), Array(readRDDEndpoint(reader)))

val readerEpoch = reader.compute(reader.partitions(0), ctx)

Expand All @@ -152,7 +156,7 @@ class ContinuousShuffleSuite extends StreamTest {
     val reader = new ContinuousShuffleReadRDD(
       sparkContext, numPartitions = 1, numShuffleWriters = numWriterPartitions)
     val writers = (0 until 3).map { idx =>
-      new UnsafeRowWriter(idx, new HashPartitioner(1), Array(readRDDEndpoint(reader)))
+      new RPCContinuousShuffleWriter(idx, new HashPartitioner(1), Array(readRDDEndpoint(reader)))
     }

     writers(0).write(Iterator(1, 4, 7))

@@ -176,7 +180,7 @@
     val reader = new ContinuousShuffleReadRDD(
       sparkContext, numPartitions = 1, numShuffleWriters = numWriterPartitions)
     val writers = (0 until 3).map { idx =>
-      new UnsafeRowWriter(idx, new HashPartitioner(1), Array(readRDDEndpoint(reader)))
+      new RPCContinuousShuffleWriter(idx, new HashPartitioner(1), Array(readRDDEndpoint(reader)))
     }

     writers(1).write(Iterator())