Closed
Changes from 1 commit
Commits
35 commits
1d6b718
continuous shuffle read RDD
jose-torres May 15, 2018
b5d1008
docs
jose-torres May 17, 2018
af40769
Merge remote-tracking branch 'apache/master' into readerRddMaster
jose-torres May 17, 2018
46456dc
fix ctor
jose-torres May 17, 2018
2ea8a6f
multiple partition test
jose-torres May 17, 2018
955ac79
unset task context after test
jose-torres May 17, 2018
8cefb72
conf from RDD
jose-torres May 18, 2018
f91bfe7
endpoint name
jose-torres May 18, 2018
2590292
testing bool
jose-torres May 18, 2018
859e6e4
tests
jose-torres May 18, 2018
b23b7bb
take instead of poll
jose-torres May 18, 2018
97f7e8f
add interface
jose-torres May 18, 2018
de21b1c
clarify comment
jose-torres May 18, 2018
7dcf51a
multiple
jose-torres May 18, 2018
ad0b5aa
writer with 1 reader partition
jose-torres May 25, 2018
c9adee5
docs and iface
jose-torres May 25, 2018
63d38d8
Merge remote-tracking branch 'apache/master' into writerTask
jose-torres May 25, 2018
331f437
increment epoch
jose-torres May 25, 2018
f3ce675
undo oop
jose-torres May 25, 2018
e0108d7
make rdd loop
jose-torres May 25, 2018
f400651
remote write RDD
jose-torres May 25, 2018
1aaad8d
rename classes
jose-torres May 25, 2018
59890d4
combine suites
jose-torres May 25, 2018
af1508c
fully rm old suite
jose-torres May 25, 2018
65837ac
reorder tests
jose-torres May 29, 2018
a68fae2
return future
jose-torres May 31, 2018
98d55e4
finish getting rid of old name
jose-torres May 31, 2018
e6b9118
synchronous
jose-torres May 31, 2018
629455b
finish rename
jose-torres May 31, 2018
cb6d42b
add timeouts
jose-torres Jun 13, 2018
59d6ff7
unalign
jose-torres Jun 13, 2018
f90388c
add note
jose-torres Jun 13, 2018
4bbdeae
parallel
jose-torres Jun 13, 2018
e57531d
fix compile
jose-torres Jun 13, 2018
cff37c4
fix compile
jose-torres Jun 13, 2018
conf from RDD
jose-torres committed May 18, 2018
commit 8cefb724512b51f2aa1fdd81fa8a2d4560e60ce3
@@ -20,22 +20,19 @@ package org.apache.spark.sql.execution.streaming.continuous.shuffle
import java.util.UUID

import org.apache.spark.{Partition, SparkContext, SparkEnv, TaskContext}

import org.apache.spark.rdd.RDD
import org.apache.spark.rpc.{RpcEndpoint, RpcEndpointRef}
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.util.NextIterator

case class ContinuousShuffleReadPartition(index: Int) extends Partition {
  // Initialized only on the executor, and only once even as we call compute() multiple times.
  lazy val (receiver, endpoint) = {
    val env = SparkEnv.get.rpcEnv
    val receiver = new UnsafeRowReceiver(SQLConf.get.continuousStreamingExecutorQueueSize, env)
    val endpoint = env.setupEndpoint(UUID.randomUUID().toString, receiver)
    TaskContext.get().addTaskCompletionListener { ctx =>
      env.stop(endpoint)
    }
    (receiver, endpoint)
  }
  // Semantically lazy vals - initialized only on the executor, and only once even as we call
  // compute() multiple times. We need to initialize them inside compute() so we have access to the
  // RDD's conf.
  var receiver: UnsafeRowReceiver = _
  var endpoint: RpcEndpointRef = _
}

/**
@@ -46,14 +43,26 @@ case class ContinuousShuffleReadPartition(index: Int) extends Partition {
class ContinuousShuffleReadRDD(sc: SparkContext, numPartitions: Int)
    extends RDD[UnsafeRow](sc, Nil) {

  private val queueSize = sc.conf.get(SQLConf.CONTINUOUS_STREAMING_EXECUTOR_QUEUE_SIZE)

  override protected def getPartitions: Array[Partition] = {
    (0 until numPartitions).map(ContinuousShuffleReadPartition).toArray
  }

  override def compute(split: Partition, context: TaskContext): Iterator[UnsafeRow] = {
    val receiver = split.asInstanceOf[ContinuousShuffleReadPartition].receiver
    val part = split.asInstanceOf[ContinuousShuffleReadPartition]
    if (part.receiver == null) {
      val env = SparkEnv.get.rpcEnv
      part.receiver = new UnsafeRowReceiver(queueSize, env)
      part.endpoint = env.setupEndpoint(UUID.randomUUID().toString, part.receiver)
      TaskContext.get().addTaskCompletionListener { _ =>
        env.stop(part.endpoint)
      }
    }

    new NextIterator[UnsafeRow] {
      private val receiver = part.receiver

      override def getNext(): UnsafeRow = receiver.poll() match {
        case ReceiverRow(r) => r
        case ReceiverEpochMarker() =>
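
For context on the change above: the queue size that was previously looked up on the executor via SQLConf.get is now read from the driver-side conf when the RDD is constructed, and the receiver and endpoint are created at most once inside compute() and torn down by a task completion listener. Below is a minimal, self-contained sketch of that pattern, not the actual Spark implementation; QueueBackedRDD, QueueBackedPartition, and the example.queue.size key are invented for illustration, and a plain in-memory queue stands in for the RPC-backed UnsafeRowReceiver.

import java.util.concurrent.ArrayBlockingQueue

import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD

// Hypothetical partition mirroring ContinuousShuffleReadPartition: executor-side state lives in
// a var so it can be created inside compute() from values shipped with the RDD.
case class QueueBackedPartition(index: Int) extends Partition {
  // Semantically lazy: assigned on the executor, at most once even if compute() runs repeatedly.
  var queue: ArrayBlockingQueue[String] = _
}

// Hypothetical RDD showing the "conf from RDD" pattern: the size is read from the driver-side
// SparkConf at construction time and captured as a field, so compute() can use it on executors.
class QueueBackedRDD(sc: SparkContext, numPartitions: Int) extends RDD[String](sc, Nil) {

  // Driver-side read; "example.queue.size" is a made-up key used only for this sketch.
  private val queueSize = sc.getConf.getInt("example.queue.size", 16)

  override protected def getPartitions: Array[Partition] =
    (0 until numPartitions).map(QueueBackedPartition).toArray

  override def compute(split: Partition, context: TaskContext): Iterator[String] = {
    val part = split.asInstanceOf[QueueBackedPartition]
    if (part.queue == null) {
      part.queue = new ArrayBlockingQueue[String](queueSize)
      // Clean up the executor-side state when the task finishes, analogous to env.stop(endpoint).
      context.addTaskCompletionListener { _ => part.queue.clear() }
    }
    // A real reader would block on the queue; this sketch just drains whatever is present.
    Iterator.continually(part.queue.poll()).takeWhile(_ != null)
  }
}

With this shape, a value that only the driver can see travels to the executors inside the RDD object itself, while anything that must live on the executor (the queue here, the receiver and RPC endpoint in the real code) is created on first compute() and cleaned up by the completion listener.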