1717
1818package org .apache .spark
1919
20- import scala .concurrent .duration ._
21- import scala .collection .mutable
20+ import java .util .concurrent .{ScheduledFuture , TimeUnit , Executors }
2221
23- import akka . actor .{ Actor , Cancellable }
22+ import scala . collection . mutable
2423
2524import org .apache .spark .executor .TaskMetrics
25+ import org .apache .spark .rpc .{RpcEnv , RpcCallContext , RpcEndpoint }
2626import org .apache .spark .storage .BlockManagerId
2727import org .apache .spark .scheduler .{SlaveLost , TaskScheduler }
28- import org .apache .spark .util .ActorLogReceive
28+ import org .apache .spark .util .Utils
2929
3030/**
3131 * A heartbeat from executors to the driver. This is a shared message used by several internal
@@ -45,7 +45,9 @@ private[spark] case class HeartbeatResponse(reregisterBlockManager: Boolean)
4545 * Lives in the driver to receive heartbeats from executors.
4646 */
4747private [spark] class HeartbeatReceiver (sc : SparkContext , scheduler : TaskScheduler )
48- extends Actor with ActorLogReceive with Logging {
48+ extends RpcEndpoint with Logging {
49+
50+ override val rpcEnv : RpcEnv = sc.env.rpcEnv
4951
5052 // executor ID -> timestamp of when the last heartbeat from this executor was received
5153 private val executorLastSeen = new mutable.HashMap [String , Long ]
@@ -61,24 +63,31 @@ private[spark] class HeartbeatReceiver(sc: SparkContext, scheduler: TaskSchedule
6163 sc.conf.getOption(" spark.network.timeoutInterval" ).map(_.toLong * 1000 ).
6264 getOrElse(sc.conf.getLong(" spark.storage.blockManagerTimeoutIntervalMs" , 60000 ))
6365
64- private var timeoutCheckingTask : Cancellable = null
65-
66- override def preStart (): Unit = {
67- import context .dispatcher
68- timeoutCheckingTask = context.system.scheduler.schedule(0 .seconds,
69- checkTimeoutIntervalMs.milliseconds, self, ExpireDeadHosts )
70- super .preStart()
66+ private var timeoutCheckingTask : ScheduledFuture [_] = null
67+
68+ private val messageScheduler = Executors .newSingleThreadScheduledExecutor(
69+ Utils .namedThreadFactory(" heart-beat-receiver-thread" ))
70+
71+ override def onStart (): Unit = {
72+ timeoutCheckingTask = messageScheduler.scheduleAtFixedRate(new Runnable {
73+ override def run (): Unit = {
74+ self.send(ExpireDeadHosts )
75+ }
76+ }, 0 , checkTimeoutIntervalMs, TimeUnit .MILLISECONDS )
7177 }
72-
73- override def receiveWithLogging : PartialFunction [Any , Unit ] = {
78+
79+ override def receive : PartialFunction [Any , Unit ] = {
80+ case ExpireDeadHosts =>
81+ expireDeadHosts()
82+ }
83+
84+ override def receiveAndReply (context : RpcCallContext ): PartialFunction [Any , Unit ] = {
7485 case Heartbeat (executorId, taskMetrics, blockManagerId) =>
7586 val unknownExecutor = ! scheduler.executorHeartbeatReceived(
7687 executorId, taskMetrics, blockManagerId)
7788 val response = HeartbeatResponse (reregisterBlockManager = unknownExecutor)
7889 executorLastSeen(executorId) = System .currentTimeMillis()
79- sender ! response
80- case ExpireDeadHosts =>
81- expireDeadHosts()
90+ context.reply(response)
8291 }
8392
8493 private def expireDeadHosts (): Unit = {
@@ -98,10 +107,9 @@ private[spark] class HeartbeatReceiver(sc: SparkContext, scheduler: TaskSchedule
98107 }
99108 }
100109
101- override def postStop (): Unit = {
110+ override def onStop (): Unit = {
102111 if (timeoutCheckingTask != null ) {
103- timeoutCheckingTask.cancel()
112+ timeoutCheckingTask.cancel(true )
104113 }
105- super .postStop()
106114 }
107115}
0 commit comments