Blockingly drain all events in LiveListenerBus#stop(). #251
Changes from 2 commits
@@ -1,101 +1,119 @@

Before this change:

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.scheduler

import java.util.concurrent.LinkedBlockingQueue

import org.apache.spark.Logging

/**
 * Asynchronously passes SparkListenerEvents to registered SparkListeners.
 *
 * Until start() is called, all posted events are only buffered. Only after this listener bus
 * has started will events be actually propagated to all attached listeners. This listener bus
 * is stopped when it receives a SparkListenerShutdown event, which is posted using stop().
 */
private[spark] class LiveListenerBus extends SparkListenerBus with Logging {

  /* Cap the capacity of the SparkListenerEvent queue so we get an explicit error (rather than
   * an OOM exception) if it's perpetually being added to more quickly than it's being drained. */
  private val EVENT_QUEUE_CAPACITY = 10000
  private val eventQueue = new LinkedBlockingQueue[SparkListenerEvent](EVENT_QUEUE_CAPACITY)
  private var queueFullErrorMessageLogged = false
  private var started = false

  /**
   * Start sending events to attached listeners.
   *
   * This first sends out all buffered events posted before this listener bus has started, then
   * listens for any additional events asynchronously while the listener bus is still running.
   * This should only be called once.
   */
  def start() {
    if (started) {
      throw new IllegalStateException("Listener bus already started!")
    }
    started = true
    new Thread("SparkListenerBus") {
      setDaemon(true)
      override def run() {
        while (true) {
          val event = eventQueue.take
          if (event == SparkListenerShutdown) {
            // Get out of the while loop and shutdown the daemon thread
            return
          }
          postToAll(event)
        }
      }
    }.start()
  }

  def post(event: SparkListenerEvent) {
    val eventAdded = eventQueue.offer(event)
    if (!eventAdded && !queueFullErrorMessageLogged) {
      logError("Dropping SparkListenerEvent because no remaining room in event queue. " +
        "This likely means one of the SparkListeners is too slow and cannot keep up with the " +
        "rate at which tasks are being started by the scheduler.")
      queueFullErrorMessageLogged = true
    }
  }

  /**
   * Waits until there are no more events in the queue, or until the specified time has elapsed.
   * Used for testing only. Returns true if the queue has emptied and false if the specified time
   * elapsed before the queue emptied.
   */
  def waitUntilEmpty(timeoutMillis: Int): Boolean = {
    val finishTime = System.currentTimeMillis + timeoutMillis
    while (!eventQueue.isEmpty) {
      if (System.currentTimeMillis > finishTime) {
        return false
      }
      /* Sleep rather than using wait/notify, because this is used only for testing and wait/notify
       * add overhead in the general case. */
      Thread.sleep(10)
    }
    true
  }

  def stop() {
    if (!started) {
      throw new IllegalStateException("Attempted to stop a listener bus that has not yet started!")
    }
    post(SparkListenerShutdown)
  }
}
After this change:

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.scheduler

import java.util.concurrent.LinkedBlockingQueue

import org.apache.spark.Logging

/**
 * Asynchronously passes SparkListenerEvents to registered SparkListeners.
 *
 * Until start() is called, all posted events are only buffered. Only after this listener bus
 * has started will events be actually propagated to all attached listeners. This listener bus
 * is stopped when it receives a SparkListenerShutdown event, which is posted using stop().
 */
private[spark] class LiveListenerBus extends SparkListenerBus with Logging {

  /* Cap the capacity of the SparkListenerEvent queue so we get an explicit error (rather than
   * an OOM exception) if it's perpetually being added to more quickly than it's being drained. */
  private val EVENT_QUEUE_CAPACITY = 10000
  private val eventQueue = new LinkedBlockingQueue[SparkListenerEvent](EVENT_QUEUE_CAPACITY)
  private var queueFullErrorMessageLogged = false
  private var started = false

  private var drained = false
  private val drainedLock = new Object()

  /**
   * Start sending events to attached listeners.
   *
   * This first sends out all buffered events posted before this listener bus has started, then
   * listens for any additional events asynchronously while the listener bus is still running.
   * This should only be called once.
   */
  def start() {
    if (started) {
      throw new IllegalStateException("Listener bus already started!")
    }
    started = true
    new Thread("SparkListenerBus") {
      setDaemon(true)
      override def run() {
        while (true) {
          val event = eventQueue.take
          if (event == SparkListenerShutdown) {
            drainedLock.synchronized {
              drained = true
              drainedLock.notify()
            }
            // Get out of the while loop and shutdown the daemon thread
            return
          }
          postToAll(event)
        }
      }
    }.start()
  }

  def post(event: SparkListenerEvent) {
    val eventAdded = eventQueue.offer(event)
    if (!eventAdded && !queueFullErrorMessageLogged) {
      logError("Dropping SparkListenerEvent because no remaining room in event queue. " +
        "This likely means one of the SparkListeners is too slow and cannot keep up with the " +
        "rate at which tasks are being started by the scheduler.")
      queueFullErrorMessageLogged = true
    }
  }

  /**
   * Waits until there are no more events in the queue, or until the specified time has elapsed.
   * Used for testing only. Returns true if the queue has emptied and false if the specified time
   * elapsed before the queue emptied.
   */
  def waitUntilEmpty(timeoutMillis: Int): Boolean = {
    val finishTime = System.currentTimeMillis + timeoutMillis
    while (!eventQueue.isEmpty) {
      if (System.currentTimeMillis > finishTime) {
        return false
      }
      /* Sleep rather than using wait/notify, because this is used only for testing and wait/notify
       * add overhead in the general case. */
      Thread.sleep(10)
    }
    true
  }

  /**
   * Stop the listener bus; wait until all listener events are processed by the listener bus
   * thread. The user has to make sure the listeners finish in a reasonable amount of time.
   */
  def stop() {
    if (!started) {
      throw new IllegalStateException("Attempted to stop a listener bus that has not yet started!")
    }
    drainedLock.synchronized {
      // put post() and wait() in the same synchronized block to ensure wait() happens before
      // notify()
      post(SparkListenerShutdown)
      while (!drained) {
        drainedLock.wait()
      }
    }
  }
}
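To make the new semantics concrete, here is a minimal caller-side sketch; the anonymous listener is a placeholder for illustration, not code from this PR:

import org.apache.spark.scheduler._

val bus = new LiveListenerBus
bus.addListener(new SparkListener {
  // Placeholder listener: just records that the event was delivered.
  override def onJobEnd(job: SparkListenerJobEnd) = println("job ended")
})
bus.start()
bus.post(SparkListenerJobEnd(0, JobSucceeded))
// Before this change, stop() only enqueued SparkListenerShutdown and returned immediately,
// so the JobEnd event above could still be sitting unprocessed in the queue when the
// application exited. With this change, stop() waits on drainedLock until the
// SparkListenerBus thread has drained everything posted before the shutdown event.
bus.stop()

The test added below exercises exactly this guarantee.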
@@ -72,6 +72,20 @@ class SparkListenerSuite extends FunSuite with LocalSparkContext with ShouldMatchers

    }
  }
  test("bus.stop() waits for the event queue to completely drain") {
    val sleepyListener = new SleepyListener(1000)
Contributor:
Relying on sleeps like this is a little bit brittle, and it also means this test will take 5 seconds, which is a long time. What about instead using a synchronization mechanism and creating a …

Author:
Fixed; I use a Semaphore instead and create a …

Author:
Also, the test now runs in ~200ms on my laptop.
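The exchange above is truncated, so the helper that eventually replaced SleepyListener is not shown in this view. As a rough sketch of the Semaphore-based approach being described (the name BlockingListener and its shape are assumptions, not the committed code):

import java.util.concurrent.Semaphore

// Assumed helper: each onJobEnd blocks until the test releases a permit, so the test
// controls exactly when the listener bus is allowed to drain its queue, with no
// fixed sleeps involved.
class BlockingListener extends SparkListener {
  val permits = new Semaphore(0)
  override def onJobEnd(job: SparkListenerJobEnd) = {
    permits.acquire()
  }
}

A test could post its events, assert that the queue has not drained, call permits.release(n), and then rely on the now-blocking stop() to return only once every event has been handled. The diff below still shows the SleepyListener-based version from this commit.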
    val bus = new LiveListenerBus
    bus.addListener(sleepyListener)
    (1 to 5).foreach { _ => bus.post(SparkListenerJobEnd(0, JobSucceeded)) }

    bus.start()
    // since the handler is just thread sleep, the queue should not drain immediately
    assert(!bus.waitUntilEmpty(0))
    bus.stop()
    // bus.stop() should wait until the event queue is drained, ensuring no events are lost
    assert(bus.waitUntilEmpty(0))
Contributor:
Fairly minor thing, but I think it makes sense to expose something like …

Author:
Good call. I added a …
  }

  test("basic creation of StageInfo") {
    val listener = new SaveStageAndTaskInfo
    sc.addSparkListener(listener)
@@ -282,4 +296,11 @@ class SparkListenerSuite extends FunSuite with LocalSparkContext with ShouldMatchers

      startedGettingResultTasks += taskGettingResult.taskInfo.index
    }
  }

  class SleepyListener(val sleepTime: Long) extends SparkListener {
    override def onJobEnd(job: SparkListenerJobEnd) = {
      Thread.sleep(sleepTime)
    }
  }

}
Reviewer:
Can't you just check for eventQueue.isEmpty? That can get rid of the drained variable. Also, if you synchronize on an existing variable (e.g. eventQueue), then you don't even need drainedLock.

Author:
I used drained instead of checking isEmpty because I wasn't absolutely sure whether it is possible (currently in the codebase, or in the future) for new events to be posted after this stop() method has been called. If it is possible, then isEmpty being true is not enough to imply that the end-of-work / shutdown signal has been reached; although we can enforce a semantics that ignores such events, we can't really prevent them from popping up. As for the second point: after a bit of googling, it seems synchronizing on a thread-safe queue can cause issues when using the queue.

Reviewer:
Hm, yeah, these are good points. It's just a little strange to equate "the event queue being drained" with "the shutdown event is received." These are not inherently the same things, but we're treating them as if they were.
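For context on the trade-off being debated, here is a rough sketch (not code from this PR, and not what was merged) of what the isEmpty-based alternative could look like as a stop() method inside LiveListenerBus, with the author's caveats noted inline:

// Hypothetical alternative raised in the review, shown only for comparison: wait for
// the queue itself to empty instead of waiting for the consumer thread to observe the
// shutdown event. This drops the drained flag and drainedLock, but, as the author
// points out, an empty queue only proves the events posted so far have been taken;
// if anything can still call post() after stop(), emptiness is not a reliable signal
// that the shutdown event has actually been reached.
def stop() {
  if (!started) {
    throw new IllegalStateException("Attempted to stop a listener bus that has not yet started!")
  }
  post(SparkListenerShutdown)
  while (!eventQueue.isEmpty) {
    Thread.sleep(10)  // poll, as waitUntilEmpty() does, rather than use wait/notify
  }
}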