Merged

Commits (19):
1e752f1  Added unpersist method to Broadcast. (Feb 5, 2014)
80dd977  Fix for Broadcast unpersist patch. (Feb 6, 2014)
c7ccef1  Merge branch 'bc-unpersist-merge' of github.com:ignatich/incubator-sp… (andrewor14, Mar 26, 2014)
ba52e00  Refactor broadcast classes (andrewor14, Mar 26, 2014)
d0edef3  Add framework for broadcast cleanup (andrewor14, Mar 26, 2014)
544ac86  Clean up broadcast blocks through BlockManager* (andrewor14, Mar 26, 2014)
e95479c  Add tests for unpersisting broadcast (andrewor14, Mar 27, 2014)
f201a8d  Test broadcast cleanup in ContextCleanerSuite + remove BoundedHashMap (andrewor14, Mar 27, 2014)
c92e4d9  Merge github.com:apache/spark into cleanup (andrewor14, Mar 27, 2014)
0d17060  Import, comments, and style fixes (minor) (andrewor14, Mar 28, 2014)
34f436f  Generalize BroadcastBlockId to remove BroadcastHelperBlockId (andrewor14, Mar 28, 2014)
fbfeec8  Add functionality to query executors for their local BlockStatuses (andrewor14, Mar 29, 2014)
88904a3  Make TimeStampedWeakValueHashMap a wrapper of TimeStampedHashMap (andrewor14, Mar 29, 2014)
e442246  Merge github.com:apache/spark into cleanup (andrewor14, Mar 29, 2014)
8557c12  Merge github.com:apache/spark into cleanup (andrewor14, Mar 30, 2014)
634a097  Merge branch 'state-cleanup' of github.com:tdas/spark into cleanup (andrewor14, Mar 31, 2014)
7ed72fb  Fix style test fail + remove verbose test message regarding broadcast (andrewor14, Mar 31, 2014)
5016375  Address TD's comments (andrewor14, Apr 1, 2014)
f0aabb1  Correct semantics for TimeStampedWeakValueHashMap + add tests (andrewor14, Apr 2, 2014)

Fix for Broadcast unpersist patch.
Updated comment in MemoryStore.dropFromMemory
Keep TorrentBroadcast piece blocks until unpersist is called
Roman Pastukhov committed Feb 6, 2014
commit 80dd9778d2e7338bc93bc7de95ecc6776b0d9e8b
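
Note: for context, the method this commit fixes lets callers free a broadcast's cached blocks once the variable is no longer needed. A minimal driver-side usage sketch, assuming a local SparkContext and the unpersist(removeSource) signature as it appears in this patch (the variable names are illustrative):

    import org.apache.spark.{SparkConf, SparkContext}

    object BroadcastUnpersistExample {
      def main(args: Array[String]) {
        val conf = new SparkConf().setMaster("local[2]").setAppName("unpersist-demo")
        val sc = new SparkContext(conf)
        val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2))

        // Executors fetch and cache the broadcast blocks on first use.
        val total = sc.parallelize(Seq("a", "b")).map(k => lookup.value(k)).collect().sum
        println(total)

        // Drop the cached blocks cluster-wide. With removeSource = true the
        // driver-side source (HTTP files or torrent pieces) is deleted as well,
        // so the broadcast can no longer be re-fetched afterwards.
        lookup.unpersist(removeSource = true)
        sc.stop()
      }
    }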
HttpBroadcast.scala
@@ -35,11 +35,15 @@ private[spark] class HttpBroadcast[T](@transient var value_ : T, isLocal: Boolean, id: Long)
   def value = value_

   def unpersist(removeSource: Boolean) {
-    SparkEnv.get.blockManager.master.removeBlock(blockId)
-    SparkEnv.get.blockManager.removeBlock(blockId)
+    HttpBroadcast.synchronized {
+      SparkEnv.get.blockManager.master.removeBlock(blockId)
+      SparkEnv.get.blockManager.removeBlock(blockId)
+    }

     if (removeSource) {
-      HttpBroadcast.cleanupById(id)
+      HttpBroadcast.synchronized {
+        HttpBroadcast.cleanupById(id)
+      }
     }
   }
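
Note: the fix serializes block removal and source cleanup on a single lock, the HttpBroadcast companion object's monitor, so unpersist cannot race with concurrent writes or cleanup of the same broadcast files. A minimal sketch of that object-level locking pattern in isolation (the Registry object and its methods are hypothetical stand-ins, not Spark APIs):

    import scala.collection.mutable

    // `Registry.synchronized` takes the monitor of the singleton object,
    // so every caller across all threads contends on the same lock.
    object Registry {
      private val files = mutable.Map[Long, String]()

      def add(id: Long, path: String): Unit = Registry.synchronized {
        files(id) = path
      }

      def cleanupById(id: Long): Unit = Registry.synchronized {
        files.remove(id).foreach(path => println("deleting " + path))
      }
    }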
TorrentBroadcast.scala
@@ -33,19 +33,55 @@ extends Broadcast[T](id) with Logging with Serializable {
   def value = value_

   def unpersist(removeSource: Boolean) {
-    SparkEnv.get.blockManager.master.removeBlock(broadcastId)
-    SparkEnv.get.blockManager.removeBlock(broadcastId)
+    TorrentBroadcast.synchronized {
+      SparkEnv.get.blockManager.master.removeBlock(broadcastId)
+      SparkEnv.get.blockManager.removeBlock(broadcastId)
+    }
+
+    if (!removeSource) {
+      // We can't tell BlockManager master to remove blocks from all nodes except driver,
+      // so we need to save them here in order to store them on disk later.
+      // This may be inefficient if blocks were already dropped to disk,
+      // but since unpersist is supposed to be called right after working with
+      // a broadcast this should not happen (and getting them from memory is cheap).
+      arrayOfBlocks = new Array[TorrentBlock](totalBlocks)
+
+      for (pid <- 0 until totalBlocks) {
+        val pieceId = pieceBlockId(pid)
+        TorrentBroadcast.synchronized {
+          SparkEnv.get.blockManager.getSingle(pieceId) match {
+            case Some(x) =>
+              arrayOfBlocks(pid) = x.asInstanceOf[TorrentBlock]
+            case None =>
+              throw new SparkException("Failed to get " + pieceId + " of " + broadcastId)
+          }
+        }
+      }
+    }
+
+    for (pid <- 0 until totalBlocks) {
+      TorrentBroadcast.synchronized {
+        SparkEnv.get.blockManager.master.removeBlock(pieceBlockId(pid))
+      }
+    }

     if (removeSource) {
-      for (pid <- pieceIds) {
-        SparkEnv.get.blockManager.removeBlock(pieceBlockId(pid))
-      }
-      SparkEnv.get.blockManager.removeBlock(metaId)
+      TorrentBroadcast.synchronized {
+        SparkEnv.get.blockManager.removeBlock(metaId)
+      }
     } else {
-      for (pid <- pieceIds) {
-        SparkEnv.get.blockManager.dropFromMemory(pieceBlockId(pid))
-      }
-      SparkEnv.get.blockManager.dropFromMemory(metaId)
+      TorrentBroadcast.synchronized {
+        SparkEnv.get.blockManager.dropFromMemory(metaId)
+      }
+
+      for (i <- 0 until totalBlocks) {
+        val pieceId = pieceBlockId(i)
+        TorrentBroadcast.synchronized {
+          SparkEnv.get.blockManager.putSingle(
+            pieceId, arrayOfBlocks(i), StorageLevel.DISK_ONLY, true)
+        }
+      }
+      arrayOfBlocks = null
     }
   }

@@ -128,11 +164,6 @@ extends Broadcast[T](id) with Logging with Serializable {
   }

   private def resetWorkerVariables() {
-    if (arrayOfBlocks != null) {
-      for (pid <- pieceIds) {
-        SparkEnv.get.blockManager.removeBlock(pieceBlockId(pid))
-      }
-    }
     arrayOfBlocks = null
     totalBytes = -1
     totalBlocks = -1
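
Note: the !removeSource branch is a save-then-restore dance: piece blocks are copied out of the local BlockManager, removed cluster-wide through the master, then re-inserted locally at StorageLevel.DISK_ONLY so the driver can still reseed the torrent. A minimal sketch of that idea in isolation (the Store class and its methods are a toy stand-in for BlockManager, not Spark APIs):

    import scala.collection.mutable

    // A toy two-tier block store standing in for BlockManager.
    class Store {
      private val mem = mutable.Map[String, Array[Byte]]()
      private val disk = mutable.Map[String, Array[Byte]]()
      def getSingle(id: String): Option[Array[Byte]] = mem.get(id).orElse(disk.get(id))
      def remove(id: String): Unit = { mem.remove(id); disk.remove(id) }
      def putDiskOnly(id: String, v: Array[Byte]): Unit = disk(id) = v
    }

    // Save pieces, remove them everywhere, then re-store them on disk only,
    // mirroring the !removeSource path of TorrentBroadcast.unpersist.
    def demoteToDisk(store: Store, pieceIds: Seq[String]): Unit = {
      val saved = pieceIds.map { id =>
        id -> store.getSingle(id).getOrElse(sys.error("Failed to get " + id))
      }
      pieceIds.foreach(store.remove)  // in real Spark this is a cluster-wide removal
      saved.foreach { case (id, v) => store.putDiskOnly(id, v) }
    }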
MemoryStore.scala
@@ -187,9 +187,9 @@ private class MemoryStore(blockManager: BlockManager, maxMemory: Long)
    */
   def dropFromMemory(blockId: BlockId) {
     val entry = entries.synchronized { entries.get(blockId) }
-    // This should never be null as only one thread should be dropping
-    // blocks and removing entries. However the check is still here for
-    // future safety.
+    // This should never be null if called from ensureFreeSpace as only one
+    // thread should be dropping blocks and removing entries.
+    // However the check is required in other cases.
     if (entry != null) {
       val data = if (entry.deserialized) {
         Left(entry.value.asInstanceOf[ArrayBuffer[Any]])
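
Note: the comment change records a real semantic shift. Once user code can trigger unpersist, dropFromMemory may race with a concurrent block removal, so the looked-up entry can legitimately be null and the method must degrade to a no-op rather than assume single-threaded dropping. A minimal sketch of that guarded pattern (hypothetical names, not Spark's types):

    import java.util.concurrent.ConcurrentHashMap

    object DropDemo {
      private val entries = new ConcurrentHashMap[String, AnyRef]()

      // Tolerate concurrent removal: if another thread (e.g. an unpersist call)
      // already removed the block, entry is null and the drop becomes a no-op.
      def dropFromMemory(blockId: String): Unit = {
        val entry = entries.remove(blockId)
        if (entry != null) {
          println("dropped " + blockId)  // real code would spill to disk or report the drop
        }
      }
    }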