46 commits
b7c9c23
Move Unsafe mem. mgrs. to spark-core subproject.
JoshRosen Oct 14, 2015
25ba4b5
Merge ExecutorMemoryManager into MemoryManager.
JoshRosen Oct 14, 2015
3d997ce
Naming and formatting fixes.
JoshRosen Oct 16, 2015
d9e6b84
Move Tungsten-related methods to end of MemoryManager file.
JoshRosen Oct 16, 2015
98ef86b
Add taskAttemptId to TaskMemoryManager constructor.
JoshRosen Oct 16, 2015
8f93e94
Move ShuffleMemoryManager into memory package.
JoshRosen Oct 16, 2015
3bbc54d
Merge remote-tracking branch 'origin/master' into SPARK-10984
JoshRosen Oct 16, 2015
88a7970
Fix bug in AbstractBytesToBytesMapSuite.
JoshRosen Oct 16, 2015
ec48ff9
Refactor the existing Tungsten TaskMemoryManager interactions so Tung…
JoshRosen Oct 16, 2015
6f98bc4
Move TaskMemoryManager from unsafe to memory.
JoshRosen Oct 16, 2015
6459397
Further minimization of ShuffleMemoryManager usage.
JoshRosen Oct 16, 2015
60c66b2
Merge ShuffleMemoryManager into MemoryManager.
JoshRosen Oct 17, 2015
7d6a37f
Clean up interaction between TaskMemoryManager and MemoryManager.
JoshRosen Oct 17, 2015
0dc21dc
Merge remote-tracking branch 'origin/master' into SPARK-10984
JoshRosen Oct 22, 2015
f21b767
Fix compilation.
JoshRosen Oct 22, 2015
46ad693
Fix Scalastyle
JoshRosen Oct 22, 2015
c33e330
Fix import ordering in Executor.scala
JoshRosen Oct 22, 2015
ef45d91
Fix import ordering in Task.scala
JoshRosen Oct 22, 2015
c7eac69
Fix import ordering in TaskContextImpl
JoshRosen Oct 22, 2015
d86f435
Fix spillable collection tests
JoshRosen Oct 22, 2015
bba5550
Integrate TaskMemoryManager acquire/releasePage with MemoryManager bo…
JoshRosen Oct 22, 2015
66ae259
Move pooling logic into allocators themselves.
JoshRosen Oct 22, 2015
b1d5151
Scaladoc updates.
JoshRosen Oct 22, 2015
d0c0dd9
Update Spillable to properly integrate with TaskMemoryManager.
JoshRosen Oct 22, 2015
48149fc
Move pageSizeBytes to Tungsten section
JoshRosen Oct 23, 2015
c8ba196
Cleanup after merging of ShuffleMemoryManager into MemoryManager.
JoshRosen Oct 23, 2015
63a6cbc
Rename getMemoryConsumptionForThisTask to getExecutionMemoryUsageForTask
JoshRosen Oct 23, 2015
6ec9c30
Properly thread numCores to memory manager.
JoshRosen Oct 23, 2015
1593fad
Explain why MemoryBlock.pageNumber is public
JoshRosen Oct 23, 2015
64bec0b
Fix TaskMemoryManagerSuite tests.
JoshRosen Oct 23, 2015
f9240e9
Fix compilation
JoshRosen Oct 23, 2015
a95bc08
Fix a memory leak in UnsafeShuffleWriter's sorter
JoshRosen Oct 23, 2015
b3ad761
Remove println
JoshRosen Oct 23, 2015
a7e8320
Fix Scalastyle.
JoshRosen Oct 23, 2015
e874a45
Fix remaining TODOs in UnsafeShuffleWriterSuite.
JoshRosen Oct 23, 2015
2ba6e51
Fix DeveloperAPI change
JoshRosen Oct 23, 2015
0c13723
Address comments in MemoryManager
JoshRosen Oct 23, 2015
04ec429
Release memory acquired after unsuccessful allocatePage() call
JoshRosen Oct 23, 2015
e56d039
Fix EAOM compilation.
JoshRosen Oct 23, 2015
aa14113
Port tests from ShuffleMemoryManagerSuite
JoshRosen Oct 23, 2015
7addf8b
Remove unused non-page-memory allocation methods.
JoshRosen Oct 23, 2015
5af0b17
Update Tungsten tests
JoshRosen Oct 23, 2015
a264703
Fix execution memory leaks in Spillable collections
JoshRosen Oct 24, 2015
f2ab708
Fix NPE in UnsafeRowSerializerSuite
JoshRosen Oct 24, 2015
0b5c72f
Update EAOM tests to reflect fact that iterator() is destructive.
JoshRosen Oct 24, 2015
f68fdb1
Fix streaming test compilation
JoshRosen Oct 26, 2015
Cleanup after merging of ShuffleMemoryManager into MemoryManager.
JoshRosen committed Oct 23, 2015
commit c8ba196e6628c12ae3cc4517eb343dd546715716
TaskMemoryManager.java
@@ -120,14 +120,14 @@ public TaskMemoryManager(MemoryManager memoryManager, long taskAttemptId) {
* @return number of bytes successfully granted (<= N).
*/
public long acquireExecutionMemory(long size) {
return memoryManager.tryToAcquire(size, taskAttemptId);
return memoryManager.acquireExecutionMemory(size, taskAttemptId);
}

/**
* Release N bytes of execution memory.
*/
public void releaseExecutionMemory(long size) {
memoryManager.release(size, taskAttemptId);
memoryManager.releaseExecutionMemory(size, taskAttemptId);
}

public long pageSizeBytes() {
@@ -317,7 +317,7 @@ public long cleanUpAllAllocatedMemory() {
}

freedBytes += memoryManager.getMemoryConsumptionForTask(taskAttemptId);
memoryManager.releaseMemoryForTask(taskAttemptId);
memoryManager.releaseAllExecutionMemoryForTask(taskAttemptId);

return freedBytes;
}
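
The hunk above turns TaskMemoryManager into a thin per-task front end over the shared MemoryManager, with every acquire and release call tagged by taskAttemptId. Below is a minimal sketch of that delegation pattern; ExecutionMemoryPool and PerTaskFacade are illustrative stand-ins, not the real Spark types, and only the calls visible in this hunk are modeled.

```scala
// Sketch only: ExecutionMemoryPool and PerTaskFacade are illustrative
// stand-ins for MemoryManager and TaskMemoryManager, limited to the calls
// shown in the hunk above.
trait ExecutionMemoryPool {
  def acquireExecutionMemory(numBytes: Long, taskAttemptId: Long): Long
  def releaseExecutionMemory(numBytes: Long, taskAttemptId: Long): Unit
  def getMemoryConsumptionForTask(taskAttemptId: Long): Long
  def releaseAllExecutionMemoryForTask(taskAttemptId: Long): Unit
}

class PerTaskFacade(pool: ExecutionMemoryPool, taskAttemptId: Long) {
  // Every request is charged to this task's attempt id.
  def acquireExecutionMemory(size: Long): Long =
    pool.acquireExecutionMemory(size, taskAttemptId)

  def releaseExecutionMemory(size: Long): Unit =
    pool.releaseExecutionMemory(size, taskAttemptId)

  // At task completion: report whatever is still charged to this task as
  // leaked, then hand it all back to the shared pool.
  def cleanUpAllAllocatedMemory(): Long = {
    val freedBytes = pool.getMemoryConsumptionForTask(taskAttemptId)
    pool.releaseAllExecutionMemoryForTask(taskAttemptId)
    freedBytes
  }
}
```

Keeping the taskAttemptId inside the facade is what lets the shared manager arbitrate memory fairly across tasks, as the MemoryManager changes below show.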
core/src/main/scala/org/apache/spark/memory/MemoryManager.scala (213 changes: 105 additions and 108 deletions)
@@ -22,6 +22,8 @@ import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

import com.google.common.annotations.VisibleForTesting

import org.apache.spark.{SparkException, TaskContext, SparkConf, Logging}
import org.apache.spark.storage.{BlockId, BlockStatus, MemoryStore}
import org.apache.spark.unsafe.array.ByteArrayMethods
@@ -41,7 +43,8 @@ import org.apache.spark.unsafe.memory.MemoryAllocator
* before it has to spill, and at most 1 / N. Because N varies dynamically, we keep track of the
* set of active tasks and redo the calculations of 1 / 2N and 1 / N in waiting tasks whenever
* this set changes. This is all done by synchronizing access to mutable state and using wait() and
* notifyAll() to signal changes to callers.
* notifyAll() to signal changes to callers. Prior to Spark 1.6, this arbitration of memory across
* tasks was performed by the ShuffleMemoryManager.
*/
private[spark] abstract class MemoryManager(conf: SparkConf, numCores: Int = 1) extends Logging {
// TODO(josh) pass in numCores
@@ -60,6 +63,8 @@ private[spark] abstract class MemoryManager(conf: SparkConf, numCores: Int = 1)
// Amount of execution/storage memory in use, accesses must be synchronized on `this`
@GuardedBy("this") protected var _executionMemoryUsed: Long = 0
@GuardedBy("this") protected var _storageMemoryUsed: Long = 0
// Map from taskAttemptId -> memory consumption in bytes
@GuardedBy("this") private val memoryConsumptionForTask = new mutable.HashMap[Long, Long]()
Review comment (Contributor): executionMemoryForTask?
Reply (Contributor Author): Good idea.


/**
* Set the [[MemoryStore]] used by this manager to evict cached blocks.
@@ -81,15 +86,6 @@ private[spark] abstract class MemoryManager(conf: SparkConf, numCores: Int = 1)

// TODO: avoid passing evicted blocks around to simplify method signatures (SPARK-10985)

/**
* Acquire N bytes of memory for execution, evicting cached blocks if necessary.
* Blocks evicted in the process, if any, are added to `evictedBlocks`.
* @return number of bytes successfully granted (<= N).
*/
def acquireExecutionMemory(
numBytes: Long,
evictedBlocks: mutable.Buffer[(BlockId, BlockStatus)]): Long

/**
* Acquire N bytes of memory to cache the given block, evicting existing ones if necessary.
* Blocks evicted in the process, if any, are added to `evictedBlocks`.
@@ -118,91 +114,59 @@ private[spark] abstract class MemoryManager(conf: SparkConf, numCores: Int = 1)
}

/**
* Release N bytes of execution memory.
*/
def releaseExecutionMemory(numBytes: Long): Unit = synchronized {
if (numBytes > _executionMemoryUsed) {
logWarning(s"Attempted to release $numBytes bytes of execution " +
s"memory when we only have ${_executionMemoryUsed} bytes")
_executionMemoryUsed = 0
} else {
_executionMemoryUsed -= numBytes
}
}

/**
* Release N bytes of storage memory.
*/
def releaseStorageMemory(numBytes: Long): Unit = synchronized {
if (numBytes > _storageMemoryUsed) {
logWarning(s"Attempted to release $numBytes bytes of storage " +
s"memory when we only have ${_storageMemoryUsed} bytes")
_storageMemoryUsed = 0
} else {
_storageMemoryUsed -= numBytes
}
}

/**
* Release all storage memory acquired.
*/
def releaseAllStorageMemory(): Unit = synchronized {
_storageMemoryUsed = 0
}

/**
* Release N bytes of unroll memory.
*/
def releaseUnrollMemory(numBytes: Long): Unit = synchronized {
releaseStorageMemory(numBytes)
}

/**
* Execution memory currently in use, in bytes.
*/
final def executionMemoryUsed: Long = synchronized {
_executionMemoryUsed
}

/**
* Storage memory currently in use, in bytes.
* Acquire N bytes of memory for execution, evicting cached blocks if necessary.
* Blocks evicted in the process, if any, are added to `evictedBlocks`.
* @return number of bytes successfully granted (<= N).
*/
final def storageMemoryUsed: Long = synchronized {
_storageMemoryUsed
}

// -- Policies for arbitrating execution memory across tasks -------------------------------------
// Prior to Spark 1.6, these policies were implemented in the ShuffleMemoryManager.


private val taskMemory = new mutable.HashMap[Long, Long]() // taskAttemptId -> memory bytes
@VisibleForTesting
private[memory] def doAcquireExecutionMemory(
numBytes: Long,
evictedBlocks: mutable.Buffer[(BlockId, BlockStatus)]): Long

/**
* Try to acquire up to numBytes memory for the current task, and return the number of bytes
* obtained, or 0 if none can be allocated. This call may block until there is enough free memory
* in some situations, to make sure each task has a chance to ramp up to at least 1 / 2N of the
* total memory pool (where N is the # of active tasks) before it is forced to spill. This can
* happen if the number of tasks increases but an older task had a lot of memory already.
* Try to acquire up to `numBytes` of execution memory for the current task and return the number
* of bytes obtained, or 0 if none can be allocated.
*
* This call may block until there is enough free memory in some situations, to make sure each
* task has a chance to ramp up to at least 1 / 2N of the total memory pool (where N is the # of
* active tasks) before it is forced to spill. This can happen if the number of tasks increase
* but an older task had a lot of memory already.
*/
def tryToAcquire(numBytes: Long, taskAttemptId: Long): Long = synchronized {
def acquireExecutionMemory(numBytes: Long, taskAttemptId: Long): Long = synchronized {
Review comment (Contributor): final def?
Review comment (Contributor): then maybe we should add that "subclasses should override doAcquireExecutionMemory instead" in the java doc.
Reply (Contributor Author): Fixed.

assert(numBytes > 0, "invalid number of bytes requested: " + numBytes)

// Add this task to the taskMemory map just so we can keep an accurate count of the number
// of active tasks, to let other tasks ramp down their memory in calls to tryToAcquire
if (!taskMemory.contains(taskAttemptId)) {
taskMemory(taskAttemptId) = 0L
if (!memoryConsumptionForTask.contains(taskAttemptId)) {
memoryConsumptionForTask(taskAttemptId) = 0L
// This will later cause waiting tasks to wake up and check numTasks again
notifyAll()
}

// Once the cross-task memory allocation policy has decided to grant more memory to a task,
// this method is called in order to actually obtain that execution memory, potentially
// triggering eviction of storage memory:
def acquire(toGrant: Long): Long = synchronized {
val evictedBlocks = new ArrayBuffer[(BlockId, BlockStatus)]
val acquired = doAcquireExecutionMemory(toGrant, evictedBlocks)
// Register evicted blocks, if any, with the active task metrics
Option(TaskContext.get()).foreach { tc =>
val metrics = tc.taskMetrics()
val lastUpdatedBlocks = metrics.updatedBlocks.getOrElse(Seq[(BlockId, BlockStatus)]())
metrics.updatedBlocks = Some(lastUpdatedBlocks ++ evictedBlocks.toSeq)
}
memoryConsumptionForTask(taskAttemptId) += acquired
acquired
}

// Keep looping until we're either sure that we don't want to grant this request (because this
// task would have more than 1 / numActiveTasks of the memory) or we have enough free
// memory to give it (we always let each task get at least 1 / (2 * numActiveTasks)).
// TODO: simplify this to limit each task to its own slot
while (true) {
val numActiveTasks = taskMemory.keys.size
val curMem = taskMemory(taskAttemptId)
val freeMemory = maxExecutionMemory - taskMemory.values.sum
val numActiveTasks = memoryConsumptionForTask.keys.size
val curMem = memoryConsumptionForTask(taskAttemptId)
val freeMemory = maxExecutionMemory - memoryConsumptionForTask.values.sum

// How much we can grant this task; don't let it grow to more than 1 / numActiveTasks;
// don't let it be negative
@@ -217,62 +181,95 @@ private[spark] abstract class MemoryManager(conf: SparkConf, numCores: Int = 1)
// (this happens if older tasks allocated lots of memory before N grew)
if (
freeMemory >= math.min(maxToGrant, maxExecutionMemory / (2 * numActiveTasks) - curMem)) {
return acquire(toGrant, taskAttemptId)
return acquire(toGrant)
} else {
logInfo(
s"TID $taskAttemptId waiting for at least 1/2N of shuffle memory pool to be free")
s"TID $taskAttemptId waiting for at least 1/2N of execution memory pool to be free")
wait()
}
} else {
return acquire(toGrant, taskAttemptId)
return acquire(toGrant)
}
}
0L // Never reached
}

/**
* Acquire N bytes of execution memory from the memory manager for the current task.
* @return number of bytes actually acquired (<= N).
*/
private def acquire(numBytes: Long, taskAttemptId: Long): Long = synchronized {
val evictedBlocks = new ArrayBuffer[(BlockId, BlockStatus)]
val acquired = acquireExecutionMemory(numBytes, evictedBlocks)
// Register evicted blocks, if any, with the active task metrics
// TODO: just do this in `acquireExecutionMemory` (SPARK-10985)
Option(TaskContext.get()).foreach { tc =>
val metrics = tc.taskMetrics()
val lastUpdatedBlocks = metrics.updatedBlocks.getOrElse(Seq[(BlockId, BlockStatus)]())
metrics.updatedBlocks = Some(lastUpdatedBlocks ++ evictedBlocks.toSeq)
@VisibleForTesting
private[memory] def releaseExecutionMemory(numBytes: Long): Unit = synchronized {
if (numBytes > _executionMemoryUsed) {
logWarning(s"Attempted to release $numBytes bytes of execution " +
s"memory when we only have ${_executionMemoryUsed} bytes")
_executionMemoryUsed = 0
} else {
_executionMemoryUsed -= numBytes
}
taskMemory(taskAttemptId) += acquired
acquired
}

/** Release numBytes bytes for the current task. */
def release(numBytes: Long, taskAttemptId: Long): Unit = synchronized {
val curMem = taskMemory.getOrElse(taskAttemptId, 0L)
if (curMem < numBytes) {
/**
* Release numBytes of execution memory belonging to the given task.
*/
def releaseExecutionMemory(numBytes: Long, taskAttemptId: Long): Unit = synchronized {
val curMem = memoryConsumptionForTask.getOrElse(taskAttemptId, 0L)
if (curMem < numBytes && taskAttemptId != -1) { // -1 is a dummy id used in some tests
throw new SparkException(
s"Internal error: release called on $numBytes bytes but task only has $curMem")
}
if (taskMemory.contains(taskAttemptId)) {
taskMemory(taskAttemptId) -= numBytes
if (memoryConsumptionForTask.contains(taskAttemptId)) {
memoryConsumptionForTask(taskAttemptId) -= numBytes
releaseExecutionMemory(numBytes)
}
notifyAll() // Notify waiters in tryToAcquire that memory has been freed
Review comment (Contributor): outdated
Reply (Contributor Author): Fixed.

}

/** Release all memory for the current task and mark it as inactive (e.g. when a task ends). */
private[memory] def releaseMemoryForTask(taskAttemptId: Long): Unit = synchronized {
taskMemory.remove(taskAttemptId).foreach { numBytes =>
releaseExecutionMemory(numBytes)
/** Release all memory for the given task and mark it as inactive (e.g. when a task ends). */
private[memory] def releaseAllExecutionMemoryForTask(taskAttemptId: Long): Unit = synchronized {
releaseExecutionMemory(getMemoryConsumptionForTask(taskAttemptId), taskAttemptId)
}

/**
* Release N bytes of storage memory.
*/
def releaseStorageMemory(numBytes: Long): Unit = synchronized {
if (numBytes > _storageMemoryUsed) {
logWarning(s"Attempted to release $numBytes bytes of storage " +
s"memory when we only have ${_storageMemoryUsed} bytes")
_storageMemoryUsed = 0
} else {
_storageMemoryUsed -= numBytes
}
notifyAll() // Notify waiters in tryToAcquire that memory has been freed
}

/**
* Release all storage memory acquired.
*/
def releaseAllStorageMemory(): Unit = synchronized {
_storageMemoryUsed = 0
}

/**
* Release N bytes of unroll memory.
*/
def releaseUnrollMemory(numBytes: Long): Unit = synchronized {
releaseStorageMemory(numBytes)
}

/**
* Execution memory currently in use, in bytes.
*/
final def executionMemoryUsed: Long = synchronized {
_executionMemoryUsed
}

/**
* Storage memory currently in use, in bytes.
*/
final def storageMemoryUsed: Long = synchronized {
_storageMemoryUsed
}

/** Returns the memory consumption, in bytes, for the current task */
private[memory] def getMemoryConsumptionForTask(taskAttemptId: Long): Long = synchronized {
taskMemory.getOrElse(taskAttemptId, 0L)
memoryConsumptionForTask.getOrElse(taskAttemptId, 0L)
}

// -- Methods related to Tungsten managed memory -------------------------------------------------
Review comment (Contributor): Fields
Reply (Contributor Author): Fixed.

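The scaladoc and the acquireExecutionMemory loop in this file describe the cross-task policy inherited from ShuffleMemoryManager: with N active tasks, each task may hold at most 1/N of the execution pool and blocks until it can reach at least 1/(2N) before being forced to spill. The following self-contained sketch shows the shape of that policy under simplified assumptions (no storage eviction, no evictedBlocks plumbing); SimpleExecutionPool and its members are illustrative names, not the Spark API.

```scala
import scala.collection.mutable

// Sketch of the 1/(2N)..1/N arbitration policy described above.
// SimpleExecutionPool is illustrative; storage eviction is omitted.
class SimpleExecutionPool(maxMemory: Long) {
  // taskAttemptId -> bytes currently held by that task
  private val memoryForTask = mutable.HashMap[Long, Long]()

  def acquire(numBytes: Long, taskAttemptId: Long): Long = synchronized {
    require(numBytes > 0, s"invalid number of bytes requested: $numBytes")
    if (!memoryForTask.contains(taskAttemptId)) {
      memoryForTask(taskAttemptId) = 0L
      notifyAll() // N changed; waiting tasks must recompute their bounds
    }
    while (true) {
      val numActiveTasks = memoryForTask.size
      val curMem = memoryForTask(taskAttemptId)
      val freeMemory = maxMemory - memoryForTask.values.sum
      // Never let one task exceed 1/N of the pool, and never grant more
      // than is actually free.
      val maxToGrant = math.min(numBytes, math.max(0L, maxMemory / numActiveTasks - curMem))
      val toGrant = math.min(maxToGrant, freeMemory)
      // If the request cannot be met and the task is still below its
      // guaranteed 1/(2N) share, wait for releases; otherwise grant now.
      if (toGrant < numBytes && curMem + toGrant < maxMemory / (2 * numActiveTasks)) {
        wait()
      } else {
        memoryForTask(taskAttemptId) += toGrant
        return toGrant
      }
    }
    0L // never reached
  }

  def release(numBytes: Long, taskAttemptId: Long): Unit = synchronized {
    if (memoryForTask.contains(taskAttemptId)) {
      memoryForTask(taskAttemptId) = math.max(0L, memoryForTask(taskAttemptId) - numBytes)
    }
    notifyAll() // freed memory may unblock waiters in acquire()
  }

  def releaseAllForTask(taskAttemptId: Long): Unit = synchronized {
    memoryForTask.remove(taskAttemptId)
    notifyAll()
  }
}
```

With a 1000-byte pool, a first task can take up to the full 1000 bytes, but once a second task calls acquire it will eventually receive at least 250 bytes (1/(2N) with N = 2) as memory is released, which is exactly the fairness property the scaladoc describes.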
StaticMemoryManager.scala
@@ -52,7 +52,7 @@ private[spark] class StaticMemoryManager(
* Acquire N bytes of memory for execution.
* @return number of bytes successfully granted (<= N).
*/
override def acquireExecutionMemory(
override def doAcquireExecutionMemory(
numBytes: Long,
evictedBlocks: mutable.Buffer[(BlockId, BlockStatus)]): Long = synchronized {
assert(numBytes >= 0)
UnifiedMemoryManager.scala
@@ -94,7 +94,7 @@ private[spark] class UnifiedMemoryManager(
* Blocks evicted in the process, if any, are added to `evictedBlocks`.
* @return number of bytes successfully granted (<= N).
*/
override def acquireExecutionMemory(
private[memory] override def doAcquireExecutionMemory(
numBytes: Long,
evictedBlocks: mutable.Buffer[(BlockId, BlockStatus)]): Long = synchronized {
assert(numBytes >= 0)
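As suggested in the review thread earlier on this page, the intended shape is a template method: the base MemoryManager keeps the public, task-aware acquireExecutionMemory, and subclasses such as StaticMemoryManager and UnifiedMemoryManager override only doAcquireExecutionMemory, the hook that actually obtains memory (possibly by evicting cached blocks). A hedged sketch of that split, with simplified signatures and illustrative class names:

```scala
// Template-method sketch: the base class owns bookkeeping, subclasses own
// the grant mechanism. Names and signatures are simplified illustrations.
abstract class ArbitratingPool {
  private val memoryForTask = scala.collection.mutable.HashMap[Long, Long]()

  // Hook for subclasses: try to obtain up to numBytes, e.g. by evicting
  // cached blocks; return how many bytes were actually obtained.
  protected def doAcquireExecutionMemory(numBytes: Long): Long

  // Final public entry point: per-task accounting lives in one place and
  // cannot be bypassed by subclasses.
  final def acquireExecutionMemory(numBytes: Long, taskAttemptId: Long): Long = synchronized {
    val granted = doAcquireExecutionMemory(numBytes)
    memoryForTask(taskAttemptId) = memoryForTask.getOrElse(taskAttemptId, 0L) + granted
    granted
  }
}

// A trivial subclass in the spirit of GrantEverythingMemoryManager below:
// it always claims to have granted the full request.
class GrantAllPool extends ArbitratingPool {
  override protected def doAcquireExecutionMemory(numBytes: Long): Long = numBytes
}
```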
GrantEverythingMemoryManager.scala
@@ -23,7 +23,7 @@ import org.apache.spark.SparkConf
import org.apache.spark.storage.{BlockStatus, BlockId}

class GrantEverythingMemoryManager(conf: SparkConf) extends MemoryManager(conf) {
override def acquireExecutionMemory(
private[memory] override def doAcquireExecutionMemory(
numBytes: Long,
evictedBlocks: mutable.Buffer[(BlockId, BlockStatus)]): Long = numBytes
override def acquireStorageMemory(
@@ -34,7 +34,7 @@ class GrantEverythingMemoryManager(conf: SparkConf) extends MemoryManager(conf)
blockId: BlockId,
numBytes: Long,
evictedBlocks: mutable.Buffer[(BlockId, BlockStatus)]): Boolean = true
override def releaseExecutionMemory(numBytes: Long): Unit = { }
override def releaseExecutionMemory(numBytes: Long, taskAttemptId: Long): Unit = { }
override def releaseStorageMemory(numBytes: Long): Unit = { }
override def maxExecutionMemory: Long = Long.MaxValue
override def maxStorageMemory: Long = Long.MaxValue
StaticMemoryManagerSuite.scala
@@ -45,18 +45,18 @@ class StaticMemoryManagerSuite extends MemoryManagerSuite {
val maxExecutionMem = 1000L
val (mm, _) = makeThings(maxExecutionMem, Long.MaxValue)
assert(mm.executionMemoryUsed === 0L)
assert(mm.acquireExecutionMemory(10L, evictedBlocks) === 10L)
assert(mm.doAcquireExecutionMemory(10L, evictedBlocks) === 10L)
assert(mm.executionMemoryUsed === 10L)
assert(mm.acquireExecutionMemory(100L, evictedBlocks) === 100L)
assert(mm.doAcquireExecutionMemory(100L, evictedBlocks) === 100L)
// Acquire up to the max
assert(mm.acquireExecutionMemory(1000L, evictedBlocks) === 890L)
assert(mm.doAcquireExecutionMemory(1000L, evictedBlocks) === 890L)
assert(mm.executionMemoryUsed === maxExecutionMem)
assert(mm.acquireExecutionMemory(1L, evictedBlocks) === 0L)
assert(mm.doAcquireExecutionMemory(1L, evictedBlocks) === 0L)
assert(mm.executionMemoryUsed === maxExecutionMem)
mm.releaseExecutionMemory(800L)
assert(mm.executionMemoryUsed === 200L)
// Acquire after release
assert(mm.acquireExecutionMemory(1L, evictedBlocks) === 1L)
assert(mm.doAcquireExecutionMemory(1L, evictedBlocks) === 1L)
assert(mm.executionMemoryUsed === 201L)
// Release beyond what was acquired
mm.releaseExecutionMemory(maxExecutionMem)
@@ -108,10 +108,10 @@ class StaticMemoryManagerSuite extends MemoryManagerSuite {
val dummyBlock = TestBlockId("ain't nobody love like you do")
val (mm, ms) = makeThings(maxExecutionMem, maxStorageMem)
// Only execution memory should increase
assert(mm.acquireExecutionMemory(100L, evictedBlocks) === 100L)
assert(mm.doAcquireExecutionMemory(100L, evictedBlocks) === 100L)
assert(mm.storageMemoryUsed === 0L)
assert(mm.executionMemoryUsed === 100L)
assert(mm.acquireExecutionMemory(1000L, evictedBlocks) === 100L)
assert(mm.doAcquireExecutionMemory(1000L, evictedBlocks) === 100L)
assert(mm.storageMemoryUsed === 0L)
assert(mm.executionMemoryUsed === 200L)
// Only storage memory should increase
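
The StaticMemoryManagerSuite changes above call the policy-free doAcquireExecutionMemory hook directly, checking that grants are capped by the remaining capacity and that released memory becomes available again. The snippet below reproduces that acquire/release arithmetic against a self-contained stand-in (FixedPool is illustrative, not a Spark class), using plain assertions rather than the suite's test framework.

```scala
// Self-contained stand-in (FixedPool is illustrative, not the Spark API)
// reproducing the acquire/release arithmetic exercised by the suite above.
object AcquireReleaseExample {
  class FixedPool(max: Long) {
    private var used = 0L

    // Grant as much of the request as still fits in the pool.
    def doAcquire(numBytes: Long): Long = synchronized {
      val granted = math.min(numBytes, max - used)
      used += granted
      granted
    }

    def release(numBytes: Long): Unit = synchronized {
      used = math.max(0L, used - numBytes)
    }

    def executionMemoryUsed: Long = synchronized(used)
  }

  def main(args: Array[String]): Unit = {
    val mm = new FixedPool(1000L)
    assert(mm.doAcquire(10L) == 10L)
    assert(mm.doAcquire(100L) == 100L)
    assert(mm.doAcquire(1000L) == 890L) // capped at the 890 bytes remaining
    assert(mm.doAcquire(1L) == 0L)      // pool exhausted
    mm.release(800L)
    assert(mm.executionMemoryUsed == 200L)
    assert(mm.doAcquire(1L) == 1L)      // acquiring after a release succeeds
    println("all assertions passed")
  }
}
```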