address comments
Davies Liu committed Apr 15, 2016
commit 696aafee07ace0fb8142295e9954bdcd00e29061
@@ -89,11 +89,11 @@ case class Sort(
// Remember spill data size of this task before execute this operator so that we can
// figure out how many bytes we spilled for this operator.
val spillSizeBefore = metrics.memoryBytesSpilled
- val beforeSort = System.currentTimeMillis()
+ val beforeSort = System.nanoTime()

val sortedIterator = sorter.sort(iter.asInstanceOf[Iterator[UnsafeRow]])

- sortingTime += System.currentTimeMillis() - beforeSort
+ sortingTime += (System.nanoTime() - beforeSort) >> 20
Review comment from a project member on the `>> 20` conversion above:
">> 20"? I think it should be / 1000000.
(See the arithmetic sketch after this hunk.)
peakMemory += sorter.getPeakMemoryUsage
spillSize += metrics.memoryBytesSpilled - spillSizeBefore
metrics.incPeakExecutionMemory(sorter.getPeakMemoryUsage)
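The reviewer's objection is arithmetic: `x >> 20` divides by 2^20 = 1,048,576, not by 1,000,000, so the converted value under-reports milliseconds by roughly 4.6%. A minimal sketch (not part of the patch) comparing the two conversions:

```scala
import java.util.concurrent.TimeUnit

// Sketch only: compares the bit-shift approximation with exact division.
object NanosToMillisCheck {
  def main(args: Array[String]): Unit = {
    val elapsedNanos = 3000000000L                 // a 3-second duration, for illustration
    val shifted = elapsedNanos >> 20               // divides by 2^20 = 1,048,576 -> 2861
    val divided = elapsedNanos / 1000000L          // exact -> 3000
    val viaTimeUnit = TimeUnit.NANOSECONDS.toMillis(elapsedNanos) // also 3000
    println(s"shift: $shifted ms, divide: $divided ms, TimeUnit: $viaTimeUnit ms")
  }
}
```

The shift is marginally cheaper, but since these metrics are recorded once around a whole sort, aggregate, or broadcast rather than per row, exact division (or `TimeUnit.NANOSECONDS.toMillis`) costs nothing noticeable.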
@@ -147,13 +147,13 @@ case class Sort(
val beforeSortMs = ctx.freshName("beforeSortMs")
s"""
| if ($needToSort) {
- | long $beforeSortMs = System.currentTimeMillis();
+ | long $beforeSortMs = System.nanoTime();
| long $spillSizeBefore = $metrics.memoryBytesSpilled();
|
| $addToSorter();
| $sortedIterator = $sorterVariable.sort();
|
- | $sortingTime.add(System.currentTimeMillis() - $beforeSortMs);
+ | $sortingTime.add((System.nanoTime() - $beforeSortMs) >> 20);
| $peakMemory.add($sorterVariable.getPeakMemoryUsage());
| $spillSize.add($metrics.memoryBytesSpilled() - $spillSizeBefore);
| $metrics.incPeakExecutionMemory($sorterVariable.getPeakMemoryUsage());
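Both the interpreted path and the generated code above follow the same shape: sample `System.nanoTime()` before the work, run it, and add the elapsed time, converted to milliseconds, to a metric. A minimal sketch of that pattern in plain Scala, using a hypothetical `MetricLike` accumulator in place of Spark's SQLMetric and the exact conversion the reviewer suggests:

```scala
import java.util.concurrent.TimeUnit

// Sketch only: MetricLike is a stand-in for an SQLMetric-style accumulator.
trait MetricLike { def add(v: Long): Unit }

// Runs `body`, adds the elapsed wall-clock time in milliseconds to `metric`,
// and returns the body's result.
def timedMs[T](metric: MetricLike)(body: => T): T = {
  val start = System.nanoTime()
  try body
  finally metric.add(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start))
}
```

With such a helper the non-codegen path would read as `timedMs(sortingTime) { sorter.sort(...) }`; the codegen path cannot call a Scala closure from generated Java, so the string template above inlines the same steps directly.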
@@ -218,9 +218,9 @@ case class TungstenAggregate(
s"""
| while (!$initAgg) {
| $initAgg = true;
- | long $beforeAgg = System.currentTimeMillis();
+ | long $beforeAgg = System.nanoTime();
| $doAgg();
- | $aggTime.add(System.currentTimeMillis() - $beforeAgg);
+ | $aggTime.add((System.nanoTime() - $beforeAgg) >> 20);
|
| // output the result
| ${genResult.trim}
@@ -544,9 +544,9 @@ case class TungstenAggregate(
s"""
if (!$initAgg) {
$initAgg = true;
- long $beforeAgg = System.currentTimeMillis();
+ long $beforeAgg = System.nanoTime();
$doAgg();
- $aggTime.add(System.currentTimeMillis() - $beforeAgg);
+ $aggTime.add((System.nanoTime() - $beforeAgg) >> 20);
}

// output the result
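Note that the aggregate hunks edit string templates rather than executed code: the `s"""..."""` blocks are Scala interpolations whose `$initAgg`, `$beforeAgg`, `$doAgg`, and `$aggTime` placeholders are variable names substituted into the Java source that whole-stage codegen compiles. A minimal sketch of that substitution, with made-up names (in Spark they come from calls like the `ctx.freshName("beforeSortMs")` shown above) and the reviewer's suggested exact division:

```scala
// Sketch only: illustrative placeholder values, not the real generated names.
val initAgg = "agg_initAgg"
val beforeAgg = "agg_beforeAgg"
val doAgg = "agg_doAggregateWithoutKey"
val aggTime = "agg_aggTime"

// The interpolated result is Java source text, later compiled by codegen.
val generatedJava =
  s"""
     | while (!$initAgg) {
     |   $initAgg = true;
     |   long $beforeAgg = System.nanoTime();
     |   $doAgg();
     |   $aggTime.add((System.nanoTime() - $beforeAgg) / 1000000);
     | }
   """.stripMargin
```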
@@ -69,20 +69,20 @@ case class BroadcastExchange(
// This will run in another thread. Set the execution id so that we can connect these jobs
// with the correct execution.
SQLExecution.withExecutionId(sparkContext, executionId) {
- val beforeCollect = System.currentTimeMillis()
+ val beforeCollect = System.nanoTime()
// Note that we use .executeCollect() because we don't want to convert data to Scala types
val input: Array[InternalRow] = child.executeCollect()
- val beforeBuild = System.currentTimeMillis()
- longMetric("collectTime") += beforeBuild - beforeCollect
+ val beforeBuild = System.nanoTime()
+ longMetric("collectTime") += (beforeBuild - beforeCollect) >> 20
longMetric("dataSize") += input.map(_.asInstanceOf[UnsafeRow].getSizeInBytes.toLong).sum

// Construct and broadcast the relation.
val relation = mode.transform(input)
- val beforeBroadcast = System.currentTimeMillis()
- longMetric("buildTime") += beforeBroadcast - beforeBuild
+ val beforeBroadcast = System.nanoTime()
+ longMetric("buildTime") += (beforeBroadcast - beforeBuild) >> 20

val broadcasted = sparkContext.broadcast(relation)
longMetric("broadcastTime") += System.currentTimeMillis() - beforeBroadcast
longMetric("broadcastTime") += (System.nanoTime() - beforeBroadcast) >> 20
broadcasted
}
}(BroadcastExchange.executionContext)
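The BroadcastExchange hunk splits one wall-clock sequence into three metrics by sampling `System.nanoTime()` between phases: collect the child's rows, build the relation, broadcast it. A minimal sketch of that phase accounting, with a plain mutable map standing in for `longMetric(...)` and exact nanosecond-to-millisecond conversion:

```scala
import java.util.concurrent.TimeUnit.NANOSECONDS
import scala.collection.mutable

// Sketch only: phaseMs stands in for the operator's SQLMetrics.
val phaseMs = mutable.Map("collectTime" -> 0L, "buildTime" -> 0L, "broadcastTime" -> 0L)

def broadcastWithTiming[I, R, B](collect: () => I)(build: I => R)(broadcast: R => B): B = {
  val beforeCollect = System.nanoTime()
  val input = collect()

  val beforeBuild = System.nanoTime()
  phaseMs("collectTime") += NANOSECONDS.toMillis(beforeBuild - beforeCollect)
  val relation = build(input)

  val beforeBroadcast = System.nanoTime()
  phaseMs("buildTime") += NANOSECONDS.toMillis(beforeBroadcast - beforeBuild)
  val result = broadcast(relation)

  phaseMs("broadcastTime") += NANOSECONDS.toMillis(System.nanoTime() - beforeBroadcast)
  result
}
```

Usage would look roughly like `broadcastWithTiming(() => child.executeCollect())(mode.transform)(sparkContext.broadcast)`, mirroring the three steps in the diff above.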