@@ -73,16 +73,6 @@ public void append(InternalRow row) {
     currentRows.add(row);
   }

-  /**
-   * Returns whether this iterator should stop fetching next row from [[CodegenSupport#inputRDDs]].
-   *
-   * If it returns true, the caller should exit the loop that [[InputAdapter]] generates.
-   * This interface is mainly used to limit the number of input rows.
-   */
-  public boolean stopEarly() {
-    return false;
-  }
-
   /**
    * Returns whether `processNext()` should stop processing next row from `input` or not.
    *
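The removed stopEarly() hook was the previous escape hatch: InputAdapter's generated loop invoked it once per row as a method call (see the InputAdapter hunk further down). The new design instead splices limit conditions straight into the loop header. A rough before/after of the loop shape, sketched with hypothetical generated names (count_1 stands in for a limit's row counter):

```scala
// Loop shapes only; `count_1` and the limit of 5 are invented for illustration.
val oldShape = "while (input.hasNext() && !stopEarly()) { ... }" // virtual call per row
val newShape = "while (count_1 < 5L && input.hasNext()) { ... }" // inlined primitive check
```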
@@ -136,7 +136,7 @@ private[sql] trait ColumnarBatchScan extends CodegenSupport {
       |if ($batch == null) {
       |  $nextBatchFuncName();
       |}
-      |while ($batch != null) {
+      |while ($limitNotReachedCond $batch != null) {
       |  int $numRows = $batch.numRows();
       |  int $localEnd = $numRows - $idx;
       |  for (int $localIdx = 0; $localIdx < $localEnd; $localIdx++) {

@@ -166,7 +166,7 @@ private[sql] trait ColumnarBatchScan extends CodegenSupport {
     }
     val inputRow = if (needsUnsafeRowConversion) null else row
     s"""
-      |while ($input.hasNext()) {
+      |while ($limitNotReachedCond $input.hasNext()) {
       |  InternalRow $row = (InternalRow) $input.next();
       |  $numOutputRows.add(1);
       |  ${consume(ctx, outputVars, inputRow).trim}
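To see what the substitution produces: limitNotReachedCond renders either as an empty string (no Limit downstream) or as a condition ending in `&&`, so both cases drop into the same loop template. A minimal sketch, assuming a single downstream check string:

```scala
// "count_1 < 5L" is an assumed check from a downstream limit; "" when there is none.
val limitNotReachedCond = "count_1 < 5L &&"
val input = "scan_input" // stands in for the generated iterator variable
val loop =
  s"""
     |while ($limitNotReachedCond $input.hasNext()) {
     |  // ... consume the next row ...
     |}
   """.stripMargin
```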
@@ -39,7 +39,7 @@ case class SortExec(
     global: Boolean,
     child: SparkPlan,
     testSpillFrequency: Int = 0)
-  extends UnaryExecNode with CodegenSupport {
+  extends UnaryExecNode with BlockingOperatorWithCodegen {

   override def output: Seq[Attribute] = child.output

@@ -124,14 +124,6 @@ case class SortExec(
   // Name of sorter variable used in codegen.
   private var sorterVariable: String = _

-  // The result rows come from the sort buffer, so this operator doesn't need to copy its result
-  // even if its child does.
-  override def needCopyResult: Boolean = false
-
-  // Sort operator always consumes all the input rows before outputting any result, so we don't need
-  // a stop check before sorting.
-  override def needStopCheck: Boolean = false
-
   override protected def doProduce(ctx: CodegenContext): String = {
     val needToSort =
       ctx.addMutableState(CodeGenerator.JAVA_BOOLEAN, "needToSort", v => s"$v = true;")

@@ -172,7 +164,7 @@ case class SortExec(
       |   $needToSort = false;
       | }
       |
-      | while ($sortedIterator.hasNext()) {
+      | while ($limitNotReachedCond $sortedIterator.hasNext()) {
       |   UnsafeRow $outputRow = (UnsafeRow)$sortedIterator.next();
       |   ${consume(ctx, null, outputRow)}
       |   if (shouldStop()) return;
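Since SortExec is now a blocking operator, the checks are reset below it: the operators feeding the sort run without limit conditions, while the sort's own output loop above carries them. A toy model of this propagation for a plan like Limit(10) over Sort over Range, with an invented check string:

```scala
// The check contributed by the Limit, as seen by the Sort's output loop:
val fromLimit = Seq("count_2 < 10L")
val sortOutputCond = fromLimit.mkString("", " && ", " &&") // "count_2 < 10L &&"

// Sort overrides limitNotReachedChecks to Nil, so everything below it,
// e.g. the Range's producing loop, gets no limit condition at all:
val fromSort: Seq[String] = Nil
val rangeCond = if (fromSort.isEmpty) "" else fromSort.mkString("", " && ", " &&")
assert(rangeCond == "")
```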
@@ -345,6 +345,61 @@ trait CodegenSupport extends SparkPlan {
    * don't require shouldStop() in the loop of producing rows.
    */
   def needStopCheck: Boolean = parent.needStopCheck
+
+  /**
+   * A sequence of checks which evaluate to true if the downstream Limit operators have not yet
+   * received enough records to reach the limit. If the current node is a data producing node, it
+   * can leverage this information to stop producing data and complete the data flow earlier.
+   * Common data producing nodes are leaf nodes like Range and Scan, and blocking nodes like Sort
+   * and Aggregate. These checks should be put into the loop condition of the data producing loop.
+   */
+  def limitNotReachedChecks: Seq[String] = parent.limitNotReachedChecks
+
+  /**
+   * A helper method to generate the data producing loop condition according to the
+   * limit-not-reached checks.
+   */
+  final def limitNotReachedCond: String = {
+    // InputAdapter is also a leaf node.
+    val isLeafNode = children.isEmpty || this.isInstanceOf[InputAdapter]
+    if (!isLeafNode && !this.isInstanceOf[BlockingOperatorWithCodegen]) {
+      val errMsg = "Only leaf nodes and blocking nodes need to call 'limitNotReachedCond' " +
+        "in their data producing loops."
+      if (Utils.isTesting) {
+        throw new IllegalStateException(errMsg)
+      } else {
+        logWarning(s"[BUG] $errMsg Please open a JIRA ticket to report it.")
+      }
+    }
+    if (parent.limitNotReachedChecks.isEmpty) {

(Review thread on the line above)

Contributor: Just one thought: since we propagate (correctly) the limitNotReachedChecks to all the children, shall we also enforce that we are calling this on a node which will not propagate the limitNotReachedChecks any further? We could maybe use the blocking flag proposed in the other comment. The reason I'd like to do this is to ensure that we are not introducing the same limit condition check more than once, in more than one operator, which would be useless and may cause a (small) perf issue. WDYT?

Author: It's not very useful to enforce that. The consequence is so minor that I don't think it's worth the complexity. I want to have a simple and robust framework for the limit optimization first.

Contributor:
> I want to have a simple and robust framework

Yes, 100% agreed; that's why I'd like to detect early all the situations we don't consider possible but that may happen in corner cases. What I am suggesting is to enforce this and fail in testing only, of course; in production we shouldn't do anything similar.

+      ""
+    } else {
+      parent.limitNotReachedChecks.mkString("", " && ", " &&")

(Review thread on the line above)

Member: nit: I am a bit afraid of 64KB Java bytecode overflow from using mkString. On the other hand, I understand that this condition generation is performance sensitive.

+    }
+  }
 }
+
+/**
+ * A special kind of operators which support whole stage codegen. Blocking means these operators
+ * will consume all the inputs first, before producing output. Typical blocking operators are
+ * sort and aggregate.
+ */
+trait BlockingOperatorWithCodegen extends CodegenSupport {
+
+  // Blocking operators usually have some kind of buffer to keep the data before producing it, so
+  // they don't need to copy their result even if their child does.
+  override def needCopyResult: Boolean = false
+
+  // Blocking operators always consume all the input first, so their upstream operators don't
+  // need a stop check.
+  override def needStopCheck: Boolean = false
+
+  // Blocking operators need to consume all the inputs before producing any output. This means a
+  // Limit operator after this blocking operator will never reach its limit while this blocking
+  // operator's upstream operators are executing. Here we override this method to return Nil, so
+  // that upstream operators will not generate useless conditions (which would always evaluate to
+  // false) for the Limit operators after this blocking operator.
+  override def limitNotReachedChecks: Seq[String] = Nil
+}
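With several limits in one stage, the checks accumulate to one entry per Limit, and mkString glues them together with a trailing `&&` so the caller can prepend the result to its own loop condition. A runnable sketch with invented check strings:

```scala
// Two nested limits -> two checks (strings invented; real checks reference
// generated counter fields).
val checks = Seq("count_1 < 100L", "count_2 < 10L")
val cond = if (checks.isEmpty) "" else checks.mkString("", " && ", " &&")
assert(cond == "count_1 < 100L && count_2 < 10L &&")
println(s"while ($cond sortedIter.hasNext()) { ... }") // spliced into a producing loop
```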


@@ -381,7 +436,7 @@ case class InputAdapter(child: SparkPlan) extends UnaryExecNode with CodegenSupport {
       forceInline = true)
     val row = ctx.freshName("row")
     s"""
-       | while ($input.hasNext() && !stopEarly()) {
+       | while ($limitNotReachedCond $input.hasNext()) {
       |   InternalRow $row = (InternalRow) $input.next();
       |   ${consume(ctx, null, row).trim}
       |   if (shouldStop()) return;

@@ -677,6 +732,8 @@ case class WholeStageCodegenExec(child: SparkPlan)(val codegenStageId: Int)

   override def needStopCheck: Boolean = true

+  override def limitNotReachedChecks: Seq[String] = Nil
+
   override protected def otherCopyArgs: Seq[AnyRef] = Seq(codegenStageId.asInstanceOf[Integer])
 }
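The Limit side of the protocol is not part of this excerpt; presumably a limit operator prepends a check on its own row counter to whatever its parent requires. A hypothetical sketch of that shape (LimitLikeExec, countTerm and limit are assumed names, not Spark API):

```scala
trait LimitLikeExec { // hypothetical stand-in for the real limit operator
  def countTerm: String         // generated field counting rows emitted so far
  def limit: Int                // the user-specified row count
  def parentChecks: Seq[String] // checks required by the parent
  // Prepend this operator's own limit-not-reached check:
  def limitNotReachedChecks: Seq[String] = s"$countTerm < $limit" +: parentChecks
}
```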
@@ -45,7 +45,7 @@ case class HashAggregateExec(
     initialInputBufferOffset: Int,
     resultExpressions: Seq[NamedExpression],
     child: SparkPlan)
-  extends UnaryExecNode with CodegenSupport {
+  extends UnaryExecNode with BlockingOperatorWithCodegen {

   private[this] val aggregateBufferAttributes = {
     aggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes)

@@ -151,14 +151,6 @@ case class HashAggregateExec(
     child.asInstanceOf[CodegenSupport].inputRDDs()
   }

-  // The result rows come from the aggregate buffer, or a single row(no grouping keys), so this
-  // operator doesn't need to copy its result even if its child does.
-  override def needCopyResult: Boolean = false
-
-  // Aggregate operator always consumes all the input rows before outputting any result, so we
-  // don't need a stop check before aggregating.
-  override def needStopCheck: Boolean = false
-
   protected override def doProduce(ctx: CodegenContext): String = {
     if (groupingExpressions.isEmpty) {
       doProduceWithoutKeys(ctx)

@@ -705,13 +697,16 @@ case class HashAggregateExec(

     def outputFromRegularHashMap: String = {
       s"""
-        |while ($iterTerm.next()) {
+        |while ($limitNotReachedCond $iterTerm.next()) {
         |  UnsafeRow $keyTerm = (UnsafeRow) $iterTerm.getKey();
         |  UnsafeRow $bufferTerm = (UnsafeRow) $iterTerm.getValue();
         |  $outputFunc($keyTerm, $bufferTerm);
         |
         |  if (shouldStop()) return;
         |}
+        |$iterTerm.close();

(Review thread on the line above)

Contributor: This is an unrelated change, right? It changes nothing in the generated code, right? Just want to double-check that I am not missing something (what changes is that before we were not doing the cleanup in the case of a limit operator, whereas now we do, I see that).

Author: Yes, it's unrelated and is a noop. outputFromRowBasedMap and outputFromVectorizedMap put the resource closing at the end; I want to be consistent here.

+        |if ($sorterTerm == null) {
+        |  $hashMapTerm.free();
+        |}
       """.stripMargin
     }

@@ -728,11 +723,6 @@ case class HashAggregateExec(
       // output the result
       $outputFromFastHashMap
       $outputFromRegularHashMap
-
-      $iterTerm.close();
-      if ($sorterTerm == null) {
-        $hashMapTerm.free();
-      }
     """
   }
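Putting the aggregate pieces together: the drain loop now carries the limit check and its own cleanup. Roughly the following generated shape, sketched as a plain string with invented identifiers (count_1, aggIter, emit, sorter, hashMap):

```scala
// Shape only; not the exact generated code.
val drainShape =
  """
    |while (count_1 < 10L && aggIter.next()) {
    |  UnsafeRow key = (UnsafeRow) aggIter.getKey();
    |  UnsafeRow buf = (UnsafeRow) aggIter.getValue();
    |  emit(key, buf);
    |  if (shouldStop()) return;
    |}
    |aggIter.close();
    |if (sorter == null) {
    |  hashMap.free();
    |}
  """.stripMargin
```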
@@ -378,7 +378,7 @@ case class RangeExec(range: org.apache.spark.sql.catalyst.plans.logical.Range)
     val numOutput = metricTerm(ctx, "numOutputRows")

     val initTerm = ctx.addMutableState(CodeGenerator.JAVA_BOOLEAN, "initRange")
-    val number = ctx.addMutableState(CodeGenerator.JAVA_LONG, "number")
+    val nextIndex = ctx.addMutableState(CodeGenerator.JAVA_LONG, "nextIndex")

     val value = ctx.freshName("value")
     val ev = ExprCode.forNonNullValue(JavaCode.variable(value, LongType))

@@ -397,7 +397,7 @@ case class RangeExec(range: org.apache.spark.sql.catalyst.plans.logical.Range)
     // within a batch, while the code in the outer loop is setting batch parameters and updating
     // the metrics.

-    // Once number == batchEnd, it's time to progress to the next batch.
+    // Once nextIndex == batchEnd, it's time to progress to the next batch.
     val batchEnd = ctx.addMutableState(CodeGenerator.JAVA_LONG, "batchEnd")

     // How many values should still be generated by this range operator.

@@ -421,13 +421,13 @@ case class RangeExec(range: org.apache.spark.sql.catalyst.plans.logical.Range)
       |
       |  $BigInt st = index.multiply(numElement).divide(numSlice).multiply(step).add(start);
       |  if (st.compareTo($BigInt.valueOf(Long.MAX_VALUE)) > 0) {
-      |    $number = Long.MAX_VALUE;
+      |    $nextIndex = Long.MAX_VALUE;
       |  } else if (st.compareTo($BigInt.valueOf(Long.MIN_VALUE)) < 0) {
-      |    $number = Long.MIN_VALUE;
+      |    $nextIndex = Long.MIN_VALUE;
       |  } else {
-      |    $number = st.longValue();
+      |    $nextIndex = st.longValue();
       |  }
-      |  $batchEnd = $number;
+      |  $batchEnd = $nextIndex;
       |
       |  $BigInt end = index.add($BigInt.ONE).multiply(numElement).divide(numSlice)
       |    .multiply(step).add(start);

@@ -440,7 +440,7 @@ case class RangeExec(range: org.apache.spark.sql.catalyst.plans.logical.Range)
       |  }
       |
       |  $BigInt startToEnd = $BigInt.valueOf(partitionEnd).subtract(
-      |    $BigInt.valueOf($number));
+      |    $BigInt.valueOf($nextIndex));
       |  $numElementsTodo = startToEnd.divide(step).longValue();
       |  if ($numElementsTodo < 0) {
       |    $numElementsTodo = 0;
@@ -452,46 +452,73 @@ case class RangeExec(range: org.apache.spark.sql.catalyst.plans.logical.Range)

     val localIdx = ctx.freshName("localIdx")
     val localEnd = ctx.freshName("localEnd")
-    val range = ctx.freshName("range")
     val shouldStop = if (parent.needStopCheck) {
-      s"if (shouldStop()) { $number = $value + ${step}L; return; }"
+      s"if (shouldStop()) { $nextIndex = $value + ${step}L; return; }"

(Review thread on the line above)

Contributor: In this case we are not very accurate in the metrics, right? I mean, we always say that we are returning a full batch, even though we may have consumed fewer rows than a batch. What about updating the metrics before returning? Something like $inputMetrics.incRecordsRead($localIdx - $localEnd);?

Author: You are right about the problem, but I'm not going to touch this part in this PR. Note that this PR focuses on limit whole stage codegen. Personally I feel it's OK to make the metrics a little inaccurate for better performance; we can discuss it later in other PRs.

Author: BTW, I do have a local branch that fixes this problem, I just haven't had time to benchmark it yet. I'll send it out later and let's move the discussion there.

Contributor: I am not sure why you need a benchmark for this (unless you did something different from what I suggested in the earlier comment). In that case it is a single metric update which happens only when stopping, so it shouldn't introduce any significant overhead. Am I missing something? Anyway, let's move the discussion to the next PR then, thanks.

Author:
> Something like $inputMetrics.incRecordsRead($localIdx - $localEnd);?

localIdx is purely local to the loop; if we access it outside of the loop, we need to define localIdx outside of the loop as well. This may have some performance penalty. cc @kiszk

Contributor: But shouldStop is called local to the loop, isn't it?

Author: shouldStop is called locally, but the metrics update is not. Anyway, the JVM JIT is mysterious and we need to be super careful when updating this kind of hot loop. That said, I'm not confident about any change to the hot loop without a benchmark.

Contributor: OK, let's get back to this eventually later; this is anyway not worse than before.

Member (kiszk, Oct 8, 2018): Sorry for the late comment. It would be good to discuss the details in another PR. First, I agree benchmarking is necessary. Here are my thoughts.

  1. localIdx could be defined as a local variable outside of the loop. Or, how about storing localIdx into another local variable only if parent.needStopCheck is true?
  2. Since shouldStop() simply reads without updating, we expect the JIT to inline it and apply some optimizations.
  3. If we want to call incRecordsRead, it would be good to exit the loop using break and then call incRecordsRead.

     } else {
       "// shouldStop check is eliminated"
     }
+    val loopCondition = if (limitNotReachedChecks.isEmpty) {
+      "true"
+    } else {
+      limitNotReachedChecks.mkString(" && ")

(Review thread on the line above)

Member: nit: I am a bit afraid of 64KB Java bytecode overflow from using mkString. On the other hand, I understand that this condition generation is performance sensitive.

Author: This is whole-stage codegen. If bytecode overflow happens, we will fall back.

+    }

+    // An overview of the Range processing.
+    //
+    // For each partition, the Range task needs to produce records from the partition start
+    // (inclusive) to end (exclusive). For better performance, we separate the partition range
+    // into batches, and use 2 loops to produce data. The outer while loop is used to iterate
+    // batches, and the inner for loop is used to iterate records inside a batch.
+    //
+    // `nextIndex` tracks the index of the next record that is going to be consumed, initialized
+    // with the partition start. `batchEnd` tracks the end index of the current batch, initialized
+    // with `nextIndex`. In the outer loop, we first check if `nextIndex == batchEnd`. If it's
+    // true, it means the current batch is fully consumed, and we will update `batchEnd` to
+    // process the next batch. If `batchEnd` reaches the partition end, exit the outer loop.
+    // Finally we enter the inner loop. Note that, when we enter the inner loop, `nextIndex` must
+    // be different from `batchEnd`, otherwise we would already have exited the outer loop.
+    //
+    // The inner loop iterates from 0 to `localEnd`, which is calculated as
+    // `(batchEnd - nextIndex) / step`. Since `batchEnd` is initialized with `nextIndex` and
+    // increased by `nextBatchTodo * step` in the outer loop, `batchEnd - nextIndex` is always
+    // divisible by `step`. `nextIndex` is increased by `step` during each iteration, and ends up
+    // being equal to `batchEnd` when the inner loop finishes.
+    //
+    // The inner loop can be interrupted once the query has produced at least one result row, so
+    // that we don't buffer too many result rows and waste memory. It's OK to interrupt the inner
+    // loop, because `nextIndex` will be updated before interrupting.

s"""
| // initialize Range
| if (!$initTerm) {
| $initTerm = true;
| $initRangeFuncName(partitionIndex);
| }
|
| while (true) {
| long $range = $batchEnd - $number;
| if ($range != 0L) {
| int $localEnd = (int)($range / ${step}L);
| for (int $localIdx = 0; $localIdx < $localEnd; $localIdx++) {
| long $value = ((long)$localIdx * ${step}L) + $number;
| ${consume(ctx, Seq(ev))}
| $shouldStop
| while ($loopCondition) {
| if ($nextIndex == $batchEnd) {
| long $nextBatchTodo;
| if ($numElementsTodo > ${batchSize}L) {
| $nextBatchTodo = ${batchSize}L;
| $numElementsTodo -= ${batchSize}L;
| } else {
| $nextBatchTodo = $numElementsTodo;
| $numElementsTodo = 0;
| if ($nextBatchTodo == 0) break;
| }
| $number = $batchEnd;
| $numOutput.add($nextBatchTodo);
| $inputMetrics.incRecordsRead($nextBatchTodo);
| $batchEnd += $nextBatchTodo * ${step}L;
| }
|
| $taskContext.killTaskIfInterrupted();
|
| long $nextBatchTodo;
| if ($numElementsTodo > ${batchSize}L) {
| $nextBatchTodo = ${batchSize}L;
| $numElementsTodo -= ${batchSize}L;
| } else {
| $nextBatchTodo = $numElementsTodo;
| $numElementsTodo = 0;
| if ($nextBatchTodo == 0) break;
| int $localEnd = (int)(($batchEnd - $nextIndex) / ${step}L);
Copy link
Contributor Author

@cloud-fan cloud-fan Oct 4, 2018

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The change here simply moves the inner loop after the batchEnd and metrics update, so that we can get correct metrics when we stop earlier because of limit.

| for (int $localIdx = 0; $localIdx < $localEnd; $localIdx++) {
| long $value = ((long)$localIdx * ${step}L) + $nextIndex;
| ${consume(ctx, Seq(ev))}
| $shouldStop
| }
| $numOutput.add($nextBatchTodo);
| $inputMetrics.incRecordsRead($nextBatchTodo);
|
| $batchEnd += $nextBatchTodo * ${step}L;
| $nextIndex = $batchEnd;
| $taskContext.killTaskIfInterrupted();
| }
""".stripMargin
}
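The restructured loop is easier to follow outside of codegen. Below is a plain-Scala model of the generated logic with made-up numbers (25 values, batches of 10, a downstream limit of 12). It shows why updating the metrics before the inner loop keeps them consistent when the limit check stops the outer loop; the early-return path via shouldStop() is omitted here:

```scala
import scala.util.control.Breaks.{break, breakable}

var nextIndex = 0L        // next value to produce
var batchEnd = 0L         // exclusive end of the current batch
var numElementsTodo = 25L // values this partition still owes
val step = 1L
val batchSize = 10L
var recordsRead = 0L      // models $inputMetrics, bumped once per batch
var produced = 0L         // models a downstream limit's counter
val limit = 12L

breakable {
  while (produced < limit) {         // models $loopCondition
    if (nextIndex == batchEnd) {     // current batch fully consumed
      val nextBatchTodo = math.min(numElementsTodo, batchSize)
      numElementsTodo -= nextBatchTodo
      if (nextBatchTodo == 0) break()
      recordsRead += nextBatchTodo   // metrics updated *before* the inner loop
      batchEnd += nextBatchTodo * step
    }
    val localEnd = ((batchEnd - nextIndex) / step).toInt
    for (localIdx <- 0 until localEnd) {
      val value = localIdx * step + nextIndex // the row handed to consume()
      produced += 1
    }
    nextIndex = batchEnd
  }
}
// Afterwards: produced == 20 and recordsRead == 20; the limit check sits on the
// outer loop, so a started batch is finished, and the metrics match production.
```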