Skip to content
Closed
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Manually bind references
  • Loading branch information
JoshRosen committed Jun 18, 2015
commit c60a44d4c9b59a613764484dedbe1cbbc514ad9b
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,8 @@

package org.apache.spark.sql.execution

import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions.{Ascending, SortOrder}
import org.apache.spark.sql.catalyst.expressions.{BoundReference, Ascending, SortOrder}
import org.apache.spark.sql.types.{IntegerType, StringType}

class SortSuite extends SparkPlanTest {

Expand All @@ -31,8 +31,8 @@ class SortSuite extends SparkPlanTest {
)

val sortOrder = Seq(
SortOrder('_1, Ascending),
SortOrder('_2, Ascending)
SortOrder(BoundReference(0, StringType, nullable = false), Ascending),
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It's a little annoying to have to manually bind these references. It would be nice if there were some sort of rewrite I could use that would bind references like '_1 to the proper columns.

SortOrder(BoundReference(1, IntegerType, nullable = false), Ascending)
)

checkAnswer(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ class SparkPlanTest extends SparkFunSuite {
planFunction: SparkPlan => SparkPlan,
expectedAnswer: Seq[A]): Unit = {
val inputDf = TestSQLContext.createDataFrame(input)
val expectedRows = expectedAnswer.map(t => Row.apply(t))
val expectedRows = expectedAnswer.map(Row.fromTuple)
SparkPlanTest.checkAnswer(inputDf, planFunction, expectedRows) match {
case Some(errorMessage) => fail(errorMessage)
case None =>
Expand Down