diff --git a/mllib/src/test/scala/org/apache/spark/ml/fpm/FPGrowthSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/fpm/FPGrowthSuite.scala
index 87f8b9034dde..ee5e5a721df7 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/fpm/FPGrowthSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/fpm/FPGrowthSuite.scala
@@ -83,7 +83,7 @@ class FPGrowthSuite extends SparkFunSuite with MLlibTestSparkContext with Defaul
     )).toDF("id", "items")
     val model = new FPGrowth().setMinSupport(0.7).fit(dataset)
     val prediction = model.transform(df)
-    assert(prediction.select("prediction").where("id=3").first().getSeq[String](0).isEmpty)
+    assert(prediction.where("id=3").select("prediction").first().getSeq[String](0).isEmpty)
   }

   test("FPGrowth prediction should not contain duplicates") {
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index 72e7d5dd3638..7c21f6e9a37a 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -1023,8 +1023,6 @@ class Analyzer(
    * clause. This rule detects such queries and adds the required attributes to the original
    * projection, so that they will be available during sorting. Another projection is added to
    * remove these attributes after sorting.
-   *
-   * The HAVING clause could also used a grouping columns that is not presented in the SELECT.
    */
   object ResolveMissingReferences extends Rule[LogicalPlan] {
     def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
@@ -1051,26 +1049,6 @@ class Analyzer(
           // in Sort
           case ae: AnalysisException => s
         }
-
-      case f @ Filter(cond, child) if child.resolved =>
-        try {
-          val newCond = resolveExpressionRecursively(cond, child)
-          val requiredAttrs = newCond.references.filter(_.resolved)
-          val missingAttrs = requiredAttrs -- child.outputSet
-          if (missingAttrs.nonEmpty) {
-            // Add missing attributes and then project them away.
-            Project(child.output,
-              Filter(newCond, addMissingAttr(child, missingAttrs)))
-          } else if (newCond != cond) {
-            f.copy(condition = newCond)
-          } else {
-            f
-          }
-        } catch {
-          // Attempting to resolve it might fail. When this happens, return the original plan.
-          // Users will see an AnalysisException for resolution failure of missing attributes
-          case ae: AnalysisException => f
-        }
     }

     /**
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
index ef0de6f6f4ff..ac54565bca95 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
@@ -1844,4 +1844,20 @@ class DataFrameSuite extends QueryTest with SharedSQLContext {
       .filter($"x1".isNotNull || !$"y".isin("a!"))
       .count
   }
+
+  test("Unresolvable attribute in Filter should throw analysis exception") {
+    val df = Seq((1, "a"), (2, "b"), (3, "c")).toDF("x", "y")
+    val e1 = intercept[AnalysisException](df.select("y").where("x=1"))
+    assert(e1.message.contains("cannot resolve '`x`'"))
+
+    Seq(1).toDF("c1").createOrReplaceTempView("onerow")
+    val e2 = intercept[AnalysisException] {
+      sql(
+        """
+          | select 1
+          | from (select 1 from onerow t2 LIMIT 1)
+          | where t2.c1=1""".stripMargin)
+    }
+    assert(e2.message.contains("cannot resolve '`t2.c1`'"))
+  }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
index 131abf7c1e5d..60f59de8aa59 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
@@ -655,7 +655,7 @@ class SubquerySuite extends QueryTest with SharedSQLContext {
         """
           | select c1 from onerow t1
           | where exists (select 1
-          |               from (select 1 from onerow t2 LIMIT 1)
+          |               from (select 1 as c1 from onerow t2 LIMIT 1) t2
           |               where t1.c1=t2.c1)""".stripMargin),
       Row(1) :: Nil)
   }
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveContextCompatibilitySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveContextCompatibilitySuite.scala
index 939fd71b4f1e..167f31f74e2c 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveContextCompatibilitySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveContextCompatibilitySuite.scala
@@ -59,8 +59,8 @@ class HiveContextCompatibilitySuite extends SparkFunSuite with BeforeAndAfterEac
     import _hc.implicits._
     val df1 = (1 to 20).map { i => (i, i) }.toDF("a", "x")
     val df2 = (1 to 100).map { i => (i, i % 10, i % 2 == 0) }.toDF("a", "b", "c")
-      .select($"a", $"b")
       .filter($"a" > 10 && $"b" > 6 && $"c")
+      .select($"a", $"b")
     val df3 = df1.join(df2, "a")
     val res = df3.collect()
     val expected = Seq((18, 18, 8)).toDF("a", "x", "b").collect()