diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala
index 09c15473b21c..e5c9df05d567 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala
@@ -20,6 +20,7 @@ package org.apache.spark.sql.hive
 import org.apache.spark.sql.{QueryTest, Row}
 import org.apache.spark.sql.execution.datasources.parquet.ParquetTest
 import org.apache.spark.sql.hive.test.TestHiveSingleton
+import org.apache.spark.sql.internal.SQLConf
 
 case class Cases(lower: String, UPPER: String)
 
@@ -76,4 +77,19 @@ class HiveParquetSuite extends QueryTest with ParquetTest with TestHiveSingleton
       }
     }
   }
+
+  test("SPARK-25206: wrong records are returned by filter pushdown " +
+    "when Hive metastore schema and parquet schema are in different letter cases") {
+    withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> true.toString) {
+      withTempPath { path =>
+        val data = spark.range(1, 10).toDF("id")
+        data.write.parquet(path.getCanonicalPath)
+        withTable("SPARK_25206") {
+          sql("CREATE TABLE SPARK_25206 (ID LONG) USING parquet LOCATION " +
+            s"'${path.getCanonicalPath}'")
+          checkAnswer(sql("select id from SPARK_25206 where id > 0"), data)
+        }
+      }
+    }
+  }
 }
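
Note: for anyone who wants to try the scenario this regression test encodes outside the test harness, below is a rough standalone sketch. It assumes a live SparkSession named `spark` (e.g. in spark-shell); the path and output-checking comments are illustrative, not part of the patch.

// Hypothetical reproduction of the SPARK-25206 scenario, assuming a
// running SparkSession `spark`. Path and table name are illustrative.
spark.conf.set("spark.sql.parquet.filterPushdown", "true")

// The physical parquet schema uses a lower-case column name "id".
val path = "/tmp/spark_25206_repro"
spark.range(1, 10).toDF("id").write.mode("overwrite").parquet(path)

// The declared (metastore-style) schema uses upper-case "ID".
spark.sql(s"CREATE TABLE SPARK_25206 (ID LONG) USING parquet LOCATION '$path'")

// With the bug, the pushed-down predicate referenced "ID", which does not
// match the lower-case parquet field, so matching rows could be wrongly
// filtered out; with the fix this returns ids 1 through 9.
spark.sql("SELECT id FROM SPARK_25206 WHERE id > 0").show()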