diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
index a43d28b045d0..23fe2268fbb5 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
@@ -482,7 +482,11 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with SQLConfHelper with Logg
     // Before calling `toMap`, we check duplicated keys to avoid silently ignore partition values
     // in partition spec like PARTITION(a='1', b='2', a='3'). The real semantical check for
     // partition columns will be done in analyzer.
-    checkDuplicateKeys(parts.toSeq, ctx)
+    if (conf.caseSensitiveAnalysis) {
+      checkDuplicateKeys(parts.toSeq, ctx)
+    } else {
+      checkDuplicateKeys(parts.map(kv => kv._1.toLowerCase(Locale.ROOT) -> kv._2).toSeq, ctx)
+    }
     parts.toMap
   }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
index c7446c7a9f44..67c5f12dc71d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
@@ -208,6 +208,28 @@ trait SQLInsertTestSuite extends QueryTest with SQLTestUtils {
       checkAnswer(spark.table("t"), Row("1", null))
     }
   }
+
+  test("SPARK-34556: " +
+    "checking duplicate static partition columns should respect case sensitive conf") {
+    withTable("t") {
+      sql(s"CREATE TABLE t(i STRING, c string) USING PARQUET PARTITIONED BY (c)")
+      val e = intercept[AnalysisException] {
+        sql("INSERT OVERWRITE t PARTITION (c='2', C='3') VALUES (1)")
+      }
+      assert(e.getMessage.contains("Found duplicate keys 'c'"))
+    }
+    // The following code is skipped for Hive because columns stored in the Hive Metastore are
+    // always case insensitive, so we cannot create such a table there.
+    if (!format.startsWith("hive")) {
+      withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
+        withTable("t") {
+          sql(s"CREATE TABLE t(i int, c string, C string) USING PARQUET PARTITIONED BY (c, C)")
+          sql("INSERT OVERWRITE t PARTITION (c='2', C='3') VALUES (1)")
+          checkAnswer(spark.table("t"), Row(1, "2", "3"))
+        }
+      }
+    }
+  }
 }
 
 class FileSourceSQLInsertTestSuite extends SQLInsertTestSuite with SharedSparkSession {
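
For readers outside the Spark codebase, the core idea of the `AstBuilder` change can be reduced to a standalone sketch: when case-insensitive analysis is in effect, fold partition-spec keys to lower case with `Locale.ROOT` before looking for duplicates, so that PARTITION (c='2', C='3') is rejected just like PARTITION (c='2', c='3'). The helper name `duplicateKeys` and the simplified `Seq[(String, Option[String])]` spec type below are illustrative assumptions, not Spark's actual `checkDuplicateKeys` signature.

    import java.util.Locale

    object DuplicatePartitionKeyCheck {
      // Hypothetical helper mirroring the patch: normalize keys with
      // Locale.ROOT lower-casing when analysis is case insensitive,
      // then report any key that occurs more than once.
      def duplicateKeys(
          parts: Seq[(String, Option[String])],
          caseSensitive: Boolean): Seq[String] = {
        val normalized =
          if (caseSensitive) parts
          else parts.map { case (k, v) => k.toLowerCase(Locale.ROOT) -> v }
        normalized.groupBy(_._1).filter(_._2.size > 1).keys.toSeq
      }

      def main(args: Array[String]): Unit = {
        val spec = Seq("c" -> Some("2"), "C" -> Some("3"))
        // Case-insensitive analysis: 'c' and 'C' collide, as in the new test.
        println(duplicateKeys(spec, caseSensitive = false)) // List(c)
        // Case-sensitive analysis: the keys stay distinct, so no duplicates.
        println(duplicateKeys(spec, caseSensitive = true))  // List()
      }
    }

This also explains why the patch branches on `conf.caseSensitiveAnalysis` rather than always lower-casing: under case-sensitive analysis, `c` and `C` are legitimately different partition columns, as the second half of the new test verifies.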