diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
index b89c9c148e6fb..67d1474fc1ac7 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
@@ -500,7 +500,11 @@ class AstBuilder(conf: SQLConf) extends SqlBaseBaseVisitor[AnyRef] with Logging
     // Before calling `toMap`, we check duplicated keys to avoid silently ignore partition values
     // in partition spec like PARTITION(a='1', b='2', a='3'). The real semantical check for
     // partition columns will be done in analyzer.
-    checkDuplicateKeys(parts, ctx)
+    if (conf.caseSensitiveAnalysis) {
+      checkDuplicateKeys(parts, ctx)
+    } else {
+      checkDuplicateKeys(parts.map(kv => kv._1.toLowerCase(Locale.ROOT) -> kv._2), ctx)
+    }
     parts.toMap
   }

diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLParserSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLParserSuite.scala
index c6a533dfae4d0..5d7a3b5d371b2 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLParserSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLParserSuite.scala
@@ -353,6 +353,17 @@ class DDLParserSuite extends AnalysisTest with SharedSparkSession {
     assert(e.contains("Found duplicate keys 'key1'"))
   }

+  test("SPARK-34556: duplicate keys in partition spec") {
+    val e = intercept[ParseException] {
+      parser.parsePlan("INSERT OVERWRITE t PARTITION (c='2', C='3') VALUES (1)")
+    }.getMessage
+    assert(e.contains("Found duplicate keys 'c'"))
+    val conf = new SQLConf()
+    conf.setConf(SQLConf.CASE_SENSITIVE, true)
+    val caseSensitiveParser = new SparkSqlParser(conf)
+    caseSensitiveParser.parsePlan("INSERT OVERWRITE t PARTITION (c='2', C='3') VALUES (1)")
+  }
+
   test("duplicate columns in partition specs") {
     val e = intercept[ParseException] {
       parser.parsePlan(
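
For context, here is a minimal, standalone sketch of the technique the patch applies. `checkDuplicateKeys` below is a stand-in for Spark's helper in `ParserUtils` (the real one raises a `ParseException` against the parser rule context); the lowercasing branch mirrors the `else` arm added above.

import java.util.Locale

// Standalone sketch, not Spark's actual ParserUtils helper: reject a
// partition spec that binds the same key twice.
object PartitionSpecCheck {
  def checkDuplicateKeys[T](keys: Seq[(String, T)]): Unit = {
    keys.groupBy(_._1).filter(_._2.size > 1).keys.foreach { key =>
      // Spark throws a ParseException here; a plain exception stands in.
      throw new IllegalArgumentException(s"Found duplicate keys '$key'")
    }
  }

  def check(parts: Seq[(String, Option[String])], caseSensitive: Boolean): Unit = {
    if (caseSensitive) {
      checkDuplicateKeys(parts)
    } else {
      // The patch's else arm: normalize keys before the duplicate check so
      // PARTITION (c='2', C='3') is caught under case-insensitive analysis.
      checkDuplicateKeys(parts.map(kv => kv._1.toLowerCase(Locale.ROOT) -> kv._2))
    }
  }

  def main(args: Array[String]): Unit = {
    val spec = Seq("c" -> Some("2"), "C" -> Some("3"))
    check(spec, caseSensitive = true)  // passes: 'c' and 'C' are distinct keys
    check(spec, caseSensitive = false) // throws: Found duplicate keys 'c'
  }
}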
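
And a hedged sketch of how the scenario pinned down by the new test surfaces to users through a SparkSession. The table name `t`, app name, and local-mode setup are illustrative, and it assumes a Spark build that includes this patch; under the default case-insensitive setting the statement now fails at parse time, while `spark.sql.caseSensitive=true` lets the parser accept the distinct-case keys.

import org.apache.spark.sql.SparkSession

// Illustrative only: assumes a partitioned table named `t` already exists.
val spark = SparkSession.builder().appName("demo").master("local[*]").getOrCreate()

spark.conf.set("spark.sql.caseSensitive", "false") // the default
// Now rejected at parse time with "Found duplicate keys 'c'":
// spark.sql("INSERT OVERWRITE t PARTITION (c='2', C='3') VALUES (1)")

spark.conf.set("spark.sql.caseSensitive", "true")
// Parses successfully; 'c' and 'C' are distinct keys here (the analyzer may
// still reject the plan if the table has no matching partition columns):
// spark.sql("INSERT OVERWRITE t PARTITION (c='2', C='3') VALUES (1)")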