
Commit c7ff62c

[SPARK-21786][SQL] The 'spark.sql.parquet.compression.codec' configuration doesn't take effect on tables with partition field(s)
Fix test problem
1 parent 732266c
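For context on the configuration named in the title, here is a minimal sketch of the intended behavior. This is not code from the commit; the app, table, and column names are made up for illustration. The idea is that the session-level codec set through spark.conf should govern the data files written under each partition of a partitioned table.

// Illustrative sketch only (not part of this patch). Names are hypothetical.
import org.apache.spark.sql.SparkSession

object CompressionCodecDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("compression-codec-demo")
      .getOrCreate()
    import spark.implicits._

    // The session-level configuration the commit title refers to.
    spark.conf.set("spark.sql.parquet.compression.codec", "gzip")

    // Write a small partitioned Parquet table; with the fix in place, the
    // files under part=p1 and part=p2 should come out gzip-compressed.
    val df = Seq((1, "a", "p1"), (2, "b", "p2")).toDF("id", "value", "part")
    df.write
      .mode("overwrite")
      .partitionBy("part")
      .format("parquet")
      .saveAsTable("codec_demo_partitioned")

    spark.stop()
  }
}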

1 file changed (+35, -33 lines)


sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala

Lines changed: 35 additions & 33 deletions
@@ -1438,39 +1438,41 @@ class HiveDDLSuite
   }
 
   test("create hive serde table with new syntax") {
-    withTable("t", "t2", "t3") {
-      withTempPath { path =>
-        sql(
-          s"""
-            |CREATE TABLE t(id int) USING hive
-            |OPTIONS(fileFormat 'orc', compression 'Zlib')
-            |LOCATION '${path.toURI}'
-          """.stripMargin)
-        val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
-        assert(DDLUtils.isHiveTable(table))
-        assert(table.storage.serde == Some("org.apache.hadoop.hive.ql.io.orc.OrcSerde"))
-        assert(table.storage.properties.get("compression") == Some("Zlib"))
-        assert(spark.table("t").collect().isEmpty)
-
-        sql("INSERT INTO t SELECT 1")
-        checkAnswer(spark.table("t"), Row(1))
-        // Check if this is compressed as ZLIB.
-        val maybeOrcFile = path.listFiles().find(!_.getName.endsWith(".crc"))
-        assert(maybeOrcFile.isDefined)
-        val orcFilePath = maybeOrcFile.get.toPath.toString
-        val expectedCompressionKind =
-          OrcFileOperator.getFileReader(orcFilePath).get.getCompression
-        assert("ZLIB" === expectedCompressionKind.name())
-
-        sql("CREATE TABLE t2 USING HIVE AS SELECT 1 AS c1, 'a' AS c2")
-        val table2 = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t2"))
-        assert(DDLUtils.isHiveTable(table2))
-        assert(table2.storage.serde == Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
-        checkAnswer(spark.table("t2"), Row(1, "a"))
-
-        sql("CREATE TABLE t3(a int, p int) USING hive PARTITIONED BY (p)")
-        sql("INSERT INTO t3 PARTITION(p=1) SELECT 0")
-        checkAnswer(spark.table("t3"), Row(0, 1))
+    withSQLConf("spark.sql.orc.compression.codec" -> "zlib") {
+      withTable("t", "t2", "t3") {
+        withTempPath { path =>
+          sql(
+            s"""
+              |CREATE TABLE t(id int) USING hive
+              |OPTIONS(fileFormat 'orc', compression 'Zlib')
+              |LOCATION '${path.toURI}'
+            """.stripMargin)
+          val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
+          assert(DDLUtils.isHiveTable(table))
+          assert(table.storage.serde == Some("org.apache.hadoop.hive.ql.io.orc.OrcSerde"))
+          assert(table.storage.properties.get("compression") == Some("Zlib"))
+          assert(spark.table("t").collect().isEmpty)
+
+          sql("INSERT INTO t SELECT 1")
+          checkAnswer(spark.table("t"), Row(1))
+          // Check if this is compressed as ZLIB.
+          val maybeOrcFile = path.listFiles().find(!_.getName.endsWith(".crc"))
+          assert(maybeOrcFile.isDefined)
+          val orcFilePath = maybeOrcFile.get.toPath.toString
+          val expectedCompressionKind =
+            OrcFileOperator.getFileReader(orcFilePath).get.getCompression
+          assert("ZLIB" === expectedCompressionKind.name())
+
+          sql("CREATE TABLE t2 USING HIVE AS SELECT 1 AS c1, 'a' AS c2")
+          val table2 = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t2"))
+          assert(DDLUtils.isHiveTable(table2))
+          assert(table2.storage.serde == Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
+          checkAnswer(spark.table("t2"), Row(1, "a"))
+
+          sql("CREATE TABLE t3(a int, p int) USING hive PARTITIONED BY (p)")
+          sql("INSERT INTO t3 PARTITION(p=1) SELECT 0")
+          checkAnswer(spark.table("t3"), Row(0, 1))
+        }
       }
     }
   }
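The change wraps the existing test body in withSQLConf, the helper from Spark's SQLTestUtils that sets the given SQL conf entries for the duration of a block and restores the previous values afterwards, which keeps the ZLIB assertion independent of the suite-wide default codec. Below is a minimal sketch of that pattern under the assumption of a standard QueryTest/SharedSQLContext suite; the suite name is hypothetical, and only the conf key from the diff is reused.

// Illustrative sketch of the withSQLConf pattern (suite name is hypothetical).
// withSQLConf sets the given pairs, runs the body, then restores the previous
// values even if the body throws.
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.test.SharedSQLContext

class OrcCodecConfSketchSuite extends QueryTest with SharedSQLContext {

  test("orc codec is pinned only inside the block") {
    withSQLConf("spark.sql.orc.compression.codec" -> "zlib") {
      // Inside the block the session-level codec is fixed to zlib, so an
      // assertion on the written files' compression is deterministic.
      assert(spark.conf.get("spark.sql.orc.compression.codec") == "zlib")
    }
    // Outside the block the conf reverts to whatever value it had before.
  }
}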
