Skip to content

Commit 536346e

Browse files
committed
Add test case to test hive table
1 parent 46bfcc6 commit 536346e

File tree

1 file changed

+39
-23
lines changed
  • sql/core/src/test/scala/org/apache/spark/sql/execution/command

1 file changed

+39
-23
lines changed

sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala

Lines changed: 39 additions & 23 deletions
Original file line number | Diff line number | Diff line change
@@ -2249,48 +2249,64 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils {
22492249
}
22502250
}
22512251

2252-
test("Datasource partition table should load empty partitions") {
2252+
test("Partition table should load empty static partitions") {
22532253
// All static partitions
2254-
withTable("t", "t1") {
2254+
withTable("t", "t1", "t2") {
22552255
withTempPath { dir =>
22562256
spark.sql("CREATE TABLE t(a int) USING parquet")
22572257
spark.sql("CREATE TABLE t1(a int, c string, b string) " +
22582258
s"USING parquet PARTITIONED BY(c, b) LOCATION '${dir.toURI}'")
22592259

2260-
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
2261-
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
2262-
2263-
assert(spark.sql("SHOW PARTITIONS t1").count() == 0)
2264-
2265-
spark.sql("INSERT INTO TABLE t1 PARTITION(b='b', c='c') SELECT * FROM t WHERE 1 = 0")
2260+
// datasource table
2261+
validateStaticPartitionTable("t1")
22662262

2267-
assert(spark.sql("SHOW PARTITIONS t1").count() == 1)
2268-
2269-
assert(new File(dir, "c=c/b=b").exists())
2263+
// hive table
2264+
if (isUsingHiveMetastore) {
2265+
spark.sql("CREATE TABLE t2(a int) " +
2266+
s"PARTITIONED BY(c string, b string) LOCATION '${dir.toURI}'")
2267+
validateStaticPartitionTable("t2")
2268+
}
22702269

2271-
checkAnswer(spark.table("t1"), Nil)
2270+
def validateStaticPartitionTable(tableName: String): Unit = {
2271+
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier(tableName))
2272+
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
2273+
assert(spark.sql(s"SHOW PARTITIONS $tableName").count() == 0)
2274+
spark.sql(
2275+
s"INSERT INTO TABLE $tableName PARTITION(b='b', c='c') SELECT * FROM t WHERE 1 = 0")
2276+
assert(spark.sql(s"SHOW PARTITIONS $tableName").count() == 1)
2277+
assert(new File(dir, "c=c/b=b").exists())
2278+
checkAnswer(spark.table(tableName), Nil)
2279+
}
22722280
}
22732281
}
22742282

22752283
// Partial dynamic partitions
2276-
withTable("t", "t1") {
2284+
withTable("t", "t1", "t2") {
22772285
withTempPath { dir =>
22782286
spark.sql("CREATE TABLE t(a int) USING parquet")
22792287
spark.sql("CREATE TABLE t1(a int, b string, c string) " +
22802288
s"USING parquet PARTITIONED BY(c, b) LOCATION '${dir.toURI}'")
22812289

2282-
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
2283-
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
2284-
2285-
assert(spark.sql("SHOW PARTITIONS t1").count() == 0)
2286-
2287-
spark.sql("INSERT INTO TABLE t1 PARTITION(c='c', b) SELECT *, 'b' FROM t WHERE 1 = 0")
2290+
// datasource table
2291+
validatePartialStaticPartitionTable("t1")
22882292

2289-
assert(spark.sql("SHOW PARTITIONS t1").count() == 0)
2290-
2291-
assert(!new File(dir, "c=c/b=b").exists())
2293+
// hive table
2294+
if (isUsingHiveMetastore) {
2295+
spark.sql("CREATE TABLE t2(a int) " +
2296+
s"PARTITIONED BY(c string, b string) LOCATION '${dir.toURI}'")
2297+
validatePartialStaticPartitionTable("t2")
2298+
}
22922299

2293-
checkAnswer(spark.table("t1"), Nil)
2300+
def validatePartialStaticPartitionTable(tableName: String): Unit = {
2301+
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier(tableName))
2302+
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
2303+
assert(spark.sql(s"SHOW PARTITIONS $tableName").count() == 0)
2304+
spark.sql(
2305+
s"INSERT INTO TABLE $tableName PARTITION(c='c', b) SELECT *, 'b' FROM t WHERE 1 = 0")
2306+
assert(spark.sql(s"SHOW PARTITIONS $tableName").count() == 0)
2307+
assert(!new File(dir, "c=c/b=b").exists())
2308+
checkAnswer(spark.table(tableName), Nil)
2309+
}
22942310
}
22952311
}
22962312
}

0 commit comments

Comments (0)