@@ -93,12 +93,16 @@ object CatalogStorageFormat {
  * @param spec partition spec values indexed by column name
  * @param storage storage format of the partition
  * @param parameters some parameters for the partition
+ * @param createTime creation time of the partition, in milliseconds
+ * @param lastAccessTime last access time, in milliseconds
  * @param stats optional statistics (number of rows, total size, etc.)
  */
 case class CatalogTablePartition(
     spec: CatalogTypes.TablePartitionSpec,
     storage: CatalogStorageFormat,
     parameters: Map[String, String] = Map.empty,
+    createTime: Long = System.currentTimeMillis,
+    lastAccessTime: Long = -1,
     stats: Option[CatalogStatistics] = None) {

   def toLinkedHashMap: mutable.LinkedHashMap[String, String] = {
@@ -109,6 +113,8 @@ case class CatalogTablePartition(
     if (parameters.nonEmpty) {
       map.put("Partition Parameters", s"{${parameters.map(p => p._1 + "=" + p._2).mkString(", ")}}")
     }
+    map.put("Created Time", new Date(createTime).toString)
+    map.put("Last Access", new Date(lastAccessTime).toString)
     stats.foreach(s => map.put("Partition Statistics", s.simpleString))
     map
   }
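
Note on the defaults: createTime is the wall-clock time in epoch milliseconds at construction, and lastAccessTime is -1 (never accessed), which new Date(-1) renders as the instant just before the Unix epoch. A minimal self-contained sketch of the same rendering, using a hypothetical PartitionTimes type rather than the PR's class:

import java.util.Date
import scala.collection.mutable

// Hypothetical stand-in for the two new fields: epoch milliseconds,
// with -1 used as the "never accessed" sentinel.
case class PartitionTimes(
    createTime: Long = System.currentTimeMillis,
    lastAccessTime: Long = -1) {

  def describe: mutable.LinkedHashMap[String, String] = {
    val map = new mutable.LinkedHashMap[String, String]()
    map.put("Created Time", new Date(createTime).toString)
    map.put("Last Access", new Date(lastAccessTime).toString)
    map
  }
}

// PartitionTimes().describe renders, e.g.:
//   Created Time -> current wall-clock time
//   Last Access  -> 1 ms before the Unix epoch (from new Date(-1))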
@@ -1114,11 +1114,13 @@ abstract class SessionCatalogSuite extends AnalysisTest {
     // And for hive serde table, hive metastore will set some values(e.g.transient_lastDdlTime)
     // in table's parameters and storage's properties, here we also ignore them.
     val actualPartsNormalize = actualParts.map(p =>
-      p.copy(parameters = Map.empty, storage = p.storage.copy(
+      p.copy(parameters = Map.empty, createTime = -1, lastAccessTime = -1,
+        storage = p.storage.copy(
         properties = Map.empty, locationUri = None, serde = None))).toSet

     val expectedPartsNormalize = expectedParts.map(p =>
-      p.copy(parameters = Map.empty, storage = p.storage.copy(
+      p.copy(parameters = Map.empty, createTime = -1, lastAccessTime = -1,
+        storage = p.storage.copy(
         properties = Map.empty, locationUri = None, serde = None))).toSet

     actualPartsNormalize == expectedPartsNormalize
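
The normalization idea here is generic: reset every field the metastore assigns nondeterministically (timestamps, DDL bookkeeping parameters) to a sentinel before comparing the two partition sets structurally. A stripped-down sketch of the same pattern, using a hypothetical Part type rather than Spark's classes:

// Hypothetical simplified partition record, for illustration only.
case class Part(spec: Map[String, String], createTime: Long, lastAccessTime: Long)

// Reset metastore-assigned fields to sentinels so two partition sets
// can be compared for logical equality.
def normalize(parts: Seq[Part]): Set[Part] =
  parts.map(_.copy(createTime = -1, lastAccessTime = -1)).toSet

// Usage: normalize(actualParts) == normalize(expectedParts)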
@@ -57,6 +57,8 @@ Database default
 Table t
 Partition Values [ds=2017-08-01, hr=10]
 Location [not included in comparison]sql/core/spark-warehouse/t/ds=2017-08-01/hr=10
+Created Time [not included in comparison]
+Last Access [not included in comparison]

 # Storage Information
 Location [not included in comparison]sql/core/spark-warehouse/t
@@ -89,6 +91,8 @@ Database default
 Table t
 Partition Values [ds=2017-08-01, hr=10]
 Location [not included in comparison]sql/core/spark-warehouse/t/ds=2017-08-01/hr=10
+Created Time [not included in comparison]
+Last Access [not included in comparison]
 Partition Statistics 1121 bytes, 3 rows

 # Storage Information
@@ -122,6 +126,8 @@ Database default
 Table t
 Partition Values [ds=2017-08-01, hr=10]
 Location [not included in comparison]sql/core/spark-warehouse/t/ds=2017-08-01/hr=10
+Created Time [not included in comparison]
+Last Access [not included in comparison]
 Partition Statistics 1121 bytes, 3 rows

 # Storage Information
@@ -147,6 +153,8 @@ Database default
 Table t
 Partition Values [ds=2017-08-01, hr=11]
 Location [not included in comparison]sql/core/spark-warehouse/t/ds=2017-08-01/hr=11
+Created Time [not included in comparison]
+Last Access [not included in comparison]
 Partition Statistics 1098 bytes, 4 rows

 # Storage Information
@@ -180,6 +188,8 @@ Database default
 Table t
 Partition Values [ds=2017-08-01, hr=10]
 Location [not included in comparison]sql/core/spark-warehouse/t/ds=2017-08-01/hr=10
+Created Time [not included in comparison]
+Last Access [not included in comparison]
 Partition Statistics 1121 bytes, 3 rows

 # Storage Information
@@ -205,6 +215,8 @@ Database default
 Table t
 Partition Values [ds=2017-08-01, hr=11]
 Location [not included in comparison]sql/core/spark-warehouse/t/ds=2017-08-01/hr=11
+Created Time [not included in comparison]
+Last Access [not included in comparison]
 Partition Statistics 1098 bytes, 4 rows

 # Storage Information
@@ -230,6 +242,8 @@ Database default
 Table t
 Partition Values [ds=2017-09-01, hr=5]
 Location [not included in comparison]sql/core/spark-warehouse/t/ds=2017-09-01/hr=5
+Created Time [not included in comparison]
+Last Access [not included in comparison]
 Partition Statistics 1144 bytes, 2 rows

 # Storage Information
@@ -282,6 +282,8 @@ Table t
 Partition Values [c=Us, d=1]
 Location [not included in comparison]sql/core/spark-warehouse/t/c=Us/d=1
 Storage Properties [a=1, b=2]
+Created Time [not included in comparison]
+Last Access [not included in comparison]

 # Storage Information
 Num Buckets 2
@@ -311,6 +313,8 @@ Table t
 Partition Values [c=Us, d=1]
 Location [not included in comparison]sql/core/spark-warehouse/t/c=Us/d=1
 Storage Properties [a=1, b=2]
+Created Time [not included in comparison]
+Last Access [not included in comparison]

 # Storage Information
 Num Buckets 2
@@ -178,6 +178,8 @@ struct<database:string,tableName:string,isTemporary:boolean,information:string>
 -- !query 14 output
 showdb show_t1 false Partition Values: [c=Us, d=1]
 Location [not included in comparison]sql/core/spark-warehouse/showdb.db/show_t1/c=Us/d=1
+Created Time [not included in comparison]
+Last Access [not included in comparison]


 -- !query 15
@@ -995,6 +995,8 @@ private[hive] object HiveClientImpl {
     tpart.setTableName(ht.getTableName)
     tpart.setValues(partValues.asJava)
     tpart.setSd(storageDesc)
+    tpart.setCreateTime((p.createTime / 1000).toInt)
+    tpart.setLastAccessTime((p.lastAccessTime / 1000).toInt)
     tpart.setParameters(mutable.Map(p.parameters.toSeq: _*).asJava)
     new HivePartition(ht, tpart)
   }
@@ -1019,6 +1021,8 @@ private[hive] object HiveClientImpl {
         compressed = apiPartition.getSd.isCompressed,
         properties = Option(apiPartition.getSd.getSerdeInfo.getParameters)
           .map(_.asScala.toMap).orNull),
+      createTime = apiPartition.getCreateTime.toLong * 1000,
+      lastAccessTime = apiPartition.getLastAccessTime.toLong * 1000,
Review comment (Member), on the lastAccessTime line above: Can we use DurationConversions here?
       parameters = properties,
       stats = readHiveStats(properties))
   }
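
On the conversion the reviewer is asking about: the Hive metastore Partition API stores create and last-access times as int seconds since the epoch, while CatalogTablePartition keeps Long milliseconds, hence the paired / 1000 and * 1000 above. A hedged sketch of what the DurationConversions suggestion might look like (my reading of the comment, not code from this PR):

import scala.concurrent.duration._

// Spark catalog milliseconds -> Hive metastore whole seconds.
def millisToHiveSeconds(millis: Long): Int = millis.millis.toSeconds.toInt

// Hive metastore seconds -> Spark catalog milliseconds.
def hiveSecondsToMillis(seconds: Int): Long = seconds.seconds.toMillis

// e.g. tpart.setCreateTime(millisToHiveSeconds(p.createTime))
//      createTime = hiveSecondsToMillis(apiPartition.getCreateTime)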