address comments
gengliangwang committed Jun 7, 2019
commit 7d3a5686ea0fa41552840e8efa6aa66587367cba
@@ -176,16 +176,16 @@ case class ParquetPartitionReaderFactory(
     reader
   }

-  private def createRowBaseReader(file: PartitionedFile): ParquetRecordReader[UnsafeRow] = {
-    buildReaderBase(file, createRowBaseReader0).asInstanceOf[ParquetRecordReader[UnsafeRow]]
+  private def createRowBaseReader(file: PartitionedFile): RecordReader[Void, UnsafeRow] = {
+    buildReaderBase(file, createRowBaseReader0)
   }

   private def createRowBaseReader0(
       split: ParquetInputSplit,
       partitionValues: InternalRow,
       hadoopAttemptContext: TaskAttemptContextImpl,
       pushed: Option[FilterPredicate],
-      convertTz: Option[TimeZone]): ParquetRecordReader[UnsafeRow] = {
+      convertTz: Option[TimeZone]): RecordReader[Void, UnsafeRow] = {
     logDebug(s"Falling back to parquet-mr")
     val taskContext = Option(TaskContext.get())
     // ParquetRecordReader returns UnsafeRow
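Why this change helps, as a minimal self-contained sketch (the toy types below are hypothetical stand-ins for the Hadoop/Parquet classes, not the actual Spark code): buildReaderBase can only promise the RecordReader interface, so declaring createRowBaseReader against that interface type-checks directly, whereas demanding the concrete ParquetRecordReader subclass forced an unchecked asInstanceOf downcast.

// Hypothetical stand-ins, for illustration only:
trait RecordReader[K, V]
class ParquetRecordReader[V] extends RecordReader[Void, V]
class UnsafeRow

object ReturnTypeSketch {
  // Shared helper, like buildReaderBase: it can only promise the
  // interface type, since different call sites build different readers.
  def buildReaderBase(): RecordReader[Void, UnsafeRow] =
    new ParquetRecordReader[UnsafeRow]

  // Before: the concrete return type forces an unchecked downcast.
  def createRowBaseReaderBefore(): ParquetRecordReader[UnsafeRow] =
    buildReaderBase().asInstanceOf[ParquetRecordReader[UnsafeRow]]

  // After: returning the interface needs no cast; callers only rely on
  // the RecordReader contract anyway.
  def createRowBaseReaderAfter(): RecordReader[Void, UnsafeRow] =
    buildReaderBase()
}

The downcast was also fragile: if buildReaderBase ever produced a different RecordReader implementation, the cast would fail at runtime, while the widened signature stays correct.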
@@ -63,9 +63,9 @@ case class ParquetScanBuilder(
     this.filters
   }

-  // The actual filter push down happens in [[ParquetPartitionReaderFactory]].
+  // Note: for Parquet, the actual filter push down happens in [[ParquetPartitionReaderFactory]].
   // It requires the Parquet physical schema to determine whether a filter is convertible.
-  // So here we simply mark that all the filters are pushed down.
+  // All filters that can be converted to Parquet are pushed down.
   override def pushedFilters(): Array[Filter] = _pushedFilters

   override def build(): Scan = {
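For context, a simplified sketch of the contract the new comment describes (the types and the isConvertibleToParquet check below are hypothetical simplifications, not Spark's actual API): in DataSource V2 filter push down, pushFilters returns the filters Spark must still evaluate after the scan, while pushedFilters reports what the source claims to have pushed. Parquet push down is best-effort, so all filters are kept for post-scan evaluation, and only the convertible ones are reported as pushed.

// Hypothetical, simplified sketch of the push-down contract:
trait Filter
case class GreaterThan(attribute: String, value: Int) extends Filter
case class Custom(description: String) extends Filter

class ParquetScanBuilderSketch {
  private var filters: Array[Filter] = Array.empty

  // Stand-in for the real convertibility check, which needs the Parquet
  // physical schema and therefore runs later, per file, in the reader
  // factory (see ParquetPartitionReaderFactory above).
  private def isConvertibleToParquet(f: Filter): Boolean = f match {
    case _: GreaterThan => true
    case _ => false
  }

  // Returns the filters Spark must still evaluate after scanning. Parquet
  // push down only prunes row groups, so everything is returned and Spark
  // re-checks each row.
  def pushFilters(fs: Array[Filter]): Array[Filter] = {
    filters = fs
    filters
  }

  // Reports only the filters that can be translated to Parquet predicates.
  def pushedFilters(): Array[Filter] = filters.filter(isConvertibleToParquet)
}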