Merged
Changes from 1 commit
46 commits
1f93a93
update
XiangpengHao Jul 1, 2025
2e01e56
update
XiangpengHao Jul 1, 2025
0bd08c3
update
XiangpengHao Jul 1, 2025
d6ecbd4
update
XiangpengHao Jul 1, 2025
7cd5518
cleanup
XiangpengHao Jul 2, 2025
4520048
update
XiangpengHao Jul 2, 2025
e6281bc
update
XiangpengHao Jul 2, 2025
6b6d4fc
update
XiangpengHao Jul 2, 2025
b696b66
update
XiangpengHao Jul 2, 2025
f60581f
update
XiangpengHao Jul 2, 2025
1851f0b
clippy and license
XiangpengHao Jul 2, 2025
5e414a8
Merge remote-tracking branch 'apache/main' into pushdown-v4
alamb Jul 7, 2025
58add51
bug fix
XiangpengHao Jul 8, 2025
470cc01
Merge remote-tracking branch 'refs/remotes/origin/pushdown-v3' into p…
XiangpengHao Jul 8, 2025
2bf3d38
clippy
XiangpengHao Jul 8, 2025
2cf1a8f
bug fix
XiangpengHao Jul 8, 2025
86e149c
switch to boolean array for row selection
XiangpengHao Jul 15, 2025
4d24172
Merge remote-tracking branch 'apache/main' into pushdown-v4
alamb Jul 15, 2025
be134d6
Add comments (OCD) and rename some fields
alamb Jul 15, 2025
eecaf99
Merge pull request #4 from alamb/alamb/pushdown_suggestions
XiangpengHao Jul 15, 2025
5537bcb
fmt
XiangpengHao Jul 15, 2025
b835163
fmt
alamb Jul 16, 2025
5132de8
Simplify projection caching
alamb Jul 16, 2025
253dad3
Move cache options construction to ArrayReaderBuilder, add builders
alamb Jul 16, 2025
5d9781e
update memory accounting
XiangpengHao Jul 17, 2025
2e20902
Merge remote-tracking branch 'refs/remotes/origin/pushdown-v4' into p…
XiangpengHao Jul 17, 2025
721d00c
Merge pull request #5 from alamb/alamb/simplify_cache
XiangpengHao Jul 17, 2025
f8aed80
Merge pull request #6 from alamb/alamb/cleaner_api
XiangpengHao Jul 17, 2025
884b591
update
XiangpengHao Jul 17, 2025
4f6b918
array size
XiangpengHao Jul 17, 2025
6c53bfd
add test case
XiangpengHao Jul 17, 2025
8ebe579
fix bug
XiangpengHao Jul 17, 2025
c240a52
clippy & fmt
XiangpengHao Jul 17, 2025
30a0d1c
Add config option for predicate cache memory limit
alamb Jul 23, 2025
ed3ce13
Add option to control predicate cache, documentation, ArrowReaderMetr…
alamb Jul 23, 2025
42d5520
Update parquet/src/arrow/arrow_reader/mod.rs
alamb Jul 24, 2025
6e618b3
Merge pull request #7 from alamb/alamb/test_memory_limit
XiangpengHao Jul 24, 2025
f70e46a
Clarify in documentation that cache is only for async decoder
alamb Jul 25, 2025
15d6826
add comment
alamb Jul 25, 2025
bec6d9c
Revert backwards incompatible changes to the Parquet reader API
alamb Jul 25, 2025
3e05cb2
Merge pull request #9 from alamb/alamb/revert_api_changes
XiangpengHao Jul 25, 2025
4d64dc0
Merge pull request #8 from alamb/alamb/pushdown-v4-cleanup
XiangpengHao Jul 25, 2025
8da582b
Merge remote-tracking branch 'apache/main' into pushdown-v4
alamb Aug 6, 2025
315e463
exclude nested column from cache
XiangpengHao Aug 7, 2025
1db701a
only use expanded selection when the column is one of cache column
XiangpengHao Aug 7, 2025
bea4433
Merge remote-tracking branch 'upstream/main' into pushdown-v4
XiangpengHao Aug 7, 2025
Add config option for predicate cache memory limit
alamb committed Jul 23, 2025
commit 30a0d1cd2539447fe6d8750457ef08aefe18c91f
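
The commit message describes a configuration knob for bounding the predicate cache's memory use, although the diff below only adds test scaffolding for it. For orientation, here is a loose sketch of how such an option might be applied when constructing a reader; the method name with_max_predicate_cache_size is an assumption inferred from the commit message, not an API confirmed by this diff.

use bytes::Bytes;
use parquet::arrow::arrow_reader::{ArrowReaderOptions, ParquetRecordBatchReaderBuilder};

// Hypothetical configuration: cap the predicate cache at 1 MiB.
// `with_max_predicate_cache_size` is an assumed name taken from the commit
// message; the real option may be named or located differently.
fn read_with_bounded_cache(data: Bytes) {
    let options = ArrowReaderOptions::default().with_max_predicate_cache_size(1024 * 1024);
    let reader = ParquetRecordBatchReaderBuilder::try_new_with_options(data, options)
        .expect("builder")
        .build()
        .expect("reader");
    for batch in reader {
        let _batch = batch.expect("batch");
    }
}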
1 change: 1 addition & 0 deletions parquet/tests/arrow_reader/mod.rs
@@ -38,6 +38,7 @@ use std::sync::Arc;
use tempfile::NamedTempFile;

mod bad_data;
mod predicate_cache;
#[cfg(feature = "crc")]
mod checksum;
mod statistics;
144 changes: 144 additions & 0 deletions parquet/tests/arrow_reader/predicate_cache.rs
@@ -0,0 +1,144 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//! Test for predicate cache in Parquet Arrow reader

use arrow::array::{ArrayRef, Int64Array};
use arrow_array::{RecordBatch, StringViewArray};
use bytes::Bytes;
use parquet::arrow::arrow_reader::{ArrowReaderOptions, ParquetRecordBatchReaderBuilder};
use parquet::arrow::ArrowWriter;
use parquet::file::properties::WriterProperties;
use std::sync::{Arc, LazyLock};

// 1. the predicate cache is not used when there are no filters
#[test]
fn test_cache_not_used_without_filters() {
    let test = ParquetPredicateCacheTest::new().with_expected_cache_used(false);
    let builder = test.sync_builder(ArrowReaderOptions::default());
    test.run(builder);
}

// Test (TODO, see sketch after this diff):
// 2. the predicate cache is used when there are filters but the cache size is 0
// 3. the predicate cache is used when there are filters and the cache size is greater than 0

/// A test parquet file
struct ParquetPredicateCacheTest {
    bytes: Bytes,
    expected_cache_used: bool,
}

impl ParquetPredicateCacheTest {
    /// Create a new `ParquetPredicateCacheTest` with:
    /// 3 columns: "a", "b", "c"
    ///
    /// 2 row groups, each with 200 rows
    /// each data page has 100 rows
    ///
    /// Values of column "a" are 0..399
    /// Values of column "b" are 400..799
    /// Values of column "c" are alternating strings of length 12 and longer
    fn new() -> Self {
        Self {
            bytes: TEST_FILE_DATA.clone(),
            expected_cache_used: false,
        }
    }

    /// Set whether the predicate cache is expected to be used
    fn with_expected_cache_used(mut self, used: bool) -> Self {
        self.expected_cache_used = used;
        self
    }

    /// Return a [`ParquetRecordBatchReaderBuilder`] for reading this file
    fn sync_builder(&self, options: ArrowReaderOptions) -> ParquetRecordBatchReaderBuilder<Bytes> {
        let reader = self.bytes.clone();
        ParquetRecordBatchReaderBuilder::try_new_with_options(reader, options)
            .expect("ParquetRecordBatchReaderBuilder")
    }

    /// Build the reader from the specified builder, read all batches from it,
    /// and assert that the predicate cache was used (or not) as expected
    fn run(&self, builder: ParquetRecordBatchReaderBuilder<Bytes>) {
        let reader = builder.build().unwrap();
        for batch in reader {
            match batch {
                Ok(_) => {}
                Err(e) => panic!("Error reading batch: {e}"),
            }
        }
        // TODO check if the cache was used
    }
}

/// Create a parquet file in memory for testing. See [`ParquetPredicateCacheTest::new`] for details.
static TEST_FILE_DATA: LazyLock<Bytes> = LazyLock::new(|| {
    // Input batch has 400 rows, with 3 columns: "a", "b", "c"
    // Note "c" is a different type (so the data page sizes will be different)
    let a: ArrayRef = Arc::new(Int64Array::from_iter_values(0..400));
    let b: ArrayRef = Arc::new(Int64Array::from_iter_values(400..800));
    let c: ArrayRef = Arc::new(StringViewArray::from_iter_values((0..400).map(|i| {
        if i % 2 == 0 {
            format!("string_{i}")
        } else {
            format!("A string larger than 12 bytes and thus not inlined {i}")
        }
    })));

    let input_batch = RecordBatch::try_from_iter(vec![("a", a), ("b", b), ("c", c)]).unwrap();

    let mut output = Vec::new();

    let writer_options = WriterProperties::builder()
        .set_max_row_group_size(200)
        .set_data_page_row_count_limit(100)
        .build();
    let mut writer =
        ArrowWriter::try_new(&mut output, input_batch.schema(), Some(writer_options)).unwrap();

    // since the limits are only enforced on batch boundaries, write the input
    // batch in chunks of 50
    let mut row_remain = input_batch.num_rows();
    while row_remain > 0 {
        let chunk_size = row_remain.min(50);
        let chunk = input_batch.slice(input_batch.num_rows() - row_remain, chunk_size);
        writer.write(&chunk).unwrap();
        row_remain -= chunk_size;
    }
    writer.close().unwrap();
    Bytes::from(output)
});
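
The numbered follow-ups earlier in the file ("Test:" items 2 and 3) need a reader that actually carries a predicate. As a rough, non-authoritative sketch of how the harness could be extended using the crate's existing RowFilter / ArrowPredicateFn APIs: the column choice, cutoff value, and expected cache usage are placeholders (a later commit in this PR notes the cache applies only to the async decoder), and the cache-usage assertion itself is still the TODO in run().

use arrow::array::{BooleanArray, Int64Array};
use parquet::arrow::arrow_reader::{ArrowPredicateFn, ArrowReaderOptions, RowFilter};
use parquet::arrow::ProjectionMask;

// Sketch of a follow-up test: attach a row filter on leaf column 0 ("a")
// so the predicate cache has work to do. Values here are illustrative.
#[test]
fn test_cache_with_filter_sketch() {
    let test = ParquetPredicateCacheTest::new().with_expected_cache_used(true);
    let builder = test.sync_builder(ArrowReaderOptions::default());

    // Keep only rows where "a" < 100
    let mask = ProjectionMask::leaves(builder.parquet_schema(), [0]);
    let predicate = ArrowPredicateFn::new(mask, |batch| {
        let a = batch
            .column(0)
            .as_any()
            .downcast_ref::<Int64Array>()
            .unwrap();
        Ok(BooleanArray::from_iter(
            a.iter().map(|v| Some(v.is_some_and(|v| v < 100))),
        ))
    });
    let builder = builder.with_row_filter(RowFilter::new(vec![Box::new(predicate)]));
    test.run(builder);
}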