files metadata table
Gowthami03B committed Apr 18, 2024
commit 79b7828bd73997d46fcf5c3aa9c418a8847e6fc4
52 changes: 52 additions & 0 deletions pyiceberg/table/__init__.py
@@ -3537,6 +3537,58 @@ def update_partitions_map(
            schema=table_schema,
        )

    def files(self) -> "pa.Table":
        """Return a PyArrow table describing the data files tracked by the table's current snapshot."""
        import pyarrow as pa

        files_schema = pa.schema([
            pa.field('content', pa.int8(), nullable=False),
            pa.field('file_path', pa.string(), nullable=False),
            pa.field('file_format', pa.string(), nullable=False),
            pa.field('record_count', pa.int64(), nullable=False),
            pa.field('file_size_in_bytes', pa.int64(), nullable=False),
            pa.field('column_sizes', pa.map_(pa.int32(), pa.int64()), nullable=True),
            pa.field('value_counts', pa.map_(pa.int32(), pa.int64()), nullable=True),
            pa.field('null_value_counts', pa.map_(pa.int32(), pa.int64()), nullable=True),
            pa.field('nan_value_counts', pa.map_(pa.int32(), pa.int64()), nullable=True),
            pa.field('lower_bounds', pa.map_(pa.int32(), pa.binary()), nullable=True),
            pa.field('upper_bounds', pa.map_(pa.int32(), pa.binary()), nullable=True),
            pa.field('key_metadata', pa.binary(), nullable=True),
            pa.field('split_offsets', pa.list_(pa.int64()), nullable=True),
            pa.field('equality_ids', pa.list_(pa.int32()), nullable=True),
        ])

        files = []

        snapshot = self.tbl.current_snapshot()
        if not snapshot:
            # No current snapshot: return an empty table with the expected schema.
            return pa.Table.from_pylist([], schema=files_schema)

        io = self.tbl.io
        for manifest_list in snapshot.manifests(io):
            for manifest_entry in manifest_list.fetch_manifest_entry(io):
                data_file = manifest_entry.data_file
                files.append({
                    'content': data_file.content,
                    'file_path': data_file.file_path,
                    'file_format': data_file.file_format,
                    'record_count': data_file.record_count,
                    'file_size_in_bytes': data_file.file_size_in_bytes,
                    # Metrics maps are optional on a data file; guard before converting to dict.
                    'column_sizes': dict(data_file.column_sizes) if data_file.column_sizes is not None else None,
                    'value_counts': dict(data_file.value_counts) if data_file.value_counts is not None else None,
                    'null_value_counts': dict(data_file.null_value_counts) if data_file.null_value_counts is not None else None,
                    'nan_value_counts': dict(data_file.nan_value_counts) if data_file.nan_value_counts is not None else None,
                    'lower_bounds': dict(data_file.lower_bounds) if data_file.lower_bounds is not None else None,
                    'upper_bounds': dict(data_file.upper_bounds) if data_file.upper_bounds is not None else None,
                    'key_metadata': data_file.key_metadata,
                    'split_offsets': data_file.split_offsets,
                    'equality_ids': data_file.equality_ids,
                })

        return pa.Table.from_pylist(
            files,
            schema=files_schema,
        )


@dataclass(frozen=True)
class TablePartition:
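For orientation, a minimal usage sketch of the new files metadata table (not part of this commit; the catalog name and table identifier below are assumptions, mirroring the integration test further down):

from pyiceberg.catalog import load_catalog

catalog = load_catalog("default")  # assumed catalog name
tbl = catalog.load_table("default.table_metadata_files")  # table created in the integration test below
files_df = tbl.inspect.files()  # PyArrow table with one row per data file in the current snapshot
print(files_df.column_names)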
2 changes: 1 addition & 1 deletion tests/conftest.py
@@ -2060,7 +2060,7 @@ def spark() -> "SparkSession":
.config("spark.sql.catalog.hive.warehouse", "s3://warehouse/hive/")
.config("spark.sql.catalog.hive.s3.endpoint", "http://localhost:9000")
.config("spark.sql.catalog.hive.s3.path-style-access", "true")
.config("spark.sql.execution.arrow.pyspark.enabled", "true")
.config("spark.sql.execution.arrow.pyspark.enabled", "false")
.getOrCreate()
)

60 changes: 60 additions & 0 deletions tests/integration/test_inspect_table.py
@@ -445,3 +445,63 @@ def check_pyiceberg_df_equals_spark_df(df: pa.Table, spark_df: DataFrame) -> None:
    df = tbl.inspect.partitions(snapshot_id=snapshot.snapshot_id)
    spark_df = spark.sql(f"SELECT * FROM {identifier}.partitions VERSION AS OF {snapshot.snapshot_id}")
    check_pyiceberg_df_equals_spark_df(df, spark_df)


@pytest.mark.integration
@pytest.mark.parametrize("format_version", [1, 2])
def test_inspect_files(
    spark: SparkSession, session_catalog: Catalog, arrow_table_with_null: pa.Table, format_version: int
) -> None:
    identifier = "default.table_metadata_files"
    tbl = _create_table(session_catalog, identifier, properties={"format-version": format_version})

    # write some data
    tbl.append(arrow_table_with_null)

    df = tbl.refresh().inspect.files()

    assert df.column_names == [
        'content',
        'file_path',
        'file_format',
        'record_count',
        'file_size_in_bytes',
        'column_sizes',
        'value_counts',
        'null_value_counts',
        'nan_value_counts',
        'lower_bounds',
        'upper_bounds',
        'key_metadata',
        'split_offsets',
        'equality_ids',
    ]

    for file_size_in_bytes in df['file_size_in_bytes']:
        assert isinstance(file_size_in_bytes.as_py(), int)

    for split_offsets in df['split_offsets']:
        assert isinstance(split_offsets.as_py(), list)

    for file_format in df['file_format']:
        assert file_format.as_py() == "PARQUET"

    for file_path in df['file_path']:
        assert file_path.as_py().startswith("s3://")

    lhs = spark.table(f"{identifier}.files").toPandas()
    rhs = df.to_pandas()
    for column in df.column_names:
        for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
            if column in [
                'column_sizes',
                'value_counts',
                'null_value_counts',
                'nan_value_counts',
                'lower_bounds',
                'upper_bounds',
            ]:
                # Arrow returns a list of (key, value) tuples instead of a dict for map columns
                right = dict(right)

            assert left == right, f"Difference in column {column}: {left} != {right}"
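
The dict(right) conversion in the test reflects how PyArrow surfaces map-typed values: as a list of (key, value) tuples rather than a dict. A standalone sketch of that behavior (independent of this PR, using ad-hoc sample data):

import pyarrow as pa

# Map-typed Arrow values convert to lists of (key, value) tuples in Python.
arr = pa.array([[(1, 10), (2, 20)]], type=pa.map_(pa.int32(), pa.int64()))
print(arr[0].as_py())        # [(1, 10), (2, 20)]
print(dict(arr[0].as_py()))  # {1: 10, 2: 20}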