Skip to content
Merged
Show file tree
Hide file tree
Changes from 30 commits
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
d4ca653
s/"main"/MAIN_BRANCH
kevinjqliu Jan 27, 2024
0b7aaaf
replace string literals
kevinjqliu Jan 27, 2024
23f04ec
default writes to main branch
kevinjqliu Jan 27, 2024
af6ff9a
Added some more methods for branches
vinjai Jul 18, 2024
6fbf3f1
s/"main"/MAIN_BRANCH
kevinjqliu Jan 27, 2024
8ce1509
replace string literals
kevinjqliu Jan 27, 2024
6daf29e
default writes to main branch
kevinjqliu Jan 27, 2024
09321cd
Added some more methods for branches
vinjai Jul 18, 2024
60fef31
Merged with master
vinjai Oct 12, 2024
45b01a6
Updated entries for branches
vinjai Oct 12, 2024
917108b
Resolved Merge Conflict
vinjai Oct 12, 2024
917b044
Fixed some bugs
vinjai Oct 14, 2024
398f6c0
Fixed bugs in delete and overwrite
vinjai Oct 15, 2024
b7b8ba0
Added tests and some refactoring
vinjai Oct 16, 2024
ee591b4
Added another integration test
vinjai Oct 16, 2024
e81907d
Fixed bug: concurrent same name branch and tag writes
vinjai Oct 16, 2024
4cf9198
Merge with main branch
vinjai Nov 13, 2024
bc6fb68
Added integration tests with spark
vinjai Nov 14, 2024
82e65e1
Fixed comments for AssertSnapshotRef
vinjai Feb 23, 2025
82e5b90
Fixed comments and linter issues
vinjai Feb 23, 2025
84d0971
Fixed comments
vinjai Feb 23, 2025
3efe53c
Fixed comments
vinjai Feb 23, 2025
dfedc63
Fixed a bug in tests
vinjai Feb 24, 2025
076a6d5
Fixed some more tests
vinjai Feb 24, 2025
53a7f84
Merge branch 'main' into feature/write-to-branch
vinjai May 25, 2025
e4463df
Fixed linter and code errors
vinjai May 25, 2025
49f75b4
Fixed bug for empty tables
vinjai May 26, 2025
4ed0607
Fixed bugs and added more tests
vinjai May 27, 2025
958aac4
changed design context for branch writes
vinjai May 27, 2025
a0aae4d
Merge branch 'main' into feature/write-to-branch
vinjai Jun 3, 2025
76249e9
Merge branch 'main' into feature/write-to-branch
vinjai Jun 23, 2025
079802a
Fixed linter, comments and other bugs
vinjai Jun 24, 2025
f45df8b
Usage of builder pattern
vinjai Jun 24, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions pyiceberg/cli/console.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@
from pyiceberg.cli.output import ConsoleOutput, JsonOutput, Output
from pyiceberg.exceptions import NoSuchNamespaceError, NoSuchPropertyException, NoSuchTableError
from pyiceberg.table import TableProperties
from pyiceberg.table.refs import SnapshotRef
from pyiceberg.table.refs import SnapshotRef, SnapshotRefType
from pyiceberg.utils.properties import property_as_int


Expand Down Expand Up @@ -419,7 +419,7 @@ def list_refs(ctx: Context, identifier: str, type: str, verbose: bool) -> None:
refs = table.refs()
if type:
type = type.lower()
if type not in {"branch", "tag"}:
if type not in {SnapshotRefType.BRANCH, SnapshotRefType.TAG}:
raise ValueError(f"Type must be either branch or tag, got: {type}")

relevant_refs = [
Expand All @@ -433,7 +433,7 @@ def list_refs(ctx: Context, identifier: str, type: str, verbose: bool) -> None:

def _retention_properties(ref: SnapshotRef, table_properties: Dict[str, str]) -> Dict[str, str]:
retention_properties = {}
if ref.snapshot_ref_type == "branch":
if ref.snapshot_ref_type == SnapshotRefType.BRANCH:
default_min_snapshots_to_keep = property_as_int(
table_properties,
TableProperties.MIN_SNAPSHOTS_TO_KEEP,
Expand Down
112 changes: 82 additions & 30 deletions pyiceberg/table/__init__.py

Large diffs are not rendered by default.

8 changes: 6 additions & 2 deletions pyiceberg/table/update/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@
from pyiceberg.partitioning import PARTITION_FIELD_ID_START, PartitionSpec
from pyiceberg.schema import Schema
from pyiceberg.table.metadata import SUPPORTED_TABLE_FORMAT_VERSION, TableMetadata, TableMetadataUtil
from pyiceberg.table.refs import MAIN_BRANCH, SnapshotRef
from pyiceberg.table.refs import MAIN_BRANCH, SnapshotRef, SnapshotRefType
from pyiceberg.table.snapshots import (
MetadataLogEntry,
Snapshot,
Expand Down Expand Up @@ -139,7 +139,7 @@ class AddSnapshotUpdate(IcebergBaseModel):
class SetSnapshotRefUpdate(IcebergBaseModel):
action: Literal["set-snapshot-ref"] = Field(default="set-snapshot-ref")
ref_name: str = Field(alias="ref-name")
type: Literal["tag", "branch"]
type: Literal[SnapshotRefType.TAG, SnapshotRefType.BRANCH]
snapshot_id: int = Field(alias="snapshot-id")
max_ref_age_ms: Annotated[Optional[int], Field(alias="max-ref-age-ms", default=None)]
max_snapshot_age_ms: Annotated[Optional[int], Field(alias="max-snapshot-age-ms", default=None)]
Expand Down Expand Up @@ -702,6 +702,10 @@ class AssertRefSnapshotId(ValidatableTableRequirement):
def validate(self, base_metadata: Optional[TableMetadata]) -> None:
if base_metadata is None:
raise CommitFailedException("Requirement failed: current table metadata is missing")
elif len(base_metadata.snapshots) == 0 and self.ref != MAIN_BRANCH:
raise CommitFailedException(
f"Requirement failed: Table has no snapshots and can only be written to the {MAIN_BRANCH} BRANCH."
)
elif snapshot_ref := base_metadata.refs.get(self.ref):
ref_type = snapshot_ref.snapshot_ref_type
if self.snapshot_id is None:
Expand Down
173 changes: 110 additions & 63 deletions pyiceberg/table/update/snapshot.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@
from pyiceberg.partitioning import (
PartitionSpec,
)
from pyiceberg.table.refs import MAIN_BRANCH, SnapshotRefType
from pyiceberg.table.snapshots import (
Operation,
Snapshot,
Expand Down Expand Up @@ -104,6 +105,7 @@ class _SnapshotProducer(UpdateTableMetadata[U], Generic[U]):
_added_data_files: List[DataFile]
_manifest_num_counter: itertools.count[int]
_deleted_data_files: Set[DataFile]
_target_branch = MAIN_BRANCH

def __init__(
self,
Expand All @@ -112,20 +114,31 @@ def __init__(
io: FileIO,
commit_uuid: Optional[uuid.UUID] = None,
snapshot_properties: Dict[str, str] = EMPTY_DICT,
branch: str = MAIN_BRANCH,
) -> None:
super().__init__(transaction)
self.commit_uuid = commit_uuid or uuid.uuid4()
self._io = io
self._operation = operation
self._snapshot_id = self._transaction.table_metadata.new_snapshot_id()
# Since we only support the main branch for now
self._parent_snapshot_id = (
snapshot.snapshot_id if (snapshot := self._transaction.table_metadata.current_snapshot()) else None
)
self._added_data_files = []
self._deleted_data_files = set()
self.snapshot_properties = snapshot_properties
self._manifest_num_counter = itertools.count(0)
self._target_branch = self._validate_target_branch(branch=branch)
self._parent_snapshot_id = (
snapshot.snapshot_id if (snapshot := self._transaction.table_metadata.snapshot_by_name(self._target_branch)) else None
)

def _validate_target_branch(self, branch: str) -> str:
# Default is already set to MAIN_BRANCH. So branch name can't be None.
if branch is None:
raise ValueError("Invalid branch name: null")
if branch in self._transaction.table_metadata.refs:
ref = self._transaction.table_metadata.refs[branch]
if ref.snapshot_ref_type != SnapshotRefType.BRANCH:
raise ValueError(f"{branch} is a tag, not a branch. Tags cannot be targets for producing snapshots")
return branch

def append_data_file(self, data_file: DataFile) -> _SnapshotProducer[U]:
self._added_data_files.append(data_file)
Expand Down Expand Up @@ -271,10 +284,20 @@ def _commit(self) -> UpdatesAndRequirements:
(
AddSnapshotUpdate(snapshot=snapshot),
SetSnapshotRefUpdate(
snapshot_id=self._snapshot_id, parent_snapshot_id=self._parent_snapshot_id, ref_name="main", type="branch"
snapshot_id=self._snapshot_id,
parent_snapshot_id=self._parent_snapshot_id,
ref_name=self._target_branch,
type=SnapshotRefType.BRANCH,
),
),
(
AssertRefSnapshotId(
snapshot_id=self._transaction.table_metadata.refs[self._target_branch].snapshot_id
if self._target_branch in self._transaction.table_metadata.refs
else None,
ref=self._target_branch,
),
),
(AssertRefSnapshotId(snapshot_id=self._transaction.table_metadata.current_snapshot_id, ref="main"),),
)

@property
Expand Down Expand Up @@ -321,10 +344,11 @@ def __init__(
operation: Operation,
transaction: Transaction,
io: FileIO,
branch: str,
commit_uuid: Optional[uuid.UUID] = None,
snapshot_properties: Dict[str, str] = EMPTY_DICT,
):
super().__init__(operation, transaction, io, commit_uuid, snapshot_properties)
super().__init__(operation, transaction, io, commit_uuid, snapshot_properties, branch)
self._predicate = AlwaysFalse()
self._case_sensitive = True

Expand Down Expand Up @@ -384,46 +408,52 @@ def _copy_with_new_status(entry: ManifestEntry, status: ManifestEntryStatus) ->
total_deleted_entries = []
partial_rewrites_needed = False
self._deleted_data_files = set()
if snapshot := self._transaction.table_metadata.current_snapshot():
for manifest_file in snapshot.manifests(io=self._io):
if manifest_file.content == ManifestContent.DATA:
if not manifest_evaluators[manifest_file.partition_spec_id](manifest_file):
# If the manifest isn't relevant, we can just keep it in the manifest-list
existing_manifests.append(manifest_file)
else:
# It is relevant, let's check out the content
deleted_entries = []
existing_entries = []
for entry in manifest_file.fetch_manifest_entry(io=self._io, discard_deleted=True):
if strict_metrics_evaluator(entry.data_file) == ROWS_MUST_MATCH:
# Based on the metadata, it can be dropped right away
deleted_entries.append(_copy_with_new_status(entry, ManifestEntryStatus.DELETED))
self._deleted_data_files.add(entry.data_file)
else:
# Based on the metadata, we cannot determine if it can be deleted
existing_entries.append(_copy_with_new_status(entry, ManifestEntryStatus.EXISTING))
if inclusive_metrics_evaluator(entry.data_file) != ROWS_MIGHT_NOT_MATCH:
partial_rewrites_needed = True

if len(deleted_entries) > 0:
total_deleted_entries += deleted_entries

# Rewrite the manifest
if len(existing_entries) > 0:
with write_manifest(
format_version=self._transaction.table_metadata.format_version,
spec=self._transaction.table_metadata.specs()[manifest_file.partition_spec_id],
schema=self._transaction.table_metadata.schema(),
output_file=self.new_manifest_output(),
snapshot_id=self._snapshot_id,
) as writer:
for existing_entry in existing_entries:
writer.add_entry(existing_entry)
existing_manifests.append(writer.to_manifest_file())
else:

# Determine the snapshot to read manifests from for deletion
# Should be the current tip of the _target_branch
parent_snapshot_id_for_delete_source = self._parent_snapshot_id
if parent_snapshot_id_for_delete_source is not None:
snapshot = self._transaction.table_metadata.snapshot_by_id(parent_snapshot_id_for_delete_source)
if snapshot: # Ensure snapshot is found
for manifest_file in snapshot.manifests(io=self._io):
if manifest_file.content == ManifestContent.DATA:
if not manifest_evaluators[manifest_file.partition_spec_id](manifest_file):
# If the manifest isn't relevant, we can just keep it in the manifest-list
existing_manifests.append(manifest_file)
else:
existing_manifests.append(manifest_file)
else:
# It is relevant, let's check out the content
deleted_entries = []
existing_entries = []
for entry in manifest_file.fetch_manifest_entry(io=self._io, discard_deleted=True):
if strict_metrics_evaluator(entry.data_file) == ROWS_MUST_MATCH:
# Based on the metadata, it can be dropped right away
deleted_entries.append(_copy_with_new_status(entry, ManifestEntryStatus.DELETED))
self._deleted_data_files.add(entry.data_file)
else:
# Based on the metadata, we cannot determine if it can be deleted
existing_entries.append(_copy_with_new_status(entry, ManifestEntryStatus.EXISTING))
if inclusive_metrics_evaluator(entry.data_file) != ROWS_MIGHT_NOT_MATCH:
partial_rewrites_needed = True

if len(deleted_entries) > 0:
total_deleted_entries += deleted_entries

# Rewrite the manifest
if len(existing_entries) > 0:
with write_manifest(
format_version=self._transaction.table_metadata.format_version,
spec=self._transaction.table_metadata.specs()[manifest_file.partition_spec_id],
schema=self._transaction.table_metadata.schema(),
output_file=self.new_manifest_output(),
snapshot_id=self._snapshot_id,
) as writer:
for existing_entry in existing_entries:
writer.add_entry(existing_entry)
existing_manifests.append(writer.to_manifest_file())
else:
existing_manifests.append(manifest_file)
else:
existing_manifests.append(manifest_file)

return existing_manifests, total_deleted_entries, partial_rewrites_needed

Expand Down Expand Up @@ -483,12 +513,13 @@ def __init__(
operation: Operation,
transaction: Transaction,
io: FileIO,
branch: str,
commit_uuid: Optional[uuid.UUID] = None,
snapshot_properties: Dict[str, str] = EMPTY_DICT,
) -> None:
from pyiceberg.table import TableProperties

super().__init__(operation, transaction, io, commit_uuid, snapshot_properties)
super().__init__(operation, transaction, io, commit_uuid, snapshot_properties, branch)
self._target_size_bytes = property_as_int(
self._transaction.table_metadata.properties,
TableProperties.MANIFEST_TARGET_SIZE_BYTES,
Expand Down Expand Up @@ -534,7 +565,7 @@ def _existing_manifests(self) -> List[ManifestFile]:
"""Determine if there are any existing manifest files."""
existing_files = []

if snapshot := self._transaction.table_metadata.current_snapshot():
if snapshot := self._transaction.table_metadata.snapshot_by_name(name=self._target_branch):
for manifest_file in snapshot.manifests(io=self._io):
entries = manifest_file.fetch_manifest_entry(io=self._io, discard_deleted=True)
found_deleted_data_files = [entry.data_file for entry in entries if entry.data_file in self._deleted_data_files]
Expand All @@ -551,19 +582,17 @@ def _existing_manifests(self) -> List[ManifestFile]:
output_file=self.new_manifest_output(),
snapshot_id=self._snapshot_id,
) as writer:
[
writer.add_entry(
ManifestEntry.from_args(
status=ManifestEntryStatus.EXISTING,
snapshot_id=entry.snapshot_id,
sequence_number=entry.sequence_number,
file_sequence_number=entry.file_sequence_number,
data_file=entry.data_file,
for entry in entries:
if entry.data_file not in found_deleted_data_files:
writer.add_entry(
ManifestEntry.from_args(
status=ManifestEntryStatus.EXISTING,
snapshot_id=entry.snapshot_id,
sequence_number=entry.sequence_number,
file_sequence_number=entry.file_sequence_number,
data_file=entry.data_file,
)
)
)
for entry in entries
if entry.data_file not in found_deleted_data_files
]
existing_files.append(writer.to_manifest_file())
return existing_files

Expand Down Expand Up @@ -604,21 +633,37 @@ def _get_entries(manifest: ManifestFile) -> List[ManifestEntry]:
class UpdateSnapshot:
_transaction: Transaction
_io: FileIO
_branch: str
_snapshot_properties: Dict[str, str]

def __init__(self, transaction: Transaction, io: FileIO, snapshot_properties: Dict[str, str] = EMPTY_DICT) -> None:
def __init__(
    self,
    transaction: Transaction,
    io: FileIO,
    branch: str,
    snapshot_properties: Dict[str, str] = EMPTY_DICT,
) -> None:
    """Bind this snapshot-update facade to a transaction, file IO, and target branch.

    Args:
        transaction: The transaction this update participates in.
        io: FileIO used to read and write manifests.
        branch: Name of the branch new snapshots will be committed to.
        snapshot_properties: Extra properties attached to produced snapshots.
    """
    self._transaction = transaction
    self._io = io
    self._snapshot_properties = snapshot_properties
    self._branch = branch

def fast_append(self) -> _FastAppendFiles:
    """Return a producer that appends data files via new manifests only."""
    producer_kwargs = dict(
        transaction=self._transaction,
        io=self._io,
        branch=self._branch,
        snapshot_properties=self._snapshot_properties,
    )
    return _FastAppendFiles(operation=Operation.APPEND, **producer_kwargs)

def merge_append(self) -> _MergeAppendFiles:
    """Return a producer that appends data files and may merge small manifests."""
    producer_kwargs = dict(
        transaction=self._transaction,
        io=self._io,
        branch=self._branch,
        snapshot_properties=self._snapshot_properties,
    )
    return _MergeAppendFiles(operation=Operation.APPEND, **producer_kwargs)

def overwrite(self, commit_uuid: Optional[uuid.UUID] = None) -> _OverwriteFiles:
Expand All @@ -629,6 +674,7 @@ def overwrite(self, commit_uuid: Optional[uuid.UUID] = None) -> _OverwriteFiles:
else Operation.APPEND,
transaction=self._transaction,
io=self._io,
branch=self._branch,
snapshot_properties=self._snapshot_properties,
)

Expand All @@ -637,6 +683,7 @@ def delete(self) -> _DeleteFiles:
operation=Operation.DELETE,
transaction=self._transaction,
io=self._io,
branch=self._branch,
snapshot_properties=self._snapshot_properties,
)

Expand Down
10 changes: 5 additions & 5 deletions pyiceberg/utils/concurrent.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,11 @@
class ExecutorFactory:
_instance: Optional[Executor] = None

@staticmethod
def max_workers() -> Optional[int]:
    """Return the configured ``max-workers`` setting, or ``None`` when unset."""
    config = Config()
    return config.get_int("max-workers")

@staticmethod
def get_or_create() -> Executor:
"""Return the same executor in each call."""
Expand All @@ -33,8 +38,3 @@ def get_or_create() -> Executor:
ExecutorFactory._instance = ThreadPoolExecutor(max_workers=max_workers)

return ExecutorFactory._instance

@staticmethod
def max_workers() -> Optional[int]:
"""Return the max number of workers configured."""
return Config().get_int("max-workers")
Loading