[SPARK-15698][SQL][Streaming] Add the ability to remove the old MetadataLog in FileStreamSource #13513
Closed
Changes from 1 commit
Commits (10)
6cc43a3 Add the ability to remove the old MetadataLog in FileStreamSource (jerryshao)
b1299dd Fix flaky test (jerryshao)
5300d9d refactor according to comments (jerryshao)
4187999 fix compile error (jerryshao)
fb5a72c Remove white space (jerryshao)
bbf7663 Address the comments (jerryshao)
56a00ae readd the test (jerryshao)
be1abfa Address the comments (jerryshao)
bddbc7f Address comments (jerryshao)
84d3d27 Fix test compile issue (jerryshao)
Viewing commit 4187999cc52d8e29e00032f37ead5718c29694a6: fix compile error
...re/src/main/scala/org/apache/spark/sql/execution/streaming/CompactibleFileStreamLog.scala (249 additions, 0 deletions)
```scala
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.execution.streaming

import java.io.IOException
import java.nio.charset.StandardCharsets.UTF_8
import java.util.concurrent.TimeUnit

import scala.reflect.ClassTag

import org.apache.hadoop.fs.{Path, PathFilter}

import org.apache.spark.sql.SparkSession

/**
 * An abstract class for compactible metadata logs. It will write one log file for each batch.
 * The first line of the log file is the version number, and there are multiple JSON lines
 * following.
 *
 * As reading from many small files is usually pretty slow, and too many small files in one
 * folder also put pressure on the file system, [[CompactibleFileStreamLog]] will compact log
 * files every 10 batches by default into a big file. When doing a compaction, it will read all
 * old log files and merge them with the new batch.
 */
abstract class CompactibleFileStreamLog[T: ClassTag](
    sparkSession: SparkSession,
    path: String)
  extends HDFSMetadataLog[Array[T]](sparkSession, path) {

  import CompactibleFileStreamLog._

  /**
   * If we delete the old files after compaction at once, there is a race condition in S3: other
   * processes may see the old files are deleted but still cannot see the compaction file using
   * "list". The `allFiles` handles this by looking for the next compaction file directly,
   * however, a live lock may happen if the compaction happens too frequently: one process keeps
   * deleting old files while another one keeps retrying. Setting a reasonable cleanup delay
   * could avoid it.
   */
  protected val fileCleanupDelayMs = TimeUnit.MINUTES.toMillis(10)

  protected val isDeletingExpiredLog = true

  protected val compactInterval = 10

  /**
   * Serialize the data into an encoded string.
   */
  protected def serializeData(t: T): String

  /**
   * Deserialize the string into a data object.
   */
  protected def deserializeData(encodedString: String): T

  /**
   * Filter out the unwanted logs. By default it filters out nothing; a subclass can override
   * this method to do filtering.
   */
  protected def compactLogs(oldLogs: Seq[T], newLogs: Seq[T]): Seq[T] = {
    oldLogs ++ newLogs
  }

  override def batchIdToPath(batchId: Long): Path = {
    if (isCompactionBatch(batchId, compactInterval)) {
      new Path(metadataPath, s"$batchId$COMPACT_FILE_SUFFIX")
    } else {
      new Path(metadataPath, batchId.toString)
    }
  }

  override def pathToBatchId(path: Path): Long = {
    getBatchIdFromFileName(path.getName)
  }

  override def isBatchFile(path: Path): Boolean = {
    try {
      getBatchIdFromFileName(path.getName)
      true
    } catch {
      case _: NumberFormatException => false
    }
  }

  override def serialize(logData: Array[T]): Array[Byte] = {
    (VERSION +: logData.map(serializeData)).mkString("\n").getBytes(UTF_8)
  }

  override def deserialize(bytes: Array[Byte]): Array[T] = {
    val lines = new String(bytes, UTF_8).split("\n")
    if (lines.length == 0) {
      throw new IllegalStateException("Incomplete log file")
    }
    val version = lines(0)
    if (version != VERSION) {
      throw new IllegalStateException(s"Unknown log version: ${version}")
    }
    lines.slice(1, lines.length).map(deserializeData)
  }

  override def add(batchId: Long, logs: Array[T]): Boolean = {
    if (isCompactionBatch(batchId, compactInterval)) {
      compact(batchId, logs)
    } else {
      super.add(batchId, logs)
    }
  }

  /**
   * Compacts all logs before `batchId` plus the provided `logs`, and writes them into the
   * corresponding `batchId` file. It will delete expired files as well if enabled.
   */
  private def compact(batchId: Long, logs: Array[T]): Boolean = {
    val validBatches = getValidBatchesBeforeCompactionBatch(batchId, compactInterval)
    val allLogs = validBatches.flatMap(batchId => get(batchId)).flatten
    if (super.add(batchId, compactLogs(allLogs, logs).toArray)) {
      if (isDeletingExpiredLog) {
        deleteExpiredLog(batchId)
      }
      true
    } else {
      // Return false as there is another writer.
      false
    }
  }

  /**
   * Returns all files except the deleted ones.
   */
  def allFiles(): Array[T] = {
    var latestId = getLatest().map(_._1).getOrElse(-1L)
    // There is a race condition when `FileStreamSink` is deleting old files and
    // `StreamFileCatalog` is calling this method. This loop will retry the reading to deal
    // with the race condition.
    while (true) {
      if (latestId >= 0) {
        val startId = getAllValidBatches(latestId, compactInterval)(0)
        try {
          val logs = super.get(Some(startId), Some(latestId)).flatMap(_._2)
          return compactLogs(logs, Seq.empty).toArray
        } catch {
          case e: IOException =>
            // Another process using `FileStreamSink` may delete the batch files when
            // `StreamFileCatalog` is reading. However, it only happens when a compaction is
            // deleting old files. If so, let's try the next compaction batch and we should
            // find it. Otherwise, this is a real IO issue and we should throw it.
            latestId = nextCompactionBatchId(latestId, compactInterval)
            get(latestId).getOrElse {
              throw e
            }
        }
      } else {
        return Array.empty
      }
    }
    Array.empty
  }

  /**
   * Since all logs before `compactionBatchId` are compacted and written into the
   * `compactionBatchId` log file, they can be removed. However, due to the eventual consistency
   * of S3, the compaction file may not be seen by other processes at once. So we only delete
   * files created at least `fileCleanupDelayMs` milliseconds ago.
   */
  private def deleteExpiredLog(compactionBatchId: Long): Unit = {
    val expiredTime = System.currentTimeMillis() - fileCleanupDelayMs
    fileManager.list(metadataPath, new PathFilter {
      override def accept(path: Path): Boolean = {
        try {
          val batchId = getBatchIdFromFileName(path.getName)
          batchId < compactionBatchId
        } catch {
          case _: NumberFormatException =>
            false
        }
      }
    }).foreach { f =>
      if (f.getModificationTime <= expiredTime) {
        fileManager.delete(f.getPath)
      }
    }
  }
}

object CompactibleFileStreamLog {
  val VERSION = "v1"
  val COMPACT_FILE_SUFFIX = ".compact"

  def getBatchIdFromFileName(fileName: String): Long = {
    fileName.stripSuffix(COMPACT_FILE_SUFFIX).toLong
  }

  /**
   * Returns if this is a compaction batch. FileStreamSinkLog will compact old logs every
   * `compactInterval` commits.
   *
   * E.g., if `compactInterval` is 3, then 2, 5, 8, ... are all compaction batches.
   */
  def isCompactionBatch(batchId: Long, compactInterval: Int): Boolean = {
    (batchId + 1) % compactInterval == 0
  }

  /**
   * Returns all valid batches before the specified `compactionBatchId`. They contain all logs
   * we need to do a new compaction.
   *
   * E.g., if `compactInterval` is 3 and `compactionBatchId` is 5, this method should return
   * `Seq(2, 3, 4)` (Note: it includes the previous compaction batch 2).
   */
  def getValidBatchesBeforeCompactionBatch(
      compactionBatchId: Long,
      compactInterval: Int): Seq[Long] = {
    assert(isCompactionBatch(compactionBatchId, compactInterval),
      s"$compactionBatchId is not a compaction batch")
    (math.max(0, compactionBatchId - compactInterval)) until compactionBatchId
  }

  /**
   * Returns all necessary logs before `batchId` (inclusive). If `batchId` is a compaction
   * batch, just return itself. Otherwise, it will find the previous compaction batch and
   * return all batches between it and `batchId`.
   */
  def getAllValidBatches(batchId: Long, compactInterval: Long): Seq[Long] = {
    assert(batchId >= 0)
    val start = math.max(0, (batchId + 1) / compactInterval * compactInterval - 1)
    start to batchId
  }

  /**
   * Returns the next compaction batch id after `batchId`.
   */
  def nextCompactionBatchId(batchId: Long, compactInterval: Long): Long = {
    (batchId + compactInterval + 1) / compactInterval * compactInterval - 1
  }
}
```
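The compaction schedule comes down to the modular arithmetic in the companion object. A small REPL-style sketch (illustrative only, not part of this diff) exercising those helpers with `compactInterval = 3`, the value used in the Scaladoc examples:

```scala
import org.apache.spark.sql.execution.streaming.CompactibleFileStreamLog._

// With compactInterval = 3, batch ids 2, 5, 8, ... trigger compaction.
assert(isCompactionBatch(5, 3))
assert(!isCompactionBatch(6, 3))

// Compacting batch 5 folds in the previous compaction batch (2) plus batches 3 and 4.
assert(getValidBatchesBeforeCompactionBatch(5, 3) == Seq(2L, 3L, 4L))

// Reading the state at batch 4 needs the previous compaction file (2) and everything after it.
assert(getAllValidBatches(4, 3) == Seq(2L, 3L, 4L))

// If a read at batch 3 hits an IOException, allFiles() retries at the next compaction batch, 5.
assert(nextCompactionBatchId(3, 3) == 5L)
```

With the default `compactInterval = 10` in this patch, the compaction batches would be 9, 19, 29, and so on.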
Review comment: Could you make VERSION a constructor parameter, in order to support changing the source or sink format separately?
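For illustration, the change being suggested might look roughly like the following sketch (hypothetical, not code from this PR): each concrete log class would pass its own version string instead of sharing the `CompactibleFileStreamLog.VERSION` constant, so the sink log and a future source log could bump their formats independently.

```scala
package org.apache.spark.sql.execution.streaming

import java.nio.charset.StandardCharsets.UTF_8

import scala.reflect.ClassTag

import org.apache.spark.sql.SparkSession

// Hypothetical variant: `version` is a constructor parameter supplied by each
// subclass (e.g. "v1" for the sink log, possibly "v2" later for the source log).
abstract class CompactibleFileStreamLog[T: ClassTag](
    version: String,
    sparkSession: SparkSession,
    path: String)
  extends HDFSMetadataLog[Array[T]](sparkSession, path) {

  protected def serializeData(t: T): String

  override def serialize(logData: Array[T]): Array[Byte] = {
    // The per-subclass version string, rather than a global constant, heads the file.
    (version +: logData.map(serializeData)).mkString("\n").getBytes(UTF_8)
  }

  // deserialize would likewise validate the first line against `version`;
  // the remaining members would stay as in the diff above.
}
```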